path: root/contrib/python/ipython/py3
author     nkozlovskiy <nmk@ydb.tech>  2023-09-29 12:24:06 +0300
committer  nkozlovskiy <nmk@ydb.tech>  2023-09-29 12:41:34 +0300
commit     e0e3e1717e3d33762ce61950504f9637a6e669ed (patch)
tree       bca3ff6939b10ed60c3d5c12439963a1146b9711 /contrib/python/ipython/py3
parent     38f2c5852db84c7b4d83adfcb009eb61541d1ccd (diff)
download   ydb-e0e3e1717e3d33762ce61950504f9637a6e669ed.tar.gz
add ydb deps
Diffstat (limited to 'contrib/python/ipython/py3')
-rw-r--r--  contrib/python/ipython/py3/.dist-info/METADATA  147
-rw-r--r--  contrib/python/ipython/py3/.dist-info/entry_points.txt  8
-rw-r--r--  contrib/python/ipython/py3/.dist-info/top_level.txt  1
-rw-r--r--  contrib/python/ipython/py3/COPYING.rst  41
-rw-r--r--  contrib/python/ipython/py3/IPython/__init__.py  162
-rw-r--r--  contrib/python/ipython/py3/IPython/__main__.py  15
-rw-r--r--  contrib/python/ipython/py3/IPython/consoleapp.py  12
-rw-r--r--  contrib/python/ipython/py3/IPython/core/__init__.py  0
-rw-r--r--  contrib/python/ipython/py3/IPython/core/alias.py  258
-rw-r--r--  contrib/python/ipython/py3/IPython/core/application.py  488
-rw-r--r--  contrib/python/ipython/py3/IPython/core/async_helpers.py  156
-rw-r--r--  contrib/python/ipython/py3/IPython/core/autocall.py  70
-rw-r--r--  contrib/python/ipython/py3/IPython/core/builtin_trap.py  86
-rw-r--r--  contrib/python/ipython/py3/IPython/core/compilerop.py  214
-rw-r--r--  contrib/python/ipython/py3/IPython/core/completer.py  3347
-rw-r--r--  contrib/python/ipython/py3/IPython/core/completerlib.py  418
-rw-r--r--  contrib/python/ipython/py3/IPython/core/crashhandler.py  236
-rw-r--r--  contrib/python/ipython/py3/IPython/core/debugger.py  997
-rw-r--r--  contrib/python/ipython/py3/IPython/core/display.py  1290
-rw-r--r--  contrib/python/ipython/py3/IPython/core/display_functions.py  391
-rw-r--r--  contrib/python/ipython/py3/IPython/core/display_trap.py  70
-rw-r--r--  contrib/python/ipython/py3/IPython/core/displayhook.py  331
-rw-r--r--  contrib/python/ipython/py3/IPython/core/displaypub.py  138
-rw-r--r--  contrib/python/ipython/py3/IPython/core/error.py  60
-rw-r--r--  contrib/python/ipython/py3/IPython/core/events.py  166
-rw-r--r--  contrib/python/ipython/py3/IPython/core/excolors.py  165
-rw-r--r--  contrib/python/ipython/py3/IPython/core/extensions.py  151
-rw-r--r--  contrib/python/ipython/py3/IPython/core/formatters.py  1028
-rw-r--r--  contrib/python/ipython/py3/IPython/core/getipython.py  24
-rw-r--r--  contrib/python/ipython/py3/IPython/core/guarded_eval.py  733
-rw-r--r--  contrib/python/ipython/py3/IPython/core/history.py  968
-rw-r--r--  contrib/python/ipython/py3/IPython/core/historyapp.py  161
-rw-r--r--  contrib/python/ipython/py3/IPython/core/hooks.py  173
-rw-r--r--  contrib/python/ipython/py3/IPython/core/inputsplitter.py  773
-rw-r--r--  contrib/python/ipython/py3/IPython/core/inputtransformer.py  536
-rw-r--r--  contrib/python/ipython/py3/IPython/core/inputtransformer2.py  797
-rw-r--r--  contrib/python/ipython/py3/IPython/core/interactiveshell.py  3910
-rw-r--r--  contrib/python/ipython/py3/IPython/core/latex_symbols.py  1301
-rw-r--r--  contrib/python/ipython/py3/IPython/core/logger.py  227
-rw-r--r--  contrib/python/ipython/py3/IPython/core/macro.py  53
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magic.py  757
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magic_arguments.py  310
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/__init__.py  42
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/auto.py  144
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/basic.py  663
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/code.py  755
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/config.py  140
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/display.py  93
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/execution.py  1522
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/extension.py  63
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/history.py  338
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/logging.py  195
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/namespace.py  711
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/osm.py  855
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/packaging.py  112
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/pylab.py  169
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/script.py  371
-rw-r--r--  contrib/python/ipython/py3/IPython/core/oinspect.py  1171
-rw-r--r--  contrib/python/ipython/py3/IPython/core/page.py  348
-rw-r--r--  contrib/python/ipython/py3/IPython/core/payload.py  55
-rw-r--r--  contrib/python/ipython/py3/IPython/core/payloadpage.py  51
-rw-r--r--  contrib/python/ipython/py3/IPython/core/prefilter.py  700
-rw-r--r--  contrib/python/ipython/py3/IPython/core/profile/README_STARTUP  11
-rw-r--r--  contrib/python/ipython/py3/IPython/core/profileapp.py  312
-rw-r--r--  contrib/python/ipython/py3/IPython/core/profiledir.py  223
-rw-r--r--  contrib/python/ipython/py3/IPython/core/prompts.py  21
-rw-r--r--  contrib/python/ipython/py3/IPython/core/pylabtools.py  425
-rw-r--r--  contrib/python/ipython/py3/IPython/core/release.py  54
-rw-r--r--  contrib/python/ipython/py3/IPython/core/shellapp.py  451
-rw-r--r--  contrib/python/ipython/py3/IPython/core/splitinput.py  138
-rw-r--r--  contrib/python/ipython/py3/IPython/core/ultratb.py  1518
-rw-r--r--  contrib/python/ipython/py3/IPython/core/usage.py  341
-rw-r--r--  contrib/python/ipython/py3/IPython/display.py  44
-rw-r--r--  contrib/python/ipython/py3/IPython/extensions/__init__.py  2
-rw-r--r--  contrib/python/ipython/py3/IPython/extensions/autoreload.py  727
-rw-r--r--  contrib/python/ipython/py3/IPython/extensions/storemagic.py  236
-rw-r--r--  contrib/python/ipython/py3/IPython/external/__init__.py  7
-rw-r--r--  contrib/python/ipython/py3/IPython/external/qt_for_kernel.py  124
-rw-r--r--  contrib/python/ipython/py3/IPython/external/qt_loaders.py  410
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/__init__.py  11
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/backgroundjobs.py  491
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/clipboard.py  101
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/deepreload.py  310
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/demo.py  672
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/display.py  677
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/editorhooks.py  127
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/guisupport.py  155
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/latextools.py  257
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/lexers.py  540
-rw-r--r--  contrib/python/ipython/py3/IPython/lib/pretty.py  953
-rw-r--r--  contrib/python/ipython/py3/IPython/paths.py  125
-rw-r--r--  contrib/python/ipython/py3/IPython/py.typed  0
-rw-r--r--  contrib/python/ipython/py3/IPython/sphinxext/__init__.py  0
-rw-r--r--  contrib/python/ipython/py3/IPython/sphinxext/custom_doctests.py  155
-rw-r--r--  contrib/python/ipython/py3/IPython/sphinxext/ipython_console_highlighting.py  28
-rw-r--r--  contrib/python/ipython/py3/IPython/sphinxext/ipython_directive.py  1272
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/__init__.py  0
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/console.py  19
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/debugger.py  177
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/embed.py  420
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/interactiveshell.py  993
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/ipapp.py  343
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/magics.py  214
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/prompts.py  108
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/__init__.py  138
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/asyncio.py  62
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/glut.py  140
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk.py  60
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk3.py  14
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk4.py  27
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/osx.py  157
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/pyglet.py  66
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/qt.py  86
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/tk.py  90
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/wx.py  219
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/ptutils.py  204
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/shortcuts/__init__.py  630
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_match.py  104
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py  401
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/shortcuts/filters.py  322
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/__init__.py  20
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/decorators.py  201
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/globalipapp.py  114
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/ipunittest.py  178
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/README.txt  34
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/__init__.py  0
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/dtexample.py  167
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/ipdoctest.py  299
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/pytest_ipdoctest.py  859
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/setup.py  18
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/simple.py  44
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/simplevars.py  2
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/test_combo.txt  36
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/test_example.txt  24
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/test_exampleip.txt  30
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/test_ipdoctest.py  92
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/plugin/test_refs.py  39
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/skipdoctest.py  19
-rw-r--r--  contrib/python/ipython/py3/IPython/testing/tools.py  476
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/PyColorize.py  331
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/__init__.py  0
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_process_cli.py  69
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_process_common.py  210
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_process_posix.py  216
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_process_win32.py  184
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_process_win32_controller.py  573
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_sysinfo.py  2
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/capture.py  170
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/colorable.py  25
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/coloransi.py  187
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/contexts.py  61
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/daemonize.py  4
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/data.py  30
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/decorators.py  83
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/dir2.py  84
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/docs.py  3
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/encoding.py  71
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/eventful.py  5
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/frame.py  92
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/generics.py  29
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/importstring.py  39
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/io.py  151
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/ipstruct.py  379
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/jsonutil.py  5
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/localinterfaces.py  5
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/log.py  5
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/module_paths.py  70
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/openpy.py  105
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/path.py  391
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/process.py  69
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/py3compat.py  67
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/sentinel.py  17
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/shimmodule.py  89
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/signatures.py  12
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/strdispatch.py  68
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/sysinfo.py  142
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/syspathcontext.py  71
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/tempdir.py  59
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/terminal.py  125
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/text.py  752
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/timing.py  123
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/tokenutil.py  127
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/traitlets.py  6
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/tz.py  48
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/ulinecache.py  21
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/version.py  43
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/wildcard.py  111
-rw-r--r--  contrib/python/ipython/py3/LICENSE  33
-rw-r--r--  contrib/python/ipython/py3/README.rst  174
-rw-r--r--  contrib/python/ipython/py3/bin/ya.make  11
-rw-r--r--  contrib/python/ipython/py3/ya.make  237
191 files changed, 54215 insertions, 0 deletions
diff --git a/contrib/python/ipython/py3/.dist-info/METADATA b/contrib/python/ipython/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..694e2535e3
--- /dev/null
+++ b/contrib/python/ipython/py3/.dist-info/METADATA
@@ -0,0 +1,147 @@
+Metadata-Version: 2.1
+Name: ipython
+Version: 8.14.0
+Summary: IPython: Productive Interactive Computing
+Home-page: https://ipython.org
+Author: The IPython Development Team
+Author-email: ipython-dev@python.org
+License: BSD-3-Clause
+Project-URL: Documentation, https://ipython.readthedocs.io/
+Project-URL: Funding, https://numfocus.org/
+Project-URL: Source, https://github.com/ipython/ipython
+Project-URL: Tracker, https://github.com/ipython/ipython/issues
+Keywords: Interactive,Interpreter,Shell,Embedding
+Platform: Linux
+Platform: Mac OSX
+Platform: Windows
+Classifier: Framework :: IPython
+Classifier: Framework :: Jupyter
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: System :: Shells
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: backcall
+Requires-Dist: decorator
+Requires-Dist: jedi (>=0.13)
+Requires-Dist: matplotlib-inline
+Requires-Dist: pickleshare
+Requires-Dist: prompt-toolkit (!=3.0.37,<3.1.0,>=3.0.30)
+Requires-Dist: pygments (>=2.4.0)
+Requires-Dist: stack-data
+Requires-Dist: traitlets (>=5)
+Requires-Dist: typing-extensions ; python_version < "3.10"
+Requires-Dist: pexpect (>4.3) ; sys_platform != "win32"
+Requires-Dist: appnope ; sys_platform == "darwin"
+Requires-Dist: colorama ; sys_platform == "win32"
+Provides-Extra: all
+Requires-Dist: black ; extra == 'all'
+Requires-Dist: ipykernel ; extra == 'all'
+Requires-Dist: setuptools (>=18.5) ; extra == 'all'
+Requires-Dist: sphinx (>=1.3) ; extra == 'all'
+Requires-Dist: sphinx-rtd-theme ; extra == 'all'
+Requires-Dist: docrepr ; extra == 'all'
+Requires-Dist: matplotlib ; extra == 'all'
+Requires-Dist: stack-data ; extra == 'all'
+Requires-Dist: pytest (<7) ; extra == 'all'
+Requires-Dist: typing-extensions ; extra == 'all'
+Requires-Dist: pytest (<7.1) ; extra == 'all'
+Requires-Dist: pytest-asyncio ; extra == 'all'
+Requires-Dist: testpath ; extra == 'all'
+Requires-Dist: nbconvert ; extra == 'all'
+Requires-Dist: nbformat ; extra == 'all'
+Requires-Dist: ipywidgets ; extra == 'all'
+Requires-Dist: notebook ; extra == 'all'
+Requires-Dist: ipyparallel ; extra == 'all'
+Requires-Dist: qtconsole ; extra == 'all'
+Requires-Dist: curio ; extra == 'all'
+Requires-Dist: matplotlib (!=3.2.0) ; extra == 'all'
+Requires-Dist: numpy (>=1.21) ; extra == 'all'
+Requires-Dist: pandas ; extra == 'all'
+Requires-Dist: trio ; extra == 'all'
+Provides-Extra: black
+Requires-Dist: black ; extra == 'black'
+Provides-Extra: doc
+Requires-Dist: ipykernel ; extra == 'doc'
+Requires-Dist: setuptools (>=18.5) ; extra == 'doc'
+Requires-Dist: sphinx (>=1.3) ; extra == 'doc'
+Requires-Dist: sphinx-rtd-theme ; extra == 'doc'
+Requires-Dist: docrepr ; extra == 'doc'
+Requires-Dist: matplotlib ; extra == 'doc'
+Requires-Dist: stack-data ; extra == 'doc'
+Requires-Dist: pytest (<7) ; extra == 'doc'
+Requires-Dist: typing-extensions ; extra == 'doc'
+Requires-Dist: pytest (<7.1) ; extra == 'doc'
+Requires-Dist: pytest-asyncio ; extra == 'doc'
+Requires-Dist: testpath ; extra == 'doc'
+Provides-Extra: kernel
+Requires-Dist: ipykernel ; extra == 'kernel'
+Provides-Extra: nbconvert
+Requires-Dist: nbconvert ; extra == 'nbconvert'
+Provides-Extra: nbformat
+Requires-Dist: nbformat ; extra == 'nbformat'
+Provides-Extra: notebook
+Requires-Dist: ipywidgets ; extra == 'notebook'
+Requires-Dist: notebook ; extra == 'notebook'
+Provides-Extra: parallel
+Requires-Dist: ipyparallel ; extra == 'parallel'
+Provides-Extra: qtconsole
+Requires-Dist: qtconsole ; extra == 'qtconsole'
+Provides-Extra: terminal
+Provides-Extra: test
+Requires-Dist: pytest (<7.1) ; extra == 'test'
+Requires-Dist: pytest-asyncio ; extra == 'test'
+Requires-Dist: testpath ; extra == 'test'
+Provides-Extra: test_extra
+Requires-Dist: pytest (<7.1) ; extra == 'test_extra'
+Requires-Dist: pytest-asyncio ; extra == 'test_extra'
+Requires-Dist: testpath ; extra == 'test_extra'
+Requires-Dist: curio ; extra == 'test_extra'
+Requires-Dist: matplotlib (!=3.2.0) ; extra == 'test_extra'
+Requires-Dist: nbformat ; extra == 'test_extra'
+Requires-Dist: numpy (>=1.21) ; extra == 'test_extra'
+Requires-Dist: pandas ; extra == 'test_extra'
+Requires-Dist: trio ; extra == 'test_extra'
+
+IPython provides a rich toolkit to help you make the most out of using Python
+interactively. Its main components are:
+
+ * A powerful interactive Python shell
+ * A `Jupyter <https://jupyter.org/>`_ kernel to work with Python code in Jupyter
+ notebooks and other interactive frontends.
+
+The enhanced interactive Python shells have the following main features:
+
+ * Comprehensive object introspection.
+
+ * Input history, persistent across sessions.
+
+ * Caching of output results during a session with automatically generated
+ references.
+
+ * Extensible tab completion, with support by default for completion of python
+ variables and keywords, filenames and function keywords.
+
+ * Extensible system of 'magic' commands for controlling the environment and
+ performing many tasks related either to IPython or the operating system.
+
+ * A rich configuration system with easy switching between different setups
+ (simpler than changing $PYTHONSTARTUP environment variables every time).
+
+ * Session logging and reloading.
+
+ * Extensible syntax processing for special purpose situations.
+
+ * Access to the system shell with user-extensible alias system.
+
+ * Easily embeddable in other Python programs and GUIs.
+
+ * Integrated access to the pdb debugger and the Python profiler.
+
+The latest development version is always available from IPython's `GitHub
+site <http://github.com/ipython>`_.
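The "easily embeddable" bullet above is the feature most commonly used from ordinary scripts. A minimal sketch (not part of the packaged metadata; it assumes IPython is installed, and the function and variable names are illustrative):

```python
# Minimal sketch: embed an IPython shell inside a running program.
# `embed()` opens an interactive prompt in the caller's scope; exit with Ctrl-D.
from IPython import embed

def inspect_state():
    counter = 42
    embed(header="Inspecting local state")  # `counter` is visible at the prompt
    return counter

if __name__ == "__main__":
    inspect_state()
```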
diff --git a/contrib/python/ipython/py3/.dist-info/entry_points.txt b/contrib/python/ipython/py3/.dist-info/entry_points.txt
new file mode 100644
index 0000000000..3de4479bae
--- /dev/null
+++ b/contrib/python/ipython/py3/.dist-info/entry_points.txt
@@ -0,0 +1,8 @@
+[console_scripts]
+ipython = IPython:start_ipython
+ipython3 = IPython:start_ipython
+
+[pygments.lexers]
+ipython = IPython.lib.lexers:IPythonLexer
+ipython3 = IPython.lib.lexers:IPython3Lexer
+ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer
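For reference, the `[pygments.lexers]` entries above are what let Pygments discover IPython's lexers through its plugin machinery. A small sketch of resolving one of them (it assumes both pygments and IPython are installed):

```python
# Minimal sketch: the entry points registered above make the IPython lexers
# resolvable through Pygments' normal name lookup.
from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name("ipython3")  # provided by IPython.lib.lexers:IPython3Lexer
print(type(lexer).__name__)
```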
diff --git a/contrib/python/ipython/py3/.dist-info/top_level.txt b/contrib/python/ipython/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..7fed997b4c
--- /dev/null
+++ b/contrib/python/ipython/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+IPython
diff --git a/contrib/python/ipython/py3/COPYING.rst b/contrib/python/ipython/py3/COPYING.rst
new file mode 100644
index 0000000000..e5c79ef38f
--- /dev/null
+++ b/contrib/python/ipython/py3/COPYING.rst
@@ -0,0 +1,41 @@
+=============================
+ The IPython licensing terms
+=============================
+
+IPython is licensed under the terms of the Modified BSD License (also known as
+New or Revised or 3-Clause BSD). See the LICENSE file.
+
+
+About the IPython Development Team
+----------------------------------
+
+Fernando Perez began IPython in 2001 based on code from Janko Hauser
+<jhauser@zscout.de> and Nathaniel Gray <n8gray@caltech.edu>. Fernando is still
+the project lead.
+
+The IPython Development Team is the set of all contributors to the IPython
+project. This includes all of the IPython subprojects.
+
+The core team that coordinates development on GitHub can be found here:
+https://github.com/ipython/.
+
+Our Copyright Policy
+--------------------
+
+IPython uses a shared copyright model. Each contributor maintains copyright
+over their contributions to IPython. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the IPython
+source code, in its entirety, is not the copyright of any single person or
+institution. Instead, it is the collective copyright of the entire IPython
+Development Team. If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the IPython repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+::
+
+ # Copyright (c) IPython Development Team.
+ # Distributed under the terms of the Modified BSD License.
diff --git a/contrib/python/ipython/py3/IPython/__init__.py b/contrib/python/ipython/py3/IPython/__init__.py
new file mode 100644
index 0000000000..3322562a16
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/__init__.py
@@ -0,0 +1,162 @@
+# PYTHON_ARGCOMPLETE_OK
+"""
+IPython: tools for interactive and parallel computing in Python.
+
+https://ipython.org
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2008-2011, IPython Development Team.
+# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
+# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+#-----------------------------------------------------------------------------
+# Setup everything
+#-----------------------------------------------------------------------------
+
+# Don't forget to also update setup.py when this changes!
+if sys.version_info < (3, 9):
+ raise ImportError(
+ """
+IPython 8.13+ supports Python 3.9 and above, following NEP 29.
+IPython 8.0-8.12 supports Python 3.8 and above, following NEP 29.
+When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
+Python 3.3 and 3.4 were supported up to IPython 6.x.
+Python 3.5 was supported with IPython 7.0 to 7.9.
+Python 3.6 was supported with IPython up to 7.16.
+Python 3.7 was still supported with the 7.x branch.
+
+See IPython `README.rst` file for more information:
+
+ https://github.com/ipython/ipython/blob/main/README.rst
+
+"""
+ )
+
+#-----------------------------------------------------------------------------
+# Setup the top level names
+#-----------------------------------------------------------------------------
+
+from .core.getipython import get_ipython
+from .core import release
+from .core.application import Application
+from .terminal.embed import embed
+
+from .core.interactiveshell import InteractiveShell
+from .utils.sysinfo import sys_info
+from .utils.frame import extract_module_locals
+
+__all__ = ["start_ipython", "embed", "start_kernel", "embed_kernel"]
+
+# Release data
+__author__ = '%s <%s>' % (release.author, release.author_email)
+__license__ = release.license
+__version__ = release.version
+version_info = release.version_info
+# list of CVEs that should have been patched in this release.
+# this is informational and should not be relied upon.
+__patched_cves__ = {"CVE-2022-21699", "CVE-2023-24816"}
+
+
+def embed_kernel(module=None, local_ns=None, **kwargs):
+ """Embed and start an IPython kernel in a given scope.
+
+ If you don't want the kernel to initialize the namespace
+ from the scope of the surrounding function,
+ and/or you want to load full IPython configuration,
+ you probably want `IPython.start_kernel()` instead.
+
+ Parameters
+ ----------
+ module : types.ModuleType, optional
+ The module to load into IPython globals (default: caller)
+ local_ns : dict, optional
+ The namespace to load into IPython user namespace (default: caller)
+ **kwargs : various, optional
+ Further keyword args are relayed to the IPKernelApp constructor,
+ such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`),
+ allowing configuration of the kernel (see :ref:`kernel_options`). Will only have an effect
+ on the first embed_kernel call for a given process.
+ """
+
+ (caller_module, caller_locals) = extract_module_locals(1)
+ if module is None:
+ module = caller_module
+ if local_ns is None:
+ local_ns = caller_locals
+
+ # Only import .zmq when we really need it
+ from ipykernel.embed import embed_kernel as real_embed_kernel
+ real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
+
+def start_ipython(argv=None, **kwargs):
+ """Launch a normal IPython instance (as opposed to embedded)
+
+ `IPython.embed()` puts a shell in a particular calling scope,
+ such as a function or method for debugging purposes,
+ which is often not desirable.
+
+ `start_ipython()` does full, regular IPython initialization,
+ including loading startup files, configuration, etc.
+ much of which is skipped by `embed()`.
+
+ This is a public API method, and will survive implementation changes.
+
+ Parameters
+ ----------
+ argv : list or None, optional
+ If unspecified or None, IPython will parse command-line options from sys.argv.
+ To prevent any command-line parsing, pass an empty list: `argv=[]`.
+ user_ns : dict, optional
+ specify this dictionary to initialize the IPython user namespace with particular values.
+ **kwargs : various, optional
+ Any other kwargs will be passed to the Application constructor,
+ such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`),
+ allowing configuration of the instance (see :ref:`terminal_options`).
+ """
+ from IPython.terminal.ipapp import launch_new_instance
+ return launch_new_instance(argv=argv, **kwargs)
+
+def start_kernel(argv=None, **kwargs):
+ """Launch a normal IPython kernel instance (as opposed to embedded)
+
+ `IPython.embed_kernel()` puts a shell in a particular calling scope,
+ such as a function or method for debugging purposes,
+ which is often not desirable.
+
+ `start_kernel()` does full, regular IPython initialization,
+ including loading startup files, configuration, etc.
+ much of which is skipped by `embed_kernel()`.
+
+ Parameters
+ ----------
+ argv : list or None, optional
+ If unspecified or None, IPython will parse command-line options from sys.argv.
+ To prevent any command-line parsing, pass an empty list: `argv=[]`.
+ user_ns : dict, optional
+ specify this dictionary to initialize the IPython user namespace with particular values.
+ **kwargs : various, optional
+ Any other kwargs will be passed to the Application constructor,
+ such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`),
+ allowing configuration of the kernel (see :ref:`kernel_options`).
+ """
+ import warnings
+
+ warnings.warn(
+ "start_kernel is deprecated since IPython 8.0, use from `ipykernel.kernelapp.launch_new_instance`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ from ipykernel.kernelapp import launch_new_instance
+ return launch_new_instance(argv=argv, **kwargs)
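The docstrings above contrast `embed()` with `start_ipython()`. A short, hedged sketch of launching a full IPython instance programmatically, which is what the `ipython` console-script entry point resolves to; the `user_ns` contents are illustrative:

```python
# Minimal sketch: start a full IPython instance from Python code.
# argv=[] suppresses parsing of sys.argv; user_ns pre-populates the
# interactive namespace (the names below are made up for illustration).
from IPython import start_ipython

start_ipython(argv=[], user_ns={"greeting": "hello from start_ipython"})
```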
diff --git a/contrib/python/ipython/py3/IPython/__main__.py b/contrib/python/ipython/py3/IPython/__main__.py
new file mode 100644
index 0000000000..8e9f989a82
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/__main__.py
@@ -0,0 +1,15 @@
+# PYTHON_ARGCOMPLETE_OK
+# encoding: utf-8
+"""Terminal-based IPython entry point.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from IPython import start_ipython
+
+start_ipython()
diff --git a/contrib/python/ipython/py3/IPython/consoleapp.py b/contrib/python/ipython/py3/IPython/consoleapp.py
new file mode 100644
index 0000000000..c2bbe1888f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/consoleapp.py
@@ -0,0 +1,12 @@
+"""
+Shim to maintain backwards compatibility with old IPython.consoleapp imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from warnings import warn
+
+warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0."
+ "You should import from jupyter_client.consoleapp instead.", stacklevel=2)
+
+from jupyter_client.consoleapp import *
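A quick sketch of what importing the shim above does at runtime (it assumes jupyter_client is installed; the warning text is the one defined above):

```python
# Minimal sketch: importing the shim re-exports jupyter_client.consoleapp
# and emits the deprecation warning defined above.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import IPython.consoleapp  # noqa: F401

print([str(w.message) for w in caught])
```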
diff --git a/contrib/python/ipython/py3/IPython/core/__init__.py b/contrib/python/ipython/py3/IPython/core/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/__init__.py
diff --git a/contrib/python/ipython/py3/IPython/core/alias.py b/contrib/python/ipython/py3/IPython/core/alias.py
new file mode 100644
index 0000000000..2ad990231a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/alias.py
@@ -0,0 +1,258 @@
+# encoding: utf-8
+"""
+System command aliases.
+
+Authors:
+
+* Fernando Perez
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+
+from traitlets.config.configurable import Configurable
+from .error import UsageError
+
+from traitlets import List, Instance
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+# This is used as the pattern for calls to split_user_input.
+shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
+
+def default_aliases():
+ """Return list of shell aliases to auto-define.
+ """
+ # Note: the aliases defined here should be safe to use on a kernel
+ # regardless of what frontend it is attached to. Frontends that use a
+ # kernel in-process can define additional aliases that will only work in
+ # their case. For example, things like 'less' or 'clear' that manipulate
+ # the terminal should NOT be declared here, as they will only work if the
+ # kernel is running inside a true terminal, and not over the network.
+
+ if os.name == 'posix':
+ default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
+ ('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
+ ('cat', 'cat'),
+ ]
+ # Useful set of ls aliases. The GNU and BSD options are a little
+ # different, so we make aliases that provide as similar as possible
+ # behavior in ipython, by passing the right flags for each platform
+ if sys.platform.startswith('linux'):
+ ls_aliases = [('ls', 'ls -F --color'),
+ # long ls
+ ('ll', 'ls -F -o --color'),
+ # ls normal files only
+ ('lf', 'ls -F -o --color %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -o --color %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -o --color %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -o --color %l | grep ^-..x'),
+ ]
+ elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
+        # OpenBSD, NetBSD. The ls implementation on these platforms does not support
+        # the -G switch and lacks the ability to use colorized output.
+ ls_aliases = [('ls', 'ls -F'),
+ # long ls
+ ('ll', 'ls -F -l'),
+ # ls normal files only
+ ('lf', 'ls -F -l %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -l %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -l %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -l %l | grep ^-..x'),
+ ]
+ else:
+ # BSD, OSX, etc.
+ ls_aliases = [('ls', 'ls -F -G'),
+ # long ls
+ ('ll', 'ls -F -l -G'),
+ # ls normal files only
+ ('lf', 'ls -F -l -G %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -l -G %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -G -l %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -l -G %l | grep ^-..x'),
+ ]
+ default_aliases = default_aliases + ls_aliases
+ elif os.name in ['nt', 'dos']:
+ default_aliases = [('ls', 'dir /on'),
+ ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
+ ('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
+ ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
+ ]
+ else:
+ default_aliases = []
+
+ return default_aliases
+
+
+class AliasError(Exception):
+ pass
+
+
+class InvalidAliasError(AliasError):
+ pass
+
+class Alias(object):
+ """Callable object storing the details of one alias.
+
+ Instances are registered as magic functions to allow use of aliases.
+ """
+
+ # Prepare blacklist
+ blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
+
+ def __init__(self, shell, name, cmd):
+ self.shell = shell
+ self.name = name
+ self.cmd = cmd
+ self.__doc__ = "Alias for `!{}`".format(cmd)
+ self.nargs = self.validate()
+
+ def validate(self):
+ """Validate the alias, and return the number of arguments."""
+ if self.name in self.blacklist:
+ raise InvalidAliasError("The name %s can't be aliased "
+ "because it is a keyword or builtin." % self.name)
+ try:
+ caller = self.shell.magics_manager.magics['line'][self.name]
+ except KeyError:
+ pass
+ else:
+ if not isinstance(caller, Alias):
+ raise InvalidAliasError("The name %s can't be aliased "
+ "because it is another magic command." % self.name)
+
+ if not (isinstance(self.cmd, str)):
+ raise InvalidAliasError("An alias command must be a string, "
+ "got: %r" % self.cmd)
+
+ nargs = self.cmd.count('%s') - self.cmd.count('%%s')
+
+ if (nargs > 0) and (self.cmd.find('%l') >= 0):
+ raise InvalidAliasError('The %s and %l specifiers are mutually '
+ 'exclusive in alias definitions.')
+
+ return nargs
+
+ def __repr__(self):
+ return "<alias {} for {!r}>".format(self.name, self.cmd)
+
+ def __call__(self, rest=''):
+ cmd = self.cmd
+ nargs = self.nargs
+ # Expand the %l special to be the user's input line
+ if cmd.find('%l') >= 0:
+ cmd = cmd.replace('%l', rest)
+ rest = ''
+
+ if nargs==0:
+ if cmd.find('%%s') >= 1:
+ cmd = cmd.replace('%%s', '%s')
+ # Simple, argument-less aliases
+ cmd = '%s %s' % (cmd, rest)
+ else:
+ # Handle aliases with positional arguments
+ args = rest.split(None, nargs)
+ if len(args) < nargs:
+ raise UsageError('Alias <%s> requires %s arguments, %s given.' %
+ (self.name, nargs, len(args)))
+ cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
+
+ self.shell.system(cmd)
+
+#-----------------------------------------------------------------------------
+# Main AliasManager class
+#-----------------------------------------------------------------------------
+
+class AliasManager(Configurable):
+
+ default_aliases = List(default_aliases()).tag(config=True)
+ user_aliases = List(default_value=[]).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(AliasManager, self).__init__(shell=shell, **kwargs)
+ # For convenient access
+ self.linemagics = self.shell.magics_manager.magics['line']
+ self.init_aliases()
+
+ def init_aliases(self):
+ # Load default & user aliases
+ for name, cmd in self.default_aliases + self.user_aliases:
+ if cmd.startswith('ls ') and self.shell.colors == 'NoColor':
+ cmd = cmd.replace(' --color', '')
+ self.soft_define_alias(name, cmd)
+
+ @property
+ def aliases(self):
+ return [(n, func.cmd) for (n, func) in self.linemagics.items()
+ if isinstance(func, Alias)]
+
+ def soft_define_alias(self, name, cmd):
+ """Define an alias, but don't raise on an AliasError."""
+ try:
+ self.define_alias(name, cmd)
+ except AliasError as e:
+ error("Invalid alias: %s" % e)
+
+ def define_alias(self, name, cmd):
+ """Define a new alias after validating it.
+
+ This will raise an :exc:`AliasError` if there are validation
+ problems.
+ """
+ caller = Alias(shell=self.shell, name=name, cmd=cmd)
+ self.shell.magics_manager.register_function(caller, magic_kind='line',
+ magic_name=name)
+
+ def get_alias(self, name):
+ """Return an alias, or None if no alias by that name exists."""
+ aname = self.linemagics.get(name, None)
+ return aname if isinstance(aname, Alias) else None
+
+ def is_alias(self, name):
+ """Return whether or not a given name has been defined as an alias"""
+ return self.get_alias(name) is not None
+
+ def undefine_alias(self, name):
+ if self.is_alias(name):
+ del self.linemagics[name]
+ else:
+ raise ValueError('%s is not an alias' % name)
+
+ def clear_aliases(self):
+ for name, cmd in self.aliases:
+ self.undefine_alias(name)
+
+ def retrieve_alias(self, name):
+ """Retrieve the command to which an alias expands."""
+ caller = self.get_alias(name)
+ if caller:
+ return caller.cmd
+ else:
+ raise ValueError('%s is not an alias' % name)
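The `%s`/`%l` handling in `Alias.validate` and `Alias.__call__` above is easiest to see from the shell side. A minimal sketch (it assumes a live IPython session; the alias names and commands are illustrative):

```python
# Minimal sketch: define aliases that exercise the %s / %l placeholders
# counted in Alias.validate and expanded in Alias.__call__ above.
from IPython import get_ipython

ip = get_ipython()  # None outside IPython; this sketch assumes a live shell
if ip is not None:
    # Each %s consumes one whitespace-separated argument (nargs counts them).
    ip.alias_manager.define_alias("show", "head -n %s %s")
    # %l receives the whole rest of the line, so no positional splitting happens.
    ip.alias_manager.define_alias("say", "echo %l")
    # In the shell:  show 5 data.txt   runs   head -n 5 data.txt
    #                say hello world   runs   echo hello world
```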
diff --git a/contrib/python/ipython/py3/IPython/core/application.py b/contrib/python/ipython/py3/IPython/core/application.py
new file mode 100644
index 0000000000..e0a8174f15
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/application.py
@@ -0,0 +1,488 @@
+# encoding: utf-8
+"""
+An application for IPython.
+
+All top-level applications should use the classes in this module for
+handling configuration and creating configurables.
+
+The job of an :class:`Application` is to create the master configuration
+object and then create the configurable objects, passing the config to them.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import atexit
+from copy import deepcopy
+import logging
+import os
+import shutil
+import sys
+
+from pathlib import Path
+
+from traitlets.config.application import Application, catch_config_error
+from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
+from IPython.core import release, crashhandler
+from IPython.core.profiledir import ProfileDir, ProfileDirError
+from IPython.paths import get_ipython_dir, get_ipython_package_dir
+from IPython.utils.path import ensure_dir_exists
+from traitlets import (
+ List, Unicode, Type, Bool, Set, Instance, Undefined,
+ default, observe,
+)
+
+if os.name == "nt":
+ programdata = os.environ.get("PROGRAMDATA", None)
+ if programdata is not None:
+ SYSTEM_CONFIG_DIRS = [str(Path(programdata) / "ipython")]
+ else: # PROGRAMDATA is not defined by default on XP.
+ SYSTEM_CONFIG_DIRS = []
+else:
+ SYSTEM_CONFIG_DIRS = [
+ "/usr/local/etc/ipython",
+ "/etc/ipython",
+ ]
+
+
+ENV_CONFIG_DIRS = []
+_env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
+if _env_config_dir not in SYSTEM_CONFIG_DIRS:
+ # only add ENV_CONFIG if sys.prefix is not already included
+ ENV_CONFIG_DIRS.append(_env_config_dir)
+
+
+_envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS')
+if _envvar in {None, ''}:
+ IPYTHON_SUPPRESS_CONFIG_ERRORS = None
+else:
+ if _envvar.lower() in {'1','true'}:
+ IPYTHON_SUPPRESS_CONFIG_ERRORS = True
+ elif _envvar.lower() in {'0','false'} :
+ IPYTHON_SUPPRESS_CONFIG_ERRORS = False
+ else:
+ sys.exit("Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar )
+
+# aliases and flags
+
+base_aliases = {}
+if isinstance(Application.aliases, dict):
+ # traitlets 5
+ base_aliases.update(Application.aliases)
+base_aliases.update(
+ {
+ "profile-dir": "ProfileDir.location",
+ "profile": "BaseIPythonApplication.profile",
+ "ipython-dir": "BaseIPythonApplication.ipython_dir",
+ "log-level": "Application.log_level",
+ "config": "BaseIPythonApplication.extra_config_file",
+ }
+)
+
+base_flags = dict()
+if isinstance(Application.flags, dict):
+ # traitlets 5
+ base_flags.update(Application.flags)
+base_flags.update(
+ dict(
+ debug=(
+ {"Application": {"log_level": logging.DEBUG}},
+ "set log level to logging.DEBUG (maximize logging output)",
+ ),
+ quiet=(
+ {"Application": {"log_level": logging.CRITICAL}},
+ "set log level to logging.CRITICAL (minimize logging output)",
+ ),
+ init=(
+ {
+ "BaseIPythonApplication": {
+ "copy_config_files": True,
+ "auto_create": True,
+ }
+ },
+ """Initialize profile with default config files. This is equivalent
+ to running `ipython profile create <profile>` prior to startup.
+ """,
+ ),
+ )
+)
+
+
+class ProfileAwareConfigLoader(PyFileConfigLoader):
+ """A Python file config loader that is aware of IPython profiles."""
+ def load_subconfig(self, fname, path=None, profile=None):
+ if profile is not None:
+ try:
+ profile_dir = ProfileDir.find_profile_dir_by_name(
+ get_ipython_dir(),
+ profile,
+ )
+ except ProfileDirError:
+ return
+ path = profile_dir.location
+ return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
+
+class BaseIPythonApplication(Application):
+ name = "ipython"
+ description = "IPython: an enhanced interactive Python shell."
+ version = Unicode(release.version)
+
+ aliases = base_aliases
+ flags = base_flags
+ classes = List([ProfileDir])
+
+ # enable `load_subconfig('cfg.py', profile='name')`
+ python_config_loader_class = ProfileAwareConfigLoader
+
+ # Track whether the config_file has changed,
+ # because some logic happens only if we aren't using the default.
+ config_file_specified = Set()
+
+ config_file_name = Unicode()
+ @default('config_file_name')
+ def _config_file_name_default(self):
+ return self.name.replace('-','_') + u'_config.py'
+ @observe('config_file_name')
+ def _config_file_name_changed(self, change):
+ if change['new'] != change['old']:
+ self.config_file_specified.add(change['new'])
+
+ # The directory that contains IPython's builtin profiles.
+ builtin_profile_dir = Unicode(
+ os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
+ )
+
+ config_file_paths = List(Unicode())
+ @default('config_file_paths')
+ def _config_file_paths_default(self):
+ return []
+
+ extra_config_file = Unicode(
+ help="""Path to an extra config file to load.
+
+ If specified, load this config file in addition to any other IPython config.
+ """).tag(config=True)
+ @observe('extra_config_file')
+ def _extra_config_file_changed(self, change):
+ old = change['old']
+ new = change['new']
+ try:
+ self.config_files.remove(old)
+ except ValueError:
+ pass
+ self.config_file_specified.add(new)
+ self.config_files.append(new)
+
+ profile = Unicode(u'default',
+ help="""The IPython profile to use."""
+ ).tag(config=True)
+
+ @observe('profile')
+ def _profile_changed(self, change):
+ self.builtin_profile_dir = os.path.join(
+ get_ipython_package_dir(), u'config', u'profile', change['new']
+ )
+
+ add_ipython_dir_to_sys_path = Bool(
+ False,
+ """Should the IPython profile directory be added to sys path ?
+
+ This option was non-existing before IPython 8.0, and ipython_dir was added to
+ sys path to allow import of extensions present there. This was historical
+ baggage from when pip did not exist. This now default to false,
+ but can be set to true for legacy reasons.
+ """,
+ ).tag(config=True)
+
+ ipython_dir = Unicode(
+ help="""
+ The name of the IPython directory. This directory is used for logging
+ configuration (through profiles), history storage, etc. The default
+ is usually $HOME/.ipython. This option can also be specified through
+ the environment variable IPYTHONDIR.
+ """
+ ).tag(config=True)
+ @default('ipython_dir')
+ def _ipython_dir_default(self):
+ d = get_ipython_dir()
+ self._ipython_dir_changed({
+ 'name': 'ipython_dir',
+ 'old': d,
+ 'new': d,
+ })
+ return d
+
+ _in_init_profile_dir = False
+ profile_dir = Instance(ProfileDir, allow_none=True)
+ @default('profile_dir')
+ def _profile_dir_default(self):
+ # avoid recursion
+ if self._in_init_profile_dir:
+ return
+ # profile_dir requested early, force initialization
+ self.init_profile_dir()
+ return self.profile_dir
+
+ overwrite = Bool(False,
+ help="""Whether to overwrite existing config files when copying"""
+ ).tag(config=True)
+ auto_create = Bool(False,
+ help="""Whether to create profile dir if it doesn't exist"""
+ ).tag(config=True)
+
+ config_files = List(Unicode())
+ @default('config_files')
+ def _config_files_default(self):
+ return [self.config_file_name]
+
+ copy_config_files = Bool(False,
+ help="""Whether to install the default config files into the profile dir.
+ If a new profile is being created, and IPython contains config files for that
+ profile, then they will be staged into the new directory. Otherwise,
+ default config files will be automatically generated.
+ """).tag(config=True)
+
+ verbose_crash = Bool(False,
+ help="""Create a massive crash report when IPython encounters what may be an
+ internal error. The default is to append a short message to the
+ usual traceback""").tag(config=True)
+
+ # The class to use as the crash handler.
+ crash_handler_class = Type(crashhandler.CrashHandler)
+
+ @catch_config_error
+ def __init__(self, **kwargs):
+ super(BaseIPythonApplication, self).__init__(**kwargs)
+ # ensure current working directory exists
+ try:
+ os.getcwd()
+ except:
+ # exit if cwd doesn't exist
+ self.log.error("Current working directory doesn't exist.")
+ self.exit(1)
+
+ #-------------------------------------------------------------------------
+ # Various stages of Application creation
+ #-------------------------------------------------------------------------
+
+ def init_crash_handler(self):
+ """Create a crash handler, typically setting sys.excepthook to it."""
+ self.crash_handler = self.crash_handler_class(self)
+ sys.excepthook = self.excepthook
+ def unset_crashhandler():
+ sys.excepthook = sys.__excepthook__
+ atexit.register(unset_crashhandler)
+
+ def excepthook(self, etype, evalue, tb):
+ """this is sys.excepthook after init_crashhandler
+
+ set self.verbose_crash=True to use our full crashhandler, instead of
+ a regular traceback with a short message (crash_handler_lite)
+ """
+
+ if self.verbose_crash:
+ return self.crash_handler(etype, evalue, tb)
+ else:
+ return crashhandler.crash_handler_lite(etype, evalue, tb)
+
+ @observe('ipython_dir')
+ def _ipython_dir_changed(self, change):
+ old = change['old']
+ new = change['new']
+ if old is not Undefined:
+ str_old = os.path.abspath(old)
+ if str_old in sys.path:
+ sys.path.remove(str_old)
+ if self.add_ipython_dir_to_sys_path:
+ str_path = os.path.abspath(new)
+ sys.path.append(str_path)
+ ensure_dir_exists(new)
+ readme = os.path.join(new, "README")
+ readme_src = os.path.join(
+ get_ipython_package_dir(), "config", "profile", "README"
+ )
+ if not os.path.exists(readme) and os.path.exists(readme_src):
+ shutil.copy(readme_src, readme)
+ for d in ("extensions", "nbextensions"):
+ path = os.path.join(new, d)
+ try:
+ ensure_dir_exists(path)
+ except OSError as e:
+ # this will not be EEXIST
+ self.log.error("couldn't create path %s: %s", path, e)
+ self.log.debug("IPYTHONDIR set to: %s", new)
+
+ def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
+ """Load the config file.
+
+        By default, errors in loading config are handled, and a warning
+        is printed on screen. For testing, the suppress_errors option is set
+        to False, so errors will make tests fail.
+
+        `suppress_errors` defaults to `None`, in which case the behavior
+        falls back to that of `traitlets.Application`.
+
+        The default value can be set:
+        - to `False` by setting the 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0' or 'false' (case insensitive).
+        - to `True` by setting the 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
+        - to `None` by setting the 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.
+
+        Any other value is invalid, and will make IPython exit with a non-zero return code.
+ """
+
+
+ self.log.debug("Searching path %s for config files", self.config_file_paths)
+ base_config = 'ipython_config.py'
+ self.log.debug("Attempting to load config file: %s" %
+ base_config)
+ try:
+ if suppress_errors is not None:
+ old_value = Application.raise_config_file_errors
+ Application.raise_config_file_errors = not suppress_errors;
+ Application.load_config_file(
+ self,
+ base_config,
+ path=self.config_file_paths
+ )
+ except ConfigFileNotFound:
+ # ignore errors loading parent
+ self.log.debug("Config file %s not found", base_config)
+ pass
+ if suppress_errors is not None:
+ Application.raise_config_file_errors = old_value
+
+ for config_file_name in self.config_files:
+ if not config_file_name or config_file_name == base_config:
+ continue
+ self.log.debug("Attempting to load config file: %s" %
+ self.config_file_name)
+ try:
+ Application.load_config_file(
+ self,
+ config_file_name,
+ path=self.config_file_paths
+ )
+ except ConfigFileNotFound:
+ # Only warn if the default config file was NOT being used.
+ if config_file_name in self.config_file_specified:
+ msg = self.log.warning
+ else:
+ msg = self.log.debug
+ msg("Config file not found, skipping: %s", config_file_name)
+ except Exception:
+ # For testing purposes.
+ if not suppress_errors:
+ raise
+ self.log.warning("Error loading config file: %s" %
+ self.config_file_name, exc_info=True)
+
+ def init_profile_dir(self):
+ """initialize the profile dir"""
+ self._in_init_profile_dir = True
+ if self.profile_dir is not None:
+ # already ran
+ return
+ if 'ProfileDir.location' not in self.config:
+ # location not specified, find by profile name
+ try:
+ p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
+ except ProfileDirError:
+ # not found, maybe create it (always create default profile)
+ if self.auto_create or self.profile == 'default':
+ try:
+ p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
+ except ProfileDirError:
+ self.log.fatal("Could not create profile: %r"%self.profile)
+ self.exit(1)
+ else:
+ self.log.info("Created profile dir: %r"%p.location)
+ else:
+ self.log.fatal("Profile %r not found."%self.profile)
+ self.exit(1)
+ else:
+ self.log.debug("Using existing profile dir: %r", p.location)
+ else:
+ location = self.config.ProfileDir.location
+ # location is fully specified
+ try:
+ p = ProfileDir.find_profile_dir(location, self.config)
+ except ProfileDirError:
+ # not found, maybe create it
+ if self.auto_create:
+ try:
+ p = ProfileDir.create_profile_dir(location, self.config)
+ except ProfileDirError:
+ self.log.fatal("Could not create profile directory: %r"%location)
+ self.exit(1)
+ else:
+ self.log.debug("Creating new profile dir: %r"%location)
+ else:
+ self.log.fatal("Profile directory %r not found."%location)
+ self.exit(1)
+ else:
+ self.log.debug("Using existing profile dir: %r", p.location)
+ # if profile_dir is specified explicitly, set profile name
+ dir_name = os.path.basename(p.location)
+ if dir_name.startswith('profile_'):
+ self.profile = dir_name[8:]
+
+ self.profile_dir = p
+ self.config_file_paths.append(p.location)
+ self._in_init_profile_dir = False
+
+ def init_config_files(self):
+ """[optionally] copy default config files into profile dir."""
+ self.config_file_paths.extend(ENV_CONFIG_DIRS)
+ self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
+ # copy config files
+ path = Path(self.builtin_profile_dir)
+ if self.copy_config_files:
+ src = self.profile
+
+ cfg = self.config_file_name
+ if path and (path / cfg).exists():
+ self.log.warning(
+ "Staging %r from %s into %r [overwrite=%s]"
+ % (cfg, src, self.profile_dir.location, self.overwrite)
+ )
+ self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
+ else:
+ self.stage_default_config_file()
+ else:
+ # Still stage *bundled* config files, but not generated ones
+ # This is necessary for `ipython profile=sympy` to load the profile
+ # on the first go
+ files = path.glob("*.py")
+ for fullpath in files:
+ cfg = fullpath.name
+ if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
+ # file was copied
+ self.log.warning("Staging bundled %s from %s into %r"%(
+ cfg, self.profile, self.profile_dir.location)
+ )
+
+
+ def stage_default_config_file(self):
+ """auto generate default config file, and stage it into the profile."""
+ s = self.generate_config_file()
+ config_file = Path(self.profile_dir.location) / self.config_file_name
+ if self.overwrite or not config_file.exists():
+ self.log.warning("Generating default config file: %r", (config_file))
+ config_file.write_text(s, encoding="utf-8")
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ # don't hook up crash handler before parsing command-line
+ self.parse_command_line(argv)
+ self.init_crash_handler()
+ if self.subapp is not None:
+ # stop here if subapp is taking over
+ return
+ # save a copy of CLI config to re-load after config files
+ # so that it has highest priority
+ cl_config = deepcopy(self.config)
+ self.init_profile_dir()
+ self.init_config_files()
+ self.load_config_file()
+ # enforce cl-opts override configfile opts:
+ self.update_config(cl_config)
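To see how the pieces above fit together (profiles, config-file search paths, the shared aliases and flags), here is a hedged sketch of a subclass; `ExampleApp` and its `greeting` trait are invented for illustration and are not part of IPython:

```python
# Minimal sketch, not an upstream API: a custom app on top of BaseIPythonApplication.
# It inherits profile handling, the --profile/--ipython-dir/--log-level aliases,
# and config-file loading. "ExampleApp" and "greeting" are made-up names.
from traitlets import Unicode
from IPython.core.application import BaseIPythonApplication

class ExampleApp(BaseIPythonApplication):
    name = "example-app"  # config file name becomes example_app_config.py
    greeting = Unicode("hello").tag(config=True)

    def start(self):
        self.log.info("greeting=%s profile=%s", self.greeting, self.profile)

if __name__ == "__main__":
    app = ExampleApp.instance()
    app.initialize()  # parse argv, resolve the profile dir, load config files
    app.start()
```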
diff --git a/contrib/python/ipython/py3/IPython/core/async_helpers.py b/contrib/python/ipython/py3/IPython/core/async_helpers.py
new file mode 100644
index 0000000000..0e7db0bb54
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/async_helpers.py
@@ -0,0 +1,156 @@
+"""
+Async helper functions that are invalid syntax on Python 3.5 and below.
+
+This code is best effort, and may have edge cases not behaving as expected. In
+particular it contains a number of heuristics to detect whether code is
+effectively async and needs to run in an event loop or not.
+
+Some constructs (like top-level `return`, or `yield`) are taken care of
+explicitly to actually raise a SyntaxError and stay as close as possible to
+Python semantics.
+"""
+
+
+import ast
+import asyncio
+import inspect
+from functools import wraps
+
+_asyncio_event_loop = None
+
+
+def get_asyncio_loop():
+ """asyncio has deprecated get_event_loop
+
+ Replicate it here, with our desired semantics:
+
+ - always returns a valid, not-closed loop
+ - not thread-local like asyncio's,
+ because we only want one loop for IPython
+ - if called from inside a coroutine (e.g. in ipykernel),
+ return the running loop
+
+ .. versionadded:: 8.0
+ """
+ try:
+ return asyncio.get_running_loop()
+ except RuntimeError:
+ # not inside a coroutine,
+ # track our own global
+ pass
+
+ # not thread-local like asyncio's,
+ # because we only track one event loop to run for IPython itself,
+ # always in the main thread.
+ global _asyncio_event_loop
+ if _asyncio_event_loop is None or _asyncio_event_loop.is_closed():
+ _asyncio_event_loop = asyncio.new_event_loop()
+ return _asyncio_event_loop
+
+
+class _AsyncIORunner:
+ def __call__(self, coro):
+ """
+ Handler for asyncio autoawait
+ """
+ return get_asyncio_loop().run_until_complete(coro)
+
+ def __str__(self):
+ return "asyncio"
+
+
+_asyncio_runner = _AsyncIORunner()
+
+
+class _AsyncIOProxy:
+    """Proxy object for an object living on an asyncio event loop.
+
+    Any coroutine method of the wrapped object will be scheduled on the given
+    event loop via ``asyncio.run_coroutine_threadsafe``.
+    """
+
+ def __init__(self, obj, event_loop):
+ self._obj = obj
+ self._event_loop = event_loop
+
+ def __repr__(self):
+ return f"<_AsyncIOProxy({self._obj!r})>"
+
+ def __getattr__(self, key):
+ attr = getattr(self._obj, key)
+ if inspect.iscoroutinefunction(attr):
+ # if it's a coroutine method,
+ # return a threadsafe wrapper onto the _current_ asyncio loop
+ @wraps(attr)
+ def _wrapped(*args, **kwargs):
+ concurrent_future = asyncio.run_coroutine_threadsafe(
+ attr(*args, **kwargs), self._event_loop
+ )
+ return asyncio.wrap_future(concurrent_future)
+
+ return _wrapped
+ else:
+ return attr
+
+ def __dir__(self):
+ return dir(self._obj)
+
+
+def _curio_runner(coroutine):
+ """
+ handler for curio autoawait
+ """
+ import curio
+
+ return curio.run(coroutine)
+
+
+def _trio_runner(async_fn):
+ import trio
+
+ async def loc(coro):
+ """
+ We need the dummy no-op async def to protect from
+ trio's internal. See https://github.com/python-trio/trio/issues/89
+ """
+ return await coro
+
+ return trio.run(loc, async_fn)
+
+
+def _pseudo_sync_runner(coro):
+ """
+    A runner that does not really allow async execution, and just advances the coroutine.
+
+ See discussion in https://github.com/python-trio/trio/issues/608,
+
+ Credit to Nathaniel Smith
+ """
+ try:
+ coro.send(None)
+ except StopIteration as exc:
+ return exc.value
+ else:
+ # TODO: do not raise but return an execution result with the right info.
+ raise RuntimeError(
+ "{coro_name!r} needs a real async loop".format(coro_name=coro.__name__)
+ )
+
+
+def _should_be_async(cell: str) -> bool:
+    """Detect if a block of code needs to be wrapped in an `async def`
+
+    Attempt to parse the block of code; if it compiles, we're fine.
+    Otherwise we wrap it and try to compile.
+
+    If it works, assume it should be async. Otherwise return False.
+
+    Not handled yet: If the block of code has a return statement at the top
+    level, it will be seen as async. This is a known limitation.
+ """
+ try:
+ code = compile(
+ cell, "<>", "exec", flags=getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
+ )
+ return inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
+ except (SyntaxError, MemoryError):
+ return False
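Editorial note, not part of the diff: a short sketch of how these helpers behave, assuming the module is importable as ``IPython.core.async_helpers`` and using made-up snippets:

from IPython.core.async_helpers import _should_be_async, get_asyncio_loop

# Plain code compiles as-is; top-level await only compiles as a coroutine,
# so it needs to be driven by an event loop.
assert _should_be_async("x = 1") is False
assert _should_be_async("await asyncio.sleep(0)") is True

# A single shared loop is reused for IPython while it stays open.
loop = get_asyncio_loop()
assert get_asyncio_loop() is loop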
diff --git a/contrib/python/ipython/py3/IPython/core/autocall.py b/contrib/python/ipython/py3/IPython/core/autocall.py
new file mode 100644
index 0000000000..54beec3f58
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/autocall.py
@@ -0,0 +1,70 @@
+# encoding: utf-8
+"""
+Autocall capabilities for IPython.core.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+* Thomas Kluyver
+
+Notes
+-----
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+class IPyAutocall(object):
+ """ Instances of this class are always autocalled
+
+ This happens regardless of 'autocall' variable state. Use this to
+ develop macro-like mechanisms.
+ """
+ _ip = None
+ rewrite = True
+ def __init__(self, ip=None):
+ self._ip = ip
+
+ def set_ip(self, ip):
+        """Used to set ``_ip`` to point to the current IPython instance before the call.
+
+ Override this method if you don't want this to happen.
+
+ """
+ self._ip = ip
+
+
+class ExitAutocall(IPyAutocall):
+ """An autocallable object which will be added to the user namespace so that
+ exit, exit(), quit or quit() are all valid ways to close the shell."""
+ rewrite = False
+
+ def __call__(self):
+ self._ip.ask_exit()
+
+class ZMQExitAutocall(ExitAutocall):
+ """Exit IPython. Autocallable, so it needn't be explicitly called.
+
+ Parameters
+ ----------
+ keep_kernel : bool
+ If True, leave the kernel alive. Otherwise, tell the kernel to exit too
+ (default).
+ """
+ def __call__(self, keep_kernel=False):
+ self._ip.keepkernel_on_exit = keep_kernel
+ self._ip.ask_exit()
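Editorial note, not part of the diff: a hypothetical sketch of how ``IPyAutocall`` is meant to be used; ``Greeter`` and ``greet`` are illustrative names only:

from IPython.core.autocall import IPyAutocall

class Greeter(IPyAutocall):
    """Hypothetical autocallable: typing ``greet`` alone triggers __call__()."""

    def __call__(self):
        print("hello from", type(self._ip).__name__)

# Registration inside an IPython session (illustrative):
#   ip = get_ipython()
#   ip.user_ns["greet"] = Greeter(ip)
#   greet        # autocalled -> runs Greeter.__call__ even with %autocall off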
diff --git a/contrib/python/ipython/py3/IPython/core/builtin_trap.py b/contrib/python/ipython/py3/IPython/core/builtin_trap.py
new file mode 100644
index 0000000000..a8ea4abcd9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/builtin_trap.py
@@ -0,0 +1,86 @@
+"""
+A context manager for managing things injected into :mod:`builtins`.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import builtins as builtin_mod
+
+from traitlets.config.configurable import Configurable
+
+from traitlets import Instance
+
+
+class __BuiltinUndefined(object): pass
+BuiltinUndefined = __BuiltinUndefined()
+
+class __HideBuiltin(object): pass
+HideBuiltin = __HideBuiltin()
+
+
+class BuiltinTrap(Configurable):
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+
+ def __init__(self, shell=None):
+ super(BuiltinTrap, self).__init__(shell=shell, config=None)
+ self._orig_builtins = {}
+ # We define this to track if a single BuiltinTrap is nested.
+ # Only turn off the trap when the outermost call to __exit__ is made.
+ self._nested_level = 0
+ self.shell = shell
+ # builtins we always add - if set to HideBuiltin, they will just
+ # be removed instead of being replaced by something else
+ self.auto_builtins = {'exit': HideBuiltin,
+ 'quit': HideBuiltin,
+ 'get_ipython': self.shell.get_ipython,
+ }
+
+ def __enter__(self):
+ if self._nested_level == 0:
+ self.activate()
+ self._nested_level += 1
+ # I return self, so callers can use add_builtin in a with clause.
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self._nested_level == 1:
+ self.deactivate()
+ self._nested_level -= 1
+ # Returning False will cause exceptions to propagate
+ return False
+
+ def add_builtin(self, key, value):
+ """Add a builtin and save the original."""
+ bdict = builtin_mod.__dict__
+ orig = bdict.get(key, BuiltinUndefined)
+ if value is HideBuiltin:
+ if orig is not BuiltinUndefined: #same as 'key in bdict'
+ self._orig_builtins[key] = orig
+ del bdict[key]
+ else:
+ self._orig_builtins[key] = orig
+ bdict[key] = value
+
+ def remove_builtin(self, key, orig):
+ """Remove an added builtin and re-set the original."""
+ if orig is BuiltinUndefined:
+ del builtin_mod.__dict__[key]
+ else:
+ builtin_mod.__dict__[key] = orig
+
+ def activate(self):
+ """Store ipython references in the __builtin__ namespace."""
+
+ add_builtin = self.add_builtin
+ for name, func in self.auto_builtins.items():
+ add_builtin(name, func)
+
+ def deactivate(self):
+        """Remove any builtins which might have been added by add_builtin, or
+ restore overwritten ones to their previous values."""
+ remove_builtin = self.remove_builtin
+ for key, val in self._orig_builtins.items():
+ remove_builtin(key, val)
+ self._orig_builtins.clear()
+ self._builtins_added = False
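Editorial note, not part of the diff: a minimal usage sketch, assuming a constructed IPython shell; inside the ``with`` block ``get_ipython`` is injected into ``builtins`` and ``exit``/``quit`` are hidden so IPython's own exit handling takes over:

import builtins
from IPython.terminal.interactiveshell import TerminalInteractiveShell

shell = TerminalInteractiveShell.instance()
with shell.builtin_trap:
    # injected for the duration of the block
    assert builtins.get_ipython() is shell
# on exit (outermost level), originals are restored and added names removed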
diff --git a/contrib/python/ipython/py3/IPython/core/compilerop.py b/contrib/python/ipython/py3/IPython/core/compilerop.py
new file mode 100644
index 0000000000..7799a4fc99
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/compilerop.py
@@ -0,0 +1,214 @@
+"""Compiler tools with improved interactive support.
+
+Provides compilation machinery similar to codeop, but with caching support so
+we can provide interactive tracebacks.
+
+Authors
+-------
+* Robert Kern
+* Fernando Perez
+* Thomas Kluyver
+"""
+
+# Note: though it might be more natural to name this module 'compiler', that
+# name is in the stdlib and name collisions with the stdlib tend to produce
+# weird problems (often with third-party tools).
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib imports
+import __future__
+from ast import PyCF_ONLY_AST
+import codeop
+import functools
+import hashlib
+import linecache
+import operator
+import time
+from contextlib import contextmanager
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+# Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
+# this is used as a bitmask to extract future-related code flags.
+PyCF_MASK = functools.reduce(operator.or_,
+ (getattr(__future__, fname).compiler_flag
+ for fname in __future__.all_feature_names))
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+def code_name(code, number=0):
+ """ Compute a (probably) unique name for code for caching.
+
+ This now expects code to be unicode.
+ """
+ hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
+ # Include the number and 12 characters of the hash in the name. It's
+ # pretty much impossible that in a single session we'll have collisions
+ # even with truncated hashes, and the full one makes tracebacks too long
+ return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+class CachingCompiler(codeop.Compile):
+ """A compiler that caches code compiled from interactive statements.
+ """
+
+ def __init__(self):
+ codeop.Compile.__init__(self)
+
+ # Caching a dictionary { filename: execution_count } for nicely
+ # rendered tracebacks. The filename corresponds to the filename
+ # argument used for the builtins.compile function.
+ self._filename_map = {}
+
+ def ast_parse(self, source, filename='<unknown>', symbol='exec'):
+ """Parse code to an AST with the current compiler flags active.
+
+ Arguments are exactly the same as ast.parse (in the standard library),
+ and are passed to the built-in compile function."""
+ return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
+
+ def reset_compiler_flags(self):
+ """Reset compiler flags to default state."""
+ # This value is copied from codeop.Compile.__init__, so if that ever
+ # changes, it will need to be updated.
+ self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
+
+ @property
+ def compiler_flags(self):
+ """Flags currently active in the compilation process.
+ """
+ return self.flags
+
+ def get_code_name(self, raw_code, transformed_code, number):
+ """Compute filename given the code, and the cell number.
+
+ Parameters
+ ----------
+ raw_code : str
+ The raw cell code.
+ transformed_code : str
+ The executable Python source code to cache and compile.
+ number : int
+ A number which forms part of the code's name. Used for the execution
+ counter.
+
+ Returns
+ -------
+ The computed filename.
+ """
+ return code_name(transformed_code, number)
+
+ def format_code_name(self, name):
+ """Return a user-friendly label and name for a code block.
+
+ Parameters
+ ----------
+ name : str
+ The name for the code block returned from get_code_name
+
+ Returns
+ -------
+ A (label, name) pair that can be used in tracebacks, or None if the default formatting should be used.
+ """
+ if name in self._filename_map:
+ return "Cell", "In[%s]" % self._filename_map[name]
+
+ def cache(self, transformed_code, number=0, raw_code=None):
+ """Make a name for a block of code, and cache the code.
+
+ Parameters
+ ----------
+ transformed_code : str
+ The executable Python source code to cache and compile.
+ number : int
+ A number which forms part of the code's name. Used for the execution
+ counter.
+ raw_code : str
+ The raw code before transformation, if None, set to `transformed_code`.
+
+ Returns
+ -------
+ The name of the cached code (as a string). Pass this as the filename
+ argument to compilation, so that tracebacks are correctly hooked up.
+ """
+ if raw_code is None:
+ raw_code = transformed_code
+
+ name = self.get_code_name(raw_code, transformed_code, number)
+
+ # Save the execution count
+ self._filename_map[name] = number
+
+ # Since Python 2.5, setting mtime to `None` means the lines will
+ # never be removed by `linecache.checkcache`. This means all the
+ # monkeypatching has *never* been necessary, since this code was
+ # only added in 2010, at which point IPython had already stopped
+ # supporting Python 2.4.
+ #
+ # Note that `linecache.clearcache` and `linecache.updatecache` may
+ # still remove our code from the cache, but those show explicit
+ # intent, and we should not try to interfere. Normally the former
+ # is never called except when out of memory, and the latter is only
+ # called for lines *not* in the cache.
+ entry = (
+ len(transformed_code),
+ None,
+ [line + "\n" for line in transformed_code.splitlines()],
+ name,
+ )
+ linecache.cache[name] = entry
+ return name
+
+ @contextmanager
+ def extra_flags(self, flags):
+ ## bits that we'll set to 1
+ turn_on_bits = ~self.flags & flags
+
+
+ self.flags = self.flags | flags
+ try:
+ yield
+ finally:
+ # turn off only the bits we turned on so that something like
+ # __future__ that set flags stays.
+ self.flags &= ~turn_on_bits
+
+
+def check_linecache_ipython(*args):
+ """Deprecated since IPython 8.6. Call linecache.checkcache() directly.
+
+ It was already not necessary to call this function directly. If no
+ CachingCompiler had been created, this function would fail badly. If
+ an instance had been created, this function would've been monkeypatched
+ into place.
+
+ As of IPython 8.6, the monkeypatching has gone away entirely. But there
+ were still internal callers of this function, so maybe external callers
+ also existed?
+ """
+ import warnings
+
+ warnings.warn(
+ "Deprecated Since IPython 8.6, Just call linecache.checkcache() directly.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ linecache.checkcache()
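Editorial note, not part of the diff: an illustrative round trip through ``CachingCompiler`` with a made-up cell and execution count, showing why cached cells remain visible to ``linecache`` and therefore to tracebacks:

import linecache
from IPython.core.compilerop import CachingCompiler

compiler = CachingCompiler()
cell = "a = 1 + 1\n"
name = compiler.cache(cell, number=3)   # e.g. '<ipython-input-3-...>'
code = compiler(cell, name, "exec")     # codeop.Compile instances are callable
exec(code, {})
# the source is retrievable by its pseudo-filename, as tracebacks expect
assert linecache.getlines(name) == ["a = 1 + 1\n"]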
diff --git a/contrib/python/ipython/py3/IPython/core/completer.py b/contrib/python/ipython/py3/IPython/core/completer.py
new file mode 100644
index 0000000000..cc5f6c4270
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/completer.py
@@ -0,0 +1,3347 @@
+"""Completion for IPython.
+
+This module started as a fork of the rlcompleter module in the Python standard
+library. The original enhancements made to rlcompleter have been sent
+upstream and were accepted as of Python 2.3.
+
+This module now supports a wide variety of completion mechanisms, both for
+normal classic Python code and for IPython-specific syntax such as magics.
+
+Latex and Unicode completion
+============================
+
+IPython and compatible frontends can not only complete your code, but can also
+help you input a wide range of characters. In particular we allow you to insert
+a unicode character using the tab completion mechanism.
+
+Forward latex/unicode completion
+--------------------------------
+
+Forward completion allows you to easily type a unicode character using its latex
+name, or unicode long description. To do so type a backslash followed by the
+relevant name and press tab:
+
+
+Using latex completion:
+
+.. code::
+
+ \\alpha<tab>
+ α
+
+or using unicode completion:
+
+
+.. code::
+
+ \\GREEK SMALL LETTER ALPHA<tab>
+ α
+
+
+Only valid Python identifiers will complete. Combining characters (like arrows or
+dots) are also available; unlike in latex, they need to be put after their
+counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
+
+Some browsers are known to display combining characters incorrectly.
+
+Backward latex completion
+-------------------------
+
+It is sometimes challenging to know how to type a character. If you are using
+IPython, or any compatible frontend, you can prepend a backslash to the character
+and press :kbd:`Tab` to expand it to its latex form.
+
+.. code::
+
+ \\α<tab>
+ \\alpha
+
+
+Both forward and backward completions can be deactivated by setting the
+:std:configtrait:`Completer.backslash_combining_completions` option to
+``False``.
+
+
+Experimental
+============
+
+Starting with IPython 6.0, this module can make use of the Jedi library to
+generate completions both using static analysis of the code, and dynamically
+inspecting multiple namespaces. Jedi is an autocompletion and static analysis
+library for Python. The APIs attached to this new mechanism are unstable and will
+raise unless used in a :any:`provisionalcompleter` context manager.
+
+You will find that the following are experimental:
+
+ - :any:`provisionalcompleter`
+ - :any:`IPCompleter.completions`
+ - :any:`Completion`
+ - :any:`rectify_completions`
+
+.. note::
+
+ better name for :any:`rectify_completions` ?
+
+We welcome any feedback on these new APIs, and we also encourage you to try this
+module in debug mode (start IPython with ``--Completer.debug=True``) in order
+to have extra logging information if :any:`jedi` is crashing, or if the current
+IPython completer's pending deprecations are returning results not yet handled
+by :any:`jedi`.
+
+Using Jedi for tab completion allows snippets like the following to work without
+having to execute any code:
+
+ >>> myvar = ['hello', 42]
+ ... myvar[1].bi<tab>
+
+Tab completion will be able to infer that ``myvar[1]`` is a real number without
+executing almost any code, unlike the deprecated :any:`IPCompleter.greedy`
+option.
+
+Be sure to update :any:`jedi` to the latest stable version or to try the
+current development version to get better completions.
+
+Matchers
+========
+
+All completion routines are implemented using the unified *Matchers* API.
+The matchers API is provisional and subject to change without notice.
+
+The built-in matchers include:
+
+- :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
+- :any:`IPCompleter.magic_matcher`: completions for magics,
+- :any:`IPCompleter.unicode_name_matcher`,
+ :any:`IPCompleter.fwd_unicode_matcher`
+ and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
+- :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
+- :any:`IPCompleter.file_matcher`: paths to files and directories,
+- :any:`IPCompleter.python_func_kw_matcher` - function keywords,
+- :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
+- ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
+- :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
+  implementation in :any:`InteractiveShell` which uses the IPython hooks system
+  (`complete_command`) with string dispatch (including regular expressions).
+  Unlike other matchers, ``custom_completer_matcher`` will not suppress
+  Jedi results, to match behaviour in earlier IPython versions.
+
+Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list.
+
+Matcher API
+-----------
+
+Simplifying some details, the ``Matcher`` interface can be described as
+
+.. code-block::
+
+ MatcherAPIv1 = Callable[[str], list[str]]
+ MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
+
+ Matcher = MatcherAPIv1 | MatcherAPIv2
+
+The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
+and remains supported as the simplest way to generate completions. This is also
+currently the only API supported by the IPython hooks system `complete_command`.
+
+To distinguish between matcher versions, the ``matcher_api_version`` attribute is used.
+More precisely, the API allows omitting ``matcher_api_version`` for v1 Matchers,
+and requires a literal ``2`` for v2 Matchers.
+
+Once the API stabilises, future versions may relax the requirement for specifying
+``matcher_api_version`` by switching to :any:`functools.singledispatch`; therefore
+please do not rely on the presence of ``matcher_api_version`` for any purpose.
+
+Suppression of competing matchers
+---------------------------------
+
+By default results from all matchers are combined, in the order determined by
+their priority. Matchers can request to suppress results from subsequent
+matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
+
+When multiple matchers simultaneously request suppression, the results from
+the matcher with the highest priority will be returned.
+
+Sometimes it is desirable to suppress most but not all other matchers;
+this can be achieved by adding a set of identifiers of matchers which
+should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
+
+The suppression behaviour is user-configurable via
+:std:configtrait:`IPCompleter.suppress_competing_matchers`.
+"""
+
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+#
+# Some of this code originated from rlcompleter in the Python standard library
+# Copyright (C) 2001 Python Software Foundation, www.python.org
+
+from __future__ import annotations
+import builtins as builtin_mod
+import enum
+import glob
+import inspect
+import itertools
+import keyword
+import os
+import re
+import string
+import sys
+import tokenize
+import time
+import unicodedata
+import uuid
+import warnings
+from ast import literal_eval
+from collections import defaultdict
+from contextlib import contextmanager
+from dataclasses import dataclass
+from functools import cached_property, partial
+from types import SimpleNamespace
+from typing import (
+ Iterable,
+ Iterator,
+ List,
+ Tuple,
+ Union,
+ Any,
+ Sequence,
+ Dict,
+ Optional,
+ TYPE_CHECKING,
+ Set,
+ Sized,
+ TypeVar,
+ Literal,
+)
+
+from IPython.core.guarded_eval import guarded_eval, EvaluationContext
+from IPython.core.error import TryNext
+from IPython.core.inputtransformer2 import ESC_MAGIC
+from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
+from IPython.core.oinspect import InspectColors
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import generics
+from IPython.utils.decorators import sphinx_options
+from IPython.utils.dir2 import dir2, get_real_method
+from IPython.utils.docs import GENERATING_DOCUMENTATION
+from IPython.utils.path import ensure_dir_exists
+from IPython.utils.process import arg_split
+from traitlets import (
+ Bool,
+ Enum,
+ Int,
+ List as ListTrait,
+ Unicode,
+ Dict as DictTrait,
+ Union as UnionTrait,
+ observe,
+)
+from traitlets.config.configurable import Configurable
+
+import __main__
+
+# skip module docstests
+__skip_doctest__ = True
+
+
+try:
+ import jedi
+ jedi.settings.case_insensitive_completion = False
+ import jedi.api.helpers
+ import jedi.api.classes
+ JEDI_INSTALLED = True
+except ImportError:
+ JEDI_INSTALLED = False
+
+
+if TYPE_CHECKING or GENERATING_DOCUMENTATION and sys.version_info >= (3, 11):
+ from typing import cast
+ from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
+else:
+ from typing import Generic
+
+ def cast(type_, obj):
+ """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
+ return obj
+
+ # do not require on runtime
+ NotRequired = Tuple # requires Python >=3.11
+ TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
+ Protocol = object # requires Python >=3.8
+ TypeAlias = Any # requires Python >=3.10
+ TypeGuard = Generic # requires Python >=3.10
+if GENERATING_DOCUMENTATION:
+ from typing import TypedDict
+
+# -----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# Ranges where we have most of the valid unicode names. We could be finer
+# grained, but is it worth it for performance? While unicode has characters in
+# the range 0..0x110000, we seem to have names for only about 10% of those
+# (131808 as I write this). With the ranges below we cover them all, with a
+# density of ~67%; the biggest next gap we could consider only adds about 1%
+# density and there are 600 gaps that would need hard coding.
+_UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
+
+# Public API
+__all__ = ["Completer", "IPCompleter"]
+
+if sys.platform == 'win32':
+ PROTECTABLES = ' '
+else:
+ PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
+
+# Protect against returning an enormous number of completions which the frontend
+# may have trouble processing.
+MATCHES_LIMIT = 500
+
+# Completion type reported when no type can be inferred.
+_UNKNOWN_TYPE = "<unknown>"
+
+# sentinel value to signal lack of a match
+not_found = object()
+
+class ProvisionalCompleterWarning(FutureWarning):
+ """
+    Exception raised by an experimental feature in this module.
+
+ Wrap code in :any:`provisionalcompleter` context manager if you
+ are certain you want to use an unstable feature.
+ """
+ pass
+
+warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
+
+
+@skip_doctest
+@contextmanager
+def provisionalcompleter(action='ignore'):
+ """
+ This context manager has to be used in any place where unstable completer
+ behavior and API may be called.
+
+ >>> with provisionalcompleter():
+ ... completer.do_experimental_things() # works
+
+ >>> completer.do_experimental_things() # raises.
+
+ .. note::
+
+ Unstable
+
+ By using this context manager you agree that the API in use may change
+ without warning, and that you won't complain if they do so.
+
+ You also understand that, if the API is not to your liking, you should report
+ a bug to explain your use case upstream.
+
+ We'll be happy to get your feedback, feature requests, and improvements on
+ any of the unstable APIs!
+ """
+ with warnings.catch_warnings():
+ warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
+ yield
+
+
+def has_open_quotes(s):
+ """Return whether a string has open quotes.
+
+    This simply checks whether the number of quote characters of either type in
+    the string is odd.
+
+ Returns
+ -------
+ If there is an open quote, the quote character is returned. Else, return
+ False.
+ """
+ # We check " first, then ', so complex cases with nested quotes will get
+ # the " to take precedence.
+ if s.count('"') % 2:
+ return '"'
+ elif s.count("'") % 2:
+ return "'"
+ else:
+ return False
+
+
+def protect_filename(s, protectables=PROTECTABLES):
+ """Escape a string to protect certain characters."""
+ if set(s) & set(protectables):
+ if sys.platform == "win32":
+ return '"' + s + '"'
+ else:
+ return "".join(("\\" + c if c in protectables else c) for c in s)
+ else:
+ return s
+
+
+def expand_user(path:str) -> Tuple[str, bool, str]:
+ """Expand ``~``-style usernames in strings.
+
+ This is similar to :func:`os.path.expanduser`, but it computes and returns
+ extra information that will be useful if the input was being used in
+ computing completions, and you wish to return the completions with the
+ original '~' instead of its expanded value.
+
+ Parameters
+ ----------
+ path : str
+ String to be expanded. If no ~ is present, the output is the same as the
+ input.
+
+ Returns
+ -------
+ newpath : str
+ Result of ~ expansion in the input path.
+ tilde_expand : bool
+ Whether any expansion was performed or not.
+ tilde_val : str
+ The value that ~ was replaced with.
+ """
+ # Default values
+ tilde_expand = False
+ tilde_val = ''
+ newpath = path
+
+ if path.startswith('~'):
+ tilde_expand = True
+ rest = len(path)-1
+ newpath = os.path.expanduser(path)
+ if rest:
+ tilde_val = newpath[:-rest]
+ else:
+ tilde_val = newpath
+
+ return newpath, tilde_expand, tilde_val
+
+
+def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
+ """Does the opposite of expand_user, with its outputs.
+ """
+ if tilde_expand:
+ return path.replace(tilde_val, '~')
+ else:
+ return path
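# Editorial sketch, not part of the diff: round-tripping a path through the two
# helpers above (output shown for a home directory of /home/user).
newpath, tilde_expand, tilde_val = expand_user("~/data/file.csv")
# newpath == "/home/user/data/file.csv", tilde_expand is True, tilde_val == "/home/user"
assert compress_user(newpath, tilde_expand, tilde_val) == "~/data/file.csv"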
+
+
+def completions_sorting_key(word):
+ """key for sorting completions
+
+ This does several things:
+
+ - Demote any completions starting with underscores to the end
+ - Insert any %magic and %%cellmagic completions in the alphabetical order
+ by their name
+ """
+ prio1, prio2 = 0, 0
+
+ if word.startswith('__'):
+ prio1 = 2
+ elif word.startswith('_'):
+ prio1 = 1
+
+ if word.endswith('='):
+ prio1 = -1
+
+ if word.startswith('%%'):
+ # If there's another % in there, this is something else, so leave it alone
+ if not "%" in word[2:]:
+ word = word[2:]
+ prio2 = 2
+ elif word.startswith('%'):
+ if not "%" in word[1:]:
+ word = word[1:]
+ prio2 = 1
+
+ return prio1, word, prio2
+
+
+class _FakeJediCompletion:
+ """
+ This is a workaround to communicate to the UI that Jedi has crashed and to
+    report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
+
+ Added in IPython 6.0 so should likely be removed for 7.0
+
+ """
+
+ def __init__(self, name):
+
+ self.name = name
+ self.complete = name
+ self.type = 'crashed'
+ self.name_with_symbols = name
+ self.signature = ""
+ self._origin = "fake"
+ self.text = "crashed"
+
+ def __repr__(self):
+ return '<Fake completion object jedi has crashed>'
+
+
+_JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
+
+
+class Completion:
+ """
+ Completion object used and returned by IPython completers.
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+        It will also raise unless used in the proper context manager.
+
+    This acts as a middle-ground :any:`Completion` object between the
+    :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
+    object. While Jedi needs a lot of information about the evaluator and how the
+    code should be run/inspected, Prompt Toolkit (and other frontends) mostly
+    need user-facing information.
+
+    - Which range should be replaced by what.
+    - Some metadata (like the completion type), or meta information to be
+      displayed to the user.
+
+    For debugging purposes we can also store the origin of the completion (``jedi``,
+ ``IPython.python_matches``, ``IPython.magics_matches``...).
+ """
+
+ __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
+
+ def __init__(
+ self,
+ start: int,
+ end: int,
+ text: str,
+ *,
+ type: Optional[str] = None,
+ _origin="",
+ signature="",
+ ) -> None:
+ warnings.warn(
+ "``Completion`` is a provisional API (as of IPython 6.0). "
+ "It may change without warnings. "
+ "Use in corresponding context manager.",
+ category=ProvisionalCompleterWarning,
+ stacklevel=2,
+ )
+
+ self.start = start
+ self.end = end
+ self.text = text
+ self.type = type
+ self.signature = signature
+ self._origin = _origin
+
+ def __repr__(self):
+ return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
+ (self.start, self.end, self.text, self.type or '?', self.signature or '?')
+
+ def __eq__(self, other) -> bool:
+ """
+        Equality and hash do not take the type into account (as some completers
+        may not be able to infer the type), but are used to (partially)
+        de-duplicate completions.
+
+        Completely de-duplicating completions is a bit trickier than just
+        comparing, as it depends on the surrounding text, which Completions are
+        not aware of.
+ """
+ return self.start == other.start and \
+ self.end == other.end and \
+ self.text == other.text
+
+ def __hash__(self):
+ return hash((self.start, self.end, self.text))
+
+
+class SimpleCompletion:
+ """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
+
+ .. warning::
+
+ Provisional
+
+ This class is used to describe the currently supported attributes of
+ simple completion items, and any additional implementation details
+ should not be relied on. Additional attributes may be included in
+        future versions, and the meaning of ``text`` may be disambiguated from the
+        current dual meaning of "text to insert" and "text to use as a label".
+ """
+
+ __slots__ = ["text", "type"]
+
+ def __init__(self, text: str, *, type: Optional[str] = None):
+ self.text = text
+ self.type = type
+
+ def __repr__(self):
+ return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
+
+
+class _MatcherResultBase(TypedDict):
+ """Definition of dictionary to be returned by new-style Matcher (API v2)."""
+
+ #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
+ matched_fragment: NotRequired[str]
+
+ #: Whether to suppress results from all other matchers (True), some
+ #: matchers (set of identifiers) or none (False); default is False.
+ suppress: NotRequired[Union[bool, Set[str]]]
+
+ #: Identifiers of matchers which should NOT be suppressed when this matcher
+ #: requests to suppress all other matchers; defaults to an empty set.
+ do_not_suppress: NotRequired[Set[str]]
+
+ #: Are completions already ordered and should be left as-is? default is False.
+ ordered: NotRequired[bool]
+
+
+@sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
+class SimpleMatcherResult(_MatcherResultBase, TypedDict):
+ """Result of new-style completion matcher."""
+
+ # note: TypedDict is added again to the inheritance chain
+ # in order to get __orig_bases__ for documentation
+
+ #: List of candidate completions
+ completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
+
+
+class _JediMatcherResult(_MatcherResultBase):
+ """Matching result returned by Jedi (will be processed differently)"""
+
+ #: list of candidate completions
+ completions: Iterator[_JediCompletionLike]
+
+
+AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
+AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
+
+
+@dataclass
+class CompletionContext:
+ """Completion context provided as an argument to matchers in the Matcher API v2."""
+
+ # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
+ # which was not explicitly visible as an argument of the matcher, making any refactor
+ # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
+ # from the completer, and make substituting them in sub-classes easier.
+
+ #: Relevant fragment of code directly preceding the cursor.
+ #: The extraction of token is implemented via splitter heuristic
+ #: (following readline behaviour for legacy reasons), which is user configurable
+ #: (by switching the greedy mode).
+ token: str
+
+ #: The full available content of the editor or buffer
+ full_text: str
+
+ #: Cursor position in the line (the same for ``full_text`` and ``text``).
+ cursor_position: int
+
+ #: Cursor line in ``full_text``.
+ cursor_line: int
+
+ #: The maximum number of completions that will be used downstream.
+ #: Matchers can use this information to abort early.
+ #: The built-in Jedi matcher is currently excepted from this limit.
+ # If not given, return all possible completions.
+ limit: Optional[int]
+
+ @cached_property
+ def text_until_cursor(self) -> str:
+ return self.line_with_cursor[: self.cursor_position]
+
+ @cached_property
+ def line_with_cursor(self) -> str:
+ return self.full_text.split("\n")[self.cursor_line]
+
+
+#: Matcher results for API v2.
+MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
+
+
+class _MatcherAPIv1Base(Protocol):
+ def __call__(self, text: str) -> List[str]:
+ """Call signature."""
+ ...
+
+ #: Used to construct the default matcher identifier
+ __qualname__: str
+
+
+class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
+ #: API version
+ matcher_api_version: Optional[Literal[1]]
+
+ def __call__(self, text: str) -> List[str]:
+ """Call signature."""
+ ...
+
+
+#: Protocol describing Matcher API v1.
+MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
+
+
+class MatcherAPIv2(Protocol):
+ """Protocol describing Matcher API v2."""
+
+ #: API version
+ matcher_api_version: Literal[2] = 2
+
+ def __call__(self, context: CompletionContext) -> MatcherResult:
+ """Call signature."""
+ ...
+
+ #: Used to construct the default matcher identifier
+ __qualname__: str
+
+
+Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
+
+
+def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
+ api_version = _get_matcher_api_version(matcher)
+ return api_version == 1
+
+
+def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
+ api_version = _get_matcher_api_version(matcher)
+ return api_version == 2
+
+
+def _is_sizable(value: Any) -> TypeGuard[Sized]:
+    """Determine whether the object is sizable (has a ``__len__``)."""
+ return hasattr(value, "__len__")
+
+
+def _is_iterator(value: Any) -> TypeGuard[Iterator]:
+    """Determine whether the object is an iterator (has a ``__next__``)."""
+ return hasattr(value, "__next__")
+
+
+def has_any_completions(result: MatcherResult) -> bool:
+    """Check if the result includes any completions."""
+ completions = result["completions"]
+ if _is_sizable(completions):
+ return len(completions) != 0
+ if _is_iterator(completions):
+ try:
+ old_iterator = completions
+ first = next(old_iterator)
+ result["completions"] = cast(
+ Iterator[SimpleCompletion],
+ itertools.chain([first], old_iterator),
+ )
+ return True
+ except StopIteration:
+ return False
+ raise ValueError(
+ "Completions returned by matcher need to be an Iterator or a Sizable"
+ )
+
+
+def completion_matcher(
+ *,
+ priority: Optional[float] = None,
+ identifier: Optional[str] = None,
+ api_version: int = 1,
+):
+ """Adds attributes describing the matcher.
+
+ Parameters
+ ----------
+ priority : Optional[float]
+ The priority of the matcher, determines the order of execution of matchers.
+ Higher priority means that the matcher will be executed first. Defaults to 0.
+ identifier : Optional[str]
+ identifier of the matcher allowing users to modify the behaviour via traitlets,
+        and also used for debugging (will be passed as ``origin`` with the completions).
+
+ Defaults to matcher function's ``__qualname__`` (for example,
+        ``IPCompleter.file_matcher`` for the built-in matcher defined
+ as a ``file_matcher`` method of the ``IPCompleter`` class).
+ api_version: Optional[int]
+ version of the Matcher API used by this matcher.
+ Currently supported values are 1 and 2.
+ Defaults to 1.
+ """
+
+ def wrapper(func: Matcher):
+ func.matcher_priority = priority or 0 # type: ignore
+ func.matcher_identifier = identifier or func.__qualname__ # type: ignore
+ func.matcher_api_version = api_version # type: ignore
+ if TYPE_CHECKING:
+ if api_version == 1:
+ func = cast(MatcherAPIv1, func)
+ elif api_version == 2:
+ func = cast(MatcherAPIv2, func)
+ return func
+
+ return wrapper
+
+
+def _get_matcher_priority(matcher: Matcher):
+ return getattr(matcher, "matcher_priority", 0)
+
+
+def _get_matcher_id(matcher: Matcher):
+ return getattr(matcher, "matcher_identifier", matcher.__qualname__)
+
+
+def _get_matcher_api_version(matcher):
+ return getattr(matcher, "matcher_api_version", 1)
+
+
+context_matcher = partial(completion_matcher, api_version=2)
+
+
+_IC = Iterable[Completion]
+
+
+def _deduplicate_completions(text: str, completions: _IC)-> _IC:
+ """
+ Deduplicate a set of completions.
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+
+ Parameters
+ ----------
+ text : str
+ text that should be completed.
+ completions : Iterator[Completion]
+ iterator over the completions to deduplicate
+
+ Yields
+ ------
+ `Completions` objects
+        Completions coming from multiple sources may be different but end up having
+        the same effect when applied to ``text``. If this is the case, this will
+        consider completions as equal and only emit the first encountered.
+        Not folded in `completions()` yet for debugging purposes, and to detect when
+        the IPython completer does return things that Jedi does not, but it should be
+        at some point.
+ """
+ completions = list(completions)
+ if not completions:
+ return
+
+ new_start = min(c.start for c in completions)
+ new_end = max(c.end for c in completions)
+
+ seen = set()
+ for c in completions:
+ new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
+ if new_text not in seen:
+ yield c
+ seen.add(new_text)
+
+
+def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
+ """
+ Rectify a set of completions to all have the same ``start`` and ``end``
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+        It will also raise unless used in the proper context manager.
+
+ Parameters
+ ----------
+ text : str
+ text that should be completed.
+ completions : Iterator[Completion]
+ iterator over the completions to rectify
+ _debug : bool
+ Log failed completion
+
+ Notes
+ -----
+ :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
+ the Jupyter Protocol requires them to behave like so. This will readjust
+ the completion to have the same ``start`` and ``end`` by padding both
+ extremities with surrounding text.
+
+    During stabilisation this should support a ``_debug`` option to log which
+    completions are returned by the IPython completer and not found in Jedi, in
+    order to make upstream bug reports.
+ """
+ warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
+ "It may change without warnings. "
+ "Use in corresponding context manager.",
+ category=ProvisionalCompleterWarning, stacklevel=2)
+
+ completions = list(completions)
+ if not completions:
+ return
+ starts = (c.start for c in completions)
+ ends = (c.end for c in completions)
+
+ new_start = min(starts)
+ new_end = max(ends)
+
+ seen_jedi = set()
+ seen_python_matches = set()
+ for c in completions:
+ new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
+ if c._origin == 'jedi':
+ seen_jedi.add(new_text)
+ elif c._origin == 'IPCompleter.python_matches':
+ seen_python_matches.add(new_text)
+ yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
+ diff = seen_python_matches.difference(seen_jedi)
+ if diff and _debug:
+ print('IPython.python matches have extras:', diff)
+
+
+if sys.platform == 'win32':
+ DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
+else:
+ DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
+
+GREEDY_DELIMS = ' =\r\n'
+
+
+class CompletionSplitter(object):
+ """An object to split an input line in a manner similar to readline.
+
+ By having our own implementation, we can expose readline-like completion in
+ a uniform manner to all frontends. This object only needs to be given the
+ line of text to be split and the cursor position on said line, and it
+ returns the 'word' to be completed on at the cursor after splitting the
+ entire line.
+
+ What characters are used as splitting delimiters can be controlled by
+ setting the ``delims`` attribute (this is a property that internally
+ automatically builds the necessary regular expression)"""
+
+ # Private interface
+
+ # A string of delimiter characters. The default value makes sense for
+ # IPython's most typical usage patterns.
+ _delims = DELIMS
+
+ # The expression (a normal string) to be compiled into a regular expression
+ # for actual splitting. We store it as an attribute mostly for ease of
+ # debugging, since this type of code can be so tricky to debug.
+ _delim_expr = None
+
+ # The regular expression that does the actual splitting
+ _delim_re = None
+
+ def __init__(self, delims=None):
+ delims = CompletionSplitter._delims if delims is None else delims
+ self.delims = delims
+
+ @property
+ def delims(self):
+ """Return the string of delimiter characters."""
+ return self._delims
+
+ @delims.setter
+ def delims(self, delims):
+ """Set the delimiters for line splitting."""
+ expr = '[' + ''.join('\\'+ c for c in delims) + ']'
+ self._delim_re = re.compile(expr)
+ self._delims = delims
+ self._delim_expr = expr
+
+ def split_line(self, line, cursor_pos=None):
+ """Split a line of text with a cursor at the given position.
+ """
+ l = line if cursor_pos is None else line[:cursor_pos]
+ return self._delim_re.split(l)[-1]
+
+
+
+class Completer(Configurable):
+
+ greedy = Bool(
+ False,
+ help="""Activate greedy completion.
+
+ .. deprecated:: 8.8
+ Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
+
+ When enabled in IPython 8.8 or newer, changes configuration as follows:
+
+ - ``Completer.evaluation = 'unsafe'``
+ - ``Completer.auto_close_dict_keys = True``
+ """,
+ ).tag(config=True)
+
+ evaluation = Enum(
+ ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
+ default_value="limited",
+ help="""Policy for code evaluation under completion.
+
+        Successive options allow enabling more eager evaluation for better
+ completion suggestions, including for nested dictionaries, nested lists,
+ or even results of function calls.
+ Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
+ code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
+
+ Allowed values are:
+
+ - ``forbidden``: no evaluation of code is permitted,
+ - ``minimal``: evaluation of literals and access to built-in namespace;
+          no item/attribute evaluation, no access to locals/globals,
+ no evaluation of any operations or comparisons.
+ - ``limited``: access to all namespaces, evaluation of hard-coded methods
+ (for example: :any:`dict.keys`, :any:`object.__getattr__`,
+ :any:`object.__getitem__`) on allow-listed objects (for example:
+ :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
+ - ``unsafe``: evaluation of all methods and function calls but not of
+ syntax with side-effects like `del x`,
+ - ``dangerous``: completely arbitrary evaluation.
+ """,
+ ).tag(config=True)
+
+ use_jedi = Bool(default_value=JEDI_INSTALLED,
+ help="Experimental: Use Jedi to generate autocompletions. "
+                    "Defaults to True if jedi is installed.").tag(config=True)
+
+ jedi_compute_type_timeout = Int(default_value=400,
+ help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
+        Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
+        performance by preventing jedi from building its cache.
+ """).tag(config=True)
+
+ debug = Bool(default_value=False,
+ help='Enable debug for the Completer. Mostly print extra '
+ 'information for experimental jedi integration.')\
+ .tag(config=True)
+
+ backslash_combining_completions = Bool(True,
+ help="Enable unicode completions, e.g. \\alpha<tab> . "
+ "Includes completion of latex commands, unicode names, and expanding "
+ "unicode characters back to latex commands.").tag(config=True)
+
+ auto_close_dict_keys = Bool(
+ False,
+ help="""
+ Enable auto-closing dictionary keys.
+
+ When enabled string keys will be suffixed with a final quote
+ (matching the opening quote), tuple keys will also receive a
+ separating comma if needed, and keys which are final will
+ receive a closing bracket (``]``).
+ """,
+ ).tag(config=True)
+
+ def __init__(self, namespace=None, global_namespace=None, **kwargs):
+ """Create a new completer for the command line.
+
+ Completer(namespace=ns, global_namespace=ns2) -> completer instance.
+
+ If unspecified, the default namespace where completions are performed
+ is __main__ (technically, __main__.__dict__). Namespaces should be
+ given as dictionaries.
+
+ An optional second namespace can be given. This allows the completer
+ to handle cases where both the local and global scopes need to be
+ distinguished.
+ """
+
+ # Don't bind to namespace quite yet, but flag whether the user wants a
+ # specific namespace or to use __main__.__dict__. This will allow us
+ # to bind to __main__.__dict__ at completion time, not now.
+ if namespace is None:
+ self.use_main_ns = True
+ else:
+ self.use_main_ns = False
+ self.namespace = namespace
+
+ # The global namespace, if given, can be bound directly
+ if global_namespace is None:
+ self.global_namespace = {}
+ else:
+ self.global_namespace = global_namespace
+
+ self.custom_matchers = []
+
+ super(Completer, self).__init__(**kwargs)
+
+ def complete(self, text, state):
+ """Return the next possible completion for 'text'.
+
+ This is called successively with state == 0, 1, 2, ... until it
+ returns None. The completion should begin with 'text'.
+
+ """
+ if self.use_main_ns:
+ self.namespace = __main__.__dict__
+
+ if state == 0:
+ if "." in text:
+ self.matches = self.attr_matches(text)
+ else:
+ self.matches = self.global_matches(text)
+ try:
+ return self.matches[state]
+ except IndexError:
+ return None
+
+ def global_matches(self, text):
+ """Compute matches when text is a simple name.
+
+ Return a list of all keywords, built-in functions and names currently
+ defined in self.namespace or self.global_namespace that match.
+
+ """
+ matches = []
+ match_append = matches.append
+ n = len(text)
+ for lst in [
+ keyword.kwlist,
+ builtin_mod.__dict__.keys(),
+ list(self.namespace.keys()),
+ list(self.global_namespace.keys()),
+ ]:
+ for word in lst:
+ if word[:n] == text and word != "__builtins__":
+ match_append(word)
+
+ snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
+ for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
+ shortened = {
+ "_".join([sub[0] for sub in word.split("_")]): word
+ for word in lst
+ if snake_case_re.match(word)
+ }
+ for word in shortened.keys():
+ if word[:n] == text and word != "__builtins__":
+ match_append(shortened[word])
+ return matches
+
+ def attr_matches(self, text):
+ """Compute matches when text contains a dot.
+
+ Assuming the text is of the form NAME.NAME....[NAME], and is
+ evaluatable in self.namespace or self.global_namespace, it will be
+ evaluated and its attributes (as revealed by dir()) are used as
+ possible completions. (For class instances, class members are
+ also considered.)
+
+ WARNING: this can still invoke arbitrary C code, if an object
+ with a __getattr__ hook is evaluated.
+
+ """
+ m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
+ if not m2:
+ return []
+ expr, attr = m2.group(1, 2)
+
+ obj = self._evaluate_expr(expr)
+
+ if obj is not_found:
+ return []
+
+ if self.limit_to__all__ and hasattr(obj, '__all__'):
+ words = get__all__entries(obj)
+ else:
+ words = dir2(obj)
+
+ try:
+ words = generics.complete_object(obj, words)
+ except TryNext:
+ pass
+ except AssertionError:
+ raise
+ except Exception:
+ # Silence errors from completion function
+ pass
+ # Build match list to return
+ n = len(attr)
+
+ # Note: ideally we would just return words here and the prefix
+ # reconciliator would know that we intend to append to rather than
+ # replace the input text; this requires refactoring to return range
+ # which ought to be replaced (as does jedi).
+ tokens = _parse_tokens(expr)
+ rev_tokens = reversed(tokens)
+ skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
+ name_turn = True
+
+ parts = []
+ for token in rev_tokens:
+ if token.type in skip_over:
+ continue
+ if token.type == tokenize.NAME and name_turn:
+ parts.append(token.string)
+ name_turn = False
+ elif token.type == tokenize.OP and token.string == "." and not name_turn:
+ parts.append(token.string)
+ name_turn = True
+ else:
+ # short-circuit if not empty nor name token
+ break
+
+ prefix_after_space = "".join(reversed(parts))
+
+ return ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr]
+
+ def _evaluate_expr(self, expr):
+ obj = not_found
+ done = False
+ while not done and expr:
+ try:
+ obj = guarded_eval(
+ expr,
+ EvaluationContext(
+ globals=self.global_namespace,
+ locals=self.namespace,
+ evaluation=self.evaluation,
+ ),
+ )
+ done = True
+ except Exception as e:
+ if self.debug:
+ print("Evaluation exception", e)
+ # trim the expression to remove any invalid prefix
+ # e.g. user starts `(d[`, so we get `expr = '(d'`,
+ # where parenthesis is not closed.
+ # TODO: make this faster by reusing parts of the computation?
+ expr = expr[1:]
+ return obj
+
+def get__all__entries(obj):
+ """returns the strings in the __all__ attribute"""
+ try:
+ words = getattr(obj, '__all__')
+ except:
+ return []
+
+ return [w for w in words if isinstance(w, str)]
+
+
+class _DictKeyState(enum.Flag):
+ """Represent state of the key match in context of other possible matches.
+
+ - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
+    - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there are no tuple members to add beyond `'b'`.
+ - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
+    - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM | IN_TUPLE}`
+ """
+
+ BASELINE = 0
+ END_OF_ITEM = enum.auto()
+ END_OF_TUPLE = enum.auto()
+ IN_TUPLE = enum.auto()
+
+
+def _parse_tokens(c):
+ """Parse tokens even if there is an error."""
+ tokens = []
+ token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
+ while True:
+ try:
+ tokens.append(next(token_generator))
+ except tokenize.TokenError:
+ return tokens
+ except StopIteration:
+ return tokens
+
+
+def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
+ """Match any valid Python numeric literal in a prefix of dictionary keys.
+
+ References:
+ - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
+ - https://docs.python.org/3/library/tokenize.html
+ """
+ if prefix[-1].isspace():
+ # if user typed a space we do not have anything to complete
+ # even if there was a valid number token before
+ return None
+ tokens = _parse_tokens(prefix)
+ rev_tokens = reversed(tokens)
+ skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
+ number = None
+ for token in rev_tokens:
+ if token.type in skip_over:
+ continue
+ if number is None:
+ if token.type == tokenize.NUMBER:
+ number = token.string
+ continue
+ else:
+ # we did not match a number
+ return None
+ if token.type == tokenize.OP:
+ if token.string == ",":
+ break
+ if token.string in {"+", "-"}:
+ number = token.string + number
+ else:
+ return None
+ return number
+
+
+_INT_FORMATS = {
+ "0b": bin,
+ "0o": oct,
+ "0x": hex,
+}
+
+
+def match_dict_keys(
+ keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
+ prefix: str,
+ delims: str,
+ extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
+) -> Tuple[str, int, Dict[str, _DictKeyState]]:
+ """Used by dict_key_matches, matching the prefix to a list of keys
+
+ Parameters
+ ----------
+ keys
+ list of keys in dictionary currently being completed.
+ prefix
+ Part of the text already typed by the user. E.g. `mydict[b'fo`
+ delims
+ String of delimiters to consider when finding the current key.
+ extra_prefix : optional
+ Part of the text already typed in multi-key index cases. E.g. for
+ `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
+
+ Returns
+ -------
+    A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
+    ``quote`` being the quote that needs to be used to close the current string,
+    ``token_start`` the position where the replacement should start occurring,
+    and ``matched`` a dictionary mapping replacement/completion strings to values
+    indicating the state of each key match (see ``_DictKeyState``).
+ """
+ prefix_tuple = extra_prefix if extra_prefix else ()
+
+ prefix_tuple_size = sum(
+ [
+ # for pandas, do not count slices as taking space
+ not isinstance(k, slice)
+ for k in prefix_tuple
+ ]
+ )
+ text_serializable_types = (str, bytes, int, float, slice)
+
+ def filter_prefix_tuple(key):
+ # Reject too short keys
+ if len(key) <= prefix_tuple_size:
+ return False
+ # Reject keys which cannot be serialised to text
+ for k in key:
+ if not isinstance(k, text_serializable_types):
+ return False
+ # Reject keys that do not match the prefix
+ for k, pt in zip(key, prefix_tuple):
+ if k != pt and not isinstance(pt, slice):
+ return False
+ # All checks passed!
+ return True
+
+ filtered_key_is_final: Dict[
+ Union[str, bytes, int, float], _DictKeyState
+ ] = defaultdict(lambda: _DictKeyState.BASELINE)
+
+ for k in keys:
+ # If at least one of the matches is not final, mark as undetermined.
+ # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
+ # `111` appears final on first match but is not final on the second.
+
+ if isinstance(k, tuple):
+ if filter_prefix_tuple(k):
+ key_fragment = k[prefix_tuple_size]
+ filtered_key_is_final[key_fragment] |= (
+ _DictKeyState.END_OF_TUPLE
+ if len(k) == prefix_tuple_size + 1
+ else _DictKeyState.IN_TUPLE
+ )
+ elif prefix_tuple_size > 0:
+ # we are completing a tuple but this key is not a tuple,
+ # so we should ignore it
+ pass
+ else:
+ if isinstance(k, text_serializable_types):
+ filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
+
+ filtered_keys = filtered_key_is_final.keys()
+
+ if not prefix:
+ return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
+
+ quote_match = re.search("(?:\"|')", prefix)
+ is_user_prefix_numeric = False
+
+ if quote_match:
+ quote = quote_match.group()
+ valid_prefix = prefix + quote
+ try:
+ prefix_str = literal_eval(valid_prefix)
+ except Exception:
+ return "", 0, {}
+ else:
+ # If it does not look like a string, let's assume
+ # we are dealing with a number or variable.
+ number_match = _match_number_in_dict_key_prefix(prefix)
+
+        # We do not want the key matcher to suggest variable names, so bail out:
+        if number_match is None:
+            # The alternative would be to assume that the user forgot the quote
+            # and, if the substring matches, suggest adding it at the start.
+ return "", 0, {}
+
+ prefix_str = number_match
+ is_user_prefix_numeric = True
+ quote = ""
+
+ pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
+ token_match = re.search(pattern, prefix, re.UNICODE)
+ assert token_match is not None # silence mypy
+ token_start = token_match.start()
+ token_prefix = token_match.group()
+
+ matched: Dict[str, _DictKeyState] = {}
+
+ str_key: Union[str, bytes]
+
+ for key in filtered_keys:
+ if isinstance(key, (int, float)):
+ # User typed a number but this key is not a number.
+ if not is_user_prefix_numeric:
+ continue
+ str_key = str(key)
+ if isinstance(key, int):
+ int_base = prefix_str[:2].lower()
+ # if user typed integer using binary/oct/hex notation:
+ if int_base in _INT_FORMATS:
+ int_format = _INT_FORMATS[int_base]
+ str_key = int_format(key)
+ else:
+ # User typed a string but this key is a number.
+ if is_user_prefix_numeric:
+ continue
+ str_key = key
+ try:
+ if not str_key.startswith(prefix_str):
+ continue
+ except (AttributeError, TypeError, UnicodeError) as e:
+ # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
+ continue
+
+ # reformat remainder of key to begin with prefix
+ rem = str_key[len(prefix_str) :]
+ # force repr wrapped in '
+ rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
+ rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
+ if quote == '"':
+ # The entered prefix is quoted with ",
+ # but the match is quoted with '.
+ # A contained " hence needs escaping for comparison:
+ rem_repr = rem_repr.replace('"', '\\"')
+
+ # then reinsert prefix from start of token
+ match = "%s%s" % (token_prefix, rem_repr)
+
+ matched[match] = filtered_key_is_final[key]
+ return quote, token_start, matched
+
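+# Hedged usage sketch of ``match_dict_keys`` (values reasoned from the code
+# above, not taken from upstream tests; the dictionary values are
+# ``_DictKeyState`` flags):
+#
+#     match_dict_keys(["foobar", "foobaz"], "'foo", delims=" \t\n")
+#     # -> ("'", 0, {"'foobar": <end-of-item>, "'foobaz": <end-of-item>})
+#
+# i.e. the caller learns which quote closes the string, where the replacement
+# starts, and the completion texts to offer.
+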
+
+def cursor_to_position(text:str, line:int, column:int)->int:
+ """
+ Convert the (line,column) position of the cursor in text to an offset in a
+ string.
+
+ Parameters
+ ----------
+ text : str
+ The text in which to calculate the cursor offset
+ line : int
+ Line of the cursor; 0-indexed
+ column : int
+ Column of the cursor 0-indexed
+
+ Returns
+ -------
+ Position of the cursor in ``text``, 0-indexed.
+
+ See Also
+ --------
+ position_to_cursor : reciprocal of this function
+
+ """
+ lines = text.split('\n')
+ assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
+
+ return sum(len(l) + 1 for l in lines[:line]) + column
+
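+# Worked example (added for illustration): cursor_to_position("ab\ncd", 1, 1) == 4,
+# since line 0 contributes len("ab") + 1 characters and the cursor then sits on
+# column 1 of "cd".
+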
+def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
+ """
+    Convert the position of the cursor in text (0-indexed) to a line
+    number (0-indexed) and a column number (0-indexed) pair
+
+ Position should be a valid position in ``text``.
+
+ Parameters
+ ----------
+ text : str
+ The text in which to calculate the cursor offset
+ offset : int
+ Position of the cursor in ``text``, 0-indexed.
+
+ Returns
+ -------
+ (line, column) : (int, int)
+ Line of the cursor; 0-indexed, column of the cursor 0-indexed
+
+ See Also
+ --------
+ cursor_to_position : reciprocal of this function
+
+ """
+
+ assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
+
+ before = text[:offset]
+    blines = before.split('\n')  # ! splitlines would trim a trailing \n, hence split('\n')
+ line = before.count('\n')
+ col = len(blines[-1])
+ return line, col
+
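+# Worked example (added for illustration): position_to_cursor("ab\ncd", 4) == (1, 1),
+# the inverse of the cursor_to_position example above.
+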
+
+def _safe_isinstance(obj, module, class_name, *attrs):
+ """Checks if obj is an instance of module.class_name if loaded
+ """
+ if module in sys.modules:
+ m = sys.modules[module]
+ for attr in [class_name, *attrs]:
+ m = getattr(m, attr)
+ return isinstance(obj, m)
+
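+# Note (added for clarity): the module is only looked up in sys.modules, so this
+# never imports heavy optional dependencies just to answer a completion request;
+# e.g. _safe_isinstance(obj, "pandas", "DataFrame") is falsy (None) unless pandas
+# has already been imported.
+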
+
+@context_matcher()
+def back_unicode_name_matcher(context: CompletionContext):
+ """Match Unicode characters back to Unicode name
+
+ Same as :any:`back_unicode_name_matches`, but adopted to new Matcher API.
+ """
+ fragment, matches = back_unicode_name_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="unicode", fragment=fragment, suppress_if_matches=True
+ )
+
+
+def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
+ """Match Unicode characters back to Unicode name
+
+ This does ``☃`` -> ``\\snowman``
+
+    Note that snowman is not a valid Python 3 combining character but will still
+    be expanded; the completion machinery will, however, not recombine it back
+    into the snowman character.
+
+    Standard escape sequences like \\n, \\b, ... are not back-completed either.
+
+ .. deprecated:: 8.6
+ You can use :meth:`back_unicode_name_matcher` instead.
+
+ Returns
+    -------
+
+    A tuple with two elements:
+
+    - the Unicode character that was matched (preceded by a backslash), or an
+      empty string,
+    - a sequence (of length 1) with the name of the matched Unicode character,
+      preceded by a backslash, or an empty sequence if there is no match.
+ """
+ if len(text)<2:
+ return '', ()
+ maybe_slash = text[-2]
+ if maybe_slash != '\\':
+ return '', ()
+
+ char = text[-1]
+ # no expand on quote for completion in strings.
+ # nor backcomplete standard ascii keys
+ if char in string.ascii_letters or char in ('"',"'"):
+ return '', ()
+ try :
+ unic = unicodedata.name(char)
+ return '\\'+char,('\\'+unic,)
+ except KeyError:
+ pass
+ return '', ()
+
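+# Worked example (mirrors the docstring above): back_unicode_name_matches("\\☃")
+# returns ('\\☃', ('\\SNOWMAN',)), which the completer then offers as the
+# replacement text.
+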
+
+@context_matcher()
+def back_latex_name_matcher(context: CompletionContext):
+ """Match latex characters back to unicode name
+
+ Same as :any:`back_latex_name_matches`, but adopted to new Matcher API.
+ """
+ fragment, matches = back_latex_name_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="latex", fragment=fragment, suppress_if_matches=True
+ )
+
+
+def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
+ """Match latex characters back to unicode name
+
+ This does ``\\ℵ`` -> ``\\aleph``
+
+ .. deprecated:: 8.6
+ You can use :meth:`back_latex_name_matcher` instead.
+ """
+ if len(text)<2:
+ return '', ()
+ maybe_slash = text[-2]
+ if maybe_slash != '\\':
+ return '', ()
+
+
+ char = text[-1]
+ # no expand on quote for completion in strings.
+ # nor backcomplete standard ascii keys
+ if char in string.ascii_letters or char in ('"',"'"):
+ return '', ()
+ try :
+ latex = reverse_latex_symbol[char]
+ # '\\' replace the \ as well
+ return '\\'+char,[latex]
+ except KeyError:
+ pass
+ return '', ()
+
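+# Worked example (mirrors the docstring above, assuming reverse_latex_symbol
+# maps ℵ to '\\aleph'): back_latex_name_matches("\\ℵ") returns ('\\ℵ', ['\\aleph']).
+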
+
+def _formatparamchildren(parameter) -> str:
+ """
+ Get parameter name and value from Jedi Private API
+
+ Jedi does not expose a simple way to get `param=value` from its API.
+
+ Parameters
+ ----------
+ parameter
+ Jedi's function `Param`
+
+ Returns
+ -------
+ A string like 'a', 'b=1', '*args', '**kwargs'
+
+ """
+ description = parameter.description
+ if not description.startswith('param '):
+        raise ValueError('Jedi function parameter description has changed format. '
+                         'Expected "param ...", found %r.' % description)
+ return description[6:]
+
+def _make_signature(completion)-> str:
+ """
+ Make the signature from a jedi completion
+
+ Parameters
+ ----------
+ completion : jedi.Completion
+        a Jedi completion object (in practice, one that completes to a callable)
+
+ Returns
+ -------
+    a string consisting of the function signature, with the parentheses but
+    without the function name, for example:
+    `(a, *args, b=1, **kwargs)`
+
+ """
+
+ return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for p in completion.params) if f])
+    # NOTE: everything below is currently unreachable; it is kept as a reference
+    # for jedi 0.17, where `get_signatures` might be usable instead of `params`.
+ if hasattr(completion, 'get_signatures'):
+ signatures = completion.get_signatures()
+ if not signatures:
+ return '(?)'
+
+ c0 = completion.get_signatures()[0]
+ return '('+c0.to_string().split('(', maxsplit=1)[1]
+
+ return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
+ for p in signature.defined_names()) if f])
+
+
+_CompleteResult = Dict[str, MatcherResult]
+
+
+DICT_MATCHER_REGEX = re.compile(
+ r"""(?x)
+( # match dict-referring - or any get item object - expression
+ .+
+)
+\[ # open bracket
+\s* # and optional whitespace
+# Capture any number of serializable objects (e.g. "a", "b", 'c')
+# and slices
+((?:(?:
+ (?: # closed string
+ [uUbB]? # string prefix (r not handled)
+ (?:
+ '(?:[^']|(?<!\\)\\')*'
+ |
+ "(?:[^"]|(?<!\\)\\")*"
+ )
+ )
+ |
+ # capture integers and slices
+ (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
+ |
+ # integer in bin/hex/oct notation
+ 0[bBxXoO]_?(?:\w|\d)+
+ )
+ \s*,\s*
+)*)
+((?:
+ (?: # unclosed string
+ [uUbB]? # string prefix (r not handled)
+ (?:
+ '(?:[^']|(?<!\\)\\')*
+ |
+ "(?:[^"]|(?<!\\)\\")*
+ )
+ )
+ |
+ # unfinished integer
+ (?:[-+]?\d+)
+ |
+ # integer in bin/hex/oct notation
+ 0[bBxXoO]_?(?:\w|\d)+
+ )
+)?
+$
+"""
+)
+
+
+def _convert_matcher_v1_result_to_v2(
+ matches: Sequence[str],
+ type: str,
+ fragment: Optional[str] = None,
+ suppress_if_matches: bool = False,
+) -> SimpleMatcherResult:
+ """Utility to help with transition"""
+ result = {
+ "completions": [SimpleCompletion(text=match, type=type) for match in matches],
+ "suppress": (True if matches else False) if suppress_if_matches else False,
+ }
+ if fragment is not None:
+ result["matched_fragment"] = fragment
+ return cast(SimpleMatcherResult, result)
+
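+# Schematic example of the value produced above (added for illustration):
+#
+#     _convert_matcher_v1_result_to_v2(["foo", "food"], type="path", fragment="fo")
+#     # -> {"completions": [SimpleCompletion(text="foo", type="path"),
+#     #                     SimpleCompletion(text="food", type="path")],
+#     #     "suppress": False,
+#     #     "matched_fragment": "fo"}
+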
+
+class IPCompleter(Completer):
+ """Extension of the completer class with IPython-specific features"""
+
+ @observe('greedy')
+ def _greedy_changed(self, change):
+ """update the splitter and readline delims when greedy is changed"""
+ if change["new"]:
+ self.evaluation = "unsafe"
+ self.auto_close_dict_keys = True
+ self.splitter.delims = GREEDY_DELIMS
+ else:
+ self.evaluation = "limited"
+ self.auto_close_dict_keys = False
+ self.splitter.delims = DELIMS
+
+ dict_keys_only = Bool(
+ False,
+ help="""
+ Whether to show dict key matches only.
+
+ (disables all matchers except for `IPCompleter.dict_key_matcher`).
+ """,
+ )
+
+ suppress_competing_matchers = UnionTrait(
+ [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
+ default_value=None,
+ help="""
+ Whether to suppress completions from other *Matchers*.
+
+ When set to ``None`` (default) the matchers will attempt to auto-detect
+ whether suppression of other matchers is desirable. For example, at
+ the beginning of a line followed by `%` we expect a magic completion
+ to be the only applicable option, and after ``my_dict['`` we usually
+ expect a completion with an existing dictionary key.
+
+ If you want to disable this heuristic and see completions from all matchers,
+ set ``IPCompleter.suppress_competing_matchers = False``.
+ To disable the heuristic for specific matchers provide a dictionary mapping:
+ ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
+
+ Set ``IPCompleter.suppress_competing_matchers = True`` to limit
+ completions to the set of matchers with the highest priority;
+        this is equivalent to ``IPCompleter.merge_completions = False`` and
+ can be beneficial for performance, but will sometimes omit relevant
+ candidates from matchers further down the priority list.
+ """,
+ ).tag(config=True)
+
+ merge_completions = Bool(
+ True,
+ help="""Whether to merge completion results into a single list
+
+ If False, only the completion results from the first non-empty
+ completer will be returned.
+
+        As of version 8.6.0, setting the value to ``False`` is an alias for
+        ``IPCompleter.suppress_competing_matchers = True``.
+ """,
+ ).tag(config=True)
+
+ disable_matchers = ListTrait(
+ Unicode(),
+ help="""List of matchers to disable.
+
+ The list should contain matcher identifiers (see :any:`completion_matcher`).
+ """,
+ ).tag(config=True)
+
+ omit__names = Enum(
+ (0, 1, 2),
+ default_value=2,
+ help="""Instruct the completer to omit private method names
+
+ Specifically, when completing on ``object.<tab>``.
+
+ When 2 [default]: all names that start with '_' will be excluded.
+
+ When 1: all 'magic' names (``__foo__``) will be excluded.
+
+ When 0: nothing will be excluded.
+ """
+ ).tag(config=True)
+ limit_to__all__ = Bool(False,
+ help="""
+ DEPRECATED as of version 5.0.
+
+ Instruct the completer to use __all__ for the completion
+
+ Specifically, when completing on ``object.<tab>``.
+
+ When True: only those names in obj.__all__ will be included.
+
+ When False [default]: the __all__ attribute is ignored
+ """,
+ ).tag(config=True)
+
+ profile_completions = Bool(
+ default_value=False,
+ help="If True, emit profiling data for completion subsystem using cProfile."
+ ).tag(config=True)
+
+ profiler_output_dir = Unicode(
+ default_value=".completion_profiles",
+ help="Template for path at which to output profile data for completions."
+ ).tag(config=True)
+
+ @observe('limit_to__all__')
+ def _limit_to_all_changed(self, change):
+ warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
+ 'value has been deprecated since IPython 5.0, will be made to have '
+            'no effect and will be removed in a future version of IPython.',
+ UserWarning)
+
+ def __init__(
+ self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
+ ):
+ """IPCompleter() -> completer
+
+ Return a completer object.
+
+ Parameters
+ ----------
+ shell
+ a pointer to the ipython shell itself. This is needed
+ because this completer knows about magic functions, and those can
+ only be accessed via the ipython instance.
+ namespace : dict, optional
+ an optional dict where completions are performed.
+ global_namespace : dict, optional
+ secondary optional dict for completions, to
+ handle cases (such as IPython embedded inside functions) where
+ both Python scopes are visible.
+ config : Config
+ traitlet's config object
+ **kwargs
+ passed to super class unmodified.
+ """
+
+ self.magic_escape = ESC_MAGIC
+ self.splitter = CompletionSplitter()
+
+ # _greedy_changed() depends on splitter and readline being defined:
+ super().__init__(
+ namespace=namespace,
+ global_namespace=global_namespace,
+ config=config,
+ **kwargs,
+ )
+
+ # List where completion matches will be stored
+ self.matches = []
+ self.shell = shell
+ # Regexp to split filenames with spaces in them
+ self.space_name_re = re.compile(r'([^\\] )')
+ # Hold a local ref. to glob.glob for speed
+ self.glob = glob.glob
+
+ # Determine if we are running on 'dumb' terminals, like (X)Emacs
+ # buffers, to avoid completion problems.
+ term = os.environ.get('TERM','xterm')
+ self.dumb_terminal = term in ['dumb','emacs']
+
+ # Special handling of backslashes needed in win32 platforms
+ if sys.platform == "win32":
+ self.clean_glob = self._clean_glob_win32
+ else:
+ self.clean_glob = self._clean_glob
+
+ #regexp to parse docstring for function signature
+ self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
+ self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
+ #use this if positional argument name is also needed
+ #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
+
+ self.magic_arg_matchers = [
+ self.magic_config_matcher,
+ self.magic_color_matcher,
+ ]
+
+ # This is set externally by InteractiveShell
+ self.custom_completers = None
+
+ # This is a list of names of unicode characters that can be completed
+ # into their corresponding unicode value. The list is large, so we
+ # lazily initialize it on first use. Consuming code should access this
+        # attribute through the ``unicode_names`` property.
+ self._unicode_names = None
+
+ self._backslash_combining_matchers = [
+ self.latex_name_matcher,
+ self.unicode_name_matcher,
+ back_latex_name_matcher,
+ back_unicode_name_matcher,
+ self.fwd_unicode_matcher,
+ ]
+
+ if not self.backslash_combining_completions:
+ for matcher in self._backslash_combining_matchers:
+ self.disable_matchers.append(_get_matcher_id(matcher))
+
+ if not self.merge_completions:
+ self.suppress_competing_matchers = True
+
+ @property
+ def matchers(self) -> List[Matcher]:
+ """All active matcher routines for completion"""
+ if self.dict_keys_only:
+ return [self.dict_key_matcher]
+
+ if self.use_jedi:
+ return [
+ *self.custom_matchers,
+ *self._backslash_combining_matchers,
+ *self.magic_arg_matchers,
+ self.custom_completer_matcher,
+ self.magic_matcher,
+ self._jedi_matcher,
+ self.dict_key_matcher,
+ self.file_matcher,
+ ]
+ else:
+ return [
+ *self.custom_matchers,
+ *self._backslash_combining_matchers,
+ *self.magic_arg_matchers,
+ self.custom_completer_matcher,
+ self.dict_key_matcher,
+ # TODO: convert python_matches to v2 API
+ self.magic_matcher,
+ self.python_matches,
+ self.file_matcher,
+ self.python_func_kw_matcher,
+ ]
+
+ def all_completions(self, text:str) -> List[str]:
+ """
+ Wrapper around the completion methods for the benefit of emacs.
+ """
+ prefix = text.rpartition('.')[0]
+ with provisionalcompleter():
+ return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
+ for c in self.completions(text, len(text))]
+
+ def _clean_glob(self, text:str):
+ return self.glob("%s*" % text)
+
+ def _clean_glob_win32(self, text:str):
+ return [f.replace("\\","/")
+ for f in self.glob("%s*" % text)]
+
+ @context_matcher()
+ def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Same as :any:`file_matches`, but adopted to new Matcher API."""
+ matches = self.file_matches(context.token)
+ # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
+ # starts with `/home/`, `C:\`, etc)
+ return _convert_matcher_v1_result_to_v2(matches, type="path")
+
+ def file_matches(self, text: str) -> List[str]:
+ """Match filenames, expanding ~USER type strings.
+
+ Most of the seemingly convoluted logic in this completer is an
+ attempt to handle filenames with spaces in them. And yet it's not
+ quite perfect, because Python's readline doesn't expose all of the
+ GNU readline details needed for this to be done correctly.
+
+ For a filename with a space in it, the printed completions will be
+ only the parts after what's already been typed (instead of the
+ full completions, as is normally done). I don't think with the
+ current (as of Python 2.3) Python readline it's possible to do
+ better.
+
+ .. deprecated:: 8.6
+ You can use :meth:`file_matcher` instead.
+ """
+
+ # chars that require escaping with backslash - i.e. chars
+ # that readline treats incorrectly as delimiters, but we
+ # don't want to treat as delimiters in filename matching
+ # when escaped with backslash
+ if text.startswith('!'):
+ text = text[1:]
+ text_prefix = u'!'
+ else:
+ text_prefix = u''
+
+ text_until_cursor = self.text_until_cursor
+ # track strings with open quotes
+ open_quotes = has_open_quotes(text_until_cursor)
+
+ if '(' in text_until_cursor or '[' in text_until_cursor:
+ lsplit = text
+ else:
+ try:
+ # arg_split ~ shlex.split, but with unicode bugs fixed by us
+ lsplit = arg_split(text_until_cursor)[-1]
+ except ValueError:
+ # typically an unmatched ", or backslash without escaped char.
+ if open_quotes:
+ lsplit = text_until_cursor.split(open_quotes)[-1]
+ else:
+ return []
+ except IndexError:
+ # tab pressed on empty line
+ lsplit = ""
+
+ if not open_quotes and lsplit != protect_filename(lsplit):
+ # if protectables are found, do matching on the whole escaped name
+ has_protectables = True
+ text0,text = text,lsplit
+ else:
+ has_protectables = False
+ text = os.path.expanduser(text)
+
+ if text == "":
+ return [text_prefix + protect_filename(f) for f in self.glob("*")]
+
+ # Compute the matches from the filesystem
+ if sys.platform == 'win32':
+ m0 = self.clean_glob(text)
+ else:
+ m0 = self.clean_glob(text.replace('\\', ''))
+
+ if has_protectables:
+ # If we had protectables, we need to revert our changes to the
+ # beginning of filename so that we don't double-write the part
+ # of the filename we have so far
+ len_lsplit = len(lsplit)
+ matches = [text_prefix + text0 +
+ protect_filename(f[len_lsplit:]) for f in m0]
+ else:
+ if open_quotes:
+ # if we have a string with an open quote, we don't need to
+ # protect the names beyond the quote (and we _shouldn't_, as
+ # it would cause bugs when the filesystem call is made).
+ matches = m0 if sys.platform == "win32" else\
+ [protect_filename(f, open_quotes) for f in m0]
+ else:
+ matches = [text_prefix +
+ protect_filename(f) for f in m0]
+
+ # Mark directories in input list by appending '/' to their names.
+ return [x+'/' if os.path.isdir(x) else x for x in matches]
+
+ @context_matcher()
+ def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match magics."""
+ text = context.token
+ matches = self.magic_matches(text)
+ result = _convert_matcher_v1_result_to_v2(matches, type="magic")
+ is_magic_prefix = len(text) > 0 and text[0] == "%"
+ result["suppress"] = is_magic_prefix and bool(result["completions"])
+ return result
+
+ def magic_matches(self, text: str):
+ """Match magics.
+
+ .. deprecated:: 8.6
+ You can use :meth:`magic_matcher` instead.
+ """
+ # Get all shell magics now rather than statically, so magics loaded at
+ # runtime show up too.
+ lsm = self.shell.magics_manager.lsmagic()
+ line_magics = lsm['line']
+ cell_magics = lsm['cell']
+ pre = self.magic_escape
+ pre2 = pre+pre
+
+ explicit_magic = text.startswith(pre)
+
+ # Completion logic:
+ # - user gives %%: only do cell magics
+ # - user gives %: do both line and cell magics
+ # - no prefix: do both
+ # In other words, line magics are skipped if the user gives %% explicitly
+ #
+ # We also exclude magics that match any currently visible names:
+ # https://github.com/ipython/ipython/issues/4877, unless the user has
+ # typed a %:
+ # https://github.com/ipython/ipython/issues/10754
+ bare_text = text.lstrip(pre)
+ global_matches = self.global_matches(bare_text)
+ if not explicit_magic:
+ def matches(magic):
+ """
+ Filter magics, in particular remove magics that match
+ a name present in global namespace.
+ """
+ return ( magic.startswith(bare_text) and
+ magic not in global_matches )
+ else:
+ def matches(magic):
+ return magic.startswith(bare_text)
+
+ comp = [ pre2+m for m in cell_magics if matches(m)]
+ if not text.startswith(pre2):
+ comp += [ pre+m for m in line_magics if matches(m)]
+
+ return comp
+
+ @context_matcher()
+ def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match class names and attributes for %config magic."""
+ # NOTE: uses `line_buffer` equivalent for compatibility
+ matches = self.magic_config_matches(context.line_with_cursor)
+ return _convert_matcher_v1_result_to_v2(matches, type="param")
+
+ def magic_config_matches(self, text: str) -> List[str]:
+ """Match class names and attributes for %config magic.
+
+ .. deprecated:: 8.6
+ You can use :meth:`magic_config_matcher` instead.
+ """
+ texts = text.strip().split()
+
+ if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
+ # get all configuration classes
+ classes = sorted(set([ c for c in self.shell.configurables
+ if c.__class__.class_traits(config=True)
+ ]), key=lambda x: x.__class__.__name__)
+ classnames = [ c.__class__.__name__ for c in classes ]
+
+ # return all classnames if config or %config is given
+ if len(texts) == 1:
+ return classnames
+
+ # match classname
+ classname_texts = texts[1].split('.')
+ classname = classname_texts[0]
+ classname_matches = [ c for c in classnames
+ if c.startswith(classname) ]
+
+ # return matched classes or the matched class with attributes
+ if texts[1].find('.') < 0:
+ return classname_matches
+ elif len(classname_matches) == 1 and \
+ classname_matches[0] == classname:
+ cls = classes[classnames.index(classname)].__class__
+ help = cls.class_get_help()
+ # strip leading '--' from cl-args:
+ help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
+ return [ attr.split('=')[0]
+ for attr in help.strip().splitlines()
+ if attr.startswith(texts[1]) ]
+ return []
+
+ @context_matcher()
+ def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match color schemes for %colors magic."""
+ # NOTE: uses `line_buffer` equivalent for compatibility
+ matches = self.magic_color_matches(context.line_with_cursor)
+ return _convert_matcher_v1_result_to_v2(matches, type="param")
+
+ def magic_color_matches(self, text: str) -> List[str]:
+ """Match color schemes for %colors magic.
+
+ .. deprecated:: 8.6
+ You can use :meth:`magic_color_matcher` instead.
+ """
+ texts = text.split()
+ if text.endswith(' '):
+ # .split() strips off the trailing whitespace. Add '' back
+ # so that: '%colors ' -> ['%colors', '']
+ texts.append('')
+
+ if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
+ prefix = texts[1]
+ return [ color for color in InspectColors.keys()
+ if color.startswith(prefix) ]
+ return []
+
+ @context_matcher(identifier="IPCompleter.jedi_matcher")
+ def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
+ matches = self._jedi_matches(
+ cursor_column=context.cursor_position,
+ cursor_line=context.cursor_line,
+ text=context.full_text,
+ )
+ return {
+ "completions": matches,
+ # static analysis should not suppress other matchers
+ "suppress": False,
+ }
+
+ def _jedi_matches(
+ self, cursor_column: int, cursor_line: int, text: str
+ ) -> Iterator[_JediCompletionLike]:
+ """
+        Return an iterator of :any:`jedi.api.Completion` objects from a ``text``
+        and cursor position.
+
+ Parameters
+ ----------
+ cursor_column : int
+ column position of the cursor in ``text``, 0-indexed.
+ cursor_line : int
+ line position of the cursor in ``text``, 0-indexed
+ text : str
+ text to complete
+
+ Notes
+ -----
+ If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
+ object containing a string with the Jedi debug information attached.
+
+ .. deprecated:: 8.6
+ You can use :meth:`_jedi_matcher` instead.
+ """
+ namespaces = [self.namespace]
+ if self.global_namespace is not None:
+ namespaces.append(self.global_namespace)
+
+ completion_filter = lambda x:x
+ offset = cursor_to_position(text, cursor_line, cursor_column)
+ # filter output if we are completing for object members
+ if offset:
+ pre = text[offset-1]
+ if pre == '.':
+ if self.omit__names == 2:
+ completion_filter = lambda c:not c.name.startswith('_')
+ elif self.omit__names == 1:
+ completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
+ elif self.omit__names == 0:
+ completion_filter = lambda x:x
+ else:
+ raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
+
+ interpreter = jedi.Interpreter(text[:offset], namespaces, column=cursor_column, line=cursor_line + 1)
+ try_jedi = True
+
+ try:
+ # find the first token in the current tree -- if it is a ' or " then we are in a string
+ completing_string = False
+ try:
+ first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
+ except StopIteration:
+ pass
+ else:
+ # note the value may be ', ", or it may also be ''' or """, or
+ # in some cases, """what/you/typed..., but all of these are
+ # strings.
+ completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
+
+ # if we are in a string jedi is likely not the right candidate for
+ # now. Skip it.
+ try_jedi = not completing_string
+ except Exception as e:
+            # many things can go wrong; we are using a private API, just don't crash.
+ if self.debug:
+ print("Error detecting if completing a non-finished string :", e, '|')
+
+ if not try_jedi:
+ return iter([])
+ try:
+ return filter(completion_filter, interpreter.completions())
+ except Exception as e:
+ if self.debug:
+ return iter(
+ [
+ _FakeJediCompletion(
+                            'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
+ % (e)
+ )
+ ]
+ )
+ else:
+ return iter([])
+
+ @completion_matcher(api_version=1)
+ def python_matches(self, text: str) -> Iterable[str]:
+ """Match attributes or global python names"""
+ if "." in text:
+ try:
+ matches = self.attr_matches(text)
+ if text.endswith('.') and self.omit__names:
+ if self.omit__names == 1:
+ # true if txt is _not_ a __ name, false otherwise:
+ no__name = (lambda txt:
+ re.match(r'.*\.__.*?__',txt) is None)
+ else:
+ # true if txt is _not_ a _ name, false otherwise:
+ no__name = (lambda txt:
+ re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
+ matches = filter(no__name, matches)
+ except NameError:
+ # catches <undefined attributes>.<tab>
+ matches = []
+ else:
+ matches = self.global_matches(text)
+ return matches
+
+ def _default_arguments_from_docstring(self, doc):
+ """Parse the first line of docstring for call signature.
+
+ Docstring should be of the form 'min(iterable[, key=func])\n'.
+ It can also parse cython docstring of the form
+ 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
+ """
+ if doc is None:
+ return []
+
+        # care only about the first line
+ line = doc.lstrip().splitlines()[0]
+
+ #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
+ #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
+ sig = self.docstring_sig_re.search(line)
+ if sig is None:
+ return []
+        # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
+ sig = sig.groups()[0].split(',')
+ ret = []
+ for s in sig:
+ #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
+ ret += self.docstring_kwd_re.findall(s)
+ return ret
+
+ def _default_arguments(self, obj):
+ """Return the list of default arguments of obj if it is callable,
+ or empty list otherwise."""
+ call_obj = obj
+ ret = []
+ if inspect.isbuiltin(obj):
+ pass
+ elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
+ if inspect.isclass(obj):
+ #for cython embedsignature=True the constructor docstring
+ #belongs to the object itself not __init__
+ ret += self._default_arguments_from_docstring(
+ getattr(obj, '__doc__', ''))
+ # for classes, check for __init__,__new__
+ call_obj = (getattr(obj, '__init__', None) or
+ getattr(obj, '__new__', None))
+ # for all others, check if they are __call__able
+ elif hasattr(obj, '__call__'):
+ call_obj = obj.__call__
+ ret += self._default_arguments_from_docstring(
+ getattr(call_obj, '__doc__', ''))
+
+ _keeps = (inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_OR_KEYWORD)
+
+ try:
+ sig = inspect.signature(obj)
+ ret.extend(k for k, v in sig.parameters.items() if
+ v.kind in _keeps)
+ except ValueError:
+ pass
+
+ return list(set(ret))
+
+ @context_matcher()
+ def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match named parameters (kwargs) of the last open function."""
+ matches = self.python_func_kw_matches(context.token)
+ return _convert_matcher_v1_result_to_v2(matches, type="param")
+
+ def python_func_kw_matches(self, text):
+ """Match named parameters (kwargs) of the last open function.
+
+ .. deprecated:: 8.6
+ You can use :meth:`python_func_kw_matcher` instead.
+ """
+
+ if "." in text: # a parameter cannot be dotted
+ return []
+ try: regexp = self.__funcParamsRegex
+ except AttributeError:
+ regexp = self.__funcParamsRegex = re.compile(r'''
+ '.*?(?<!\\)' | # single quoted strings or
+ ".*?(?<!\\)" | # double quoted strings or
+ \w+ | # identifier
+ \S # other characters
+ ''', re.VERBOSE | re.DOTALL)
+ # 1. find the nearest identifier that comes before an unclosed
+ # parenthesis before the cursor
+ # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
+ tokens = regexp.findall(self.text_until_cursor)
+ iterTokens = reversed(tokens); openPar = 0
+
+ for token in iterTokens:
+ if token == ')':
+ openPar -= 1
+ elif token == '(':
+ openPar += 1
+ if openPar > 0:
+ # found the last unclosed parenthesis
+ break
+ else:
+ return []
+ # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
+ ids = []
+ isId = re.compile(r'\w+$').match
+
+ while True:
+ try:
+ ids.append(next(iterTokens))
+ if not isId(ids[-1]):
+ ids.pop(); break
+ if not next(iterTokens) == '.':
+ break
+ except StopIteration:
+ break
+
+ # Find all named arguments already assigned to, as to avoid suggesting
+ # them again
+ usedNamedArgs = set()
+ par_level = -1
+ for token, next_token in zip(tokens, tokens[1:]):
+ if token == '(':
+ par_level += 1
+ elif token == ')':
+ par_level -= 1
+
+ if par_level != 0:
+ continue
+
+ if next_token != '=':
+ continue
+
+ usedNamedArgs.add(token)
+
+ argMatches = []
+ try:
+ callableObj = '.'.join(ids[::-1])
+ namedArgs = self._default_arguments(eval(callableObj,
+ self.namespace))
+
+ # Remove used named arguments from the list, no need to show twice
+ for namedArg in set(namedArgs) - usedNamedArgs:
+ if namedArg.startswith(text):
+ argMatches.append("%s=" %namedArg)
+ except:
+ pass
+
+ return argMatches
+
+ @staticmethod
+ def _get_keys(obj: Any) -> List[Any]:
+ # Objects can define their own completions by defining an
+        # _ipython_key_completions_() method.
+ method = get_real_method(obj, '_ipython_key_completions_')
+ if method is not None:
+ return method()
+
+ # Special case some common in-memory dict-like types
+ if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
+ try:
+ return list(obj.keys())
+ except Exception:
+ return []
+ elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
+ try:
+ return list(obj.obj.keys())
+ except Exception:
+ return []
+ elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
+ _safe_isinstance(obj, 'numpy', 'void'):
+ return obj.dtype.names or []
+ return []
+
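+    # Third-party objects can opt into dict-key completion via the
+    # ``_ipython_key_completions_`` hook used above; a minimal sketch
+    # (the class name is made up for illustration):
+    #
+    #     class Config:
+    #         def _ipython_key_completions_(self):
+    #             return ["host", "port"]
+    #
+    # so that typing ``cfg["<TAB>`` offers "host" and "port".
+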
+ @context_matcher()
+ def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match string keys in a dictionary, after e.g. ``foo[``."""
+ matches = self.dict_key_matches(context.token)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="dict key", suppress_if_matches=True
+ )
+
+ def dict_key_matches(self, text: str) -> List[str]:
+ """Match string keys in a dictionary, after e.g. ``foo[``.
+
+ .. deprecated:: 8.6
+ You can use :meth:`dict_key_matcher` instead.
+ """
+
+ # Short-circuit on closed dictionary (regular expression would
+ # not match anyway, but would take quite a while).
+ if self.text_until_cursor.strip().endswith("]"):
+ return []
+
+ match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
+
+ if match is None:
+ return []
+
+ expr, prior_tuple_keys, key_prefix = match.groups()
+
+ obj = self._evaluate_expr(expr)
+
+ if obj is not_found:
+ return []
+
+ keys = self._get_keys(obj)
+ if not keys:
+ return keys
+
+ tuple_prefix = guarded_eval(
+ prior_tuple_keys,
+ EvaluationContext(
+ globals=self.global_namespace,
+ locals=self.namespace,
+ evaluation=self.evaluation,
+ in_subscript=True,
+ ),
+ )
+
+ closing_quote, token_offset, matches = match_dict_keys(
+ keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
+ )
+ if not matches:
+ return []
+
+ # get the cursor position of
+ # - the text being completed
+ # - the start of the key text
+ # - the start of the completion
+ text_start = len(self.text_until_cursor) - len(text)
+ if key_prefix:
+ key_start = match.start(3)
+ completion_start = key_start + token_offset
+ else:
+ key_start = completion_start = match.end()
+
+ # grab the leading prefix, to make sure all completions start with `text`
+ if text_start > key_start:
+ leading = ''
+ else:
+ leading = text[text_start:completion_start]
+
+ # append closing quote and bracket as appropriate
+ # this is *not* appropriate if the opening quote or bracket is outside
+ # the text given to this method, e.g. `d["""a\nt
+ can_close_quote = False
+ can_close_bracket = False
+
+ continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
+
+ if continuation.startswith(closing_quote):
+ # do not close if already closed, e.g. `d['a<tab>'`
+ continuation = continuation[len(closing_quote) :]
+ else:
+ can_close_quote = True
+
+ continuation = continuation.strip()
+
+ # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
+ # handling it is out of scope, so let's avoid appending suffixes.
+ has_known_tuple_handling = isinstance(obj, dict)
+
+ can_close_bracket = (
+ not continuation.startswith("]") and self.auto_close_dict_keys
+ )
+ can_close_tuple_item = (
+ not continuation.startswith(",")
+ and has_known_tuple_handling
+ and self.auto_close_dict_keys
+ )
+ can_close_quote = can_close_quote and self.auto_close_dict_keys
+
+        # fast path if a closing quote should be appended but no suffix is allowed
+ if not can_close_quote and not can_close_bracket and closing_quote:
+ return [leading + k for k in matches]
+
+ results = []
+
+ end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
+
+ for k, state_flag in matches.items():
+ result = leading + k
+ if can_close_quote and closing_quote:
+ result += closing_quote
+
+ if state_flag == end_of_tuple_or_item:
+ # We do not know which suffix to add,
+ # e.g. both tuple item and string
+ # match this item.
+ pass
+
+ if state_flag in end_of_tuple_or_item and can_close_bracket:
+ result += "]"
+ if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
+ result += ", "
+ results.append(result)
+ return results
+
+ @context_matcher()
+ def unicode_name_matcher(self, context: CompletionContext):
+ """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
+ fragment, matches = self.unicode_name_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="unicode", fragment=fragment, suppress_if_matches=True
+ )
+
+ @staticmethod
+ def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
+ """Match Latex-like syntax for unicode characters base
+ on the name of the character.
+
+ This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
+
+        Works only on valid Python 3 identifiers, or on combining characters that
+ will combine to form a valid identifier.
+ """
+ slashpos = text.rfind('\\')
+ if slashpos > -1:
+ s = text[slashpos+1:]
+ try :
+ unic = unicodedata.lookup(s)
+ # allow combining chars
+ if ('a'+unic).isidentifier():
+ return '\\'+s,[unic]
+ except KeyError:
+ pass
+ return '', []
+
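+    # Worked example (follows the docstring above), called on the class since the
+    # method is static:
+    #     IPCompleter.unicode_name_matches("\\GREEK SMALL LETTER ETA")
+    #     # -> ('\\GREEK SMALL LETTER ETA', ['η'])
+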
+ @context_matcher()
+ def latex_name_matcher(self, context: CompletionContext):
+ """Match Latex syntax for unicode characters.
+
+ This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
+ """
+ fragment, matches = self.latex_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="latex", fragment=fragment, suppress_if_matches=True
+ )
+
+ def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
+ """Match Latex syntax for unicode characters.
+
+ This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
+
+ .. deprecated:: 8.6
+ You can use :meth:`latex_name_matcher` instead.
+ """
+ slashpos = text.rfind('\\')
+ if slashpos > -1:
+ s = text[slashpos:]
+ if s in latex_symbols:
+ # Try to complete a full latex symbol to unicode
+ # \\alpha -> α
+ return s, [latex_symbols[s]]
+ else:
+ # If a user has partially typed a latex symbol, give them
+ # a full list of options \al -> [\aleph, \alpha]
+ matches = [k for k in latex_symbols if k.startswith(s)]
+ if matches:
+ return s, matches
+ return '', ()
+
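+    # Worked examples (follow the docstring above; called on an IPCompleter instance):
+    #     completer.latex_matches("\\alpha")  ->  ('\\alpha', ['α'])
+    #     completer.latex_matches("\\alp")    ->  ('\\alp', ['\\alpha', ...other matches])
+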
+ @context_matcher()
+ def custom_completer_matcher(self, context):
+ """Dispatch custom completer.
+
+ If a match is found, suppresses all other matchers except for Jedi.
+ """
+ matches = self.dispatch_custom_completer(context.token) or []
+ result = _convert_matcher_v1_result_to_v2(
+ matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
+ )
+ result["ordered"] = True
+ result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
+ return result
+
+ def dispatch_custom_completer(self, text):
+ """
+ .. deprecated:: 8.6
+ You can use :meth:`custom_completer_matcher` instead.
+ """
+ if not self.custom_completers:
+ return
+
+ line = self.line_buffer
+ if not line.strip():
+ return None
+
+ # Create a little structure to pass all the relevant information about
+ # the current completion to any custom completer.
+ event = SimpleNamespace()
+ event.line = line
+ event.symbol = text
+ cmd = line.split(None,1)[0]
+ event.command = cmd
+ event.text_until_cursor = self.text_until_cursor
+
+ # for foo etc, try also to find completer for %foo
+ if not cmd.startswith(self.magic_escape):
+ try_magic = self.custom_completers.s_matches(
+ self.magic_escape + cmd)
+ else:
+ try_magic = []
+
+ for c in itertools.chain(self.custom_completers.s_matches(cmd),
+ try_magic,
+ self.custom_completers.flat_matches(self.text_until_cursor)):
+ try:
+ res = c(event)
+ if res:
+ # first, try case sensitive match
+ withcase = [r for r in res if r.startswith(text)]
+ if withcase:
+ return withcase
+ # if none, then case insensitive ones are ok too
+ text_low = text.lower()
+ return [r for r in res if r.lower().startswith(text_low)]
+ except TryNext:
+ pass
+ except KeyboardInterrupt:
+ """
+                If the custom completer takes too long,
+                let the keyboard interrupt abort it and return nothing.
+ """
+ break
+
+ return None
+
+ def completions(self, text: str, offset: int)->Iterator[Completion]:
+ """
+ Returns an iterator over the possible completions
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+            It will also raise unless used in the proper context manager.
+
+ Parameters
+ ----------
+ text : str
+ Full text of the current input, multi line string.
+ offset : int
+ Integer representing the position of the cursor in ``text``. Offset
+ is 0-based indexed.
+
+ Yields
+ ------
+ Completion
+
+ Notes
+ -----
+        The cursor in a text can be seen either as being "in between"
+        characters or "on" a character, depending on the interface visible to
+        the user. For consistency, the cursor being "in between" characters X
+        and Y is equivalent to the cursor being "on" character Y, that is to say
+        the character the cursor is on is considered to be after the cursor.
+
+        Combining characters may span more than one position in the
+        text.
+
+ .. note::
+
+            If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
+            fake Completion token to distinguish completions returned by Jedi
+            from usual IPython completions.
+
+ .. note::
+
+ Completions are not completely deduplicated yet. If identical
+ completions are coming from different sources this function does not
+ ensure that each completion object will only be present once.
+ """
+ warnings.warn("_complete is a provisional API (as of IPython 6.0). "
+ "It may change without warnings. "
+                      "Use it in the corresponding context manager.",
+ category=ProvisionalCompleterWarning, stacklevel=2)
+
+ seen = set()
+ profiler:Optional[cProfile.Profile]
+ try:
+ if self.profile_completions:
+ import cProfile
+ profiler = cProfile.Profile()
+ profiler.enable()
+ else:
+ profiler = None
+
+ for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
+ if c and (c in seen):
+ continue
+ yield c
+ seen.add(c)
+ except KeyboardInterrupt:
+ """if completions take too long and users send keyboard interrupt,
+ do not crash and return ASAP. """
+ pass
+ finally:
+ if profiler is not None:
+ profiler.disable()
+ ensure_dir_exists(self.profiler_output_dir)
+ output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
+ print("Writing profiler output to", output_path)
+ profiler.dump_stats(output_path)
+
+ def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
+ """
+        Core completion routine. Same signature as :any:`completions`, with the
+        extra `timeout` parameter (in seconds).
+
+        Computing jedi's completion ``.type`` can be quite expensive (it is a
+        lazy property) and can require some warm-up, more warm-up than just
+        computing the ``name`` of a completion. The warm-up can be:
+
+ - Long warm-up the first time a module is encountered after
+ install/update: actually build parse/inference tree.
+
+ - first time the module is encountered in a session: load tree from
+ disk.
+
+        We don't want to block completions for tens of seconds so we give the
+        completer a "budget" of ``_timeout`` seconds per invocation to compute
+        completion types; the completions whose type has not yet been computed
+        will be marked as "unknown" and will have a chance to be computed in the
+        next round, as things get cached.
+
+        Keep in mind that Jedi is not the only thing processing the completion, so
+        keep the timeout short-ish: if we take more than 0.3 seconds we still
+        have lots of processing to do.
+
+ """
+ deadline = time.monotonic() + _timeout
+
+ before = full_text[:offset]
+ cursor_line, cursor_column = position_to_cursor(full_text, offset)
+
+ jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
+
+ def is_non_jedi_result(
+ result: MatcherResult, identifier: str
+ ) -> TypeGuard[SimpleMatcherResult]:
+ return identifier != jedi_matcher_id
+
+ results = self._complete(
+ full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
+ )
+
+ non_jedi_results: Dict[str, SimpleMatcherResult] = {
+ identifier: result
+ for identifier, result in results.items()
+ if is_non_jedi_result(result, identifier)
+ }
+
+ jedi_matches = (
+ cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
+ if jedi_matcher_id in results
+ else ()
+ )
+
+ iter_jm = iter(jedi_matches)
+ if _timeout:
+ for jm in iter_jm:
+ try:
+ type_ = jm.type
+ except Exception:
+ if self.debug:
+ print("Error in Jedi getting type of ", jm)
+ type_ = None
+ delta = len(jm.name_with_symbols) - len(jm.complete)
+ if type_ == 'function':
+ signature = _make_signature(jm)
+ else:
+ signature = ''
+ yield Completion(start=offset - delta,
+ end=offset,
+ text=jm.name_with_symbols,
+ type=type_,
+ signature=signature,
+ _origin='jedi')
+
+ if time.monotonic() > deadline:
+ break
+
+ for jm in iter_jm:
+ delta = len(jm.name_with_symbols) - len(jm.complete)
+ yield Completion(
+ start=offset - delta,
+ end=offset,
+ text=jm.name_with_symbols,
+ type=_UNKNOWN_TYPE, # don't compute type for speed
+ _origin="jedi",
+ signature="",
+ )
+
+ # TODO:
+ # Suppress this, right now just for debug.
+ if jedi_matches and non_jedi_results and self.debug:
+ some_start_offset = before.rfind(
+ next(iter(non_jedi_results.values()))["matched_fragment"]
+ )
+ yield Completion(
+ start=some_start_offset,
+ end=offset,
+ text="--jedi/ipython--",
+ _origin="debug",
+ type="none",
+ signature="",
+ )
+
+ ordered: List[Completion] = []
+ sortable: List[Completion] = []
+
+ for origin, result in non_jedi_results.items():
+ matched_text = result["matched_fragment"]
+ start_offset = before.rfind(matched_text)
+ is_ordered = result.get("ordered", False)
+ container = ordered if is_ordered else sortable
+
+ # I'm unsure if this is always true, so let's assert and see if it
+            # crashes
+ assert before.endswith(matched_text)
+
+ for simple_completion in result["completions"]:
+ completion = Completion(
+ start=start_offset,
+ end=offset,
+ text=simple_completion.text,
+ _origin=origin,
+ signature="",
+ type=simple_completion.type or _UNKNOWN_TYPE,
+ )
+ container.append(completion)
+
+ yield from list(self._deduplicate(ordered + self._sort(sortable)))[
+ :MATCHES_LIMIT
+ ]
+
+ def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
+ """Find completions for the given text and line context.
+
+ Note that both the text and the line_buffer are optional, but at least
+ one of them must be given.
+
+ Parameters
+ ----------
+ text : string, optional
+ Text to perform the completion on. If not given, the line buffer
+ is split using the instance's CompletionSplitter object.
+ line_buffer : string, optional
+ If not given, the completer attempts to obtain the current line
+ buffer via readline. This keyword allows clients which are
+            requesting text completions in non-readline contexts to inform
+ the completer of the entire text.
+ cursor_pos : int, optional
+ Index of the cursor in the full line buffer. Should be provided by
+ remote frontends where kernel has no access to frontend state.
+
+ Returns
+ -------
+ Tuple of two items:
+ text : str
+ Text that was actually used in the completion.
+ matches : list
+ A list of completion matches.
+
+ Notes
+ -----
+ This API is likely to be deprecated and replaced by
+ :any:`IPCompleter.completions` in the future.
+
+ """
+ warnings.warn('`Completer.complete` is pending deprecation since '
+ 'IPython 6.0 and will be replaced by `Completer.completions`.',
+ PendingDeprecationWarning)
+        # Potential TODO: fold the 3rd throw-away argument of _complete
+        # into the first 2.
+ # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
+ # TODO: should we deprecate now, or does it stay?
+
+ results = self._complete(
+ line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
+ )
+
+ jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
+
+ return self._arrange_and_extract(
+ results,
+ # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
+ skip_matchers={jedi_matcher_id},
+ # this API does not support different start/end positions (fragments of token).
+ abort_if_offset_changes=True,
+ )
+
+ def _arrange_and_extract(
+ self,
+ results: Dict[str, MatcherResult],
+ skip_matchers: Set[str],
+ abort_if_offset_changes: bool,
+ ):
+ sortable: List[AnyMatcherCompletion] = []
+ ordered: List[AnyMatcherCompletion] = []
+ most_recent_fragment = None
+ for identifier, result in results.items():
+ if identifier in skip_matchers:
+ continue
+ if not result["completions"]:
+ continue
+ if not most_recent_fragment:
+ most_recent_fragment = result["matched_fragment"]
+ if (
+ abort_if_offset_changes
+ and result["matched_fragment"] != most_recent_fragment
+ ):
+ break
+ if result.get("ordered", False):
+ ordered.extend(result["completions"])
+ else:
+ sortable.extend(result["completions"])
+
+ if not most_recent_fragment:
+ most_recent_fragment = "" # to satisfy typechecker (and just in case)
+
+ return most_recent_fragment, [
+ m.text for m in self._deduplicate(ordered + self._sort(sortable))
+ ]
+
+ def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
+ full_text=None) -> _CompleteResult:
+ """
+        Like complete but can also return raw Jedi completions, as well as the
+ origin of the completion text. This could (and should) be made much
+ cleaner but that will be simpler once we drop the old (and stateful)
+ :any:`complete` API.
+
+        With the current provisional API, cursor_pos acts (depending on the
+        caller) either as the offset in the ``text`` or ``line_buffer``, or as
+        the ``column`` when passing multiline strings. This could/should be
+        renamed, but that would add extra noise.
+
+ Parameters
+ ----------
+ cursor_line
+ Index of the line the cursor is on. 0 indexed.
+ cursor_pos
+ Position of the cursor in the current line/line_buffer/text. 0
+ indexed.
+ line_buffer : optional, str
+            The current line the cursor is in; this is mostly for legacy
+            reasons, as readline could only give us the single current line.
+ Prefer `full_text`.
+ text : str
+ The current "token" the cursor is in, mostly also for historical
+            reasons, as the completer would trigger only after the current line
+ was parsed.
+ full_text : str
+ Full text of the current cell.
+
+ Returns
+ -------
+ An ordered dictionary where keys are identifiers of completion
+ matchers and values are ``MatcherResult``s.
+ """
+
+ # if the cursor position isn't given, the only sane assumption we can
+ # make is that it's at the end of the line (the common case)
+ if cursor_pos is None:
+ cursor_pos = len(line_buffer) if text is None else len(text)
+
+ if self.use_main_ns:
+ self.namespace = __main__.__dict__
+
+ # if text is either None or an empty string, rely on the line buffer
+ if (not line_buffer) and full_text:
+ line_buffer = full_text.split('\n')[cursor_line]
+ if not text: # issue #11508: check line_buffer before calling split_line
+ text = (
+ self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
+ )
+
+ # If no line buffer is given, assume the input text is all there was
+ if line_buffer is None:
+ line_buffer = text
+
+ # deprecated - do not use `line_buffer` in new code.
+ self.line_buffer = line_buffer
+ self.text_until_cursor = self.line_buffer[:cursor_pos]
+
+ if not full_text:
+ full_text = line_buffer
+
+ context = CompletionContext(
+ full_text=full_text,
+ cursor_position=cursor_pos,
+ cursor_line=cursor_line,
+ token=text,
+ limit=MATCHES_LIMIT,
+ )
+
+ # Start with a clean slate of completions
+ results: Dict[str, MatcherResult] = {}
+
+ jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
+
+ suppressed_matchers: Set[str] = set()
+
+ matchers = {
+ _get_matcher_id(matcher): matcher
+ for matcher in sorted(
+ self.matchers, key=_get_matcher_priority, reverse=True
+ )
+ }
+
+ for matcher_id, matcher in matchers.items():
+ if matcher_id in self.disable_matchers:
+ continue
+
+ if matcher_id in results:
+ warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
+
+ if matcher_id in suppressed_matchers:
+ continue
+
+ result: MatcherResult
+ try:
+ if _is_matcher_v1(matcher):
+ result = _convert_matcher_v1_result_to_v2(
+ matcher(text), type=_UNKNOWN_TYPE
+ )
+ elif _is_matcher_v2(matcher):
+ result = matcher(context)
+ else:
+ api_version = _get_matcher_api_version(matcher)
+ raise ValueError(f"Unsupported API version {api_version}")
+ except:
+ # Show the ugly traceback if the matcher causes an
+ # exception, but do NOT crash the kernel!
+ sys.excepthook(*sys.exc_info())
+ continue
+
+ # set default value for matched fragment if suffix was not selected.
+ result["matched_fragment"] = result.get("matched_fragment", context.token)
+
+ if not suppressed_matchers:
+ suppression_recommended: Union[bool, Set[str]] = result.get(
+ "suppress", False
+ )
+
+ suppression_config = (
+ self.suppress_competing_matchers.get(matcher_id, None)
+ if isinstance(self.suppress_competing_matchers, dict)
+ else self.suppress_competing_matchers
+ )
+ should_suppress = (
+ (suppression_config is True)
+ or (suppression_recommended and (suppression_config is not False))
+ ) and has_any_completions(result)
+
+ if should_suppress:
+ suppression_exceptions: Set[str] = result.get(
+ "do_not_suppress", set()
+ )
+ if isinstance(suppression_recommended, Iterable):
+ to_suppress = set(suppression_recommended)
+ else:
+ to_suppress = set(matchers)
+ suppressed_matchers = to_suppress - suppression_exceptions
+
+ new_results = {}
+ for previous_matcher_id, previous_result in results.items():
+ if previous_matcher_id not in suppressed_matchers:
+ new_results[previous_matcher_id] = previous_result
+ results = new_results
+
+ results[matcher_id] = result
+
+ _, matches = self._arrange_and_extract(
+ results,
+            # TODO Jedi completions not included in legacy stateful API; was this deliberate or an omission?
+ # if it was omission, we can remove the filtering step, otherwise remove this comment.
+ skip_matchers={jedi_matcher_id},
+ abort_if_offset_changes=False,
+ )
+
+ # populate legacy stateful API
+ self.matches = matches
+
+ return results
+
+ @staticmethod
+ def _deduplicate(
+ matches: Sequence[AnyCompletion],
+ ) -> Iterable[AnyCompletion]:
+ filtered_matches: Dict[str, AnyCompletion] = {}
+ for match in matches:
+ text = match.text
+ if (
+ text not in filtered_matches
+ or filtered_matches[text].type == _UNKNOWN_TYPE
+ ):
+ filtered_matches[text] = match
+
+ return filtered_matches.values()
+
+ @staticmethod
+ def _sort(matches: Sequence[AnyCompletion]):
+ return sorted(matches, key=lambda x: completions_sorting_key(x.text))
+
+ @context_matcher()
+ def fwd_unicode_matcher(self, context: CompletionContext):
+ """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
+ # TODO: use `context.limit` to terminate early once we matched the maximum
+ # number that will be used downstream; can be added as an optional to
+ # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
+ fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="unicode", fragment=fragment, suppress_if_matches=True
+ )
+
+ def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
+ """
+ Forward match a string starting with a backslash with a list of
+ potential Unicode completions.
+
+ Will compute list of Unicode character names on first call and cache it.
+
+ .. deprecated:: 8.6
+ You can use :meth:`fwd_unicode_matcher` instead.
+
+ Returns
+ -------
+        A tuple with:
+        - matched text (empty if no matches)
+        - list of potential completions (an empty tuple if there are none)
+ """
+ # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
+ # We could do a faster match using a Trie.
+
+ # Using pygtrie the following seem to work:
+
+ # s = PrefixSet()
+
+ # for c in range(0,0x10FFFF + 1):
+ # try:
+ # s.add(unicodedata.name(chr(c)))
+ # except ValueError:
+ # pass
+ # [''.join(k) for k in s.iter(prefix)]
+
+ # But need to be timed and adds an extra dependency.
+
+ slashpos = text.rfind('\\')
+        # if the text contains a backslash
+ if slashpos > -1:
+ # PERF: It's important that we don't access self._unicode_names
+ # until we're inside this if-block. _unicode_names is lazily
+ # initialized, and it takes a user-noticeable amount of time to
+ # initialize it, so we don't want to initialize it unless we're
+ # actually going to use it.
+ s = text[slashpos + 1 :]
+ sup = s.upper()
+ candidates = [x for x in self.unicode_names if x.startswith(sup)]
+ if candidates:
+ return s, candidates
+ candidates = [x for x in self.unicode_names if sup in x]
+ if candidates:
+ return s, candidates
+ splitsup = sup.split(" ")
+ candidates = [
+ x for x in self.unicode_names if all(u in x for u in splitsup)
+ ]
+ if candidates:
+ return s, candidates
+
+ return "", ()
+
+        # if the text contains no backslash
+ else:
+ return '', ()
+
+ @property
+ def unicode_names(self) -> List[str]:
+ """List of names of unicode code points that can be completed.
+
+ The list is lazily initialized on first access.
+ """
+        if self._unicode_names is None:
+            # The computation is expensive (it walks the configured code point
+            # ranges), so it is only done lazily on first access.
+            self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
+
+ return self._unicode_names
+
+def _unicode_name_compute(ranges: List[Tuple[int, int]]) -> List[str]:
+    names = []
+    for start, stop in ranges:
+        for c in range(start, stop):
+            try:
+                names.append(unicodedata.name(chr(c)))
+            except ValueError:
+                pass
+    return names
diff --git a/contrib/python/ipython/py3/IPython/core/completerlib.py b/contrib/python/ipython/py3/IPython/core/completerlib.py
new file mode 100644
index 0000000000..65efa42254
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/completerlib.py
@@ -0,0 +1,418 @@
+# encoding: utf-8
+"""Implementations for various useful completers.
+
+These are all loaded by default by IPython.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib imports
+import glob
+import inspect
+import itertools
+import os
+import re
+import sys
+from importlib import import_module
+from importlib.machinery import all_suffixes
+
+
+# Third-party imports
+from time import time
+from zipimport import zipimporter
+
+# Our own imports
+from .completer import expand_user, compress_user
+from .error import TryNext
+from ..utils._process_common import arg_split
+
+# FIXME: this should be pulled in with the right call via the component system
+from IPython import get_ipython
+
+from typing import List
+
+from __res import importer
+
+#-----------------------------------------------------------------------------
+# Globals and constants
+#-----------------------------------------------------------------------------
+_suffixes = all_suffixes()
+
+# Time in seconds after which the rootmodules will be stored permanently in the
+# ipython ip.db database (kept in the user's .ipython dir).
+TIMEOUT_STORAGE = 2
+
+# Time in seconds after which we give up
+TIMEOUT_GIVEUP = 20
+
+# Regular expression for the python import statement
+import_re = re.compile(r'(?P<name>[^\W\d]\w*?)'
+ r'(?P<package>[/\\]__init__)?'
+ r'(?P<suffix>%s)$' %
+ r'|'.join(re.escape(s) for s in _suffixes))
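+# For example (illustrative): 'xml/__init__.py' matches with name='xml', and
+# 'foo.py' matches with name='foo'; files whose suffix is not an importable
+# module suffix do not match at all.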
+
+# RE for the ipython %run command (python + ipython scripts)
+magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+arcadia_rootmodules_cache = None
+arcadia_modules_cache = None
+
+
+def arcadia_init_cache():
+ global arcadia_rootmodules_cache, arcadia_modules_cache
+ arcadia_rootmodules_cache = set()
+ arcadia_modules_cache = {}
+
+ all_modules = itertools.chain(
+ sys.builtin_module_names,
+ importer.memory
+ )
+
+ for name in all_modules:
+ path = name.split('.')
+ arcadia_rootmodules_cache.add(path[0])
+
+ prefix = path[0]
+ for element in path[1:]:
+ if element == '__init__':
+ continue
+
+ arcadia_modules_cache.setdefault(prefix, set()).add(element)
+ prefix += '.' + element
+
+ arcadia_rootmodules_cache = sorted(arcadia_rootmodules_cache)
+ arcadia_modules_cache = {k: sorted(v) for k, v in arcadia_modules_cache.items()}
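+
+# Illustrative cache layout after arcadia_init_cache() has run (the module
+# names are hypothetical and depend on what is linked into the binary):
+#   arcadia_rootmodules_cache == ['os', 'sys', 'xml', ...]
+#   arcadia_modules_cache     == {'xml': ['dom', 'etree', ...], 'xml.dom': ['minidom', ...]}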
+
+
+def arcadia_module_list(mod):
+ if arcadia_modules_cache is None:
+ arcadia_init_cache()
+
+ return arcadia_modules_cache.get(mod, ())
+
+
+def arcadia_get_root_modules():
+ if arcadia_rootmodules_cache is None:
+ arcadia_init_cache()
+
+ return arcadia_rootmodules_cache
+
+
+def module_list(path):
+ """
+ Return the list containing the names of the modules available in the given
+ folder.
+ """
+ # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
+ if path == '':
+ path = '.'
+
+ # A few local constants to be used in loops below
+ pjoin = os.path.join
+
+ if os.path.isdir(path):
+ # Build a list of all files in the directory and all files
+ # in its subdirectories. For performance reasons, do not
+ # recurse more than one level into subdirectories.
+ files = []
+ for root, dirs, nondirs in os.walk(path, followlinks=True):
+ subdir = root[len(path)+1:]
+ if subdir:
+ files.extend(pjoin(subdir, f) for f in nondirs)
+ dirs[:] = [] # Do not recurse into additional subdirectories.
+ else:
+ files.extend(nondirs)
+
+ else:
+ try:
+ files = list(zipimporter(path)._files.keys())
+ except:
+ files = []
+
+ # Build a list of modules which match the import_re regex.
+ modules = []
+ for f in files:
+ m = import_re.match(f)
+ if m:
+ modules.append(m.group('name'))
+ return list(set(modules))
+
+
+def get_root_modules():
+ """
+ Returns a list containing the names of all the modules available in the
+ folders of the pythonpath.
+
+ ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
+ """
+ ip = get_ipython()
+ if ip is None:
+ # No global shell instance to store cached list of modules.
+ # Don't try to scan for modules every time.
+ return list(sys.builtin_module_names)
+
+ rootmodules_cache = ip.db.get('rootmodules_cache', {})
+ rootmodules = list(sys.builtin_module_names)
+ start_time = time()
+ store = False
+ for path in sys.path:
+ try:
+ modules = rootmodules_cache[path]
+ except KeyError:
+ modules = module_list(path)
+ try:
+ modules.remove('__init__')
+ except ValueError:
+ pass
+ if path not in ('', '.'): # cwd modules should not be cached
+ rootmodules_cache[path] = modules
+ if time() - start_time > TIMEOUT_STORAGE and not store:
+ store = True
+ print("\nCaching the list of root modules, please wait!")
+ print("(This will only be done once - type '%rehashx' to "
+ "reset cache!)\n")
+ sys.stdout.flush()
+ if time() - start_time > TIMEOUT_GIVEUP:
+ print("This is taking too long, we give up.\n")
+ return []
+ rootmodules.extend(modules)
+ if store:
+ ip.db['rootmodules_cache'] = rootmodules_cache
+ rootmodules = list(set(rootmodules))
+ return rootmodules
+
+
+def is_importable(module, attr, only_modules):
+ if only_modules:
+ return inspect.ismodule(getattr(module, attr))
+ else:
+ return not(attr[:2] == '__' and attr[-2:] == '__')
+
+def is_possible_submodule(module, attr):
+ try:
+ obj = getattr(module, attr)
+ except AttributeError:
+        # Is possibly an unimported submodule
+ return True
+ except TypeError:
+ # https://github.com/ipython/ipython/issues/9678
+ return False
+ return inspect.ismodule(obj)
+
+
+def try_import(mod: str, only_modules=False) -> List[str]:
+ """
+ Try to import given module and return list of potential completions.
+ """
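+    # Illustrative example (actual results depend on the running environment):
+    #   try_import('os.path') -> sorted public attributes of os.path,
+    #   e.g. ['abspath', 'basename', 'commonpath', ...]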
+ mod = mod.rstrip('.')
+ try:
+ m = import_module(mod)
+ except:
+ return []
+
+ filename = getattr(m, '__file__', '')
+ m_is_init = '__init__' in (filename or '') or filename == mod
+
+ completions = []
+ if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
+ completions.extend( [attr for attr in dir(m) if
+ is_importable(m, attr, only_modules)])
+
+ m_all = getattr(m, "__all__", [])
+ if only_modules:
+ completions.extend(attr for attr in m_all if is_possible_submodule(m, attr))
+ else:
+ completions.extend(m_all)
+
+ if m_is_init:
+ completions.extend(arcadia_module_list(mod))
+ completions_set = {c for c in completions if isinstance(c, str)}
+ completions_set.discard('__init__')
+ return sorted(completions_set)
+
+
+#-----------------------------------------------------------------------------
+# Completion-related functions.
+#-----------------------------------------------------------------------------
+
+def quick_completer(cmd, completions):
+ r""" Easily create a trivial completer for a command.
+
+    Takes either a list of completions, or all completions as a single string
+    (which will be split on whitespace).
+
+ Example::
+
+ [d:\ipython]|1> import ipy_completers
+ [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
+ [d:\ipython]|3> foo b<TAB>
+ bar baz
+ [d:\ipython]|3> foo ba
+ """
+
+ if isinstance(completions, str):
+ completions = completions.split()
+
+ def do_complete(self, event):
+ return completions
+
+ get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
+
+def module_completion(line):
+ """
+ Returns a list containing the completion possibilities for an import line.
+
+    The line looks like this:
+ 'import xml.d'
+ 'from xml.dom import'
+ """
+
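+    # Illustrative behaviour (hedged; actual results depend on the importable
+    # modules in this environment):
+    #   module_completion('from xml.dom ') -> ['import ']
+    #   module_completion('import xml.')   -> ['xml.dom', 'xml.etree', ...]
+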
+ words = line.split(' ')
+ nwords = len(words)
+
+ # from whatever <tab> -> 'import '
+ if nwords == 3 and words[0] == 'from':
+ return ['import ']
+
+ # 'from xy<tab>' or 'import xy<tab>'
+ if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
+ if nwords == 1:
+ return arcadia_get_root_modules()
+ mod = words[1].split('.')
+ if len(mod) < 2:
+ return arcadia_get_root_modules()
+ completion_list = try_import('.'.join(mod[:-1]), True)
+ return ['.'.join(mod[:-1] + [el]) for el in completion_list]
+
+ # 'from xyz import abc<tab>'
+ if nwords >= 3 and words[0] == 'from':
+ mod = words[1]
+ return try_import(mod)
+
+#-----------------------------------------------------------------------------
+# Completers
+#-----------------------------------------------------------------------------
+# These all have the func(self, event) signature to be used as custom
+# completers
+
+def module_completer(self,event):
+ """Give completions after user has typed 'import ...' or 'from ...'"""
+
+ # This works in all versions of python. While 2.5 has
+ # pkgutil.walk_packages(), that particular routine is fairly dangerous,
+ # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
+ # of possibly problematic side effects.
+    # This searches the folders on sys.path for available modules instead.
+
+ return module_completion(event.line)
+
+# FIXME: there's a lot of logic common to the run, cd and builtin file
+# completers, that is currently reimplemented in each.
+
+def magic_run_completer(self, event):
+ """Complete files that end in .py or .ipy or .ipynb for the %run command.
+ """
+ comps = arg_split(event.line, strict=False)
+ # relpath should be the current token that we need to complete.
+ if (len(comps) > 1) and (not event.line.endswith(' ')):
+ relpath = comps[-1].strip("'\"")
+ else:
+ relpath = ''
+
+ #print("\nev=", event) # dbg
+ #print("rp=", relpath) # dbg
+ #print('comps=', comps) # dbg
+
+ lglob = glob.glob
+ isdir = os.path.isdir
+ relpath, tilde_expand, tilde_val = expand_user(relpath)
+
+ # Find if the user has already typed the first filename, after which we
+ # should complete on all files, since after the first one other files may
+ # be arguments to the input script.
+
+ if any(magic_run_re.match(c) for c in comps):
+ matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
+ for f in lglob(relpath+'*')]
+ else:
+ dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
+ pys = [f.replace('\\','/')
+ for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
+ lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
+
+ matches = dirs + pys
+
+ #print('run comp:', dirs+pys) # dbg
+ return [compress_user(p, tilde_expand, tilde_val) for p in matches]
+
+
+def cd_completer(self, event):
+ """Completer function for cd, which only returns directories."""
+ ip = get_ipython()
+ relpath = event.symbol
+
+ #print(event) # dbg
+ if event.line.endswith('-b') or ' -b ' in event.line:
+ # return only bookmark completions
+ bkms = self.db.get('bookmarks', None)
+ if bkms:
+ return bkms.keys()
+ else:
+ return []
+
+ if event.symbol == '-':
+ width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
+ # jump in directory history by number
+ fmt = '-%0' + width_dh +'d [%s]'
+ ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
+ if len(ents) > 1:
+ return ents
+ return []
+
+ if event.symbol.startswith('--'):
+ return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
+
+ # Expand ~ in path and normalize directory separators.
+ relpath, tilde_expand, tilde_val = expand_user(relpath)
+ relpath = relpath.replace('\\','/')
+
+ found = []
+ for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
+ if os.path.isdir(f)]:
+ if ' ' in d:
+            # we don't want to deal with spaces in paths here; the complex
+            # code for that lives elsewhere
+ raise TryNext
+
+ found.append(d)
+
+ if not found:
+ if os.path.isdir(relpath):
+ return [compress_user(relpath, tilde_expand, tilde_val)]
+
+ # if no completions so far, try bookmarks
+ bks = self.db.get('bookmarks',{})
+ bkmatches = [s for s in bks if s.startswith(event.symbol)]
+ if bkmatches:
+ return bkmatches
+
+ raise TryNext
+
+ return [compress_user(p, tilde_expand, tilde_val) for p in found]
+
+def reset_completer(self, event):
+ "A completer for %reset magic"
+ return '-f -s in out array dhist'.split()
diff --git a/contrib/python/ipython/py3/IPython/core/crashhandler.py b/contrib/python/ipython/py3/IPython/core/crashhandler.py
new file mode 100644
index 0000000000..f60a75bbc5
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/crashhandler.py
@@ -0,0 +1,236 @@
+# encoding: utf-8
+"""sys.excepthook for IPython itself, leaves a detailed report on disk.
+
+Authors:
+
+* Fernando Perez
+* Brian E. Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+import traceback
+from pprint import pformat
+from pathlib import Path
+
+from IPython.core import ultratb
+from IPython.core.release import author_email
+from IPython.utils.sysinfo import sys_info
+from IPython.utils.py3compat import input
+
+from IPython.core.release import __version__ as version
+
+from typing import Optional
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+# Template for the user message.
+_default_message_template = """\
+Oops, {app_name} crashed. We do our best to make it stable, but...
+
+A crash report was automatically generated with the following information:
+ - A verbatim copy of the crash traceback.
+ - A copy of your input history during this session.
+ - Data on your current {app_name} configuration.
+
+It was left in the file named:
+\t'{crash_report_fname}'
+If you can email this file to the developers, the information in it will help
+them in understanding and correcting the problem.
+
+You can mail it to: {contact_name} at {contact_email}
+with the subject '{app_name} Crash Report'.
+
+If you want to do it now, the following command will work (under Unix):
+mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
+
+In your email, please also include information about:
+- The operating system under which the crash happened: Linux, macOS, Windows,
+ other, and which exact version (for example: Ubuntu 16.04.3, macOS 10.13.2,
+ Windows 10 Pro), and whether it is 32-bit or 64-bit;
+- How {app_name} was installed: using pip or conda, from GitHub, as part of
+ a Docker container, or other, providing more detail if possible;
+- How to reproduce the crash: what exact sequence of instructions can one
+ input to get the same crash? Ideally, find a minimal yet complete sequence
+ of instructions that yields the crash.
+
+To ensure accurate tracking of this issue, please file a report about it at:
+{bug_tracker}
+"""
+
+_lite_message_template = """
+If you suspect this is an IPython {version} bug, please report it at:
+ https://github.com/ipython/ipython/issues
+or send an email to the mailing list at {email}
+
+You can print a more detailed traceback right now with "%tb", or use "%debug"
+to interactively debug it.
+
+Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
+ {config}Application.verbose_crash=True
+"""
+
+
+class CrashHandler(object):
+ """Customizable crash handlers for IPython applications.
+
+ Instances of this class provide a :meth:`__call__` method which can be
+ used as a ``sys.excepthook``. The :meth:`__call__` signature is::
+
+ def __call__(self, etype, evalue, etb)
+ """
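+
+    # Installation sketch (illustrative; IPython applications normally wire
+    # this up themselves):
+    #   handler = CrashHandler(app, contact_name='...', contact_email='...')
+    #   sys.excepthook = handler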
+
+ message_template = _default_message_template
+ section_sep = '\n\n'+'*'*75+'\n\n'
+
+ def __init__(
+ self,
+ app,
+ contact_name: Optional[str] = None,
+ contact_email: Optional[str] = None,
+ bug_tracker: Optional[str] = None,
+ show_crash_traceback: bool = True,
+ call_pdb: bool = False,
+ ):
+ """Create a new crash handler
+
+ Parameters
+ ----------
+ app : Application
+ A running :class:`Application` instance, which will be queried at
+ crash time for internal information.
+ contact_name : str
+ A string with the name of the person to contact.
+ contact_email : str
+ A string with the email address of the contact.
+ bug_tracker : str
+ A string with the URL for your project's bug tracker.
+ show_crash_traceback : bool
+ If false, don't print the crash traceback on stderr, only generate
+ the on-disk report
+ call_pdb
+ Whether to call pdb on crash
+
+ Attributes
+ ----------
+ These instances contain some non-argument attributes which allow for
+ further customization of the crash handler's behavior. Please see the
+ source for further details.
+
+ """
+ self.crash_report_fname = "Crash_report_%s.txt" % app.name
+ self.app = app
+ self.call_pdb = call_pdb
+ #self.call_pdb = True # dbg
+ self.show_crash_traceback = show_crash_traceback
+ self.info = dict(app_name = app.name,
+ contact_name = contact_name,
+ contact_email = contact_email,
+ bug_tracker = bug_tracker,
+ crash_report_fname = self.crash_report_fname)
+
+
+ def __call__(self, etype, evalue, etb):
+        """Handle an exception; the call signature is compatible with ``sys.excepthook``."""
+
+ # do not allow the crash handler to be called twice without reinstalling it
+ # this prevents unlikely errors in the crash handling from entering an
+ # infinite loop.
+ sys.excepthook = sys.__excepthook__
+
+ # Report tracebacks shouldn't use color in general (safer for users)
+ color_scheme = 'NoColor'
+
+ # Use this ONLY for developer debugging (keep commented out for release)
+ #color_scheme = 'Linux' # dbg
+        try:
+            # ipython_dir may be a plain string; wrap it in a Path so the
+            # directory check and the `/` join below behave consistently.
+            rptdir = Path(self.app.ipython_dir)
+        except Exception:
+            rptdir = Path.cwd()
+        if not rptdir.is_dir():
+            rptdir = Path.cwd()
+        report_name = rptdir / self.crash_report_fname
+ # write the report filename into the instance dict so it can get
+ # properly expanded out in the user message template
+ self.crash_report_fname = report_name
+ self.info['crash_report_fname'] = report_name
+ TBhandler = ultratb.VerboseTB(
+ color_scheme=color_scheme,
+ long_header=1,
+ call_pdb=self.call_pdb,
+ )
+ if self.call_pdb:
+ TBhandler(etype,evalue,etb)
+ return
+ else:
+ traceback = TBhandler.text(etype,evalue,etb,context=31)
+
+ # print traceback to screen
+ if self.show_crash_traceback:
+ print(traceback, file=sys.stderr)
+
+ # and generate a complete report on disk
+ try:
+ report = open(report_name, "w", encoding="utf-8")
+ except:
+ print('Could not create crash report on disk.', file=sys.stderr)
+ return
+
+ with report:
+ # Inform user on stderr of what happened
+ print('\n'+'*'*70+'\n', file=sys.stderr)
+ print(self.message_template.format(**self.info), file=sys.stderr)
+
+ # Construct report on disk
+ report.write(self.make_report(traceback))
+
+ input("Hit <Enter> to quit (your terminal may close):")
+
+ def make_report(self,traceback):
+ """Return a string containing a crash report."""
+
+ sec_sep = self.section_sep
+
+ report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
+ rpt_add = report.append
+ rpt_add(sys_info())
+
+ try:
+ config = pformat(self.app.config)
+ rpt_add(sec_sep)
+            rpt_add('Application name: %s\n\n' % self.app.name)
+ rpt_add('Current user configuration structure:\n\n')
+ rpt_add(config)
+ except:
+ pass
+ rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
+
+ return ''.join(report)
+
+
+def crash_handler_lite(etype, evalue, tb):
+ """a light excepthook, adding a small message to the usual traceback"""
+ traceback.print_exception(etype, evalue, tb)
+
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ # we are in a Shell environment, give %magic example
+ config = "%config "
+ else:
+ # we are not in a shell, show generic config
+ config = "c."
+ print(_lite_message_template.format(email=author_email, config=config, version=version), file=sys.stderr)
+
diff --git a/contrib/python/ipython/py3/IPython/core/debugger.py b/contrib/python/ipython/py3/IPython/core/debugger.py
new file mode 100644
index 0000000000..c8082e34e7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/debugger.py
@@ -0,0 +1,997 @@
+# -*- coding: utf-8 -*-
+"""
+Pdb debugger class.
+
+
+This is an extension to PDB which adds a number of new features.
+Note that there is also the `IPython.terminal.debugger` class which provides UI
+improvements.
+
+We also strongly recommend to use this via the `ipdb` package, which provides
+extra configuration options.
+
+Among other things, this subclass of PDB:
+ - supports many IPython magics like pdef/psource
+ - hides frames in tracebacks based on `__tracebackhide__`
+ - allows skipping frames based on `__debuggerskip__`
+
+Both frame hiding and frame skipping are configurable via the `skip_predicates`
+command.
+
+By default, frames from read-only files will be hidden, and frames containing
+``__tracebackhide__=True`` will be hidden as well.
+
+Frames containing ``__debuggerskip__`` will be stepped over, and frames whose
+parent frame sets ``__debuggerskip__`` to ``True`` will be skipped.
+
+ >>> def helpers_helper():
+ ... pass
+ ...
+ ... def helper_1():
+ ... print("don't step in me")
+    ...     helpers_helper() # will be stepped over unless a breakpoint is set.
+ ...
+ ...
+ ... def helper_2():
+ ... print("in me neither")
+ ...
+
+One can define a decorator that wraps a function between the two helpers:
+
+ >>> def pdb_skipped_decorator(function):
+ ...
+ ...
+ ... def wrapped_fn(*args, **kwargs):
+ ... __debuggerskip__ = True
+ ... helper_1()
+ ... __debuggerskip__ = False
+ ... result = function(*args, **kwargs)
+ ... __debuggerskip__ = True
+ ... helper_2()
+ ... # setting __debuggerskip__ to False again is not necessary
+ ... return result
+ ...
+ ... return wrapped_fn
+
+When decorating a function, ipdb will directly step into ``bar()`` by
+default:
+
+    >>> @pdb_skipped_decorator
+ ... def bar(x, y):
+ ... return x * y
+
+
+You can toggle the behavior with
+
+ ipdb> skip_predicates debuggerskip false
+
+or configure it in your ``.pdbrc``
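+
+for example with a line like (illustrative)::
+
+    skip_predicates debuggerskip false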
+
+
+
+License
+-------
+
+Modified from the standard pdb.Pdb class to avoid including readline, so that
+the command line completion of other programs which include this isn't
+damaged.
+
+In the future, this class will be expanded with improvements over the standard
+pdb.
+
+The original code in this file is mainly lifted out of cmd.py in Python 2.2,
+with minor changes. Licensing should therefore be under the standard Python
+terms. For details on the PSF (Python Software Foundation) standard license,
+see:
+
+https://docs.python.org/2/license.html
+
+
+All the changes since then are under the same license as IPython.
+
+"""
+
+#*****************************************************************************
+#
+# This file is licensed under the PSF license.
+#
+# Copyright (C) 2001 Python Software Foundation, www.python.org
+# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
+#
+#
+#*****************************************************************************
+
+import inspect
+import linecache
+import sys
+import re
+import os
+
+from IPython import get_ipython
+from IPython.utils import PyColorize
+from IPython.utils import coloransi, py3compat
+from IPython.core.excolors import exception_colors
+
+# skip module docstests
+__skip_doctest__ = True
+
+prompt = 'ipdb> '
+
+# We have to check this directly from sys.argv, config struct not yet available
+from pdb import Pdb as OldPdb
+
+# Allow the set_trace code to operate outside of an ipython instance, even if
+# it does so with some limitations. The rest of this support is implemented in
+# the Tracer constructor.
+
+DEBUGGERSKIP = "__debuggerskip__"
+
+
+def make_arrow(pad):
+ """generate the leading arrow in front of traceback or debugger"""
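+    # For reference: make_arrow(5) == '---> ' and make_arrow(1) == '>'.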
+ if pad >= 2:
+ return '-'*(pad-2) + '> '
+ elif pad == 1:
+ return '>'
+ return ''
+
+
+def BdbQuit_excepthook(et, ev, tb, excepthook=None):
+ """Exception hook which handles `BdbQuit` exceptions.
+
+ All other exceptions are processed using the `excepthook`
+ parameter.
+ """
+ raise ValueError(
+ "`BdbQuit_excepthook` is deprecated since version 5.1",
+ )
+
+
+def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
+    raise ValueError(
+        "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
+    )
+
+
+RGX_EXTRA_INDENT = re.compile(r'(?<=\n)\s+')
+
+
+def strip_indentation(multiline_string):
+ return RGX_EXTRA_INDENT.sub('', multiline_string)
+
+
+def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
+ """Make new_fn have old_fn's doc string. This is particularly useful
+ for the ``do_...`` commands that hook into the help system.
+    Adapted from a comp.lang.python posting
+ by Duncan Booth."""
+ def wrapper(*args, **kw):
+ return new_fn(*args, **kw)
+ if old_fn.__doc__:
+ wrapper.__doc__ = strip_indentation(old_fn.__doc__) + additional_text
+ return wrapper
+
+
+class Pdb(OldPdb):
+ """Modified Pdb class, does not load readline.
+
+ for a standalone version that uses prompt_toolkit, see
+ `IPython.terminal.debugger.TerminalPdb` and
+ `IPython.terminal.debugger.set_trace()`
+
+
+ This debugger can hide and skip frames that are tagged according to some predicates.
+ See the `skip_predicates` commands.
+
+ """
+
+ default_predicates = {
+ "tbhide": True,
+ "readonly": False,
+ "ipython_internal": True,
+ "debuggerskip": True,
+ }
+
+ def __init__(self, completekey=None, stdin=None, stdout=None, context=5, **kwargs):
+ """Create a new IPython debugger.
+
+ Parameters
+ ----------
+ completekey : default None
+ Passed to pdb.Pdb.
+ stdin : default None
+ Passed to pdb.Pdb.
+ stdout : default None
+ Passed to pdb.Pdb.
+ context : int
+ Number of lines of source code context to show when
+ displaying stacktrace information.
+ **kwargs
+ Passed to pdb.Pdb.
+
+ Notes
+ -----
+ The possibilities are python version dependent, see the python
+ docs for more info.
+ """
+
+ # Parent constructor:
+ try:
+ self.context = int(context)
+ if self.context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError) as e:
+ raise ValueError("Context must be a positive integer") from e
+
+ # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`.
+ OldPdb.__init__(self, completekey, stdin, stdout, **kwargs)
+
+ # IPython changes...
+ self.shell = get_ipython()
+
+ if self.shell is None:
+ save_main = sys.modules['__main__']
+ # No IPython instance running, we must create one
+ from IPython.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ self.shell = TerminalInteractiveShell.instance()
+ # needed by any code which calls __import__("__main__") after
+ # the debugger was entered. See also #9941.
+ sys.modules["__main__"] = save_main
+
+
+ color_scheme = self.shell.colors
+
+ self.aliases = {}
+
+ # Create color table: we copy the default one from the traceback
+ # module and add a few attributes needed for debugging
+ self.color_scheme_table = exception_colors()
+
+ # shorthands
+ C = coloransi.TermColors
+ cst = self.color_scheme_table
+
+ cst['NoColor'].colors.prompt = C.NoColor
+ cst['NoColor'].colors.breakpoint_enabled = C.NoColor
+ cst['NoColor'].colors.breakpoint_disabled = C.NoColor
+
+ cst['Linux'].colors.prompt = C.Green
+ cst['Linux'].colors.breakpoint_enabled = C.LightRed
+ cst['Linux'].colors.breakpoint_disabled = C.Red
+
+ cst['LightBG'].colors.prompt = C.Blue
+ cst['LightBG'].colors.breakpoint_enabled = C.LightRed
+ cst['LightBG'].colors.breakpoint_disabled = C.Red
+
+ cst['Neutral'].colors.prompt = C.Blue
+ cst['Neutral'].colors.breakpoint_enabled = C.LightRed
+ cst['Neutral'].colors.breakpoint_disabled = C.Red
+
+ # Add a python parser so we can syntax highlight source while
+ # debugging.
+ self.parser = PyColorize.Parser(style=color_scheme)
+ self.set_colors(color_scheme)
+
+ # Set the prompt - the default prompt is '(Pdb)'
+ self.prompt = prompt
+ self.skip_hidden = True
+ self.report_skipped = True
+
+ # list of predicates we use to skip frames
+ self._predicates = self.default_predicates
+
+ #
+ def set_colors(self, scheme):
+ """Shorthand access to the color table scheme selector method."""
+ self.color_scheme_table.set_active_scheme(scheme)
+ self.parser.style = scheme
+
+ def set_trace(self, frame=None):
+ if frame is None:
+ frame = sys._getframe().f_back
+ self.initial_frame = frame
+ return super().set_trace(frame)
+
+ def _hidden_predicate(self, frame):
+ """
+        Given a frame, return whether or not it should be hidden by IPython.
+ """
+
+ if self._predicates["readonly"]:
+ fname = frame.f_code.co_filename
+            # we need to check for file existence; interactively defined
+            # functions would otherwise appear as read-only.
+ if os.path.isfile(fname) and not os.access(fname, os.W_OK):
+ return True
+
+ if self._predicates["tbhide"]:
+ if frame in (self.curframe, getattr(self, "initial_frame", None)):
+ return False
+ frame_locals = self._get_frame_locals(frame)
+ if "__tracebackhide__" not in frame_locals:
+ return False
+ return frame_locals["__tracebackhide__"]
+ return False
+
+ def hidden_frames(self, stack):
+ """
+ Given an index in the stack return whether it should be skipped.
+
+ This is used in up/down and where to skip frames.
+ """
+ # The f_locals dictionary is updated from the actual frame
+ # locals whenever the .f_locals accessor is called, so we
+ # avoid calling it here to preserve self.curframe_locals.
+ # Furthermore, there is no good reason to hide the current frame.
+ ip_hide = [self._hidden_predicate(s[0]) for s in stack]
+ ip_start = [i for i, s in enumerate(ip_hide) if s == "__ipython_bottom__"]
+ if ip_start and self._predicates["ipython_internal"]:
+ ip_hide = [h if i > ip_start[0] else True for (i, h) in enumerate(ip_hide)]
+ return ip_hide
+
+ def interaction(self, frame, traceback):
+ try:
+ OldPdb.interaction(self, frame, traceback)
+ except KeyboardInterrupt:
+ self.stdout.write("\n" + self.shell.get_exception_only())
+
+ def precmd(self, line):
+ """Perform useful escapes on the command before it is executed."""
+
+ if line.endswith("??"):
+ line = "pinfo2 " + line[:-2]
+ elif line.endswith("?"):
+ line = "pinfo " + line[:-1]
+
+ line = super().precmd(line)
+
+ return line
+
+ def new_do_frame(self, arg):
+ OldPdb.do_frame(self, arg)
+
+ def new_do_quit(self, arg):
+
+ if hasattr(self, 'old_all_completions'):
+ self.shell.Completer.all_completions = self.old_all_completions
+
+ return OldPdb.do_quit(self, arg)
+
+ do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
+
+ def new_do_restart(self, arg):
+ """Restart command. In the context of ipython this is exactly the same
+ thing as 'quit'."""
+ self.msg("Restart doesn't make sense here. Using 'quit' instead.")
+ return self.do_quit(arg)
+
+ def print_stack_trace(self, context=None):
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ if context is None:
+ context = self.context
+ try:
+ context = int(context)
+ if context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError) as e:
+ raise ValueError("Context must be a positive integer") from e
+ try:
+ skipped = 0
+ for hidden, frame_lineno in zip(self.hidden_frames(self.stack), self.stack):
+ if hidden and self.skip_hidden:
+ skipped += 1
+ continue
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+ skipped = 0
+ self.print_stack_entry(frame_lineno, context=context)
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+ except KeyboardInterrupt:
+ pass
+
+ def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
+ context=None):
+ if context is None:
+ context = self.context
+ try:
+ context = int(context)
+ if context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError) as e:
+ raise ValueError("Context must be a positive integer") from e
+ print(self.format_stack_entry(frame_lineno, '', context), file=self.stdout)
+
+ # vds: >>
+ frame, lineno = frame_lineno
+ filename = frame.f_code.co_filename
+ self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
+ # vds: <<
+
+ def _get_frame_locals(self, frame):
+        """
+        Accessing ``f_locals`` of the current frame resets the namespace, so we
+        want to avoid that, or the following can happen:
+
+        ipdb> foo
+        "old"
+        ipdb> foo = "new"
+        ipdb> foo
+        "new"
+        ipdb> where
+        ipdb> foo
+        "old"
+
+        So if ``frame`` is ``self.curframe``, we instead return
+        ``self.curframe_locals``.
+
+ """
+ if frame is self.curframe:
+ return self.curframe_locals
+ else:
+ return frame.f_locals
+
+ def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
+ if context is None:
+ context = self.context
+ try:
+ context = int(context)
+ if context <= 0:
+ print("Context must be a positive integer", file=self.stdout)
+ except (TypeError, ValueError):
+ print("Context must be a positive integer", file=self.stdout)
+
+ import reprlib
+
+ ret = []
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
+ tpl_call = "%s%%s%s%%s%s" % (Colors.vName, Colors.valEm, ColorsNormal)
+ tpl_line = "%%s%s%%s %s%%s" % (Colors.lineno, ColorsNormal)
+ tpl_line_em = "%%s%s%%s %s%%s%s" % (Colors.linenoEm, Colors.line, ColorsNormal)
+
+ frame, lineno = frame_lineno
+
+ return_value = ''
+ loc_frame = self._get_frame_locals(frame)
+ if "__return__" in loc_frame:
+ rv = loc_frame["__return__"]
+ # return_value += '->'
+ return_value += reprlib.repr(rv) + "\n"
+ ret.append(return_value)
+
+ #s = filename + '(' + `lineno` + ')'
+ filename = self.canonic(frame.f_code.co_filename)
+ link = tpl_link % py3compat.cast_unicode(filename)
+
+ if frame.f_code.co_name:
+ func = frame.f_code.co_name
+ else:
+ func = "<lambda>"
+
+ call = ""
+ if func != "?":
+ if "__args__" in loc_frame:
+ args = reprlib.repr(loc_frame["__args__"])
+ else:
+ args = '()'
+ call = tpl_call % (func, args)
+
+ # The level info should be generated in the same format pdb uses, to
+ # avoid breaking the pdbtrack functionality of python-mode in *emacs.
+ if frame is self.curframe:
+ ret.append('> ')
+ else:
+ ret.append(" ")
+ ret.append("%s(%s)%s\n" % (link, lineno, call))
+
+ start = lineno - 1 - context//2
+ lines = linecache.getlines(filename)
+ start = min(start, len(lines) - context)
+ start = max(start, 0)
+ lines = lines[start : start + context]
+
+ for i, line in enumerate(lines):
+ show_arrow = start + 1 + i == lineno
+ linetpl = (frame is self.curframe or show_arrow) and tpl_line_em or tpl_line
+ ret.append(
+ self.__format_line(
+ linetpl, filename, start + 1 + i, line, arrow=show_arrow
+ )
+ )
+ return "".join(ret)
+
+ def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
+ bp_mark = ""
+ bp_mark_color = ""
+
+ new_line, err = self.parser.format2(line, 'str')
+ if not err:
+ line = new_line
+
+ bp = None
+ if lineno in self.get_file_breaks(filename):
+ bps = self.get_breaks(filename, lineno)
+ bp = bps[-1]
+
+ if bp:
+ Colors = self.color_scheme_table.active_colors
+ bp_mark = str(bp.number)
+ bp_mark_color = Colors.breakpoint_enabled
+ if not bp.enabled:
+ bp_mark_color = Colors.breakpoint_disabled
+
+ numbers_width = 7
+ if arrow:
+ # This is the line with the error
+ pad = numbers_width - len(str(lineno)) - len(bp_mark)
+ num = '%s%s' % (make_arrow(pad), str(lineno))
+ else:
+ num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
+
+ return tpl_line % (bp_mark_color + bp_mark, num, line)
+
+ def print_list_lines(self, filename, first, last):
+ """The printing (as opposed to the parsing part of a 'list'
+ command."""
+ try:
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
+ tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
+ src = []
+ if filename == "<string>" and hasattr(self, "_exec_filename"):
+ filename = self._exec_filename
+
+ for lineno in range(first, last+1):
+ line = linecache.getline(filename, lineno)
+ if not line:
+ break
+
+ if lineno == self.curframe.f_lineno:
+ line = self.__format_line(
+ tpl_line_em, filename, lineno, line, arrow=True
+ )
+ else:
+ line = self.__format_line(
+ tpl_line, filename, lineno, line, arrow=False
+ )
+
+ src.append(line)
+ self.lineno = lineno
+
+ print(''.join(src), file=self.stdout)
+
+ except KeyboardInterrupt:
+ pass
+
+ def do_skip_predicates(self, args):
+ """
+        Turn on/off individual predicates as to whether a frame should be hidden or skipped.
+
+ The global option to skip (or not) hidden frames is set with skip_hidden
+
+ To change the value of a predicate
+
+ skip_predicates key [true|false]
+
+ Call without arguments to see the current values.
+
+ To permanently change the value of an option add the corresponding
+ command to your ``~/.pdbrc`` file. If you are programmatically using the
+ Pdb instance you can also change the ``default_predicates`` class
+ attribute.
+ """
+ if not args.strip():
+ print("current predicates:")
+ for p, v in self._predicates.items():
+ print(" ", p, ":", v)
+ return
+ type_value = args.strip().split(" ")
+ if len(type_value) != 2:
+ print(
+ f"Usage: skip_predicates <type> <value>, with <type> one of {set(self._predicates.keys())}"
+ )
+ return
+
+ type_, value = type_value
+ if type_ not in self._predicates:
+ print(f"{type_!r} not in {set(self._predicates.keys())}")
+ return
+ if value.lower() not in ("true", "yes", "1", "no", "false", "0"):
+ print(
+ f"{value!r} is invalid - use one of ('true', 'yes', '1', 'no', 'false', '0')"
+ )
+ return
+
+ self._predicates[type_] = value.lower() in ("true", "yes", "1")
+ if not any(self._predicates.values()):
+ print(
+ "Warning, all predicates set to False, skip_hidden may not have any effects."
+ )
+
+ def do_skip_hidden(self, arg):
+ """
+ Change whether or not we should skip frames with the
+ __tracebackhide__ attribute.
+ """
+ if not arg.strip():
+ print(
+ f"skip_hidden = {self.skip_hidden}, use 'yes','no', 'true', or 'false' to change."
+ )
+ elif arg.strip().lower() in ("true", "yes"):
+ self.skip_hidden = True
+ elif arg.strip().lower() in ("false", "no"):
+ self.skip_hidden = False
+ if not any(self._predicates.values()):
+ print(
+ "Warning, all predicates set to False, skip_hidden may not have any effects."
+ )
+
+ def do_list(self, arg):
+ """Print lines of code from the current stack frame
+ """
+ self.lastcmd = 'list'
+ last = None
+ if arg:
+ try:
+ x = eval(arg, {}, {})
+ if type(x) == type(()):
+ first, last = x
+ first = int(first)
+ last = int(last)
+ if last < first:
+ # Assume it's a count
+ last = first + last
+ else:
+ first = max(1, int(x) - 5)
+ except:
+ print('*** Error in argument:', repr(arg), file=self.stdout)
+ return
+ elif self.lineno is None:
+ first = max(1, self.curframe.f_lineno - 5)
+ else:
+ first = self.lineno + 1
+ if last is None:
+ last = first + 10
+ self.print_list_lines(self.curframe.f_code.co_filename, first, last)
+
+ # vds: >>
+ lineno = first
+ filename = self.curframe.f_code.co_filename
+ self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
+ # vds: <<
+
+ do_l = do_list
+
+ def getsourcelines(self, obj):
+ lines, lineno = inspect.findsource(obj)
+ if inspect.isframe(obj) and obj.f_globals is self._get_frame_locals(obj):
+ # must be a module frame: do not try to cut a block out of it
+ return lines, 1
+ elif inspect.ismodule(obj):
+ return lines, 1
+ return inspect.getblock(lines[lineno:]), lineno+1
+
+ def do_longlist(self, arg):
+ """Print lines of code from the current stack frame.
+
+ Shows more lines than 'list' does.
+ """
+ self.lastcmd = 'longlist'
+ try:
+ lines, lineno = self.getsourcelines(self.curframe)
+ except OSError as err:
+ self.error(err)
+ return
+ last = lineno + len(lines)
+ self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
+ do_ll = do_longlist
+
+ def do_debug(self, arg):
+ """debug code
+ Enter a recursive debugger that steps through the code
+ argument (which is an arbitrary expression or statement to be
+ executed in the current environment).
+ """
+ trace_function = sys.gettrace()
+ sys.settrace(None)
+ globals = self.curframe.f_globals
+ locals = self.curframe_locals
+ p = self.__class__(completekey=self.completekey,
+ stdin=self.stdin, stdout=self.stdout)
+ p.use_rawinput = self.use_rawinput
+ p.prompt = "(%s) " % self.prompt.strip()
+ self.message("ENTERING RECURSIVE DEBUGGER")
+ sys.call_tracing(p.run, (arg, globals, locals))
+ self.message("LEAVING RECURSIVE DEBUGGER")
+ sys.settrace(trace_function)
+ self.lastcmd = p.lastcmd
+
+ def do_pdef(self, arg):
+ """Print the call signature for any callable object.
+
+ The debugger interface to %pdef"""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pdef")(arg, namespaces=namespaces)
+
+ def do_pdoc(self, arg):
+ """Print the docstring for an object.
+
+ The debugger interface to %pdoc."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pdoc")(arg, namespaces=namespaces)
+
+ def do_pfile(self, arg):
+ """Print (or run through pager) the file where an object is defined.
+
+ The debugger interface to %pfile.
+ """
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pfile")(arg, namespaces=namespaces)
+
+ def do_pinfo(self, arg):
+ """Provide detailed information about an object.
+
+ The debugger interface to %pinfo, i.e., obj?."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pinfo")(arg, namespaces=namespaces)
+
+ def do_pinfo2(self, arg):
+ """Provide extra detailed information about an object.
+
+ The debugger interface to %pinfo2, i.e., obj??."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pinfo2")(arg, namespaces=namespaces)
+
+ def do_psource(self, arg):
+ """Print (or run through pager) the source code for an object."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("psource")(arg, namespaces=namespaces)
+
+ def do_where(self, arg):
+ """w(here)
+ Print a stack trace, with the most recent frame at the bottom.
+ An arrow indicates the "current frame", which determines the
+ context of most commands. 'bt' is an alias for this command.
+
+        Takes an optional number argument specifying the number of lines of
+        context to print."""
+ if arg:
+ try:
+ context = int(arg)
+ except ValueError as err:
+ self.error(err)
+ return
+ self.print_stack_trace(context)
+ else:
+ self.print_stack_trace()
+
+ do_w = do_where
+
+ def break_anywhere(self, frame):
+ """
+        _stop_in_decorator_internals is overly restrictive, as we may still want
+        to trace function calls, so we also need to update break_anywhere so
+        that, if we don't `stop_here` because of debugger skip, we may still
+        stop at any point inside the function.
+
+ """
+
+ sup = super().break_anywhere(frame)
+ if sup:
+ return sup
+ if self._predicates["debuggerskip"]:
+ if DEBUGGERSKIP in frame.f_code.co_varnames:
+ return True
+ if frame.f_back and self._get_frame_locals(frame.f_back).get(DEBUGGERSKIP):
+ return True
+ return False
+
+ def _is_in_decorator_internal_and_should_skip(self, frame):
+ """
+        Utility to tell us whether we are inside decorator internals and should skip them.
+
+ """
+
+ # if we are disabled don't skip
+ if not self._predicates["debuggerskip"]:
+ return False
+
+ # if frame is tagged, skip by default.
+ if DEBUGGERSKIP in frame.f_code.co_varnames:
+ return True
+
+        # if one of the parent frames has __debuggerskip__ set to True, skip as well.
+
+ cframe = frame
+ while getattr(cframe, "f_back", None):
+ cframe = cframe.f_back
+ if self._get_frame_locals(cframe).get(DEBUGGERSKIP):
+ return True
+
+ return False
+
+ def stop_here(self, frame):
+ if self._is_in_decorator_internal_and_should_skip(frame) is True:
+ return False
+
+ hidden = False
+ if self.skip_hidden:
+ hidden = self._hidden_predicate(frame)
+ if hidden:
+ if self.report_skipped:
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ print(
+ f"{Colors.excName} [... skipped 1 hidden frame]{ColorsNormal}\n"
+ )
+ return super().stop_here(frame)
+
+ def do_up(self, arg):
+ """u(p) [count]
+ Move the current frame count (default one) levels up in the
+ stack trace (to an older frame).
+
+ Will skip hidden frames.
+ """
+ # modified version of upstream that skips
+ # frames with __tracebackhide__
+ if self.curindex == 0:
+ self.error("Oldest frame")
+ return
+ try:
+ count = int(arg or 1)
+ except ValueError:
+ self.error("Invalid frame count (%s)" % arg)
+ return
+ skipped = 0
+ if count < 0:
+ _newframe = 0
+ else:
+ counter = 0
+ hidden_frames = self.hidden_frames(self.stack)
+ for i in range(self.curindex - 1, -1, -1):
+ if hidden_frames[i] and self.skip_hidden:
+ skipped += 1
+ continue
+ counter += 1
+ if counter >= count:
+ break
+ else:
+ # if no break occurred.
+ self.error(
+                    "all frames above hidden, use `skip_hidden False` to get into those."
+ )
+ return
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ _newframe = i
+ self._select_frame(_newframe)
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+
+ def do_down(self, arg):
+ """d(own) [count]
+ Move the current frame count (default one) levels down in the
+ stack trace (to a newer frame).
+
+ Will skip hidden frames.
+ """
+ if self.curindex + 1 == len(self.stack):
+ self.error("Newest frame")
+ return
+ try:
+ count = int(arg or 1)
+ except ValueError:
+ self.error("Invalid frame count (%s)" % arg)
+ return
+ if count < 0:
+ _newframe = len(self.stack) - 1
+ else:
+ counter = 0
+ skipped = 0
+ hidden_frames = self.hidden_frames(self.stack)
+ for i in range(self.curindex + 1, len(self.stack)):
+ if hidden_frames[i] and self.skip_hidden:
+ skipped += 1
+ continue
+ counter += 1
+ if counter >= count:
+ break
+ else:
+ self.error(
+                    "all frames below hidden, use `skip_hidden False` to get into those."
+ )
+ return
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+ _newframe = i
+
+ self._select_frame(_newframe)
+
+ do_d = do_down
+ do_u = do_up
+
+ def do_context(self, context):
+ """context number_of_lines
+ Set the number of lines of source code to show when displaying
+ stacktrace information.
+ """
+ try:
+ new_context = int(context)
+ if new_context <= 0:
+ raise ValueError()
+ self.context = new_context
+ except ValueError:
+ self.error("The 'context' command requires a positive integer argument.")
+
+
+class InterruptiblePdb(Pdb):
+ """Version of debugger where KeyboardInterrupt exits the debugger altogether."""
+
+ def cmdloop(self, intro=None):
+ """Wrap cmdloop() such that KeyboardInterrupt stops the debugger."""
+ try:
+ return OldPdb.cmdloop(self, intro=intro)
+ except KeyboardInterrupt:
+ self.stop_here = lambda frame: False
+ self.do_quit("")
+ sys.settrace(None)
+ self.quitting = False
+ raise
+
+ def _cmdloop(self):
+ while True:
+ try:
+ # keyboard interrupts allow for an easy way to cancel
+ # the current command, so allow them during interactive input
+ self.allow_kbdint = True
+ self.cmdloop()
+ self.allow_kbdint = False
+ break
+ except KeyboardInterrupt:
+ self.message('--KeyboardInterrupt--')
+ raise
+
+
+def set_trace(frame=None):
+ """
+ Start debugging from `frame`.
+
+ If frame is not specified, debugging starts from caller's frame.
+ """
+ Pdb().set_trace(frame or sys._getframe().f_back)
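+
+
+# Usage sketch (illustrative):
+#
+#   from IPython.core.debugger import set_trace
+#
+#   def compute(x):
+#       set_trace()  # execution pauses here, in the IPython-flavoured pdb
+#       return x * 2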
diff --git a/contrib/python/ipython/py3/IPython/core/display.py b/contrib/python/ipython/py3/IPython/core/display.py
new file mode 100644
index 0000000000..ffa6e185c4
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/display.py
@@ -0,0 +1,1290 @@
+# -*- coding: utf-8 -*-
+"""Top-level display functions for displaying object in different formats."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+from binascii import b2a_base64, hexlify
+import html
+import json
+import mimetypes
+import os
+import struct
+import warnings
+from copy import deepcopy
+from os.path import splitext
+from pathlib import Path, PurePath
+
+from IPython.utils.py3compat import cast_unicode
+from IPython.testing.skipdoctest import skip_doctest
+from . import display_functions
+
+
+__all__ = ['display_pretty', 'display_html', 'display_markdown',
+ 'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
+ 'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
+ 'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'ProgressBar', 'JSON',
+ 'GeoJSON', 'Javascript', 'Image', 'set_matplotlib_formats',
+ 'set_matplotlib_close',
+ 'Video']
+
+_deprecated_names = ["display", "clear_output", "publish_display_data", "update_display", "DisplayHandle"]
+
+__all__ = __all__ + _deprecated_names
+
+
+# ----- warn to import from IPython.display -----
+
+from warnings import warn
+
+
+def __getattr__(name):
+ if name in _deprecated_names:
+        warn(f"Importing {name} from IPython.core.display is deprecated since IPython 7.14, please import from IPython.display", DeprecationWarning, stacklevel=2)
+ return getattr(display_functions, name)
+
+ if name in globals().keys():
+ return globals()[name]
+ else:
+ raise AttributeError(f"module {__name__} has no attribute {name}")
+
+
+#-----------------------------------------------------------------------------
+# utility functions
+#-----------------------------------------------------------------------------
+
+def _safe_exists(path):
+ """Check path, but don't let exceptions raise"""
+ try:
+ return os.path.exists(path)
+ except Exception:
+ return False
+
+
+def _display_mimetype(mimetype, objs, raw=False, metadata=None):
+ """internal implementation of all display_foo methods
+
+ Parameters
+ ----------
+ mimetype : str
+ The mimetype to be published (e.g. 'image/png')
+ *objs : object
+ The Python objects to display, or if raw=True raw text data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ if metadata:
+ metadata = {mimetype: metadata}
+ if raw:
+ # turn list of pngdata into list of { 'image/png': pngdata }
+ objs = [ {mimetype: obj} for obj in objs ]
+ display_functions.display(*objs, raw=raw, metadata=metadata, include=[mimetype])
+
+#-----------------------------------------------------------------------------
+# Main functions
+#-----------------------------------------------------------------------------
+
+
+def display_pretty(*objs, **kwargs):
+ """Display the pretty (default) representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw text data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/plain', objs, **kwargs)
+
+
+def display_html(*objs, **kwargs):
+ """Display the HTML representation of an object.
+
+ Note: If raw=False and the object does not have a HTML
+ representation, no HTML will be shown.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw HTML data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/html', objs, **kwargs)
+
+
+def display_markdown(*objs, **kwargs):
+    """Display the Markdown representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw markdown data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+
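+    # e.g. (illustrative, in a frontend that renders Markdown):
+    #   display_markdown('**bold** and _italic_', raw=True)
+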
+ _display_mimetype('text/markdown', objs, **kwargs)
+
+
+def display_svg(*objs, **kwargs):
+ """Display the SVG representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw svg data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/svg+xml', objs, **kwargs)
+
+
+def display_png(*objs, **kwargs):
+ """Display the PNG representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw png data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/png', objs, **kwargs)
+
+
+def display_jpeg(*objs, **kwargs):
+ """Display the JPEG representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw JPEG data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/jpeg', objs, **kwargs)
+
+
+def display_latex(*objs, **kwargs):
+ """Display the LaTeX representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw latex data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/latex', objs, **kwargs)
+
+
+def display_json(*objs, **kwargs):
+ """Display the JSON representation of an object.
+
+ Note that not many frontends support displaying JSON.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw json data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/json', objs, **kwargs)
+
+
+def display_javascript(*objs, **kwargs):
+ """Display the Javascript representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw javascript data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/javascript', objs, **kwargs)
+
+
+def display_pdf(*objs, **kwargs):
+ """Display the PDF representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw PDF data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/pdf', objs, **kwargs)
+
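+# The helpers above share one behavior worth a concrete illustration: without
+# raw=True the arguments are Python objects routed through the display
+# formatter; with raw=True they are treated as pre-rendered payloads for the
+# named mimetype. A minimal sketch (illustrative only, not part of the module):
+def _example_display_mimetype_helpers():  # pragma: no cover - illustrative sketch
+ display_html("<b>already rendered</b>", raw=True)  # publish an HTML string as-is
+ display_json({"answer": 42}, raw=True)  # publish a JSON-able payload directly
+ display_pretty("plain text fallback", raw=True)  # text/plain output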
+
+#-----------------------------------------------------------------------------
+# Smart classes
+#-----------------------------------------------------------------------------
+
+
+class DisplayObject(object):
+ """An object that wraps data to be displayed."""
+
+ _read_flags = 'r'
+ _show_mem_addr = False
+ metadata = None
+
+ def __init__(self, data=None, url=None, filename=None, metadata=None):
+ """Create a display object given raw data.
+
+ When this object is returned by an expression or passed to the
+ display function, it will result in the data being displayed
+ in the frontend. The MIME type of the data should match the
+ subclasses used, so the Png subclass should be used for 'image/png'
+ data. If the data is a URL, the data will first be downloaded
+ and then displayed.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw data or a URL or file to load the data from
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ metadata : dict
+ Dict of metadata to be associated with the object when displayed
+ """
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if data is not None and isinstance(data, str):
+ if data.startswith('http') and url is None:
+ url = data
+ filename = None
+ data = None
+ elif _safe_exists(data) and filename is None:
+ url = None
+ filename = data
+ data = None
+
+ self.url = url
+ self.filename = filename
+ # because of @data.setter methods in
+ # subclasses ensure url and filename are set
+ # before assigning to self.data
+ self.data = data
+
+ if metadata is not None:
+ self.metadata = metadata
+ elif self.metadata is None:
+ self.metadata = {}
+
+ self.reload()
+ self._check_data()
+
+ def __repr__(self):
+ if not self._show_mem_addr:
+ cls = self.__class__
+ r = "<%s.%s object>" % (cls.__module__, cls.__name__)
+ else:
+ r = super(DisplayObject, self).__repr__()
+ return r
+
+ def _check_data(self):
+ """Override in subclasses if there's something to check."""
+ pass
+
+ def _data_and_metadata(self):
+ """shortcut for returning metadata with shape information, if defined"""
+ if self.metadata:
+ return self.data, deepcopy(self.metadata)
+ else:
+ return self.data
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ if self.filename is not None:
+ encoding = None if "b" in self._read_flags else "utf-8"
+ with open(self.filename, self._read_flags, encoding=encoding) as f:
+ self.data = f.read()
+ elif self.url is not None:
+ # Deferred import
+ from urllib.request import urlopen
+ response = urlopen(self.url)
+ data = response.read()
+ # extract encoding from header, if there is one:
+ encoding = None
+ if 'content-type' in response.headers:
+ for sub in response.headers['content-type'].split(';'):
+ sub = sub.strip()
+ if sub.startswith('charset'):
+ encoding = sub.split('=')[-1].strip()
+ break
+ if 'content-encoding' in response.headers:
+ # TODO: do deflate?
+ if 'gzip' in response.headers['content-encoding']:
+ import gzip
+ from io import BytesIO
+
+ # assume utf-8 if encoding is not specified
+ with gzip.open(
+ BytesIO(data), "rt", encoding=encoding or "utf-8"
+ ) as fp:
+ encoding = None
+ data = fp.read()
+
+ # decode data, if an encoding was specified
+ # We only touch self.data once since
+ # subclasses such as SVG have @data.setter methods
+ # that transform self.data into ... well svg.
+ if encoding:
+ self.data = data.decode(encoding, 'replace')
+ else:
+ self.data = data
+
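+# How the constructor above resolves a single positional argument, as a small
+# sketch (the payload string is a placeholder; note that reload() fetches
+# url/filename content eagerly):
+def _example_displayobject_sources():  # pragma: no cover - illustrative sketch
+ obj = DisplayObject(data="literal payload")  # neither a URL nor an existing path: kept as data
+ assert obj.data == "literal payload" and obj.url is None and obj.filename is None
+ # a string starting with 'http' would instead be treated as url=..., and an
+ # existing local path as filename=...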
+
+class TextDisplayObject(DisplayObject):
+ """Create a text display object given raw data.
+
+ Parameters
+ ----------
+ data : str or unicode
+ The raw data or a URL or file to load the data from.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ metadata : dict
+ Dict of metadata to be associated with the object when displayed
+ """
+ def _check_data(self):
+ if self.data is not None and not isinstance(self.data, str):
+ raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
+
+class Pretty(TextDisplayObject):
+
+ def _repr_pretty_(self, pp, cycle):
+ return pp.text(self.data)
+
+
+class HTML(TextDisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, metadata=None):
+ def warn():
+ if not data:
+ return False
+
+ #
+ # Avoid calling lower() on the entire data, because it could be a
+ # long string and we're only interested in its beginning and end.
+ #
+ prefix = data[:10].lower()
+ suffix = data[-10:].lower()
+ return prefix.startswith("<iframe ") and suffix.endswith("</iframe>")
+
+ if warn():
+ warnings.warn("Consider using IPython.display.IFrame instead")
+ super(HTML, self).__init__(data=data, url=url, filename=filename, metadata=metadata)
+
+ def _repr_html_(self):
+ return self._data_and_metadata()
+
+ def __html__(self):
+ """
+ This method exists to inform other HTML-using modules (e.g. Markupsafe,
+ htmltag, etc) that this object is HTML and does not need things like
+ special characters (<>&) escaped.
+ """
+ return self._repr_html_()
+
+
+class Markdown(TextDisplayObject):
+
+ def _repr_markdown_(self):
+ return self._data_and_metadata()
+
+
+class Math(TextDisplayObject):
+
+ def _repr_latex_(self):
+ s = r"$\displaystyle %s$" % self.data.strip('$')
+ if self.metadata:
+ return s, deepcopy(self.metadata)
+ else:
+ return s
+
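+# Math wraps its text in a displaystyle inline-math environment; a small sketch:
+def _example_math_repr():  # pragma: no cover - illustrative sketch
+ m = Math(r"\int_0^1 x^2\,dx = \frac{1}{3}")
+ # surrounding '$' characters, if present, are stripped before re-wrapping
+ assert m._repr_latex_() == r"$\displaystyle \int_0^1 x^2\,dx = \frac{1}{3}$"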
+
+class Latex(TextDisplayObject):
+
+ def _repr_latex_(self):
+ return self._data_and_metadata()
+
+
+class SVG(DisplayObject):
+ """Embed an SVG into the display.
+
+ Note: if you just want to view an SVG image via a URL, use :class:`Image` with
+ a url=URL keyword argument.
+ """
+
+ _read_flags = 'rb'
+ # wrap data in a property, which extracts the <svg> tag, discarding
+ # document headers
+ _data = None
+
+ @property
+ def data(self):
+ return self._data
+
+ @data.setter
+ def data(self, svg):
+ if svg is None:
+ self._data = None
+ return
+ # parse into dom object
+ from xml.dom import minidom
+ x = minidom.parseString(svg)
+ # get the svg tag (there should be exactly one)
+ found_svg = x.getElementsByTagName('svg')
+ if found_svg:
+ svg = found_svg[0].toxml()
+ else:
+ # fallback on the input, trust the user
+ # but this is probably an error.
+ pass
+ svg = cast_unicode(svg)
+ self._data = svg
+
+ def _repr_svg_(self):
+ return self._data_and_metadata()
+
+class ProgressBar(DisplayObject):
+ """Progressbar supports displaying a progressbar like element
+ """
+ def __init__(self, total):
+ """Creates a new progressbar
+
+ Parameters
+ ----------
+ total : int
+ maximum size of the progressbar
+ """
+ self.total = total
+ self._progress = 0
+ self.html_width = '60ex'
+ self.text_width = 60
+ self._display_id = hexlify(os.urandom(8)).decode('ascii')
+
+ def __repr__(self):
+ fraction = self.progress / self.total
+ filled = '=' * int(fraction * self.text_width)
+ rest = ' ' * (self.text_width - len(filled))
+ return '[{}{}] {}/{}'.format(
+ filled, rest,
+ self.progress, self.total,
+ )
+
+ def _repr_html_(self):
+ return "<progress style='width:{}' max='{}' value='{}'></progress>".format(
+ self.html_width, self.total, self.progress)
+
+ def display(self):
+ display_functions.display(self, display_id=self._display_id)
+
+ def update(self):
+ display_functions.display(self, display_id=self._display_id, update=True)
+
+ @property
+ def progress(self):
+ return self._progress
+
+ @progress.setter
+ def progress(self, value):
+ self._progress = value
+ self.update()
+
+ def __iter__(self):
+ self.display()
+ self._progress = -1 # First iteration is 0
+ return self
+
+ def __next__(self):
+ """Returns current value and increments display by one."""
+ self.progress += 1
+ if self.progress < self.total:
+ return self.progress
+ else:
+ raise StopIteration()
+
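+# Iterating a ProgressBar yields 0 .. total-1 and re-renders the same output
+# area through its display_id on every step; a sketch of the intended usage:
+def _example_progressbar_loop():  # pragma: no cover - illustrative sketch
+ bar = ProgressBar(5)
+ for step in bar:
+ pass  # do the work for `step`; the bar redraws itself via bar.update()
+ # the plain-text repr looks like '[=====...] 2/5' (60 columns wide by default)
+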
+class JSON(DisplayObject):
+ """JSON expects a JSON-able dict or list
+
+ not an already-serialized JSON string.
+
+ Scalar types (None, number, string) are not allowed, only dict or list containers.
+ """
+ # wrap data in a property, which warns about passing already-serialized JSON
+ _data = None
+ def __init__(self, data=None, url=None, filename=None, expanded=False, metadata=None, root='root', **kwargs):
+ """Create a JSON display object given raw data.
+
+ Parameters
+ ----------
+ data : dict or list
+ JSON data to display. Not an already-serialized JSON string.
+ Scalar types (None, number, string) are not allowed, only dict
+ or list containers.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ expanded : boolean
+ Metadata to control whether a JSON display component is expanded.
+ metadata : dict
+ Specify extra metadata to attach to the json display object.
+ root : str
+ The name of the root element of the JSON tree
+ """
+ self.metadata = {
+ 'expanded': expanded,
+ 'root': root,
+ }
+ if metadata:
+ self.metadata.update(metadata)
+ if kwargs:
+ self.metadata.update(kwargs)
+ super(JSON, self).__init__(data=data, url=url, filename=filename)
+
+ def _check_data(self):
+ if self.data is not None and not isinstance(self.data, (dict, list)):
+ raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
+
+ @property
+ def data(self):
+ return self._data
+
+ @data.setter
+ def data(self, data):
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if isinstance(data, str):
+ if self.filename is None and self.url is None:
+ warnings.warn("JSON expects JSONable dict or list, not JSON strings")
+ data = json.loads(data)
+ self._data = data
+
+ def _data_and_metadata(self):
+ return self.data, self.metadata
+
+ def _repr_json_(self):
+ return self._data_and_metadata()
+
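+# JSON wants the Python container itself, not a pre-serialized string; a sketch:
+def _example_json_display_object():  # pragma: no cover - illustrative sketch
+ j = JSON({"name": "demo", "values": [1, 2, 3]}, expanded=True, root="demo")
+ data, metadata = j._repr_json_()
+ assert data == {"name": "demo", "values": [1, 2, 3]}
+ assert metadata["expanded"] is True and metadata["root"] == "demo"
+ # passing an already-serialized string would warn and be json.loads()'d instead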
+
+_css_t = """var link = document.createElement("link");
+ link.rel = "stylesheet";
+ link.type = "text/css";
+ link.href = "%s";
+ document.head.appendChild(link);
+"""
+
+_lib_t1 = """new Promise(function(resolve, reject) {
+ var script = document.createElement("script");
+ script.onload = resolve;
+ script.onerror = reject;
+ script.src = "%s";
+ document.head.appendChild(script);
+}).then(() => {
+"""
+
+_lib_t2 = """
+});"""
+
+class GeoJSON(JSON):
+ """GeoJSON expects JSON-able dict
+
+ not an already-serialized JSON string.
+
+ Scalar types (None, number, string) are not allowed, only dict containers.
+ """
+
+ def __init__(self, *args, **kwargs):
+ """Create a GeoJSON display object given raw data.
+
+ Parameters
+ ----------
+ data : dict or list
+ GeoJSON data. Not an already-serialized JSON string.
+ Scalar types (None, number, string) are not allowed, only dict
+ or list containers.
+ url_template : string
+ Leaflet TileLayer URL template: http://leafletjs.com/reference.html#url-template
+ layer_options : dict
+ Leaflet TileLayer options: http://leafletjs.com/reference.html#tilelayer-options
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ metadata : dict
+ Specify extra metadata to attach to the json display object.
+
+ Examples
+ --------
+ The following will display an interactive map of Mars with a point of
+ interest on frontends that support GeoJSON display.
+
+ >>> from IPython.display import GeoJSON
+
+ >>> GeoJSON(data={
+ ... "type": "Feature",
+ ... "geometry": {
+ ... "type": "Point",
+ ... "coordinates": [-81.327, 296.038]
+ ... }
+ ... },
+ ... url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png",
+ ... layer_options={
+ ... "basemap_id": "celestia_mars-shaded-16k_global",
+ ... "attribution" : "Celestia/praesepe",
+ ... "minZoom" : 0,
+ ... "maxZoom" : 18,
+ ... })
+ <IPython.core.display.GeoJSON object>
+
+ In the terminal IPython, you will only see the text representation of
+ the GeoJSON object.
+
+ """
+
+ super(GeoJSON, self).__init__(*args, **kwargs)
+
+
+ def _ipython_display_(self):
+ bundle = {
+ 'application/geo+json': self.data,
+ 'text/plain': '<IPython.display.GeoJSON object>'
+ }
+ metadata = {
+ 'application/geo+json': self.metadata
+ }
+ display_functions.display(bundle, metadata=metadata, raw=True)
+
+class Javascript(TextDisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
+ """Create a Javascript display object given raw data.
+
+ When this object is returned by an expression or passed to the
+ display function, it will result in the data being displayed
+ in the frontend. If the data is a URL, the data will first be
+ downloaded and then displayed.
+
+ In the Notebook, the containing element will be available as `element`,
+ and jQuery will be available. Content appended to `element` will be
+ visible in the output area.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The Javascript source code or a URL to download it from.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ lib : list or str
+ A sequence of Javascript library URLs to load asynchronously before
+ running the source code. The full URLs of the libraries should
+ be given. A single Javascript library URL can also be given as a
+ string.
+ css : list or str
+ A sequence of css files to load before running the source code.
+ The full URLs of the css files should be given. A single css URL
+ can also be given as a string.
+ """
+ if isinstance(lib, str):
+ lib = [lib]
+ elif lib is None:
+ lib = []
+ if isinstance(css, str):
+ css = [css]
+ elif css is None:
+ css = []
+ if not isinstance(lib, (list,tuple)):
+ raise TypeError('expected sequence, got: %r' % lib)
+ if not isinstance(css, (list,tuple)):
+ raise TypeError('expected sequence, got: %r' % css)
+ self.lib = lib
+ self.css = css
+ super(Javascript, self).__init__(data=data, url=url, filename=filename)
+
+ def _repr_javascript_(self):
+ r = ''
+ for c in self.css:
+ r += _css_t % c
+ for l in self.lib:
+ r += _lib_t1 % l
+ r += self.data
+ r += _lib_t2*len(self.lib)
+ return r
+
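+# A sketch of how lib/css URLs wrap the source via the templates above (the URL
+# below is a placeholder, not a real CDN):
+def _example_javascript_with_lib():  # pragma: no cover - illustrative sketch
+ js = Javascript("element.textContent = 'ready';", lib="https://cdn.invalid/lib.js")
+ # _repr_javascript_() wraps the source in the promise-based loader from
+ # _lib_t1/_lib_t2 so the library is fetched before the code runs
+ assert "cdn.invalid/lib.js" in js._repr_javascript_()
+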
+# constants for identifying png/jpeg data
+_PNG = b'\x89PNG\r\n\x1a\n'
+_JPEG = b'\xff\xd8'
+
+def _pngxy(data):
+ """read the (width, height) from a PNG header"""
+ ihdr = data.index(b'IHDR')
+ # next 8 bytes are width/height
+ return struct.unpack('>ii', data[ihdr+4:ihdr+12])
+
+def _jpegxy(data):
+ """read the (width, height) from a JPEG header"""
+ # adapted from http://www.64lines.com/jpeg-width-height
+
+ idx = 4
+ while True:
+ block_size = struct.unpack('>H', data[idx:idx+2])[0]
+ idx = idx + block_size
+ if data[idx:idx+2] == b'\xFF\xC0':
+ # found Start of Frame
+ iSOF = idx
+ break
+ else:
+ # read another block
+ idx += 2
+
+ h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
+ return w, h
+
+def _gifxy(data):
+ """read the (width, height) from a GIF header"""
+ return struct.unpack('<HH', data[6:10])
+
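+# The helpers above read dimensions straight out of the binary headers; a small
+# self-check sketch for the PNG case using a synthetic, minimal header:
+def _example_pngxy():  # pragma: no cover - illustrative sketch
+ fake_png = _PNG + struct.pack(">I", 13) + b"IHDR" + struct.pack(">II", 640, 480)
+ assert _pngxy(fake_png) == (640, 480)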
+
+class Image(DisplayObject):
+
+ _read_flags = 'rb'
+ _FMT_JPEG = u'jpeg'
+ _FMT_PNG = u'png'
+ _FMT_GIF = u'gif'
+ _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG, _FMT_GIF]
+ _MIMETYPES = {
+ _FMT_PNG: 'image/png',
+ _FMT_JPEG: 'image/jpeg',
+ _FMT_GIF: 'image/gif',
+ }
+
+ def __init__(
+ self,
+ data=None,
+ url=None,
+ filename=None,
+ format=None,
+ embed=None,
+ width=None,
+ height=None,
+ retina=False,
+ unconfined=False,
+ metadata=None,
+ alt=None,
+ ):
+ """Create a PNG/JPEG/GIF image object given raw data.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in the image being displayed
+ in the frontend.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw image data or a URL or filename to load the data from.
+ This always results in embedded image data.
+
+ url : unicode
+ A URL to download the data from. If you specify `url=`,
+ the image data will not be embedded unless you also specify `embed=True`.
+
+ filename : unicode
+ Path to a local file to load the data from.
+ Images from a file are always embedded.
+
+ format : unicode
+ The format of the image data (png/jpeg/jpg/gif). If a filename or URL is given,
+ the format will be inferred from the filename extension.
+
+ embed : bool
+ Should the image data be embedded using a data URI (True) or be
+ loaded using an <img> tag (False)? Set this to True if you want the image
+ to be viewable later with no internet connection in the notebook.
+
+ Default is `True`, unless the keyword argument `url` is set, then
+ default value is `False`.
+
+ Note that QtConsole is not able to display images if `embed` is set to `False`.
+
+ width : int
+ Width in pixels to which to constrain the image in html
+
+ height : int
+ Height in pixels to which to constrain the image in html
+
+ retina : bool
+ Automatically set the width and height to half of the measured
+ width and height.
+ This only works for embedded images because it reads the width/height
+ from image data.
+ For non-embedded images, you can just set the desired display width
+ and height directly.
+
+ unconfined : bool
+ Set unconfined=True to disable max-width confinement of the image.
+
+ metadata : dict
+ Specify extra metadata to attach to the image.
+
+ alt : unicode
+ Alternative text for the image, for use by screen readers.
+
+ Examples
+ --------
+ When passed positionally, the first argument can be raw image data,
+ a URL, or a filename from which to load image data.
+ In all three cases the image data is embedded, which works in both the
+ qtconsole and the notebook.
+
+ >>> Image('https://www.google.fr/images/srpr/logo3w.png') # doctest: +SKIP
+ <IPython.core.display.Image object>
+
+ >>> Image('/path/to/image.jpg')
+ <IPython.core.display.Image object>
+
+ >>> Image(b'RAW_PNG_DATA...')
+ <IPython.core.display.Image object>
+
+ Specifying Image(url=...) does not embed the image data,
+ it only generates an ``<img>`` tag with a link to the source.
+ This will not work in the qtconsole or offline.
+
+ >>> Image(url='https://www.google.fr/images/srpr/logo3w.png')
+ <IPython.core.display.Image object>
+
+ """
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if filename is not None:
+ ext = self._find_ext(filename)
+ elif url is not None:
+ ext = self._find_ext(url)
+ elif data is None:
+ raise ValueError("No image data found. Expecting filename, url, or data.")
+ elif isinstance(data, str) and (
+ data.startswith('http') or _safe_exists(data)
+ ):
+ ext = self._find_ext(data)
+ else:
+ ext = None
+
+ if format is None:
+ if ext is not None:
+ if ext == u'jpg' or ext == u'jpeg':
+ format = self._FMT_JPEG
+ elif ext == u'png':
+ format = self._FMT_PNG
+ elif ext == u'gif':
+ format = self._FMT_GIF
+ else:
+ format = ext.lower()
+ elif isinstance(data, bytes):
+ # infer image type from image data header,
+ # only if format has not been specified.
+ if data[:2] == _JPEG:
+ format = self._FMT_JPEG
+
+ # failed to detect format, default png
+ if format is None:
+ format = self._FMT_PNG
+
+ if format.lower() == 'jpg':
+ # jpg->jpeg
+ format = self._FMT_JPEG
+
+ self.format = format.lower()
+ self.embed = embed if embed is not None else (url is None)
+
+ if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
+ raise ValueError("Cannot embed the '%s' image format" % (self.format))
+ if self.embed:
+ self._mimetype = self._MIMETYPES.get(self.format)
+
+ self.width = width
+ self.height = height
+ self.retina = retina
+ self.unconfined = unconfined
+ self.alt = alt
+ super(Image, self).__init__(data=data, url=url, filename=filename,
+ metadata=metadata)
+
+ if self.width is None and self.metadata.get('width', {}):
+ self.width = metadata['width']
+
+ if self.height is None and self.metadata.get('height', {}):
+ self.height = metadata['height']
+
+ if self.alt is None and self.metadata.get("alt", {}):
+ self.alt = metadata["alt"]
+
+ if retina:
+ self._retina_shape()
+
+
+ def _retina_shape(self):
+ """load pixel-doubled width and height from image data"""
+ if not self.embed:
+ return
+ if self.format == self._FMT_PNG:
+ w, h = _pngxy(self.data)
+ elif self.format == self._FMT_JPEG:
+ w, h = _jpegxy(self.data)
+ elif self.format == self._FMT_GIF:
+ w, h = _gifxy(self.data)
+ else:
+ # retina scaling needs a format whose size we can read (png/jpeg/gif)
+ return
+ self.width = w // 2
+ self.height = h // 2
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ if self.embed:
+ super(Image,self).reload()
+ if self.retina:
+ self._retina_shape()
+
+ def _repr_html_(self):
+ if not self.embed:
+ width = height = klass = alt = ""
+ if self.width:
+ width = ' width="%d"' % self.width
+ if self.height:
+ height = ' height="%d"' % self.height
+ if self.unconfined:
+ klass = ' class="unconfined"'
+ if self.alt:
+ alt = ' alt="%s"' % html.escape(self.alt)
+ return '<img src="{url}"{width}{height}{klass}{alt}/>'.format(
+ url=self.url,
+ width=width,
+ height=height,
+ klass=klass,
+ alt=alt,
+ )
+
+ def _repr_mimebundle_(self, include=None, exclude=None):
+ """Return the image as a mimebundle
+
+ Any new mimetype support should be implemented here.
+ """
+ if self.embed:
+ mimetype = self._mimetype
+ data, metadata = self._data_and_metadata(always_both=True)
+ if metadata:
+ metadata = {mimetype: metadata}
+ return {mimetype: data}, metadata
+ else:
+ return {'text/html': self._repr_html_()}
+
+ def _data_and_metadata(self, always_both=False):
+ """shortcut for returning metadata with shape information, if defined"""
+ try:
+ b64_data = b2a_base64(self.data, newline=False).decode("ascii")
+ except TypeError as e:
+ raise FileNotFoundError(
+ "No such file or directory: '%s'" % (self.data)) from e
+ md = {}
+ if self.metadata:
+ md.update(self.metadata)
+ if self.width:
+ md['width'] = self.width
+ if self.height:
+ md['height'] = self.height
+ if self.unconfined:
+ md['unconfined'] = self.unconfined
+ if self.alt:
+ md["alt"] = self.alt
+ if md or always_both:
+ return b64_data, md
+ else:
+ return b64_data
+
+ def _repr_png_(self):
+ if self.embed and self.format == self._FMT_PNG:
+ return self._data_and_metadata()
+
+ def _repr_jpeg_(self):
+ if self.embed and self.format == self._FMT_JPEG:
+ return self._data_and_metadata()
+
+ def _find_ext(self, s):
+ base, ext = splitext(s)
+
+ if not ext:
+ return base
+
+ # `splitext` includes leading period, so we skip it
+ return ext[1:].lower()
+
+
+class Video(DisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, embed=False,
+ mimetype=None, width=None, height=None, html_attributes="controls"):
+ """Create a video object given raw data or an URL.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in the video being displayed
+ in the frontend.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw video data or a URL or filename to load the data from.
+ Raw data will require passing ``embed=True``.
+
+ url : unicode
+ A URL for the video. If you specify ``url=``,
+ the video data will not be embedded.
+
+ filename : unicode
+ Path to a local file containing the video.
+ Will be interpreted as a local URL unless ``embed=True``.
+
+ embed : bool
+ Should the video be embedded using a data URI (True) or be
+ loaded using a <video> tag (False).
+
+ Since videos are large, embedding them should be avoided, if possible.
+ You must confirm embedding as your intention by passing ``embed=True``.
+
+ Local files can be displayed with URLs without embedding the content, via::
+
+ Video('./video.mp4')
+
+ mimetype : unicode
+ Specify the mimetype for embedded videos.
+ Default will be guessed from file extension, if available.
+
+ width : int
+ Width in pixels to which to constrain the video in HTML.
+ If not supplied, defaults to the width of the video.
+
+ height : int
+ Height in pixels to which to constrain the video in HTML.
+ If not supplied, defaults to the height of the video.
+
+ html_attributes : str
+ Attributes for the HTML ``<video>`` block.
+ Default: ``"controls"`` to get video controls.
+ Other examples: ``"controls muted"`` for muted video with controls,
+ ``"loop autoplay"`` for looping autoplaying video without controls.
+
+ Examples
+ --------
+ ::
+
+ Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
+ Video('path/to/video.mp4')
+ Video('path/to/video.mp4', embed=True)
+ Video('path/to/video.mp4', embed=True, html_attributes="controls muted autoplay")
+ Video(b'raw-videodata', embed=True)
+ """
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if url is None and isinstance(data, str) and data.startswith(('http:', 'https:')):
+ url = data
+ data = None
+ elif data is not None and os.path.exists(data):
+ filename = data
+ data = None
+
+ if data and not embed:
+ msg = ''.join([
+ "To embed videos, you must pass embed=True ",
+ "(this may make your notebook files huge)\n",
+ "Consider passing Video(url='...')",
+ ])
+ raise ValueError(msg)
+
+ self.mimetype = mimetype
+ self.embed = embed
+ self.width = width
+ self.height = height
+ self.html_attributes = html_attributes
+ super(Video, self).__init__(data=data, url=url, filename=filename)
+
+ def _repr_html_(self):
+ width = height = ''
+ if self.width:
+ width = ' width="%d"' % self.width
+ if self.height:
+ height = ' height="%d"' % self.height
+
+ # External URLs and potentially local files are not embedded into the
+ # notebook output.
+ if not self.embed:
+ url = self.url if self.url is not None else self.filename
+ output = """<video src="{0}" {1} {2} {3}>
+ Your browser does not support the <code>video</code> element.
+ </video>""".format(url, self.html_attributes, width, height)
+ return output
+
+ # Embedded videos are base64-encoded.
+ mimetype = self.mimetype
+ if self.filename is not None:
+ if not mimetype:
+ mimetype, _ = mimetypes.guess_type(self.filename)
+
+ with open(self.filename, 'rb') as f:
+ video = f.read()
+ else:
+ video = self.data
+ if isinstance(video, str):
+ # unicode input is already b64-encoded
+ b64_video = video
+ else:
+ b64_video = b2a_base64(video, newline=False).decode("ascii").rstrip()
+
+ output = """<video {0} {1} {2}>
+ <source src="data:{3};base64,{4}" type="{3}">
+ Your browser does not support the video tag.
+ </video>""".format(self.html_attributes, width, height, mimetype, b64_video)
+ return output
+
+ def reload(self):
+ # TODO
+ pass
+
+
+@skip_doctest
+def set_matplotlib_formats(*formats, **kwargs):
+ """
+ .. deprecated:: 7.23
+
+ use `matplotlib_inline.backend_inline.set_matplotlib_formats()`
+
+ Select figure formats for the inline backend. Optionally pass quality for JPEG.
+
+ For example, this enables PNG and JPEG output with a JPEG quality of 90%::
+
+ In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
+
+ To set this in your config files use the following::
+
+ c.InlineBackend.figure_formats = {'png', 'jpeg'}
+ c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
+
+ Parameters
+ ----------
+ *formats : strs
+ One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
+ **kwargs
+ Keyword args will be relayed to ``figure.canvas.print_figure``.
+ """
+ warnings.warn(
+ "`set_matplotlib_formats` is deprecated since IPython 7.23, directly "
+ "use `matplotlib_inline.backend_inline.set_matplotlib_formats()`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from matplotlib_inline.backend_inline import (
+ set_matplotlib_formats as set_matplotlib_formats_orig,
+ )
+
+ set_matplotlib_formats_orig(*formats, **kwargs)
+
+@skip_doctest
+def set_matplotlib_close(close=True):
+ """
+ .. deprecated:: 7.23
+
+ use `matplotlib_inline.backend_inline.set_matplotlib_close()`
+
+ Set whether the inline backend closes all figures automatically or not.
+
+ By default, the inline backend used in the IPython Notebook will close all
+ matplotlib figures automatically after each cell is run. This means that
+ plots in different cells won't interfere. Sometimes, you may want to make
+ a plot in one cell and then refine it in later cells. This can be accomplished
+ by::
+
+ In [1]: set_matplotlib_close(False)
+
+ To set this in your config files use the following::
+
+ c.InlineBackend.close_figures = False
+
+ Parameters
+ ----------
+ close : bool
+ Should all matplotlib figures be automatically closed after each cell is
+ run?
+ """
+ warnings.warn(
+ "`set_matplotlib_close` is deprecated since IPython 7.23, directly "
+ "use `matplotlib_inline.backend_inline.set_matplotlib_close()`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from matplotlib_inline.backend_inline import (
+ set_matplotlib_close as set_matplotlib_close_orig,
+ )
+
+ set_matplotlib_close_orig(close)
diff --git a/contrib/python/ipython/py3/IPython/core/display_functions.py b/contrib/python/ipython/py3/IPython/core/display_functions.py
new file mode 100644
index 0000000000..567cf3fa60
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/display_functions.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+"""Top-level display functions for displaying object in different formats."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+from binascii import b2a_hex
+import os
+import sys
+import warnings
+
+__all__ = ['display', 'clear_output', 'publish_display_data', 'update_display', 'DisplayHandle']
+
+#-----------------------------------------------------------------------------
+# utility functions
+#-----------------------------------------------------------------------------
+
+
+def _merge(d1, d2):
+ """Like update, but merges sub-dicts instead of clobbering at the top level.
+
+ Updates d1 in-place
+ """
+
+ if not isinstance(d2, dict) or not isinstance(d1, dict):
+ return d2
+ for key, value in d2.items():
+ d1[key] = _merge(d1.get(key), value)
+ return d1
+
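+# A small sketch of the merge semantics (sub-dicts merge, scalars overwrite):
+def _example_merge():  # pragma: no cover - illustrative sketch
+ d1 = {"text/html": {"isolated": True}}
+ d2 = {"text/html": {"height": 100}, "text/plain": {}}
+ assert _merge(d1, d2) == {"text/html": {"isolated": True, "height": 100}, "text/plain": {}}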
+
+#-----------------------------------------------------------------------------
+# Main functions
+#-----------------------------------------------------------------------------
+
+class _Sentinel:
+ def __repr__(self):
+ return "<deprecated>"
+
+
+_sentinel = _Sentinel()
+
+# use * to indicate transient is keyword-only
+def publish_display_data(
+ data, metadata=None, source=_sentinel, *, transient=None, **kwargs
+):
+ """Publish data and metadata to all frontends.
+
+ See the ``display_data`` message in the messaging documentation for
+ more details about this message type.
+
+ Keys of data and metadata can be any mime-type.
+
+ Parameters
+ ----------
+ data : dict
+ A dictionary having keys that are valid MIME types (like
+ 'text/plain' or 'image/svg+xml') and values that are the data for
+ that MIME type. The data itself must be a JSON'able data
+ structure. Minimally all data should have the 'text/plain' data,
+ which can be displayed by all frontends. If more than the plain
+ text is given, it is up to the frontend to decide which
+ representation to use.
+ metadata : dict
+ A dictionary for metadata related to the data. This can contain
+ arbitrary key, value pairs that frontends can use to interpret
+ the data. mime-type keys matching those in data can be used
+ to specify metadata about particular representations.
+ source : str, deprecated
+ Unused.
+ transient : dict, keyword-only
+ A dictionary of transient data, such as display_id.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+
+ if source is not _sentinel:
+ warnings.warn(
+ "The `source` parameter emit a deprecation warning since"
+ " IPython 8.0, it had no effects for a long time and will "
+ " be removed in future versions.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ display_pub = InteractiveShell.instance().display_pub
+
+ # only pass transient if supplied,
+ # to avoid errors with older ipykernel.
+ # TODO: We could check for ipykernel version and provide a detailed upgrade message.
+ if transient:
+ kwargs['transient'] = transient
+
+ display_pub.publish(
+ data=data,
+ metadata=metadata,
+ **kwargs
+ )
+
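+# A minimal sketch of publishing a multi-representation payload (assumes an
+# initialized InteractiveShell; the metadata keys mirror the data keys):
+def _example_publish():  # pragma: no cover - illustrative sketch
+ publish_display_data(
+ data={"text/plain": "hello", "text/html": "<b>hello</b>"},
+ metadata={"text/html": {}},  # per-mimetype metadata, matching keys in data
+ )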
+
+def _new_id():
+ """Generate a new random text id with urandom"""
+ return b2a_hex(os.urandom(16)).decode('ascii')
+
+
+def display(
+ *objs,
+ include=None,
+ exclude=None,
+ metadata=None,
+ transient=None,
+ display_id=None,
+ raw=False,
+ clear=False,
+ **kwargs
+):
+ """Display a Python object in all frontends.
+
+ By default all representations will be computed and sent to the frontends.
+ Frontends can decide which representation is used and how.
+
+ In terminal IPython this will be similar to using :func:`print`, for use in richer
+ frontends see Jupyter notebook examples with rich display logic.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display.
+ raw : bool, optional
+ Are the objects to be displayed already mimetype-keyed dicts of raw display data,
+ or Python objects that need to be formatted before display? [default: False]
+ include : list, tuple or set, optional
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set *only* the format types included
+ in this list will be computed.
+ exclude : list, tuple or set, optional
+ A list of format type strings (MIME types) to exclude from the format
+ data dict. If this is set all format types will be computed,
+ except for those included in this argument.
+ metadata : dict, optional
+ A dictionary of metadata to associate with the output.
+ mime-type keys in this dictionary will be associated with the individual
+ representation formats, if they exist.
+ transient : dict, optional
+ A dictionary of transient data to associate with the output.
+ Data in this dict should not be persisted to files (e.g. notebooks).
+ display_id : str or bool, optional
+ Set an id for the display.
+ This id can be used for updating this display area later via update_display.
+ If given as `True`, generate a new `display_id`
+ clear : bool, optional
+ Should the output area be cleared before displaying anything? If True,
+ this will wait for additional output before clearing. [default: False]
+ **kwargs : additional keyword-args, optional
+ Additional keyword-arguments are passed through to the display publisher.
+
+ Returns
+ -------
+ handle: DisplayHandle
+ Returns a handle on updatable displays for use with :func:`update_display`,
+ if `display_id` is given. Returns :any:`None` if no `display_id` is given
+ (default).
+
+ Examples
+ --------
+ >>> class Json(object):
+ ... def __init__(self, json):
+ ... self.json = json
+ ... def _repr_pretty_(self, pp, cycle):
+ ... import json
+ ... pp.text(json.dumps(self.json, indent=2))
+ ... def __repr__(self):
+ ... return str(self.json)
+ ...
+
+ >>> d = Json({1:2, 3: {4:5}})
+
+ >>> print(d)
+ {1: 2, 3: {4: 5}}
+
+ >>> display(d)
+ {
+ "1": 2,
+ "3": {
+ "4": 5
+ }
+ }
+
+ >>> def int_formatter(integer, pp, cycle):
+ ... pp.text('I'*integer)
+
+ >>> plain = get_ipython().display_formatter.formatters['text/plain']
+ >>> plain.for_type(int, int_formatter)
+ <function _repr_pprint at 0x...>
+ >>> display(7-5)
+ II
+
+ >>> del plain.type_printers[int]
+ >>> display(7-5)
+ 2
+
+ See Also
+ --------
+ :func:`update_display`
+
+ Notes
+ -----
+ In Python, objects can declare their textual representation using the
+ `__repr__` method. IPython expands on this idea and allows objects to declare
+ other, rich representations including:
+
+ - HTML
+ - JSON
+ - PNG
+ - JPEG
+ - SVG
+ - LaTeX
+
+ A single object can declare some or all of these representations; all are
+ handled by IPython's display system.
+
+ There are two ways to adapt objects for rich display. The first is to
+ implement special display methods when you define your class, one for each
+ representation you want to use. Here is a list of the names of the special
+ methods and the values they must return:
+
+ - `_repr_html_`: return raw HTML as a string, or a tuple (see below).
+ - `_repr_json_`: return a JSONable dict, or a tuple (see below).
+ - `_repr_jpeg_`: return raw JPEG data, or a tuple (see below).
+ - `_repr_png_`: return raw PNG data, or a tuple (see below).
+ - `_repr_svg_`: return raw SVG data as a string, or a tuple (see below).
+ - `_repr_latex_`: return LaTeX commands in a string surrounded by "$",
+ or a tuple (see below).
+ - `_repr_mimebundle_`: return a full mimebundle containing the mapping
+ from all mimetypes to data.
+ Use this for any mime-type not listed above.
+
+ The above functions may also return the object's metadata alongside the
+ data. If the metadata is available, the functions will return a tuple
+ containing the data and metadata, in that order. If there is no metadata
+ available, then the functions will return the data only.
+
+ When you are directly writing your own classes, you can adapt them for
+ display in IPython by following the above approach. But in practice, you
+ often need to work with existing classes that you can't easily modify.
+
+ You can refer to the documentation on integrating with the display system in
+ order to register custom formatters for already existing types
+ (:ref:`integrating_rich_display`).
+
+ .. versionadded:: 5.4 display available without import
+ .. versionadded:: 6.1 display available without import
+
+ Since IPython 5.4 and 6.1 :func:`display` is automatically made available to
+ the user without import. If you are using display in a document that might
+ be used in a pure Python context or with older versions of IPython, use the
+ following import at the top of your file::
+
+ from IPython.display import display
+
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+
+ if not InteractiveShell.initialized():
+ # Directly print objects.
+ print(*objs)
+ return
+
+ if transient is None:
+ transient = {}
+ if metadata is None:
+ metadata={}
+ if display_id:
+ if display_id is True:
+ display_id = _new_id()
+ transient['display_id'] = display_id
+ if kwargs.get('update') and 'display_id' not in transient:
+ raise TypeError('display_id required for update_display')
+ if transient:
+ kwargs['transient'] = transient
+
+ if not objs and display_id:
+ # if given no objects, but still a request for a display_id,
+ # we assume the user wants to insert an empty output that
+ # can be updated later
+ objs = [{}]
+ raw = True
+
+ if not raw:
+ format = InteractiveShell.instance().display_formatter.format
+
+ if clear:
+ clear_output(wait=True)
+
+ for obj in objs:
+ if raw:
+ publish_display_data(data=obj, metadata=metadata, **kwargs)
+ else:
+ format_dict, md_dict = format(obj, include=include, exclude=exclude)
+ if not format_dict:
+ # nothing to display (e.g. _ipython_display_ took over)
+ continue
+ if metadata:
+ # kwarg-specified metadata gets precedence
+ _merge(md_dict, metadata)
+ publish_display_data(data=format_dict, metadata=md_dict, **kwargs)
+ if display_id:
+ return DisplayHandle(display_id)
+
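+# A sketch of the first approach described in the Notes above: a class opting
+# into rich display by defining one of the _repr_*_ methods (the class name and
+# markup are made up for illustration):
+class _ExampleBadge:  # pragma: no cover - illustrative sketch
+ def __init__(self, label):
+ self.label = label
+
+ def _repr_html_(self):
+ # may also return a (data, metadata) tuple; here only the HTML string is returned
+ return "<span style='border:1px solid #999'>%s</span>" % self.label
+
+# display(_ExampleBadge("ok")) would then send both text/plain and text/html
+# representations to the frontend, which picks the richest one it supports.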
+
+# use * for keyword-only display_id arg
+def update_display(obj, *, display_id, **kwargs):
+ """Update an existing display by id
+
+ Parameters
+ ----------
+ obj
+ The object with which to update the display
+ display_id : keyword-only
+ The id of the display to update
+
+ See Also
+ --------
+ :func:`display`
+ """
+ kwargs['update'] = True
+ display(obj, display_id=display_id, **kwargs)
+
+
+class DisplayHandle(object):
+ """A handle on an updatable display
+
+ Call `.update(obj)` to display a new object.
+
+ Call `.display(obj)` to add a new instance of this display,
+ and update existing instances.
+
+ See Also
+ --------
+
+ :func:`display`, :func:`update_display`
+
+ """
+
+ def __init__(self, display_id=None):
+ if display_id is None:
+ display_id = _new_id()
+ self.display_id = display_id
+
+ def __repr__(self):
+ return "<%s display_id=%s>" % (self.__class__.__name__, self.display_id)
+
+ def display(self, obj, **kwargs):
+ """Make a new display with my id, updating existing instances.
+
+ Parameters
+ ----------
+ obj
+ object to display
+ **kwargs
+ additional keyword arguments passed to display
+ """
+ display(obj, display_id=self.display_id, **kwargs)
+
+ def update(self, obj, **kwargs):
+ """Update existing displays with my id
+
+ Parameters
+ ----------
+ obj
+ object to display
+ **kwargs
+ additional keyword arguments passed to update_display
+ """
+ update_display(obj, display_id=self.display_id, **kwargs)
+
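+# A sketch tying display(), update_display() and DisplayHandle together
+# (assumes an interactive IPython session, so display() returns a handle):
+def _example_updatable_display():  # pragma: no cover - illustrative sketch
+ handle = display("working...", display_id=True)  # returns a DisplayHandle
+ handle.update("done")  # rewrites that output area in place
+ update_display("done!", display_id=handle.display_id)  # equivalent low-level form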
+
+def clear_output(wait=False):
+ """Clear the output of the current cell receiving output.
+
+ Parameters
+ ----------
+ wait : bool [default: False]
+ Wait to clear the output until new output is available to replace it."""
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ InteractiveShell.instance().display_pub.clear_output(wait)
+ else:
+ print('\033[2K\r', end='')
+ sys.stdout.flush()
+ print('\033[2K\r', end='')
+ sys.stderr.flush()
diff --git a/contrib/python/ipython/py3/IPython/core/display_trap.py b/contrib/python/ipython/py3/IPython/core/display_trap.py
new file mode 100644
index 0000000000..9931dfe2df
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/display_trap.py
@@ -0,0 +1,70 @@
+# encoding: utf-8
+"""
+A context manager for handling sys.displayhook.
+
+Authors:
+
+* Robert Kern
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+from traitlets.config.configurable import Configurable
+from traitlets import Any
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+class DisplayTrap(Configurable):
+ """Object to manage sys.displayhook.
+
+ This came from IPython.core.kernel.display_hook, but is simplified
+ (no callbacks or formatters) until more of the core is refactored.
+ """
+
+ hook = Any()
+
+ def __init__(self, hook=None):
+ super(DisplayTrap, self).__init__(hook=hook, config=None)
+ self.old_hook = None
+ # We define this to track if a single DisplayTrap is nested.
+ # Only turn off the trap when the outermost call to __exit__ is made.
+ self._nested_level = 0
+
+ def __enter__(self):
+ if self._nested_level == 0:
+ self.set()
+ self._nested_level += 1
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self._nested_level == 1:
+ self.unset()
+ self._nested_level -= 1
+ # Returning False will cause exceptions to propagate
+ return False
+
+ def set(self):
+ """Set the hook."""
+ if sys.displayhook is not self.hook:
+ self.old_hook = sys.displayhook
+ sys.displayhook = self.hook
+
+ def unset(self):
+ """Unset the hook."""
+ sys.displayhook = self.old_hook
+
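+# A sketch of the intended use: temporarily swap in a custom displayhook and
+# have the previous one restored on exit (the hook below is made up):
+def _example_display_trap():  # pragma: no cover - illustrative sketch
+ def quiet_hook(value):
+ pass  # swallow expression results instead of printing them
+
+ with DisplayTrap(hook=quiet_hook):
+ pass  # inside the block sys.displayhook is quiet_hook
+ # on exit the original sys.displayhook is restored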
diff --git a/contrib/python/ipython/py3/IPython/core/displayhook.py b/contrib/python/ipython/py3/IPython/core/displayhook.py
new file mode 100644
index 0000000000..aba4f904d8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/displayhook.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+"""Displayhook for IPython.
+
+This defines a callable class that IPython uses for `sys.displayhook`.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import builtins as builtin_mod
+import sys
+import io as _io
+import tokenize
+
+from traitlets.config.configurable import Configurable
+from traitlets import Instance, Float
+from warnings import warn
+
+# TODO: Move the various attributes (cache_size, [others now moved]). Some
+# of these are also attributes of InteractiveShell. They should be on ONE object
+# only and the other objects should ask that one object for their values.
+
+class DisplayHook(Configurable):
+ """The custom IPython displayhook to replace sys.displayhook.
+
+ This class does many things, but the basic idea is that it is a callable
+ that gets called anytime user code returns a value.
+ """
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
+ allow_none=True)
+ cull_fraction = Float(0.2)
+
+ def __init__(self, shell=None, cache_size=1000, **kwargs):
+ super(DisplayHook, self).__init__(shell=shell, **kwargs)
+ cache_size_min = 3
+ if cache_size <= 0:
+ self.do_full_cache = 0
+ cache_size = 0
+ elif cache_size < cache_size_min:
+ self.do_full_cache = 0
+ cache_size = 0
+ warn('caching was disabled (min value for cache size is %s).' %
+ cache_size_min,stacklevel=3)
+ else:
+ self.do_full_cache = 1
+
+ self.cache_size = cache_size
+
+ # we need a reference to the user-level namespace
+ self.shell = shell
+
+ self._,self.__,self.___ = '','',''
+
+ # these are deliberately global:
+ to_user_ns = {'_':self._,'__':self.__,'___':self.___}
+ self.shell.user_ns.update(to_user_ns)
+
+ @property
+ def prompt_count(self):
+ return self.shell.execution_count
+
+ #-------------------------------------------------------------------------
+ # Methods used in __call__. Override these methods to modify the behavior
+ # of the displayhook.
+ #-------------------------------------------------------------------------
+
+ def check_for_underscore(self):
+ """Check if the user has set the '_' variable by hand."""
+ # If something injected a '_' variable in __builtin__, delete
+ # ipython's automatic one so we don't clobber that. gettext() in
+ # particular uses _, so we need to stay away from it.
+ if '_' in builtin_mod.__dict__:
+ try:
+ user_value = self.shell.user_ns['_']
+ if user_value is not self._:
+ return
+ del self.shell.user_ns['_']
+ except KeyError:
+ pass
+
+ def quiet(self):
+ """Should we silence the display hook because of ';'?"""
+ # do not print output if input ends in ';'
+
+ try:
+ cell = self.shell.history_manager.input_hist_parsed[-1]
+ except IndexError:
+ # some uses of ipshellembed may fail here
+ return False
+
+ return self.semicolon_at_end_of_expression(cell)
+
+ @staticmethod
+ def semicolon_at_end_of_expression(expression):
+ """Parse Python expression and detects whether last token is ';'"""
+
+ sio = _io.StringIO(expression)
+ tokens = list(tokenize.generate_tokens(sio.readline))
+
+ for token in reversed(tokens):
+ if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
+ continue
+ if (token[0] == tokenize.OP) and (token[1] == ';'):
+ return True
+ else:
+ return False
+
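+ # A sketch of the rule implemented above (it is a staticmethod, so it can be
+ # called directly; the inputs below are ordinary single-line expressions):
+ #
+ # DisplayHook.semicolon_at_end_of_expression("1 + 1;")  # -> True, output suppressed
+ # DisplayHook.semicolon_at_end_of_expression("1 + 1")  # -> False
+ # DisplayHook.semicolon_at_end_of_expression("f(x);  # note")  # -> True, comments are skipped
+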
+ def start_displayhook(self):
+ """Start the displayhook, initializing resources."""
+ pass
+
+ def write_output_prompt(self):
+ """Write the output prompt.
+
+ The default implementation simply writes the prompt to
+ ``sys.stdout``.
+ """
+ # Use write, not print which adds an extra space.
+ sys.stdout.write(self.shell.separate_out)
+ outprompt = 'Out[{}]: '.format(self.shell.execution_count)
+ if self.do_full_cache:
+ sys.stdout.write(outprompt)
+
+ def compute_format_data(self, result):
+ """Compute format data of the object to be displayed.
+
+ The format data is a generalization of the :func:`repr` of an object.
+ In the default implementation the format data is a :class:`dict` of
+ key/value pairs where the keys are valid MIME types and the values
+ are JSON'able data structures containing the raw data for that MIME
+ type. It is up to frontends to pick a MIME type to use and
+ display that data in an appropriate manner.
+
+ This method only computes the format data for the object and should
+ NOT actually print or write that to a stream.
+
+ Parameters
+ ----------
+ result : object
+ The Python object passed to the display hook, whose format will be
+ computed.
+
+ Returns
+ -------
+ (format_dict, md_dict) : dict
+ format_dict is a :class:`dict` whose keys are valid MIME types and values are
+ JSON'able raw data for that MIME type. It is recommended that
+ the return value always include the "text/plain"
+ MIME type representation of the object.
+ md_dict is a :class:`dict` with the same MIME type keys
+ of metadata associated with each output.
+
+ """
+ return self.shell.display_formatter.format(result)
+
+ # This can be set to True by the write_output_prompt method in a subclass
+ prompt_end_newline = False
+
+ def write_format_data(self, format_dict, md_dict=None) -> None:
+ """Write the format data dict to the frontend.
+
+ The default version of this method simply writes the plain text
+ representation of the object to ``sys.stdout``. Subclasses should
+ override this method to send the entire `format_dict` to the
+ frontends.
+
+ Parameters
+ ----------
+ format_dict : dict
+ The format dict for the object passed to `sys.displayhook`.
+ md_dict : dict (optional)
+ The metadata dict to be associated with the display data.
+ """
+ if 'text/plain' not in format_dict:
+ # nothing to do
+ return
+ # We want to print because we want to always make sure we have a
+ # newline, even if all the prompt separators are ''. This is the
+ # standard IPython behavior.
+ result_repr = format_dict['text/plain']
+ if '\n' in result_repr:
+ # So that multi-line strings line up with the left column of
+ # the screen, instead of having the output prompt mess up
+ # their first line.
+ # We use the prompt template instead of the expanded prompt
+ # because the expansion may add ANSI escapes that will interfere
+ # with our ability to determine whether or not we should add
+ # a newline.
+ if not self.prompt_end_newline:
+ # But avoid extraneous empty lines.
+ result_repr = '\n' + result_repr
+
+ try:
+ print(result_repr)
+ except UnicodeEncodeError:
+ # If a character is not supported by the terminal encoding replace
+ # it with its \u or \x representation
+ print(result_repr.encode(sys.stdout.encoding,'backslashreplace').decode(sys.stdout.encoding))
+
+ def update_user_ns(self, result):
+ """Update user_ns with various things like _, __, _1, etc."""
+
+ # Avoid recursive reference when displaying _oh/Out
+ if self.cache_size and result is not self.shell.user_ns['_oh']:
+ if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
+ self.cull_cache()
+
+ # Don't overwrite '_' and friends if '_' is in __builtin__
+ # (otherwise we cause buggy behavior for things like gettext), and
+ # do not overwrite _, __ or ___ if one of these has been assigned
+ # by the user.
+ update_unders = True
+ for unders in ['_'*i for i in range(1,4)]:
+ if unders not in self.shell.user_ns:
+ continue
+ if getattr(self, unders) is not self.shell.user_ns.get(unders):
+ update_unders = False
+
+ self.___ = self.__
+ self.__ = self._
+ self._ = result
+
+ if ('_' not in builtin_mod.__dict__) and (update_unders):
+ self.shell.push({'_':self._,
+ '__':self.__,
+ '___':self.___}, interactive=False)
+
+ # hackish access to top-level namespace to create _1,_2... dynamically
+ to_main = {}
+ if self.do_full_cache:
+ new_result = '_%s' % self.prompt_count
+ to_main[new_result] = result
+ self.shell.push(to_main, interactive=False)
+ self.shell.user_ns['_oh'][self.prompt_count] = result
+
+ def fill_exec_result(self, result):
+ if self.exec_result is not None:
+ self.exec_result.result = result
+
+ def log_output(self, format_dict):
+ """Log the output."""
+ if 'text/plain' not in format_dict:
+ # nothing to do
+ return
+ if self.shell.logger.log_output:
+ self.shell.logger.log_write(format_dict['text/plain'], 'output')
+ self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
+ format_dict['text/plain']
+
+ def finish_displayhook(self):
+ """Finish up all displayhook activities."""
+ sys.stdout.write(self.shell.separate_out2)
+ sys.stdout.flush()
+
+ def __call__(self, result=None):
+ """Printing with history cache management.
+
+ This is invoked every time the interpreter needs to print, and is
+ activated by setting the variable sys.displayhook to it.
+ """
+ self.check_for_underscore()
+ if result is not None and not self.quiet():
+ self.start_displayhook()
+ self.write_output_prompt()
+ format_dict, md_dict = self.compute_format_data(result)
+ self.update_user_ns(result)
+ self.fill_exec_result(result)
+ if format_dict:
+ self.write_format_data(format_dict, md_dict)
+ self.log_output(format_dict)
+ self.finish_displayhook()
+
+ def cull_cache(self):
+ """Output cache is full, cull the oldest entries"""
+ oh = self.shell.user_ns.get('_oh', {})
+ sz = len(oh)
+ cull_count = max(int(sz * self.cull_fraction), 2)
+ warn('Output cache limit (currently {sz} entries) hit.\n'
+ 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
+
+ for i, n in enumerate(sorted(oh)):
+ if i >= cull_count:
+ break
+ self.shell.user_ns.pop('_%i' % n, None)
+ oh.pop(n, None)
+
+
+ def flush(self):
+ if not self.do_full_cache:
+ raise ValueError("You shouldn't have reached the cache flush "
+ "if full caching is not enabled!")
+ # delete auto-generated vars from global namespace
+
+ for n in range(1,self.prompt_count + 1):
+ key = '_'+repr(n)
+ try:
+ del self.shell.user_ns[key]
+ except: pass
+ # In some embedded circumstances, the user_ns doesn't have the
+ # '_oh' key set up.
+ oh = self.shell.user_ns.get('_oh', None)
+ if oh is not None:
+ oh.clear()
+
+ # Release our own references to objects:
+ self._, self.__, self.___ = '', '', ''
+
+ if '_' not in builtin_mod.__dict__:
+ self.shell.user_ns.update({'_':self._,'__':self.__,'___':self.___})
+ import gc
+ # TODO: Is this really needed?
+ # IronPython blocks here forever
+ if sys.platform != "cli":
+ gc.collect()
+
+
+class CapturingDisplayHook(object):
+ def __init__(self, shell, outputs=None):
+ self.shell = shell
+ if outputs is None:
+ outputs = []
+ self.outputs = outputs
+
+ def __call__(self, result=None):
+ if result is None:
+ return
+ format_dict, md_dict = self.shell.display_formatter.format(result)
+ self.outputs.append({ 'data': format_dict, 'metadata': md_dict })
diff --git a/contrib/python/ipython/py3/IPython/core/displaypub.py b/contrib/python/ipython/py3/IPython/core/displaypub.py
new file mode 100644
index 0000000000..74028ec79e
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/displaypub.py
@@ -0,0 +1,138 @@
+"""An interface for publishing rich data to frontends.
+
+There are two components of the display system:
+
+* Display formatters, which take a Python object and compute the
+ representation of the object in various formats (text, HTML, SVG, etc.).
+* The display publisher that is used to send the representation data to the
+ various frontends.
+
+This module defines the logic for display publishing. The display publisher uses
+the ``display_data`` message type that is defined in the IPython messaging
+spec.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+
+from traitlets.config.configurable import Configurable
+from traitlets import List
+
+# This used to be defined here - it is imported for backwards compatibility
+from .display_functions import publish_display_data
+
+#-----------------------------------------------------------------------------
+# Main payload class
+#-----------------------------------------------------------------------------
+
+
+class DisplayPublisher(Configurable):
+ """A traited class that publishes display data to frontends.
+
+ Instances of this class are created by the main IPython object and should
+ be accessed there.
+ """
+
+ def __init__(self, shell=None, *args, **kwargs):
+ self.shell = shell
+ super().__init__(*args, **kwargs)
+
+ def _validate_data(self, data, metadata=None):
+ """Validate the display data.
+
+ Parameters
+ ----------
+ data : dict
+            The format data dictionary.
+ metadata : dict
+ Any metadata for the data.
+ """
+
+ if not isinstance(data, dict):
+ raise TypeError('data must be a dict, got: %r' % data)
+ if metadata is not None:
+ if not isinstance(metadata, dict):
+                raise TypeError('metadata must be a dict, got: %r' % metadata)
+
+    # use * to indicate that transient and update are keyword-only
+ def publish(self, data, metadata=None, source=None, *, transient=None, update=False, **kwargs) -> None:
+ """Publish data and metadata to all frontends.
+
+ See the ``display_data`` message in the messaging documentation for
+ more details about this message type.
+
+ The following MIME types are currently implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ data : dict
+ A dictionary having keys that are valid MIME types (like
+ 'text/plain' or 'image/svg+xml') and values that are the data for
+ that MIME type. The data itself must be a JSON'able data
+ structure. Minimally all data should have the 'text/plain' data,
+ which can be displayed by all frontends. If more than the plain
+ text is given, it is up to the frontend to decide which
+ representation to use.
+ metadata : dict
+ A dictionary for metadata related to the data. This can contain
+ arbitrary key, value pairs that frontends can use to interpret
+ the data. Metadata specific to each mime-type can be specified
+ in the metadata dict with the same mime-type keys as
+ the data itself.
+ source : str, deprecated
+ Unused.
+ transient : dict, keyword-only
+ A dictionary for transient data.
+ Data in this dictionary should not be persisted as part of saving this output.
+ Examples include 'display_id'.
+ update : bool, keyword-only, default: False
+ If True, only update existing outputs with the same display_id,
+ rather than creating a new output.
+ """
+
+ handlers = {}
+ if self.shell is not None:
+ handlers = getattr(self.shell, 'mime_renderers', {})
+
+ for mime, handler in handlers.items():
+ if mime in data:
+ handler(data[mime], metadata.get(mime, None))
+ return
+
+ if 'text/plain' in data:
+ print(data['text/plain'])
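+
+    # Illustrative sketch of calling ``publish`` directly (comments only; in
+    # interactive use, ``IPython.display.display`` routes through the active
+    # publisher for you):
+    #
+    #     pub = DisplayPublisher()
+    #     pub.publish({'text/plain': 'hello', 'text/html': '<b>hello</b>'})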
+
+ def clear_output(self, wait=False):
+ """Clear the output of the cell receiving output."""
+ print('\033[2K\r', end='')
+ sys.stdout.flush()
+ print('\033[2K\r', end='')
+ sys.stderr.flush()
+
+
+class CapturingDisplayPublisher(DisplayPublisher):
+ """A DisplayPublisher that stores"""
+ outputs = List()
+
+ def publish(self, data, metadata=None, source=None, *, transient=None, update=False):
+ self.outputs.append({'data':data, 'metadata':metadata,
+ 'transient':transient, 'update':update})
+
+ def clear_output(self, wait=False):
+ super(CapturingDisplayPublisher, self).clear_output(wait)
+
+ # empty the list, *do not* reassign a new list
+ self.outputs.clear()
diff --git a/contrib/python/ipython/py3/IPython/core/error.py b/contrib/python/ipython/py3/IPython/core/error.py
new file mode 100644
index 0000000000..684cbc8da6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/error.py
@@ -0,0 +1,60 @@
+# encoding: utf-8
+"""
+Global exception classes for IPython.core.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+* Min Ragan-Kelley
+
+Notes
+-----
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Exception classes
+#-----------------------------------------------------------------------------
+
+class IPythonCoreError(Exception):
+ pass
+
+
+class TryNext(IPythonCoreError):
+ """Try next hook exception.
+
+ Raise this in your hook function to indicate that the next hook handler
+ should be used to handle the operation.
+ """
+
+class UsageError(IPythonCoreError):
+ """Error in magic function arguments, etc.
+
+ Something that probably won't warrant a full traceback, but should
+ nevertheless interrupt a macro / batch file.
+ """
+
+class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
+ """raw_input was requested in a context where it is not supported
+
+ For use in IPython kernels, where only some frontends may support
+ stdin requests.
+ """
+
+class InputRejected(Exception):
+ """Input rejected by ast transformer.
+
+ Raise this in your NodeTransformer to indicate that InteractiveShell should
+ not execute the supplied input.
+ """
diff --git a/contrib/python/ipython/py3/IPython/core/events.py b/contrib/python/ipython/py3/IPython/core/events.py
new file mode 100644
index 0000000000..3a66e75e5a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/events.py
@@ -0,0 +1,166 @@
+"""Infrastructure for registering and firing callbacks on application events.
+
+Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
+be called at specific times, or a collection of alternative methods to try,
+callbacks are designed to be used by extension authors. A number of callbacks
+can be registered for the same event without needing to be aware of one another.
+
+The functions defined in this module are no-ops indicating the names of available
+events and the arguments which will be passed to them.
+
+.. note::
+
+ This API is experimental in IPython 2.0, and may be revised in future versions.
+"""
+
+from backcall import callback_prototype
+
+
+class EventManager(object):
+ """Manage a collection of events and a sequence of callbacks for each.
+
+ This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
+ instances as an ``events`` attribute.
+
+ .. note::
+
+ This API is experimental in IPython 2.0, and may be revised in future versions.
+ """
+
+ def __init__(self, shell, available_events, print_on_error=True):
+ """Initialise the :class:`CallbackManager`.
+
+ Parameters
+ ----------
+ shell
+ The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
+ available_events
+ An iterable of names for callback events.
+        print_on_error:
+            A boolean flag controlling whether the EventManager prints a warning when a callback raises an error.
+ """
+ self.shell = shell
+ self.callbacks = {n:[] for n in available_events}
+ self.print_on_error = print_on_error
+
+ def register(self, event, function):
+ """Register a new event callback.
+
+ Parameters
+ ----------
+ event : str
+ The event for which to register this callback.
+ function : callable
+ A function to be called on the given event. It should take the same
+ parameters as the appropriate callback prototype.
+
+ Raises
+ ------
+ TypeError
+ If ``function`` is not callable.
+ KeyError
+ If ``event`` is not one of the known events.
+ """
+ if not callable(function):
+ raise TypeError('Need a callable, got %r' % function)
+ callback_proto = available_events.get(event)
+ if function not in self.callbacks[event]:
+ self.callbacks[event].append(callback_proto.adapt(function))
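+
+    # Illustrative sketch (comments only; assumes an active shell ``ip`` and
+    # that the ExecutionInfo argument exposes ``raw_cell``):
+    #
+    #     def log_cell(info):
+    #         print("about to run:", info.raw_cell)
+    #
+    #     ip.events.register('pre_run_cell', log_cell)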
+
+ def unregister(self, event, function):
+ """Remove a callback from the given event."""
+ if function in self.callbacks[event]:
+ return self.callbacks[event].remove(function)
+
+ # Remove callback in case ``function`` was adapted by `backcall`.
+ for callback in self.callbacks[event]:
+ try:
+ if callback.__wrapped__ is function:
+ return self.callbacks[event].remove(callback)
+ except AttributeError:
+ pass
+
+ raise ValueError('Function {!r} is not registered as a {} callback'.format(function, event))
+
+ def trigger(self, event, *args, **kwargs):
+ """Call callbacks for ``event``.
+
+ Any additional arguments are passed to all callbacks registered for this
+ event. Exceptions raised by callbacks are caught, and a message printed.
+ """
+ for func in self.callbacks[event][:]:
+ try:
+ func(*args, **kwargs)
+ except (Exception, KeyboardInterrupt):
+ if self.print_on_error:
+ print("Error in callback {} (for {}):".format(func, event))
+ self.shell.showtraceback()
+
+# event_name -> prototype mapping
+available_events = {}
+
+def _define_event(callback_function):
+ callback_proto = callback_prototype(callback_function)
+ available_events[callback_function.__name__] = callback_proto
+ return callback_proto
+
+# ------------------------------------------------------------------------------
+# Callback prototypes
+#
+# No-op functions which describe the names of available events and the
+# signatures of callbacks for those events.
+# ------------------------------------------------------------------------------
+
+@_define_event
+def pre_execute():
+ """Fires before code is executed in response to user/frontend action.
+
+ This includes comm and widget messages and silent execution, as well as user
+ code cells.
+ """
+ pass
+
+@_define_event
+def pre_run_cell(info):
+ """Fires before user-entered code runs.
+
+ Parameters
+ ----------
+ info : :class:`~IPython.core.interactiveshell.ExecutionInfo`
+ An object containing information used for the code execution.
+ """
+ pass
+
+@_define_event
+def post_execute():
+ """Fires after code is executed in response to user/frontend action.
+
+ This includes comm and widget messages and silent execution, as well as user
+ code cells.
+ """
+ pass
+
+@_define_event
+def post_run_cell(result):
+ """Fires after user-entered code runs.
+
+ Parameters
+ ----------
+ result : :class:`~IPython.core.interactiveshell.ExecutionResult`
+ The object which will be returned as the execution result.
+ """
+ pass
+
+@_define_event
+def shell_initialized(ip):
+ """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
+
+ This is before extensions and startup scripts are loaded, so it can only be
+ set by subclassing.
+
+ Parameters
+ ----------
+ ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
+ The newly initialised shell.
+ """
+ pass
diff --git a/contrib/python/ipython/py3/IPython/core/excolors.py b/contrib/python/ipython/py3/IPython/core/excolors.py
new file mode 100644
index 0000000000..85eef81f0e
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/excolors.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+"""
+Color schemes for exception handling code in IPython.
+"""
+
+import os
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+from IPython.utils.coloransi import ColorSchemeTable, TermColors, ColorScheme
+
+def exception_colors():
+ """Return a color table with fields for exception reporting.
+
+ The table is an instance of ColorSchemeTable with schemes added for
+    'Neutral', 'Linux', 'LightBG' and 'NoColor', and with the fields for
+    exception handling filled in.
+
+ Examples:
+
+ >>> ec = exception_colors()
+ >>> ec.active_scheme_name
+ ''
+ >>> print(ec.active_colors)
+ None
+
+ Now we activate a color scheme:
+ >>> ec.set_active_scheme('NoColor')
+ >>> ec.active_scheme_name
+ 'NoColor'
+ >>> sorted(ec.active_colors.keys())
+ ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
+ 'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
+ 'val', 'valEm']
+ """
+
+ ex_colors = ColorSchemeTable()
+
+ # Populate it with color schemes
+ C = TermColors # shorthand and local lookup
+ ex_colors.add_scheme(ColorScheme(
+ 'NoColor',
+ # The color to be used for the top line
+ topline = C.NoColor,
+
+ # The colors to be used in the traceback
+ filename = C.NoColor,
+ lineno = C.NoColor,
+ name = C.NoColor,
+ vName = C.NoColor,
+ val = C.NoColor,
+ em = C.NoColor,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.NoColor,
+ filenameEm = C.NoColor,
+ linenoEm = C.NoColor,
+ nameEm = C.NoColor,
+ valEm = C.NoColor,
+
+ # Colors for printing the exception
+ excName = C.NoColor,
+ line = C.NoColor,
+ caret = C.NoColor,
+ Normal = C.NoColor
+ ))
+
+ # make some schemes as instances so we can copy them for modification easily
+ ex_colors.add_scheme(ColorScheme(
+ 'Linux',
+ # The color to be used for the top line
+ topline = C.LightRed,
+
+ # The colors to be used in the traceback
+ filename = C.Green,
+ lineno = C.Green,
+ name = C.Purple,
+ vName = C.Cyan,
+ val = C.Green,
+ em = C.LightCyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.LightCyan,
+ filenameEm = C.LightGreen,
+ linenoEm = C.LightGreen,
+ nameEm = C.LightPurple,
+ valEm = C.LightBlue,
+
+ # Colors for printing the exception
+ excName = C.LightRed,
+ line = C.Yellow,
+ caret = C.White,
+ Normal = C.Normal
+ ))
+
+ # For light backgrounds, swap dark/light colors
+ ex_colors.add_scheme(ColorScheme(
+ 'LightBG',
+ # The color to be used for the top line
+ topline = C.Red,
+
+ # The colors to be used in the traceback
+ filename = C.LightGreen,
+ lineno = C.LightGreen,
+ name = C.LightPurple,
+ vName = C.Cyan,
+ val = C.LightGreen,
+ em = C.Cyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.Cyan,
+ filenameEm = C.Green,
+ linenoEm = C.Green,
+ nameEm = C.Purple,
+ valEm = C.Blue,
+
+ # Colors for printing the exception
+ excName = C.Red,
+ #line = C.Brown, # brown often is displayed as yellow
+ line = C.Red,
+ caret = C.Normal,
+ Normal = C.Normal,
+ ))
+
+ ex_colors.add_scheme(ColorScheme(
+ 'Neutral',
+ # The color to be used for the top line
+ topline = C.Red,
+
+ # The colors to be used in the traceback
+ filename = C.LightGreen,
+ lineno = C.LightGreen,
+ name = C.LightPurple,
+ vName = C.Cyan,
+ val = C.LightGreen,
+ em = C.Cyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.Cyan,
+ filenameEm = C.Green,
+ linenoEm = C.Green,
+ nameEm = C.Purple,
+ valEm = C.Blue,
+
+ # Colors for printing the exception
+ excName = C.Red,
+ #line = C.Brown, # brown often is displayed as yellow
+ line = C.Red,
+ caret = C.Normal,
+ Normal = C.Normal,
+ ))
+
+ # Hack: the 'neutral' colours are not very visible on a dark background on
+ # Windows. Since Windows command prompts have a dark background by default, and
+ # relatively few users are likely to alter that, we will use the 'Linux' colours,
+ # designed for a dark background, as the default on Windows.
+ if os.name == "nt":
+ ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
+
+ return ex_colors
diff --git a/contrib/python/ipython/py3/IPython/core/extensions.py b/contrib/python/ipython/py3/IPython/core/extensions.py
new file mode 100644
index 0000000000..21fba40eaf
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/extensions.py
@@ -0,0 +1,151 @@
+# encoding: utf-8
+"""A class for managing IPython extensions."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import os.path
+import sys
+from importlib import import_module, reload
+
+from traitlets.config.configurable import Configurable
+from IPython.utils.path import ensure_dir_exists, compress_user
+from IPython.utils.decorators import undoc
+from traitlets import Instance
+
+
+#-----------------------------------------------------------------------------
+# Main class
+#-----------------------------------------------------------------------------
+
+BUILTINS_EXTS = {"storemagic": False, "autoreload": False}
+
+
+class ExtensionManager(Configurable):
+ """A class to manage IPython extensions.
+
+ An IPython extension is an importable Python module that has
+ a function with the signature::
+
+ def load_ipython_extension(ipython):
+ # Do things with ipython
+
+ This function is called after your extension is imported and the
+ currently active :class:`InteractiveShell` instance is passed as
+ the only argument. You can do anything you want with IPython at
+ that point, including defining new magic and aliases, adding new
+ components, etc.
+
+ You can also optionally define an :func:`unload_ipython_extension(ipython)`
+ function, which will be called if the user unloads or reloads the extension.
+ The extension manager will only call :func:`load_ipython_extension` again
+ if the extension is reloaded.
+
+ You can put your extension modules anywhere you want, as long as
+ they can be imported by Python's standard import mechanism. However,
+ to make it easy to write extensions, you can also put your extensions
+ in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
+ is added to ``sys.path`` automatically.
+ """
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(ExtensionManager, self).__init__(shell=shell, **kwargs)
+ self.shell.observe(
+ self._on_ipython_dir_changed, names=('ipython_dir',)
+ )
+ self.loaded = set()
+
+ @property
+ def ipython_extension_dir(self):
+ return os.path.join(self.shell.ipython_dir, u'extensions')
+
+ def _on_ipython_dir_changed(self, change):
+ ensure_dir_exists(self.ipython_extension_dir)
+
+ def load_extension(self, module_str: str):
+ """Load an IPython extension by its module name.
+
+ Returns the string "already loaded" if the extension is already loaded,
+ "no load function" if the module doesn't have a load_ipython_extension
+ function, or None if it succeeded.
+ """
+ try:
+ return self._load_extension(module_str)
+ except ModuleNotFoundError:
+ if module_str in BUILTINS_EXTS:
+ BUILTINS_EXTS[module_str] = True
+ return self._load_extension("IPython.extensions." + module_str)
+ raise
+
+ def _load_extension(self, module_str: str):
+ if module_str in self.loaded:
+ return "already loaded"
+
+ from IPython.utils.syspathcontext import prepended_to_syspath
+
+ with self.shell.builtin_trap:
+ if module_str not in sys.modules:
+ mod = import_module(module_str)
+ mod = sys.modules[module_str]
+ if self._call_load_ipython_extension(mod):
+ self.loaded.add(module_str)
+ else:
+ return "no load function"
+
+ def unload_extension(self, module_str: str):
+ """Unload an IPython extension by its module name.
+
+ This function looks up the extension's name in ``sys.modules`` and
+ simply calls ``mod.unload_ipython_extension(self)``.
+
+ Returns the string "no unload function" if the extension doesn't define
+ a function to unload itself, "not loaded" if the extension isn't loaded,
+ otherwise None.
+ """
+ if BUILTINS_EXTS.get(module_str, False) is True:
+ module_str = "IPython.extensions." + module_str
+ if module_str not in self.loaded:
+ return "not loaded"
+
+ if module_str in sys.modules:
+ mod = sys.modules[module_str]
+ if self._call_unload_ipython_extension(mod):
+ self.loaded.discard(module_str)
+ else:
+ return "no unload function"
+
+ def reload_extension(self, module_str: str):
+ """Reload an IPython extension by calling reload.
+
+ If the module has not been loaded before,
+ :meth:`InteractiveShell.load_extension` is called. Otherwise
+ :func:`reload` is called and then the :func:`load_ipython_extension`
+        function of the module, if it exists, is called.
+ """
+ from IPython.utils.syspathcontext import prepended_to_syspath
+
+ if BUILTINS_EXTS.get(module_str, False) is True:
+ module_str = "IPython.extensions." + module_str
+
+ if (module_str in self.loaded) and (module_str in sys.modules):
+ self.unload_extension(module_str)
+ mod = sys.modules[module_str]
+ with prepended_to_syspath(self.ipython_extension_dir):
+ reload(mod)
+ if self._call_load_ipython_extension(mod):
+ self.loaded.add(module_str)
+ else:
+ self.load_extension(module_str)
+
+ def _call_load_ipython_extension(self, mod):
+ if hasattr(mod, 'load_ipython_extension'):
+ mod.load_ipython_extension(self.shell)
+ return True
+
+ def _call_unload_ipython_extension(self, mod):
+ if hasattr(mod, 'unload_ipython_extension'):
+ mod.unload_ipython_extension(self.shell)
+ return True
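+
+# A minimal extension sketch (comments only; ``myext`` is a hypothetical module
+# name). Loading it via ``%load_ext myext`` or
+# ``ip.extension_manager.load_extension("myext")`` calls the function below:
+#
+#     # myext.py (hypothetical module)
+#     def load_ipython_extension(ipython):
+#         ipython.push({"answer": 42})   # e.g. inject a variable into user_ns
+#
+#     def unload_ipython_extension(ipython):
+#         ipython.user_ns.pop("answer", None)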
diff --git a/contrib/python/ipython/py3/IPython/core/formatters.py b/contrib/python/ipython/py3/IPython/core/formatters.py
new file mode 100644
index 0000000000..15cf703c2a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/formatters.py
@@ -0,0 +1,1028 @@
+# -*- coding: utf-8 -*-
+"""Display formatters.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.formatters
+ :parts: 3
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import abc
+import sys
+import traceback
+import warnings
+from io import StringIO
+
+from decorator import decorator
+
+from traitlets.config.configurable import Configurable
+from .getipython import get_ipython
+from ..utils.sentinel import Sentinel
+from ..utils.dir2 import get_real_method
+from ..lib import pretty
+from traitlets import (
+ Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
+ ForwardDeclaredInstance,
+ default, observe,
+)
+
+from typing import Any
+
+
+class DisplayFormatter(Configurable):
+
+ active_types = List(Unicode(),
+ help="""List of currently active mime-types to display.
+ You can use this to set a white-list for formats to display.
+
+ Most users will not need to change this value.
+ """).tag(config=True)
+
+ @default('active_types')
+ def _active_types_default(self):
+ return self.format_types
+
+ @observe('active_types')
+ def _active_types_changed(self, change):
+ for key, formatter in self.formatters.items():
+ if key in change['new']:
+ formatter.enabled = True
+ else:
+ formatter.enabled = False
+
+ ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
+ @default('ipython_display_formatter')
+ def _default_formatter(self):
+ return IPythonDisplayFormatter(parent=self)
+
+ mimebundle_formatter = ForwardDeclaredInstance('FormatterABC')
+ @default('mimebundle_formatter')
+ def _default_mime_formatter(self):
+ return MimeBundleFormatter(parent=self)
+
+    # A dict of formatters whose keys are format types (MIME types) and whose
+ # values are subclasses of BaseFormatter.
+ formatters = Dict()
+ @default('formatters')
+ def _formatters_default(self):
+ """Activate the default formatters."""
+ formatter_classes = [
+ PlainTextFormatter,
+ HTMLFormatter,
+ MarkdownFormatter,
+ SVGFormatter,
+ PNGFormatter,
+ PDFFormatter,
+ JPEGFormatter,
+ LatexFormatter,
+ JSONFormatter,
+ JavascriptFormatter
+ ]
+ d = {}
+ for cls in formatter_classes:
+ f = cls(parent=self)
+ d[f.format_type] = f
+ return d
+
+ def format(self, obj, include=None, exclude=None):
+ """Return a format data dict for an object.
+
+ By default all format types will be computed.
+
+ The following MIME types are usually implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * application/pdf
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ obj : object
+ The Python object whose format data will be computed.
+ include : list, tuple or set; optional
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set *only* the format types included
+ in this list will be computed.
+ exclude : list, tuple or set; optional
+            A list of format type strings (MIME types) to exclude from the format
+ data dict. If this is set all format types will be computed,
+ except for those included in this argument.
+ Mimetypes present in exclude will take precedence over the ones in include
+
+ Returns
+ -------
+ (format_dict, metadata_dict) : tuple of two dicts
+            format_dict is a dictionary of key/value pairs, one for each format that was
+            generated for the object. The keys are the format types, which
+            will usually be MIME type strings, and the values are JSON'able
+            data structures containing the raw data for the representation in
+ that format.
+
+ metadata_dict is a dictionary of metadata about each mime-type output.
+ Its keys will be a strict subset of the keys in format_dict.
+
+ Notes
+ -----
+        If an object implements `_repr_mimebundle_` as well as various
+ `_repr_*_`, the data returned by `_repr_mimebundle_` will take
+ precedence and the corresponding `_repr_*_` for this mimetype will
+ not be called.
+
+ """
+ format_dict = {}
+ md_dict = {}
+
+ if self.ipython_display_formatter(obj):
+ # object handled itself, don't proceed
+ return {}, {}
+
+ format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
+
+ if format_dict or md_dict:
+ if include:
+ format_dict = {k:v for k,v in format_dict.items() if k in include}
+ md_dict = {k:v for k,v in md_dict.items() if k in include}
+ if exclude:
+ format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
+ md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
+
+ for format_type, formatter in self.formatters.items():
+ if format_type in format_dict:
+ # already got it from mimebundle, maybe don't render again.
+ # exception: manually registered per-mime renderer
+ # check priority:
+ # 1. user-registered per-mime formatter
+ # 2. mime-bundle (user-registered or repr method)
+ # 3. default per-mime formatter (e.g. repr method)
+ try:
+ formatter.lookup(obj)
+ except KeyError:
+ # no special formatter, use mime-bundle-provided value
+ continue
+ if include and format_type not in include:
+ continue
+ if exclude and format_type in exclude:
+ continue
+
+ md = None
+ try:
+ data = formatter(obj)
+ except:
+ # FIXME: log the exception
+ raise
+
+ # formatters can return raw data or (data, metadata)
+ if isinstance(data, tuple) and len(data) == 2:
+ data, md = data
+
+ if data is not None:
+ format_dict[format_type] = data
+ if md is not None:
+ md_dict[format_type] = md
+ return format_dict, md_dict
+
+ @property
+ def format_types(self):
+ """Return the format types (MIME types) of the active formatters."""
+ return list(self.formatters.keys())
+
+
+#-----------------------------------------------------------------------------
+# Formatters for specific format types (text, html, svg, etc.)
+#-----------------------------------------------------------------------------
+
+
+def _safe_repr(obj):
+ """Try to return a repr of an object
+
+ always returns a string, at least.
+ """
+ try:
+ return repr(obj)
+ except Exception as e:
+ return "un-repr-able object (%r)" % e
+
+
+class FormatterWarning(UserWarning):
+ """Warning class for errors in formatters"""
+
+@decorator
+def catch_format_error(method, self, *args, **kwargs):
+ """show traceback on failed format call"""
+ try:
+ r = method(self, *args, **kwargs)
+ except NotImplementedError:
+ # don't warn on NotImplementedErrors
+ return self._check_return(None, args[0])
+ except Exception:
+ exc_info = sys.exc_info()
+ ip = get_ipython()
+ if ip is not None:
+ ip.showtraceback(exc_info)
+ else:
+ traceback.print_exception(*exc_info)
+ return self._check_return(None, args[0])
+ return self._check_return(r, args[0])
+
+
+class FormatterABC(metaclass=abc.ABCMeta):
+ """ Abstract base class for Formatters.
+
+ A formatter is a callable class that is responsible for computing the
+ raw format data for a particular format type (MIME type). For example,
+ an HTML formatter would have a format type of `text/html` and would return
+ the HTML representation of the object when called.
+ """
+
+ # The format type of the data returned, usually a MIME type.
+ format_type = 'text/plain'
+
+ # Is the formatter enabled...
+ enabled = True
+
+ @abc.abstractmethod
+ def __call__(self, obj):
+ """Return a JSON'able representation of the object.
+
+ If the object cannot be formatted by this formatter,
+ warn and return None.
+ """
+ return repr(obj)
+
+
+def _mod_name_key(typ):
+ """Return a (__module__, __name__) tuple for a type.
+
+ Used as key in Formatter.deferred_printers.
+ """
+ module = getattr(typ, '__module__', None)
+ name = getattr(typ, '__name__', None)
+ return (module, name)
+
+
+def _get_type(obj):
+ """Return the type of an instance (old and new-style)"""
+ return getattr(obj, '__class__', None) or type(obj)
+
+
+_raise_key_error = Sentinel('_raise_key_error', __name__,
+"""
+Special value to raise a KeyError
+
+Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
+""")
+
+
+class BaseFormatter(Configurable):
+ """A base formatter class that is configurable.
+
+ This formatter should usually be used as the base class of all formatters.
+ It is a traited :class:`Configurable` class and includes an extensible
+ API for users to determine how their objects are formatted. The following
+    logic is used to find a function to format a given object.
+
+ 1. The object is introspected to see if it has a method with the name
+       :attr:`print_method`. If it does, that object is passed to that method
+ for formatting.
+ 2. If no print method is found, three internal dictionaries are consulted
+       to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
+ and :attr:`deferred_printers`.
+
+ Users should use these dictionaries to register functions that will be
+ used to compute the format data for their objects (if those objects don't
+ have the special print methods). The easiest way of using these
+ dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
+ methods.
+
+ If no function/callable is found to compute the format data, ``None`` is
+ returned and this format type is not used.
+ """
+
+ format_type = Unicode("text/plain")
+ _return_type: Any = str
+
+ enabled = Bool(True).tag(config=True)
+
+ print_method = ObjectName('__repr__')
+
+ # The singleton printers.
+ # Maps the IDs of the builtin singleton objects to the format functions.
+ singleton_printers = Dict().tag(config=True)
+
+ # The type-specific printers.
+ # Map type objects to the format functions.
+ type_printers = Dict().tag(config=True)
+
+ # The deferred-import type-specific printers.
+ # Map (modulename, classname) pairs to the format functions.
+ deferred_printers = Dict().tag(config=True)
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the format for an object."""
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ return printer(obj)
+ # Finally look for special method names
+ method = get_real_method(obj, self.print_method)
+ if method is not None:
+ return method()
+ return None
+ else:
+ return None
+
+ def __contains__(self, typ):
+ """map in to lookup_by_type"""
+ try:
+ self.lookup_by_type(typ)
+ except KeyError:
+ return False
+ else:
+ return True
+
+ def _check_return(self, r, obj):
+ """Check that a return value is appropriate
+
+ Return the value if so, None otherwise, warning if invalid.
+ """
+ if r is None or isinstance(r, self._return_type) or \
+ (isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
+ return r
+ else:
+ warnings.warn(
+ "%s formatter returned invalid type %s (expected %s) for object: %s" % \
+ (self.format_type, type(r), self._return_type, _safe_repr(obj)),
+ FormatterWarning
+ )
+
+ def lookup(self, obj):
+ """Look up the formatter for a given instance.
+
+ Parameters
+ ----------
+ obj : object instance
+
+ Returns
+ -------
+ f : callable
+ The registered formatting callable for the type.
+
+ Raises
+ ------
+ KeyError if the type has not been registered.
+ """
+ # look for singleton first
+ obj_id = id(obj)
+ if obj_id in self.singleton_printers:
+ return self.singleton_printers[obj_id]
+ # then lookup by type
+ return self.lookup_by_type(_get_type(obj))
+
+ def lookup_by_type(self, typ):
+ """Look up the registered formatter for a type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+
+ Returns
+ -------
+ f : callable
+ The registered formatting callable for the type.
+
+ Raises
+ ------
+ KeyError if the type has not been registered.
+ """
+ if isinstance(typ, str):
+ typ_key = tuple(typ.rsplit('.',1))
+ if typ_key not in self.deferred_printers:
+ # We may have it cached in the type map. We will have to
+ # iterate over all of the types to check.
+ for cls in self.type_printers:
+ if _mod_name_key(cls) == typ_key:
+ return self.type_printers[cls]
+ else:
+ return self.deferred_printers[typ_key]
+ else:
+ for cls in pretty._get_mro(typ):
+ if cls in self.type_printers or self._in_deferred_types(cls):
+ return self.type_printers[cls]
+
+ # If we have reached here, the lookup failed.
+ raise KeyError("No registered printer for {0!r}".format(typ))
+
+ def for_type(self, typ, func=None):
+ """Add a format function for a given type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+ The class of the object that will be formatted using `func`.
+
+ func : callable
+ A callable for computing the format data.
+ `func` will be called with the object to be formatted,
+ and will return the raw data in this formatter's format.
+ Subclasses may use a different call signature for the
+ `func` argument.
+
+ If `func` is None or not specified, there will be no change,
+ only returning the current value.
+
+ Returns
+ -------
+ oldfunc : callable
+ The currently registered callable.
+ If you are registering a new formatter,
+ this will be the previous value (to enable restoring later).
+ """
+ # if string given, interpret as 'pkg.module.class_name'
+ if isinstance(typ, str):
+ type_module, type_name = typ.rsplit('.', 1)
+ return self.for_type_by_name(type_module, type_name, func)
+
+ try:
+ oldfunc = self.lookup_by_type(typ)
+ except KeyError:
+ oldfunc = None
+
+ if func is not None:
+ self.type_printers[typ] = func
+
+ return oldfunc
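+
+    # Registration sketch (comments only; ``Vec`` is a hypothetical class):
+    #
+    #     html = get_ipython().display_formatter.formatters['text/html']
+    #     html.for_type(Vec, lambda v: "<b>%r</b>" % v)
+    #     # the return value is the previously registered callable, if any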
+
+ def for_type_by_name(self, type_module, type_name, func=None):
+ """Add a format function for a type specified by the full dotted
+ module and name of the type, rather than the type of the object.
+
+ Parameters
+ ----------
+ type_module : str
+ The full dotted name of the module the type is defined in, like
+ ``numpy``.
+
+ type_name : str
+ The name of the type (the class name), like ``dtype``
+
+ func : callable
+ A callable for computing the format data.
+ `func` will be called with the object to be formatted,
+ and will return the raw data in this formatter's format.
+ Subclasses may use a different call signature for the
+ `func` argument.
+
+ If `func` is None or unspecified, there will be no change,
+ only returning the current value.
+
+ Returns
+ -------
+ oldfunc : callable
+ The currently registered callable.
+ If you are registering a new formatter,
+ this will be the previous value (to enable restoring later).
+ """
+ key = (type_module, type_name)
+
+ try:
+ oldfunc = self.lookup_by_type("%s.%s" % key)
+ except KeyError:
+ oldfunc = None
+
+ if func is not None:
+ self.deferred_printers[key] = func
+ return oldfunc
+
+ def pop(self, typ, default=_raise_key_error):
+ """Pop a formatter for the given type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+ default : object
+ value to be returned if no formatter is registered for typ.
+
+ Returns
+ -------
+ obj : object
+ The last registered object for the type.
+
+ Raises
+ ------
+ KeyError if the type is not registered and default is not specified.
+ """
+
+ if isinstance(typ, str):
+ typ_key = tuple(typ.rsplit('.',1))
+ if typ_key not in self.deferred_printers:
+ # We may have it cached in the type map. We will have to
+ # iterate over all of the types to check.
+ for cls in self.type_printers:
+ if _mod_name_key(cls) == typ_key:
+ old = self.type_printers.pop(cls)
+ break
+ else:
+ old = default
+ else:
+ old = self.deferred_printers.pop(typ_key)
+ else:
+ if typ in self.type_printers:
+ old = self.type_printers.pop(typ)
+ else:
+ old = self.deferred_printers.pop(_mod_name_key(typ), default)
+ if old is _raise_key_error:
+ raise KeyError("No registered value for {0!r}".format(typ))
+ return old
+
+ def _in_deferred_types(self, cls):
+ """
+ Check if the given class is specified in the deferred type registry.
+
+ Successful matches will be moved to the regular type registry for future use.
+ """
+ mod = getattr(cls, '__module__', None)
+ name = getattr(cls, '__name__', None)
+ key = (mod, name)
+ if key in self.deferred_printers:
+ # Move the printer over to the regular registry.
+ printer = self.deferred_printers.pop(key)
+ self.type_printers[cls] = printer
+ return True
+ return False
+
+
+class PlainTextFormatter(BaseFormatter):
+ """The default pretty-printer.
+
+ This uses :mod:`IPython.lib.pretty` to compute the format data of
+ the object. If the object cannot be pretty printed, :func:`repr` is used.
+ See the documentation of :mod:`IPython.lib.pretty` for details on
+ how to write pretty printers. Here is a simple example::
+
+ def dtype_pprinter(obj, p, cycle):
+ if cycle:
+ return p.text('dtype(...)')
+ if hasattr(obj, 'fields'):
+ if obj.fields is None:
+ p.text(repr(obj))
+ else:
+ p.begin_group(7, 'dtype([')
+ for i, field in enumerate(obj.descr):
+ if i > 0:
+ p.text(',')
+ p.breakable()
+ p.pretty(field)
+ p.end_group(7, '])')
+ """
+
+ # The format type of data returned.
+ format_type = Unicode('text/plain')
+
+    # This subclass ignores this attribute as it always needs to return
+ # something.
+ enabled = Bool(True).tag(config=False)
+
+ max_seq_length = Integer(pretty.MAX_SEQ_LENGTH,
+ help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
+
+ Set to 0 to disable truncation.
+ """
+ ).tag(config=True)
+
+ # Look for a _repr_pretty_ methods to use for pretty printing.
+ print_method = ObjectName('_repr_pretty_')
+
+ # Whether to pretty-print or not.
+ pprint = Bool(True).tag(config=True)
+
+ # Whether to be verbose or not.
+ verbose = Bool(False).tag(config=True)
+
+ # The maximum width.
+ max_width = Integer(79).tag(config=True)
+
+ # The newline character.
+ newline = Unicode('\n').tag(config=True)
+
+ # format-string for pprinting floats
+ float_format = Unicode('%r')
+ # setter for float precision, either int or direct format-string
+ float_precision = CUnicode('').tag(config=True)
+
+ @observe('float_precision')
+ def _float_precision_changed(self, change):
+ """float_precision changed, set float_format accordingly.
+
+ float_precision can be set by int or str.
+ This will set float_format, after interpreting input.
+ If numpy has been imported, numpy print precision will also be set.
+
+        An integer `n` sets the format to '%.nf'; otherwise the string is used directly as the format.
+
+ An empty string returns to defaults (repr for float, 8 for numpy).
+
+ This parameter can be set via the '%precision' magic.
+ """
+ new = change['new']
+ if '%' in new:
+ # got explicit format string
+ fmt = new
+ try:
+ fmt%3.14159
+ except Exception as e:
+ raise ValueError("Precision must be int or format string, not %r"%new) from e
+ elif new:
+ # otherwise, should be an int
+ try:
+ i = int(new)
+ assert i >= 0
+ except ValueError as e:
+ raise ValueError("Precision must be int or format string, not %r"%new) from e
+ except AssertionError as e:
+ raise ValueError("int precision must be non-negative, not %r"%i) from e
+
+ fmt = '%%.%if'%i
+ if 'numpy' in sys.modules:
+ # set numpy precision if it has been imported
+ import numpy
+ numpy.set_printoptions(precision=i)
+ else:
+ # default back to repr
+ fmt = '%r'
+ if 'numpy' in sys.modules:
+ import numpy
+ # numpy default is 8
+ numpy.set_printoptions(precision=8)
+ self.float_format = fmt
+
+ # Use the default pretty printers from IPython.lib.pretty.
+ @default('singleton_printers')
+ def _singleton_printers_default(self):
+ return pretty._singleton_pprinters.copy()
+
+ @default('type_printers')
+ def _type_printers_default(self):
+ d = pretty._type_pprinters.copy()
+ d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
+ # if NumPy is used, set precision for its float64 type
+ if "numpy" in sys.modules:
+ import numpy
+
+ d[numpy.float64] = lambda obj, p, cycle: p.text(self.float_format % obj)
+ return d
+
+ @default('deferred_printers')
+ def _deferred_printers_default(self):
+ return pretty._deferred_type_pprinters.copy()
+
+ #### FormatterABC interface ####
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the pretty representation of the object."""
+ if not self.pprint:
+ return repr(obj)
+ else:
+ stream = StringIO()
+ printer = pretty.RepresentationPrinter(stream, self.verbose,
+ self.max_width, self.newline,
+ max_seq_length=self.max_seq_length,
+ singleton_pprinters=self.singleton_printers,
+ type_pprinters=self.type_printers,
+ deferred_pprinters=self.deferred_printers)
+ printer.pretty(obj)
+ printer.flush()
+ return stream.getvalue()
+
+
+class HTMLFormatter(BaseFormatter):
+ """An HTML formatter.
+
+ To define the callables that compute the HTML representation of your
+ objects, define a :meth:`_repr_html_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a valid HTML snippet that
+ could be injected into an existing DOM. It should *not* include the
+    ``<html>`` or ``<body>`` tags.
+ """
+ format_type = Unicode('text/html')
+
+ print_method = ObjectName('_repr_html_')
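+
+    # Objects can also opt in directly (illustrative sketch, ``Badge`` is a
+    # made-up class):
+    #
+    #     class Badge:
+    #         def __init__(self, text):
+    #             self.text = text
+    #         def _repr_html_(self):
+    #             return "<b>%s</b>" % self.text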
+
+
+class MarkdownFormatter(BaseFormatter):
+ """A Markdown formatter.
+
+ To define the callables that compute the Markdown representation of your
+ objects, define a :meth:`_repr_markdown_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+    The return value of this formatter should be valid Markdown.
+ """
+ format_type = Unicode('text/markdown')
+
+ print_method = ObjectName('_repr_markdown_')
+
+class SVGFormatter(BaseFormatter):
+ """An SVG formatter.
+
+ To define the callables that compute the SVG representation of your
+ objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be valid SVG enclosed in
+    ``<svg>`` tags, that could be injected into an existing DOM. It should
+    *not* include the ``<html>`` or ``<body>`` tags.
+ """
+ format_type = Unicode('image/svg+xml')
+
+ print_method = ObjectName('_repr_svg_')
+
+
+class PNGFormatter(BaseFormatter):
+ """A PNG formatter.
+
+ To define the callables that compute the PNG representation of your
+ objects, define a :meth:`_repr_png_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw PNG data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('image/png')
+
+ print_method = ObjectName('_repr_png_')
+
+ _return_type = (bytes, str)
+
+
+class JPEGFormatter(BaseFormatter):
+ """A JPEG formatter.
+
+ To define the callables that compute the JPEG representation of your
+ objects, define a :meth:`_repr_jpeg_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw JPEG data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('image/jpeg')
+
+ print_method = ObjectName('_repr_jpeg_')
+
+ _return_type = (bytes, str)
+
+
+class LatexFormatter(BaseFormatter):
+ """A LaTeX formatter.
+
+ To define the callables that compute the LaTeX representation of your
+ objects, define a :meth:`_repr_latex_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a valid LaTeX equation,
+    enclosed in either ``$``, ``$$`` or another LaTeX equation
+ environment.
+ """
+ format_type = Unicode('text/latex')
+
+ print_method = ObjectName('_repr_latex_')
+
+
+class JSONFormatter(BaseFormatter):
+ """A JSON string formatter.
+
+ To define the callables that compute the JSONable representation of
+ your objects, define a :meth:`_repr_json_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a JSONable list or dict.
+ JSON scalars (None, number, string) are not allowed, only dict or list containers.
+ """
+ format_type = Unicode('application/json')
+ _return_type = (list, dict)
+
+ print_method = ObjectName('_repr_json_')
+
+ def _check_return(self, r, obj):
+ """Check that a return value is appropriate
+
+ Return the value if so, None otherwise, warning if invalid.
+ """
+ if r is None:
+ return
+ md = None
+ if isinstance(r, tuple):
+ # unpack data, metadata tuple for type checking on first element
+ r, md = r
+
+ assert not isinstance(
+ r, str
+ ), "JSON-as-string has been deprecated since IPython < 3"
+
+ if md is not None:
+ # put the tuple back together
+ r = (r, md)
+ return super(JSONFormatter, self)._check_return(r, obj)
+
+
+class JavascriptFormatter(BaseFormatter):
+ """A Javascript formatter.
+
+ To define the callables that compute the Javascript representation of
+ your objects, define a :meth:`_repr_javascript_` method or use the
+ :meth:`for_type` or :meth:`for_type_by_name` methods to register functions
+ that handle this.
+
+ The return value of this formatter should be valid Javascript code and
+    should *not* be enclosed in ``<script>`` tags.
+ """
+ format_type = Unicode('application/javascript')
+
+ print_method = ObjectName('_repr_javascript_')
+
+
+class PDFFormatter(BaseFormatter):
+ """A PDF formatter.
+
+ To define the callables that compute the PDF representation of your
+ objects, define a :meth:`_repr_pdf_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw PDF data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('application/pdf')
+
+ print_method = ObjectName('_repr_pdf_')
+
+ _return_type = (bytes, str)
+
+class IPythonDisplayFormatter(BaseFormatter):
+ """An escape-hatch Formatter for objects that know how to display themselves.
+
+ To define the callables that compute the representation of your
+ objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this. Unlike mime-type displays, this method should not return anything,
+ instead calling any appropriate display methods itself.
+
+ This display formatter has highest priority.
+ If it fires, no other display formatter will be called.
+
+ Prior to IPython 6.1, `_ipython_display_` was the only way to display custom mime-types
+ without registering a new Formatter.
+
+ IPython 6.1 introduces `_repr_mimebundle_` for displaying custom mime-types,
+ so `_ipython_display_` should only be used for objects that require unusual
+ display patterns, such as multiple display calls.
+ """
+ print_method = ObjectName('_ipython_display_')
+ _return_type = (type(None), bool)
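+
+    # Illustrative sketch of the multiple-display use case (comments only;
+    # ``Dashboard`` is a made-up class):
+    #
+    #     from IPython.display import display, HTML
+    #
+    #     class Dashboard:
+    #         def _ipython_display_(self):
+    #             display(HTML("<h2>panel A</h2>"))
+    #             display(HTML("<h2>panel B</h2>"))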
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the format for an object."""
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ printer(obj)
+ return True
+ # Finally look for special method names
+ method = get_real_method(obj, self.print_method)
+ if method is not None:
+ method()
+ return True
+
+
+class MimeBundleFormatter(BaseFormatter):
+ """A Formatter for arbitrary mime-types.
+
+ Unlike other `_repr_<mimetype>_` methods,
+ `_repr_mimebundle_` should return mime-bundle data,
+ either the mime-keyed `data` dictionary or the tuple `(data, metadata)`.
+ Any mime-type is valid.
+
+ To define the callables that compute the mime-bundle representation of your
+ objects, define a :meth:`_repr_mimebundle_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ .. versionadded:: 6.1
+ """
+ print_method = ObjectName('_repr_mimebundle_')
+ _return_type = dict
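+
+    # Illustrative sketch (comments only; ``Report`` is a made-up class):
+    #
+    #     class Report:
+    #         def _repr_mimebundle_(self, include=None, exclude=None):
+    #             data = {"text/plain": "Report()", "text/html": "<h1>Report</h1>"}
+    #             return data, {}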
+
+ def _check_return(self, r, obj):
+ r = super(MimeBundleFormatter, self)._check_return(r, obj)
+ # always return (data, metadata):
+ if r is None:
+ return {}, {}
+ if not isinstance(r, tuple):
+ return r, {}
+ return r
+
+ @catch_format_error
+ def __call__(self, obj, include=None, exclude=None):
+ """Compute the format for an object.
+
+ Identical to parent's method but we pass extra parameters to the method.
+
+        Unlike other `_repr_*_` methods, `_repr_mimebundle_` should allow extra kwargs, in
+ particular `include` and `exclude`.
+ """
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ return printer(obj)
+ # Finally look for special method names
+ method = get_real_method(obj, self.print_method)
+
+ if method is not None:
+ return method(include=include, exclude=exclude)
+ return None
+ else:
+ return None
+
+
+FormatterABC.register(BaseFormatter)
+FormatterABC.register(PlainTextFormatter)
+FormatterABC.register(HTMLFormatter)
+FormatterABC.register(MarkdownFormatter)
+FormatterABC.register(SVGFormatter)
+FormatterABC.register(PNGFormatter)
+FormatterABC.register(PDFFormatter)
+FormatterABC.register(JPEGFormatter)
+FormatterABC.register(LatexFormatter)
+FormatterABC.register(JSONFormatter)
+FormatterABC.register(JavascriptFormatter)
+FormatterABC.register(IPythonDisplayFormatter)
+FormatterABC.register(MimeBundleFormatter)
+
+
+def format_display_data(obj, include=None, exclude=None):
+ """Return a format data dict for an object.
+
+ By default all format types will be computed.
+
+    Parameters
+    ----------
+    obj : object
+        The Python object whose format data will be computed.
+    include : list or tuple, optional
+        A list of format type strings (MIME types) to include in the
+        format data dict. If this is set *only* the format types included
+        in this list will be computed.
+    exclude : list or tuple, optional
+        A list of format type strings (MIME types) to exclude from the format
+        data dict. If this is set all format types will be computed,
+        except for those included in this argument.
+
+    Returns
+    -------
+    format_dict : dict
+        A dictionary of key/value pairs, one for each format that was
+        generated for the object. The keys are the format types, which
+        will usually be MIME type strings, and the values are JSON'able
+        data structures containing the raw data for the representation in
+        that format.
+ """
+ from .interactiveshell import InteractiveShell
+
+ return InteractiveShell.instance().display_formatter.format(
+ obj,
+ include,
+ exclude
+ )
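+
+# Usage sketch (comments only; requires an InteractiveShell instance, which
+# ``InteractiveShell.instance()`` creates on demand):
+#
+#     data, metadata = format_display_data(3.14, include=['text/plain'])
+#     # data is roughly {'text/plain': '3.14'}; metadata is {}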
diff --git a/contrib/python/ipython/py3/IPython/core/getipython.py b/contrib/python/ipython/py3/IPython/core/getipython.py
new file mode 100644
index 0000000000..5e9b13cf3c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/getipython.py
@@ -0,0 +1,24 @@
+# encoding: utf-8
+"""Simple function to call to get the current InteractiveShell instance
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+def get_ipython():
+ """Get the global InteractiveShell instance.
+
+ Returns None if no InteractiveShell instance is registered.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ return InteractiveShell.instance()
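+
+# Typical guarded use in library code (sketch, comments only):
+#
+#     ip = get_ipython()
+#     if ip is not None:
+#         # running inside IPython; e.g. register a callback
+#         ip.events.register('post_run_cell', lambda result: None)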
diff --git a/contrib/python/ipython/py3/IPython/core/guarded_eval.py b/contrib/python/ipython/py3/IPython/core/guarded_eval.py
new file mode 100644
index 0000000000..d576a2a769
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/guarded_eval.py
@@ -0,0 +1,733 @@
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Set,
+ Sequence,
+ Tuple,
+ NamedTuple,
+ Type,
+ Literal,
+ Union,
+ TYPE_CHECKING,
+)
+import ast
+import builtins
+import collections
+import operator
+import sys
+from functools import cached_property
+from dataclasses import dataclass, field
+from types import MethodDescriptorType, ModuleType
+
+from IPython.utils.docs import GENERATING_DOCUMENTATION
+from IPython.utils.decorators import undoc
+
+
+if TYPE_CHECKING or GENERATING_DOCUMENTATION:
+ from typing_extensions import Protocol
+else:
+    # do not require at runtime
+ Protocol = object # requires Python >=3.8
+
+
+@undoc
+class HasGetItem(Protocol):
+ def __getitem__(self, key) -> None:
+ ...
+
+
+@undoc
+class InstancesHaveGetItem(Protocol):
+ def __call__(self, *args, **kwargs) -> HasGetItem:
+ ...
+
+
+@undoc
+class HasGetAttr(Protocol):
+ def __getattr__(self, key) -> None:
+ ...
+
+
+@undoc
+class DoesNotHaveGetAttr(Protocol):
+ pass
+
+
+# By default `__getattr__` is not explicitly implemented on most objects
+MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
+
+
+def _unbind_method(func: Callable) -> Union[Callable, None]:
+ """Get unbound method for given bound method.
+
+    Returns None if the unbound method cannot be determined, or if the method is already unbound.
+ """
+ owner = getattr(func, "__self__", None)
+ owner_class = type(owner)
+ name = getattr(func, "__name__", None)
+ instance_dict_overrides = getattr(owner, "__dict__", None)
+ if (
+ owner is not None
+ and name
+ and (
+ not instance_dict_overrides
+ or (instance_dict_overrides and name not in instance_dict_overrides)
+ )
+ ):
+ return getattr(owner_class, name)
+ return None
+
+
+@undoc
+@dataclass
+class EvaluationPolicy:
+ """Definition of evaluation policy."""
+
+ allow_locals_access: bool = False
+ allow_globals_access: bool = False
+ allow_item_access: bool = False
+ allow_attr_access: bool = False
+ allow_builtins_access: bool = False
+ allow_all_operations: bool = False
+ allow_any_calls: bool = False
+ allowed_calls: Set[Callable] = field(default_factory=set)
+
+ def can_get_item(self, value, item):
+ return self.allow_item_access
+
+ def can_get_attr(self, value, attr):
+ return self.allow_attr_access
+
+ def can_operate(self, dunders: Tuple[str, ...], a, b=None):
+ if self.allow_all_operations:
+ return True
+
+ def can_call(self, func):
+ if self.allow_any_calls:
+ return True
+
+ if func in self.allowed_calls:
+ return True
+
+ owner_method = _unbind_method(func)
+
+ if owner_method and owner_method in self.allowed_calls:
+ return True
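+
+# Construction sketch (comments only): a fully permissive policy for trusted
+# contexts would enable every flag defined above, for example:
+#
+#     trusted = EvaluationPolicy(
+#         allow_locals_access=True,
+#         allow_globals_access=True,
+#         allow_item_access=True,
+#         allow_attr_access=True,
+#         allow_builtins_access=True,
+#         allow_all_operations=True,
+#         allow_any_calls=True,
+#     )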
+
+
+def _get_external(module_name: str, access_path: Sequence[str]):
+ """Get value from external module given a dotted access path.
+
+ Raises:
+    * `KeyError` if the module was removed or not found, and
+    * `AttributeError` if the access path does not match an exported object
+ """
+ member_type = sys.modules[module_name]
+ for attr in access_path:
+ member_type = getattr(member_type, attr)
+ return member_type
+
+
+def _has_original_dunder_external(
+ value,
+ module_name: str,
+ access_path: Sequence[str],
+ method_name: str,
+):
+ if module_name not in sys.modules:
+        # LBYL (look before you leap) as it is faster
+ return False
+ try:
+ member_type = _get_external(module_name, access_path)
+ value_type = type(value)
+ if type(value) == member_type:
+ return True
+ if method_name == "__getattribute__":
+ # we have to short-circuit here due to an unresolved issue in
+ # `isinstance` implementation: https://bugs.python.org/issue32683
+ return False
+ if isinstance(value, member_type):
+ method = getattr(value_type, method_name, None)
+ member_method = getattr(member_type, method_name, None)
+ if member_method == method:
+ return True
+ except (AttributeError, KeyError):
+ return False
+
+
+def _has_original_dunder(
+ value, allowed_types, allowed_methods, allowed_external, method_name
+):
+ # note: Python ignores `__getattr__`/`__getitem__` on instances,
+ # we only need to check at class level
+ value_type = type(value)
+
+ # strict type check passes → no need to check method
+ if value_type in allowed_types:
+ return True
+
+ method = getattr(value_type, method_name, None)
+
+ if method is None:
+ return None
+
+ if method in allowed_methods:
+ return True
+
+ for module_name, *access_path in allowed_external:
+ if _has_original_dunder_external(value, module_name, access_path, method_name):
+ return True
+
+ return False
+
+
+@undoc
+@dataclass
+class SelectivePolicy(EvaluationPolicy):
+ allowed_getitem: Set[InstancesHaveGetItem] = field(default_factory=set)
+ allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)
+
+ allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
+ allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)
+
+ allowed_operations: Set = field(default_factory=set)
+ allowed_operations_external: Set[Tuple[str, ...]] = field(default_factory=set)
+
+ _operation_methods_cache: Dict[str, Set[Callable]] = field(
+ default_factory=dict, init=False
+ )
+
+ def can_get_attr(self, value, attr):
+ has_original_attribute = _has_original_dunder(
+ value,
+ allowed_types=self.allowed_getattr,
+ allowed_methods=self._getattribute_methods,
+ allowed_external=self.allowed_getattr_external,
+ method_name="__getattribute__",
+ )
+ has_original_attr = _has_original_dunder(
+ value,
+ allowed_types=self.allowed_getattr,
+ allowed_methods=self._getattr_methods,
+ allowed_external=self.allowed_getattr_external,
+ method_name="__getattr__",
+ )
+
+ accept = False
+
+ # Many objects do not have `__getattr__`; this is fine.
+ if has_original_attr is None and has_original_attribute:
+ accept = True
+ else:
+ # Accept objects without modifications to `__getattr__` and `__getattribute__`
+ accept = has_original_attr and has_original_attribute
+
+ if accept:
+ # We still need to check for overridden properties.
+
+ value_class = type(value)
+ if not hasattr(value_class, attr):
+ return True
+
+ class_attr_val = getattr(value_class, attr)
+ is_property = isinstance(class_attr_val, property)
+
+ if not is_property:
+ return True
+
+ # Properties in allowed types are ok (although we do not include any
+ # properties in our default allow list currently).
+ if type(value) in self.allowed_getattr:
+ return True # pragma: no cover
+
+ # Properties in subclasses of allowed types may be ok if not changed
+ for module_name, *access_path in self.allowed_getattr_external:
+ try:
+ external_class = _get_external(module_name, access_path)
+ external_class_attr_val = getattr(external_class, attr)
+ except (KeyError, AttributeError):
+ return False # pragma: no cover
+ return class_attr_val == external_class_attr_val
+
+ return False
+
+ def can_get_item(self, value, item):
+ """Allow accessing `__getiitem__` of allow-listed instances unless it was not modified."""
+ return _has_original_dunder(
+ value,
+ allowed_types=self.allowed_getitem,
+ allowed_methods=self._getitem_methods,
+ allowed_external=self.allowed_getitem_external,
+ method_name="__getitem__",
+ )
+
+ def can_operate(self, dunders: Tuple[str, ...], a, b=None):
+ objects = [a]
+ if b is not None:
+ objects.append(b)
+ return all(
+ [
+ _has_original_dunder(
+ obj,
+ allowed_types=self.allowed_operations,
+ allowed_methods=self._operator_dunder_methods(dunder),
+ allowed_external=self.allowed_operations_external,
+ method_name=dunder,
+ )
+ for dunder in dunders
+ for obj in objects
+ ]
+ )
+
+ def _operator_dunder_methods(self, dunder: str) -> Set[Callable]:
+ if dunder not in self._operation_methods_cache:
+ self._operation_methods_cache[dunder] = self._safe_get_methods(
+ self.allowed_operations, dunder
+ )
+ return self._operation_methods_cache[dunder]
+
+ @cached_property
+ def _getitem_methods(self) -> Set[Callable]:
+ return self._safe_get_methods(self.allowed_getitem, "__getitem__")
+
+ @cached_property
+ def _getattr_methods(self) -> Set[Callable]:
+ return self._safe_get_methods(self.allowed_getattr, "__getattr__")
+
+ @cached_property
+ def _getattribute_methods(self) -> Set[Callable]:
+ return self._safe_get_methods(self.allowed_getattr, "__getattribute__")
+
+ def _safe_get_methods(self, classes, name) -> Set[Callable]:
+ return {
+ method
+ for class_ in classes
+ for method in [getattr(class_, name, None)]
+ if method
+ }
+
+
+class _DummyNamedTuple(NamedTuple):
+ """Used internally to retrieve methods of named tuple instance."""
+
+
+class EvaluationContext(NamedTuple):
+ #: Local namespace
+ locals: dict
+ #: Global namespace
+ globals: dict
+ #: Evaluation policy identifier
+ evaluation: Literal[
+ "forbidden", "minimal", "limited", "unsafe", "dangerous"
+ ] = "forbidden"
+ #: Whether the evaluation of code takes place inside of a subscript.
+ #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``.
+ in_subscript: bool = False
+
+
+class _IdentitySubscript:
+ """Returns the key itself when item is requested via subscript."""
+
+ def __getitem__(self, key):
+ return key
+
+
+IDENTITY_SUBSCRIPT = _IdentitySubscript()
+SUBSCRIPT_MARKER = "__SUBSCRIPT_SENTINEL__"
+
+
+class GuardRejection(Exception):
+ """Exception raised when guard rejects evaluation attempt."""
+
+ pass
+
+
+def guarded_eval(code: str, context: EvaluationContext):
+ """Evaluate provided code in the evaluation context.
+
+ If the evaluation policy given by the context is set to ``forbidden``,
+ no evaluation will be performed; if it is set to ``dangerous``,
+ the standard :func:`eval` will be used; finally, for any other policy,
+ :func:`eval_node` will be called on the parsed AST.
+ """
+ locals_ = context.locals
+
+ if context.evaluation == "forbidden":
+ raise GuardRejection("Forbidden mode")
+
+ # note: not using `ast.literal_eval` as it does not implement
+ # getitem at all, for example it fails on simple `[0][1]`
+
+ if context.in_subscript:
+ # syntactic sugar for the slice colon (:) is only available in subscripts,
+ # so we need to trick the ast parser into thinking that we have
+ # a subscript, but we need to be able to later recognise that we did
+ # it so we can ignore the actual __getitem__ operation
+ if not code:
+ return tuple()
+ locals_ = locals_.copy()
+ locals_[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT
+ code = SUBSCRIPT_MARKER + "[" + code + "]"
+ context = EvaluationContext(**{**context._asdict(), **{"locals": locals_}})
+
+ if context.evaluation == "dangerous":
+ return eval(code, context.globals, context.locals)
+
+ expression = ast.parse(code, mode="eval")
+
+ return eval_node(expression, context)
+
+
+BINARY_OP_DUNDERS: Dict[Type[ast.operator], Tuple[str]] = {
+ ast.Add: ("__add__",),
+ ast.Sub: ("__sub__",),
+ ast.Mult: ("__mul__",),
+ ast.Div: ("__truediv__",),
+ ast.FloorDiv: ("__floordiv__",),
+ ast.Mod: ("__mod__",),
+ ast.Pow: ("__pow__",),
+ ast.LShift: ("__lshift__",),
+ ast.RShift: ("__rshift__",),
+ ast.BitOr: ("__or__",),
+ ast.BitXor: ("__xor__",),
+ ast.BitAnd: ("__and__",),
+ ast.MatMult: ("__matmul__",),
+}
+
+COMP_OP_DUNDERS: Dict[Type[ast.cmpop], Tuple[str, ...]] = {
+ ast.Eq: ("__eq__",),
+ ast.NotEq: ("__ne__", "__eq__"),
+ ast.Lt: ("__lt__", "__gt__"),
+ ast.LtE: ("__le__", "__ge__"),
+ ast.Gt: ("__gt__", "__lt__"),
+ ast.GtE: ("__ge__", "__le__"),
+ ast.In: ("__contains__",),
+ # Note: ast.Is, ast.IsNot, ast.NotIn are handled specially
+}
+
+UNARY_OP_DUNDERS: Dict[Type[ast.unaryop], Tuple[str, ...]] = {
+ ast.USub: ("__neg__",),
+ ast.UAdd: ("__pos__",),
+ # we have to check both __inv__ and __invert__!
+ ast.Invert: ("__invert__", "__inv__"),
+ ast.Not: ("__not__",),
+}
+
+
+def _find_dunder(node_op, dunders) -> Union[Tuple[str, ...], None]:
+ dunder = None
+ for op, candidate_dunder in dunders.items():
+ if isinstance(node_op, op):
+ dunder = candidate_dunder
+ return dunder
+
+
+def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
+ """Evaluate AST node in provided context.
+
+ Applies evaluation restrictions defined in the context. Currently does not support evaluation of functions with keyword arguments.
+
+ Does not evaluate actions that always have side effects:
+
+ - class definitions (``class sth: ...``)
+ - function definitions (``def sth: ...``)
+ - variable assignments (``x = 1``)
+ - augmented assignments (``x += 1``)
+ - deletions (``del x``)
+
+ Does not evaluate operations which do not return values:
+
+ - assertions (``assert x``)
+ - pass (``pass``)
+ - imports (``import x``)
+ - control flow:
+
+ - conditionals (``if x:``) except for ternary IfExp (``a if x else b``)
+ - loops (``for`` and ``while``)
+ - exception handling
+
+ The purpose of this function is to guard against unwanted side-effects;
+ it does not give guarantees on protection from malicious code execution.
+ """
+ policy = EVALUATION_POLICIES[context.evaluation]
+ if node is None:
+ return None
+ if isinstance(node, ast.Expression):
+ return eval_node(node.body, context)
+ if isinstance(node, ast.BinOp):
+ left = eval_node(node.left, context)
+ right = eval_node(node.right, context)
+ dunders = _find_dunder(node.op, BINARY_OP_DUNDERS)
+ if dunders:
+ if policy.can_operate(dunders, left, right):
+ return getattr(left, dunders[0])(right)
+ else:
+ raise GuardRejection(
+ f"Operation (`{dunders}`) for",
+ type(left),
+ f"not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.Compare):
+ left = eval_node(node.left, context)
+ all_true = True
+ negate = False
+ for op, right in zip(node.ops, node.comparators):
+ right = eval_node(right, context)
+ dunder = None
+ dunders = _find_dunder(op, COMP_OP_DUNDERS)
+ if not dunders:
+ if isinstance(op, ast.NotIn):
+ dunders = COMP_OP_DUNDERS[ast.In]
+ negate = True
+ if isinstance(op, ast.Is):
+ dunder = "is_"
+ if isinstance(op, ast.IsNot):
+ dunder = "is_"
+ negate = True
+ if not dunder and dunders:
+ dunder = dunders[0]
+ if dunder:
+ a, b = (right, left) if dunder == "__contains__" else (left, right)
+ if dunder == "is_" or dunders and policy.can_operate(dunders, a, b):
+ result = getattr(operator, dunder)(a, b)
+ if negate:
+ result = not result
+ if not result:
+ all_true = False
+ left = right
+ else:
+ raise GuardRejection(
+ f"Comparison (`{dunder}`) for",
+ type(left),
+ f"not allowed in {context.evaluation} mode",
+ )
+ else:
+ raise ValueError(
+ f"Comparison `{dunder}` not supported"
+ ) # pragma: no cover
+ return all_true
+ if isinstance(node, ast.Constant):
+ return node.value
+ if isinstance(node, ast.Tuple):
+ return tuple(eval_node(e, context) for e in node.elts)
+ if isinstance(node, ast.List):
+ return [eval_node(e, context) for e in node.elts]
+ if isinstance(node, ast.Set):
+ return {eval_node(e, context) for e in node.elts}
+ if isinstance(node, ast.Dict):
+ return dict(
+ zip(
+ [eval_node(k, context) for k in node.keys],
+ [eval_node(v, context) for v in node.values],
+ )
+ )
+ if isinstance(node, ast.Slice):
+ return slice(
+ eval_node(node.lower, context),
+ eval_node(node.upper, context),
+ eval_node(node.step, context),
+ )
+ if isinstance(node, ast.UnaryOp):
+ value = eval_node(node.operand, context)
+ dunders = _find_dunder(node.op, UNARY_OP_DUNDERS)
+ if dunders:
+ if policy.can_operate(dunders, value):
+ return getattr(value, dunders[0])()
+ else:
+ raise GuardRejection(
+ f"Operation (`{dunders}`) for",
+ type(value),
+ f"not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.Subscript):
+ value = eval_node(node.value, context)
+ slice_ = eval_node(node.slice, context)
+ if policy.can_get_item(value, slice_):
+ return value[slice_]
+ raise GuardRejection(
+ "Subscript access (`__getitem__`) for",
+ type(value), # not joined to avoid calling `repr`
+ f" not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.Name):
+ if policy.allow_locals_access and node.id in context.locals:
+ return context.locals[node.id]
+ if policy.allow_globals_access and node.id in context.globals:
+ return context.globals[node.id]
+ if policy.allow_builtins_access and hasattr(builtins, node.id):
+ # note: do not use __builtins__, it is an implementation detail of CPython
+ return getattr(builtins, node.id)
+ if not policy.allow_globals_access and not policy.allow_locals_access:
+ raise GuardRejection(
+ f"Namespace access not allowed in {context.evaluation} mode"
+ )
+ else:
+ raise NameError(f"{node.id} not found in locals, globals, nor builtins")
+ if isinstance(node, ast.Attribute):
+ value = eval_node(node.value, context)
+ if policy.can_get_attr(value, node.attr):
+ return getattr(value, node.attr)
+ raise GuardRejection(
+ "Attribute access (`__getattr__`) for",
+ type(value), # not joined to avoid calling `repr`
+ f"not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.IfExp):
+ test = eval_node(node.test, context)
+ if test:
+ return eval_node(node.body, context)
+ else:
+ return eval_node(node.orelse, context)
+ if isinstance(node, ast.Call):
+ func = eval_node(node.func, context)
+ if policy.can_call(func) and not node.keywords:
+ args = [eval_node(arg, context) for arg in node.args]
+ return func(*args)
+ raise GuardRejection(
+ "Call for",
+ func, # not joined to avoid calling `repr`
+ f"not allowed in {context.evaluation} mode",
+ )
+ raise ValueError("Unhandled node", ast.dump(node))
+
+
+SUPPORTED_EXTERNAL_GETITEM = {
+ ("pandas", "core", "indexing", "_iLocIndexer"),
+ ("pandas", "core", "indexing", "_LocIndexer"),
+ ("pandas", "DataFrame"),
+ ("pandas", "Series"),
+ ("numpy", "ndarray"),
+ ("numpy", "void"),
+}
+
+
+BUILTIN_GETITEM: Set[InstancesHaveGetItem] = {
+ dict,
+ str, # type: ignore[arg-type]
+ bytes, # type: ignore[arg-type]
+ list,
+ tuple,
+ collections.defaultdict,
+ collections.deque,
+ collections.OrderedDict,
+ collections.ChainMap,
+ collections.UserDict,
+ collections.UserList,
+ collections.UserString, # type: ignore[arg-type]
+ _DummyNamedTuple,
+ _IdentitySubscript,
+}
+
+
+def _list_methods(cls, source=None):
+ """For use on immutable objects or with methods returning a copy"""
+ return [getattr(cls, k) for k in (source if source else dir(cls))]
+
+
+dict_non_mutating_methods = ("copy", "keys", "values", "items")
+list_non_mutating_methods = ("copy", "index", "count")
+set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))
+
+
+dict_keys: Type[collections.abc.KeysView] = type({}.keys())
+
+NUMERICS = {int, float, complex}
+
+ALLOWED_CALLS = {
+ bytes,
+ *_list_methods(bytes),
+ dict,
+ *_list_methods(dict, dict_non_mutating_methods),
+ dict_keys.isdisjoint,
+ list,
+ *_list_methods(list, list_non_mutating_methods),
+ set,
+ *_list_methods(set, set_non_mutating_methods),
+ frozenset,
+ *_list_methods(frozenset),
+ range,
+ str,
+ *_list_methods(str),
+ tuple,
+ *_list_methods(tuple),
+ *NUMERICS,
+ *[method for numeric_cls in NUMERICS for method in _list_methods(numeric_cls)],
+ collections.deque,
+ *_list_methods(collections.deque, list_non_mutating_methods),
+ collections.defaultdict,
+ *_list_methods(collections.defaultdict, dict_non_mutating_methods),
+ collections.OrderedDict,
+ *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
+ collections.UserDict,
+ *_list_methods(collections.UserDict, dict_non_mutating_methods),
+ collections.UserList,
+ *_list_methods(collections.UserList, list_non_mutating_methods),
+ collections.UserString,
+ *_list_methods(collections.UserString, dir(str)),
+ collections.Counter,
+ *_list_methods(collections.Counter, dict_non_mutating_methods),
+ collections.Counter.elements,
+ collections.Counter.most_common,
+}
+
+BUILTIN_GETATTR: Set[MayHaveGetattr] = {
+ *BUILTIN_GETITEM,
+ set,
+ frozenset,
+ object,
+ type, # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
+ *NUMERICS,
+ dict_keys,
+ MethodDescriptorType,
+ ModuleType,
+}
+
+
+BUILTIN_OPERATIONS = {*BUILTIN_GETATTR}
+
+EVALUATION_POLICIES = {
+ "minimal": EvaluationPolicy(
+ allow_builtins_access=True,
+ allow_locals_access=False,
+ allow_globals_access=False,
+ allow_item_access=False,
+ allow_attr_access=False,
+ allowed_calls=set(),
+ allow_any_calls=False,
+ allow_all_operations=False,
+ ),
+ "limited": SelectivePolicy(
+ allowed_getitem=BUILTIN_GETITEM,
+ allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
+ allowed_getattr=BUILTIN_GETATTR,
+ allowed_getattr_external={
+ # pandas DataFrame and Series implement a custom `__getattr__`
+ ("pandas", "DataFrame"),
+ ("pandas", "Series"),
+ },
+ allowed_operations=BUILTIN_OPERATIONS,
+ allow_builtins_access=True,
+ allow_locals_access=True,
+ allow_globals_access=True,
+ allowed_calls=ALLOWED_CALLS,
+ ),
+ "unsafe": EvaluationPolicy(
+ allow_builtins_access=True,
+ allow_locals_access=True,
+ allow_globals_access=True,
+ allow_attr_access=True,
+ allow_item_access=True,
+ allow_any_calls=True,
+ allow_all_operations=True,
+ ),
+}
+
+
+__all__ = [
+ "guarded_eval",
+ "eval_node",
+ "GuardRejection",
+ "EvaluationContext",
+ "_unbind_method",
+]
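+
+
+# Illustrative usage sketch (added for this review, not part of upstream IPython):
+# under the "limited" policy, indexing and arithmetic on allow-listed builtin
+# types succeed, while arbitrary calls are rejected. The namespace is made up.
+if __name__ == "__main__":  # pragma: no cover - demo only
+    _demo_context = EvaluationContext(
+        locals={"x": [1, 2, 3]}, globals={}, evaluation="limited"
+    )
+    assert guarded_eval("x[0] + 1", _demo_context) == 2
+    try:
+        guarded_eval("__import__('os')", _demo_context)
+    except GuardRejection:
+        pass  # arbitrary calls are rejected in "limited" mode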
diff --git a/contrib/python/ipython/py3/IPython/core/history.py b/contrib/python/ipython/py3/IPython/core/history.py
new file mode 100644
index 0000000000..fd5a8680bf
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/history.py
@@ -0,0 +1,968 @@
+""" History related magics and functionality """
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import atexit
+import datetime
+from pathlib import Path
+import re
+import sqlite3
+import threading
+
+from traitlets.config.configurable import LoggingConfigurable
+from decorator import decorator
+from IPython.utils.decorators import undoc
+from IPython.paths import locate_profile
+from traitlets import (
+ Any,
+ Bool,
+ Dict,
+ Instance,
+ Integer,
+ List,
+ Unicode,
+ Union,
+ TraitError,
+ default,
+ observe,
+)
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+@undoc
+class DummyDB(object):
+ """Dummy DB that will act as a black hole for history.
+
+ Only used in the absence of sqlite"""
+ def execute(*args, **kwargs):
+ return []
+
+ def commit(self, *args, **kwargs):
+ pass
+
+ def __enter__(self, *args, **kwargs):
+ pass
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+
+@decorator
+def only_when_enabled(f, self, *a, **kw):
+ """Decorator: return an empty list in the absence of sqlite."""
+ if not self.enabled:
+ return []
+ else:
+ return f(self, *a, **kw)
+
+
+# use 16kB as threshold for whether a corrupt history db should be saved
+# that should be at least 100 entries or so
+_SAVE_DB_SIZE = 16384
+
+@decorator
+def catch_corrupt_db(f, self, *a, **kw):
+ """A decorator which wraps HistoryAccessor method calls to catch errors from
+ a corrupt SQLite database, move the old database out of the way, and create
+ a new one.
+
+ We avoid clobbering larger databases because this may be triggered due to filesystem issues,
+ not just a corrupt file.
+ """
+ try:
+ return f(self, *a, **kw)
+ except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
+ self._corrupt_db_counter += 1
+ self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
+ if self.hist_file != ':memory:':
+ if self._corrupt_db_counter > self._corrupt_db_limit:
+ self.hist_file = ':memory:'
+ self.log.error("Failed to load history too many times, history will not be saved.")
+ elif self.hist_file.is_file():
+ # move the file out of the way
+ base = str(self.hist_file.parent / self.hist_file.stem)
+ ext = self.hist_file.suffix
+ size = self.hist_file.stat().st_size
+ if size >= _SAVE_DB_SIZE:
+ # if there's significant content, avoid clobbering
+ now = datetime.datetime.now().isoformat().replace(':', '.')
+ newpath = base + '-corrupt-' + now + ext
+ # don't clobber previous corrupt backups
+ for i in range(100):
+ if not Path(newpath).exists():
+ break
+ else:
+ newpath = base + '-corrupt-' + now + (u'-%i' % i) + ext
+ else:
+ # not much content, possibly empty; don't worry about clobbering
+ # maybe we should just delete it?
+ newpath = base + '-corrupt' + ext
+ self.hist_file.rename(newpath)
+ self.log.error("History file was moved to %s and a new file created.", newpath)
+ self.init_db()
+ return []
+ else:
+ # Failed with :memory:, something serious is wrong
+ raise
+
+
+class HistoryAccessorBase(LoggingConfigurable):
+ """An abstract class for History Accessors """
+
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ raise NotImplementedError
+
+ def search(self, pattern="*", raw=True, search_raw=True,
+ output=False, n=None, unique=False):
+ raise NotImplementedError
+
+ def get_range(self, session, start=1, stop=None, raw=True,output=False):
+ raise NotImplementedError
+
+ def get_range_by_str(self, rangestr, raw=True, output=False):
+ raise NotImplementedError
+
+
+class HistoryAccessor(HistoryAccessorBase):
+ """Access the history database without adding to it.
+
+ This is intended for use by standalone history tools. IPython shells use
+ HistoryManager, below, which is a subclass of this."""
+
+ # counter for init_db retries, so we don't keep trying over and over
+ _corrupt_db_counter = 0
+ # after two failures, fall back on :memory:
+ _corrupt_db_limit = 2
+
+ # String holding the path to the history file
+ hist_file = Union(
+ [Instance(Path), Unicode()],
+ help="""Path to file to use for SQLite history database.
+
+ By default, IPython will put the history database in the IPython
+ profile directory. If you would rather share one history among
+ profiles, you can set this value in each, so that they are consistent.
+
+ Due to an issue with fcntl, SQLite is known to misbehave on some NFS
+ mounts. If you see IPython hanging, try setting this to something on a
+ local disk, e.g::
+
+ ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
+
+ You can also use the specific value `:memory:` (including the colons
+ but not the backticks) to avoid creating a history file.
+
+ """,
+ ).tag(config=True)
+
+ enabled = Bool(True,
+ help="""enable the SQLite history
+
+ set enabled=False to disable the SQLite history,
+ in which case there will be no stored history, no SQLite connection,
+ and no background saving thread. This may be necessary in some
+ threaded environments where IPython is embedded.
+ """,
+ ).tag(config=True)
+
+ connection_options = Dict(
+ help="""Options for configuring the SQLite connection
+
+ These options are passed as keyword args to sqlite3.connect
+ when establishing database connections.
+ """
+ ).tag(config=True)
+
+ # The SQLite database
+ db = Any()
+ @observe('db')
+ def _db_changed(self, change):
+ """validate the db, since it can be an Instance of two different types"""
+ new = change['new']
+ connection_types = (DummyDB, sqlite3.Connection)
+ if not isinstance(new, connection_types):
+ msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
+ (self.__class__.__name__, new)
+ raise TraitError(msg)
+
+ def __init__(self, profile="default", hist_file="", **traits):
+ """Create a new history accessor.
+
+ Parameters
+ ----------
+ profile : str
+ The name of the profile from which to open history.
+ hist_file : str
+ Path to an SQLite history database stored by IPython. If specified,
+ hist_file overrides profile.
+ config : :class:`~traitlets.config.loader.Config`
+ Config object. hist_file can also be set through this.
+ """
+ super(HistoryAccessor, self).__init__(**traits)
+ # defer setting hist_file from kwarg until after init,
+ # otherwise the default kwarg value would clobber any value
+ # set by config
+ if hist_file:
+ self.hist_file = hist_file
+
+ try:
+ self.hist_file
+ except TraitError:
+ # No one has set the hist_file, yet.
+ self.hist_file = self._get_hist_file_name(profile)
+
+ self.init_db()
+
+ def _get_hist_file_name(self, profile='default'):
+ """Find the history file for the given profile name.
+
+ This is overridden by the HistoryManager subclass, to use the shell's
+ active profile.
+
+ Parameters
+ ----------
+ profile : str
+ The name of a profile which has a history file.
+ """
+ return Path(locate_profile(profile)) / "history.sqlite"
+
+ @catch_corrupt_db
+ def init_db(self):
+ """Connect to the database, and create tables if necessary."""
+ if not self.enabled:
+ self.db = DummyDB()
+ return
+
+ # use detect_types so that timestamps return datetime objects
+ kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
+ kwargs.update(self.connection_options)
+ self.db = sqlite3.connect(str(self.hist_file), **kwargs)
+ with self.db:
+ self.db.execute(
+ """CREATE TABLE IF NOT EXISTS sessions (session integer
+ primary key autoincrement, start timestamp,
+ end timestamp, num_cmds integer, remark text)"""
+ )
+ self.db.execute(
+ """CREATE TABLE IF NOT EXISTS history
+ (session integer, line integer, source text, source_raw text,
+ PRIMARY KEY (session, line))"""
+ )
+ # Output history is optional, but ensure the table's there so it can be
+ # enabled later.
+ self.db.execute(
+ """CREATE TABLE IF NOT EXISTS output_history
+ (session integer, line integer, output text,
+ PRIMARY KEY (session, line))"""
+ )
+ # success! reset corrupt db count
+ self._corrupt_db_counter = 0
+
+ def writeout_cache(self):
+ """Overridden by HistoryManager to dump the cache before certain
+ database lookups."""
+ pass
+
+ ## -------------------------------
+ ## Methods for retrieving history:
+ ## -------------------------------
+ def _run_sql(self, sql, params, raw=True, output=False, latest=False):
+ """Prepares and runs an SQL query for the history database.
+
+ Parameters
+ ----------
+ sql : str
+ Any filtering expressions to go after SELECT ... FROM ...
+ params : tuple
+ Parameters passed to the SQL query (to replace "?")
+ raw, output : bool
+ See :meth:`get_range`
+ latest : bool
+ Select rows with max (session, line)
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ toget = 'source_raw' if raw else 'source'
+ sqlfrom = "history"
+ if output:
+ sqlfrom = "history LEFT JOIN output_history USING (session, line)"
+ toget = "history.%s, output_history.output" % toget
+ if latest:
+ toget += ", MAX(session * 128 * 1024 + line)"
+ this_query = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql
+ cur = self.db.execute(this_query, params)
+ if latest:
+ cur = (row[:-1] for row in cur)
+ if output: # Regroup into 3-tuples
+ return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
+ return cur
+
+ @only_when_enabled
+ @catch_corrupt_db
+ def get_session_info(self, session):
+ """Get info about a session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve.
+
+ Returns
+ -------
+ session_id : int
+ Session ID number
+ start : datetime
+ Timestamp for the start of the session.
+ end : datetime
+ Timestamp for the end of the session, or None if IPython crashed.
+ num_cmds : int
+ Number of commands run, or None if IPython crashed.
+ remark : unicode
+ A manually set description.
+ """
+ query = "SELECT * from sessions where session == ?"
+ return self.db.execute(query, (session,)).fetchone()
+
+ @catch_corrupt_db
+ def get_last_session_id(self):
+ """Get the last session ID currently in the database.
+
+ Within IPython, this should be the same as the value stored in
+ :attr:`HistoryManager.session_number`.
+ """
+ for record in self.get_tail(n=1, include_latest=True):
+ return record[0]
+
+ @catch_corrupt_db
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ """Get the last n lines from the history database.
+
+ Parameters
+ ----------
+ n : int
+ The number of lines to get
+ raw, output : bool
+ See :meth:`get_range`
+ include_latest : bool
+ If False (default), n+1 lines are fetched, and the latest one
+ is discarded. This is intended for the case where the function
+ is called by a user command, which should not itself be returned.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ self.writeout_cache()
+ if not include_latest:
+ n += 1
+ cur = self._run_sql(
+ "ORDER BY session DESC, line DESC LIMIT ?", (n,), raw=raw, output=output
+ )
+ if not include_latest:
+ return reversed(list(cur)[1:])
+ return reversed(list(cur))
+
+ @catch_corrupt_db
+ def search(self, pattern="*", raw=True, search_raw=True,
+ output=False, n=None, unique=False):
+ """Search the database using unix glob-style matching (wildcards
+ * and ?).
+
+ Parameters
+ ----------
+ pattern : str
+ The wildcarded pattern to match when searching
+ search_raw : bool
+ If True, search the raw input, otherwise, the parsed input
+ raw, output : bool
+ See :meth:`get_range`
+ n : None or int
+ If an integer is given, it defines the limit of
+ returned entries.
+ unique : bool
+ When it is true, return only unique entries.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ tosearch = "source_raw" if search_raw else "source"
+ if output:
+ tosearch = "history." + tosearch
+ self.writeout_cache()
+ sqlform = "WHERE %s GLOB ?" % tosearch
+ params = (pattern,)
+ if unique:
+ sqlform += ' GROUP BY {0}'.format(tosearch)
+ if n is not None:
+ sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
+ params += (n,)
+ elif unique:
+ sqlform += " ORDER BY session, line"
+ cur = self._run_sql(sqlform, params, raw=raw, output=output, latest=unique)
+ if n is not None:
+ return reversed(list(cur))
+ return cur
+
+ @catch_corrupt_db
+ def get_range(self, session, start=1, stop=None, raw=True,output=False):
+ """Retrieve input by session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve.
+ start : int
+ First line to retrieve.
+ stop : int
+ End of line range (excluded from output itself). If None, retrieve
+ to the end of the session.
+ raw : bool
+ If True, return untranslated input
+ output : bool
+ If True, attempt to include output. This will be 'real' Python
+ objects for the current session, or text reprs from previous
+ sessions if db_log_output was enabled at the time. Where no output
+ is found, None is used.
+
+ Returns
+ -------
+ entries
+ An iterator over the desired lines. Each line is a 3-tuple, either
+ (session, line, input) if output is False, or
+ (session, line, (input, output)) if output is True.
+ """
+ if stop:
+ lineclause = "line >= ? AND line < ?"
+ params = (session, start, stop)
+ else:
+ lineclause = "line>=?"
+ params = (session, start)
+
+ return self._run_sql("WHERE session==? AND %s" % lineclause,
+ params, raw=raw, output=output)
+
+ def get_range_by_str(self, rangestr, raw=True, output=False):
+ """Get lines of history from a string of ranges, as used by magic
+ commands %hist, %save, %macro, etc.
+
+ Parameters
+ ----------
+ rangestr : str
+ A string specifying ranges, e.g. "5 ~2/1-4". If an empty string is used,
+ this will return everything from the current session's history.
+
+ See the documentation of :func:`%history` for the full details.
+
+ raw, output : bool
+ As :meth:`get_range`
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ for sess, s, e in extract_hist_ranges(rangestr):
+ for line in self.get_range(sess, s, e, raw=raw, output=output):
+ yield line
+
+
+class HistoryManager(HistoryAccessor):
+ """A class to organize all history-related functionality in one place.
+ """
+ # Public interface
+
+ # An instance of the IPython shell we are attached to
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ # Lists to hold processed and raw history. These start with a blank entry
+ # so that we can index them starting from 1
+ input_hist_parsed = List([""])
+ input_hist_raw = List([""])
+ # A list of directories visited during session
+ dir_hist = List()
+ @default('dir_hist')
+ def _dir_hist_default(self):
+ try:
+ return [Path.cwd()]
+ except OSError:
+ return []
+
+ # A dict of output history, keyed with ints from the shell's
+ # execution count.
+ output_hist = Dict()
+ # The text/plain repr of outputs.
+ output_hist_reprs = Dict()
+
+ # The number of the current session in the history database
+ session_number = Integer()
+
+ db_log_output = Bool(False,
+ help="Should the history database include output? (default: no)"
+ ).tag(config=True)
+ db_cache_size = Integer(0,
+ help="Write to database every x commands (higher values save disk access & power).\n"
+ "Values of 1 or less effectively disable caching."
+ ).tag(config=True)
+ # The input and output caches
+ db_input_cache = List()
+ db_output_cache = List()
+
+ # History saving in separate thread
+ save_thread = Instance('IPython.core.history.HistorySavingThread',
+ allow_none=True)
+ save_flag = Instance(threading.Event, allow_none=True)
+
+ # Private interface
+ # Variables used to store the last three inputs from the user. On each new
+ # history update, we populate the user's namespace with these, shifted as
+ # necessary.
+ _i00 = Unicode(u'')
+ _i = Unicode(u'')
+ _ii = Unicode(u'')
+ _iii = Unicode(u'')
+
+ # A regex matching all forms of the exit command, so that we don't store
+ # them in the history (it's annoying to rewind the first entry and land on
+ # an exit call).
+ _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
+
+ def __init__(self, shell=None, config=None, **traits):
+ """Create a new history manager associated with a shell instance.
+ """
+ super(HistoryManager, self).__init__(shell=shell, config=config,
+ **traits)
+ self.save_flag = threading.Event()
+ self.db_input_cache_lock = threading.Lock()
+ self.db_output_cache_lock = threading.Lock()
+
+ try:
+ self.new_session()
+ except sqlite3.OperationalError:
+ self.log.error("Failed to create history session in %s. History will not be saved.",
+ self.hist_file, exc_info=True)
+ self.hist_file = ':memory:'
+
+ if self.enabled and self.hist_file != ':memory:':
+ self.save_thread = HistorySavingThread(self)
+ self.save_thread.start()
+
+ def _get_hist_file_name(self, profile=None):
+ """Get default history file name based on the Shell's profile.
+
+ The profile parameter is ignored, but must exist for compatibility with
+ the parent class."""
+ profile_dir = self.shell.profile_dir.location
+ return Path(profile_dir) / "history.sqlite"
+
+ @only_when_enabled
+ def new_session(self, conn=None):
+ """Get a new session number."""
+ if conn is None:
+ conn = self.db
+
+ with conn:
+ cur = conn.execute(
+ """INSERT INTO sessions VALUES (NULL, ?, NULL,
+ NULL, '') """,
+ (datetime.datetime.now(),),
+ )
+ self.session_number = cur.lastrowid
+
+ def end_session(self):
+ """Close the database session, filling in the end time and line count."""
+ self.writeout_cache()
+ with self.db:
+ self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
+ session==?""", (datetime.datetime.now(),
+ len(self.input_hist_parsed)-1, self.session_number))
+ self.session_number = 0
+
+ def name_session(self, name):
+ """Give the current session a name in the history database."""
+ with self.db:
+ self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
+ (name, self.session_number))
+
+ def reset(self, new_session=True):
+ """Clear the session history, releasing all object references, and
+ optionally open a new session."""
+ self.output_hist.clear()
+ # The directory history can't be completely empty
+ self.dir_hist[:] = [Path.cwd()]
+
+ if new_session:
+ if self.session_number:
+ self.end_session()
+ self.input_hist_parsed[:] = [""]
+ self.input_hist_raw[:] = [""]
+ self.new_session()
+
+ # ------------------------------
+ # Methods for retrieving history
+ # ------------------------------
+ def get_session_info(self, session=0):
+ """Get info about a session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve. The current session is 0, and negative
+ numbers count back from current session, so -1 is the previous session.
+
+ Returns
+ -------
+ session_id : int
+ Session ID number
+ start : datetime
+ Timestamp for the start of the session.
+ end : datetime
+ Timestamp for the end of the session, or None if IPython crashed.
+ num_cmds : int
+ Number of commands run, or None if IPython crashed.
+ remark : unicode
+ A manually set description.
+ """
+ if session <= 0:
+ session += self.session_number
+
+ return super(HistoryManager, self).get_session_info(session=session)
+
+ @catch_corrupt_db
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ """Get the last n lines from the history database.
+
+ Most recent entry last.
+
+ Entries will be reordered so that, where possible, the most recent
+ ones come from the current session.
+
+ Parameters
+ ----------
+ n : int
+ The number of lines to get
+ raw, output : bool
+ See :meth:`get_range`
+ include_latest : bool
+ If False (default), n+1 lines are fetched, and the latest one
+ is discarded. This is intended for the case where the function
+ is called by a user command, which should not itself be returned.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ self.writeout_cache()
+ if not include_latest:
+ n += 1
+ # cursor/line/entry
+ this_cur = list(
+ self._run_sql(
+ "WHERE session == ? ORDER BY line DESC LIMIT ? ",
+ (self.session_number, n),
+ raw=raw,
+ output=output,
+ )
+ )
+ other_cur = list(
+ self._run_sql(
+ "WHERE session != ? ORDER BY session DESC, line DESC LIMIT ?",
+ (self.session_number, n),
+ raw=raw,
+ output=output,
+ )
+ )
+
+ everything = this_cur + other_cur
+
+ everything = everything[:n]
+
+ if not include_latest:
+ return list(everything)[:0:-1]
+ return list(everything)[::-1]
+
+ def _get_range_session(self, start=1, stop=None, raw=True, output=False):
+ """Get input and output history from the current session. Called by
+ get_range, and takes similar parameters."""
+ input_hist = self.input_hist_raw if raw else self.input_hist_parsed
+
+ n = len(input_hist)
+ if start < 0:
+ start += n
+ if not stop or (stop > n):
+ stop = n
+ elif stop < 0:
+ stop += n
+
+ for i in range(start, stop):
+ if output:
+ line = (input_hist[i], self.output_hist_reprs.get(i))
+ else:
+ line = input_hist[i]
+ yield (0, i, line)
+
+ def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
+ """Retrieve input by session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve. The current session is 0, and negative
+ numbers count back from current session, so -1 is previous session.
+ start : int
+ First line to retrieve.
+ stop : int
+ End of line range (excluded from output itself). If None, retrieve
+ to the end of the session.
+ raw : bool
+ If True, return untranslated input
+ output : bool
+ If True, attempt to include output. This will be 'real' Python
+ objects for the current session, or text reprs from previous
+ sessions if db_log_output was enabled at the time. Where no output
+ is found, None is used.
+
+ Returns
+ -------
+ entries
+ An iterator over the desired lines. Each line is a 3-tuple, either
+ (session, line, input) if output is False, or
+ (session, line, (input, output)) if output is True.
+ """
+ if session <= 0:
+ session += self.session_number
+ if session==self.session_number: # Current session
+ return self._get_range_session(start, stop, raw, output)
+ return super(HistoryManager, self).get_range(session, start, stop, raw,
+ output)
+
+ ## ----------------------------
+ ## Methods for storing history:
+ ## ----------------------------
+ def store_inputs(self, line_num, source, source_raw=None):
+ """Store source and raw input in history and create input cache
+ variables ``_i*``.
+
+ Parameters
+ ----------
+ line_num : int
+ The prompt number of this input.
+ source : str
+ Python input.
+ source_raw : str, optional
+ If given, this is the raw input without any IPython transformations
+ applied to it. If not given, ``source`` is used.
+ """
+ if source_raw is None:
+ source_raw = source
+ source = source.rstrip('\n')
+ source_raw = source_raw.rstrip('\n')
+
+ # do not store exit/quit commands
+ if self._exit_re.match(source_raw.strip()):
+ return
+
+ self.input_hist_parsed.append(source)
+ self.input_hist_raw.append(source_raw)
+
+ with self.db_input_cache_lock:
+ self.db_input_cache.append((line_num, source, source_raw))
+ # Trigger to flush cache and write to DB.
+ if len(self.db_input_cache) >= self.db_cache_size:
+ self.save_flag.set()
+
+ # update the auto _i variables
+ self._iii = self._ii
+ self._ii = self._i
+ self._i = self._i00
+ self._i00 = source_raw
+
+ # hackish access to user namespace to create _i1,_i2... dynamically
+ new_i = '_i%s' % line_num
+ to_main = {'_i': self._i,
+ '_ii': self._ii,
+ '_iii': self._iii,
+ new_i : self._i00 }
+
+ if self.shell is not None:
+ self.shell.push(to_main, interactive=False)
+
+ def store_output(self, line_num):
+ """If database output logging is enabled, this saves all the
+ outputs from the indicated prompt number to the database. It's
+ called by run_cell after code has been executed.
+
+ Parameters
+ ----------
+ line_num : int
+ The line number from which to save outputs
+ """
+ if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
+ return
+ output = self.output_hist_reprs[line_num]
+
+ with self.db_output_cache_lock:
+ self.db_output_cache.append((line_num, output))
+ if self.db_cache_size <= 1:
+ self.save_flag.set()
+
+ def _writeout_input_cache(self, conn):
+ with conn:
+ for line in self.db_input_cache:
+ conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
+ (self.session_number,)+line)
+
+ def _writeout_output_cache(self, conn):
+ with conn:
+ for line in self.db_output_cache:
+ conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
+ (self.session_number,)+line)
+
+ @only_when_enabled
+ def writeout_cache(self, conn=None):
+ """Write any entries in the cache to the database."""
+ if conn is None:
+ conn = self.db
+
+ with self.db_input_cache_lock:
+ try:
+ self._writeout_input_cache(conn)
+ except sqlite3.IntegrityError:
+ self.new_session(conn)
+ print("ERROR! Session/line number was not unique in",
+ "database. History logging moved to new session",
+ self.session_number)
+ try:
+ # Try writing to the new session. If this fails, don't
+ # recurse
+ self._writeout_input_cache(conn)
+ except sqlite3.IntegrityError:
+ pass
+ finally:
+ self.db_input_cache = []
+
+ with self.db_output_cache_lock:
+ try:
+ self._writeout_output_cache(conn)
+ except sqlite3.IntegrityError:
+ print("!! Session/line number for output was not unique",
+ "in database. Output will not be stored.")
+ finally:
+ self.db_output_cache = []
+
+
+class HistorySavingThread(threading.Thread):
+ """This thread takes care of writing history to the database, so that
+ the UI isn't held up while that happens.
+
+ It waits for the HistoryManager's save_flag to be set, then writes out
+ the history cache. The main thread is responsible for setting the flag when
+ the cache size reaches a defined threshold."""
+ daemon = True
+ stop_now = False
+ enabled = True
+ def __init__(self, history_manager):
+ super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
+ self.history_manager = history_manager
+ self.enabled = history_manager.enabled
+ atexit.register(self.stop)
+
+ @only_when_enabled
+ def run(self):
+ # We need a separate db connection per thread:
+ try:
+ self.db = sqlite3.connect(
+ str(self.history_manager.hist_file),
+ **self.history_manager.connection_options,
+ )
+ while True:
+ self.history_manager.save_flag.wait()
+ if self.stop_now:
+ self.db.close()
+ return
+ self.history_manager.save_flag.clear()
+ self.history_manager.writeout_cache(self.db)
+ except Exception as e:
+ print(("The history saving thread hit an unexpected error (%s)."
+ "History will not be written to the database.") % repr(e))
+
+ def stop(self):
+ """This can be called from the main thread to safely stop this thread.
+
+ Note that it does not attempt to write out remaining history before
+ exiting. That should be done by calling the HistoryManager's
+ end_session method."""
+ self.stop_now = True
+ self.history_manager.save_flag.set()
+ self.join()
+
+
+# To match, e.g. ~5/8-~2/3
+range_re = re.compile(r"""
+((?P<startsess>~?\d+)/)?
+(?P<start>\d+)?
+((?P<sep>[\-:])
+ ((?P<endsess>~?\d+)/)?
+ (?P<end>\d+))?
+$""", re.VERBOSE)
+
+
+def extract_hist_ranges(ranges_str):
+ """Turn a string of history ranges into 3-tuples of (session, start, stop).
+
+ An empty string results in `[(0, 1, None)]`, i.e. "everything from the
+ current session".
+
+ Examples
+ --------
+ >>> list(extract_hist_ranges("~8/5-~7/4 2"))
+ [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
+ """
+ if ranges_str == "":
+ yield (0, 1, None) # Everything from current session
+ return
+
+ for range_str in ranges_str.split():
+ rmatch = range_re.match(range_str)
+ if not rmatch:
+ continue
+ start = rmatch.group("start")
+ if start:
+ start = int(start)
+ end = rmatch.group("end")
+ # If no end specified, get (a, a + 1)
+ end = int(end) if end else start + 1
+ else: # start not specified
+ if not rmatch.group('startsess'): # no startsess
+ continue
+ start = 1
+ end = None # provide the entire session hist
+
+ if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
+ end += 1
+ startsess = rmatch.group("startsess") or "0"
+ endsess = rmatch.group("endsess") or startsess
+ startsess = int(startsess.replace("~","-"))
+ endsess = int(endsess.replace("~","-"))
+ assert endsess >= startsess, "start session must not be later than end session"
+
+ if endsess == startsess:
+ yield (startsess, start, end)
+ continue
+ # Multiple sessions in one range:
+ yield (startsess, start, None)
+ for sess in range(startsess+1, endsess):
+ yield (sess, 1, None)
+ yield (endsess, 1, end)
+
+
+def _format_lineno(session, line):
+ """Helper function to format line numbers properly."""
+ if session == 0:
+ return str(line)
+ return "%s#%s" % (session, line)
diff --git a/contrib/python/ipython/py3/IPython/core/historyapp.py b/contrib/python/ipython/py3/IPython/core/historyapp.py
new file mode 100644
index 0000000000..01a55343f8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/historyapp.py
@@ -0,0 +1,161 @@
+# encoding: utf-8
+"""
+An application for managing IPython history.
+
+To be invoked as the `ipython history` subcommand.
+"""
+
+import sqlite3
+from pathlib import Path
+
+from traitlets.config.application import Application
+from .application import BaseIPythonApplication
+from traitlets import Bool, Int, Dict
+from ..utils.io import ask_yes_no
+
+trim_hist_help = """Trim the IPython history database to the last 1000 entries.
+
+This actually copies the last 1000 entries to a new database, and then replaces
+the old file with the new. Use the `--keep=` argument to specify a number
+other than 1000.
+"""
+
+clear_hist_help = """Clear the IPython history database, deleting all entries.
+
+Because this is a destructive operation, IPython will prompt the user if they
+really want to do this. Passing a `-f` flag will force clearing without a
+prompt.
+
+This is a handy alias for `ipython history trim --keep=0`.
+"""
+
+
+class HistoryTrim(BaseIPythonApplication):
+ description = trim_hist_help
+
+ backup = Bool(False,
+ help="Keep the old history file as history.sqlite.<N>"
+ ).tag(config=True)
+
+ keep = Int(1000,
+ help="Number of recent lines to keep in the database."
+ ).tag(config=True)
+
+ flags = Dict(dict(
+ backup = ({'HistoryTrim' : {'backup' : True}},
+ backup.help
+ )
+ ))
+
+ aliases=Dict(dict(
+ keep = 'HistoryTrim.keep'
+ ))
+
+ def start(self):
+ profile_dir = Path(self.profile_dir.location)
+ hist_file = profile_dir / "history.sqlite"
+ con = sqlite3.connect(hist_file)
+
+ # Grab the recent history from the current database.
+ inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
+ 'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
+ if len(inputs) <= self.keep:
+ print("There are already at most %d entries in the history database." % self.keep)
+ print("Not doing anything. Use --keep= argument to keep fewer entries")
+ return
+
+ print("Trimming history to the most recent %d entries." % self.keep)
+
+ inputs.pop() # Remove the extra element we got to check the length.
+ inputs.reverse()
+ if inputs:
+ first_session = inputs[0][0]
+ outputs = list(con.execute('SELECT session, line, output FROM '
+ 'output_history WHERE session >= ?', (first_session,)))
+ sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
+ 'sessions WHERE session >= ?', (first_session,)))
+ con.close()
+
+ # Create the new history database.
+ new_hist_file = profile_dir / "history.sqlite.new"
+ i = 0
+ while new_hist_file.exists():
+ # Make sure we don't interfere with an existing file.
+ i += 1
+ new_hist_file = profile_dir / ("history.sqlite.new" + str(i))
+ new_db = sqlite3.connect(new_hist_file)
+ new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
+ primary key autoincrement, start timestamp,
+ end timestamp, num_cmds integer, remark text)""")
+ new_db.execute("""CREATE TABLE IF NOT EXISTS history
+ (session integer, line integer, source text, source_raw text,
+ PRIMARY KEY (session, line))""")
+ new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
+ (session integer, line integer, output text,
+ PRIMARY KEY (session, line))""")
+ new_db.commit()
+
+
+ if inputs:
+ with new_db:
+ # Add the recent history into the new database.
+ new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
+ new_db.executemany('insert into history values (?,?,?,?)', inputs)
+ new_db.executemany('insert into output_history values (?,?,?)', outputs)
+ new_db.close()
+
+ if self.backup:
+ i = 1
+ backup_hist_file = profile_dir / ("history.sqlite.old.%d" % i)
+ while backup_hist_file.exists():
+ i += 1
+ backup_hist_file = profile_dir / ("history.sqlite.old.%d" % i)
+ hist_file.rename(backup_hist_file)
+ print("Backed up longer history file to", backup_hist_file)
+ else:
+ hist_file.unlink()
+
+ new_hist_file.rename(hist_file)
+
+class HistoryClear(HistoryTrim):
+ description = clear_hist_help
+ keep = Int(0,
+ help="Number of recent lines to keep in the database.")
+
+ force = Bool(False,
+ help="Don't prompt user for confirmation"
+ ).tag(config=True)
+
+ flags = Dict(dict(
+ force = ({'HistoryClear' : {'force' : True}},
+ force.help),
+ f = ({'HistoryTrim' : {'force' : True}},
+ force.help
+ )
+ ))
+ aliases = Dict()
+
+ def start(self):
+ if self.force or ask_yes_no("Really delete all ipython history? ",
+ default="no", interrupt="no"):
+ HistoryTrim.start(self)
+
+class HistoryApp(Application):
+ name = u'ipython-history'
+ description = "Manage the IPython history database."
+
+ subcommands = Dict(dict(
+ trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
+ clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
+ ))
+
+ def start(self):
+ if self.subapp is None:
+ print("No subcommand specified. Must specify one of: %s" % \
+ (self.subcommands.keys()))
+ print()
+ self.print_description()
+ self.print_subcommands()
+ self.exit(1)
+ else:
+ return self.subapp.start()
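+
+
+# Illustrative entry point sketch (added for this review, not part of upstream
+# IPython, which exposes this app as the `ipython history` subcommand): the
+# traitlets Application.launch_instance classmethod parses argv and dispatches
+# to the trim/clear subcommands defined above.
+if __name__ == "__main__":  # pragma: no cover - demo only
+    HistoryApp.launch_instance()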
diff --git a/contrib/python/ipython/py3/IPython/core/hooks.py b/contrib/python/ipython/py3/IPython/core/hooks.py
new file mode 100644
index 0000000000..f73c565763
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/hooks.py
@@ -0,0 +1,173 @@
+"""Hooks for IPython.
+
+In Python, it is possible to overwrite any method of any object if you really
+want to. But IPython exposes a few 'hooks', methods which are *designed* to
+be overwritten by users for customization purposes. This module defines the
+default versions of all such hooks, which get used by IPython if not
+overridden by the user.
+
+Hooks are simple functions, but they should be declared with ``self`` as their
+first argument, because when activated they are registered into IPython as
+instance methods. The self argument will be the running IPython instance
+itself, so hooks have full access to the entire IPython object.
+
+If you wish to define a new hook and activate it, you can make an :doc:`extension
+</config/extensions/index>` or a :ref:`startup script <startup_files>`. For
+example, you could use a startup file like this::
+
+ import os
+
+ def calljed(self,filename, linenum):
+ "My editor hook calls the jed editor directly."
+ print "Calling my own editor, jed ..."
+ if os.system('jed +%d %s' % (linenum,filename)) != 0:
+ raise TryNext()
+
+ def load_ipython_extension(ip):
+ ip.set_hook('editor', calljed)
+
+"""
+
+#*****************************************************************************
+# Copyright (C) 2005 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import os
+import subprocess
+import sys
+
+from .error import TryNext
+
+# List here all the default hooks. For now it's just the editor functions
+# but over time we'll move here all the public API for user-accessible things.
+
+__all__ = [
+ "editor",
+ "synchronize_with_editor",
+ "show_in_pager",
+ "pre_prompt_hook",
+ "clipboard_get",
+]
+
+deprecated = {'pre_run_code_hook': "a callback for the 'pre_execute' or 'pre_run_cell' event",
+ 'late_startup_hook': "a callback for the 'shell_initialized' event",
+ 'shutdown_hook': "the atexit module",
+ }
+
+def editor(self, filename, linenum=None, wait=True):
+ """Open the default editor at the given filename and linenumber.
+
+ This is IPython's default editor hook; you can use it as an example to
+ write your own modified one. To set your own editor function as the
+ new editor hook, call ip.set_hook('editor',yourfunc)."""
+
+ # IPython configures a default editor at startup by reading $EDITOR from
+ # the environment, and falling back on vi (unix) or notepad (win32).
+ editor = self.editor
+
+ # marker for the line at which to open the file (for existing objects)
+ if linenum is None or editor=='notepad':
+ linemark = ''
+ else:
+ linemark = '+%d' % int(linenum)
+
+ # Enclose in quotes if necessary and legal
+ if ' ' in editor and os.path.isfile(editor) and editor[0] != '"':
+ editor = '"%s"' % editor
+
+ # Call the actual editor
+ proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename),
+ shell=True)
+ if wait and proc.wait() != 0:
+ raise TryNext()
+
+
+def synchronize_with_editor(self, filename, linenum, column):
+ pass
+
+
+class CommandChainDispatcher:
+ """ Dispatch calls to a chain of commands until some func can handle it
+
+ Usage: instantiate, call ``add`` to add commands (with an optional
+ priority), then call the instance itself like a normal function.
+
+ """
+ def __init__(self,commands=None):
+ if commands is None:
+ self.chain = []
+ else:
+ self.chain = commands
+
+
+ def __call__(self,*args, **kw):
+ """ Command chain is called just like normal func.
+
+ This will call all funcs in the chain with the same args as were given to
+ this function, and return the result of the first func that didn't raise
+ TryNext."""
+ last_exc = TryNext()
+ for prio,cmd in self.chain:
+ #print "prio",prio,"cmd",cmd #dbg
+ try:
+ return cmd(*args, **kw)
+ except TryNext as exc:
+ last_exc = exc
+ # if no function will accept it, raise TryNext up to the caller
+ raise last_exc
+
+ def __str__(self):
+ return str(self.chain)
+
+ def add(self, func, priority=0):
+ """ Add a func to the cmd chain with given priority """
+ self.chain.append((priority, func))
+ self.chain.sort(key=lambda x: x[0])
+
+ def __iter__(self):
+ """ Return all objects in chain.
+
+ Handy if the objects are not callable.
+ """
+ return iter(self.chain)
+
+
+def show_in_pager(self, data, start, screen_lines):
+ """ Run a string through pager """
+ # raising TryNext here will use the default paging functionality
+ raise TryNext
+
+
+def pre_prompt_hook(self):
+ """ Run before displaying the next prompt
+
+ Use this e.g. to display output from asynchronous operations (in order
+ to not mess up text entry)
+ """
+
+ return None
+
+
+def clipboard_get(self):
+ """ Get text from the clipboard.
+ """
+ from ..lib.clipboard import (
+ osx_clipboard_get,
+ tkinter_clipboard_get,
+ win32_clipboard_get,
+ wayland_clipboard_get,
+ )
+ if sys.platform == 'win32':
+ chain = [win32_clipboard_get, tkinter_clipboard_get]
+ elif sys.platform == 'darwin':
+ chain = [osx_clipboard_get, tkinter_clipboard_get]
+ else:
+ chain = [wayland_clipboard_get, tkinter_clipboard_get]
+ dispatcher = CommandChainDispatcher()
+ for func in chain:
+ dispatcher.add(func)
+ text = dispatcher()
+ return text
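+
+
+# Illustrative usage sketch (added for this review, not part of upstream IPython):
+# the dispatcher tries callables in priority order and returns the first result
+# that does not raise TryNext; the two demo functions below are made up.
+if __name__ == "__main__":  # pragma: no cover - demo only
+    def _always_defer(*args, **kw):
+        raise TryNext()
+
+    def _fallback(*args, **kw):
+        return "handled by fallback"
+
+    _demo = CommandChainDispatcher()
+    _demo.add(_always_defer, priority=0)
+    _demo.add(_fallback, priority=10)
+    assert _demo() == "handled by fallback"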
diff --git a/contrib/python/ipython/py3/IPython/core/inputsplitter.py b/contrib/python/ipython/py3/IPython/core/inputsplitter.py
new file mode 100644
index 0000000000..10707d3d6b
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/inputsplitter.py
@@ -0,0 +1,773 @@
+"""DEPRECATED: Input handling and transformation machinery.
+
+This module was deprecated in IPython 7.0, in favour of inputtransformer2.
+
+The first class in this module, :class:`InputSplitter`, is designed to tell when
+input from a line-oriented frontend is complete and should be executed, and when
+the user should be prompted for another line of code instead. The name 'input
+splitter' is largely for historical reasons.
+
+A companion, :class:`IPythonInputSplitter`, provides the same functionality but
+with full support for the extended IPython syntax (magics, system calls, etc).
+The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
+:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
+and stores the results.
+
+For more details, see the class docstrings below.
+"""
+
+from warnings import warn
+
+warn('IPython.core.inputsplitter is deprecated since IPython 7 in favor of `IPython.core.inputtransformer2`',
+ DeprecationWarning)
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import ast
+import codeop
+import io
+import re
+import sys
+import tokenize
+import warnings
+
+from typing import List
+
+from IPython.core.inputtransformer import (leading_indent,
+ classic_prompt,
+ ipy_prompt,
+ cellmagic,
+ assemble_logical_lines,
+ help_end,
+ escaped_commands,
+ assign_from_magic,
+ assign_from_system,
+ assemble_python_lines,
+ )
+
+# These are available in this module for backwards compatibility.
+from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
+ ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
+ ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+# FIXME: These are general-purpose utilities that later can be moved to the
+# general ward. Kept here for now because we're being very strict about test
+# coverage with this code, and this lets us ensure that we keep 100% coverage
+# while developing.
+
+# compiled regexps for autoindent management
+dedent_re = re.compile('|'.join([
+ r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
+ r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
+ r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
+ r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
+ r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
+ r'^\s+break\s*$', # break (optionally followed by trailing spaces)
+ r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
+]))
+ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
+
+# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
+# before pure comments
+comment_line_re = re.compile(r'^\s*\#')
+
+
+def num_ini_spaces(s):
+ """Return the number of initial spaces in a string.
+
+ Note that tabs are counted as a single space. For now, we do *not* support
+ mixing of tabs and spaces in the user's input.
+
+ Parameters
+ ----------
+ s : string
+
+ Returns
+ -------
+ n : int
+ """
+
+ ini_spaces = ini_spaces_re.match(s)
+ if ini_spaces:
+ return ini_spaces.end()
+ else:
+ return 0
+
+# Fake token types for partial_tokenize:
+INCOMPLETE_STRING = tokenize.N_TOKENS
+IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1
+
+# The 2 classes below have the same API as TokenInfo, but don't try to look up
+# a token type name that they won't find.
+class IncompleteString:
+ type = exact_type = INCOMPLETE_STRING
+ def __init__(self, s, start, end, line):
+ self.s = s
+ self.start = start
+ self.end = end
+ self.line = line
+
+class InMultilineStatement:
+ type = exact_type = IN_MULTILINE_STATEMENT
+ def __init__(self, pos, line):
+ self.s = ''
+ self.start = self.end = pos
+ self.line = line
+
+def partial_tokens(s):
+ """Iterate over tokens from a possibly-incomplete string of code.
+
+ This adds two special token types: INCOMPLETE_STRING and
+ IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and
+ represent the two main ways for code to be incomplete.
+ """
+ readline = io.StringIO(s).readline
+ token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
+ try:
+ for token in tokenize.generate_tokens(readline):
+ yield token
+ except tokenize.TokenError as e:
+ # catch EOF error
+ lines = s.splitlines(keepends=True)
+ end = len(lines), len(lines[-1])
+ if 'multi-line string' in e.args[0]:
+ l, c = start = token.end
+ s = lines[l-1][c:] + ''.join(lines[l:])
+ yield IncompleteString(s, start, end, lines[-1])
+ elif 'multi-line statement' in e.args[0]:
+ yield InMultilineStatement(end, lines[-1])
+ else:
+ raise
+
+def find_next_indent(code):
+ """Find the number of spaces for the next line of indentation"""
+ tokens = list(partial_tokens(code))
+ if tokens[-1].type == tokenize.ENDMARKER:
+ tokens.pop()
+ if not tokens:
+ return 0
+ while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
+ tokens.pop()
+
+ if tokens[-1].type == INCOMPLETE_STRING:
+ # Inside a multiline string
+ return 0
+
+ # Find the indents used before
+ prev_indents = [0]
+ def _add_indent(n):
+ if n != prev_indents[-1]:
+ prev_indents.append(n)
+
+ tokiter = iter(tokens)
+ for tok in tokiter:
+ if tok.type in {tokenize.INDENT, tokenize.DEDENT}:
+ _add_indent(tok.end[1])
+ elif (tok.type == tokenize.NL):
+ try:
+ _add_indent(next(tokiter).start[1])
+ except StopIteration:
+ break
+
+ last_indent = prev_indents.pop()
+
+ # If we've just opened a multiline statement (e.g. 'a = ['), indent more
+ if tokens[-1].type == IN_MULTILINE_STATEMENT:
+ if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}:
+ return last_indent + 4
+ return last_indent
+
+ if tokens[-1].exact_type == tokenize.COLON:
+ # Line ends with colon - indent
+ return last_indent + 4
+
+ if last_indent:
+ # Examine the last line for dedent cues - statements like return or
+ # raise which normally end a block of code.
+ last_line_starts = 0
+ for i, tok in enumerate(tokens):
+ if tok.type == tokenize.NEWLINE:
+ last_line_starts = i + 1
+
+ last_line_tokens = tokens[last_line_starts:]
+ names = [t.string for t in last_line_tokens if t.type == tokenize.NAME]
+ if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}:
+ # Find the most recent indentation less than the current level
+ for indent in reversed(prev_indents):
+ if indent < last_indent:
+ return indent
+
+ return last_indent
+
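+# Illustrative examples (added for clarity):
+#   find_next_indent("for i in range(3):") == 4   # line ends with a colon
+#   find_next_indent("if x:\n    pass") == 0      # dedent after 'pass'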
+
+def last_blank(src):
+ """Determine if the input source ends in a blank.
+
+ A blank is either a newline or a line consisting of whitespace.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline string.
+ """
+ if not src: return False
+ ll = src.splitlines()[-1]
+ return (ll == '') or ll.isspace()
+
+
+last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
+last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
+
+def last_two_blanks(src):
+ """Determine if the input source ends in two blanks.
+
+ A blank is either a newline or a line consisting of whitespace.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline string.
+ """
+ if not src: return False
+ # The logic here is tricky: I couldn't get a regexp to work and pass all
+ # the tests, so I took a different approach: split the source by lines,
+ # grab the last two and prepend '###\n' as a stand-in for whatever was in
+ # the body before the last two lines. Then, with that structure, it's
+ # possible to analyze with two regexps. Not the most elegant solution, but
+ # it works. If anyone tries to change this logic, make sure to validate
+ # the whole test suite first!
+ new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
+ return (bool(last_two_blanks_re.match(new_src)) or
+ bool(last_two_blanks_re2.match(new_src)) )
+
+
+def remove_comments(src):
+ """Remove all comments from input source.
+
+    Note: this is a simple regex-based implementation, so a '#' inside a
+    string is treated as the start of a comment and stripped as well.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline input string.
+
+ Returns
+ -------
+ String with all Python comments removed.
+ """
+
+ return re.sub('#.*', '', src)
+
+
+def get_input_encoding():
+ """Return the default standard input encoding.
+
+ If sys.stdin has no encoding, 'ascii' is returned."""
+ # There are strange environments for which sys.stdin.encoding is None. We
+ # ensure that a valid encoding is returned.
+ encoding = getattr(sys.stdin, 'encoding', None)
+ if encoding is None:
+ encoding = 'ascii'
+ return encoding
+
+#-----------------------------------------------------------------------------
+# Classes and functions for normal Python syntax handling
+#-----------------------------------------------------------------------------
+
+class InputSplitter(object):
+ r"""An object that can accumulate lines of Python source before execution.
+
+ This object is designed to be fed python source line-by-line, using
+ :meth:`push`. It will return on each push whether the currently pushed
+ code could be executed already. In addition, it provides a method called
+ :meth:`push_accepts_more` that can be used to query whether more input
+ can be pushed into a single interactive block.
+
+ This is a simple example of how an interactive terminal-based client can use
+ this tool::
+
+ isp = InputSplitter()
+ while isp.push_accepts_more():
+ indent = ' '*isp.indent_spaces
+ prompt = '>>> ' + indent
+            line = indent + input(prompt)
+ isp.push(line)
+        print('Input source was:\n', isp.source_reset(), end='')
+ """
+ # A cache for storing the current indentation
+ # The first value stores the most recently processed source input
+ # The second value is the number of spaces for the current indentation
+ # If self.source matches the first value, the second value is a valid
+ # current indentation. Otherwise, the cache is invalid and the indentation
+ # must be recalculated.
+ _indent_spaces_cache = None, None
+ # String, indicating the default input encoding. It is computed by default
+ # at initialization time via get_input_encoding(), but it can be reset by a
+ # client with specific knowledge of the encoding.
+ encoding = ''
+ # String where the current full source input is stored, properly encoded.
+    # Reading this attribute is the normal way of querying the currently pushed
+    # source code.
+ source = ''
+ # Code object corresponding to the current source. It is automatically
+ # synced to the source, so it can be queried at any time to obtain the code
+ # object; it will be None if the source doesn't compile to valid Python.
+ code = None
+
+ # Private attributes
+
+ # List with lines of input accumulated so far
+ _buffer: List[str]
+ # Command compiler
+ _compile: codeop.CommandCompiler
+ # Boolean indicating whether the current block is complete
+ _is_complete = None
+ # Boolean indicating whether the current block has an unrecoverable syntax error
+ _is_invalid = False
+
+ def __init__(self) -> None:
+ """Create a new InputSplitter instance."""
+ self._buffer = []
+ self._compile = codeop.CommandCompiler()
+ self.encoding = get_input_encoding()
+
+ def reset(self):
+ """Reset the input buffer and associated state."""
+ self._buffer[:] = []
+ self.source = ''
+ self.code = None
+ self._is_complete = False
+ self._is_invalid = False
+
+ def source_reset(self):
+ """Return the input source and perform a full reset.
+ """
+ out = self.source
+ self.reset()
+ return out
+
+ def check_complete(self, source):
+ """Return whether a block of code is ready to execute, or should be continued
+
+ This is a non-stateful API, and will reset the state of this InputSplitter.
+
+ Parameters
+ ----------
+ source : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+ One of 'complete', 'incomplete', or 'invalid' if source is not a
+ prefix of valid code.
+ indent_spaces : int or None
+ The number of spaces by which to indent the next line of code. If
+ status is not 'incomplete', this is None.
+ """
+ self.reset()
+ try:
+ self.push(source)
+ except SyntaxError:
+ # Transformers in IPythonInputSplitter can raise SyntaxError,
+ # which push() will not catch.
+ return 'invalid', None
+ else:
+ if self._is_invalid:
+ return 'invalid', None
+ elif self.push_accepts_more():
+ return 'incomplete', self.get_indent_spaces()
+ else:
+ return 'complete', None
+ finally:
+ self.reset()
+
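+    # Illustrative examples (added for clarity):
+    #   check_complete("for i in range(3):\n") -> ('incomplete', 4)
+    #   check_complete("a = 1\n")              -> ('complete', None)
+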
+ def push(self, lines:str) -> bool:
+ """Push one or more lines of input.
+
+ This stores the given lines and returns a status code indicating
+ whether the code forms a complete Python block or not.
+
+ Any exceptions generated in compilation are swallowed, but if an
+ exception was produced, the method returns True.
+
+ Parameters
+ ----------
+ lines : string
+ One or more lines of Python input.
+
+ Returns
+ -------
+ is_complete : boolean
+ True if the current input source (the result of the current input
+ plus prior inputs) forms a complete Python execution block. Note that
+ this value is also stored as a private attribute (``_is_complete``), so it
+ can be queried at any time.
+ """
+ assert isinstance(lines, str)
+ self._store(lines)
+ source = self.source
+
+ # Before calling _compile(), reset the code object to None so that if an
+ # exception is raised in compilation, we don't mislead by having
+ # inconsistent code/source attributes.
+ self.code, self._is_complete = None, None
+ self._is_invalid = False
+
+ # Honor termination lines properly
+ if source.endswith('\\\n'):
+ return False
+
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', SyntaxWarning)
+ self.code = self._compile(source, symbol="exec")
+ # Invalid syntax can produce any of a number of different errors from
+ # inside the compiler, so we have to catch them all. Syntax errors
+ # immediately produce a 'ready' block, so the invalid Python can be
+ # sent to the kernel for evaluation with possible ipython
+ # special-syntax conversion.
+ except (SyntaxError, OverflowError, ValueError, TypeError,
+ MemoryError, SyntaxWarning):
+ self._is_complete = True
+ self._is_invalid = True
+ else:
+ # Compilation didn't produce any exceptions (though it may not have
+ # given a complete code object)
+ self._is_complete = self.code is not None
+
+ return self._is_complete
+
+ def push_accepts_more(self):
+ """Return whether a block of interactive input can accept more input.
+
+        This method is meant to be used by line-oriented frontends, which need
+        to guess whether a block is complete or not based solely on prior and
+        current input lines. The InputSplitter considers an interactive block
+        complete, and will not accept more input, when any of the following holds:
+
+ * A SyntaxError is raised
+
+ * The code is complete and consists of a single line or a single
+ non-compound statement
+
+ * The code is complete and has a blank line at the end
+
+ If the current input produces a syntax error, this method immediately
+ returns False but does *not* raise the syntax error exception, as
+ typically clients will want to send invalid syntax to an execution
+ backend which might convert the invalid syntax into valid Python via
+ one of the dynamic IPython mechanisms.
+ """
+
+ # With incomplete input, unconditionally accept more
+ # A syntax error also sets _is_complete to True - see push()
+ if not self._is_complete:
+ #print("Not complete") # debug
+ return True
+
+ # The user can make any (complete) input execute by leaving a blank line
+ last_line = self.source.splitlines()[-1]
+ if (not last_line) or last_line.isspace():
+ #print("Blank line") # debug
+ return False
+
+ # If there's just a single line or AST node, and we're flush left, as is
+ # the case after a simple statement such as 'a=1', we want to execute it
+ # straight away.
+ if self.get_indent_spaces() == 0:
+ if len(self.source.splitlines()) <= 1:
+ return False
+
+ try:
+ code_ast = ast.parse("".join(self._buffer))
+ except Exception:
+ #print("Can't parse AST") # debug
+ return False
+ else:
+ if len(code_ast.body) == 1 and \
+ not hasattr(code_ast.body[0], 'body'):
+ #print("Simple statement") # debug
+ return False
+
+ # General fallback - accept more code
+ return True
+
+ def get_indent_spaces(self):
+ sourcefor, n = self._indent_spaces_cache
+ if sourcefor == self.source:
+ return n
+
+ # self.source always has a trailing newline
+ n = find_next_indent(self.source[:-1])
+ self._indent_spaces_cache = (self.source, n)
+ return n
+
+ # Backwards compatibility. I think all code that used .indent_spaces was
+ # inside IPython, but we can leave this here until IPython 7 in case any
+ # other modules are using it. -TK, November 2017
+ indent_spaces = property(get_indent_spaces)
+
+ def _store(self, lines, buffer=None, store='source'):
+ """Store one or more lines of input.
+
+ If input lines are not newline-terminated, a newline is automatically
+ appended."""
+
+ if buffer is None:
+ buffer = self._buffer
+
+ if lines.endswith('\n'):
+ buffer.append(lines)
+ else:
+ buffer.append(lines+'\n')
+ setattr(self, store, self._set_source(buffer))
+
+ def _set_source(self, buffer):
+ return u''.join(buffer)
+
+
+class IPythonInputSplitter(InputSplitter):
+ """An input splitter that recognizes all of IPython's special syntax."""
+
+ # String with raw, untransformed input.
+ source_raw = ''
+
+ # Flag to track when a transformer has stored input that it hasn't given
+ # back yet.
+ transformer_accumulating = False
+
+ # Flag to track when assemble_python_lines has stored input that it hasn't
+ # given back yet.
+ within_python_line = False
+
+ # Private attributes
+
+ # List with lines of raw input accumulated so far.
+ _buffer_raw = None
+
+ def __init__(self, line_input_checker=True, physical_line_transforms=None,
+ logical_line_transforms=None, python_line_transforms=None):
+ super(IPythonInputSplitter, self).__init__()
+ self._buffer_raw = []
+ self._validate = True
+
+ if physical_line_transforms is not None:
+ self.physical_line_transforms = physical_line_transforms
+ else:
+ self.physical_line_transforms = [
+ leading_indent(),
+ classic_prompt(),
+ ipy_prompt(),
+ cellmagic(end_on_blank_line=line_input_checker),
+ ]
+
+ self.assemble_logical_lines = assemble_logical_lines()
+ if logical_line_transforms is not None:
+ self.logical_line_transforms = logical_line_transforms
+ else:
+ self.logical_line_transforms = [
+ help_end(),
+ escaped_commands(),
+ assign_from_magic(),
+ assign_from_system(),
+ ]
+
+ self.assemble_python_lines = assemble_python_lines()
+ if python_line_transforms is not None:
+ self.python_line_transforms = python_line_transforms
+ else:
+ # We don't use any of these at present
+ self.python_line_transforms = []
+
+ @property
+ def transforms(self):
+ "Quick access to all transformers."
+ return self.physical_line_transforms + \
+ [self.assemble_logical_lines] + self.logical_line_transforms + \
+ [self.assemble_python_lines] + self.python_line_transforms
+
+ @property
+ def transforms_in_use(self):
+ """Transformers, excluding logical line transformers if we're in a
+ Python line."""
+ t = self.physical_line_transforms[:]
+ if not self.within_python_line:
+ t += [self.assemble_logical_lines] + self.logical_line_transforms
+ return t + [self.assemble_python_lines] + self.python_line_transforms
+
+ def reset(self):
+ """Reset the input buffer and associated state."""
+ super(IPythonInputSplitter, self).reset()
+ self._buffer_raw[:] = []
+ self.source_raw = ''
+ self.transformer_accumulating = False
+ self.within_python_line = False
+
+ for t in self.transforms:
+ try:
+ t.reset()
+ except SyntaxError:
+ # Nothing that calls reset() expects to handle transformer
+ # errors
+ pass
+
+ def flush_transformers(self):
+ def _flush(transform, outs):
+ """yield transformed lines
+
+ always strings, never None
+
+ transform: the current transform
+ outs: an iterable of previously transformed inputs.
+ Each may be multiline, which will be passed
+ one line at a time to transform.
+ """
+ for out in outs:
+ for line in out.splitlines():
+ # push one line at a time
+ tmp = transform.push(line)
+ if tmp is not None:
+ yield tmp
+
+ # reset the transform
+ tmp = transform.reset()
+ if tmp is not None:
+ yield tmp
+
+ out = []
+ for t in self.transforms_in_use:
+ out = _flush(t, out)
+
+ out = list(out)
+ if out:
+ self._store('\n'.join(out))
+
+ def raw_reset(self):
+ """Return raw input only and perform a full reset.
+ """
+ out = self.source_raw
+ self.reset()
+ return out
+
+ def source_reset(self):
+ try:
+ self.flush_transformers()
+ return self.source
+ finally:
+ self.reset()
+
+ def push_accepts_more(self):
+ if self.transformer_accumulating:
+ return True
+ else:
+ return super(IPythonInputSplitter, self).push_accepts_more()
+
+ def transform_cell(self, cell):
+ """Process and translate a cell of input.
+ """
+ self.reset()
+ try:
+ self.push(cell)
+ self.flush_transformers()
+ return self.source
+ finally:
+ self.reset()
+
+ def push(self, lines:str) -> bool:
+ """Push one or more lines of IPython input.
+
+ This stores the given lines and returns a status code indicating
+ whether the code forms a complete Python block or not, after processing
+ all input lines for special IPython syntax.
+
+ Any exceptions generated in compilation are swallowed, but if an
+ exception was produced, the method returns True.
+
+ Parameters
+ ----------
+ lines : string
+ One or more lines of Python input.
+
+ Returns
+ -------
+ is_complete : boolean
+ True if the current input source (the result of the current input
+ plus prior inputs) forms a complete Python execution block. Note that
+ this value is also stored as a private attribute (_is_complete), so it
+ can be queried at any time.
+ """
+ assert isinstance(lines, str)
+ # We must ensure all input is pure unicode
+ # ''.splitlines() --> [], but we need to push the empty line to transformers
+ lines_list = lines.splitlines()
+ if not lines_list:
+ lines_list = ['']
+
+ # Store raw source before applying any transformations to it. Note
+ # that this must be done *after* the reset() call that would otherwise
+ # flush the buffer.
+ self._store(lines, self._buffer_raw, 'source_raw')
+
+ transformed_lines_list = []
+ for line in lines_list:
+ transformed = self._transform_line(line)
+ if transformed is not None:
+ transformed_lines_list.append(transformed)
+
+ if transformed_lines_list:
+ transformed_lines = '\n'.join(transformed_lines_list)
+ return super(IPythonInputSplitter, self).push(transformed_lines)
+ else:
+ # Got nothing back from transformers - they must be waiting for
+ # more input.
+ return False
+
+ def _transform_line(self, line):
+ """Push a line of input code through the various transformers.
+
+ Returns any output from the transformers, or None if a transformer
+ is accumulating lines.
+
+ Sets self.transformer_accumulating as a side effect.
+ """
+ def _accumulating(dbg):
+ #print(dbg)
+ self.transformer_accumulating = True
+ return None
+
+ for transformer in self.physical_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ if not self.within_python_line:
+ line = self.assemble_logical_lines.push(line)
+ if line is None:
+ return _accumulating('acc logical line')
+
+ for transformer in self.logical_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ line = self.assemble_python_lines.push(line)
+ if line is None:
+ self.within_python_line = True
+ return _accumulating('acc python line')
+ else:
+ self.within_python_line = False
+
+ for transformer in self.python_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ #print("transformers clear") #debug
+ self.transformer_accumulating = False
+ return line
+
diff --git a/contrib/python/ipython/py3/IPython/core/inputtransformer.py b/contrib/python/ipython/py3/IPython/core/inputtransformer.py
new file mode 100644
index 0000000000..77f69f388f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/inputtransformer.py
@@ -0,0 +1,536 @@
+"""DEPRECATED: Input transformer classes to support IPython special syntax.
+
+This module was deprecated in IPython 7.0, in favour of inputtransformer2.
+
+This includes the machinery to recognise and transform ``%magic`` commands,
+``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
+"""
+import abc
+import functools
+import re
+import tokenize
+from tokenize import generate_tokens, untokenize, TokenError
+from io import StringIO
+
+from IPython.core.splitinput import LineInfo
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# The escape sequences that define the syntax transformations IPython will
+# apply to user input. These can NOT be just changed here: many regular
+# expressions and other parts of the code may use their hardcoded values, and
+# for all intents and purposes they constitute the 'IPython syntax', so they
+# should be considered fixed.
+
+ESC_SHELL = '!' # Send line to underlying system shell
+ESC_SH_CAP = '!!' # Send line to system shell and capture output
+ESC_HELP = '?' # Find information about object
+ESC_HELP2 = '??' # Find extra-detailed information about object
+ESC_MAGIC = '%' # Call magic function
+ESC_MAGIC2 = '%%' # Call cell-magic function
+ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
+ESC_QUOTE2 = ';' # Quote all args as a single string, call
+ESC_PAREN = '/' # Call first argument with rest of line as arguments
+
+ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
+ ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
+ ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
+
+
+class InputTransformer(metaclass=abc.ABCMeta):
+ """Abstract base class for line-based input transformers."""
+
+ @abc.abstractmethod
+ def push(self, line):
+ """Send a line of input to the transformer, returning the transformed
+ input or None if the transformer is waiting for more input.
+
+ Must be overridden by subclasses.
+
+ Implementations may raise ``SyntaxError`` if the input is invalid. No
+ other exceptions may be raised.
+ """
+ pass
+
+ @abc.abstractmethod
+ def reset(self):
+ """Return, transformed any lines that the transformer has accumulated,
+ and reset its internal state.
+
+ Must be overridden by subclasses.
+ """
+ pass
+
+ @classmethod
+ def wrap(cls, func):
+ """Can be used by subclasses as a decorator, to return a factory that
+ will allow instantiation with the decorated object.
+ """
+ @functools.wraps(func)
+ def transformer_factory(**kwargs):
+ return cls(func, **kwargs)
+
+ return transformer_factory
+
+class StatelessInputTransformer(InputTransformer):
+ """Wrapper for a stateless input transformer implemented as a function."""
+ def __init__(self, func):
+ self.func = func
+
+ def __repr__(self):
+ return "StatelessInputTransformer(func={0!r})".format(self.func)
+
+ def push(self, line):
+ """Send a line of input to the transformer, returning the
+ transformed input."""
+ return self.func(line)
+
+ def reset(self):
+ """No-op - exists for compatibility."""
+ pass
+
+class CoroutineInputTransformer(InputTransformer):
+ """Wrapper for an input transformer implemented as a coroutine."""
+ def __init__(self, coro, **kwargs):
+ # Prime it
+ self.coro = coro(**kwargs)
+ next(self.coro)
+
+ def __repr__(self):
+ return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
+
+ def push(self, line):
+ """Send a line of input to the transformer, returning the
+ transformed input or None if the transformer is waiting for more
+ input.
+ """
+ return self.coro.send(line)
+
+ def reset(self):
+ """Return, transformed any lines that the transformer has
+ accumulated, and reset its internal state.
+ """
+ return self.coro.send(None)
+
+class TokenInputTransformer(InputTransformer):
+ """Wrapper for a token-based input transformer.
+
+ func should accept a list of tokens (5-tuples, see tokenize docs), and
+ return an iterable which can be passed to tokenize.untokenize().
+ """
+ def __init__(self, func):
+ self.func = func
+ self.buf = []
+ self.reset_tokenizer()
+
+ def reset_tokenizer(self):
+ it = iter(self.buf)
+ self.tokenizer = generate_tokens(it.__next__)
+
+ def push(self, line):
+ self.buf.append(line + '\n')
+ if all(l.isspace() for l in self.buf):
+ return self.reset()
+
+ tokens = []
+ stop_at_NL = False
+ try:
+ for intok in self.tokenizer:
+ tokens.append(intok)
+ t = intok[0]
+ if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
+ # Stop before we try to pull a line we don't have yet
+ break
+ elif t == tokenize.ERRORTOKEN:
+ stop_at_NL = True
+ except TokenError:
+ # Multi-line statement - stop and try again with the next line
+ self.reset_tokenizer()
+ return None
+
+ return self.output(tokens)
+
+ def output(self, tokens):
+ self.buf.clear()
+ self.reset_tokenizer()
+ return untokenize(self.func(tokens)).rstrip('\n')
+
+ def reset(self):
+ l = ''.join(self.buf)
+ self.buf.clear()
+ self.reset_tokenizer()
+ if l:
+ return l.rstrip('\n')
+
+class assemble_python_lines(TokenInputTransformer):
+ def __init__(self):
+ super(assemble_python_lines, self).__init__(None)
+
+ def output(self, tokens):
+ return self.reset()
+
+@CoroutineInputTransformer.wrap
+def assemble_logical_lines():
+ r"""Join lines following explicit line continuations (\)"""
+ line = ''
+ while True:
+ line = (yield line)
+ if not line or line.isspace():
+ continue
+
+ parts = []
+ while line is not None:
+ if line.endswith('\\') and (not has_comment(line)):
+ parts.append(line[:-1])
+ line = (yield None) # Get another line
+ else:
+ parts.append(line)
+ break
+
+ # Output
+ line = ''.join(parts)
+
+# Utilities
+def _make_help_call(target, esc, lspace):
+ """Prepares a pinfo(2)/psearch call from a target name and the escape
+ (i.e. ? or ??)"""
+ method = 'pinfo2' if esc == '??' \
+ else 'psearch' if '*' in target \
+ else 'pinfo'
+ arg = " ".join([method, target])
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_magic_name, _, t_magic_arg_s = arg.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return "%sget_ipython().run_line_magic(%r, %r)" % (
+ lspace,
+ t_magic_name,
+ t_magic_arg_s,
+ )
+
+
+# These define the transformations for the different escape characters.
+def _tr_system(line_info):
+ "Translate lines escaped with: !"
+ cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
+ return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
+
+def _tr_system2(line_info):
+ "Translate lines escaped with: !!"
+ cmd = line_info.line.lstrip()[2:]
+ return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
+
+def _tr_help(line_info):
+ "Translate lines escaped with: ?/??"
+ # A naked help line should just fire the intro help screen
+ if not line_info.line[1:]:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
+
+def _tr_magic(line_info):
+ "Translate lines escaped with: %"
+ tpl = '%sget_ipython().run_line_magic(%r, %r)'
+ if line_info.line.startswith(ESC_MAGIC2):
+ return line_info.line
+ cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)
+
+def _tr_quote(line_info):
+ "Translate lines escaped with: ,"
+ return '%s%s("%s")' % (line_info.pre, line_info.ifun,
+ '", "'.join(line_info.the_rest.split()) )
+
+def _tr_quote2(line_info):
+ "Translate lines escaped with: ;"
+ return '%s%s("%s")' % (line_info.pre, line_info.ifun,
+ line_info.the_rest)
+
+def _tr_paren(line_info):
+ "Translate lines escaped with: /"
+ return '%s%s(%s)' % (line_info.pre, line_info.ifun,
+ ", ".join(line_info.the_rest.split()))
+
+tr = { ESC_SHELL : _tr_system,
+ ESC_SH_CAP : _tr_system2,
+ ESC_HELP : _tr_help,
+ ESC_HELP2 : _tr_help,
+ ESC_MAGIC : _tr_magic,
+ ESC_QUOTE : _tr_quote,
+ ESC_QUOTE2 : _tr_quote2,
+ ESC_PAREN : _tr_paren }
+
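+# A few illustrative translations performed by the handlers above, assuming
+# lines with no leading whitespace:
+#   !ls -l       -> get_ipython().system('ls -l')
+#   %time x = 1  -> get_ipython().run_line_magic('time', 'x = 1')
+#   ,fn a b      -> fn("a", "b")
+#   /fn a b      -> fn(a, b)
+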
+@StatelessInputTransformer.wrap
+def escaped_commands(line):
+ """Transform escaped commands - %magic, !system, ?help + various autocalls.
+ """
+ if not line or line.isspace():
+ return line
+ lineinf = LineInfo(line)
+ if lineinf.esc not in tr:
+ return line
+
+ return tr[lineinf.esc](lineinf)
+
+_initial_space_re = re.compile(r'\s*')
+
+_help_end_re = re.compile(r"""(%{0,2}
+ (?!\d)[\w*]+ # Variable name
+ (\.(?!\d)[\w*]+)* # .etc.etc
+ )
+ (\?\??)$ # ? or ??
+ """,
+ re.VERBOSE)
+
+# Extra pseudotokens for multiline strings and data structures
+_MULTILINE_STRING = object()
+_MULTILINE_STRUCTURE = object()
+
+def _line_tokens(line):
+ """Helper for has_comment and ends_in_comment_or_string."""
+ readline = StringIO(line).readline
+ toktypes = set()
+ try:
+ for t in generate_tokens(readline):
+ toktypes.add(t[0])
+ except TokenError as e:
+ # There are only two cases where a TokenError is raised.
+ if 'multi-line string' in e.args[0]:
+ toktypes.add(_MULTILINE_STRING)
+ else:
+ toktypes.add(_MULTILINE_STRUCTURE)
+ return toktypes
+
+def has_comment(src):
+ """Indicate whether an input line has (i.e. ends in, or is) a comment.
+
+ This uses tokenize, so it can distinguish comments from # inside strings.
+
+ Parameters
+ ----------
+ src : string
+ A single line input string.
+
+ Returns
+ -------
+ comment : bool
+ True if source has a comment.
+ """
+ return (tokenize.COMMENT in _line_tokens(src))
+
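+# For example (illustrative): has_comment("x = 1  # note") is True, while
+# has_comment("x = '# not a comment'") is False.
+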
+def ends_in_comment_or_string(src):
+ """Indicates whether or not an input line ends in a comment or within
+ a multiline string.
+
+ Parameters
+ ----------
+ src : string
+ A single line input string.
+
+ Returns
+ -------
+ comment : bool
+ True if source ends in a comment or multiline string.
+ """
+ toktypes = _line_tokens(src)
+ return (tokenize.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
+
+
+@StatelessInputTransformer.wrap
+def help_end(line):
+ """Translate lines with ?/?? at the end"""
+ m = _help_end_re.search(line)
+ if m is None or ends_in_comment_or_string(line):
+ return line
+ target = m.group(1)
+ esc = m.group(3)
+ lspace = _initial_space_re.match(line).group(0)
+
+ return _make_help_call(target, esc, lspace)
+
+
+@CoroutineInputTransformer.wrap
+def cellmagic(end_on_blank_line=False):
+ """Captures & transforms cell magics.
+
+ After a cell magic is started, this stores up any lines it gets until it is
+ reset (sent None).
+ """
+ tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
+ cellmagic_help_re = re.compile(r'%%\w+\?')
+ line = ''
+ while True:
+ line = (yield line)
+ # consume leading empty lines
+ while not line:
+ line = (yield line)
+
+ if not line.startswith(ESC_MAGIC2):
+            # This isn't a cell magic; idle until reset, then start over
+ while line is not None:
+ line = (yield line)
+ continue
+
+ if cellmagic_help_re.match(line):
+ # This case will be handled by help_end
+ continue
+
+ first = line
+ body = []
+ line = (yield None)
+ while (line is not None) and \
+ ((line.strip() != '') or not end_on_blank_line):
+ body.append(line)
+ line = (yield None)
+
+ # Output
+ magic_name, _, first = first.partition(' ')
+ magic_name = magic_name.lstrip(ESC_MAGIC2)
+ line = tpl % (magic_name, first, u'\n'.join(body))
+
+
+def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
+ """Remove matching input prompts from a block of input.
+
+ Parameters
+ ----------
+ prompt_re : regular expression
+ A regular expression matching any input prompt (including continuation)
+ initial_re : regular expression, optional
+ A regular expression matching only the initial prompt, but not continuation.
+ If no initial expression is given, prompt_re will be used everywhere.
+ Used mainly for plain Python prompts, where the continuation prompt
+ ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
+
+ Notes
+ -----
+    If `initial_re` and `prompt_re` differ,
+ only `initial_re` will be tested against the first line.
+ If any prompt is found on the first two lines,
+ prompts will be stripped from the rest of the block.
+ """
+ if initial_re is None:
+ initial_re = prompt_re
+ line = ''
+ while True:
+ line = (yield line)
+
+ # First line of cell
+ if line is None:
+ continue
+ out, n1 = initial_re.subn('', line, count=1)
+ if turnoff_re and not n1:
+ if turnoff_re.match(line):
+ # We're in e.g. a cell magic; disable this transformer for
+ # the rest of the cell.
+ while line is not None:
+ line = (yield line)
+ continue
+
+ line = (yield out)
+
+ if line is None:
+ continue
+ # check for any prompt on the second line of the cell,
+ # because people often copy from just after the first prompt,
+ # so we might not see it in the first line.
+ out, n2 = prompt_re.subn('', line, count=1)
+ line = (yield out)
+
+ if n1 or n2:
+ # Found a prompt in the first two lines - check for it in
+ # the rest of the cell as well.
+ while line is not None:
+ line = (yield prompt_re.sub('', line, count=1))
+
+ else:
+ # Prompts not in input - wait for reset
+ while line is not None:
+ line = (yield line)
+
+@CoroutineInputTransformer.wrap
+def classic_prompt():
+ """Strip the >>>/... prompts of the Python interactive shell."""
+ # FIXME: non-capturing version (?:...) usable?
+ prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
+ initial_re = re.compile(r'^>>>( |$)')
+ # Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
+ turnoff_re = re.compile(r'^[%!]')
+ return _strip_prompts(prompt_re, initial_re, turnoff_re)
+
+@CoroutineInputTransformer.wrap
+def ipy_prompt():
+ """Strip IPython's In [1]:/...: prompts."""
+ # FIXME: non-capturing version (?:...) usable?
+ prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
+ # Disable prompt stripping inside cell magics
+ turnoff_re = re.compile(r'^%%')
+ return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
+
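+# For example (illustrative), feeding '>>> a = 1' and then '... b = 2' into
+# classic_prompt() yields 'a = 1' and 'b = 2'; ipy_prompt() strips
+# 'In [1]: ' prefixes in the same way.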
+
+@CoroutineInputTransformer.wrap
+def leading_indent():
+ """Remove leading indentation.
+
+    If the first line starts with spaces or tabs, the same whitespace will be
+ removed from each following line until it is reset.
+ """
+ space_re = re.compile(r'^[ \t]+')
+ line = ''
+ while True:
+ line = (yield line)
+
+ if line is None:
+ continue
+
+ m = space_re.match(line)
+ if m:
+ space = m.group(0)
+ while line is not None:
+ if line.startswith(space):
+ line = line[len(space):]
+ line = (yield line)
+ else:
+ # No leading spaces - wait for reset
+ while line is not None:
+ line = (yield line)
+
+
+_assign_pat = \
+r'''(?P<lhs>(\s*)
+ ([\w\.]+) # Initial identifier
+ (\s*,\s*
+ \*?[\w\.]+)* # Further identifiers for unpacking
+ \s*?,? # Trailing comma
+ )
+ \s*=\s*
+'''
+
+assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
+assign_system_template = '%s = get_ipython().getoutput(%r)'
+@StatelessInputTransformer.wrap
+def assign_from_system(line):
+ """Transform assignment from system commands (e.g. files = !ls)"""
+ m = assign_system_re.match(line)
+ if m is None:
+ return line
+
+ return assign_system_template % m.group('lhs', 'cmd')
+
+assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
+assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
+@StatelessInputTransformer.wrap
+def assign_from_magic(line):
+ """Transform assignment from magic commands (e.g. a = %who_ls)"""
+ m = assign_magic_re.match(line)
+ if m is None:
+ return line
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ m_lhs, m_cmd = m.group('lhs', 'cmd')
+ t_magic_name, _, t_magic_arg_s = m_cmd.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return assign_magic_template % (m_lhs, t_magic_name, t_magic_arg_s)
diff --git a/contrib/python/ipython/py3/IPython/core/inputtransformer2.py b/contrib/python/ipython/py3/IPython/core/inputtransformer2.py
new file mode 100644
index 0000000000..37f0e7699c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/inputtransformer2.py
@@ -0,0 +1,797 @@
+"""Input transformer machinery to support IPython special syntax.
+
+This includes the machinery to recognise and transform ``%magic`` commands,
+``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
+
+Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
+deprecated in 7.0.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import ast
+from codeop import CommandCompiler, Compile
+import re
+import tokenize
+from typing import List, Tuple, Optional, Any
+import warnings
+
+_indent_re = re.compile(r'^[ \t]+')
+
+def leading_empty_lines(lines):
+ """Remove leading empty lines
+
+ If the leading lines are empty or contain only whitespace, they will be
+ removed.
+ """
+ if not lines:
+ return lines
+ for i, line in enumerate(lines):
+ if line and not line.isspace():
+ return lines[i:]
+ return lines
+
+def leading_indent(lines):
+ """Remove leading indentation.
+
+    If the first line starts with spaces or tabs, the same whitespace will be
+ removed from each following line in the cell.
+ """
+ if not lines:
+ return lines
+ m = _indent_re.match(lines[0])
+ if not m:
+ return lines
+ space = m.group(0)
+ n = len(space)
+ return [l[n:] if l.startswith(space) else l
+ for l in lines]
+
+class PromptStripper:
+ """Remove matching input prompts from a block of input.
+
+ Parameters
+ ----------
+ prompt_re : regular expression
+ A regular expression matching any input prompt (including continuation,
+ e.g. ``...``)
+ initial_re : regular expression, optional
+ A regular expression matching only the initial prompt, but not continuation.
+ If no initial expression is given, prompt_re will be used everywhere.
+ Used mainly for plain Python prompts (``>>>``), where the continuation prompt
+ ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
+
+ Notes
+ -----
+
+ If initial_re and prompt_re differ,
+ only initial_re will be tested against the first line.
+ If any prompt is found on the first two lines,
+ prompts will be stripped from the rest of the block.
+ """
+ def __init__(self, prompt_re, initial_re=None):
+ self.prompt_re = prompt_re
+ self.initial_re = initial_re or prompt_re
+
+ def _strip(self, lines):
+ return [self.prompt_re.sub('', l, count=1) for l in lines]
+
+ def __call__(self, lines):
+ if not lines:
+ return lines
+ if self.initial_re.match(lines[0]) or \
+ (len(lines) > 1 and self.prompt_re.match(lines[1])):
+ return self._strip(lines)
+ return lines
+
+classic_prompt = PromptStripper(
+ prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
+ initial_re=re.compile(r'^>>>( |$)')
+)
+
+ipython_prompt = PromptStripper(
+ re.compile(
+ r"""
+ ^( # Match from the beginning of a line, either:
+
+ # 1. First-line prompt:
+ ((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
+ In\ # The 'In' of the prompt, with a space
+ \[\d+\]: # Command index, as displayed in the prompt
+ \ # With a mandatory trailing space
+
+ | # ... or ...
+
+ # 2. The three dots of the multiline prompt
+ \s* # All leading whitespace characters
+ \.{3,}: # The three (or more) dots
+ \ ? # With an optional trailing space
+
+ )
+ """,
+ re.VERBOSE,
+ )
+)
+
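+# Illustrative behaviour of the prompt strippers defined above:
+#   classic_prompt(['>>> a = 1\n', '... b = 2\n']) -> ['a = 1\n', 'b = 2\n']
+#   ipython_prompt(['In [1]: a = 1\n'])            -> ['a = 1\n']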
+
+def cell_magic(lines):
+ if not lines or not lines[0].startswith('%%'):
+ return lines
+ if re.match(r'%%\w+\?', lines[0]):
+ # This case will be handled by help_end
+ return lines
+ magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
+ body = ''.join(lines[1:])
+ return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
+ % (magic_name, first_line, body)]
+
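+# For example (illustrative), the two-line cell ['%%bash\n', 'echo hi\n'] is
+# rewritten to ["get_ipython().run_cell_magic('bash', '', 'echo hi\n')\n"].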
+
+def _find_assign_op(token_line) -> Optional[int]:
+ """Get the index of the first assignment in the line ('=' not inside brackets)
+
+ Note: We don't try to support multiple special assignment (a = b = %foo)
+ """
+ paren_level = 0
+ for i, ti in enumerate(token_line):
+ s = ti.string
+ if s == '=' and paren_level == 0:
+ return i
+ if s in {'(','[','{'}:
+ paren_level += 1
+ elif s in {')', ']', '}'}:
+ if paren_level > 0:
+ paren_level -= 1
+ return None
+
+def find_end_of_continued_line(lines, start_line: int):
+ """Find the last line of a line explicitly extended using backslashes.
+
+ Uses 0-indexed line numbers.
+ """
+ end_line = start_line
+ while lines[end_line].endswith('\\\n'):
+ end_line += 1
+ if end_line >= len(lines):
+ break
+ return end_line
+
+def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
+ r"""Assemble a single line from multiple continued line pieces
+
+ Continued lines are lines ending in ``\``, and the line following the last
+ ``\`` in the block.
+
+ For example, this code continues over multiple lines::
+
+ if (assign_ix is not None) \
+ and (len(line) >= assign_ix + 2) \
+ and (line[assign_ix+1].string == '%') \
+ and (line[assign_ix+2].type == tokenize.NAME):
+
+ This statement contains four continued line pieces.
+ Assembling these pieces into a single line would give::
+
+ if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
+
+ This uses 0-indexed line numbers. *start* is (lineno, colno).
+
+ Used to allow ``%magic`` and ``!system`` commands to be continued over
+ multiple lines.
+ """
+ parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
+ return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
+ + [parts[-1].rstrip()]) # Strip newline from last line
+
+class TokenTransformBase:
+ """Base class for transformations which examine tokens.
+
+ Special syntax should not be transformed when it occurs inside strings or
+ comments. This is hard to reliably avoid with regexes. The solution is to
+ tokenise the code as Python, and recognise the special syntax in the tokens.
+
+ IPython's special syntax is not valid Python syntax, so tokenising may go
+ wrong after the special syntax starts. These classes therefore find and
+ transform *one* instance of special syntax at a time into regular Python
+ syntax. After each transformation, tokens are regenerated to find the next
+ piece of special syntax.
+
+ Subclasses need to implement one class method (find)
+ and one regular method (transform).
+
+ The priority attribute can select which transformation to apply if multiple
+ transformers match in the same place. Lower numbers have higher priority.
+ This allows "%magic?" to be turned into a help call rather than a magic call.
+ """
+ # Lower numbers -> higher priority (for matches in the same location)
+ priority = 10
+
+ def sortby(self):
+ return self.start_line, self.start_col, self.priority
+
+ def __init__(self, start):
+ self.start_line = start[0] - 1 # Shift from 1-index to 0-index
+ self.start_col = start[1]
+
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find one instance of special syntax in the provided tokens.
+
+ Tokens are grouped into logical lines for convenience,
+ so it is easy to e.g. look at the first token of each line.
+ *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
+
+ This should return an instance of its class, pointing to the start
+ position it has found, or None if it found no match.
+ """
+ raise NotImplementedError
+
+ def transform(self, lines: List[str]):
+ """Transform one instance of special syntax found by ``find()``
+
+ Takes a list of strings representing physical lines,
+ returns a similar list of transformed lines.
+ """
+ raise NotImplementedError
+
+class MagicAssign(TokenTransformBase):
+ """Transformer for assignments from magics (a = %foo)"""
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first magic assignment (a = %foo) in the cell.
+ """
+ for line in tokens_by_line:
+ assign_ix = _find_assign_op(line)
+ if (assign_ix is not None) \
+ and (len(line) >= assign_ix + 2) \
+ and (line[assign_ix+1].string == '%') \
+ and (line[assign_ix+2].type == tokenize.NAME):
+ return cls(line[assign_ix+1].start)
+
+ def transform(self, lines: List[str]):
+ """Transform a magic assignment found by the ``find()`` classmethod.
+ """
+ start_line, start_col = self.start_line, self.start_col
+ lhs = lines[start_line][:start_col]
+ end_line = find_end_of_continued_line(lines, start_line)
+ rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
+ assert rhs.startswith('%'), rhs
+ magic_name, _, args = rhs[1:].partition(' ')
+
+ lines_before = lines[:start_line]
+ call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
+ new_line = lhs + call + '\n'
+ lines_after = lines[end_line+1:]
+
+ return lines_before + [new_line] + lines_after
+
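+# For example (illustrative), ``a = %who_ls`` is rewritten by MagicAssign to
+# ``a = get_ipython().run_line_magic('who_ls', '')``.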
+
+class SystemAssign(TokenTransformBase):
+ """Transformer for assignments from system commands (a = !foo)"""
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first system assignment (a = !foo) in the cell.
+ """
+ for line in tokens_by_line:
+ assign_ix = _find_assign_op(line)
+ if (assign_ix is not None) \
+ and not line[assign_ix].line.strip().startswith('=') \
+ and (len(line) >= assign_ix + 2) \
+ and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
+ ix = assign_ix + 1
+
+ while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
+ if line[ix].string == '!':
+ return cls(line[ix].start)
+ elif not line[ix].string.isspace():
+ break
+ ix += 1
+
+ def transform(self, lines: List[str]):
+ """Transform a system assignment found by the ``find()`` classmethod.
+ """
+ start_line, start_col = self.start_line, self.start_col
+
+ lhs = lines[start_line][:start_col]
+ end_line = find_end_of_continued_line(lines, start_line)
+ rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
+ assert rhs.startswith('!'), rhs
+ cmd = rhs[1:]
+
+ lines_before = lines[:start_line]
+ call = "get_ipython().getoutput({!r})".format(cmd)
+ new_line = lhs + call + '\n'
+ lines_after = lines[end_line + 1:]
+
+ return lines_before + [new_line] + lines_after
+
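+# For example (illustrative), ``files = !ls`` is rewritten by SystemAssign to
+# ``files = get_ipython().getoutput('ls')``.
+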
+# The escape sequences that define the syntax transformations IPython will
+# apply to user input. These can NOT be just changed here: many regular
+# expressions and other parts of the code may use their hardcoded values, and
+# for all intents and purposes they constitute the 'IPython syntax', so they
+# should be considered fixed.
+
+ESC_SHELL = '!' # Send line to underlying system shell
+ESC_SH_CAP = '!!' # Send line to system shell and capture output
+ESC_HELP = '?' # Find information about object
+ESC_HELP2 = '??' # Find extra-detailed information about object
+ESC_MAGIC = '%' # Call magic function
+ESC_MAGIC2 = '%%' # Call cell-magic function
+ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
+ESC_QUOTE2 = ';' # Quote all args as a single string, call
+ESC_PAREN = '/' # Call first argument with rest of line as arguments
+
+ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
+ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
+
+def _make_help_call(target, esc):
+ """Prepares a pinfo(2)/psearch call from a target name and the escape
+ (i.e. ? or ??)"""
+ method = 'pinfo2' if esc == '??' \
+ else 'psearch' if '*' in target \
+ else 'pinfo'
+ arg = " ".join([method, target])
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_magic_name, _, t_magic_arg_s = arg.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return "get_ipython().run_line_magic(%r, %r)" % (t_magic_name, t_magic_arg_s)
+
+
+def _tr_help(content):
+ """Translate lines escaped with: ?
+
+ A naked help line should fire the intro help screen (shell.show_usage())
+ """
+ if not content:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(content, '?')
+
+def _tr_help2(content):
+ """Translate lines escaped with: ??
+
+ A naked help line should fire the intro help screen (shell.show_usage())
+ """
+ if not content:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(content, '??')
+
+def _tr_magic(content):
+ "Translate lines escaped with a percent sign: %"
+ name, _, args = content.partition(' ')
+ return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
+
+def _tr_quote(content):
+ "Translate lines escaped with a comma: ,"
+ name, _, args = content.partition(' ')
+ return '%s("%s")' % (name, '", "'.join(args.split()) )
+
+def _tr_quote2(content):
+ "Translate lines escaped with a semicolon: ;"
+ name, _, args = content.partition(' ')
+ return '%s("%s")' % (name, args)
+
+def _tr_paren(content):
+ "Translate lines escaped with a slash: /"
+ name, _, args = content.partition(' ')
+ return '%s(%s)' % (name, ", ".join(args.split()))
+
+tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
+ ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
+ ESC_HELP : _tr_help,
+ ESC_HELP2 : _tr_help2,
+ ESC_MAGIC : _tr_magic,
+ ESC_QUOTE : _tr_quote,
+ ESC_QUOTE2 : _tr_quote2,
+ ESC_PAREN : _tr_paren }
+
+class EscapedCommand(TokenTransformBase):
+ """Transformer for escaped commands like %foo, !foo, or /foo"""
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first escaped command (%foo, !foo, etc.) in the cell.
+ """
+ for line in tokens_by_line:
+ if not line:
+ continue
+ ix = 0
+ ll = len(line)
+ while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
+ ix += 1
+ if ix >= ll:
+ continue
+ if line[ix].string in ESCAPE_SINGLES:
+ return cls(line[ix].start)
+
+ def transform(self, lines):
+ """Transform an escaped line found by the ``find()`` classmethod.
+ """
+ start_line, start_col = self.start_line, self.start_col
+
+ indent = lines[start_line][:start_col]
+ end_line = find_end_of_continued_line(lines, start_line)
+ line = assemble_continued_line(lines, (start_line, start_col), end_line)
+
+ if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
+ escape, content = line[:2], line[2:]
+ else:
+ escape, content = line[:1], line[1:]
+
+ if escape in tr:
+ call = tr[escape](content)
+ else:
+ call = ''
+
+ lines_before = lines[:start_line]
+ new_line = indent + call + '\n'
+ lines_after = lines[end_line + 1:]
+
+ return lines_before + [new_line] + lines_after
+
+
+_help_end_re = re.compile(
+ r"""(%{0,2}
+ (?!\d)[\w*]+ # Variable name
+ (\.(?!\d)[\w*]+|\[-?[0-9]+\])* # .etc.etc or [0], we only support literal integers.
+ )
+ (\?\??)$ # ? or ??
+ """,
+ re.VERBOSE,
+)
+
+
+class HelpEnd(TokenTransformBase):
+ """Transformer for help syntax: obj? and obj??"""
+ # This needs to be higher priority (lower number) than EscapedCommand so
+ # that inspecting magics (%foo?) works.
+ priority = 5
+
+ def __init__(self, start, q_locn):
+ super().__init__(start)
+ self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
+ self.q_col = q_locn[1]
+
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first help command (foo?) in the cell.
+ """
+ for line in tokens_by_line:
+ # Last token is NEWLINE; look at last but one
+ if len(line) > 2 and line[-2].string == '?':
+ # Find the first token that's not INDENT/DEDENT
+ ix = 0
+ while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
+ ix += 1
+ return cls(line[ix].start, line[-2].start)
+
+ def transform(self, lines):
+ """Transform a help command found by the ``find()`` classmethod.
+ """
+
+ piece = "".join(lines[self.start_line : self.q_line + 1])
+ indent, content = piece[: self.start_col], piece[self.start_col :]
+ lines_before = lines[: self.start_line]
+ lines_after = lines[self.q_line + 1 :]
+
+ m = _help_end_re.search(content)
+ if not m:
+ raise SyntaxError(content)
+ assert m is not None, content
+ target = m.group(1)
+ esc = m.group(3)
+
+
+ call = _make_help_call(target, esc)
+ new_line = indent + call + '\n'
+
+ return lines_before + [new_line] + lines_after
+
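+# For example (illustrative), ``np.sum?`` becomes
+# ``get_ipython().run_line_magic('pinfo', 'np.sum')``, and ``np.sum??`` uses
+# 'pinfo2' instead.
+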
+def make_tokens_by_line(lines:List[str]):
+ """Tokenize a series of lines and group tokens by line.
+
+ The tokens for a multiline Python string or expression are grouped as one
+    line. All lines except the last should keep their line ending ('\\n',
+    '\\r\\n') for this to work properly. Use `.splitlines(keepends=True)`,
+    for example, when passing a block of text to this function.
+
+ """
+ # NL tokens are used inside multiline expressions, but also after blank
+ # lines or comments. This is intentional - see https://bugs.python.org/issue17061
+ # We want to group the former case together but split the latter, so we
+ # track parentheses level, similar to the internals of tokenize.
+
+ # reexported from token on 3.7+
+ NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
+ tokens_by_line: List[List[Any]] = [[]]
+ if len(lines) > 1 and not lines[0].endswith(("\n", "\r", "\r\n", "\x0b", "\x0c")):
+ warnings.warn(
+ "`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified",
+ stacklevel=2,
+ )
+ parenlev = 0
+ try:
+ for token in tokenize.generate_tokens(iter(lines).__next__):
+ tokens_by_line[-1].append(token)
+ if (token.type == NEWLINE) \
+ or ((token.type == NL) and (parenlev <= 0)):
+ tokens_by_line.append([])
+ elif token.string in {'(', '[', '{'}:
+ parenlev += 1
+ elif token.string in {')', ']', '}'}:
+ if parenlev > 0:
+ parenlev -= 1
+ except tokenize.TokenError:
+ # Input ended in a multiline string or expression. That's OK for us.
+ pass
+
+
+ if not tokens_by_line[-1]:
+ tokens_by_line.pop()
+
+
+ return tokens_by_line
+
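+# For example (illustrative), make_tokens_by_line(["a = (1 +\n", "2)\n"]) keeps
+# the tokens of both physical lines in the same group, because the NL inside
+# the parentheses does not end the logical line.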
+
+def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
+ """Check if the depth of brackets in the list of tokens drops below 0"""
+ parenlev = 0
+ for token in tokens:
+ if token.string in {"(", "[", "{"}:
+ parenlev += 1
+ elif token.string in {")", "]", "}"}:
+ parenlev -= 1
+ if parenlev < 0:
+ return True
+ return False
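+ # For example, the tokens of the single line ")(" make this return True (a
+ # closing bracket is seen before any opening one), while "()" returns False.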
+
+
+def show_linewise_tokens(s: str):
+ """For investigation and debugging"""
+ warnings.warn(
+ "show_linewise_tokens is deprecated since IPython 8.6",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if not s.endswith("\n"):
+ s += "\n"
+ lines = s.splitlines(keepends=True)
+ for line in make_tokens_by_line(lines):
+ print("Line -------")
+ for tokinfo in line:
+ print(" ", tokinfo)
+
+# Arbitrary limit to prevent getting stuck in infinite loops
+TRANSFORM_LOOP_LIMIT = 500
+
+class TransformerManager:
+ """Applies various transformations to a cell or code block.
+
+ The key methods for external use are ``transform_cell()``
+ and ``check_complete()``.
+ """
+ def __init__(self):
+ self.cleanup_transforms = [
+ leading_empty_lines,
+ leading_indent,
+ classic_prompt,
+ ipython_prompt,
+ ]
+ self.line_transforms = [
+ cell_magic,
+ ]
+ self.token_transformers = [
+ MagicAssign,
+ SystemAssign,
+ EscapedCommand,
+ HelpEnd,
+ ]
+
+ def do_one_token_transform(self, lines):
+ """Find and run the transform earliest in the code.
+
+ Returns (changed, lines).
+
+ This method is called repeatedly until changed is False, indicating
+ that all available transformations are complete.
+
+ The tokens following IPython special syntax might not be valid, so
+ the transformed code is retokenised every time to identify the next
+ piece of special syntax. Hopefully long code cells are mostly valid
+ Python, not using lots of IPython special syntax, so this shouldn't be
+ a performance issue.
+ """
+ tokens_by_line = make_tokens_by_line(lines)
+ candidates = []
+ for transformer_cls in self.token_transformers:
+ transformer = transformer_cls.find(tokens_by_line)
+ if transformer:
+ candidates.append(transformer)
+
+ if not candidates:
+ # Nothing to transform
+ return False, lines
+ ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
+ for transformer in ordered_transformers:
+ try:
+ return True, transformer.transform(lines)
+ except SyntaxError:
+ pass
+ return False, lines
+
+ def do_token_transforms(self, lines):
+ for _ in range(TRANSFORM_LOOP_LIMIT):
+ changed, lines = self.do_one_token_transform(lines)
+ if not changed:
+ return lines
+
+ raise RuntimeError("Input transformation still changing after "
+ "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
+
+ def transform_cell(self, cell: str) -> str:
+ """Transforms a cell of input code"""
+ if not cell.endswith('\n'):
+ cell += '\n' # Ensure the cell has a trailing newline
+ lines = cell.splitlines(keepends=True)
+ for transform in self.cleanup_transforms + self.line_transforms:
+ lines = transform(lines)
+
+ lines = self.do_token_transforms(lines)
+ return ''.join(lines)
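+ # Usage sketch (the result shown is the typical rewrite produced by the
+ # transformers registered in __init__, shown approximately):
+ #   mgr = TransformerManager()
+ #   mgr.transform_cell("%time x = 1")
+ #   # -> "get_ipython().run_line_magic('time', 'x = 1')\n"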
+
+ def check_complete(self, cell: str):
+ """Return whether a block of code is ready to execute, or should be continued
+
+ Parameters
+ ----------
+ cell : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+ One of 'complete', 'incomplete', or 'invalid' if source is not a
+ prefix of valid code.
+ indent_spaces : int or None
+ The number of spaces by which to indent the next line of code. If
+ status is not 'incomplete', this is None.
+ """
+ # Remember whether the cell ends in a newline.
+ ends_with_newline = False
+ for character in reversed(cell):
+ if character == '\n':
+ ends_with_newline = True
+ break
+ elif character.strip():
+ break
+ else:
+ continue
+
+ if not ends_with_newline:
+ # Append a newline for consistent tokenization
+ # See https://bugs.python.org/issue33899
+ cell += '\n'
+
+ lines = cell.splitlines(keepends=True)
+
+ if not lines:
+ return 'complete', None
+
+ if lines[-1].endswith('\\'):
+ # Explicit backslash continuation
+ return 'incomplete', find_last_indent(lines)
+
+ try:
+ for transform in self.cleanup_transforms:
+ if not getattr(transform, 'has_side_effects', False):
+ lines = transform(lines)
+ except SyntaxError:
+ return 'invalid', None
+
+ if lines[0].startswith('%%'):
+ # Special case for cell magics - completion marked by blank line
+ if lines[-1].strip():
+ return 'incomplete', find_last_indent(lines)
+ else:
+ return 'complete', None
+
+ try:
+ for transform in self.line_transforms:
+ if not getattr(transform, 'has_side_effects', False):
+ lines = transform(lines)
+ lines = self.do_token_transforms(lines)
+ except SyntaxError:
+ return 'invalid', None
+
+ tokens_by_line = make_tokens_by_line(lines)
+
+ # Bail if we got one line and there are more closing parentheses than
+ # the opening ones
+ if (
+ len(lines) == 1
+ and tokens_by_line
+ and has_sunken_brackets(tokens_by_line[0])
+ ):
+ return "invalid", None
+
+ if not tokens_by_line:
+ return 'incomplete', find_last_indent(lines)
+
+ if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
+ # We're in a multiline string or expression
+ return 'incomplete', find_last_indent(lines)
+
+ newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
+
+ # Pop the last line which only contains DEDENTs and ENDMARKER
+ last_token_line = None
+ if {t.type for t in tokens_by_line[-1]} in [
+ {tokenize.DEDENT, tokenize.ENDMARKER},
+ {tokenize.ENDMARKER}
+ ] and len(tokens_by_line) > 1:
+ last_token_line = tokens_by_line.pop()
+
+ while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
+ tokens_by_line[-1].pop()
+
+ if not tokens_by_line[-1]:
+ return 'incomplete', find_last_indent(lines)
+
+ if tokens_by_line[-1][-1].string == ':':
+ # The last line starts a block (e.g. 'if foo:')
+ ix = 0
+ while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
+ ix += 1
+
+ indent = tokens_by_line[-1][ix].start[1]
+ return 'incomplete', indent + 4
+
+ if tokens_by_line[-1][0].line.endswith('\\'):
+ return 'incomplete', None
+
+ # At this point, our checks think the code is complete (or invalid).
+ # We'll use codeop.compile_command to check this with the real parser
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', SyntaxWarning)
+ res = compile_command(''.join(lines), symbol='exec')
+ except (SyntaxError, OverflowError, ValueError, TypeError,
+ MemoryError, SyntaxWarning):
+ return 'invalid', None
+ else:
+ if res is None:
+ return 'incomplete', find_last_indent(lines)
+
+ if last_token_line and last_token_line[0].type == tokenize.DEDENT:
+ if ends_with_newline:
+ return 'complete', None
+ return 'incomplete', find_last_indent(lines)
+
+ # If there's a blank line at the end, assume we're ready to execute
+ if not lines[-1].strip():
+ return 'complete', None
+
+ return 'complete', None
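+ # Usage sketch for check_complete (expected behaviour of the logic above):
+ #   mgr = TransformerManager()
+ #   mgr.check_complete("1 + 1")               # ('complete', None)
+ #   mgr.check_complete("for i in range(3):")  # ('incomplete', 4)
+ #   mgr.check_complete("x = )")               # ('invalid', None)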
+
+
+def find_last_indent(lines):
+ m = _indent_re.match(lines[-1])
+ if not m:
+ return 0
+ return len(m.group(0).replace('\t', ' '*4))
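+ # e.g. find_last_indent(["if x:\n", "    pass\n"]) returns 4; tabs count as
+ # four spaces because of the replace() above.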
+
+
+class MaybeAsyncCompile(Compile):
+ def __init__(self, extra_flags=0):
+ super().__init__()
+ self.flags |= extra_flags
+
+
+class MaybeAsyncCommandCompiler(CommandCompiler):
+ def __init__(self, extra_flags=0):
+ self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
+
+
+_extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
+
+compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)
diff --git a/contrib/python/ipython/py3/IPython/core/interactiveshell.py b/contrib/python/ipython/py3/IPython/core/interactiveshell.py
new file mode 100644
index 0000000000..7392de7c02
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/interactiveshell.py
@@ -0,0 +1,3910 @@
+# -*- coding: utf-8 -*-
+"""Main IPython class."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+import abc
+import ast
+import atexit
+import bdb
+import builtins as builtin_mod
+import functools
+import inspect
+import os
+import re
+import runpy
+import subprocess
+import sys
+import tempfile
+import traceback
+import types
+import warnings
+from ast import stmt
+from io import open as io_open
+from logging import error
+from pathlib import Path
+from typing import Callable
+from typing import List as ListType, Dict as DictType, Any as AnyType
+from typing import Optional, Sequence, Tuple
+from warnings import warn
+
+from pickleshare import PickleShareDB
+from tempfile import TemporaryDirectory
+from traitlets import (
+ Any,
+ Bool,
+ CaselessStrEnum,
+ Dict,
+ Enum,
+ Instance,
+ Integer,
+ List,
+ Type,
+ Unicode,
+ default,
+ observe,
+ validate,
+)
+from traitlets.config.configurable import SingletonConfigurable
+from traitlets.utils.importstring import import_item
+
+import IPython.core.hooks
+from IPython.core import magic, oinspect, page, prefilter, ultratb
+from IPython.core.alias import Alias, AliasManager
+from IPython.core.autocall import ExitAutocall
+from IPython.core.builtin_trap import BuiltinTrap
+from IPython.core.compilerop import CachingCompiler
+from IPython.core.debugger import InterruptiblePdb
+from IPython.core.display_trap import DisplayTrap
+from IPython.core.displayhook import DisplayHook
+from IPython.core.displaypub import DisplayPublisher
+from IPython.core.error import InputRejected, UsageError
+from IPython.core.events import EventManager, available_events
+from IPython.core.extensions import ExtensionManager
+from IPython.core.formatters import DisplayFormatter
+from IPython.core.history import HistoryManager
+from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
+from IPython.core.logger import Logger
+from IPython.core.macro import Macro
+from IPython.core.payload import PayloadManager
+from IPython.core.prefilter import PrefilterManager
+from IPython.core.profiledir import ProfileDir
+from IPython.core.usage import default_banner
+from IPython.display import display
+from IPython.paths import get_ipython_dir
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import PyColorize, io, openpy, py3compat
+from IPython.utils.decorators import undoc
+from IPython.utils.io import ask_yes_no
+from IPython.utils.ipstruct import Struct
+from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
+from IPython.utils.process import getoutput, system
+from IPython.utils.strdispatch import StrDispatch
+from IPython.utils.syspathcontext import prepended_to_syspath
+from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
+from IPython.core.oinspect import OInfo
+
+
+sphinxify: Optional[Callable]
+
+try:
+ import docrepr.sphinxify as sphx
+
+ def sphinxify(oinfo):
+ wrapped_docstring = sphx.wrap_main_docstring(oinfo)
+
+ def sphinxify_docstring(docstring):
+ with TemporaryDirectory() as dirname:
+ return {
+ "text/html": sphx.sphinxify(wrapped_docstring, dirname),
+ "text/plain": docstring,
+ }
+
+ return sphinxify_docstring
+except ImportError:
+ sphinxify = None
+
+
+class ProvisionalWarning(DeprecationWarning):
+ """
+ Warning class for unstable features
+ """
+ pass
+
+from ast import Module
+
+_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
+_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
+
+#-----------------------------------------------------------------------------
+# Await Helpers
+#-----------------------------------------------------------------------------
+
+# we still need to run things using the asyncio eventloop, but there is no
+# async integration
+from .async_helpers import (
+ _asyncio_runner,
+ _curio_runner,
+ _pseudo_sync_runner,
+ _should_be_async,
+ _trio_runner,
+)
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# compiled regexps for autoindent management
+dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+
+def is_integer_string(s: str):
+ """
+ Variant of "str.isnumeric()" that allows negative values and other ints.
+ """
+ try:
+ int(s)
+ return True
+ except ValueError:
+ return False
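+ # e.g. is_integer_string("-3") -> True, is_integer_string("3.5") -> False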
+
+
+@undoc
+def softspace(file, newvalue):
+ """Copied from code.py, to remove the dependency"""
+
+ oldvalue = 0
+ try:
+ oldvalue = file.softspace
+ except AttributeError:
+ pass
+ try:
+ file.softspace = newvalue
+ except (AttributeError, TypeError):
+ # "attribute-less object" or "read-only attributes"
+ pass
+ return oldvalue
+
+@undoc
+def no_op(*a, **kw):
+ pass
+
+
+class SpaceInInput(Exception): pass
+
+
+class SeparateUnicode(Unicode):
+ r"""A Unicode subclass to validate separate_in, separate_out, etc.
+
+ This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
+ """
+
+ def validate(self, obj, value):
+ if value == '0': value = ''
+ value = value.replace('\\n','\n')
+ return super(SeparateUnicode, self).validate(obj, value)
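+ # e.g. a configured value of '0' becomes '' and a literal backslash-n ('\\n')
+ # becomes a real newline, so separators are easy to write on the command line
+ # or in a config file.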
+
+
+@undoc
+class DummyMod(object):
+ """A dummy module used for IPython's interactive module when
+ a namespace must be assigned to the module's __dict__."""
+ __spec__ = None
+
+
+class ExecutionInfo(object):
+ """The arguments used for a call to :meth:`InteractiveShell.run_cell`
+
+ Stores information about what is going to happen.
+ """
+ raw_cell = None
+ store_history = False
+ silent = False
+ shell_futures = True
+ cell_id = None
+
+ def __init__(self, raw_cell, store_history, silent, shell_futures, cell_id):
+ self.raw_cell = raw_cell
+ self.store_history = store_history
+ self.silent = silent
+ self.shell_futures = shell_futures
+ self.cell_id = cell_id
+
+ def __repr__(self):
+ name = self.__class__.__qualname__
+ raw_cell = (
+ (self.raw_cell[:50] + "..") if len(self.raw_cell) > 50 else self.raw_cell
+ )
+ return (
+ '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s cell_id=%s>'
+ % (
+ name,
+ id(self),
+ raw_cell,
+ self.store_history,
+ self.silent,
+ self.shell_futures,
+ self.cell_id,
+ )
+ )
+
+
+class ExecutionResult(object):
+ """The result of a call to :meth:`InteractiveShell.run_cell`
+
+ Stores information about what took place.
+ """
+ execution_count = None
+ error_before_exec = None
+ error_in_exec: Optional[BaseException] = None
+ info = None
+ result = None
+
+ def __init__(self, info):
+ self.info = info
+
+ @property
+ def success(self):
+ return (self.error_before_exec is None) and (self.error_in_exec is None)
+
+ def raise_error(self):
+ """Reraises error if `success` is `False`, otherwise does nothing"""
+ if self.error_before_exec is not None:
+ raise self.error_before_exec
+ if self.error_in_exec is not None:
+ raise self.error_in_exec
+
+ def __repr__(self):
+ name = self.__class__.__qualname__
+ return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
+ (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
+
+@functools.wraps(io_open)
+def _modified_open(file, *args, **kwargs):
+ if file in {0, 1, 2}:
+ raise ValueError(
+ f"IPython won't let you open fd={file} by default "
+ "as it is likely to crash IPython. If you know what you are doing, "
+ "you can use builtins' open."
+ )
+
+ return io_open(file, *args, **kwargs)
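+ # Where this wrapper is installed as ``open`` in the user namespace (see
+ # init_user_ns below), ``open(1, "w")`` raises ValueError, while
+ # ``open("notes.txt", "w")`` behaves like the regular built-in ("notes.txt"
+ # being an arbitrary example path). ``io.open`` / ``builtins.open`` remain
+ # untouched.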
+
+class InteractiveShell(SingletonConfigurable):
+ """An enhanced, interactive shell for Python."""
+
+ _instance = None
+
+ ast_transformers = List([], help=
+ """
+ A list of ast.NodeTransformer subclass instances, which will be applied
+ to user input before code is run.
+ """
+ ).tag(config=True)
+
+ autocall = Enum((0,1,2), default_value=0, help=
+ """
+ Make IPython automatically call any callable object even if you didn't
+ type explicit parentheses. For example, 'str 43' becomes 'str(43)'
+ automatically. The value can be '0' to disable the feature, '1' for
+ 'smart' autocall, where it is not applied if there are no more
+ arguments on the line, and '2' for 'full' autocall, where all callable
+ objects are automatically called (even if no arguments are present).
+ """
+ ).tag(config=True)
+
+ autoindent = Bool(True, help=
+ """
+ Autoindent IPython code entered interactively.
+ """
+ ).tag(config=True)
+
+ autoawait = Bool(True, help=
+ """
+ Automatically run await statement in the top level repl.
+ """
+ ).tag(config=True)
+
+ loop_runner_map ={
+ 'asyncio':(_asyncio_runner, True),
+ 'curio':(_curio_runner, True),
+ 'trio':(_trio_runner, True),
+ 'sync': (_pseudo_sync_runner, False)
+ }
+
+ loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
+ allow_none=True,
+ help="""Select the loop runner that will be used to execute top-level asynchronous code"""
+ ).tag(config=True)
+
+ @default('loop_runner')
+ def _default_loop_runner(self):
+ return import_item("IPython.core.interactiveshell._asyncio_runner")
+
+ @validate('loop_runner')
+ def _import_runner(self, proposal):
+ if isinstance(proposal.value, str):
+ if proposal.value in self.loop_runner_map:
+ runner, autoawait = self.loop_runner_map[proposal.value]
+ self.autoawait = autoawait
+ return runner
+ runner = import_item(proposal.value)
+ if not callable(runner):
+ raise ValueError('loop_runner must be callable')
+ return runner
+ if not callable(proposal.value):
+ raise ValueError('loop_runner must be callable')
+ return proposal.value
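+ # Configuration sketch: setting ``c.InteractiveShell.loop_runner = "asyncio"``
+ # resolves to _asyncio_runner through loop_runner_map above (and switches
+ # autoawait on), while a dotted path such as "mypkg.myrunner" (hypothetical)
+ # is imported with import_item and must be callable.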
+
+ automagic = Bool(True, help=
+ """
+ Enable magic commands to be called without the leading %.
+ """
+ ).tag(config=True)
+
+ banner1 = Unicode(default_banner,
+ help="""The part of the banner to be printed before the profile"""
+ ).tag(config=True)
+ banner2 = Unicode('',
+ help="""The part of the banner to be printed after the profile"""
+ ).tag(config=True)
+
+ cache_size = Integer(1000, help=
+ """
+ Set the size of the output cache. The default is 1000, you can
+ change it permanently in your config file. Setting it to 0 completely
+ disables the caching system, and the minimum value accepted is 3 (if
+ you provide a value less than 3, it is reset to 0 and a warning is
+ issued). This limit is defined because otherwise you'll spend more
+ time re-flushing a too small cache than working
+ """
+ ).tag(config=True)
+ color_info = Bool(True, help=
+ """
+ Use colors for displaying information about objects. Because this
+ information is passed through a pager (like 'less'), and some pagers
+ get confused with color codes, this capability can be turned off.
+ """
+ ).tag(config=True)
+ colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
+ default_value='Neutral',
+ help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
+ ).tag(config=True)
+ debug = Bool(False).tag(config=True)
+ disable_failing_post_execute = Bool(False,
+ help="Don't call post-execute functions that have failed in the past."
+ ).tag(config=True)
+ display_formatter = Instance(DisplayFormatter, allow_none=True)
+ displayhook_class = Type(DisplayHook)
+ display_pub_class = Type(DisplayPublisher)
+ compiler_class = Type(CachingCompiler)
+ inspector_class = Type(
+ oinspect.Inspector, help="Class to use to instantiate the shell inspector"
+ ).tag(config=True)
+
+ sphinxify_docstring = Bool(False, help=
+ """
+ Enables rich html representation of docstrings. (This requires the
+ docrepr module).
+ """).tag(config=True)
+
+ @observe("sphinxify_docstring")
+ def _sphinxify_docstring_changed(self, change):
+ if change['new']:
+ warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
+
+ enable_html_pager = Bool(False, help=
+ """
+ (Provisional API) enables html representation in mime bundles sent
+ to pagers.
+ """).tag(config=True)
+
+ @observe("enable_html_pager")
+ def _enable_html_pager_changed(self, change):
+ if change['new']:
+ warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
+
+ data_pub_class = None
+
+ exit_now = Bool(False)
+ exiter = Instance(ExitAutocall)
+ @default('exiter')
+ def _exiter_default(self):
+ return ExitAutocall(self)
+ # Monotonically increasing execution counter
+ execution_count = Integer(1)
+ filename = Unicode("<ipython console>")
+ ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
+
+ # Used to transform cells before running them, and check whether code is complete
+ input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
+ ())
+
+ @property
+ def input_transformers_cleanup(self):
+ return self.input_transformer_manager.cleanup_transforms
+
+ input_transformers_post = List([],
+ help="A list of string input transformers, to be applied after IPython's "
+ "own input transformations."
+ )
+
+ @property
+ def input_splitter(self):
+ """Make this available for backward compatibility (pre-7.0 release) with existing code.
+
+ For example, ipykernel currently uses
+ `shell.input_splitter.check_complete`
+ """
+ from warnings import warn
+ warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
+ DeprecationWarning, stacklevel=2
+ )
+ return self.input_transformer_manager
+
+ logstart = Bool(False, help=
+ """
+ Start logging to the default log file in overwrite mode.
+ Use `logappend` to specify a log file to **append** logs to.
+ """
+ ).tag(config=True)
+ logfile = Unicode('', help=
+ """
+ The name of the logfile to use.
+ """
+ ).tag(config=True)
+ logappend = Unicode('', help=
+ """
+ Start logging to the given file in append mode.
+ Use `logfile` to specify a log file to **overwrite** logs to.
+ """
+ ).tag(config=True)
+ object_info_string_level = Enum((0,1,2), default_value=0,
+ ).tag(config=True)
+ pdb = Bool(False, help=
+ """
+ Automatically call the pdb debugger after every exception.
+ """
+ ).tag(config=True)
+ display_page = Bool(False,
+ help="""If True, anything that would be passed to the pager
+ will be displayed as regular output instead."""
+ ).tag(config=True)
+
+
+ show_rewritten_input = Bool(True,
+ help="Show rewritten input, e.g. for autocall."
+ ).tag(config=True)
+
+ quiet = Bool(False).tag(config=True)
+
+ history_length = Integer(10000,
+ help='Total length of command history'
+ ).tag(config=True)
+
+ history_load_length = Integer(1000, help=
+ """
+ The number of saved history entries to be loaded
+ into the history buffer at startup.
+ """
+ ).tag(config=True)
+
+ ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
+ default_value='last_expr',
+ help="""
+ 'all', 'last', 'last_expr', 'none' or 'last_expr_or_assign', specifying
+ which nodes should be run interactively (displaying output from expressions).
+ """
+ ).tag(config=True)
+
+ warn_venv = Bool(
+ True,
+ help="Warn if running in a virtual environment with no IPython installed (so IPython from the global environment is used).",
+ ).tag(config=True)
+
+ # TODO: this part of prompt management should be moved to the frontends.
+ # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
+ separate_in = SeparateUnicode('\n').tag(config=True)
+ separate_out = SeparateUnicode('').tag(config=True)
+ separate_out2 = SeparateUnicode('').tag(config=True)
+ wildcards_case_sensitive = Bool(True).tag(config=True)
+ xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
+ default_value='Context',
+ help="Switch modes for the IPython exception handlers."
+ ).tag(config=True)
+
+ # Subcomponents of InteractiveShell
+ alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
+ display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
+ extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
+ payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
+ history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
+ magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
+
+ profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
+ @property
+ def profile(self):
+ if self.profile_dir is not None:
+ name = os.path.basename(self.profile_dir.location)
+ return name.replace('profile_','')
+
+
+ # Private interface
+ _post_execute = Dict()
+
+ # Tracks any GUI loop loaded for pylab
+ pylab_gui_select = None
+
+ last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
+
+ last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
+
+ def __init__(self, ipython_dir=None, profile_dir=None,
+ user_module=None, user_ns=None,
+ custom_exceptions=((), None), **kwargs):
+ # This is where traits with a config_key argument are updated
+ # from the values on config.
+ super(InteractiveShell, self).__init__(**kwargs)
+ if 'PromptManager' in self.config:
+ warn('As of IPython 5.0 `PromptManager` config will have no effect'
+ ' and has been replaced by TerminalInteractiveShell.prompts_class')
+ self.configurables = [self]
+
+ # These are relatively independent and stateless
+ self.init_ipython_dir(ipython_dir)
+ self.init_profile_dir(profile_dir)
+ self.init_instance_attrs()
+ self.init_environment()
+
+ # Check if we're in a virtualenv, and set up sys.path.
+ self.init_virtualenv()
+
+ # Create namespaces (user_ns, user_global_ns, etc.)
+ self.init_create_namespaces(user_module, user_ns)
+ # This has to be done after init_create_namespaces because it uses
+ # something in self.user_ns, but before init_sys_modules, which
+ # is the first thing to modify sys.
+ # TODO: When we override sys.stdout and sys.stderr before this class
+ # is created, we are saving the overridden ones here. Not sure if this
+ # is what we want to do.
+ self.save_sys_module_state()
+ self.init_sys_modules()
+
+ # While we're trying to have each part of the code directly access what
+ # it needs without keeping redundant references to objects, we have too
+ # much legacy code that expects ip.db to exist.
+ self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
+
+ self.init_history()
+ self.init_encoding()
+ self.init_prefilter()
+
+ self.init_syntax_highlighting()
+ self.init_hooks()
+ self.init_events()
+ self.init_pushd_popd_magic()
+ self.init_user_ns()
+ self.init_logger()
+ self.init_builtins()
+
+ # The following was in post_config_initialization
+ self.init_inspector()
+ self.raw_input_original = input
+ self.init_completer()
+ # TODO: init_io() needs to happen before init_traceback handlers
+ # because the traceback handlers hardcode the stdout/stderr streams.
+ # This logic is in debugger.Pdb and should eventually be changed.
+ self.init_io()
+ self.init_traceback_handlers(custom_exceptions)
+ self.init_prompts()
+ self.init_display_formatter()
+ self.init_display_pub()
+ self.init_data_pub()
+ self.init_displayhook()
+ self.init_magics()
+ self.init_alias()
+ self.init_logstart()
+ self.init_pdb()
+ self.init_extension_manager()
+ self.init_payload()
+ self.events.trigger('shell_initialized', self)
+ atexit.register(self.atexit_operations)
+
+ # The trio runner is used for running Trio in the foreground thread. It
+ # is different from `_trio_runner(async_fn)` in `async_helpers.py`
+ # which calls `trio.run()` for every cell. This runner runs all cells
+ # inside a single Trio event loop. If used, it is set from
+ # `ipykernel.kernelapp`.
+ self.trio_runner = None
+
+ def get_ipython(self):
+ """Return the currently running IPython instance."""
+ return self
+
+ #-------------------------------------------------------------------------
+ # Trait changed handlers
+ #-------------------------------------------------------------------------
+ @observe('ipython_dir')
+ def _ipython_dir_changed(self, change):
+ ensure_dir_exists(change['new'])
+
+ def set_autoindent(self,value=None):
+ """Set the autoindent flag.
+
+ If called with no arguments, it acts as a toggle."""
+ if value is None:
+ self.autoindent = not self.autoindent
+ else:
+ self.autoindent = value
+
+ def set_trio_runner(self, tr):
+ self.trio_runner = tr
+
+ #-------------------------------------------------------------------------
+ # init_* methods called by __init__
+ #-------------------------------------------------------------------------
+
+ def init_ipython_dir(self, ipython_dir):
+ if ipython_dir is not None:
+ self.ipython_dir = ipython_dir
+ return
+
+ self.ipython_dir = get_ipython_dir()
+
+ def init_profile_dir(self, profile_dir):
+ if profile_dir is not None:
+ self.profile_dir = profile_dir
+ return
+ self.profile_dir = ProfileDir.create_profile_dir_by_name(
+ self.ipython_dir, "default"
+ )
+
+ def init_instance_attrs(self):
+ self.more = False
+
+ # command compiler
+ self.compile = self.compiler_class()
+
+ # Make an empty namespace, which extension writers can rely on both
+ # existing and NEVER being used by ipython itself. This gives them a
+ # convenient location for storing additional information and state
+ # their extensions may require, without fear of collisions with other
+ # ipython names that may develop later.
+ self.meta = Struct()
+
+ # Temporary files used for various purposes. Deleted at exit.
+ # The files here are stored with Path from Pathlib
+ self.tempfiles = []
+ self.tempdirs = []
+
+ # keep track of where we started running (mainly for crash post-mortem)
+ # This is not being used anywhere currently.
+ self.starting_dir = os.getcwd()
+
+ # Indentation management
+ self.indent_current_nsp = 0
+
+ # Dict to track post-execution functions that have been registered
+ self._post_execute = {}
+
+ def init_environment(self):
+ """Any changes we need to make to the user's environment."""
+ pass
+
+ def init_encoding(self):
+ # Get system encoding at startup time. Certain terminals (like Emacs
+ # under Win32) have it set to None, and we need to have a known valid
+ # encoding to use in the raw_input() method
+ try:
+ self.stdin_encoding = sys.stdin.encoding or 'ascii'
+ except AttributeError:
+ self.stdin_encoding = 'ascii'
+
+
+ @observe('colors')
+ def init_syntax_highlighting(self, changes=None):
+ # Python source parser/formatter for syntax highlighting
+ pyformat = PyColorize.Parser(style=self.colors, parent=self).format
+ self.pycolorize = lambda src: pyformat(src,'str')
+
+ def refresh_style(self):
+ # No-op here, used in subclass
+ pass
+
+ def init_pushd_popd_magic(self):
+ # for pushd/popd management
+ self.home_dir = get_home_dir()
+
+ self.dir_stack = []
+
+ def init_logger(self):
+ self.logger = Logger(self.home_dir, logfname='ipython_log.py',
+ logmode='rotate')
+
+ def init_logstart(self):
+ """Initialize logging in case it was requested at the command line.
+ """
+ if self.logappend:
+ self.magic('logstart %s append' % self.logappend)
+ elif self.logfile:
+ self.magic('logstart %s' % self.logfile)
+ elif self.logstart:
+ self.magic('logstart')
+
+
+ def init_builtins(self):
+ # A single, static flag that we set to True. Its presence indicates
+ # that an IPython shell has been created, and we make no attempts at
+ # removing on exit or representing the existence of more than one
+ # IPython at a time.
+ builtin_mod.__dict__['__IPYTHON__'] = True
+ builtin_mod.__dict__['display'] = display
+
+ self.builtin_trap = BuiltinTrap(shell=self)
+
+ @observe('colors')
+ def init_inspector(self, changes=None):
+ # Object inspector
+ self.inspector = self.inspector_class(
+ oinspect.InspectColors,
+ PyColorize.ANSICodeColors,
+ self.colors,
+ self.object_info_string_level,
+ )
+
+ def init_io(self):
+ # implemented in subclasses, TerminalInteractiveShell does call
+ # colorama.init().
+ pass
+
+ def init_prompts(self):
+ # Set system prompts, so that scripts can decide if they are running
+ # interactively.
+ sys.ps1 = 'In : '
+ sys.ps2 = '...: '
+ sys.ps3 = 'Out: '
+
+ def init_display_formatter(self):
+ self.display_formatter = DisplayFormatter(parent=self)
+ self.configurables.append(self.display_formatter)
+
+ def init_display_pub(self):
+ self.display_pub = self.display_pub_class(parent=self, shell=self)
+ self.configurables.append(self.display_pub)
+
+ def init_data_pub(self):
+ if not self.data_pub_class:
+ self.data_pub = None
+ return
+ self.data_pub = self.data_pub_class(parent=self)
+ self.configurables.append(self.data_pub)
+
+ def init_displayhook(self):
+ # Initialize displayhook, set in/out prompts and printing system
+ self.displayhook = self.displayhook_class(
+ parent=self,
+ shell=self,
+ cache_size=self.cache_size,
+ )
+ self.configurables.append(self.displayhook)
+ # This is a context manager that installs/removes the displayhook at
+ # the appropriate time.
+ self.display_trap = DisplayTrap(hook=self.displayhook)
+
+ @staticmethod
+ def get_path_links(p: Path):
+ """Gets path links including all symlinks
+
+ Examples
+ --------
+ In [1]: from IPython.core.interactiveshell import InteractiveShell
+
+ In [2]: import sys, pathlib
+
+ In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
+
+ In [4]: len(paths) == len(set(paths))
+ Out[4]: True
+
+ In [5]: bool(paths)
+ Out[5]: True
+ """
+ paths = [p]
+ while p.is_symlink():
+ new_path = Path(os.readlink(p))
+ if not new_path.is_absolute():
+ new_path = p.parent / new_path
+ p = new_path
+ paths.append(p)
+ return paths
+
+ def init_virtualenv(self):
+ """Add the current virtualenv to sys.path so the user can import modules from it.
+ This isn't perfect: it doesn't use the Python interpreter with which the
+ virtualenv was built, and it ignores the --no-site-packages option. A
+ warning will appear suggesting the user installs IPython in the
+ virtualenv, but for many cases, it probably works well enough.
+
+ Adapted from code snippets online.
+
+ http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
+ """
+ if 'VIRTUAL_ENV' not in os.environ:
+ # Not in a virtualenv
+ return
+ elif os.environ["VIRTUAL_ENV"] == "":
+ warn("Virtual env path set to '', please check if this is intended.")
+ return
+
+ p = Path(sys.executable)
+ p_venv = Path(os.environ["VIRTUAL_ENV"])
+
+ # fallback venv detection:
+ # stdlib venv may symlink sys.executable, so we can't use realpath.
+ # but others can symlink *to* the venv Python, so we can't just use sys.executable.
+ # So we just check every item in the symlink tree (generally <= 3)
+ paths = self.get_path_links(p)
+
+ # In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
+ if p_venv.parts[1] == "cygdrive":
+ drive_name = p_venv.parts[2]
+ p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
+
+ if any(p_venv == p.parents[1] for p in paths):
+ # Our exe is inside or has access to the virtualenv, don't need to do anything.
+ return
+
+ if sys.platform == "win32":
+ virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
+ else:
+ virtual_env_path = Path(
+ os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
+ )
+ p_ver = sys.version_info[:2]
+
+ # Predict version from py[thon]-x.x in the $VIRTUAL_ENV
+ re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
+ if re_m:
+ predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
+ if predicted_path.exists():
+ p_ver = re_m.groups()
+
+ virtual_env = str(virtual_env_path).format(*p_ver)
+ if self.warn_venv:
+ warn(
+ "Attempting to work in a virtualenv. If you encounter problems, "
+ "please install IPython inside the virtualenv."
+ )
+ import site
+ sys.path.insert(0, virtual_env)
+ site.addsitedir(virtual_env)
+
+ #-------------------------------------------------------------------------
+ # Things related to injections into the sys module
+ #-------------------------------------------------------------------------
+
+ def save_sys_module_state(self):
+ """Save the state of hooks in the sys module.
+
+ This has to be called after self.user_module is created.
+ """
+ self._orig_sys_module_state = {'stdin': sys.stdin,
+ 'stdout': sys.stdout,
+ 'stderr': sys.stderr,
+ 'excepthook': sys.excepthook}
+ self._orig_sys_modules_main_name = self.user_module.__name__
+ self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
+
+ def restore_sys_module_state(self):
+ """Restore the state of the sys module."""
+ try:
+ for k, v in self._orig_sys_module_state.items():
+ setattr(sys, k, v)
+ except AttributeError:
+ pass
+ # Reset what was done in self.init_sys_modules
+ if self._orig_sys_modules_main_mod is not None:
+ sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
+
+ #-------------------------------------------------------------------------
+ # Things related to the banner
+ #-------------------------------------------------------------------------
+
+ @property
+ def banner(self):
+ banner = self.banner1
+ if self.profile and self.profile != 'default':
+ banner += '\nIPython profile: %s\n' % self.profile
+ if self.banner2:
+ banner += '\n' + self.banner2
+ return banner
+
+ def show_banner(self, banner=None):
+ if banner is None:
+ banner = self.banner
+ sys.stdout.write(banner)
+
+ #-------------------------------------------------------------------------
+ # Things related to hooks
+ #-------------------------------------------------------------------------
+
+ def init_hooks(self):
+ # hooks holds pointers used for user-side customizations
+ self.hooks = Struct()
+
+ self.strdispatchers = {}
+
+ # Set all default hooks, defined in the IPython.hooks module.
+ hooks = IPython.core.hooks
+ for hook_name in hooks.__all__:
+ # default hooks have priority 100, i.e. low; user hooks should have
+ # 0-100 priority
+ self.set_hook(hook_name, getattr(hooks, hook_name), 100)
+
+ if self.display_page:
+ self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
+
+ def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
+ """set_hook(name,hook) -> sets an internal IPython hook.
+
+ IPython exposes some of its internal API as user-modifiable hooks. By
+ adding your function to one of these hooks, you can modify IPython's
+ behavior to call at runtime your own routines."""
+
+ # At some point in the future, this should validate the hook before it
+ # accepts it. Probably at least check that the hook takes the number
+ # of args it's supposed to.
+
+ f = types.MethodType(hook,self)
+
+ # check if the hook is for strdispatcher first
+ if str_key is not None:
+ sdp = self.strdispatchers.get(name, StrDispatch())
+ sdp.add_s(str_key, f, priority )
+ self.strdispatchers[name] = sdp
+ return
+ if re_key is not None:
+ sdp = self.strdispatchers.get(name, StrDispatch())
+ sdp.add_re(re.compile(re_key), f, priority )
+ self.strdispatchers[name] = sdp
+ return
+
+ dp = getattr(self.hooks, name, None)
+ if name not in IPython.core.hooks.__all__:
+ print("Warning! Hook '%s' is not one of %s" % \
+ (name, IPython.core.hooks.__all__ ))
+
+ if name in IPython.core.hooks.deprecated:
+ alternative = IPython.core.hooks.deprecated[name]
+ raise ValueError(
+ "Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
+ name, alternative
+ )
+ )
+
+ if not dp:
+ dp = IPython.core.hooks.CommandChainDispatcher()
+
+ try:
+ dp.add(f,priority)
+ except AttributeError:
+ # it was not commandchain, plain old func - replace
+ dp = f
+
+ setattr(self.hooks,name, dp)
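+ # Usage sketch ('editor' is one of the hook names in IPython.core.hooks.__all__;
+ # ``my_editor`` is a hypothetical callable that takes the shell as its first
+ # argument, since set_hook binds it as a method):
+ #   get_ipython().set_hook('editor', my_editor)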
+
+ #-------------------------------------------------------------------------
+ # Things related to events
+ #-------------------------------------------------------------------------
+
+ def init_events(self):
+ self.events = EventManager(self, available_events)
+
+ self.events.register("pre_execute", self._clear_warning_registry)
+
+ def register_post_execute(self, func):
+ """DEPRECATED: Use ip.events.register('post_run_cell', func)
+
+ Register a function for calling after code execution.
+ """
+ raise ValueError(
+ "ip.register_post_execute is deprecated since IPython 1.0, use "
+ "ip.events.register('post_run_cell', func) instead."
+ )
+
+ def _clear_warning_registry(self):
+ # clear the warning registry, so that different code blocks with
+ # overlapping line number ranges don't cause spurious suppression of
+ # warnings (see gh-6611 for details)
+ if "__warningregistry__" in self.user_global_ns:
+ del self.user_global_ns["__warningregistry__"]
+
+ #-------------------------------------------------------------------------
+ # Things related to the "main" module
+ #-------------------------------------------------------------------------
+
+ def new_main_mod(self, filename, modname):
+ """Return a new 'main' module object for user code execution.
+
+ ``filename`` should be the path of the script which will be run in the
+ module. Requests with the same filename will get the same module, with
+ its namespace cleared.
+
+ ``modname`` should be the module name - normally either '__main__' or
+ the basename of the file without the extension.
+
+ When scripts are executed via %run, we must keep a reference to their
+ __main__ module around so that Python doesn't
+ clear it, rendering references to module globals useless.
+
+ This method keeps said reference in a private dict, keyed by the
+ absolute path of the script. This way, for multiple executions of the
+ same script we only keep one copy of the namespace (the last one),
+ thus preventing memory leaks from old references while allowing the
+ objects from the last execution to be accessible.
+ """
+ filename = os.path.abspath(filename)
+ try:
+ main_mod = self._main_mod_cache[filename]
+ except KeyError:
+ main_mod = self._main_mod_cache[filename] = types.ModuleType(
+ modname,
+ doc="Module created for script run in IPython")
+ else:
+ main_mod.__dict__.clear()
+ main_mod.__name__ = modname
+
+ main_mod.__file__ = filename
+ # It seems pydoc (and perhaps others) needs any module instance to
+ # implement a __nonzero__ method
+ main_mod.__nonzero__ = lambda : True
+
+ return main_mod
+
+ def clear_main_mod_cache(self):
+ """Clear the cache of main modules.
+
+ Mainly for use by utilities like %reset.
+
+ Examples
+ --------
+ In [15]: import IPython
+
+ In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
+
+ In [17]: len(_ip._main_mod_cache) > 0
+ Out[17]: True
+
+ In [18]: _ip.clear_main_mod_cache()
+
+ In [19]: len(_ip._main_mod_cache) == 0
+ Out[19]: True
+ """
+ self._main_mod_cache.clear()
+
+ #-------------------------------------------------------------------------
+ # Things related to debugging
+ #-------------------------------------------------------------------------
+
+ def init_pdb(self):
+ # Set calling of pdb on exceptions
+ # self.call_pdb is a property
+ self.call_pdb = self.pdb
+
+ def _get_call_pdb(self):
+ return self._call_pdb
+
+ def _set_call_pdb(self,val):
+
+ if val not in (0,1,False,True):
+ raise ValueError('new call_pdb value must be boolean')
+
+ # store value in instance
+ self._call_pdb = val
+
+ # notify the actual exception handlers
+ self.InteractiveTB.call_pdb = val
+
+ call_pdb = property(_get_call_pdb,_set_call_pdb,None,
+ 'Control auto-activation of pdb at exceptions')
+
+ def debugger(self,force=False):
+ """Call the pdb debugger.
+
+ Keywords:
+
+ - force(False): by default, this routine checks the instance call_pdb
+ flag and does not actually invoke the debugger if the flag is false.
+ The 'force' option forces the debugger to activate even if the flag
+ is false.
+ """
+
+ if not (force or self.call_pdb):
+ return
+
+ if not hasattr(sys,'last_traceback'):
+ error('No traceback has been produced, nothing to debug.')
+ return
+
+ self.InteractiveTB.debugger(force=True)
+
+ #-------------------------------------------------------------------------
+ # Things related to IPython's various namespaces
+ #-------------------------------------------------------------------------
+ default_user_namespaces = True
+
+ def init_create_namespaces(self, user_module=None, user_ns=None):
+ # Create the namespace where the user will operate. user_ns is
+ # normally the only one used, and it is passed to the exec calls as
+ # the locals argument. But we do carry a user_global_ns namespace
+ # given as the exec 'globals' argument, This is useful in embedding
+ # situations where the ipython shell opens in a context where the
+ # distinction between locals and globals is meaningful. For
+ # non-embedded contexts, it is just the same object as the user_ns dict.
+
+ # FIXME. For some strange reason, __builtins__ is showing up at user
+ # level as a dict instead of a module. This is a manual fix, but I
+ # should really track down where the problem is coming from. Alex
+ # Schmolck reported this problem first.
+
+ # A useful post by Alex Martelli on this topic:
+ # Re: inconsistent value from __builtins__
+ # Von: Alex Martelli <aleaxit@yahoo.com>
+ # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
+ # Gruppen: comp.lang.python
+
+ # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
+ # > >>> print type(builtin_check.get_global_binding('__builtins__'))
+ # > <type 'dict'>
+ # > >>> print type(__builtins__)
+ # > <type 'module'>
+ # > Is this difference in return value intentional?
+
+ # Well, it's documented that '__builtins__' can be either a dictionary
+ # or a module, and it's been that way for a long time. Whether it's
+ # intentional (or sensible), I don't know. In any case, the idea is
+ # that if you need to access the built-in namespace directly, you
+ # should start with "import __builtin__" (note, no 's') which will
+ # definitely give you a module. Yeah, it's somewhat confusing:-(.
+
+ # These routines return a properly built module and dict as needed by
+ # the rest of the code, and can also be used by extension writers to
+ # generate properly initialized namespaces.
+ if (user_ns is not None) or (user_module is not None):
+ self.default_user_namespaces = False
+ self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
+
+ # A record of hidden variables we have added to the user namespace, so
+ # we can list later only variables defined in actual interactive use.
+ self.user_ns_hidden = {}
+
+ # Now that FakeModule produces a real module, we've run into a nasty
+ # problem: after script execution (via %run), the module where the user
+ # code ran is deleted. Now that this object is a true module (needed
+ # so doctest and other tools work correctly), the Python module
+ # teardown mechanism runs over it, and sets to None every variable
+ # present in that module. Top-level references to objects from the
+ # script survive, because the user_ns is updated with them. However,
+ # calling functions defined in the script that use other things from
+ # the script will fail, because the function's closure had references
+ # to the original objects, which are now all None. So we must protect
+ # these modules from deletion by keeping a cache.
+ #
+ # To avoid keeping stale modules around (we only need the one from the
+ # last run), we use a dict keyed with the full path to the script, so
+ # only the last version of the module is held in the cache. Note,
+ # however, that we must cache the module *namespace contents* (their
+ # __dict__). Because if we try to cache the actual modules, old ones
+ # (uncached) could be destroyed while still holding references (such as
+ # those held by GUI objects that tend to be long-lived).
+ #
+ # The %reset command will flush this cache. See the cache_main_mod()
+ # and clear_main_mod_cache() methods for details on use.
+
+ # This is the cache used for 'main' namespaces
+ self._main_mod_cache = {}
+
+ # A table holding all the namespaces IPython deals with, so that
+ # introspection facilities can search easily.
+ self.ns_table = {'user_global':self.user_module.__dict__,
+ 'user_local':self.user_ns,
+ 'builtin':builtin_mod.__dict__
+ }
+
+ @property
+ def user_global_ns(self):
+ return self.user_module.__dict__
+
+ def prepare_user_module(self, user_module=None, user_ns=None):
+ """Prepare the module and namespace in which user code will be run.
+
+ When IPython is started normally, both parameters are None: a new module
+ is created automatically, and its __dict__ used as the namespace.
+
+ If only user_module is provided, its __dict__ is used as the namespace.
+ If only user_ns is provided, a dummy module is created, and user_ns
+ becomes the global namespace. If both are provided (as they may be
+ when embedding), user_ns is the local namespace, and user_module
+ provides the global namespace.
+
+ Parameters
+ ----------
+ user_module : module, optional
+ The current user module in which IPython is being run. If None,
+ a clean module will be created.
+ user_ns : dict, optional
+ A namespace in which to run interactive commands.
+
+ Returns
+ -------
+ A tuple of user_module and user_ns, each properly initialised.
+ """
+ if user_module is None and user_ns is not None:
+ user_ns.setdefault("__name__", "__main__")
+ user_module = DummyMod()
+ user_module.__dict__ = user_ns
+
+ if user_module is None:
+ user_module = types.ModuleType("__main__",
+ doc="Automatically created module for IPython interactive environment")
+
+ # We must ensure that __builtin__ (without the final 's') is always
+ # available and pointing to the __builtin__ *module*. For more details:
+ # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
+ user_module.__dict__.setdefault('__builtin__', builtin_mod)
+ user_module.__dict__.setdefault('__builtins__', builtin_mod)
+
+ if user_ns is None:
+ user_ns = user_module.__dict__
+
+ return user_module, user_ns
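+ # Behaviour sketch, following the docstring above:
+ #   prepare_user_module()                 -> fresh '__main__' module, ns is its __dict__
+ #   prepare_user_module(user_ns={'x': 1}) -> DummyMod wrapping that dict, with
+ #                                            '__name__' defaulted to '__main__'
+ # In both cases '__builtin__' / '__builtins__' are filled in if missing.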
+
+ def init_sys_modules(self):
+ # We need to insert into sys.modules something that looks like a
+ # module but which accesses the IPython namespace, for shelve and
+ # pickle to work interactively. Normally they rely on getting
+ # everything out of __main__, but for embedding purposes each IPython
+ # instance has its own private namespace, so we can't go shoving
+ # everything into __main__.
+
+ # note, however, that we should only do this for non-embedded
+ # ipythons, which really mimic the __main__.__dict__ with their own
+ # namespace. Embedded instances, on the other hand, should not do
+ # this because they need to manage the user local/global namespaces
+ # only, but they live within a 'normal' __main__ (meaning, they
+ # shouldn't overtake the execution environment of the script they're
+ # embedded in).
+
+ # This is overridden in the InteractiveShellEmbed subclass to a no-op.
+ main_name = self.user_module.__name__
+ sys.modules[main_name] = self.user_module
+
+ def init_user_ns(self):
+ """Initialize all user-visible namespaces to their minimum defaults.
+
+ Certain history lists are also initialized here, as they effectively
+ act as user namespaces.
+
+ Notes
+ -----
+ All data structures here are only filled in, they are NOT reset by this
+ method. If they were not empty before, data will simply be added to
+ them.
+ """
+ # This function works in two parts: first we put a few things in
+ # user_ns, and we sync those contents into user_ns_hidden so that these
+ # initial variables aren't shown by %who. After the sync, we add the
+ # rest of what we *do* want the user to see with %who even on a new
+ # session (probably nothing, so they really only see their own stuff)
+
+ # The user dict must *always* have a __builtin__ reference to the
+ # Python standard __builtin__ namespace, which must be imported.
+ # This is so that certain operations in prompt evaluation can be
+ # reliably executed with builtins. Note that we can NOT use
+ # __builtins__ (note the 's'), because that can either be a dict or a
+ # module, and can even mutate at runtime, depending on the context
+ # (Python makes no guarantees on it). In contrast, __builtin__ is
+ # always a module object, though it must be explicitly imported.
+
+ # For more details:
+ # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
+ ns = {}
+
+ # make global variables for user access to the histories
+ ns['_ih'] = self.history_manager.input_hist_parsed
+ ns['_oh'] = self.history_manager.output_hist
+ ns['_dh'] = self.history_manager.dir_hist
+
+ # user aliases to input and output histories. These shouldn't show up
+ # in %who, as they can have very large reprs.
+ ns['In'] = self.history_manager.input_hist_parsed
+ ns['Out'] = self.history_manager.output_hist
+
+ # Store myself as the public api!!!
+ ns['get_ipython'] = self.get_ipython
+
+ ns['exit'] = self.exiter
+ ns['quit'] = self.exiter
+ ns["open"] = _modified_open
+
+ # Sync what we've added so far to user_ns_hidden so these aren't seen
+ # by %who
+ self.user_ns_hidden.update(ns)
+
+ # Anything put into ns now would show up in %who. Think twice before
+ # putting anything here, as we really want %who to show the user their
+ # stuff, not our variables.
+
+ # Finally, update the real user's namespace
+ self.user_ns.update(ns)
+
+ @property
+ def all_ns_refs(self):
+ """Get a list of references to all the namespace dictionaries in which
+ IPython might store a user-created object.
+
+ Note that this does not include the displayhook, which also caches
+ objects from the output."""
+ return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
+ [m.__dict__ for m in self._main_mod_cache.values()]
+
+ def reset(self, new_session=True, aggressive=False):
+ """Clear all internal namespaces, and attempt to release references to
+ user objects.
+
+ If new_session is True, a new history session will be opened.
+ """
+ # Clear histories
+ self.history_manager.reset(new_session)
+ # Reset counter used to index all histories
+ if new_session:
+ self.execution_count = 1
+
+ # Reset last execution result
+ self.last_execution_succeeded = True
+ self.last_execution_result = None
+
+ # Flush cached output items
+ if self.displayhook.do_full_cache:
+ self.displayhook.flush()
+
+ # The main execution namespaces must be cleared very carefully,
+ # skipping the deletion of the builtin-related keys, because doing so
+ # would cause errors in many object's __del__ methods.
+ if self.user_ns is not self.user_global_ns:
+ self.user_ns.clear()
+ ns = self.user_global_ns
+ drop_keys = set(ns.keys())
+ drop_keys.discard('__builtin__')
+ drop_keys.discard('__builtins__')
+ drop_keys.discard('__name__')
+ for k in drop_keys:
+ del ns[k]
+
+ self.user_ns_hidden.clear()
+
+ # Restore the user namespaces to minimal usability
+ self.init_user_ns()
+ if aggressive and not hasattr(self, "_sys_modules_keys"):
+ print("Cannot restore sys.module, no snapshot")
+ elif aggressive:
+ print("culling sys module...")
+ current_keys = set(sys.modules.keys())
+ for k in current_keys - self._sys_modules_keys:
+ if k.startswith("multiprocessing"):
+ continue
+ del sys.modules[k]
+
+ # Restore the default and user aliases
+ self.alias_manager.clear_aliases()
+ self.alias_manager.init_aliases()
+
+ # Now define aliases that only make sense on the terminal, because they
+ # need direct access to the console in a way that we can't emulate in
+ # GUI or web frontend
+ if os.name == 'posix':
+ for cmd in ('clear', 'more', 'less', 'man'):
+ if cmd not in self.magics_manager.magics['line']:
+ self.alias_manager.soft_define_alias(cmd, cmd)
+
+ # Flush the private list of module references kept for script
+ # execution protection
+ self.clear_main_mod_cache()
+
+ def del_var(self, varname, by_name=False):
+ """Delete a variable from the various namespaces, so that, as
+ far as possible, we're not keeping any hidden references to it.
+
+ Parameters
+ ----------
+ varname : str
+ The name of the variable to delete.
+ by_name : bool
+ If True, delete variables with the given name in each
+ namespace. If False (default), find the variable in the user
+ namespace, and delete references to it.
+ """
+ if varname in ('__builtin__', '__builtins__'):
+ raise ValueError("Refusing to delete %s" % varname)
+
+ ns_refs = self.all_ns_refs
+
+ if by_name: # Delete by name
+ for ns in ns_refs:
+ try:
+ del ns[varname]
+ except KeyError:
+ pass
+ else: # Delete by object
+ try:
+ obj = self.user_ns[varname]
+ except KeyError as e:
+ raise NameError("name '%s' is not defined" % varname) from e
+ # Also check in output history
+ ns_refs.append(self.history_manager.output_hist)
+ for ns in ns_refs:
+ to_delete = [n for n, o in ns.items() if o is obj]
+ for name in to_delete:
+ del ns[name]
+
+ # Ensure it is removed from the last execution result
+ if self.last_execution_result.result is obj:
+ self.last_execution_result = None
+
+ # displayhook keeps extra references, but not in a dictionary
+ for name in ('_', '__', '___'):
+ if getattr(self.displayhook, name) is obj:
+ setattr(self.displayhook, name, None)
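+
+ # Illustrative usage sketch (not from the upstream file); assumes an active
+ # shell obtained via ``ip = get_ipython()``:
+ #
+ #     big = list(range(10**6))
+ #     ip.del_var('big')                  # drops 'big' and other names bound to the same object
+ #     ip.del_var('tmp', by_name=True)    # drops only keys literally named 'tmp', in every namespace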
+
+ def reset_selective(self, regex=None):
+ """Clear selective variables from internal namespaces based on a
+ specified regular expression.
+
+ Parameters
+ ----------
+ regex : string or compiled pattern, optional
+ A regular expression pattern that will be used in searching
+ variable names in the user's namespaces.
+ """
+ if regex is not None:
+ try:
+ m = re.compile(regex)
+ except TypeError as e:
+ raise TypeError('regex must be a string or compiled pattern') from e
+ # Search for keys in each namespace that match the given regex
+ # If a match is found, delete the key/value pair.
+ for ns in self.all_ns_refs:
+ # iterate over a copy of the keys, since we delete from ns as we go
+ for var in list(ns):
+ if m.search(var):
+ del ns[var]
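+
+ # Illustrative usage sketch (not from the upstream file): clear every user
+ # variable whose name starts with "tmp_", assuming ``ip = get_ipython()``:
+ #
+ #     ip.user_ns.update(dict(tmp_a=1, tmp_b=2, keep=3))
+ #     ip.reset_selective(r'^tmp_')       # tmp_a and tmp_b are removed, keep survives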
+
+ def push(self, variables, interactive=True):
+ """Inject a group of variables into the IPython user namespace.
+
+ Parameters
+ ----------
+ variables : dict, str or list/tuple of str
+ The variables to inject into the user's namespace. If a dict, a
+ simple update is done. If a str, the string is assumed to have
+ variable names separated by spaces. A list/tuple of str can also
+ be used to give the variable names. If just the variable names are
+ given (list/tuple/str), then the variable values are looked up in the
+ caller's frame.
+ interactive : bool
+ If True (default), the variables will be listed with the ``who``
+ magic.
+ """
+ vdict = None
+
+ # We need a dict of name/value pairs to do namespace updates.
+ if isinstance(variables, dict):
+ vdict = variables
+ elif isinstance(variables, (str, list, tuple)):
+ if isinstance(variables, str):
+ vlist = variables.split()
+ else:
+ vlist = variables
+ vdict = {}
+ cf = sys._getframe(1)
+ for name in vlist:
+ try:
+ vdict[name] = eval(name, cf.f_globals, cf.f_locals)
+ except:
+ print('Could not get variable %s from %s' %
+ (name,cf.f_code.co_name))
+ else:
+ raise ValueError('variables must be a dict/str/list/tuple')
+
+ # Propagate variables to user namespace
+ self.user_ns.update(vdict)
+
+ # And configure interactive visibility
+ user_ns_hidden = self.user_ns_hidden
+ if interactive:
+ for name in vdict:
+ user_ns_hidden.pop(name, None)
+ else:
+ user_ns_hidden.update(vdict)
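+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     ip.push({'answer': 42})                 # dict form: plain namespace update
+ #     x, y = 1, 2
+ #     ip.push('x y')                          # str form: values looked up in the caller's frame
+ #     ip.push(['x'], interactive=False)       # injected but hidden from %who / %whos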
+
+ def drop_by_id(self, variables):
+ """Remove a dict of variables from the user namespace, if they are the
+ same as the values in the dictionary.
+
+ This is intended for use by extensions: variables that they've added can
+ be taken back out if they are unloaded, without removing any that the
+ user has overwritten.
+
+ Parameters
+ ----------
+ variables : dict
+ A dictionary mapping object names (as strings) to the objects.
+ """
+ for name, obj in variables.items():
+ if name in self.user_ns and self.user_ns[name] is obj:
+ del self.user_ns[name]
+ self.user_ns_hidden.pop(name, None)
+
+ #-------------------------------------------------------------------------
+ # Things related to object introspection
+ #-------------------------------------------------------------------------
+ @staticmethod
+ def _find_parts(oname: str) -> Tuple[bool, ListType[str]]:
+ """
+ Given an object name, return a list of parts of this object name.
+
+ Basically split on dots when using attribute access,
+ and extract the value when using square brackets.
+
+
+ For example foo.bar[3].baz[x] -> foo, bar, 3, baz, x
+
+
+ Returns
+ -------
+ parts_ok: bool
+ whether we were able to parse the parts properly.
+ parts: list of str
+ extracted parts
+
+ """
+ raw_parts = oname.split(".")
+ parts = []
+ parts_ok = True
+ for p in raw_parts:
+ if p.endswith("]"):
+ var, *indices = p.split("[")
+ if not var.isidentifier():
+ parts_ok = False
+ break
+ parts.append(var)
+ for ind in indices:
+ if ind[-1] != "]" and not is_integer_string(ind[:-1]):
+ parts_ok = False
+ break
+ parts.append(ind[:-1])
+ continue
+
+ if not p.isidentifier():
+ parts_ok = False
+ parts.append(p)
+
+ return parts_ok, parts
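+
+ # Illustrative behaviour of the helper above (not from the upstream file),
+ # following the parsing rules it implements:
+ #
+ #     InteractiveShell._find_parts("foo.bar[3].baz[x]")
+ #     # -> (True, ['foo', 'bar', '3', 'baz', 'x'])
+ #     InteractiveShell._find_parts("foo.1bad")
+ #     # -> (False, ['foo', '1bad'])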
+
+ def _ofind(
+ self, oname: str, namespaces: Optional[Sequence[Tuple[str, AnyType]]] = None
+ ) -> OInfo:
+ """Find an object in the available namespaces.
+
+
+ Returns
+ -------
+ OInfo with fields:
+ - ismagic
+ - isalias
+ - found
+ - obj
+ - namespace
+ - parent
+
+ Has special code to detect magic functions.
+ """
+ oname = oname.strip()
+ parts_ok, parts = self._find_parts(oname)
+
+ if (
+ not oname.startswith(ESC_MAGIC)
+ and not oname.startswith(ESC_MAGIC2)
+ and not parts_ok
+ ):
+ return OInfo(
+ ismagic=False,
+ isalias=False,
+ found=False,
+ obj=None,
+ namespace=None,
+ parent=None,
+ )
+
+ if namespaces is None:
+ # Namespaces to search in:
+ # Put them in a list. The order is important so that we
+ # find things in the same order that Python finds them.
+ namespaces = [ ('Interactive', self.user_ns),
+ ('Interactive (global)', self.user_global_ns),
+ ('Python builtin', builtin_mod.__dict__),
+ ]
+
+ ismagic = False
+ isalias = False
+ found = False
+ ospace = None
+ parent = None
+ obj = None
+
+
+ # Look for the given name by splitting it in parts. If the head is
+ # found, then we look for all the remaining parts as members, and only
+ # declare success if we can find them all.
+ oname_parts = parts
+ oname_head, oname_rest = oname_parts[0],oname_parts[1:]
+ for nsname,ns in namespaces:
+ try:
+ obj = ns[oname_head]
+ except KeyError:
+ continue
+ else:
+ for idx, part in enumerate(oname_rest):
+ try:
+ parent = obj
+ # The last part is looked up in a special way to avoid
+ # descriptor invocation as it may raise or have side
+ # effects.
+ if idx == len(oname_rest) - 1:
+ obj = self._getattr_property(obj, part)
+ else:
+ if is_integer_string(part):
+ obj = obj[int(part)]
+ else:
+ obj = getattr(obj, part)
+ except:
+ # Blanket except b/c some badly implemented objects
+ # allow __getattr__ to raise exceptions other than
+ # AttributeError, which then crashes IPython.
+ break
+ else:
+ # If we finish the for loop (no break), we got all members
+ found = True
+ ospace = nsname
+ break # namespace loop
+
+ # Try to see if it's magic
+ if not found:
+ obj = None
+ if oname.startswith(ESC_MAGIC2):
+ oname = oname.lstrip(ESC_MAGIC2)
+ obj = self.find_cell_magic(oname)
+ elif oname.startswith(ESC_MAGIC):
+ oname = oname.lstrip(ESC_MAGIC)
+ obj = self.find_line_magic(oname)
+ else:
+ # search without prefix, so run? will find %run?
+ obj = self.find_line_magic(oname)
+ if obj is None:
+ obj = self.find_cell_magic(oname)
+ if obj is not None:
+ found = True
+ ospace = 'IPython internal'
+ ismagic = True
+ isalias = isinstance(obj, Alias)
+
+ # Last try: special-case some literals like '', [], {}, etc:
+ if not found and oname_head in ["''",'""','[]','{}','()']:
+ obj = eval(oname_head)
+ found = True
+ ospace = 'Interactive'
+
+ return OInfo(
+ obj=obj,
+ found=found,
+ parent=parent,
+ ismagic=ismagic,
+ isalias=isalias,
+ namespace=ospace,
+ )
+
+ @staticmethod
+ def _getattr_property(obj, attrname):
+ """Property-aware getattr to use in object finding.
+
+ If attrname represents a property, return it unevaluated (in case it has
+ side effects or raises an error).
+
+ """
+ if not isinstance(obj, type):
+ try:
+ # `getattr(type(obj), attrname)` is not guaranteed to return
+ # `obj`, but does so for property:
+ #
+ # property.__get__(self, None, cls) -> self
+ #
+ # The universal alternative is to traverse the mro manually
+ # searching for attrname in class dicts.
+ if is_integer_string(attrname):
+ return obj[int(attrname)]
+ else:
+ attr = getattr(type(obj), attrname)
+ except AttributeError:
+ pass
+ else:
+ # This relies on the fact that data descriptors (with both
+ # __get__ & __set__ magic methods) take precedence over
+ # instance-level attributes:
+ #
+ # class A(object):
+ # @property
+ # def foobar(self): return 123
+ # a = A()
+ # a.__dict__['foobar'] = 345
+ # a.foobar # == 123
+ #
+ # So, a property may be returned right away.
+ if isinstance(attr, property):
+ return attr
+
+ # Nothing helped, fall back.
+ return getattr(obj, attrname)
+
+ def _object_find(self, oname, namespaces=None) -> OInfo:
+ """Find an object and return a struct with info about it."""
+ return self._ofind(oname, namespaces)
+
+ def _inspect(self, meth, oname, namespaces=None, **kw):
+ """Generic interface to the inspector system.
+
+ This function is meant to be called by pdef, pdoc & friends.
+ """
+ info: OInfo = self._object_find(oname, namespaces)
+ if self.sphinxify_docstring:
+ if sphinxify is None:
+ raise ImportError("Module ``docrepr`` required but missing")
+ docformat = sphinxify(self.object_inspect(oname))
+ else:
+ docformat = None
+ if info.found or hasattr(info.parent, oinspect.HOOK_NAME):
+ pmethod = getattr(self.inspector, meth)
+ # TODO: only apply format_screen to the plain/text repr of the mime
+ # bundle.
+ formatter = format_screen if info.ismagic else docformat
+ if meth == 'pdoc':
+ pmethod(info.obj, oname, formatter)
+ elif meth == 'pinfo':
+ pmethod(
+ info.obj,
+ oname,
+ formatter,
+ info,
+ enable_html_pager=self.enable_html_pager,
+ **kw,
+ )
+ else:
+ pmethod(info.obj, oname)
+ else:
+ print('Object `%s` not found.' % oname)
+ return 'not found' # so callers can take other action
+
+ def object_inspect(self, oname, detail_level=0):
+ """Get object info about oname"""
+ with self.builtin_trap:
+ info = self._object_find(oname)
+ if info.found:
+ return self.inspector.info(info.obj, oname, info=info,
+ detail_level=detail_level
+ )
+ else:
+ return oinspect.object_info(name=oname, found=False)
+
+ def object_inspect_text(self, oname, detail_level=0):
+ """Get object info as formatted text"""
+ return self.object_inspect_mime(oname, detail_level)['text/plain']
+
+ def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
+ """Get object info as a mimebundle of formatted representations.
+
+ A mimebundle is a dictionary, keyed by mime-type.
+ It must always have the key `'text/plain'`.
+ """
+ with self.builtin_trap:
+ info = self._object_find(oname)
+ if info.found:
+ docformat = (
+ sphinxify(self.object_inspect(oname))
+ if self.sphinxify_docstring
+ else None
+ )
+ return self.inspector._get_info(
+ info.obj,
+ oname,
+ info=info,
+ detail_level=detail_level,
+ formatter=docformat,
+ omit_sections=omit_sections,
+ )
+ else:
+ raise KeyError(oname)
+
+ #-------------------------------------------------------------------------
+ # Things related to history management
+ #-------------------------------------------------------------------------
+
+ def init_history(self):
+ """Sets up the command history, and starts regular autosaves."""
+ self.history_manager = HistoryManager(shell=self, parent=self)
+ self.configurables.append(self.history_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to exception handling and tracebacks (not debugging)
+ #-------------------------------------------------------------------------
+
+ debugger_cls = InterruptiblePdb
+
+ def init_traceback_handlers(self, custom_exceptions):
+ # Syntax error handler.
+ self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
+
+ # The interactive one is initialized with an offset, meaning we always
+ # want to remove the topmost item in the traceback, which is our own
+ # internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
+ self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
+ color_scheme='NoColor',
+ tb_offset = 1,
+ debugger_cls=self.debugger_cls, parent=self)
+
+ # The instance will store a pointer to the system-wide exception hook,
+ # so that runtime code (such as magics) can access it. This is because
+ # during the read-eval loop, it may get temporarily overwritten.
+ self.sys_excepthook = sys.excepthook
+
+ # and add any custom exception handlers the user may have specified
+ self.set_custom_exc(*custom_exceptions)
+
+ # Set the exception mode
+ self.InteractiveTB.set_mode(mode=self.xmode)
+
+ def set_custom_exc(self, exc_tuple, handler):
+ """set_custom_exc(exc_tuple, handler)
+
+ Set a custom exception handler, which will be called if any of the
+ exceptions in exc_tuple occur in the mainloop (specifically, in the
+ run_code() method).
+
+ Parameters
+ ----------
+ exc_tuple : tuple of exception classes
+ A *tuple* of exception classes, for which to call the defined
+ handler. It is very important that you use a tuple, and NOT A
+ LIST here, because of the way Python's except statement works. If
+ you only want to trap a single exception, use a singleton tuple::
+
+ exc_tuple == (MyCustomException,)
+
+ handler : callable
+ handler must have the following signature::
+
+ def my_handler(self, etype, value, tb, tb_offset=None):
+ ...
+ return structured_traceback
+
+ Your handler must return a structured traceback (a list of strings),
+ or None.
+
+ This will be made into an instance method (via types.MethodType)
+ of IPython itself, and it will be called if any of the exceptions
+ listed in the exc_tuple are caught. If the handler is None, an
+ internal basic one is used, which just prints basic info.
+
+ To protect IPython from crashes, if your handler ever raises an
+ exception or returns an invalid result, it will be immediately
+ disabled.
+
+ Notes
+ -----
+ WARNING: by putting in your own exception handler into IPython's main
+ execution loop, you run a very good chance of nasty crashes. This
+ facility should only be used if you really know what you are doing.
+ """
+
+ if not isinstance(exc_tuple, tuple):
+ raise TypeError("The custom exceptions must be given as a tuple.")
+
+ def dummy_handler(self, etype, value, tb, tb_offset=None):
+ print('*** Simple custom exception handler ***')
+ print('Exception type :', etype)
+ print('Exception value:', value)
+ print('Traceback :', tb)
+
+ def validate_stb(stb):
+ """validate structured traceback return type
+
+ return type of CustomTB *should* be a list of strings, but allow
+ single strings or None, which are harmless.
+
+ This function will *always* return a list of strings,
+ and will raise a TypeError if stb is inappropriate.
+ """
+ msg = "CustomTB must return list of strings, not %r" % stb
+ if stb is None:
+ return []
+ elif isinstance(stb, str):
+ return [stb]
+ elif not isinstance(stb, list):
+ raise TypeError(msg)
+ # it's a list
+ for line in stb:
+ # check every element
+ if not isinstance(line, str):
+ raise TypeError(msg)
+ return stb
+
+ if handler is None:
+ wrapped = dummy_handler
+ else:
+ def wrapped(self,etype,value,tb,tb_offset=None):
+ """wrap CustomTB handler, to protect IPython from user code
+
+ This makes it harder (but not impossible) for custom exception
+ handlers to crash IPython.
+ """
+ try:
+ stb = handler(self,etype,value,tb,tb_offset=tb_offset)
+ return validate_stb(stb)
+ except:
+ # clear custom handler immediately
+ self.set_custom_exc((), None)
+ print("Custom TB Handler failed, unregistering", file=sys.stderr)
+ # show the exception in handler first
+ stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
+ print(self.InteractiveTB.stb2text(stb))
+ print("The original exception:")
+ stb = self.InteractiveTB.structured_traceback(
+ (etype,value,tb), tb_offset=tb_offset
+ )
+ return stb
+
+ self.CustomTB = types.MethodType(wrapped,self)
+ self.custom_exceptions = exc_tuple
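+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()`` and a user-defined ``MyCustomException``:
+ #
+ #     def my_handler(self, etype, value, tb, tb_offset=None):
+ #         print("caught:", value)
+ #         return self.InteractiveTB.structured_traceback(
+ #             etype, value, tb, tb_offset=tb_offset)
+ #
+ #     ip.set_custom_exc((MyCustomException,), my_handler)
+ #     ip.set_custom_exc((), None)        # restore default handling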
+
+ def excepthook(self, etype, value, tb):
+ """One more defense for GUI apps that call sys.excepthook.
+
+ GUI frameworks like wxPython trap exceptions and call
+ sys.excepthook themselves. I guess this is a feature that
+ enables them to keep running after exceptions that would
+ otherwise kill their mainloop. This is a bother for IPython
+ which expects to catch all of the program exceptions with a try:
+ except: statement.
+
+ Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
+ any app directly invokes sys.excepthook, it will look to the user like
+ IPython crashed. In order to work around this, we can disable the
+ CrashHandler and replace it with this excepthook instead, which prints a
+ regular traceback using our InteractiveTB. In this fashion, apps which
+ call sys.excepthook will generate a regular-looking exception from
+ IPython, and the CrashHandler will only be triggered by real IPython
+ crashes.
+
+ This hook should be used sparingly, only in places which are not likely
+ to be true IPython errors.
+ """
+ self.showtraceback((etype, value, tb), tb_offset=0)
+
+ def _get_exc_info(self, exc_tuple=None):
+ """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
+
+ Ensures sys.last_type,value,traceback hold the exc_info we found,
+ from whichever source.
+
+ raises ValueError if none of these contain any information
+ """
+ if exc_tuple is None:
+ etype, value, tb = sys.exc_info()
+ else:
+ etype, value, tb = exc_tuple
+
+ if etype is None:
+ if hasattr(sys, 'last_type'):
+ etype, value, tb = sys.last_type, sys.last_value, \
+ sys.last_traceback
+
+ if etype is None:
+ raise ValueError("No exception to find")
+
+ # Now store the exception info in sys.last_type etc.
+ # WARNING: these variables are somewhat deprecated and not
+ # necessarily safe to use in a threaded environment, but tools
+ # like pdb depend on their existence, so let's set them. If we
+ # find problems in the field, we'll need to revisit their use.
+ sys.last_type = etype
+ sys.last_value = value
+ sys.last_traceback = tb
+
+ return etype, value, tb
+
+ def show_usage_error(self, exc):
+ """Show a short message for UsageErrors
+
+ These are special exceptions that shouldn't show a traceback.
+ """
+ print("UsageError: %s" % exc, file=sys.stderr)
+
+ def get_exception_only(self, exc_tuple=None):
+ """
+ Return as a string (ending with a newline) the exception that
+ just occurred, without any traceback.
+ """
+ etype, value, tb = self._get_exc_info(exc_tuple)
+ msg = traceback.format_exception_only(etype, value)
+ return ''.join(msg)
+
+ def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
+ exception_only=False, running_compiled_code=False):
+ """Display the exception that just occurred.
+
+ If nothing is known about the exception, this is the method which
+ should be used throughout the code for presenting user tracebacks,
+ rather than directly invoking the InteractiveTB object.
+
+ A specific showsyntaxerror() also exists, but this method can take
+ care of calling it if needed, so unless you are explicitly catching a
+ SyntaxError exception, don't try to analyze the stack manually and
+ simply call this method."""
+
+ try:
+ try:
+ etype, value, tb = self._get_exc_info(exc_tuple)
+ except ValueError:
+ print('No traceback available to show.', file=sys.stderr)
+ return
+
+ if issubclass(etype, SyntaxError):
+ # Though this won't be called by syntax errors in the input
+ # line, there may be SyntaxError cases with imported code.
+ self.showsyntaxerror(filename, running_compiled_code)
+ elif etype is UsageError:
+ self.show_usage_error(value)
+ else:
+ if exception_only:
+ stb = ['An exception has occurred, use %tb to see '
+ 'the full traceback.\n']
+ stb.extend(self.InteractiveTB.get_exception_only(etype,
+ value))
+ else:
+ try:
+ # Exception classes can customise their traceback - we
+ # use this in IPython.parallel for exceptions occurring
+ # in the engines. This should return a list of strings.
+ if hasattr(value, "_render_traceback_"):
+ stb = value._render_traceback_()
+ else:
+ stb = self.InteractiveTB.structured_traceback(
+ etype, value, tb, tb_offset=tb_offset
+ )
+
+ except Exception:
+ print(
+ "Unexpected exception formatting exception. Falling back to standard exception"
+ )
+ traceback.print_exc()
+ return None
+
+ self._showtraceback(etype, value, stb)
+ if self.call_pdb:
+ # drop into debugger
+ self.debugger(force=True)
+ return
+
+ # Actually show the traceback
+ self._showtraceback(etype, value, stb)
+
+ except KeyboardInterrupt:
+ print('\n' + self.get_exception_only(), file=sys.stderr)
+
+ def _showtraceback(self, etype, evalue, stb: str):
+ """Actually show a traceback.
+
+ Subclasses may override this method to put the traceback on a different
+ place, like a side channel.
+ """
+ val = self.InteractiveTB.stb2text(stb)
+ try:
+ print(val)
+ except UnicodeEncodeError:
+ print(val.encode("utf-8", "backslashreplace").decode())
+
+ def showsyntaxerror(self, filename=None, running_compiled_code=False):
+ """Display the syntax error that just occurred.
+
+ This doesn't display a stack trace because there isn't one.
+
+ If a filename is given, it is stuffed in the exception instead
+ of what was there before (because Python's parser always uses
+ "<string>" when reading from a string).
+
+ If the syntax error occurred when running compiled code (i.e. running_compiled_code=True),
+ a longer stack trace will be displayed.
+ """
+ etype, value, last_traceback = self._get_exc_info()
+
+ if filename and issubclass(etype, SyntaxError):
+ try:
+ value.filename = filename
+ except:
+ # Not the format we expect; leave it alone
+ pass
+
+ # If the error occurred when executing compiled code, we should provide full stacktrace.
+ elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
+ stb = self.SyntaxTB.structured_traceback(etype, value, elist)
+ self._showtraceback(etype, value, stb)
+
+ # This is overridden in TerminalInteractiveShell to show a message about
+ # the %paste magic.
+ def showindentationerror(self):
+ """Called by _run_cell when there's an IndentationError in code entered
+ at the prompt.
+
+ This is overridden in TerminalInteractiveShell to show a message about
+ the %paste magic."""
+ self.showsyntaxerror()
+
+ @skip_doctest
+ def set_next_input(self, s, replace=False):
+ """ Sets the 'default' input string for the next command line.
+
+ Example::
+
+ In [1]: _ip.set_next_input("Hello World")
+ In [2]: Hello World_ # cursor is here
+ """
+ self.rl_next_input = s
+
+ def _indent_current_str(self):
+ """return the current level of indentation as a string"""
+ return self.input_splitter.get_indent_spaces() * ' '
+
+ #-------------------------------------------------------------------------
+ # Things related to text completion
+ #-------------------------------------------------------------------------
+
+ def init_completer(self):
+ """Initialize the completion machinery.
+
+ This creates completion machinery that can be used by client code,
+ either interactively in-process (typically triggered by the readline
+ library), programmatically (such as in test suites) or out-of-process
+ (typically over the network by remote frontends).
+ """
+ from IPython.core.completer import IPCompleter
+ from IPython.core.completerlib import (
+ cd_completer,
+ magic_run_completer,
+ module_completer,
+ reset_completer,
+ )
+
+ self.Completer = IPCompleter(shell=self,
+ namespace=self.user_ns,
+ global_namespace=self.user_global_ns,
+ parent=self,
+ )
+ self.configurables.append(self.Completer)
+
+ # Add custom completers to the basic ones built into IPCompleter
+ sdisp = self.strdispatchers.get('complete_command', StrDispatch())
+ self.strdispatchers['complete_command'] = sdisp
+ self.Completer.custom_completers = sdisp
+
+ self.set_hook('complete_command', module_completer, str_key = 'import')
+ self.set_hook('complete_command', module_completer, str_key = 'from')
+ self.set_hook('complete_command', module_completer, str_key = '%aimport')
+ self.set_hook('complete_command', magic_run_completer, str_key = '%run')
+ self.set_hook('complete_command', cd_completer, str_key = '%cd')
+ self.set_hook('complete_command', reset_completer, str_key = '%reset')
+
+ @skip_doctest
+ def complete(self, text, line=None, cursor_pos=None):
+ """Return the completed text and a list of completions.
+
+ Parameters
+ ----------
+ text : string
+ A string of text to be completed on. It can be given as an empty
+ string, in which case a line/cursor-position pair must be given
+ instead; the completer itself will then split the line like
+ readline does.
+ line : string, optional
+ The complete line that text is part of.
+ cursor_pos : int, optional
+ The position of the cursor on the input line.
+
+ Returns
+ -------
+ text : string
+ The actual text that was completed.
+ matches : list
+ A sorted list with all possible completions.
+
+ Notes
+ -----
+ The optional arguments allow the completion to take more context into
+ account, and are part of the low-level completion API.
+
+ This is a wrapper around the completion mechanism, similar to what
+ readline does at the command line when the TAB key is hit. By
+ exposing it as a method, it can be used by other non-readline
+ environments (such as GUIs) for text completion.
+
+ Examples
+ --------
+ In [1]: x = 'hello'
+
+ In [2]: _ip.complete('x.l')
+ Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
+ """
+
+ # Inject names into __builtin__ so we can complete on the added names.
+ with self.builtin_trap:
+ return self.Completer.complete(text, line, cursor_pos)
+
+ def set_custom_completer(self, completer, pos=0) -> None:
+ """Adds a new custom completer function.
+
+ The position argument (defaults to 0) is the index in the completers
+ list where you want the completer to be inserted.
+
+ `completer` should have the following signature::
+
+ def completion(self: Completer, text: str) -> List[str]:
+ raise NotImplementedError
+
+ It will be bound to the current Completer instance, be passed some text,
+ and must return a list of completion suggestions for the user.
+ """
+
+ newcomp = types.MethodType(completer, self.Completer)
+ self.Completer.custom_matchers.insert(pos,newcomp)
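+
+ # Illustrative usage sketch (not from the upstream file): a matcher that
+ # suggests a few fixed words, bound to the active Completer instance:
+ #
+ #     def fruit_completer(self, text):
+ #         return [w for w in ('apple', 'apricot', 'banana') if w.startswith(text)]
+ #
+ #     get_ipython().set_custom_completer(fruit_completer)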
+
+ def set_completer_frame(self, frame=None):
+ """Set the frame of the completer."""
+ if frame:
+ self.Completer.namespace = frame.f_locals
+ self.Completer.global_namespace = frame.f_globals
+ else:
+ self.Completer.namespace = self.user_ns
+ self.Completer.global_namespace = self.user_global_ns
+
+ #-------------------------------------------------------------------------
+ # Things related to magics
+ #-------------------------------------------------------------------------
+
+ def init_magics(self):
+ from IPython.core import magics as m
+ self.magics_manager = magic.MagicsManager(shell=self,
+ parent=self,
+ user_magics=m.UserMagics(self))
+ self.configurables.append(self.magics_manager)
+
+ # Expose as public API from the magics manager
+ self.register_magics = self.magics_manager.register
+
+ self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
+ m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
+ m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
+ m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
+ m.PylabMagics, m.ScriptMagics,
+ )
+ self.register_magics(m.AsyncMagics)
+
+ # Register Magic Aliases
+ mman = self.magics_manager
+ # FIXME: magic aliases should be defined by the Magics classes
+ # or in MagicsManager, not here
+ mman.register_alias('ed', 'edit')
+ mman.register_alias('hist', 'history')
+ mman.register_alias('rep', 'recall')
+ mman.register_alias('SVG', 'svg', 'cell')
+ mman.register_alias('HTML', 'html', 'cell')
+ mman.register_alias('file', 'writefile', 'cell')
+
+ # FIXME: Move the color initialization to the DisplayHook, which
+ # should be split into a prompt manager and displayhook. We probably
+ # even need a centralized color management object.
+ self.run_line_magic('colors', self.colors)
+
+ # Defined here so that it's included in the documentation
+ @functools.wraps(magic.MagicsManager.register_function)
+ def register_magic_function(self, func, magic_kind='line', magic_name=None):
+ self.magics_manager.register_function(
+ func, magic_kind=magic_kind, magic_name=magic_name
+ )
+
+ def _find_with_lazy_load(self, /, type_, magic_name: str):
+ """
+ Try to find a magic potentially lazy-loading it.
+
+ Parameters
+ ----------
+
+ type_: "line"|"cell"
+ the type of magics we are trying to find/lazy load.
+ magic_name: str
+ The name of the magic we are trying to find/lazy load
+
+
+ Note that this may have side effects (e.g. loading an extension).
+ """
+ finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
+ fn = finder(magic_name)
+ if fn is not None:
+ return fn
+ lazy = self.magics_manager.lazy_magics.get(magic_name)
+ if lazy is None:
+ return None
+
+ self.run_line_magic("load_ext", lazy)
+ res = finder(magic_name)
+ return res
+
+ def run_line_magic(self, magic_name: str, line, _stack_depth=1):
+ """Execute the given line magic.
+
+ Parameters
+ ----------
+ magic_name : str
+ Name of the desired magic function, without '%' prefix.
+ line : str
+ The rest of the input line as a single string.
+ _stack_depth : int
+ If run_line_magic() is called from magic() then _stack_depth=2.
+ This is added to ensure backward compatibility for use of 'get_ipython().magic()'
+ """
+ fn = self._find_with_lazy_load("line", magic_name)
+ if fn is None:
+ lazy = self.magics_manager.lazy_magics.get(magic_name)
+ if lazy:
+ self.run_line_magic("load_ext", lazy)
+ fn = self.find_line_magic(magic_name)
+ if fn is None:
+ cm = self.find_cell_magic(magic_name)
+ etpl = "Line magic function `%%%s` not found%s."
+ extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
+ 'did you mean that instead?)' % magic_name )
+ raise UsageError(etpl % (magic_name, extra))
+ else:
+ # Note: this is the distance in the stack to the user's frame.
+ # This will need to be updated if the internal calling logic gets
+ # refactored, or else we'll be expanding the wrong variables.
+
+ # Determine stack_depth depending on where run_line_magic() has been called
+ stack_depth = _stack_depth
+ if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
+ # magic has opted out of var_expand
+ magic_arg_s = line
+ else:
+ magic_arg_s = self.var_expand(line, stack_depth)
+ # Put magic args in a list so we can call with f(*a) syntax
+ args = [magic_arg_s]
+ kwargs = {}
+ # Grab local namespace if we need it:
+ if getattr(fn, "needs_local_scope", False):
+ kwargs['local_ns'] = self.get_local_scope(stack_depth)
+ with self.builtin_trap:
+ result = fn(*args, **kwargs)
+
+ # The code below prevents the output from being displayed
+ # when using magics with the decorator @output_can_be_silenced
+ # when the last Python token in the expression is a ';'.
+ if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
+ if DisplayHook.semicolon_at_end_of_expression(magic_arg_s):
+ return None
+
+ return result
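+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     ip.run_line_magic('timeit', 'sum(range(100))')
+ #     ip.run_line_magic('load_ext', 'autoreload')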
+
+ def get_local_scope(self, stack_depth):
+ """Get local scope at given stack depth.
+
+ Parameters
+ ----------
+ stack_depth : int
+ Depth relative to calling frame
+ """
+ return sys._getframe(stack_depth + 1).f_locals
+
+ def run_cell_magic(self, magic_name, line, cell):
+ """Execute the given cell magic.
+
+ Parameters
+ ----------
+ magic_name : str
+ Name of the desired magic function, without '%' prefix.
+ line : str
+ The rest of the first input line as a single string.
+ cell : str
+ The body of the cell as a (possibly multiline) string.
+ """
+ fn = self._find_with_lazy_load("cell", magic_name)
+ if fn is None:
+ lm = self.find_line_magic(magic_name)
+ etpl = "Cell magic `%%{0}` not found{1}."
+ extra = '' if lm is None else (' (But line magic `%{0}` exists, '
+ 'did you mean that instead?)'.format(magic_name))
+ raise UsageError(etpl.format(magic_name, extra))
+ elif cell == '':
+ message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
+ if self.find_line_magic(magic_name) is not None:
+ message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
+ raise UsageError(message)
+ else:
+ # Note: this is the distance in the stack to the user's frame.
+ # This will need to be updated if the internal calling logic gets
+ # refactored, or else we'll be expanding the wrong variables.
+ stack_depth = 2
+ if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
+ # magic has opted out of var_expand
+ magic_arg_s = line
+ else:
+ magic_arg_s = self.var_expand(line, stack_depth)
+ kwargs = {}
+ if getattr(fn, "needs_local_scope", False):
+ kwargs['local_ns'] = self.user_ns
+
+ with self.builtin_trap:
+ args = (magic_arg_s, cell)
+ result = fn(*args, **kwargs)
+
+ # The code below prevents the output from being displayed
+ # when using magics with the decorator @output_can_be_silenced
+ # when the last Python token in the expression is a ';'.
+ if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
+ if DisplayHook.semicolon_at_end_of_expression(cell):
+ return None
+
+ return result
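+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     ip.run_cell_magic('timeit', '-n 10', 'total = sum(range(1000))')
+ #     ip.run_cell_magic('writefile', 'hello.txt', 'hello from a cell magic\n')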
+
+ def find_line_magic(self, magic_name):
+ """Find and return a line magic by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics['line'].get(magic_name)
+
+ def find_cell_magic(self, magic_name):
+ """Find and return a cell magic by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics['cell'].get(magic_name)
+
+ def find_magic(self, magic_name, magic_kind='line'):
+ """Find and return a magic of the given type by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics[magic_kind].get(magic_name)
+
+ def magic(self, arg_s):
+ """
+ DEPRECATED
+
+ Deprecated since IPython 0.13 (warning added in
+ 8.1), use run_line_magic(magic_name, parameter_s).
+
+ Call a magic function by name.
+
+ Input: a string containing the name of the magic function to call and
+ any additional arguments to be passed to the magic.
+
+ magic('name -opt foo bar') is equivalent to typing at the ipython
+ prompt:
+
+ In[1]: %name -opt foo bar
+
+ To call a magic without arguments, simply use magic('name').
+
+ This provides a proper Python function to call IPython's magics in any
+ valid Python code you can type at the interpreter, including loops and
+ compound statements.
+ """
+ warnings.warn(
+ "`magic(...)` is deprecated since IPython 0.13 (warning added in "
+ "8.1), use run_line_magic(magic_name, parameter_s).",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ # TODO: should we issue a loud deprecation warning here?
+ magic_name, _, magic_arg_s = arg_s.partition(' ')
+ magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
+ return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
+
+ #-------------------------------------------------------------------------
+ # Things related to macros
+ #-------------------------------------------------------------------------
+
+ def define_macro(self, name, themacro):
+ """Define a new macro
+
+ Parameters
+ ----------
+ name : str
+ The name of the macro.
+ themacro : str or Macro
+ The action to do upon invoking the macro. If a string, a new
+ Macro object is created by passing the string to it.
+ """
+
+ from IPython.core import macro
+
+ if isinstance(themacro, str):
+ themacro = macro.Macro(themacro)
+ if not isinstance(themacro, macro.Macro):
+ raise ValueError('A macro must be a string or a Macro instance.')
+ self.user_ns[name] = themacro
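+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     ip.define_macro('greet', 'name = "world"\nprint("hello", name)\n')
+ #     # typing `greet` at the prompt afterwards replays both lines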
+
+ #-------------------------------------------------------------------------
+ # Things related to the running of system commands
+ #-------------------------------------------------------------------------
+
+ def system_piped(self, cmd):
+ """Call the given cmd in a subprocess, piping stdout/err
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute (cannot end in '&', as background processes are
+ not supported). Should not be a command that expects input
+ other than simple text.
+ """
+ if cmd.rstrip().endswith('&'):
+ # this is *far* from a rigorous test
+ # We do not support backgrounding processes because we either use
+ # pexpect or pipes to read from. Users can always just call
+ # os.system() or use ip.system=ip.system_raw
+ # if they really want a background process.
+ raise OSError("Background processes not supported.")
+
+ # we explicitly do NOT return the subprocess status code, because
+ # a non-None value would trigger :func:`sys.displayhook` calls.
+ # Instead, we store the exit_code in user_ns.
+ self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
+
+ def system_raw(self, cmd):
+ """Call the given cmd in a subprocess using os.system on Windows or
+ subprocess.call using the system shell on other platforms.
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute.
+ """
+ cmd = self.var_expand(cmd, depth=1)
+ # warn if there is an IPython magic alternative.
+ main_cmd = cmd.split()[0]
+ has_magic_alternatives = ("pip", "conda", "cd")
+
+ if main_cmd in has_magic_alternatives:
+ warnings.warn(
+ (
+ "You executed the system command !{0} which may not work "
+ "as expected. Try the IPython magic %{0} instead."
+ ).format(main_cmd)
+ )
+
+ # protect os.system from UNC paths on Windows, which it can't handle:
+ if sys.platform == 'win32':
+ from IPython.utils._process_win32 import AvoidUNCPath
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ try:
+ ec = os.system(cmd)
+ except KeyboardInterrupt:
+ print('\n' + self.get_exception_only(), file=sys.stderr)
+ ec = -2
+ else:
+ # For posix the result of the subprocess.call() below is an exit
+ # code, which by convention is zero for success, positive for
+ # program failure. Exit codes above 128 are reserved for signals,
+ # and the formula for converting a signal to an exit code is usually
+ # signal_number+128. To more easily differentiate between exit
+ # codes and signals, ipython uses negative numbers. For instance
+ # since control-c is signal 2 but exit code 130, ipython's
+ # _exit_code variable will read -2. Note that some shells like
+ # csh and fish don't follow sh/bash conventions for exit codes.
+ executable = os.environ.get('SHELL', None)
+ try:
+ # Use env shell instead of default /bin/sh
+ ec = subprocess.call(cmd, shell=True, executable=executable)
+ except KeyboardInterrupt:
+ # intercept control-C; a long traceback is not useful here
+ print('\n' + self.get_exception_only(), file=sys.stderr)
+ ec = 130
+ if ec > 128:
+ ec = -(ec - 128)
+
+ # We explicitly do NOT return the subprocess status code, because
+ # a non-None value would trigger :func:`sys.displayhook` calls.
+ # Instead, we store the exit_code in user_ns. Note the semantics
+ # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
+ # but raising SystemExit(_exit_code) will give status 254!
+ self.user_ns['_exit_code'] = ec
+
+ # use piped system by default, because it is better behaved
+ system = system_piped
+
+ def getoutput(self, cmd, split=True, depth=0):
+ """Get output (possibly including stderr) from a subprocess.
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute (cannot end in '&', as background processes are
+ not supported).
+ split : bool, optional
+ If True, split the output into an IPython SList. Otherwise, an
+ IPython LSString is returned. These are objects similar to normal
+ lists and strings, with a few convenience attributes for easier
+ manipulation of line-based output. You can use '?' on them for
+ details.
+ depth : int, optional
+ How many frames above the caller are the local variables which should
+ be expanded in the command string? The default (0) assumes that the
+ expansion variables are in the stack frame calling this function.
+ """
+ if cmd.rstrip().endswith('&'):
+ # this is *far* from a rigorous test
+ raise OSError("Background processes not supported.")
+ out = getoutput(self.var_expand(cmd, depth=depth+1))
+ if split:
+ out = SList(out.splitlines())
+ else:
+ out = LSString(out)
+ return out
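+
+ # Illustrative usage sketch (not from the upstream file); on a POSIX system,
+ # assuming ``ip = get_ipython()``:
+ #
+ #     files = ip.getoutput('ls -1')              # SList of output lines
+ #     print(files.n)                             # newline-joined string form
+ #     raw = ip.getoutput('ls -1', split=False)   # single LSString instead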
+
+ #-------------------------------------------------------------------------
+ # Things related to aliases
+ #-------------------------------------------------------------------------
+
+ def init_alias(self):
+ self.alias_manager = AliasManager(shell=self, parent=self)
+ self.configurables.append(self.alias_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to extensions
+ #-------------------------------------------------------------------------
+
+ def init_extension_manager(self):
+ self.extension_manager = ExtensionManager(shell=self, parent=self)
+ self.configurables.append(self.extension_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to payloads
+ #-------------------------------------------------------------------------
+
+ def init_payload(self):
+ self.payload_manager = PayloadManager(parent=self)
+ self.configurables.append(self.payload_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to the prefilter
+ #-------------------------------------------------------------------------
+
+ def init_prefilter(self):
+ self.prefilter_manager = PrefilterManager(shell=self, parent=self)
+ self.configurables.append(self.prefilter_manager)
+ # Ultimately this will be refactored in the new interpreter code, but
+ # for now, we should expose the main prefilter method (there's legacy
+ # code out there that may rely on this).
+ self.prefilter = self.prefilter_manager.prefilter_lines
+
+ def auto_rewrite_input(self, cmd):
+ """Print to the screen the rewritten form of the user's command.
+
+ This shows visual feedback by rewriting input lines that cause
+ automatic calling to kick in, like::
+
+ /f x
+
+ into::
+
+ ------> f(x)
+
+ after the user's input prompt. This helps the user understand that the
+ input line was transformed automatically by IPython.
+ """
+ if not self.show_rewritten_input:
+ return
+
+ # This is overridden in TerminalInteractiveShell to use fancy prompts
+ print("------> " + cmd)
+
+ #-------------------------------------------------------------------------
+ # Things related to extracting values/expressions from kernel and user_ns
+ #-------------------------------------------------------------------------
+
+ def _user_obj_error(self):
+ """return simple exception dict
+
+ for use in user_expressions
+ """
+
+ etype, evalue, tb = self._get_exc_info()
+ stb = self.InteractiveTB.get_exception_only(etype, evalue)
+
+ exc_info = {
+ "status": "error",
+ "traceback": stb,
+ "ename": etype.__name__,
+ "evalue": py3compat.safe_unicode(evalue),
+ }
+
+ return exc_info
+
+ def _format_user_obj(self, obj):
+ """format a user object to display dict
+
+ for use in user_expressions
+ """
+
+ data, md = self.display_formatter.format(obj)
+ value = {
+ 'status' : 'ok',
+ 'data' : data,
+ 'metadata' : md,
+ }
+ return value
+
+ def user_expressions(self, expressions):
+ """Evaluate a dict of expressions in the user's namespace.
+
+ Parameters
+ ----------
+ expressions : dict
+ A dict with string keys and string values. The expression values
+ should be valid Python expressions, each of which will be evaluated
+ in the user namespace.
+
+ Returns
+ -------
+ A dict, keyed like the input expressions dict, with the rich mime-typed
+ display_data of each value.
+ """
+ out = {}
+ user_ns = self.user_ns
+ global_ns = self.user_global_ns
+
+ for key, expr in expressions.items():
+ try:
+ value = self._format_user_obj(eval(expr, global_ns, user_ns))
+ except:
+ value = self._user_obj_error()
+ out[key] = value
+ return out
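+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     ip.user_ns['x'] = 3
+ #     out = ip.user_expressions({'doubled': 'x * 2', 'broken': '1/0'})
+ #     # out['doubled']['status'] == 'ok'; out['broken']['status'] == 'error'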
+
+ #-------------------------------------------------------------------------
+ # Things related to the running of code
+ #-------------------------------------------------------------------------
+
+ def ex(self, cmd):
+ """Execute a normal python statement in user namespace."""
+ with self.builtin_trap:
+ exec(cmd, self.user_global_ns, self.user_ns)
+
+ def ev(self, expr):
+ """Evaluate python expression expr in user namespace.
+
+ Returns the result of evaluation
+ """
+ with self.builtin_trap:
+ return eval(expr, self.user_global_ns, self.user_ns)
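+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     ip.ex('counter = 0')               # statement: executed, nothing returned
+ #     ip.ex('counter += 1')
+ #     assert ip.ev('counter + 1') == 2   # expression: result is returned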
+
+ def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
+ """A safe version of the builtin execfile().
+
+ This version will never throw an exception, but instead print
+ helpful error messages to the screen. This only works on pure
+ Python files with the .py extension.
+
+ Parameters
+ ----------
+ fname : string
+ The name of the file to be executed.
+ *where : tuple
+ One or two namespaces, passed to execfile() as (globals,locals).
+ If only one is given, it is passed as both.
+ exit_ignore : bool (False)
+ If True, then silence SystemExit for non-zero status (it is always
+ silenced for zero status, as it is so common).
+ raise_exceptions : bool (False)
+ If True raise exceptions everywhere. Meant for testing.
+ shell_futures : bool (False)
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+
+ """
+ fname = Path(fname).expanduser().resolve()
+
+ # Make sure we can open the file
+ try:
+ with fname.open("rb"):
+ pass
+ except:
+ warn('Could not open file <%s> for safe execution.' % fname)
+ return
+
+ # Find things also in current directory. This is needed to mimic the
+ # behavior of running a script from the system command line, where
+ # Python inserts the script's directory into sys.path
+ dname = str(fname.parent)
+
+ with prepended_to_syspath(dname), self.builtin_trap:
+ try:
+ glob, loc = (where + (None, ))[:2]
+ py3compat.execfile(
+ fname, glob, loc,
+ self.compile if shell_futures else None)
+ except SystemExit as status:
+ # If the call was made with 0 or None exit status (sys.exit(0)
+ # or sys.exit() ), don't bother showing a traceback, as both of
+ # these are considered normal by the OS:
+ # > python -c'import sys;sys.exit(0)'; echo $?
+ # 0
+ # > python -c'import sys;sys.exit()'; echo $?
+ # 0
+ # For other exit status, we show the exception unless
+ # explicitly silenced, but only in short form.
+ if status.code:
+ if raise_exceptions:
+ raise
+ if not exit_ignore:
+ self.showtraceback(exception_only=True)
+ except:
+ if raise_exceptions:
+ raise
+ # tb offset is 2 because we wrap execfile
+ self.showtraceback(tb_offset=2)
+
+ def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
+ """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
+
+ Parameters
+ ----------
+ fname : str
+ The name of the file to execute. The filename must have a
+ .ipy or .ipynb extension.
+ shell_futures : bool (False)
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+ raise_exceptions : bool (False)
+ If True raise exceptions everywhere. Meant for testing.
+ """
+ fname = Path(fname).expanduser().resolve()
+
+ # Make sure we can open the file
+ try:
+ with fname.open("rb"):
+ pass
+ except:
+ warn('Could not open file <%s> for safe execution.' % fname)
+ return
+
+ # Find things also in current directory. This is needed to mimic the
+ # behavior of running a script from the system command line, where
+ # Python inserts the script's directory into sys.path
+ dname = str(fname.parent)
+
+ def get_cells():
+ """generator for sequence of code blocks to run"""
+ if fname.suffix == ".ipynb":
+ from nbformat import read
+ nb = read(fname, as_version=4)
+ if not nb.cells:
+ return
+ for cell in nb.cells:
+ if cell.cell_type == 'code':
+ yield cell.source
+ else:
+ yield fname.read_text(encoding="utf-8")
+
+ with prepended_to_syspath(dname):
+ try:
+ for cell in get_cells():
+ result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
+ if raise_exceptions:
+ result.raise_error()
+ elif not result.success:
+ break
+ except:
+ if raise_exceptions:
+ raise
+ self.showtraceback()
+ warn('Unknown failure executing file: <%s>' % fname)
+
+ def safe_run_module(self, mod_name, where):
+ """A safe version of runpy.run_module().
+
+ This version will never throw an exception, but instead print
+ helpful error messages to the screen.
+
+ `SystemExit` exceptions with status code 0 or None are ignored.
+
+ Parameters
+ ----------
+ mod_name : string
+ The name of the module to be executed.
+ where : dict
+ The globals namespace.
+ """
+ try:
+ try:
+ where.update(
+ runpy.run_module(str(mod_name), run_name="__main__",
+ alter_sys=True)
+ )
+ except SystemExit as status:
+ if status.code:
+ raise
+ except:
+ self.showtraceback()
+ warn('Unknown failure executing module: <%s>' % mod_name)
+
+ def run_cell(
+ self,
+ raw_cell,
+ store_history=False,
+ silent=False,
+ shell_futures=True,
+ cell_id=None,
+ ):
+ """Run a complete IPython cell.
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code (including IPython code such as %magic functions) to run.
+ store_history : bool
+ If True, the raw and translated cell will be stored in IPython's
+ history. For user code calling back into IPython's machinery, this
+ should be set to False.
+ silent : bool
+ If True, avoid side-effects, such as implicit displayhooks and
+ logging. silent=True forces store_history=False.
+ shell_futures : bool
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+
+ Returns
+ -------
+ result : :class:`ExecutionResult`
+ """
+ result = None
+ try:
+ result = self._run_cell(
+ raw_cell, store_history, silent, shell_futures, cell_id
+ )
+ finally:
+ self.events.trigger('post_execute')
+ if not silent:
+ self.events.trigger('post_run_cell', result)
+ return result
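+
+ # Illustrative usage sketch (not from the upstream file); assumes
+ # ``ip = get_ipython()``:
+ #
+ #     result = ip.run_cell('a = 1 + 1\na', store_history=True)
+ #     result.success           # True
+ #     result.result            # 2, the value of the last expression
+ #     result.raise_error()     # no-op on success, re-raises on failure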
+
+ def _run_cell(
+ self,
+ raw_cell: str,
+ store_history: bool,
+ silent: bool,
+ shell_futures: bool,
+ cell_id: str,
+ ) -> ExecutionResult:
+ """Internal method to run a complete IPython cell."""
+
+ # we need to avoid calling self.transform_cell multiple time on the same thing
+ # so we need to store some results:
+ preprocessing_exc_tuple = None
+ try:
+ transformed_cell = self.transform_cell(raw_cell)
+ except Exception:
+ transformed_cell = raw_cell
+ preprocessing_exc_tuple = sys.exc_info()
+
+ assert transformed_cell is not None
+ coro = self.run_cell_async(
+ raw_cell,
+ store_history=store_history,
+ silent=silent,
+ shell_futures=shell_futures,
+ transformed_cell=transformed_cell,
+ preprocessing_exc_tuple=preprocessing_exc_tuple,
+ cell_id=cell_id,
+ )
+
+ # run_cell_async is async, but may not actually need an eventloop.
+ # When this is the case, we want to run it using the pseudo_sync_runner
+ # so that code can invoke eventloops (for example via the %run and
+ # `%paste` magics).
+ if self.trio_runner:
+ runner = self.trio_runner
+ elif self.should_run_async(
+ raw_cell,
+ transformed_cell=transformed_cell,
+ preprocessing_exc_tuple=preprocessing_exc_tuple,
+ ):
+ runner = self.loop_runner
+ else:
+ runner = _pseudo_sync_runner
+
+ try:
+ result = runner(coro)
+ except BaseException as e:
+ info = ExecutionInfo(
+ raw_cell, store_history, silent, shell_futures, cell_id
+ )
+ result = ExecutionResult(info)
+ result.error_in_exec = e
+ self.showtraceback(running_compiled_code=True)
+ finally:
+ return result
+
+ def should_run_async(
+ self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
+ ) -> bool:
+ """Return whether a cell should be run asynchronously via a coroutine runner
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code to be executed
+
+ Returns
+ -------
+ result: bool
+ Whether the code needs to be run with a coroutine runner or not
+
+ .. versionadded:: 7.0
+ """
+ if not self.autoawait:
+ return False
+ if preprocessing_exc_tuple is not None:
+ return False
+ assert preprocessing_exc_tuple is None
+ if transformed_cell is None:
+ warnings.warn(
+ "`should_run_async` will not call `transform_cell`"
+ " automatically in the future. Please pass the result to"
+ " `transformed_cell` argument and any exception that happen"
+ " during the"
+ "transform in `preprocessing_exc_tuple` in"
+ " IPython 7.17 and above.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ try:
+ cell = self.transform_cell(raw_cell)
+ except Exception:
+ # any exception during transform will be raised
+ # prior to execution
+ return False
+ else:
+ cell = transformed_cell
+ return _should_be_async(cell)
+
+ async def run_cell_async(
+ self,
+ raw_cell: str,
+ store_history=False,
+ silent=False,
+ shell_futures=True,
+ *,
+ transformed_cell: Optional[str] = None,
+ preprocessing_exc_tuple: Optional[AnyType] = None,
+ cell_id=None,
+ ) -> ExecutionResult:
+ """Run a complete IPython cell asynchronously.
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code (including IPython code such as %magic functions) to run.
+ store_history : bool
+ If True, the raw and translated cell will be stored in IPython's
+ history. For user code calling back into IPython's machinery, this
+ should be set to False.
+ silent : bool
+ If True, avoid side-effects, such as implicit displayhooks and
+ logging. silent=True forces store_history=False.
+ shell_futures : bool
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+ transformed_cell: str
+ cell that was passed through transformers
+ preprocessing_exc_tuple:
+ trace if the transformation failed.
+
+ Returns
+ -------
+ result : :class:`ExecutionResult`
+
+ .. versionadded:: 7.0
+ """
+ info = ExecutionInfo(raw_cell, store_history, silent, shell_futures, cell_id)
+ result = ExecutionResult(info)
+
+ if (not raw_cell) or raw_cell.isspace():
+ self.last_execution_succeeded = True
+ self.last_execution_result = result
+ return result
+
+ if silent:
+ store_history = False
+
+ if store_history:
+ result.execution_count = self.execution_count
+
+ def error_before_exec(value):
+ if store_history:
+ self.execution_count += 1
+ result.error_before_exec = value
+ self.last_execution_succeeded = False
+ self.last_execution_result = result
+ return result
+
+ self.events.trigger('pre_execute')
+ if not silent:
+ self.events.trigger('pre_run_cell', info)
+
+ if transformed_cell is None:
+ warnings.warn(
+ "`run_cell_async` will not call `transform_cell`"
+ " automatically in the future. Please pass the result to"
+ " `transformed_cell` argument and any exception that happen"
+ " during the"
+ "transform in `preprocessing_exc_tuple` in"
+ " IPython 7.17 and above.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ # If any of our input transformation (input_transformer_manager or
+ # prefilter_manager) raises an exception, we store it in this variable
+ # so that we can display the error after logging the input and storing
+ # it in the history.
+ try:
+ cell = self.transform_cell(raw_cell)
+ except Exception:
+ preprocessing_exc_tuple = sys.exc_info()
+ cell = raw_cell # cell has to exist so it can be stored/logged
+ else:
+ preprocessing_exc_tuple = None
+ else:
+ if preprocessing_exc_tuple is None:
+ cell = transformed_cell
+ else:
+ cell = raw_cell
+
+ # Do NOT store paste/cpaste magic history
+ if "get_ipython().run_line_magic(" in cell and "paste" in cell:
+ store_history = False
+
+ # Store raw and processed history
+ if store_history:
+ self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
+ if not silent:
+ self.logger.log(cell, raw_cell)
+
+ # Display the exception if input processing failed.
+ if preprocessing_exc_tuple is not None:
+ self.showtraceback(preprocessing_exc_tuple)
+ if store_history:
+ self.execution_count += 1
+ return error_before_exec(preprocessing_exc_tuple[1])
+
+ # Our own compiler remembers the __future__ environment. If we want to
+ # run code with a separate __future__ environment, use the default
+ # compiler
+ compiler = self.compile if shell_futures else self.compiler_class()
+
+ _run_async = False
+
+ with self.builtin_trap:
+ cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
+
+ with self.display_trap:
+ # Compile to bytecode
+ try:
+ code_ast = compiler.ast_parse(cell, filename=cell_name)
+ except self.custom_exceptions as e:
+ etype, value, tb = sys.exc_info()
+ self.CustomTB(etype, value, tb)
+ return error_before_exec(e)
+ except IndentationError as e:
+ self.showindentationerror()
+ return error_before_exec(e)
+ except (OverflowError, SyntaxError, ValueError, TypeError,
+ MemoryError) as e:
+ self.showsyntaxerror()
+ return error_before_exec(e)
+
+ # Apply AST transformations
+ try:
+ code_ast = self.transform_ast(code_ast)
+ except InputRejected as e:
+ self.showtraceback()
+ return error_before_exec(e)
+
+ # Give the displayhook a reference to our ExecutionResult so it
+ # can fill in the output value.
+ self.displayhook.exec_result = result
+
+ # Execute the user code
+ interactivity = "none" if silent else self.ast_node_interactivity
+
+
+ has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
+ interactivity=interactivity, compiler=compiler, result=result)
+
+ self.last_execution_succeeded = not has_raised
+ self.last_execution_result = result
+
+ # Reset this so later displayed values do not modify the
+ # ExecutionResult
+ self.displayhook.exec_result = None
+
+ if store_history:
+ # Write output to the database. Does nothing unless
+ # history output logging is enabled.
+ self.history_manager.store_output(self.execution_count)
+ # Each cell is a *single* input, regardless of how many lines it has
+ self.execution_count += 1
+
+ return result
+
+ def transform_cell(self, raw_cell):
+ """Transform an input cell before parsing it.
+
+ Static transformations, implemented in IPython.core.inputtransformer2,
+ deal with things like ``%magic`` and ``!system`` commands.
+ These run on all input.
+ Dynamic transformations, for things like unescaped magics and the exit
+ autocall, depend on the state of the interpreter.
+ These only apply to single line inputs.
+
+ These string-based transformations are followed by AST transformations;
+ see :meth:`transform_ast`.
+ """
+ # Static input transformations
+ cell = self.input_transformer_manager.transform_cell(raw_cell)
+
+ if len(cell.splitlines()) == 1:
+ # Dynamic transformations - only applied for single line commands
+ with self.builtin_trap:
+ # use prefilter_lines to handle trailing newlines
+ # restore trailing newline for ast.parse
+ cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
+
+ lines = cell.splitlines(keepends=True)
+ for transform in self.input_transformers_post:
+ lines = transform(lines)
+ cell = ''.join(lines)
+
+ return cell
+
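+    # Editor's note (illustrative, not part of the upstream source): assuming
+    # `shell = get_ipython()` in an interactive session, `transform_cell`
+    # rewrites magics and shell escapes into plain Python, roughly:
+    #
+    #   >>> shell.transform_cell("%time x = 1")
+    #   "get_ipython().run_line_magic('time', 'x = 1')\n"
+    #
+    # Ordinary Python input passes through essentially unchanged.
+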
+ def transform_ast(self, node):
+ """Apply the AST transformations from self.ast_transformers
+
+ Parameters
+ ----------
+ node : ast.Node
+ The root node to be transformed. Typically called with the ast.Module
+ produced by parsing user input.
+
+ Returns
+ -------
+ An ast.Node corresponding to the node it was called with. Note that it
+ may also modify the passed object, so don't rely on references to the
+ original AST.
+ """
+ for transformer in self.ast_transformers:
+ try:
+ node = transformer.visit(node)
+ except InputRejected:
+ # User-supplied AST transformers can reject an input by raising
+ # an InputRejected. Short-circuit in this case so that we
+ # don't unregister the transform.
+ raise
+ except Exception:
+ warn("AST transformer %r threw an error. It will be unregistered." % transformer)
+ self.ast_transformers.remove(transformer)
+
+ if self.ast_transformers:
+ ast.fix_missing_locations(node)
+ return node
+
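+    # Editor's note (illustrative, not part of the upstream source): a minimal,
+    # hypothetical AST transformer registered via `ast_transformers`. Every
+    # later cell is parsed and passed through `transform_ast` before compiling:
+    #
+    #   >>> import ast
+    #   >>> class NegateInts(ast.NodeTransformer):
+    #   ...     def visit_Constant(self, node):
+    #   ...         if isinstance(node.value, int):
+    #   ...             return ast.copy_location(ast.Constant(-node.value), node)
+    #   ...         return node
+    #   >>> get_ipython().ast_transformers.append(NegateInts())
+    #   >>> 2 + 2        # both constants are negated before execution
+    #   -4
+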
+ async def run_ast_nodes(
+ self,
+ nodelist: ListType[stmt],
+ cell_name: str,
+ interactivity="last_expr",
+ compiler=compile,
+ result=None,
+ ):
+ """Run a sequence of AST nodes. The execution mode depends on the
+ interactivity parameter.
+
+ Parameters
+ ----------
+ nodelist : list
+ A sequence of AST nodes to run.
+ cell_name : str
+ Will be passed to the compiler as the filename of the cell. Typically
+ the value returned by ip.compile.cache(cell).
+ interactivity : str
+ 'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
+ specifying which nodes should be run interactively (displaying output
+ from expressions). 'last_expr' will run the last node interactively
+            only if it is an expression (i.e. expressions in loops or other blocks
+            are not displayed). 'last_expr_or_assign' will run the last expression
+ or the last assignment. Other values for this parameter will raise a
+ ValueError.
+
+ compiler : callable
+ A function with the same interface as the built-in compile(), to turn
+ the AST nodes into code objects. Default is the built-in compile().
+ result : ExecutionResult, optional
+ An object to store exceptions that occur during execution.
+
+ Returns
+ -------
+ True if an exception occurred while running code, False if it finished
+ running.
+ """
+ if not nodelist:
+ return
+
+
+ if interactivity == 'last_expr_or_assign':
+ if isinstance(nodelist[-1], _assign_nodes):
+ asg = nodelist[-1]
+ if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
+ target = asg.targets[0]
+ elif isinstance(asg, _single_targets_nodes):
+ target = asg.target
+ else:
+ target = None
+ if isinstance(target, ast.Name):
+ nnode = ast.Expr(ast.Name(target.id, ast.Load()))
+ ast.fix_missing_locations(nnode)
+ nodelist.append(nnode)
+ interactivity = 'last_expr'
+
+ _async = False
+ if interactivity == 'last_expr':
+ if isinstance(nodelist[-1], ast.Expr):
+ interactivity = "last"
+ else:
+ interactivity = "none"
+
+ if interactivity == 'none':
+ to_run_exec, to_run_interactive = nodelist, []
+ elif interactivity == 'last':
+ to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
+ elif interactivity == 'all':
+ to_run_exec, to_run_interactive = [], nodelist
+ else:
+ raise ValueError("Interactivity was %r" % interactivity)
+
+ try:
+
+ def compare(code):
+ is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
+ return is_async
+
+ # refactor that to just change the mod constructor.
+ to_run = []
+ for node in to_run_exec:
+ to_run.append((node, "exec"))
+
+ for node in to_run_interactive:
+ to_run.append((node, "single"))
+
+ for node, mode in to_run:
+ if mode == "exec":
+ mod = Module([node], [])
+ elif mode == "single":
+ mod = ast.Interactive([node]) # type: ignore
+ with compiler.extra_flags(
+ getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
+ if self.autoawait
+ else 0x0
+ ):
+ code = compiler(mod, cell_name, mode)
+ asy = compare(code)
+ if await self.run_code(code, result, async_=asy):
+ return True
+
+ # Flush softspace
+ if softspace(sys.stdout, 0):
+ print()
+
+ except:
+ # It's possible to have exceptions raised here, typically by
+ # compilation of odd code (such as a naked 'return' outside a
+ # function) that did parse but isn't valid. Typically the exception
+ # is a SyntaxError, but it's safest just to catch anything and show
+ # the user a traceback.
+
+ # We do only one try/except outside the loop to minimize the impact
+ # on runtime, and also because if any node in the node list is
+ # broken, we should stop execution completely.
+ if result:
+ result.error_before_exec = sys.exc_info()[1]
+ self.showtraceback()
+ return True
+
+ return False
+
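+    # Editor's note (illustrative, not part of the upstream source):
+    # `run_ast_nodes` is normally driven by `run_cell_async`, but the
+    # interactivity modes can be exercised directly in a session with
+    # autoawait enabled, roughly:
+    #
+    #   >>> import ast
+    #   >>> src = "a = 1\na + 1"
+    #   >>> name = shell.compile.cache(src, 0)
+    #   >>> await shell.run_ast_nodes(ast.parse(src).body, name,
+    #   ...                           interactivity="last_expr",
+    #   ...                           compiler=shell.compile)
+    #   2        # the trailing expression is displayed
+    #   False    # no exception was raised
+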
+ async def run_code(self, code_obj, result=None, *, async_=False):
+ """Execute a code object.
+
+ When an exception occurs, self.showtraceback() is called to display a
+ traceback.
+
+ Parameters
+ ----------
+ code_obj : code object
+ A compiled code object, to be executed
+ result : ExecutionResult, optional
+ An object to store exceptions that occur during execution.
+ async_ : Bool (Experimental)
+ Attempt to run top-level asynchronous code in a default loop.
+
+ Returns
+ -------
+ False : successful execution.
+ True : an error occurred.
+ """
+ # special value to say that anything above is IPython and should be
+ # hidden.
+ __tracebackhide__ = "__ipython_bottom__"
+ # Set our own excepthook in case the user code tries to call it
+ # directly, so that the IPython crash handler doesn't get triggered
+ old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
+
+ # we save the original sys.excepthook in the instance, in case config
+ # code (such as magics) needs access to it.
+ self.sys_excepthook = old_excepthook
+ outflag = True # happens in more places, so it's easier as default
+ try:
+ try:
+ if async_:
+ await eval(code_obj, self.user_global_ns, self.user_ns)
+ else:
+ exec(code_obj, self.user_global_ns, self.user_ns)
+ finally:
+ # Reset our crash handler in place
+ sys.excepthook = old_excepthook
+ except SystemExit as e:
+ if result is not None:
+ result.error_in_exec = e
+ self.showtraceback(exception_only=True)
+ warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
+ except bdb.BdbQuit:
+ etype, value, tb = sys.exc_info()
+ if result is not None:
+ result.error_in_exec = value
+ # the BdbQuit stops here
+ except self.custom_exceptions:
+ etype, value, tb = sys.exc_info()
+ if result is not None:
+ result.error_in_exec = value
+ self.CustomTB(etype, value, tb)
+ except:
+ if result is not None:
+ result.error_in_exec = sys.exc_info()[1]
+ self.showtraceback(running_compiled_code=True)
+ else:
+ outflag = False
+ return outflag
+
+ # For backwards compatibility
+ runcode = run_code
+
+ def check_complete(self, code: str) -> Tuple[str, str]:
+ """Return whether a block of code is ready to execute, or should be continued
+
+ Parameters
+ ----------
+ code : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+ One of 'complete', 'incomplete', or 'invalid' if source is not a
+ prefix of valid code.
+ indent : str
+ When status is 'incomplete', this is some whitespace to insert on
+ the next line of the prompt.
+ """
+ status, nspaces = self.input_transformer_manager.check_complete(code)
+ return status, ' ' * (nspaces or 0)
+
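+    # Editor's note (illustrative, not part of the upstream source): front ends
+    # use `check_complete` to decide between executing and prompting for a
+    # continuation line, roughly:
+    #
+    #   >>> shell.check_complete("1 + 1")
+    #   ('complete', '')
+    #   >>> shell.check_complete("for i in range(3):")
+    #   ('incomplete', '    ')
+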
+ #-------------------------------------------------------------------------
+ # Things related to GUI support and pylab
+ #-------------------------------------------------------------------------
+
+ active_eventloop = None
+
+ def enable_gui(self, gui=None):
+ raise NotImplementedError('Implement enable_gui in a subclass')
+
+ def enable_matplotlib(self, gui=None):
+ """Enable interactive matplotlib and inline figure support.
+
+ This takes the following steps:
+
+ 1. select the appropriate eventloop and matplotlib backend
+ 2. set up matplotlib for interactive use with that backend
+ 3. configure formatters for inline figure display
+ 4. enable the selected gui eventloop
+
+ Parameters
+ ----------
+ gui : optional, string
+ If given, dictates the choice of matplotlib GUI backend to use
+ (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
+ 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
+ matplotlib (as dictated by the matplotlib build-time options plus the
+ user's matplotlibrc configuration file). Note that not all backends
+ make sense in all contexts, for example a terminal ipython can't
+ display figures inline.
+ """
+ from matplotlib_inline.backend_inline import configure_inline_support
+
+ from IPython.core import pylabtools as pt
+ gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
+
+ if gui != 'inline':
+ # If we have our first gui selection, store it
+ if self.pylab_gui_select is None:
+ self.pylab_gui_select = gui
+ # Otherwise if they are different
+ elif gui != self.pylab_gui_select:
+ print('Warning: Cannot change to a different GUI toolkit: %s.'
+ ' Using %s instead.' % (gui, self.pylab_gui_select))
+ gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
+
+ pt.activate_matplotlib(backend)
+ configure_inline_support(self, backend)
+
+ # Now we must activate the gui pylab wants to use, and fix %run to take
+ # plot updates into account
+ self.enable_gui(gui)
+ self.magics_manager.registry['ExecutionMagics'].default_runner = \
+ pt.mpl_runner(self.safe_execfile)
+
+ return gui, backend
+
+ def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
+ """Activate pylab support at runtime.
+
+ This turns on support for matplotlib, preloads into the interactive
+ namespace all of numpy and pylab, and configures IPython to correctly
+ interact with the GUI event loop. The GUI backend to be used can be
+ optionally selected with the optional ``gui`` argument.
+
+ This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
+
+ Parameters
+ ----------
+ gui : optional, string
+ If given, dictates the choice of matplotlib GUI backend to use
+ (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
+ 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
+ matplotlib (as dictated by the matplotlib build-time options plus the
+ user's matplotlibrc configuration file). Note that not all backends
+ make sense in all contexts, for example a terminal ipython can't
+ display figures inline.
+ import_all : optional, bool, default: True
+ Whether to do `from numpy import *` and `from pylab import *`
+ in addition to module imports.
+ welcome_message : deprecated
+ This argument is ignored, no welcome message will be displayed.
+ """
+ from IPython.core.pylabtools import import_pylab
+
+ gui, backend = self.enable_matplotlib(gui)
+
+ # We want to prevent the loading of pylab to pollute the user's
+ # namespace as shown by the %who* magics, so we execute the activation
+ # code in an empty namespace, and we update *both* user_ns and
+ # user_ns_hidden with this information.
+ ns = {}
+ import_pylab(ns, import_all)
+ # warn about clobbered names
+ ignored = {"__builtins__"}
+ both = set(ns).intersection(self.user_ns).difference(ignored)
+ clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
+ self.user_ns.update(ns)
+ self.user_ns_hidden.update(ns)
+ return gui, backend, clobbered
+
+ #-------------------------------------------------------------------------
+ # Utilities
+ #-------------------------------------------------------------------------
+
+ def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
+ """Expand python variables in a string.
+
+ The depth argument indicates how many frames above the caller should
+ be walked to look for the local namespace where to expand variables.
+
+ The global namespace for expansion is always the user's interactive
+ namespace.
+ """
+ ns = self.user_ns.copy()
+ try:
+ frame = sys._getframe(depth+1)
+ except ValueError:
+ # This is thrown if there aren't that many frames on the stack,
+ # e.g. if a script called run_line_magic() directly.
+ pass
+ else:
+ ns.update(frame.f_locals)
+
+ try:
+ # We have to use .vformat() here, because 'self' is a valid and common
+ # name, and expanding **ns for .format() would make it collide with
+ # the 'self' argument of the method.
+ cmd = formatter.vformat(cmd, args=[], kwargs=ns)
+ except Exception:
+ # if formatter couldn't format, just let it go untransformed
+ pass
+ return cmd
+
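+    # Editor's note (illustrative, not part of the upstream source):
+    # `var_expand` uses a $-style formatter, so both `$name` and `{expr}`
+    # pull values from the user namespace (and the caller's locals), roughly:
+    #
+    #   >>> shell.user_ns["pattern"] = "*.py"
+    #   >>> shell.var_expand("ls $pattern")
+    #   'ls *.py'
+    #   >>> shell.var_expand("echo {1 + 1}")
+    #   'echo 2'
+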
+ def mktempfile(self, data=None, prefix='ipython_edit_'):
+ """Make a new tempfile and return its filename.
+
+        The file is created via tempfile.mkstemp inside a directory made with
+        tempfile.mkdtemp, and the resulting filename is registered internally
+        so IPython cleans it up at exit time.
+
+ Optional inputs:
+
+ - data(None): if data is given, it gets written out to the temp file
+ immediately, and the file is closed again."""
+
+ dir_path = Path(tempfile.mkdtemp(prefix=prefix))
+ self.tempdirs.append(dir_path)
+
+ handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
+ os.close(handle) # On Windows, there can only be one open handle on a file
+
+ file_path = Path(filename)
+ self.tempfiles.append(file_path)
+
+ if data:
+ file_path.write_text(data, encoding="utf-8")
+ return filename
+
+ def ask_yes_no(self, prompt, default=None, interrupt=None):
+ if self.quiet:
+ return True
+ return ask_yes_no(prompt,default,interrupt)
+
+ def show_usage(self):
+ """Show a usage message"""
+ page.page(IPython.core.usage.interactive_usage)
+
+ def extract_input_lines(self, range_str, raw=False):
+ """Return as a string a set of input history slices.
+
+ Parameters
+ ----------
+ range_str : str
+ The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
+ since this function is for use by magic functions which get their
+ arguments as strings. The number before the / is the session
+ number: ~n goes n back from the current session.
+
+            If an empty string is given, returns the history of the current
+            session without the last input.
+
+ raw : bool, optional
+ By default, the processed input is used. If this is true, the raw
+ input history is used instead.
+
+ Notes
+ -----
+ Slices can be described with two notations:
+
+ * ``N:M`` -> standard python form, means including items N...(M-1).
+ * ``N-M`` -> include items N..M (closed endpoint).
+ """
+ lines = self.history_manager.get_range_by_str(range_str, raw=raw)
+ text = "\n".join(x for _, _, x in lines)
+
+ # Skip the last line, as it's probably the magic that called this
+ if not range_str:
+ if "\n" not in text:
+ text = ""
+ else:
+ text = text[: text.rfind("\n")]
+
+ return text
+
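+    # Editor's note (illustrative, not part of the upstream source): the range
+    # syntax mirrors the %history magic, for example:
+    #
+    #   >>> shell.extract_input_lines("2-4")        # inputs 2, 3 and 4
+    #   >>> shell.extract_input_lines("4:8")        # inputs 4..7 (Python slice)
+    #   >>> shell.extract_input_lines("~1/5")       # input 5 of the previous session
+    #   >>> shell.extract_input_lines("", raw=True) # this session, minus the last line
+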
+ def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
+ """Get a code string from history, file, url, or a string or macro.
+
+ This is mainly used by magic functions.
+
+ Parameters
+ ----------
+ target : str
+ A string specifying code to retrieve. This will be tried respectively
+ as: ranges of input history (see %history for syntax), url,
+ corresponding .py file, filename, or an expression evaluating to a
+ string or Macro in the user namespace.
+
+            If an empty string is given, returns the complete history of the
+            current session, without the last line.
+
+ raw : bool
+ If true (default), retrieve raw history. Has no effect on the other
+ retrieval mechanisms.
+
+ py_only : bool (default False)
+ Only try to fetch python code, do not try alternative methods to decode file
+ if unicode fails.
+
+ Returns
+ -------
+ A string of code.
+ ValueError is raised if nothing is found, and TypeError if it evaluates
+ to an object of another type. In each case, .args[0] is a printable
+ message.
+ """
+ code = self.extract_input_lines(target, raw=raw) # Grab history
+ if code:
+ return code
+ try:
+ if target.startswith(('http://', 'https://')):
+ return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
+ except UnicodeDecodeError as e:
+ if not py_only :
+ # Deferred import
+ from urllib.request import urlopen
+ response = urlopen(target)
+ return response.read().decode('latin1')
+            raise ValueError("'%s' seems to be unreadable." % target) from e
+
+ potential_target = [target]
+ try :
+ potential_target.insert(0,get_py_filename(target))
+ except IOError:
+ pass
+
+ for tgt in potential_target :
+ if os.path.isfile(tgt): # Read file
+ try :
+ return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
+ except UnicodeDecodeError as e:
+ if not py_only :
+ with io_open(tgt,'r', encoding='latin1') as f :
+ return f.read()
+                    raise ValueError("'%s' seems to be unreadable." % target) from e
+ elif os.path.isdir(os.path.expanduser(tgt)):
+ raise ValueError("'%s' is a directory, not a regular file." % target)
+
+ if search_ns:
+ # Inspect namespace to load object source
+ object_info = self.object_inspect(target, detail_level=1)
+ if object_info['found'] and object_info['source']:
+ return object_info['source']
+
+ try: # User namespace
+ codeobj = eval(target, self.user_ns)
+ except Exception as e:
+ raise ValueError(("'%s' was not found in history, as a file, url, "
+ "nor in the user namespace.") % target) from e
+
+ if isinstance(codeobj, str):
+ return codeobj
+ elif isinstance(codeobj, Macro):
+ return codeobj.value
+
+ raise TypeError("%s is neither a string nor a macro." % target,
+ codeobj)
+
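+    # Editor's note (illustrative, not part of the upstream source): the lookup
+    # order is history range, URL, file on disk, then a string or Macro in the
+    # user namespace, e.g. (file and macro names here are hypothetical):
+    #
+    #   >>> shell.find_user_code("5-7")          # history lines 5 to 7
+    #   >>> shell.find_user_code("script.py")    # contents of a file
+    #   >>> shell.find_user_code("my_macro")     # value of a %macro definition
+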
+ def _atexit_once(self):
+ """
+        At-exit operations that need to be called at most once.
+        A second call to this method on the same instance does nothing.
+ """
+
+ if not getattr(self, "_atexit_once_called", False):
+ self._atexit_once_called = True
+ # Clear all user namespaces to release all references cleanly.
+ self.reset(new_session=False)
+ # Close the history session (this stores the end time and line count)
+ # this must be *before* the tempfile cleanup, in case of temporary
+ # history db
+ self.history_manager.end_session()
+ self.history_manager = None
+
+ #-------------------------------------------------------------------------
+ # Things related to IPython exiting
+ #-------------------------------------------------------------------------
+ def atexit_operations(self):
+ """This will be executed at the time of exit.
+
+ Cleanup operations and saving of persistent data that is done
+ unconditionally by IPython should be performed here.
+
+ For things that may depend on startup flags or platform specifics (such
+ as having readline or not), register a separate atexit function in the
+        code that has the appropriate information, rather than trying to
+        clutter this method with platform-specific logic.
+ """
+ self._atexit_once()
+
+ # Cleanup all tempfiles and folders left around
+ for tfile in self.tempfiles:
+ try:
+ tfile.unlink()
+ self.tempfiles.remove(tfile)
+ except FileNotFoundError:
+ pass
+ del self.tempfiles
+ for tdir in self.tempdirs:
+ try:
+ tdir.rmdir()
+ self.tempdirs.remove(tdir)
+ except FileNotFoundError:
+ pass
+ del self.tempdirs
+
+ # Restore user's cursor
+ if hasattr(self, "editing_mode") and self.editing_mode == "vi":
+ sys.stdout.write("\x1b[0 q")
+ sys.stdout.flush()
+
+ def cleanup(self):
+ self.restore_sys_module_state()
+
+
+ # Overridden in terminal subclass to change prompts
+ def switch_doctest_mode(self, mode):
+ pass
+
+
+class InteractiveShellABC(metaclass=abc.ABCMeta):
+ """An abstract base class for InteractiveShell."""
+
+InteractiveShellABC.register(InteractiveShell)
diff --git a/contrib/python/ipython/py3/IPython/core/latex_symbols.py b/contrib/python/ipython/py3/IPython/core/latex_symbols.py
new file mode 100644
index 0000000000..164d917beb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/latex_symbols.py
@@ -0,0 +1,1301 @@
+# encoding: utf-8
+
+# DO NOT EDIT THIS FILE BY HAND.
+
+# To update this file, run the script /tools/gen_latex_symbols.py using Python 3
+
+# This file is autogenerated from the file:
+# https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
+# This original list is filtered to remove any unicode characters that are not valid
+# Python identifiers.
+
+latex_symbols = {
+
+ "\\euler" : "ℯ",
+ "\\^a" : "ᵃ",
+ "\\^b" : "ᵇ",
+ "\\^c" : "ᶜ",
+ "\\^d" : "ᵈ",
+ "\\^e" : "ᵉ",
+ "\\^f" : "ᶠ",
+ "\\^g" : "ᵍ",
+ "\\^h" : "ʰ",
+ "\\^i" : "ⁱ",
+ "\\^j" : "ʲ",
+ "\\^k" : "ᵏ",
+ "\\^l" : "ˡ",
+ "\\^m" : "ᵐ",
+ "\\^n" : "ⁿ",
+ "\\^o" : "ᵒ",
+ "\\^p" : "ᵖ",
+ "\\^r" : "ʳ",
+ "\\^s" : "ˢ",
+ "\\^t" : "ᵗ",
+ "\\^u" : "ᵘ",
+ "\\^v" : "ᵛ",
+ "\\^w" : "ʷ",
+ "\\^x" : "ˣ",
+ "\\^y" : "ʸ",
+ "\\^z" : "ᶻ",
+ "\\^A" : "ᴬ",
+ "\\^B" : "ᴮ",
+ "\\^D" : "ᴰ",
+ "\\^E" : "ᴱ",
+ "\\^G" : "ᴳ",
+ "\\^H" : "ᴴ",
+ "\\^I" : "ᴵ",
+ "\\^J" : "ᴶ",
+ "\\^K" : "ᴷ",
+ "\\^L" : "ᴸ",
+ "\\^M" : "ᴹ",
+ "\\^N" : "ᴺ",
+ "\\^O" : "ᴼ",
+ "\\^P" : "ᴾ",
+ "\\^R" : "ᴿ",
+ "\\^T" : "ᵀ",
+ "\\^U" : "ᵁ",
+ "\\^V" : "ⱽ",
+ "\\^W" : "ᵂ",
+ "\\^alpha" : "ᵅ",
+ "\\^beta" : "ᵝ",
+ "\\^gamma" : "ᵞ",
+ "\\^delta" : "ᵟ",
+ "\\^epsilon" : "ᵋ",
+ "\\^theta" : "ᶿ",
+ "\\^iota" : "ᶥ",
+ "\\^phi" : "ᵠ",
+ "\\^chi" : "ᵡ",
+ "\\^Phi" : "ᶲ",
+ "\\_a" : "ₐ",
+ "\\_e" : "ₑ",
+ "\\_h" : "ₕ",
+ "\\_i" : "ᵢ",
+ "\\_j" : "ⱼ",
+ "\\_k" : "ₖ",
+ "\\_l" : "ₗ",
+ "\\_m" : "ₘ",
+ "\\_n" : "ₙ",
+ "\\_o" : "ₒ",
+ "\\_p" : "ₚ",
+ "\\_r" : "ᵣ",
+ "\\_s" : "ₛ",
+ "\\_t" : "ₜ",
+ "\\_u" : "ᵤ",
+ "\\_v" : "ᵥ",
+ "\\_x" : "ₓ",
+ "\\_schwa" : "ₔ",
+ "\\_beta" : "ᵦ",
+ "\\_gamma" : "ᵧ",
+ "\\_rho" : "ᵨ",
+ "\\_phi" : "ᵩ",
+ "\\_chi" : "ᵪ",
+ "\\hbar" : "ħ",
+ "\\sout" : "̶",
+ "\\ordfeminine" : "ª",
+ "\\cdotp" : "·",
+ "\\ordmasculine" : "º",
+ "\\AA" : "Å",
+ "\\AE" : "Æ",
+ "\\DH" : "Ð",
+ "\\O" : "Ø",
+ "\\TH" : "Þ",
+ "\\ss" : "ß",
+ "\\aa" : "å",
+ "\\ae" : "æ",
+ "\\eth" : "ð",
+ "\\dh" : "ð",
+ "\\o" : "ø",
+ "\\th" : "þ",
+ "\\DJ" : "Đ",
+ "\\dj" : "đ",
+ "\\imath" : "ı",
+ "\\jmath" : "ȷ",
+ "\\L" : "Ł",
+ "\\l" : "ł",
+ "\\NG" : "Ŋ",
+ "\\ng" : "ŋ",
+ "\\OE" : "Œ",
+ "\\oe" : "œ",
+ "\\hvlig" : "ƕ",
+ "\\nrleg" : "ƞ",
+ "\\doublepipe" : "ǂ",
+ "\\trna" : "ɐ",
+ "\\trnsa" : "ɒ",
+ "\\openo" : "ɔ",
+ "\\rtld" : "ɖ",
+ "\\schwa" : "ə",
+ "\\varepsilon" : "ε",
+ "\\pgamma" : "ɣ",
+ "\\pbgam" : "ɤ",
+ "\\trnh" : "ɥ",
+ "\\btdl" : "ɬ",
+ "\\rtll" : "ɭ",
+ "\\trnm" : "ɯ",
+ "\\trnmlr" : "ɰ",
+ "\\ltlmr" : "ɱ",
+ "\\ltln" : "ɲ",
+ "\\rtln" : "ɳ",
+ "\\clomeg" : "ɷ",
+ "\\ltphi" : "ɸ",
+ "\\trnr" : "ɹ",
+ "\\trnrl" : "ɺ",
+ "\\rttrnr" : "ɻ",
+ "\\rl" : "ɼ",
+ "\\rtlr" : "ɽ",
+ "\\fhr" : "ɾ",
+ "\\rtls" : "ʂ",
+ "\\esh" : "ʃ",
+ "\\trnt" : "ʇ",
+ "\\rtlt" : "ʈ",
+ "\\pupsil" : "ʊ",
+ "\\pscrv" : "ʋ",
+ "\\invv" : "ʌ",
+ "\\invw" : "ʍ",
+ "\\trny" : "ʎ",
+ "\\rtlz" : "ʐ",
+ "\\yogh" : "ʒ",
+ "\\glst" : "ʔ",
+ "\\reglst" : "ʕ",
+ "\\inglst" : "ʖ",
+ "\\turnk" : "ʞ",
+ "\\dyogh" : "ʤ",
+ "\\tesh" : "ʧ",
+ "\\rasp" : "ʼ",
+ "\\verts" : "ˈ",
+ "\\verti" : "ˌ",
+ "\\lmrk" : "ː",
+ "\\hlmrk" : "ˑ",
+ "\\grave" : "̀",
+ "\\acute" : "́",
+ "\\hat" : "̂",
+ "\\tilde" : "̃",
+ "\\bar" : "̄",
+ "\\breve" : "̆",
+ "\\dot" : "̇",
+ "\\ddot" : "̈",
+ "\\ocirc" : "̊",
+ "\\H" : "̋",
+ "\\check" : "̌",
+ "\\palh" : "̡",
+ "\\rh" : "̢",
+ "\\c" : "̧",
+ "\\k" : "̨",
+ "\\sbbrg" : "̪",
+ "\\strike" : "̶",
+ "\\Alpha" : "Α",
+ "\\Beta" : "Β",
+ "\\Gamma" : "Γ",
+ "\\Delta" : "Δ",
+ "\\Epsilon" : "Ε",
+ "\\Zeta" : "Ζ",
+ "\\Eta" : "Η",
+ "\\Theta" : "Θ",
+ "\\Iota" : "Ι",
+ "\\Kappa" : "Κ",
+ "\\Lambda" : "Λ",
+ "\\Xi" : "Ξ",
+ "\\Pi" : "Π",
+ "\\Rho" : "Ρ",
+ "\\Sigma" : "Σ",
+ "\\Tau" : "Τ",
+ "\\Upsilon" : "Υ",
+ "\\Phi" : "Φ",
+ "\\Chi" : "Χ",
+ "\\Psi" : "Ψ",
+ "\\Omega" : "Ω",
+ "\\alpha" : "α",
+ "\\beta" : "β",
+ "\\gamma" : "γ",
+ "\\delta" : "δ",
+ "\\zeta" : "ζ",
+ "\\eta" : "η",
+ "\\theta" : "θ",
+ "\\iota" : "ι",
+ "\\kappa" : "κ",
+ "\\lambda" : "λ",
+ "\\mu" : "μ",
+ "\\nu" : "ν",
+ "\\xi" : "ξ",
+ "\\pi" : "π",
+ "\\rho" : "ρ",
+ "\\varsigma" : "ς",
+ "\\sigma" : "σ",
+ "\\tau" : "τ",
+ "\\upsilon" : "υ",
+ "\\varphi" : "φ",
+ "\\chi" : "χ",
+ "\\psi" : "ψ",
+ "\\omega" : "ω",
+ "\\vartheta" : "ϑ",
+ "\\phi" : "ϕ",
+ "\\varpi" : "ϖ",
+ "\\Stigma" : "Ϛ",
+ "\\Digamma" : "Ϝ",
+ "\\digamma" : "ϝ",
+ "\\Koppa" : "Ϟ",
+ "\\Sampi" : "Ϡ",
+ "\\varkappa" : "ϰ",
+ "\\varrho" : "ϱ",
+ "\\varTheta" : "ϴ",
+ "\\epsilon" : "ϵ",
+ "\\dddot" : "⃛",
+ "\\ddddot" : "⃜",
+ "\\hslash" : "ℏ",
+ "\\Im" : "ℑ",
+ "\\ell" : "ℓ",
+ "\\wp" : "℘",
+ "\\Re" : "ℜ",
+ "\\aleph" : "ℵ",
+ "\\beth" : "ℶ",
+ "\\gimel" : "ℷ",
+ "\\daleth" : "ℸ",
+ "\\bbPi" : "ℿ",
+ "\\Zbar" : "Ƶ",
+ "\\overbar" : "̅",
+ "\\ovhook" : "̉",
+ "\\candra" : "̐",
+ "\\oturnedcomma" : "̒",
+ "\\ocommatopright" : "̕",
+ "\\droang" : "̚",
+ "\\wideutilde" : "̰",
+ "\\not" : "̸",
+ "\\upMu" : "Μ",
+ "\\upNu" : "Ν",
+ "\\upOmicron" : "Ο",
+ "\\upepsilon" : "ε",
+ "\\upomicron" : "ο",
+ "\\upvarbeta" : "ϐ",
+ "\\upoldKoppa" : "Ϙ",
+ "\\upoldkoppa" : "ϙ",
+ "\\upstigma" : "ϛ",
+ "\\upkoppa" : "ϟ",
+ "\\upsampi" : "ϡ",
+ "\\tieconcat" : "⁀",
+ "\\leftharpoonaccent" : "⃐",
+ "\\rightharpoonaccent" : "⃑",
+ "\\vertoverlay" : "⃒",
+ "\\overleftarrow" : "⃖",
+ "\\vec" : "⃗",
+ "\\overleftrightarrow" : "⃡",
+ "\\annuity" : "⃧",
+ "\\threeunderdot" : "⃨",
+ "\\widebridgeabove" : "⃩",
+ "\\bbC" : "ℂ",
+ "\\eulermascheroni" : "ℇ",
+ "\\scrg" : "ℊ",
+ "\\scrH" : "ℋ",
+ "\\frakH" : "ℌ",
+ "\\bbH" : "ℍ",
+ "\\planck" : "ℎ",
+ "\\scrI" : "ℐ",
+ "\\scrL" : "ℒ",
+ "\\bbN" : "ℕ",
+ "\\bbP" : "ℙ",
+ "\\bbQ" : "ℚ",
+ "\\scrR" : "ℛ",
+ "\\bbR" : "ℝ",
+ "\\bbZ" : "ℤ",
+ "\\frakZ" : "ℨ",
+ "\\Angstrom" : "Å",
+ "\\scrB" : "ℬ",
+ "\\frakC" : "ℭ",
+ "\\scre" : "ℯ",
+ "\\scrE" : "ℰ",
+ "\\scrF" : "ℱ",
+ "\\Finv" : "Ⅎ",
+ "\\scrM" : "ℳ",
+ "\\scro" : "ℴ",
+ "\\bbgamma" : "ℽ",
+ "\\bbGamma" : "ℾ",
+ "\\bbiD" : "ⅅ",
+ "\\bbid" : "ⅆ",
+ "\\bbie" : "ⅇ",
+ "\\bbii" : "ⅈ",
+ "\\bbij" : "ⅉ",
+ "\\bfA" : "𝐀",
+ "\\bfB" : "𝐁",
+ "\\bfC" : "𝐂",
+ "\\bfD" : "𝐃",
+ "\\bfE" : "𝐄",
+ "\\bfF" : "𝐅",
+ "\\bfG" : "𝐆",
+ "\\bfH" : "𝐇",
+ "\\bfI" : "𝐈",
+ "\\bfJ" : "𝐉",
+ "\\bfK" : "𝐊",
+ "\\bfL" : "𝐋",
+ "\\bfM" : "𝐌",
+ "\\bfN" : "𝐍",
+ "\\bfO" : "𝐎",
+ "\\bfP" : "𝐏",
+ "\\bfQ" : "𝐐",
+ "\\bfR" : "𝐑",
+ "\\bfS" : "𝐒",
+ "\\bfT" : "𝐓",
+ "\\bfU" : "𝐔",
+ "\\bfV" : "𝐕",
+ "\\bfW" : "𝐖",
+ "\\bfX" : "𝐗",
+ "\\bfY" : "𝐘",
+ "\\bfZ" : "𝐙",
+ "\\bfa" : "𝐚",
+ "\\bfb" : "𝐛",
+ "\\bfc" : "𝐜",
+ "\\bfd" : "𝐝",
+ "\\bfe" : "𝐞",
+ "\\bff" : "𝐟",
+ "\\bfg" : "𝐠",
+ "\\bfh" : "𝐡",
+ "\\bfi" : "𝐢",
+ "\\bfj" : "𝐣",
+ "\\bfk" : "𝐤",
+ "\\bfl" : "𝐥",
+ "\\bfm" : "𝐦",
+ "\\bfn" : "𝐧",
+ "\\bfo" : "𝐨",
+ "\\bfp" : "𝐩",
+ "\\bfq" : "𝐪",
+ "\\bfr" : "𝐫",
+ "\\bfs" : "𝐬",
+ "\\bft" : "𝐭",
+ "\\bfu" : "𝐮",
+ "\\bfv" : "𝐯",
+ "\\bfw" : "𝐰",
+ "\\bfx" : "𝐱",
+ "\\bfy" : "𝐲",
+ "\\bfz" : "𝐳",
+ "\\itA" : "𝐴",
+ "\\itB" : "𝐵",
+ "\\itC" : "𝐶",
+ "\\itD" : "𝐷",
+ "\\itE" : "𝐸",
+ "\\itF" : "𝐹",
+ "\\itG" : "𝐺",
+ "\\itH" : "𝐻",
+ "\\itI" : "𝐼",
+ "\\itJ" : "𝐽",
+ "\\itK" : "𝐾",
+ "\\itL" : "𝐿",
+ "\\itM" : "𝑀",
+ "\\itN" : "𝑁",
+ "\\itO" : "𝑂",
+ "\\itP" : "𝑃",
+ "\\itQ" : "𝑄",
+ "\\itR" : "𝑅",
+ "\\itS" : "𝑆",
+ "\\itT" : "𝑇",
+ "\\itU" : "𝑈",
+ "\\itV" : "𝑉",
+ "\\itW" : "𝑊",
+ "\\itX" : "𝑋",
+ "\\itY" : "𝑌",
+ "\\itZ" : "𝑍",
+ "\\ita" : "𝑎",
+ "\\itb" : "𝑏",
+ "\\itc" : "𝑐",
+ "\\itd" : "𝑑",
+ "\\ite" : "𝑒",
+ "\\itf" : "𝑓",
+ "\\itg" : "𝑔",
+ "\\iti" : "𝑖",
+ "\\itj" : "𝑗",
+ "\\itk" : "𝑘",
+ "\\itl" : "𝑙",
+ "\\itm" : "𝑚",
+ "\\itn" : "𝑛",
+ "\\ito" : "𝑜",
+ "\\itp" : "𝑝",
+ "\\itq" : "𝑞",
+ "\\itr" : "𝑟",
+ "\\its" : "𝑠",
+ "\\itt" : "𝑡",
+ "\\itu" : "𝑢",
+ "\\itv" : "𝑣",
+ "\\itw" : "𝑤",
+ "\\itx" : "𝑥",
+ "\\ity" : "𝑦",
+ "\\itz" : "𝑧",
+ "\\biA" : "𝑨",
+ "\\biB" : "𝑩",
+ "\\biC" : "𝑪",
+ "\\biD" : "𝑫",
+ "\\biE" : "𝑬",
+ "\\biF" : "𝑭",
+ "\\biG" : "𝑮",
+ "\\biH" : "𝑯",
+ "\\biI" : "𝑰",
+ "\\biJ" : "𝑱",
+ "\\biK" : "𝑲",
+ "\\biL" : "𝑳",
+ "\\biM" : "𝑴",
+ "\\biN" : "𝑵",
+ "\\biO" : "𝑶",
+ "\\biP" : "𝑷",
+ "\\biQ" : "𝑸",
+ "\\biR" : "𝑹",
+ "\\biS" : "𝑺",
+ "\\biT" : "𝑻",
+ "\\biU" : "𝑼",
+ "\\biV" : "𝑽",
+ "\\biW" : "𝑾",
+ "\\biX" : "𝑿",
+ "\\biY" : "𝒀",
+ "\\biZ" : "𝒁",
+ "\\bia" : "𝒂",
+ "\\bib" : "𝒃",
+ "\\bic" : "𝒄",
+ "\\bid" : "𝒅",
+ "\\bie" : "𝒆",
+ "\\bif" : "𝒇",
+ "\\big" : "𝒈",
+ "\\bih" : "𝒉",
+ "\\bii" : "𝒊",
+ "\\bij" : "𝒋",
+ "\\bik" : "𝒌",
+ "\\bil" : "𝒍",
+ "\\bim" : "𝒎",
+ "\\bin" : "𝒏",
+ "\\bio" : "𝒐",
+ "\\bip" : "𝒑",
+ "\\biq" : "𝒒",
+ "\\bir" : "𝒓",
+ "\\bis" : "𝒔",
+ "\\bit" : "𝒕",
+ "\\biu" : "𝒖",
+ "\\biv" : "𝒗",
+ "\\biw" : "𝒘",
+ "\\bix" : "𝒙",
+ "\\biy" : "𝒚",
+ "\\biz" : "𝒛",
+ "\\scrA" : "𝒜",
+ "\\scrC" : "𝒞",
+ "\\scrD" : "𝒟",
+ "\\scrG" : "𝒢",
+ "\\scrJ" : "𝒥",
+ "\\scrK" : "𝒦",
+ "\\scrN" : "𝒩",
+ "\\scrO" : "𝒪",
+ "\\scrP" : "𝒫",
+ "\\scrQ" : "𝒬",
+ "\\scrS" : "𝒮",
+ "\\scrT" : "𝒯",
+ "\\scrU" : "𝒰",
+ "\\scrV" : "𝒱",
+ "\\scrW" : "𝒲",
+ "\\scrX" : "𝒳",
+ "\\scrY" : "𝒴",
+ "\\scrZ" : "𝒵",
+ "\\scra" : "𝒶",
+ "\\scrb" : "𝒷",
+ "\\scrc" : "𝒸",
+ "\\scrd" : "𝒹",
+ "\\scrf" : "𝒻",
+ "\\scrh" : "𝒽",
+ "\\scri" : "𝒾",
+ "\\scrj" : "𝒿",
+ "\\scrk" : "𝓀",
+ "\\scrm" : "𝓂",
+ "\\scrn" : "𝓃",
+ "\\scrp" : "𝓅",
+ "\\scrq" : "𝓆",
+ "\\scrr" : "𝓇",
+ "\\scrs" : "𝓈",
+ "\\scrt" : "𝓉",
+ "\\scru" : "𝓊",
+ "\\scrv" : "𝓋",
+ "\\scrw" : "𝓌",
+ "\\scrx" : "𝓍",
+ "\\scry" : "𝓎",
+ "\\scrz" : "𝓏",
+ "\\bscrA" : "𝓐",
+ "\\bscrB" : "𝓑",
+ "\\bscrC" : "𝓒",
+ "\\bscrD" : "𝓓",
+ "\\bscrE" : "𝓔",
+ "\\bscrF" : "𝓕",
+ "\\bscrG" : "𝓖",
+ "\\bscrH" : "𝓗",
+ "\\bscrI" : "𝓘",
+ "\\bscrJ" : "𝓙",
+ "\\bscrK" : "𝓚",
+ "\\bscrL" : "𝓛",
+ "\\bscrM" : "𝓜",
+ "\\bscrN" : "𝓝",
+ "\\bscrO" : "𝓞",
+ "\\bscrP" : "𝓟",
+ "\\bscrQ" : "𝓠",
+ "\\bscrR" : "𝓡",
+ "\\bscrS" : "𝓢",
+ "\\bscrT" : "𝓣",
+ "\\bscrU" : "𝓤",
+ "\\bscrV" : "𝓥",
+ "\\bscrW" : "𝓦",
+ "\\bscrX" : "𝓧",
+ "\\bscrY" : "𝓨",
+ "\\bscrZ" : "𝓩",
+ "\\bscra" : "𝓪",
+ "\\bscrb" : "𝓫",
+ "\\bscrc" : "𝓬",
+ "\\bscrd" : "𝓭",
+ "\\bscre" : "𝓮",
+ "\\bscrf" : "𝓯",
+ "\\bscrg" : "𝓰",
+ "\\bscrh" : "𝓱",
+ "\\bscri" : "𝓲",
+ "\\bscrj" : "𝓳",
+ "\\bscrk" : "𝓴",
+ "\\bscrl" : "𝓵",
+ "\\bscrm" : "𝓶",
+ "\\bscrn" : "𝓷",
+ "\\bscro" : "𝓸",
+ "\\bscrp" : "𝓹",
+ "\\bscrq" : "𝓺",
+ "\\bscrr" : "𝓻",
+ "\\bscrs" : "𝓼",
+ "\\bscrt" : "𝓽",
+ "\\bscru" : "𝓾",
+ "\\bscrv" : "𝓿",
+ "\\bscrw" : "𝔀",
+ "\\bscrx" : "𝔁",
+ "\\bscry" : "𝔂",
+ "\\bscrz" : "𝔃",
+ "\\frakA" : "𝔄",
+ "\\frakB" : "𝔅",
+ "\\frakD" : "𝔇",
+ "\\frakE" : "𝔈",
+ "\\frakF" : "𝔉",
+ "\\frakG" : "𝔊",
+ "\\frakJ" : "𝔍",
+ "\\frakK" : "𝔎",
+ "\\frakL" : "𝔏",
+ "\\frakM" : "𝔐",
+ "\\frakN" : "𝔑",
+ "\\frakO" : "𝔒",
+ "\\frakP" : "𝔓",
+ "\\frakQ" : "𝔔",
+ "\\frakS" : "𝔖",
+ "\\frakT" : "𝔗",
+ "\\frakU" : "𝔘",
+ "\\frakV" : "𝔙",
+ "\\frakW" : "𝔚",
+ "\\frakX" : "𝔛",
+ "\\frakY" : "𝔜",
+ "\\fraka" : "𝔞",
+ "\\frakb" : "𝔟",
+ "\\frakc" : "𝔠",
+ "\\frakd" : "𝔡",
+ "\\frake" : "𝔢",
+ "\\frakf" : "𝔣",
+ "\\frakg" : "𝔤",
+ "\\frakh" : "𝔥",
+ "\\fraki" : "𝔦",
+ "\\frakj" : "𝔧",
+ "\\frakk" : "𝔨",
+ "\\frakl" : "𝔩",
+ "\\frakm" : "𝔪",
+ "\\frakn" : "𝔫",
+ "\\frako" : "𝔬",
+ "\\frakp" : "𝔭",
+ "\\frakq" : "𝔮",
+ "\\frakr" : "𝔯",
+ "\\fraks" : "𝔰",
+ "\\frakt" : "𝔱",
+ "\\fraku" : "𝔲",
+ "\\frakv" : "𝔳",
+ "\\frakw" : "𝔴",
+ "\\frakx" : "𝔵",
+ "\\fraky" : "𝔶",
+ "\\frakz" : "𝔷",
+ "\\bbA" : "𝔸",
+ "\\bbB" : "𝔹",
+ "\\bbD" : "𝔻",
+ "\\bbE" : "𝔼",
+ "\\bbF" : "𝔽",
+ "\\bbG" : "𝔾",
+ "\\bbI" : "𝕀",
+ "\\bbJ" : "𝕁",
+ "\\bbK" : "𝕂",
+ "\\bbL" : "𝕃",
+ "\\bbM" : "𝕄",
+ "\\bbO" : "𝕆",
+ "\\bbS" : "𝕊",
+ "\\bbT" : "𝕋",
+ "\\bbU" : "𝕌",
+ "\\bbV" : "𝕍",
+ "\\bbW" : "𝕎",
+ "\\bbX" : "𝕏",
+ "\\bbY" : "𝕐",
+ "\\bba" : "𝕒",
+ "\\bbb" : "𝕓",
+ "\\bbc" : "𝕔",
+ "\\bbd" : "𝕕",
+ "\\bbe" : "𝕖",
+ "\\bbf" : "𝕗",
+ "\\bbg" : "𝕘",
+ "\\bbh" : "𝕙",
+ "\\bbi" : "𝕚",
+ "\\bbj" : "𝕛",
+ "\\bbk" : "𝕜",
+ "\\bbl" : "𝕝",
+ "\\bbm" : "𝕞",
+ "\\bbn" : "𝕟",
+ "\\bbo" : "𝕠",
+ "\\bbp" : "𝕡",
+ "\\bbq" : "𝕢",
+ "\\bbr" : "𝕣",
+ "\\bbs" : "𝕤",
+ "\\bbt" : "𝕥",
+ "\\bbu" : "𝕦",
+ "\\bbv" : "𝕧",
+ "\\bbw" : "𝕨",
+ "\\bbx" : "𝕩",
+ "\\bby" : "𝕪",
+ "\\bbz" : "𝕫",
+ "\\bfrakA" : "𝕬",
+ "\\bfrakB" : "𝕭",
+ "\\bfrakC" : "𝕮",
+ "\\bfrakD" : "𝕯",
+ "\\bfrakE" : "𝕰",
+ "\\bfrakF" : "𝕱",
+ "\\bfrakG" : "𝕲",
+ "\\bfrakH" : "𝕳",
+ "\\bfrakI" : "𝕴",
+ "\\bfrakJ" : "𝕵",
+ "\\bfrakK" : "𝕶",
+ "\\bfrakL" : "𝕷",
+ "\\bfrakM" : "𝕸",
+ "\\bfrakN" : "𝕹",
+ "\\bfrakO" : "𝕺",
+ "\\bfrakP" : "𝕻",
+ "\\bfrakQ" : "𝕼",
+ "\\bfrakR" : "𝕽",
+ "\\bfrakS" : "𝕾",
+ "\\bfrakT" : "𝕿",
+ "\\bfrakU" : "𝖀",
+ "\\bfrakV" : "𝖁",
+ "\\bfrakW" : "𝖂",
+ "\\bfrakX" : "𝖃",
+ "\\bfrakY" : "𝖄",
+ "\\bfrakZ" : "𝖅",
+ "\\bfraka" : "𝖆",
+ "\\bfrakb" : "𝖇",
+ "\\bfrakc" : "𝖈",
+ "\\bfrakd" : "𝖉",
+ "\\bfrake" : "𝖊",
+ "\\bfrakf" : "𝖋",
+ "\\bfrakg" : "𝖌",
+ "\\bfrakh" : "𝖍",
+ "\\bfraki" : "𝖎",
+ "\\bfrakj" : "𝖏",
+ "\\bfrakk" : "𝖐",
+ "\\bfrakl" : "𝖑",
+ "\\bfrakm" : "𝖒",
+ "\\bfrakn" : "𝖓",
+ "\\bfrako" : "𝖔",
+ "\\bfrakp" : "𝖕",
+ "\\bfrakq" : "𝖖",
+ "\\bfrakr" : "𝖗",
+ "\\bfraks" : "𝖘",
+ "\\bfrakt" : "𝖙",
+ "\\bfraku" : "𝖚",
+ "\\bfrakv" : "𝖛",
+ "\\bfrakw" : "𝖜",
+ "\\bfrakx" : "𝖝",
+ "\\bfraky" : "𝖞",
+ "\\bfrakz" : "𝖟",
+ "\\sansA" : "𝖠",
+ "\\sansB" : "𝖡",
+ "\\sansC" : "𝖢",
+ "\\sansD" : "𝖣",
+ "\\sansE" : "𝖤",
+ "\\sansF" : "𝖥",
+ "\\sansG" : "𝖦",
+ "\\sansH" : "𝖧",
+ "\\sansI" : "𝖨",
+ "\\sansJ" : "𝖩",
+ "\\sansK" : "𝖪",
+ "\\sansL" : "𝖫",
+ "\\sansM" : "𝖬",
+ "\\sansN" : "𝖭",
+ "\\sansO" : "𝖮",
+ "\\sansP" : "𝖯",
+ "\\sansQ" : "𝖰",
+ "\\sansR" : "𝖱",
+ "\\sansS" : "𝖲",
+ "\\sansT" : "𝖳",
+ "\\sansU" : "𝖴",
+ "\\sansV" : "𝖵",
+ "\\sansW" : "𝖶",
+ "\\sansX" : "𝖷",
+ "\\sansY" : "𝖸",
+ "\\sansZ" : "𝖹",
+ "\\sansa" : "𝖺",
+ "\\sansb" : "𝖻",
+ "\\sansc" : "𝖼",
+ "\\sansd" : "𝖽",
+ "\\sanse" : "𝖾",
+ "\\sansf" : "𝖿",
+ "\\sansg" : "𝗀",
+ "\\sansh" : "𝗁",
+ "\\sansi" : "𝗂",
+ "\\sansj" : "𝗃",
+ "\\sansk" : "𝗄",
+ "\\sansl" : "𝗅",
+ "\\sansm" : "𝗆",
+ "\\sansn" : "𝗇",
+ "\\sanso" : "𝗈",
+ "\\sansp" : "𝗉",
+ "\\sansq" : "𝗊",
+ "\\sansr" : "𝗋",
+ "\\sanss" : "𝗌",
+ "\\sanst" : "𝗍",
+ "\\sansu" : "𝗎",
+ "\\sansv" : "𝗏",
+ "\\sansw" : "𝗐",
+ "\\sansx" : "𝗑",
+ "\\sansy" : "𝗒",
+ "\\sansz" : "𝗓",
+ "\\bsansA" : "𝗔",
+ "\\bsansB" : "𝗕",
+ "\\bsansC" : "𝗖",
+ "\\bsansD" : "𝗗",
+ "\\bsansE" : "𝗘",
+ "\\bsansF" : "𝗙",
+ "\\bsansG" : "𝗚",
+ "\\bsansH" : "𝗛",
+ "\\bsansI" : "𝗜",
+ "\\bsansJ" : "𝗝",
+ "\\bsansK" : "𝗞",
+ "\\bsansL" : "𝗟",
+ "\\bsansM" : "𝗠",
+ "\\bsansN" : "𝗡",
+ "\\bsansO" : "𝗢",
+ "\\bsansP" : "𝗣",
+ "\\bsansQ" : "𝗤",
+ "\\bsansR" : "𝗥",
+ "\\bsansS" : "𝗦",
+ "\\bsansT" : "𝗧",
+ "\\bsansU" : "𝗨",
+ "\\bsansV" : "𝗩",
+ "\\bsansW" : "𝗪",
+ "\\bsansX" : "𝗫",
+ "\\bsansY" : "𝗬",
+ "\\bsansZ" : "𝗭",
+ "\\bsansa" : "𝗮",
+ "\\bsansb" : "𝗯",
+ "\\bsansc" : "𝗰",
+ "\\bsansd" : "𝗱",
+ "\\bsanse" : "𝗲",
+ "\\bsansf" : "𝗳",
+ "\\bsansg" : "𝗴",
+ "\\bsansh" : "𝗵",
+ "\\bsansi" : "𝗶",
+ "\\bsansj" : "𝗷",
+ "\\bsansk" : "𝗸",
+ "\\bsansl" : "𝗹",
+ "\\bsansm" : "𝗺",
+ "\\bsansn" : "𝗻",
+ "\\bsanso" : "𝗼",
+ "\\bsansp" : "𝗽",
+ "\\bsansq" : "𝗾",
+ "\\bsansr" : "𝗿",
+ "\\bsanss" : "𝘀",
+ "\\bsanst" : "𝘁",
+ "\\bsansu" : "𝘂",
+ "\\bsansv" : "𝘃",
+ "\\bsansw" : "𝘄",
+ "\\bsansx" : "𝘅",
+ "\\bsansy" : "𝘆",
+ "\\bsansz" : "𝘇",
+ "\\isansA" : "𝘈",
+ "\\isansB" : "𝘉",
+ "\\isansC" : "𝘊",
+ "\\isansD" : "𝘋",
+ "\\isansE" : "𝘌",
+ "\\isansF" : "𝘍",
+ "\\isansG" : "𝘎",
+ "\\isansH" : "𝘏",
+ "\\isansI" : "𝘐",
+ "\\isansJ" : "𝘑",
+ "\\isansK" : "𝘒",
+ "\\isansL" : "𝘓",
+ "\\isansM" : "𝘔",
+ "\\isansN" : "𝘕",
+ "\\isansO" : "𝘖",
+ "\\isansP" : "𝘗",
+ "\\isansQ" : "𝘘",
+ "\\isansR" : "𝘙",
+ "\\isansS" : "𝘚",
+ "\\isansT" : "𝘛",
+ "\\isansU" : "𝘜",
+ "\\isansV" : "𝘝",
+ "\\isansW" : "𝘞",
+ "\\isansX" : "𝘟",
+ "\\isansY" : "𝘠",
+ "\\isansZ" : "𝘡",
+ "\\isansa" : "𝘢",
+ "\\isansb" : "𝘣",
+ "\\isansc" : "𝘤",
+ "\\isansd" : "𝘥",
+ "\\isanse" : "𝘦",
+ "\\isansf" : "𝘧",
+ "\\isansg" : "𝘨",
+ "\\isansh" : "𝘩",
+ "\\isansi" : "𝘪",
+ "\\isansj" : "𝘫",
+ "\\isansk" : "𝘬",
+ "\\isansl" : "𝘭",
+ "\\isansm" : "𝘮",
+ "\\isansn" : "𝘯",
+ "\\isanso" : "𝘰",
+ "\\isansp" : "𝘱",
+ "\\isansq" : "𝘲",
+ "\\isansr" : "𝘳",
+ "\\isanss" : "𝘴",
+ "\\isanst" : "𝘵",
+ "\\isansu" : "𝘶",
+ "\\isansv" : "𝘷",
+ "\\isansw" : "𝘸",
+ "\\isansx" : "𝘹",
+ "\\isansy" : "𝘺",
+ "\\isansz" : "𝘻",
+ "\\bisansA" : "𝘼",
+ "\\bisansB" : "𝘽",
+ "\\bisansC" : "𝘾",
+ "\\bisansD" : "𝘿",
+ "\\bisansE" : "𝙀",
+ "\\bisansF" : "𝙁",
+ "\\bisansG" : "𝙂",
+ "\\bisansH" : "𝙃",
+ "\\bisansI" : "𝙄",
+ "\\bisansJ" : "𝙅",
+ "\\bisansK" : "𝙆",
+ "\\bisansL" : "𝙇",
+ "\\bisansM" : "𝙈",
+ "\\bisansN" : "𝙉",
+ "\\bisansO" : "𝙊",
+ "\\bisansP" : "𝙋",
+ "\\bisansQ" : "𝙌",
+ "\\bisansR" : "𝙍",
+ "\\bisansS" : "𝙎",
+ "\\bisansT" : "𝙏",
+ "\\bisansU" : "𝙐",
+ "\\bisansV" : "𝙑",
+ "\\bisansW" : "𝙒",
+ "\\bisansX" : "𝙓",
+ "\\bisansY" : "𝙔",
+ "\\bisansZ" : "𝙕",
+ "\\bisansa" : "𝙖",
+ "\\bisansb" : "𝙗",
+ "\\bisansc" : "𝙘",
+ "\\bisansd" : "𝙙",
+ "\\bisanse" : "𝙚",
+ "\\bisansf" : "𝙛",
+ "\\bisansg" : "𝙜",
+ "\\bisansh" : "𝙝",
+ "\\bisansi" : "𝙞",
+ "\\bisansj" : "𝙟",
+ "\\bisansk" : "𝙠",
+ "\\bisansl" : "𝙡",
+ "\\bisansm" : "𝙢",
+ "\\bisansn" : "𝙣",
+ "\\bisanso" : "𝙤",
+ "\\bisansp" : "𝙥",
+ "\\bisansq" : "𝙦",
+ "\\bisansr" : "𝙧",
+ "\\bisanss" : "𝙨",
+ "\\bisanst" : "𝙩",
+ "\\bisansu" : "𝙪",
+ "\\bisansv" : "𝙫",
+ "\\bisansw" : "𝙬",
+ "\\bisansx" : "𝙭",
+ "\\bisansy" : "𝙮",
+ "\\bisansz" : "𝙯",
+ "\\ttA" : "𝙰",
+ "\\ttB" : "𝙱",
+ "\\ttC" : "𝙲",
+ "\\ttD" : "𝙳",
+ "\\ttE" : "𝙴",
+ "\\ttF" : "𝙵",
+ "\\ttG" : "𝙶",
+ "\\ttH" : "𝙷",
+ "\\ttI" : "𝙸",
+ "\\ttJ" : "𝙹",
+ "\\ttK" : "𝙺",
+ "\\ttL" : "𝙻",
+ "\\ttM" : "𝙼",
+ "\\ttN" : "𝙽",
+ "\\ttO" : "𝙾",
+ "\\ttP" : "𝙿",
+ "\\ttQ" : "𝚀",
+ "\\ttR" : "𝚁",
+ "\\ttS" : "𝚂",
+ "\\ttT" : "𝚃",
+ "\\ttU" : "𝚄",
+ "\\ttV" : "𝚅",
+ "\\ttW" : "𝚆",
+ "\\ttX" : "𝚇",
+ "\\ttY" : "𝚈",
+ "\\ttZ" : "𝚉",
+ "\\tta" : "𝚊",
+ "\\ttb" : "𝚋",
+ "\\ttc" : "𝚌",
+ "\\ttd" : "𝚍",
+ "\\tte" : "𝚎",
+ "\\ttf" : "𝚏",
+ "\\ttg" : "𝚐",
+ "\\tth" : "𝚑",
+ "\\tti" : "𝚒",
+ "\\ttj" : "𝚓",
+ "\\ttk" : "𝚔",
+ "\\ttl" : "𝚕",
+ "\\ttm" : "𝚖",
+ "\\ttn" : "𝚗",
+ "\\tto" : "𝚘",
+ "\\ttp" : "𝚙",
+ "\\ttq" : "𝚚",
+ "\\ttr" : "𝚛",
+ "\\tts" : "𝚜",
+ "\\ttt" : "𝚝",
+ "\\ttu" : "𝚞",
+ "\\ttv" : "𝚟",
+ "\\ttw" : "𝚠",
+ "\\ttx" : "𝚡",
+ "\\tty" : "𝚢",
+ "\\ttz" : "𝚣",
+ "\\bfAlpha" : "𝚨",
+ "\\bfBeta" : "𝚩",
+ "\\bfGamma" : "𝚪",
+ "\\bfDelta" : "𝚫",
+ "\\bfEpsilon" : "𝚬",
+ "\\bfZeta" : "𝚭",
+ "\\bfEta" : "𝚮",
+ "\\bfTheta" : "𝚯",
+ "\\bfIota" : "𝚰",
+ "\\bfKappa" : "𝚱",
+ "\\bfLambda" : "𝚲",
+ "\\bfMu" : "𝚳",
+ "\\bfNu" : "𝚴",
+ "\\bfXi" : "𝚵",
+ "\\bfOmicron" : "𝚶",
+ "\\bfPi" : "𝚷",
+ "\\bfRho" : "𝚸",
+ "\\bfvarTheta" : "𝚹",
+ "\\bfSigma" : "𝚺",
+ "\\bfTau" : "𝚻",
+ "\\bfUpsilon" : "𝚼",
+ "\\bfPhi" : "𝚽",
+ "\\bfChi" : "𝚾",
+ "\\bfPsi" : "𝚿",
+ "\\bfOmega" : "𝛀",
+ "\\bfalpha" : "𝛂",
+ "\\bfbeta" : "𝛃",
+ "\\bfgamma" : "𝛄",
+ "\\bfdelta" : "𝛅",
+ "\\bfepsilon" : "𝛆",
+ "\\bfzeta" : "𝛇",
+ "\\bfeta" : "𝛈",
+ "\\bftheta" : "𝛉",
+ "\\bfiota" : "𝛊",
+ "\\bfkappa" : "𝛋",
+ "\\bflambda" : "𝛌",
+ "\\bfmu" : "𝛍",
+ "\\bfnu" : "𝛎",
+ "\\bfxi" : "𝛏",
+ "\\bfomicron" : "𝛐",
+ "\\bfpi" : "𝛑",
+ "\\bfrho" : "𝛒",
+ "\\bfvarsigma" : "𝛓",
+ "\\bfsigma" : "𝛔",
+ "\\bftau" : "𝛕",
+ "\\bfupsilon" : "𝛖",
+ "\\bfvarphi" : "𝛗",
+ "\\bfchi" : "𝛘",
+ "\\bfpsi" : "𝛙",
+ "\\bfomega" : "𝛚",
+ "\\bfvarepsilon" : "𝛜",
+ "\\bfvartheta" : "𝛝",
+ "\\bfvarkappa" : "𝛞",
+ "\\bfphi" : "𝛟",
+ "\\bfvarrho" : "𝛠",
+ "\\bfvarpi" : "𝛡",
+ "\\itAlpha" : "𝛢",
+ "\\itBeta" : "𝛣",
+ "\\itGamma" : "𝛤",
+ "\\itDelta" : "𝛥",
+ "\\itEpsilon" : "𝛦",
+ "\\itZeta" : "𝛧",
+ "\\itEta" : "𝛨",
+ "\\itTheta" : "𝛩",
+ "\\itIota" : "𝛪",
+ "\\itKappa" : "𝛫",
+ "\\itLambda" : "𝛬",
+ "\\itMu" : "𝛭",
+ "\\itNu" : "𝛮",
+ "\\itXi" : "𝛯",
+ "\\itOmicron" : "𝛰",
+ "\\itPi" : "𝛱",
+ "\\itRho" : "𝛲",
+ "\\itvarTheta" : "𝛳",
+ "\\itSigma" : "𝛴",
+ "\\itTau" : "𝛵",
+ "\\itUpsilon" : "𝛶",
+ "\\itPhi" : "𝛷",
+ "\\itChi" : "𝛸",
+ "\\itPsi" : "𝛹",
+ "\\itOmega" : "𝛺",
+ "\\italpha" : "𝛼",
+ "\\itbeta" : "𝛽",
+ "\\itgamma" : "𝛾",
+ "\\itdelta" : "𝛿",
+ "\\itepsilon" : "𝜀",
+ "\\itzeta" : "𝜁",
+ "\\iteta" : "𝜂",
+ "\\ittheta" : "𝜃",
+ "\\itiota" : "𝜄",
+ "\\itkappa" : "𝜅",
+ "\\itlambda" : "𝜆",
+ "\\itmu" : "𝜇",
+ "\\itnu" : "𝜈",
+ "\\itxi" : "𝜉",
+ "\\itomicron" : "𝜊",
+ "\\itpi" : "𝜋",
+ "\\itrho" : "𝜌",
+ "\\itvarsigma" : "𝜍",
+ "\\itsigma" : "𝜎",
+ "\\ittau" : "𝜏",
+ "\\itupsilon" : "𝜐",
+ "\\itphi" : "𝜑",
+ "\\itchi" : "𝜒",
+ "\\itpsi" : "𝜓",
+ "\\itomega" : "𝜔",
+ "\\itvarepsilon" : "𝜖",
+ "\\itvartheta" : "𝜗",
+ "\\itvarkappa" : "𝜘",
+ "\\itvarphi" : "𝜙",
+ "\\itvarrho" : "𝜚",
+ "\\itvarpi" : "𝜛",
+ "\\biAlpha" : "𝜜",
+ "\\biBeta" : "𝜝",
+ "\\biGamma" : "𝜞",
+ "\\biDelta" : "𝜟",
+ "\\biEpsilon" : "𝜠",
+ "\\biZeta" : "𝜡",
+ "\\biEta" : "𝜢",
+ "\\biTheta" : "𝜣",
+ "\\biIota" : "𝜤",
+ "\\biKappa" : "𝜥",
+ "\\biLambda" : "𝜦",
+ "\\biMu" : "𝜧",
+ "\\biNu" : "𝜨",
+ "\\biXi" : "𝜩",
+ "\\biOmicron" : "𝜪",
+ "\\biPi" : "𝜫",
+ "\\biRho" : "𝜬",
+ "\\bivarTheta" : "𝜭",
+ "\\biSigma" : "𝜮",
+ "\\biTau" : "𝜯",
+ "\\biUpsilon" : "𝜰",
+ "\\biPhi" : "𝜱",
+ "\\biChi" : "𝜲",
+ "\\biPsi" : "𝜳",
+ "\\biOmega" : "𝜴",
+ "\\bialpha" : "𝜶",
+ "\\bibeta" : "𝜷",
+ "\\bigamma" : "𝜸",
+ "\\bidelta" : "𝜹",
+ "\\biepsilon" : "𝜺",
+ "\\bizeta" : "𝜻",
+ "\\bieta" : "𝜼",
+ "\\bitheta" : "𝜽",
+ "\\biiota" : "𝜾",
+ "\\bikappa" : "𝜿",
+ "\\bilambda" : "𝝀",
+ "\\bimu" : "𝝁",
+ "\\binu" : "𝝂",
+ "\\bixi" : "𝝃",
+ "\\biomicron" : "𝝄",
+ "\\bipi" : "𝝅",
+ "\\birho" : "𝝆",
+ "\\bivarsigma" : "𝝇",
+ "\\bisigma" : "𝝈",
+ "\\bitau" : "𝝉",
+ "\\biupsilon" : "𝝊",
+ "\\biphi" : "𝝋",
+ "\\bichi" : "𝝌",
+ "\\bipsi" : "𝝍",
+ "\\biomega" : "𝝎",
+ "\\bivarepsilon" : "𝝐",
+ "\\bivartheta" : "𝝑",
+ "\\bivarkappa" : "𝝒",
+ "\\bivarphi" : "𝝓",
+ "\\bivarrho" : "𝝔",
+ "\\bivarpi" : "𝝕",
+ "\\bsansAlpha" : "𝝖",
+ "\\bsansBeta" : "𝝗",
+ "\\bsansGamma" : "𝝘",
+ "\\bsansDelta" : "𝝙",
+ "\\bsansEpsilon" : "𝝚",
+ "\\bsansZeta" : "𝝛",
+ "\\bsansEta" : "𝝜",
+ "\\bsansTheta" : "𝝝",
+ "\\bsansIota" : "𝝞",
+ "\\bsansKappa" : "𝝟",
+ "\\bsansLambda" : "𝝠",
+ "\\bsansMu" : "𝝡",
+ "\\bsansNu" : "𝝢",
+ "\\bsansXi" : "𝝣",
+ "\\bsansOmicron" : "𝝤",
+ "\\bsansPi" : "𝝥",
+ "\\bsansRho" : "𝝦",
+ "\\bsansvarTheta" : "𝝧",
+ "\\bsansSigma" : "𝝨",
+ "\\bsansTau" : "𝝩",
+ "\\bsansUpsilon" : "𝝪",
+ "\\bsansPhi" : "𝝫",
+ "\\bsansChi" : "𝝬",
+ "\\bsansPsi" : "𝝭",
+ "\\bsansOmega" : "𝝮",
+ "\\bsansalpha" : "𝝰",
+ "\\bsansbeta" : "𝝱",
+ "\\bsansgamma" : "𝝲",
+ "\\bsansdelta" : "𝝳",
+ "\\bsansepsilon" : "𝝴",
+ "\\bsanszeta" : "𝝵",
+ "\\bsanseta" : "𝝶",
+ "\\bsanstheta" : "𝝷",
+ "\\bsansiota" : "𝝸",
+ "\\bsanskappa" : "𝝹",
+ "\\bsanslambda" : "𝝺",
+ "\\bsansmu" : "𝝻",
+ "\\bsansnu" : "𝝼",
+ "\\bsansxi" : "𝝽",
+ "\\bsansomicron" : "𝝾",
+ "\\bsanspi" : "𝝿",
+ "\\bsansrho" : "𝞀",
+ "\\bsansvarsigma" : "𝞁",
+ "\\bsanssigma" : "𝞂",
+ "\\bsanstau" : "𝞃",
+ "\\bsansupsilon" : "𝞄",
+ "\\bsansphi" : "𝞅",
+ "\\bsanschi" : "𝞆",
+ "\\bsanspsi" : "𝞇",
+ "\\bsansomega" : "𝞈",
+ "\\bsansvarepsilon" : "𝞊",
+ "\\bsansvartheta" : "𝞋",
+ "\\bsansvarkappa" : "𝞌",
+ "\\bsansvarphi" : "𝞍",
+ "\\bsansvarrho" : "𝞎",
+ "\\bsansvarpi" : "𝞏",
+ "\\bisansAlpha" : "𝞐",
+ "\\bisansBeta" : "𝞑",
+ "\\bisansGamma" : "𝞒",
+ "\\bisansDelta" : "𝞓",
+ "\\bisansEpsilon" : "𝞔",
+ "\\bisansZeta" : "𝞕",
+ "\\bisansEta" : "𝞖",
+ "\\bisansTheta" : "𝞗",
+ "\\bisansIota" : "𝞘",
+ "\\bisansKappa" : "𝞙",
+ "\\bisansLambda" : "𝞚",
+ "\\bisansMu" : "𝞛",
+ "\\bisansNu" : "𝞜",
+ "\\bisansXi" : "𝞝",
+ "\\bisansOmicron" : "𝞞",
+ "\\bisansPi" : "𝞟",
+ "\\bisansRho" : "𝞠",
+ "\\bisansvarTheta" : "𝞡",
+ "\\bisansSigma" : "𝞢",
+ "\\bisansTau" : "𝞣",
+ "\\bisansUpsilon" : "𝞤",
+ "\\bisansPhi" : "𝞥",
+ "\\bisansChi" : "𝞦",
+ "\\bisansPsi" : "𝞧",
+ "\\bisansOmega" : "𝞨",
+ "\\bisansalpha" : "𝞪",
+ "\\bisansbeta" : "𝞫",
+ "\\bisansgamma" : "𝞬",
+ "\\bisansdelta" : "𝞭",
+ "\\bisansepsilon" : "𝞮",
+ "\\bisanszeta" : "𝞯",
+ "\\bisanseta" : "𝞰",
+ "\\bisanstheta" : "𝞱",
+ "\\bisansiota" : "𝞲",
+ "\\bisanskappa" : "𝞳",
+ "\\bisanslambda" : "𝞴",
+ "\\bisansmu" : "𝞵",
+ "\\bisansnu" : "𝞶",
+ "\\bisansxi" : "𝞷",
+ "\\bisansomicron" : "𝞸",
+ "\\bisanspi" : "𝞹",
+ "\\bisansrho" : "𝞺",
+ "\\bisansvarsigma" : "𝞻",
+ "\\bisanssigma" : "𝞼",
+ "\\bisanstau" : "𝞽",
+ "\\bisansupsilon" : "𝞾",
+ "\\bisansphi" : "𝞿",
+ "\\bisanschi" : "𝟀",
+ "\\bisanspsi" : "𝟁",
+ "\\bisansomega" : "𝟂",
+ "\\bisansvarepsilon" : "𝟄",
+ "\\bisansvartheta" : "𝟅",
+ "\\bisansvarkappa" : "𝟆",
+ "\\bisansvarphi" : "𝟇",
+ "\\bisansvarrho" : "𝟈",
+ "\\bisansvarpi" : "𝟉",
+ "\\bfzero" : "𝟎",
+ "\\bfone" : "𝟏",
+ "\\bftwo" : "𝟐",
+ "\\bfthree" : "𝟑",
+ "\\bffour" : "𝟒",
+ "\\bffive" : "𝟓",
+ "\\bfsix" : "𝟔",
+ "\\bfseven" : "𝟕",
+ "\\bfeight" : "𝟖",
+ "\\bfnine" : "𝟗",
+ "\\bbzero" : "𝟘",
+ "\\bbone" : "𝟙",
+ "\\bbtwo" : "𝟚",
+ "\\bbthree" : "𝟛",
+ "\\bbfour" : "𝟜",
+ "\\bbfive" : "𝟝",
+ "\\bbsix" : "𝟞",
+ "\\bbseven" : "𝟟",
+ "\\bbeight" : "𝟠",
+ "\\bbnine" : "𝟡",
+ "\\sanszero" : "𝟢",
+ "\\sansone" : "𝟣",
+ "\\sanstwo" : "𝟤",
+ "\\sansthree" : "𝟥",
+ "\\sansfour" : "𝟦",
+ "\\sansfive" : "𝟧",
+ "\\sanssix" : "𝟨",
+ "\\sansseven" : "𝟩",
+ "\\sanseight" : "𝟪",
+ "\\sansnine" : "𝟫",
+ "\\bsanszero" : "𝟬",
+ "\\bsansone" : "𝟭",
+ "\\bsanstwo" : "𝟮",
+ "\\bsansthree" : "𝟯",
+ "\\bsansfour" : "𝟰",
+ "\\bsansfive" : "𝟱",
+ "\\bsanssix" : "𝟲",
+ "\\bsansseven" : "𝟳",
+ "\\bsanseight" : "𝟴",
+ "\\bsansnine" : "𝟵",
+ "\\ttzero" : "𝟶",
+ "\\ttone" : "𝟷",
+ "\\tttwo" : "𝟸",
+ "\\ttthree" : "𝟹",
+ "\\ttfour" : "𝟺",
+ "\\ttfive" : "𝟻",
+ "\\ttsix" : "𝟼",
+ "\\ttseven" : "𝟽",
+ "\\tteight" : "𝟾",
+ "\\ttnine" : "𝟿",
+ "\\underbar" : "̲",
+ "\\underleftrightarrow" : "͍",
+}
+
+
+reverse_latex_symbol = { v:k for k,v in latex_symbols.items()}
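+
+# Editor's note (illustrative, not part of the generated table): the completer
+# uses these mappings to turn a LaTeX name into the corresponding character
+# and back:
+#
+#   >>> latex_symbols["\\alpha"]
+#   'α'
+#   >>> reverse_latex_symbol["α"]
+#   '\\alpha'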
diff --git a/contrib/python/ipython/py3/IPython/core/logger.py b/contrib/python/ipython/py3/IPython/core/logger.py
new file mode 100644
index 0000000000..99e7ce2918
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/logger.py
@@ -0,0 +1,227 @@
+"""Logger class for IPython's logging facilities.
+"""
+
+#*****************************************************************************
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
+# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+#****************************************************************************
+# Modules and globals
+
+# Python standard modules
+import glob
+import io
+import os
+import time
+
+
+#****************************************************************************
+# FIXME: This class isn't a mixin anymore, but it still needs attributes from
+# ipython and does input cache management. Finish cleanup later...
+
+class Logger(object):
+ """A Logfile class with different policies for file creation"""
+
+ def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
+ logmode='over'):
+
+        # `home_dir` is taken from the full IPython instance; some attributes
+        # we rely on won't exist until later. What a mess, clean up later...
+ self.home_dir = home_dir
+
+ self.logfname = logfname
+ self.loghead = loghead
+ self.logmode = logmode
+ self.logfile = None
+
+ # Whether to log raw or processed input
+ self.log_raw_input = False
+
+ # whether to also log output
+ self.log_output = False
+
+ # whether to put timestamps before each log entry
+ self.timestamp = False
+
+ # activity control flags
+ self.log_active = False
+
+ # logmode is a validated property
+ def _set_mode(self,mode):
+ if mode not in ['append','backup','global','over','rotate']:
+ raise ValueError('invalid log mode %s given' % mode)
+ self._logmode = mode
+
+ def _get_mode(self):
+ return self._logmode
+
+ logmode = property(_get_mode,_set_mode)
+
+ def logstart(self, logfname=None, loghead=None, logmode=None,
+ log_output=False, timestamp=False, log_raw_input=False):
+ """Generate a new log-file with a default header.
+
+ Raises RuntimeError if the log has already been started"""
+
+ if self.logfile is not None:
+ raise RuntimeError('Log file is already active: %s' %
+ self.logfname)
+
+ # The parameters can override constructor defaults
+ if logfname is not None: self.logfname = logfname
+ if loghead is not None: self.loghead = loghead
+ if logmode is not None: self.logmode = logmode
+
+ # Parameters not part of the constructor
+ self.timestamp = timestamp
+ self.log_output = log_output
+ self.log_raw_input = log_raw_input
+
+ # init depending on the log mode requested
+ isfile = os.path.isfile
+ logmode = self.logmode
+
+ if logmode == 'append':
+ self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
+
+ elif logmode == 'backup':
+ if isfile(self.logfname):
+ backup_logname = self.logfname+'~'
+ # Manually remove any old backup, since os.rename may fail
+ # under Windows.
+ if isfile(backup_logname):
+ os.remove(backup_logname)
+ os.rename(self.logfname,backup_logname)
+ self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
+
+ elif logmode == 'global':
+ self.logfname = os.path.join(self.home_dir,self.logfname)
+ self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
+
+ elif logmode == 'over':
+ if isfile(self.logfname):
+ os.remove(self.logfname)
+ self.logfile = io.open(self.logfname,'w', encoding='utf-8')
+
+ elif logmode == 'rotate':
+ if isfile(self.logfname):
+ if isfile(self.logfname+'.001~'):
+ old = glob.glob(self.logfname+'.*~')
+ old.sort()
+ old.reverse()
+ for f in old:
+ root, ext = os.path.splitext(f)
+ num = int(ext[1:-1])+1
+ os.rename(f, root+'.'+repr(num).zfill(3)+'~')
+ os.rename(self.logfname, self.logfname+'.001~')
+ self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
+
+ if logmode != 'append':
+ self.logfile.write(self.loghead)
+
+ self.logfile.flush()
+ self.log_active = True
+
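+    # Editor's note (illustrative, not part of the upstream source): a Logger
+    # is normally driven by the %logstart magic, but it can also be used
+    # directly; 'rotate' keeps numbered backups of earlier log files:
+    #
+    #   >>> log = Logger(home_dir="/tmp", logfname="session.log",
+    #   ...              loghead="# IPython log\n", logmode="rotate")
+    #   >>> log.logstart(timestamp=True)
+    #   >>> log.log_write("x = 1\n")
+    #   >>> log.logstop()
+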
+ def switch_log(self,val):
+ """Switch logging on/off. val should be ONLY a boolean."""
+
+ if val not in [False,True,0,1]:
+ raise ValueError('Call switch_log ONLY with a boolean argument, '
+ 'not with: %s' % val)
+
+ label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
+
+ if self.logfile is None:
+ print("""
+Logging hasn't been started yet (use logstart for that).
+
+%logon/%logoff are for temporarily starting and stopping logging for a logfile
+which already exists. But you must first start the logging process with
+%logstart (optionally giving a logfile name).""")
+
+ else:
+ if self.log_active == val:
+ print('Logging is already',label[val])
+ else:
+ print('Switching logging',label[val])
+ self.log_active = not self.log_active
+ self.log_active_out = self.log_active
+
+ def logstate(self):
+ """Print a status message about the logger."""
+ if self.logfile is None:
+ print('Logging has not been activated.')
+ else:
+            state = 'active' if self.log_active else 'temporarily suspended'
+ print('Filename :', self.logfname)
+ print('Mode :', self.logmode)
+ print('Output logging :', self.log_output)
+ print('Raw input log :', self.log_raw_input)
+ print('Timestamping :', self.timestamp)
+ print('State :', state)
+
+ def log(self, line_mod, line_ori):
+ """Write the sources to a log.
+
+ Inputs:
+
+ - line_mod: possibly modified input, such as the transformations made
+ by input prefilters or input handlers of various kinds. This should
+ always be valid Python.
+
+ - line_ori: unmodified input line from the user. This is not
+ necessarily valid Python.
+ """
+
+ # Write the log line, but decide which one according to the
+ # log_raw_input flag, set when the log is started.
+ if self.log_raw_input:
+ self.log_write(line_ori)
+ else:
+ self.log_write(line_mod)
+
+ def log_write(self, data, kind='input'):
+ """Write data to the log file, if active"""
+
+ #print 'data: %r' % data # dbg
+ if self.log_active and data:
+ write = self.logfile.write
+ if kind=='input':
+ if self.timestamp:
+ write(time.strftime('# %a, %d %b %Y %H:%M:%S\n', time.localtime()))
+ write(data)
+ elif kind=='output' and self.log_output:
+ odata = u'\n'.join([u'#[Out]# %s' % s
+ for s in data.splitlines()])
+ write(u'%s\n' % odata)
+ try:
+ self.logfile.flush()
+ except OSError:
+ print("Failed to flush the log file.")
+            print(
+                f"Please check that {self.logfname} exists and has the right permissions."
+            )
+ print(
+ "Also consider turning off the log with `%logstop` to avoid this warning."
+ )
+
+ def logstop(self):
+ """Fully stop logging and close log file.
+
+ In order to start logging again, a new logstart() call needs to be
+ made, possibly (though not necessarily) with a new filename, mode and
+ other options."""
+
+ if self.logfile is not None:
+ self.logfile.close()
+ self.logfile = None
+ else:
+ print("Logging hadn't been started.")
+ self.log_active = False
+
+ # For backwards compatibility, in case anyone was using this.
+ close_log = logstop
diff --git a/contrib/python/ipython/py3/IPython/core/macro.py b/contrib/python/ipython/py3/IPython/core/macro.py
new file mode 100644
index 0000000000..ce86898cac
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/macro.py
@@ -0,0 +1,53 @@
+"""Support for interactive macros in IPython"""
+
+#*****************************************************************************
+# Copyright (C) 2001-2005 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import re
+
+from IPython.utils.encoding import DEFAULT_ENCODING
+
+coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
+
+class Macro(object):
+ """Simple class to store the value of macros as strings.
+
+ Macro is just a callable that executes a string of IPython
+ input when called.
+ """
+
+ def __init__(self,code):
+ """store the macro value, as a single string which can be executed"""
+ lines = []
+ enc = None
+ for line in code.splitlines():
+ coding_match = coding_declaration.match(line)
+ if coding_match:
+ enc = coding_match.group(1)
+ else:
+ lines.append(line)
+ code = "\n".join(lines)
+ if isinstance(code, bytes):
+ code = code.decode(enc or DEFAULT_ENCODING)
+ self.value = code + '\n'
+
+ def __str__(self):
+ return self.value
+
+ def __repr__(self):
+ return 'IPython.macro.Macro(%s)' % repr(self.value)
+
+ def __getstate__(self):
+ """ needed for safe pickling via %store """
+ return {'value': self.value}
+
+ def __add__(self, other):
+ if isinstance(other, Macro):
+ return Macro(self.value + other.value)
+ elif isinstance(other, str):
+ return Macro(self.value + other)
+ raise TypeError
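+
+# Editor's note (illustrative, not part of the upstream source): macros are
+# plain strings of IPython input and concatenate with `+`:
+#
+#   >>> m = Macro("a = 1\n") + Macro("print(a)\n")
+#   >>> m.value
+#   'a = 1\nprint(a)\n'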
diff --git a/contrib/python/ipython/py3/IPython/core/magic.py b/contrib/python/ipython/py3/IPython/core/magic.py
new file mode 100644
index 0000000000..4f9e4e548f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magic.py
@@ -0,0 +1,757 @@
+# encoding: utf-8
+"""Magic functions for InteractiveShell.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
+# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
+# Copyright (C) 2008 The IPython Development Team
+
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+from getopt import getopt, GetoptError
+
+from traitlets.config.configurable import Configurable
+from . import oinspect
+from .error import UsageError
+from .inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
+from ..utils.ipstruct import Struct
+from ..utils.process import arg_split
+from ..utils.text import dedent
+from traitlets import Bool, Dict, Instance, observe
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# A dict we'll use for each class that has magics, used as temporary storage to
+# pass information between the @line/cell_magic method decorators and the
+# @magics_class class decorator, because the method decorators have no
+# access to the class when they run. See for more details:
+# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
+
+magics = dict(line={}, cell={})
+
+magic_kinds = ('line', 'cell')
+magic_spec = ('line', 'cell', 'line_cell')
+magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
+
+#-----------------------------------------------------------------------------
+# Utility classes and functions
+#-----------------------------------------------------------------------------
+
+class Bunch: pass
+
+
+def on_off(tag):
+ """Return an ON/OFF string for a 1/0 input. Simple utility function."""
+ return ['OFF','ON'][tag]
+
+
+def compress_dhist(dh):
+ """Compress a directory history into a new one with at most 20 entries.
+
+ Return a new list made from the first and last 10 elements of dhist after
+ removal of duplicates.
+ """
+ head, tail = dh[:-10], dh[-10:]
+
+ newhead = []
+ done = set()
+ for h in head:
+ if h in done:
+ continue
+ newhead.append(h)
+ done.add(h)
+
+ return newhead + tail
+
+
+def needs_local_scope(func):
+    """Decorator to mark magic functions which need the local scope to run."""
+ func.needs_local_scope = True
+ return func
+
+#-----------------------------------------------------------------------------
+# Class and method decorators for registering magics
+#-----------------------------------------------------------------------------
+
+def magics_class(cls):
+ """Class decorator for all subclasses of the main Magics class.
+
+ Any class that subclasses Magics *must* also apply this decorator, to
+ ensure that all the methods that have been decorated as line/cell magics
+ get correctly registered in the class instance. This is necessary because
+ when method decorators run, the class does not exist yet, so they
+ temporarily store their information into a module global. Application of
+ this class decorator copies that global data to the class instance and
+ clears the global.
+
+ Obviously, this mechanism is not thread-safe, which means that the
+ *creation* of subclasses of Magic should only be done in a single-thread
+ context. Instantiation of the classes has no restrictions. Given that
+ these classes are typically created at IPython startup time and before user
+ application code becomes active, in practice this should not pose any
+ problems.
+ """
+ cls.registered = True
+ cls.magics = dict(line = magics['line'],
+ cell = magics['cell'])
+ magics['line'] = {}
+ magics['cell'] = {}
+ return cls
+
+
+def record_magic(dct, magic_kind, magic_name, func):
+ """Utility function to store a function as a magic of a specific kind.
+
+ Parameters
+ ----------
+ dct : dict
+ A dictionary with 'line' and 'cell' subdicts.
+ magic_kind : str
+ Kind of magic to be stored.
+ magic_name : str
+ Key to store the magic as.
+ func : function
+ Callable object to store.
+ """
+ if magic_kind == 'line_cell':
+ dct['line'][magic_name] = dct['cell'][magic_name] = func
+ else:
+ dct[magic_kind][magic_name] = func
+
+
+def validate_type(magic_kind):
+ """Ensure that the given magic_kind is valid.
+
+ Check that the given magic_kind is one of the accepted spec types (stored
+ in the global `magic_spec`), raise ValueError otherwise.
+ """
+ if magic_kind not in magic_spec:
+        raise ValueError('magic_kind must be one of %s, %s given' %
+                         (magic_spec, magic_kind))
+
+
+# The docstrings for the decorator below will be fairly similar for the two
+# types (method and function), so we generate them here once and reuse the
+# templates below.
+_docstring_template = \
+"""Decorate the given {0} as {1} magic.
+
+The decorator can be used with or without arguments, as follows.
+
+i) without arguments: it will create a {1} magic named as the {0} being
+decorated::
+
+ @deco
+ def foo(...)
+
+will create a {1} magic named `foo`.
+
+ii) with one string argument: which will be used as the actual name of the
+resulting magic::
+
+ @deco('bar')
+ def foo(...)
+
+will create a {1} magic named `bar`.
+
+To register a class magic use ``InteractiveShell.register_magics(class or instance)``.
+"""
+
+# These two are decorator factories. While they are conceptually very similar,
+# there are enough differences in the details that it's simpler to have them
+# written as completely standalone functions rather than trying to share code
+# and make a single one with convoluted logic.
+
+def _method_magic_marker(magic_kind):
+ """Decorator factory for methods in Magics subclasses.
+ """
+
+ validate_type(magic_kind)
+
+ # This is a closure to capture the magic_kind. We could also use a class,
+ # but it's overkill for just that one bit of state.
+ def magic_deco(arg):
+ if callable(arg):
+ # "Naked" decorator call (just @foo, no args)
+ func = arg
+ name = func.__name__
+ retval = arg
+ record_magic(magics, magic_kind, name, name)
+ elif isinstance(arg, str):
+ # Decorator called with arguments (@foo('bar'))
+ name = arg
+ def mark(func, *a, **kw):
+ record_magic(magics, magic_kind, name, func.__name__)
+ return func
+ retval = mark
+ else:
+ raise TypeError("Decorator can only be called with "
+ "string or function")
+ return retval
+
+ # Ensure the resulting decorator has a usable docstring
+ magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
+ return magic_deco
+
+
+def _function_magic_marker(magic_kind):
+ """Decorator factory for standalone functions.
+ """
+ validate_type(magic_kind)
+
+ # This is a closure to capture the magic_kind. We could also use a class,
+ # but it's overkill for just that one bit of state.
+ def magic_deco(arg):
+ # Find get_ipython() in the caller's namespace
+ caller = sys._getframe(1)
+ for ns in ['f_locals', 'f_globals', 'f_builtins']:
+ get_ipython = getattr(caller, ns).get('get_ipython')
+ if get_ipython is not None:
+ break
+ else:
+ raise NameError('Decorator can only run in context where '
+ '`get_ipython` exists')
+
+ ip = get_ipython()
+
+ if callable(arg):
+ # "Naked" decorator call (just @foo, no args)
+ func = arg
+ name = func.__name__
+ ip.register_magic_function(func, magic_kind, name)
+ retval = arg
+ elif isinstance(arg, str):
+ # Decorator called with arguments (@foo('bar'))
+ name = arg
+ def mark(func, *a, **kw):
+ ip.register_magic_function(func, magic_kind, name)
+ return func
+ retval = mark
+ else:
+ raise TypeError("Decorator can only be called with "
+ "string or function")
+ return retval
+
+ # Ensure the resulting decorator has a usable docstring
+ ds = _docstring_template.format('function', magic_kind)
+
+ ds += dedent("""
+ Note: this decorator can only be used in a context where IPython is already
+ active, so that the `get_ipython()` call succeeds. You can therefore use
+ it in your startup files loaded after IPython initializes, but *not* in the
+ IPython configuration file itself, which is executed before IPython is
+ fully up and running. Any file located in the `startup` subdirectory of
+ your configuration profile will be OK in this sense.
+ """)
+
+ magic_deco.__doc__ = ds
+ return magic_deco
+
+
+MAGIC_NO_VAR_EXPAND_ATTR = "_ipython_magic_no_var_expand"
+MAGIC_OUTPUT_CAN_BE_SILENCED = "_ipython_magic_output_can_be_silenced"
+
+
+def no_var_expand(magic_func):
+ """Mark a magic function as not needing variable expansion
+
+ By default, IPython interprets `{a}` or `$a` in the line passed to magics
+ as variables that should be interpolated from the interactive namespace
+ before passing the line to the magic function.
+ This is not always desirable, e.g. when the magic executes Python code
+ (%timeit, %time, etc.).
+ Decorate magics with `@no_var_expand` to opt-out of variable expansion.
+
+ .. versionadded:: 7.3
+ """
+ setattr(magic_func, MAGIC_NO_VAR_EXPAND_ATTR, True)
+ return magic_func
+
+
+def output_can_be_silenced(magic_func):
+ """Mark a magic function so its output may be silenced.
+
+ The output is silenced if the Python code used as a parameter of
+ the magic ends in a semicolon, not counting a Python comment that can
+ follow it.
+ """
+ setattr(magic_func, MAGIC_OUTPUT_CAN_BE_SILENCED, True)
+ return magic_func
+
+# Create the actual decorators for public use
+
+# These three are used to decorate methods in class definitions
+line_magic = _method_magic_marker('line')
+cell_magic = _method_magic_marker('cell')
+line_cell_magic = _method_magic_marker('line_cell')
+
+# These three decorate standalone functions and perform the decoration
+# immediately. They can only run where get_ipython() works
+register_line_magic = _function_magic_marker('line')
+register_cell_magic = _function_magic_marker('cell')
+register_line_cell_magic = _function_magic_marker('line_cell')
+
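A hedged sketch of the function-style decorators in use; the magic names below are hypothetical, and the snippet assumes a running IPython session where `get_ipython()` resolves::

    from IPython.core.magic import (register_line_magic, register_cell_magic,
                                    no_var_expand)

    @register_line_magic
    @no_var_expand
    def rawline(line):
        """Return the argument text verbatim, without {var}/$var expansion."""
        return line

    @register_cell_magic("linecount")
    def count_lines(line, cell):
        """Report how many lines the cell body contains."""
        return len(cell.splitlines())

    # In the session:
    #   %rawline {x}      -> '{x}'  (braces are not interpolated)
    #   %%linecount
    #   a = 1
    #   b = 2
    #                     -> 2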
+#-----------------------------------------------------------------------------
+# Core Magic classes
+#-----------------------------------------------------------------------------
+
+class MagicsManager(Configurable):
+ """Object that handles all magic-related functionality for IPython.
+ """
+ # Non-configurable class attributes
+
+ # A two-level dict, first keyed by magic type, then by magic function, and
+ # holding the actual callable object as value. This is the dict used for
+ # magic function dispatch
+ magics = Dict()
+ lazy_magics = Dict(
+ help="""
+ Mapping from magic names to modules to load.
+
+ This can be used in IPython/IPykernel configuration to declare lazy magics
+ that will only be imported/registered on first use.
+
+ For example::
+
+ c.MagicsManager.lazy_magics = {
+ "my_magic": "slow.to.import",
+ "my_other_magic": "also.slow",
+ }
+
+        On first invocation of `%my_magic`, `%%my_magic`, `%my_other_magic` or
+        `%%my_other_magic`, the corresponding module will be loaded as an IPython
+        extension, as if you had previously done `%load_ext` with that module name.
+
+        Magic names should be given without the percent sign(s), as magics can be
+        both cell and line magics.
+
+        Lazy loading happens relatively late in the execution process, and complex
+        extensions that manipulate Python/IPython internal state or global state
+        might not support lazy loading.
+ """
+ ).tag(
+ config=True,
+ )
+
+ # A registry of the original objects that we've been given holding magics.
+ registry = Dict()
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ auto_magic = Bool(True, help=
+ "Automatically call line magics without requiring explicit % prefix"
+ ).tag(config=True)
+ @observe('auto_magic')
+ def _auto_magic_changed(self, change):
+ self.shell.automagic = change['new']
+
+ _auto_status = [
+ 'Automagic is OFF, % prefix IS needed for line magics.',
+ 'Automagic is ON, % prefix IS NOT needed for line magics.']
+
+ user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True)
+
+ def __init__(self, shell=None, config=None, user_magics=None, **traits):
+
+ super(MagicsManager, self).__init__(shell=shell, config=config,
+ user_magics=user_magics, **traits)
+ self.magics = dict(line={}, cell={})
+ # Let's add the user_magics to the registry for uniformity, so *all*
+ # registered magic containers can be found there.
+ self.registry[user_magics.__class__.__name__] = user_magics
+
+ def auto_status(self):
+ """Return descriptive string with automagic status."""
+ return self._auto_status[self.auto_magic]
+
+ def lsmagic(self):
+ """Return a dict of currently available magic functions.
+
+ The return dict has the keys 'line' and 'cell', corresponding to the
+ two types of magics we support. Each value is a list of names.
+ """
+ return self.magics
+
+ def lsmagic_docs(self, brief=False, missing=''):
+ """Return dict of documentation of magic functions.
+
+ The return dict has the keys 'line' and 'cell', corresponding to the
+ two types of magics we support. Each value is a dict keyed by magic
+ name whose value is the function docstring. If a docstring is
+ unavailable, the value of `missing` is used instead.
+
+ If brief is True, only the first line of each docstring will be returned.
+ """
+ docs = {}
+ for m_type in self.magics:
+ m_docs = {}
+ for m_name, m_func in self.magics[m_type].items():
+ if m_func.__doc__:
+ if brief:
+ m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
+ else:
+ m_docs[m_name] = m_func.__doc__.rstrip()
+ else:
+ m_docs[m_name] = missing
+ docs[m_type] = m_docs
+ return docs
+
+ def register_lazy(self, name: str, fully_qualified_name: str):
+ """
+ Lazily register a magic via an extension.
+
+
+ Parameters
+ ----------
+ name : str
+ Name of the magic you wish to register.
+        fully_qualified_name : str
+            Fully qualified name of the module/submodule that should be loaded
+            as an extension when the magic is first called.
+            It is assumed that loading this extension will register the given
+            magic.
+ """
+
+ self.lazy_magics[name] = fully_qualified_name
+
+ def register(self, *magic_objects):
+ """Register one or more instances of Magics.
+
+ Take one or more classes or instances of classes that subclass the main
+ `core.Magic` class, and register them with IPython to use the magic
+ functions they provide. The registration process will then ensure that
+        any methods that have been decorated to provide line and/or cell magics will
+ be recognized with the `%x`/`%%x` syntax as a line/cell magic
+ respectively.
+
+ If classes are given, they will be instantiated with the default
+ constructor. If your classes need a custom constructor, you should
+        instantiate them first and pass the instance.
+
+ The provided arguments can be an arbitrary mix of classes and instances.
+
+ Parameters
+ ----------
+ *magic_objects : one or more classes or instances
+ """
+ # Start by validating them to ensure they have all had their magic
+ # methods registered at the instance level
+ for m in magic_objects:
+ if not m.registered:
+                raise ValueError("Class of magics %r was constructed without "
+                                 "the @magics_class class decorator" % m)
+ if isinstance(m, type):
+ # If we're given an uninstantiated class
+ m = m(shell=self.shell)
+
+ # Now that we have an instance, we can register it and update the
+ # table of callables
+ self.registry[m.__class__.__name__] = m
+ for mtype in magic_kinds:
+ self.magics[mtype].update(m.magics[mtype])
+
+ def register_function(self, func, magic_kind='line', magic_name=None):
+ """Expose a standalone function as magic function for IPython.
+
+ This will create an IPython magic (line, cell or both) from a
+ standalone function. The functions should have the following
+ signatures:
+
+ * For line magics: `def f(line)`
+ * For cell magics: `def f(line, cell)`
+ * For a function that does both: `def f(line, cell=None)`
+
+ In the latter case, the function will be called with `cell==None` when
+ invoked as `%f`, and with cell as a string when invoked as `%%f`.
+
+ Parameters
+ ----------
+ func : callable
+ Function to be registered as a magic.
+ magic_kind : str
+ Kind of magic, one of 'line', 'cell' or 'line_cell'
+ magic_name : optional str
+ If given, the name the magic will have in the IPython namespace. By
+ default, the name of the function itself is used.
+ """
+
+ # Create the new method in the user_magics and register it in the
+ # global table
+ validate_type(magic_kind)
+ magic_name = func.__name__ if magic_name is None else magic_name
+ setattr(self.user_magics, magic_name, func)
+ record_magic(self.magics, magic_kind, magic_name, func)
+
+ def register_alias(self, alias_name, magic_name, magic_kind='line', magic_params=None):
+ """Register an alias to a magic function.
+
+ The alias is an instance of :class:`MagicAlias`, which holds the
+ name and kind of the magic it should call. Binding is done at
+ call time, so if the underlying magic function is changed the alias
+ will call the new function.
+
+ Parameters
+ ----------
+ alias_name : str
+ The name of the magic to be registered.
+ magic_name : str
+ The name of an existing magic.
+ magic_kind : str
+ Kind of magic, one of 'line' or 'cell'
+ """
+
+ # `validate_type` is too permissive, as it allows 'line_cell'
+ # which we do not handle.
+ if magic_kind not in magic_kinds:
+            raise ValueError('magic_kind must be one of %s, %s given' %
+                             (magic_kinds, magic_kind))
+
+ alias = MagicAlias(self.shell, magic_name, magic_kind, magic_params)
+ setattr(self.user_magics, alias_name, alias)
+ record_magic(self.magics, magic_kind, alias_name, alias)
+
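As a rough sketch of how the manager hooks above are typically driven (the module path and magic names are placeholders; the runtime part assumes a live IPython session)::

    # In a profile's ipython_config.py: declare a magic whose module is only
    # imported and registered on first use.
    c = get_config()  # provided by the IPython configuration loader
    c.MagicsManager.lazy_magics = {"heavy": "mypkg.heavy_extension"}

    # At runtime: expose a plain function as a line magic through the manager.
    def now(line):
        import datetime
        return datetime.datetime.now().isoformat()

    ip = get_ipython()
    ip.magics_manager.register_function(now, magic_kind="line", magic_name="now")
    # %now  -> e.g. '2023-09-29T12:24:06'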
+# Key base class that provides the central functionality for magics.
+
+
+class Magics(Configurable):
+ """Base class for implementing magic functions.
+
+ Shell functions which can be reached as %function_name. All magic
+ functions should accept a string, which they can parse for their own
+    needs. This can make some functions easier to type, e.g. `%cd ../`
+    vs. `%cd("../")`.
+
+ Classes providing magic functions need to subclass this class, and they
+ MUST:
+
+ - Use the method decorators `@line_magic` and `@cell_magic` to decorate
+ individual methods as magic functions, AND
+
+ - Use the class decorator `@magics_class` to ensure that the magic
+ methods are properly registered at the instance level upon instance
+ initialization.
+
+    See :mod:`IPython.core.magics` for examples of actual implementation classes.
+ """
+ # Dict holding all command-line options for each magic.
+ options_table = None
+ # Dict for the mapping of magic names to methods, set by class decorator
+ magics = None
+ # Flag to check that the class decorator was properly applied
+ registered = False
+ # Instance of IPython shell
+ shell = None
+
+ def __init__(self, shell=None, **kwargs):
+        if not self.__class__.registered:
+ raise ValueError('Magics subclass without registration - '
+ 'did you forget to apply @magics_class?')
+ if shell is not None:
+ if hasattr(shell, 'configurables'):
+ shell.configurables.append(self)
+ if hasattr(shell, 'config'):
+ kwargs.setdefault('parent', shell)
+
+ self.shell = shell
+ self.options_table = {}
+ # The method decorators are run when the instance doesn't exist yet, so
+ # they can only record the names of the methods they are supposed to
+ # grab. Only now, that the instance exists, can we create the proper
+ # mapping to bound methods. So we read the info off the original names
+ # table and replace each method name by the actual bound method.
+ # But we mustn't clobber the *class* mapping, in case of multiple instances.
+ class_magics = self.magics
+ self.magics = {}
+ for mtype in magic_kinds:
+ tab = self.magics[mtype] = {}
+ cls_tab = class_magics[mtype]
+ for magic_name, meth_name in cls_tab.items():
+ if isinstance(meth_name, str):
+ # it's a method name, grab it
+ tab[magic_name] = getattr(self, meth_name)
+ else:
+ # it's the real thing
+ tab[magic_name] = meth_name
+ # Configurable **needs** to be initiated at the end or the config
+ # magics get screwed up.
+ super(Magics, self).__init__(**kwargs)
+
+ def arg_err(self,func):
+ """Print docstring if incorrect arguments were passed"""
+ print('Error in arguments:')
+ print(oinspect.getdoc(func))
+
+ def format_latex(self, strng):
+ """Format a string for latex inclusion."""
+
+ # Characters that need to be escaped for latex:
+ escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
+ # Magic command names as headers:
+ cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
+ re.MULTILINE)
+ # Magic commands
+ cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
+ re.MULTILINE)
+ # Paragraph continue
+ par_re = re.compile(r'\\$',re.MULTILINE)
+
+ # The "\n" symbol
+ newline_re = re.compile(r'\\n')
+
+ # Now build the string for output:
+ #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
+ strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
+ strng)
+ strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
+ strng = par_re.sub(r'\\\\',strng)
+ strng = escape_re.sub(r'\\\1',strng)
+ strng = newline_re.sub(r'\\textbackslash{}n',strng)
+ return strng
+
+ def parse_options(self, arg_str, opt_str, *long_opts, **kw):
+ """Parse options passed to an argument string.
+
+ The interface is similar to that of :func:`getopt.getopt`, but it
+ returns a :class:`~IPython.utils.struct.Struct` with the options as keys
+ and the stripped argument string still as a string.
+
+        arg_str is split into a true sys.argv vector by using shlex.split.
+ This allows us to easily expand variables, glob files, quote
+ arguments, etc.
+
+ Parameters
+ ----------
+ arg_str : str
+ The arguments to parse.
+ opt_str : str
+ The options specification.
+ mode : str, default 'string'
+ If given as 'list', the argument string is returned as a list (split
+ on whitespace) instead of a string.
+ list_all : bool, default False
+ Put all option values in lists. Normally only options
+ appearing more than once are put in a list.
+ posix : bool, default True
+ Whether to split the input line in POSIX mode or not, as per the
+ conventions outlined in the :mod:`shlex` module from the standard
+ library.
+ """
+
+ # inject default options at the beginning of the input line
+ caller = sys._getframe(1).f_code.co_name
+ arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
+
+ mode = kw.get('mode','string')
+ if mode not in ['string','list']:
+ raise ValueError('incorrect mode given: %s' % mode)
+ # Get options
+ list_all = kw.get('list_all',0)
+ posix = kw.get('posix', os.name == 'posix')
+ strict = kw.get('strict', True)
+
+ preserve_non_opts = kw.get("preserve_non_opts", False)
+ remainder_arg_str = arg_str
+
+ # Check if we have more than one argument to warrant extra processing:
+ odict = {} # Dictionary with options
+ args = arg_str.split()
+ if len(args) >= 1:
+            # Only bother looking for options when there is at least one
+            # argument to process
+ argv = arg_split(arg_str, posix, strict)
+ # Do regular option processing
+ try:
+ opts,args = getopt(argv, opt_str, long_opts)
+ except GetoptError as e:
+ raise UsageError(
+ '%s ( allowed: "%s" %s)' % (e.msg, opt_str, " ".join(long_opts))
+ ) from e
+ for o, a in opts:
+ if mode == "string" and preserve_non_opts:
+ # remove option-parts from the original args-string and preserve remaining-part.
+ # This relies on the arg_split(...) and getopt(...)'s impl spec, that the parsed options are
+ # returned in the original order.
+ remainder_arg_str = remainder_arg_str.replace(o, "", 1).replace(
+ a, "", 1
+ )
+ if o.startswith("--"):
+ o = o[2:]
+ else:
+ o = o[1:]
+ try:
+ odict[o].append(a)
+ except AttributeError:
+ odict[o] = [odict[o],a]
+ except KeyError:
+ if list_all:
+ odict[o] = [a]
+ else:
+ odict[o] = a
+
+ # Prepare opts,args for return
+ opts = Struct(odict)
+ if mode == 'string':
+ if preserve_non_opts:
+ args = remainder_arg_str.lstrip()
+ else:
+ args = " ".join(args)
+
+ return opts,args
+
+ def default_option(self, fn, optstr):
+ """Make an entry in the options_table for fn, with value optstr"""
+
+ if fn not in self.lsmagic():
+ error("%s is not a magic function" % fn)
+ self.options_table[fn] = optstr
+
+
+class MagicAlias(object):
+ """An alias to another magic function.
+
+ An alias is determined by its magic name and magic kind. Lookup
+ is done at call time, so if the underlying magic changes the alias
+ will call the new function.
+
+ Use the :meth:`MagicsManager.register_alias` method or the
+ `%alias_magic` magic function to create and register a new alias.
+ """
+ def __init__(self, shell, magic_name, magic_kind, magic_params=None):
+ self.shell = shell
+ self.magic_name = magic_name
+ self.magic_params = magic_params
+ self.magic_kind = magic_kind
+
+ self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
+ self.__doc__ = "Alias for `%s`." % self.pretty_target
+
+ self._in_call = False
+
+ def __call__(self, *args, **kwargs):
+ """Call the magic alias."""
+ fn = self.shell.find_magic(self.magic_name, self.magic_kind)
+ if fn is None:
+ raise UsageError("Magic `%s` not found." % self.pretty_target)
+
+ # Protect against infinite recursion.
+ if self._in_call:
+ raise UsageError("Infinite recursion detected; "
+ "magic aliases cannot call themselves.")
+ self._in_call = True
+ try:
+ if self.magic_params:
+ args_list = list(args)
+ args_list[0] = self.magic_params + " " + args[0]
+ args = tuple(args_list)
+ return fn(*args, **kwargs)
+ finally:
+ self._in_call = False
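Putting the pieces together, a hedged end-to-end sketch of a class-based magic container (class and magic names are hypothetical; registration assumes a running IPython session)::

    from IPython.core.magic import Magics, magics_class, line_magic, cell_magic

    @magics_class
    class DemoMagics(Magics):
        """Illustrative container holding one line magic and one cell magic."""

        @line_magic
        def greet(self, line):
            # parse_options gives getopt-style handling of the argument string;
            # "-l" is a flag that upper-cases the greeting.
            opts, args = self.parse_options(line, "l")
            text = "hello %s" % args
            return text.upper() if "l" in opts else text

        @cell_magic
        def countchars(self, line, cell):
            return len(cell)

    # In the session:
    #   get_ipython().register_magics(DemoMagics)
    #   %greet -l world   -> 'HELLO WORLD'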
diff --git a/contrib/python/ipython/py3/IPython/core/magic_arguments.py b/contrib/python/ipython/py3/IPython/core/magic_arguments.py
new file mode 100644
index 0000000000..24dd541876
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magic_arguments.py
@@ -0,0 +1,310 @@
+''' A decorator-based method of constructing IPython magics with `argparse`
+option handling.
+
+New magic functions can be defined like so::
+
+ from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+
+ @magic_arguments()
+ @argument('-o', '--option', help='An optional argument.')
+ @argument('arg', type=int, help='An integer positional argument.')
+ def magic_cool(self, arg):
+ """ A really cool magic command.
+
+ """
+ args = parse_argstring(magic_cool, arg)
+ ...
+
+The `@magic_arguments` decorator marks the function as having argparse arguments.
+The `@argument` decorator adds an argument using the same syntax as argparse's
+`add_argument()` method. More sophisticated uses may also require the
+`@argument_group` or `@kwds` decorator to customize the formatting and the
+parsing.
+
+Help text for the magic is automatically generated from the docstring and the
+arguments::
+
+ In[1]: %cool?
+ %cool [-o OPTION] arg
+
+ A really cool magic command.
+
+ positional arguments:
+ arg An integer positional argument.
+
+ optional arguments:
+ -o OPTION, --option OPTION
+ An optional argument.
+
+Here is an elaborated example that uses default parameters in `argument` and accesses the parsed `args` inside the cell magic::
+
+ from IPython.core.magic import register_cell_magic
+ from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+
+
+ @magic_arguments()
+ @argument(
+ "--option",
+ "-o",
+ help=("Add an option here"),
+ )
+ @argument(
+ "--style",
+ "-s",
+ default="foo",
+ help=("Add some style arguments"),
+ )
+ @register_cell_magic
+ def my_cell_magic(line, cell):
+ args = parse_argstring(my_cell_magic, line)
+ print(f"{args.option=}")
+ print(f"{args.style=}")
+ print(f"{cell=}")
+
+In a jupyter notebook, this cell magic can be executed like this::
+
+ %%my_cell_magic -o Hello
+ print("bar")
+ i = 42
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.magic_arguments
+ :parts: 3
+
+'''
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import argparse
+import re
+
+# Our own imports
+from IPython.core.error import UsageError
+from IPython.utils.decorators import undoc
+from IPython.utils.process import arg_split
+from IPython.utils.text import dedent
+
+NAME_RE = re.compile(r"[a-zA-Z][a-zA-Z0-9_-]*$")
+
+@undoc
+class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
+ """A HelpFormatter with a couple of changes to meet our needs.
+ """
+ # Modified to dedent text.
+ def _fill_text(self, text, width, indent):
+ return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
+
+ # Modified to wrap argument placeholders in <> where necessary.
+ def _format_action_invocation(self, action):
+ if not action.option_strings:
+ metavar, = self._metavar_formatter(action, action.dest)(1)
+ return metavar
+
+ else:
+ parts = []
+
+ # if the Optional doesn't take a value, format is:
+ # -s, --long
+ if action.nargs == 0:
+ parts.extend(action.option_strings)
+
+ # if the Optional takes a value, format is:
+ # -s ARGS, --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ # IPYTHON MODIFICATION: If args_string is not a plain name, wrap
+ # it in <> so it's valid RST.
+ if not NAME_RE.match(args_string):
+ args_string = "<%s>" % args_string
+ for option_string in action.option_strings:
+ parts.append('%s %s' % (option_string, args_string))
+
+ return ', '.join(parts)
+
+ # Override the default prefix ('usage') to our % magic escape,
+ # in a code block.
+ def add_usage(self, usage, actions, groups, prefix="::\n\n %"):
+ super(MagicHelpFormatter, self).add_usage(usage, actions, groups, prefix)
+
+class MagicArgumentParser(argparse.ArgumentParser):
+ """ An ArgumentParser tweaked for use by IPython magics.
+ """
+ def __init__(self,
+ prog=None,
+ usage=None,
+ description=None,
+ epilog=None,
+ parents=None,
+ formatter_class=MagicHelpFormatter,
+ prefix_chars='-',
+ argument_default=None,
+ conflict_handler='error',
+ add_help=False):
+ if parents is None:
+ parents = []
+ super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
+ description=description, epilog=epilog,
+ parents=parents, formatter_class=formatter_class,
+ prefix_chars=prefix_chars, argument_default=argument_default,
+ conflict_handler=conflict_handler, add_help=add_help)
+
+ def error(self, message):
+ """ Raise a catchable error instead of exiting.
+ """
+ raise UsageError(message)
+
+ def parse_argstring(self, argstring):
+ """ Split a string into an argument list and parse that argument list.
+ """
+ argv = arg_split(argstring)
+ return self.parse_args(argv)
+
+
+def construct_parser(magic_func):
+ """ Construct an argument parser using the function decorations.
+ """
+ kwds = getattr(magic_func, 'argcmd_kwds', {})
+ if 'description' not in kwds:
+ kwds['description'] = getattr(magic_func, '__doc__', None)
+ arg_name = real_name(magic_func)
+ parser = MagicArgumentParser(arg_name, **kwds)
+ # Reverse the list of decorators in order to apply them in the
+ # order in which they appear in the source.
+ group = None
+ for deco in magic_func.decorators[::-1]:
+ result = deco.add_to_parser(parser, group)
+ if result is not None:
+ group = result
+
+ # Replace the magic function's docstring with the full help text.
+ magic_func.__doc__ = parser.format_help()
+
+ return parser
+
+
+def parse_argstring(magic_func, argstring):
+ """ Parse the string of arguments for the given magic function.
+ """
+ return magic_func.parser.parse_argstring(argstring)
+
+
+def real_name(magic_func):
+ """ Find the real name of the magic.
+ """
+ magic_name = magic_func.__name__
+ if magic_name.startswith('magic_'):
+ magic_name = magic_name[len('magic_'):]
+ return getattr(magic_func, 'argcmd_name', magic_name)
+
+
+class ArgDecorator(object):
+ """ Base class for decorators to add ArgumentParser information to a method.
+ """
+
+ def __call__(self, func):
+ if not getattr(func, 'has_arguments', False):
+ func.has_arguments = True
+ func.decorators = []
+ func.decorators.append(self)
+ return func
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser, if necessary.
+ """
+ pass
+
+
+class magic_arguments(ArgDecorator):
+ """ Mark the magic as having argparse arguments and possibly adjust the
+ name.
+ """
+
+ def __init__(self, name=None):
+ self.name = name
+
+ def __call__(self, func):
+ if not getattr(func, 'has_arguments', False):
+ func.has_arguments = True
+ func.decorators = []
+ if self.name is not None:
+ func.argcmd_name = self.name
+ # This should be the first decorator in the list of decorators, thus the
+ # last to execute. Build the parser.
+ func.parser = construct_parser(func)
+ return func
+
+
+class ArgMethodWrapper(ArgDecorator):
+
+ """
+ Base class to define a wrapper for ArgumentParser method.
+
+ Child class must define either `_method_name` or `add_to_parser`.
+
+ """
+
+ _method_name: str
+
+ def __init__(self, *args, **kwds):
+ self.args = args
+ self.kwds = kwds
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser.
+ """
+ if group is not None:
+ parser = group
+ getattr(parser, self._method_name)(*self.args, **self.kwds)
+ return None
+
+
+class argument(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to add_argument().
+
+ Instances also serve to decorate command methods.
+ """
+ _method_name = 'add_argument'
+
+
+class defaults(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to set_defaults().
+
+ Instances also serve to decorate command methods.
+ """
+ _method_name = 'set_defaults'
+
+
+class argument_group(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to add_argument_group().
+
+ Instances also serve to decorate command methods.
+ """
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser.
+ """
+ return parser.add_argument_group(*self.args, **self.kwds)
+
+
+class kwds(ArgDecorator):
+ """ Provide other keywords to the sub-parser constructor.
+ """
+ def __init__(self, **kwds):
+ self.kwds = kwds
+
+ def __call__(self, func):
+ func = super(kwds, self).__call__(func)
+ func.argcmd_kwds = self.kwds
+ return func
+
+
+__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
+ 'parse_argstring']
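One more hedged sketch, showing the less-documented `argument_group` decorator combined with a registered line magic (the magic name and file handling are illustrative; registration assumes a running IPython session)::

    from IPython.core.magic import register_line_magic
    from IPython.core.magic_arguments import (argument, argument_group,
                                              magic_arguments, parse_argstring)

    @magic_arguments()
    @argument("path", help="File to inspect.")
    @argument_group("Display options")
    @argument("-n", "--lines", type=int, default=10,
              help="Number of lines to show.")
    @register_line_magic
    def head(line):
        """Print the first lines of a file."""
        args = parse_argstring(head, line)
        with open(args.path) as f:
            for _, text in zip(range(args.lines), f):
                print(text, end="")

    # %head -n 3 setup.py   -> prints the first three lines of setup.py
    # %head?                -> shows the argparse-generated help text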
diff --git a/contrib/python/ipython/py3/IPython/core/magics/__init__.py b/contrib/python/ipython/py3/IPython/core/magics/__init__.py
new file mode 100644
index 0000000000..a6c5f474c1
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/__init__.py
@@ -0,0 +1,42 @@
+"""Implementation of all the magic functions built into IPython.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from ..magic import Magics, magics_class
+from .auto import AutoMagics
+from .basic import BasicMagics, AsyncMagics
+from .code import CodeMagics, MacroToEdit
+from .config import ConfigMagics
+from .display import DisplayMagics
+from .execution import ExecutionMagics
+from .extension import ExtensionMagics
+from .history import HistoryMagics
+from .logging import LoggingMagics
+from .namespace import NamespaceMagics
+from .osm import OSMagics
+from .packaging import PackagingMagics
+from .pylab import PylabMagics
+from .script import ScriptMagics
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class UserMagics(Magics):
+ """Placeholder for user-defined magics to be added at runtime.
+
+ All magics are eventually merged into a single namespace at runtime, but we
+ use this class to isolate the magics defined dynamically by the user into
+ their own class.
+ """
diff --git a/contrib/python/ipython/py3/IPython/core/magics/auto.py b/contrib/python/ipython/py3/IPython/core/magics/auto.py
new file mode 100644
index 0000000000..56aa4f72eb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/auto.py
@@ -0,0 +1,144 @@
+"""Implementation of magic functions that control various automatic behaviors.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from IPython.core.magic import Bunch, Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class AutoMagics(Magics):
+ """Magics that control various autoX behaviors."""
+
+ def __init__(self, shell):
+ super(AutoMagics, self).__init__(shell)
+ # namespace for holding state we may need
+ self._magic_state = Bunch()
+
+ @line_magic
+ def automagic(self, parameter_s=''):
+ """Make magic functions callable without having to type the initial %.
+
+ Without arguments toggles on/off (when off, you must call it as
+ %automagic, of course). With arguments it sets the value, and you can
+ use any of (case insensitive):
+
+ - on, 1, True: to activate
+
+ - off, 0, False: to deactivate.
+
+ Note that magic functions have lowest priority, so if there's a
+ variable whose name collides with that of a magic fn, automagic won't
+ work for that function (you get the variable instead). However, if you
+ delete the variable (del var), the previously shadowed magic function
+ becomes visible to automagic again."""
+
+ arg = parameter_s.lower()
+ mman = self.shell.magics_manager
+ if arg in ('on', '1', 'true'):
+ val = True
+ elif arg in ('off', '0', 'false'):
+ val = False
+ else:
+ val = not mman.auto_magic
+ mman.auto_magic = val
+ print('\n' + self.shell.magics_manager.auto_status())
+
+ @skip_doctest
+ @line_magic
+ def autocall(self, parameter_s=''):
+ """Make functions callable without having to type parentheses.
+
+ Usage:
+
+ %autocall [mode]
+
+ The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
+ value is toggled on and off (remembering the previous state).
+
+ In more detail, these values mean:
+
+ 0 -> fully disabled
+
+ 1 -> active, but do not apply if there are no arguments on the line.
+
+ In this mode, you get::
+
+ In [1]: callable
+ Out[1]: <built-in function callable>
+
+ In [2]: callable 'hello'
+ ------> callable('hello')
+ Out[2]: False
+
+ 2 -> Active always. Even if no arguments are present, the callable
+ object is called::
+
+ In [2]: float
+ ------> float()
+ Out[2]: 0.0
+
+ Note that even with autocall off, you can still use '/' at the start of
+ a line to treat the first argument on the command line as a function
+ and add parentheses to it::
+
+ In [8]: /str 43
+ ------> str(43)
+ Out[8]: '43'
+
+ # all-random (note for auto-testing)
+ """
+
+ valid_modes = {
+ 0: "Off",
+ 1: "Smart",
+ 2: "Full",
+ }
+
+ def errorMessage() -> str:
+ error = "Valid modes: "
+ for k, v in valid_modes.items():
+ error += str(k) + "->" + v + ", "
+            error = error[:-2]  # remove trailing `, ` after last element
+ return error
+
+ if parameter_s:
+            if parameter_s not in map(str, valid_modes.keys()):
+ error(errorMessage())
+ return
+ arg = int(parameter_s)
+ else:
+ arg = 'toggle'
+
+        if arg not in (*valid_modes.keys(), "toggle"):
+ error(errorMessage())
+ return
+
+        if arg in valid_modes:
+ self.shell.autocall = arg
+ else: # toggle
+ if self.shell.autocall:
+ self._magic_state.autocall_save = self.shell.autocall
+ self.shell.autocall = 0
+ else:
+ try:
+ self.shell.autocall = self._magic_state.autocall_save
+ except AttributeError:
+ self.shell.autocall = self._magic_state.autocall_save = 1
+
+ print("Automatic calling is:", list(valid_modes.values())[self.shell.autocall])
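The same toggles can also be driven programmatically; a brief sketch, assuming a running IPython session::

    ip = get_ipython()

    ip.run_line_magic("automagic", "off")   # the % prefix is required again
    ip.run_line_magic("autocall", "1")      # "smart" autocall
    print(ip.magics_manager.auto_magic)     # -> False
    print(ip.autocall)                      # -> 1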
diff --git a/contrib/python/ipython/py3/IPython/core/magics/basic.py b/contrib/python/ipython/py3/IPython/core/magics/basic.py
new file mode 100644
index 0000000000..814dec72e2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/basic.py
@@ -0,0 +1,663 @@
+"""Implementation of basic magic functions."""
+
+
+from logging import error
+import io
+import os
+from pprint import pformat
+import sys
+from warnings import warn
+
+from traitlets.utils.importstring import import_item
+from IPython.core import magic_arguments, page
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
+from IPython.utils.text import format_screen, dedent, indent
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.ipstruct import Struct
+
+
+class MagicsDisplay(object):
+ def __init__(self, magics_manager, ignore=None):
+ self.ignore = ignore if ignore else []
+ self.magics_manager = magics_manager
+
+ def _lsmagic(self):
+        """The main implementation of the %lsmagic magic."""
+ mesc = magic_escapes['line']
+ cesc = magic_escapes['cell']
+ mman = self.magics_manager
+ magics = mman.lsmagic()
+ out = ['Available line magics:',
+ mesc + (' '+mesc).join(sorted([m for m,v in magics['line'].items() if (v not in self.ignore)])),
+ '',
+ 'Available cell magics:',
+ cesc + (' '+cesc).join(sorted([m for m,v in magics['cell'].items() if (v not in self.ignore)])),
+ '',
+ mman.auto_status()]
+ return '\n'.join(out)
+
+ def _repr_pretty_(self, p, cycle):
+ p.text(self._lsmagic())
+
+ def __str__(self):
+ return self._lsmagic()
+
+ def _jsonable(self):
+ """turn magics dict into jsonable dict of the same structure
+
+ replaces object instances with their class names as strings
+ """
+ magic_dict = {}
+ mman = self.magics_manager
+ magics = mman.lsmagic()
+ for key, subdict in magics.items():
+ d = {}
+ magic_dict[key] = d
+ for name, obj in subdict.items():
+ try:
+ classname = obj.__self__.__class__.__name__
+ except AttributeError:
+ classname = 'Other'
+
+ d[name] = classname
+ return magic_dict
+
+ def _repr_json_(self):
+ return self._jsonable()
+
+
+@magics_class
+class BasicMagics(Magics):
+ """Magics that provide central IPython functionality.
+
+ These are various magics that don't fit into specific categories but that
+ are all part of the base 'IPython experience'."""
+
+ @skip_doctest
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-l', '--line', action='store_true',
+ help="""Create a line magic alias."""
+ )
+ @magic_arguments.argument(
+ '-c', '--cell', action='store_true',
+ help="""Create a cell magic alias."""
+ )
+ @magic_arguments.argument(
+ 'name',
+ help="""Name of the magic to be created."""
+ )
+ @magic_arguments.argument(
+ 'target',
+ help="""Name of the existing line or cell magic."""
+ )
+ @magic_arguments.argument(
+ '-p', '--params', default=None,
+ help="""Parameters passed to the magic function."""
+ )
+ @line_magic
+ def alias_magic(self, line=''):
+ """Create an alias for an existing line or cell magic.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %alias_magic t timeit
+ Created `%t` as an alias for `%timeit`.
+ Created `%%t` as an alias for `%%timeit`.
+
+ In [2]: %t -n1 pass
+ 1 loops, best of 3: 954 ns per loop
+
+ In [3]: %%t -n1
+ ...: pass
+ ...:
+ 1 loops, best of 3: 954 ns per loop
+
+ In [4]: %alias_magic --cell whereami pwd
+ UsageError: Cell magic function `%%pwd` not found.
+ In [5]: %alias_magic --line whereami pwd
+ Created `%whereami` as an alias for `%pwd`.
+
+ In [6]: %whereami
+ Out[6]: u'/home/testuser'
+
+ In [7]: %alias_magic h history "-p -l 30" --line
+          Created `%h` as an alias for `%history -p -l 30`.
+ """
+
+ args = magic_arguments.parse_argstring(self.alias_magic, line)
+ shell = self.shell
+ mman = self.shell.magics_manager
+ escs = ''.join(magic_escapes.values())
+
+ target = args.target.lstrip(escs)
+ name = args.name.lstrip(escs)
+
+ params = args.params
+ if (params and
+ ((params.startswith('"') and params.endswith('"'))
+ or (params.startswith("'") and params.endswith("'")))):
+ params = params[1:-1]
+
+ # Find the requested magics.
+ m_line = shell.find_magic(target, 'line')
+ m_cell = shell.find_magic(target, 'cell')
+ if args.line and m_line is None:
+ raise UsageError('Line magic function `%s%s` not found.' %
+ (magic_escapes['line'], target))
+ if args.cell and m_cell is None:
+ raise UsageError('Cell magic function `%s%s` not found.' %
+ (magic_escapes['cell'], target))
+
+ # If --line and --cell are not specified, default to the ones
+ # that are available.
+ if not args.line and not args.cell:
+ if not m_line and not m_cell:
+ raise UsageError(
+ 'No line or cell magic with name `%s` found.' % target
+ )
+ args.line = bool(m_line)
+ args.cell = bool(m_cell)
+
+ params_str = "" if params is None else " " + params
+
+ if args.line:
+ mman.register_alias(name, target, 'line', params)
+ print('Created `%s%s` as an alias for `%s%s%s`.' % (
+ magic_escapes['line'], name,
+ magic_escapes['line'], target, params_str))
+
+ if args.cell:
+ mman.register_alias(name, target, 'cell', params)
+ print('Created `%s%s` as an alias for `%s%s%s`.' % (
+ magic_escapes['cell'], name,
+ magic_escapes['cell'], target, params_str))
+
+ @line_magic
+ def lsmagic(self, parameter_s=''):
+ """List currently available magic functions."""
+ return MagicsDisplay(self.shell.magics_manager, ignore=[])
+
+ def _magic_docs(self, brief=False, rest=False):
+ """Return docstrings from magic functions."""
+ mman = self.shell.magics_manager
+ docs = mman.lsmagic_docs(brief, missing='No documentation')
+
+ if rest:
+ format_string = '**%s%s**::\n\n%s\n\n'
+ else:
+ format_string = '%s%s:\n%s\n'
+
+ return ''.join(
+ [format_string % (magic_escapes['line'], fname,
+ indent(dedent(fndoc)))
+ for fname, fndoc in sorted(docs['line'].items())]
+ +
+ [format_string % (magic_escapes['cell'], fname,
+ indent(dedent(fndoc)))
+ for fname, fndoc in sorted(docs['cell'].items())]
+ )
+
+ @line_magic
+ def magic(self, parameter_s=''):
+ """Print information about the magic function system.
+
+ Supported formats: -latex, -brief, -rest
+ """
+
+ mode = ''
+ try:
+ mode = parameter_s.split()[0][1:]
+ except IndexError:
+ pass
+
+ brief = (mode == 'brief')
+ rest = (mode == 'rest')
+ magic_docs = self._magic_docs(brief, rest)
+
+ if mode == 'latex':
+ print(self.format_latex(magic_docs))
+ return
+ else:
+ magic_docs = format_screen(magic_docs)
+
+ out = ["""
+IPython's 'magic' functions
+===========================
+
+The magic function system provides a series of functions which allow you to
+control the behavior of IPython itself, plus a lot of system-type
+features. There are two kinds of magics, line-oriented and cell-oriented.
+
+Line magics are prefixed with the % character and work much like OS
+command-line calls: they get as an argument the rest of the line, where
+arguments are passed without parentheses or quotes. For example, this will
+time the given statement::
+
+ %timeit range(1000)
+
+Cell magics are prefixed with a double %%, and they are functions that get as
+an argument not only the rest of the line, but also the lines below it in a
+separate argument. These magics are called with two arguments: the rest of the
+call line and the body of the cell, consisting of the lines below the first.
+For example::
+
+    %%timeit x = numpy.random.randn(100, 100)
+ numpy.linalg.svd(x)
+
+will time the execution of the numpy svd routine, running the assignment of x
+as part of the setup phase, which is not timed.
+
+In a line-oriented client (the terminal or Qt console IPython), starting a new
+input with %% will automatically enter cell mode, and IPython will continue
+reading input until a blank line is given. In the notebook, simply type the
+whole cell as one entity, but keep in mind that the %% escape can only be at
+the very start of the cell.
+
+NOTE: If you have 'automagic' enabled (via the command line option or with the
+%automagic function), you don't need to type in the % explicitly for line
+magics; cell magics always require an explicit '%%' escape. By default,
+IPython ships with automagic on, so you should only rarely need the % escape.
+
+Example: typing '%cd mydir' (without the quotes) changes your working directory
+to 'mydir', if it exists.
+
+For a list of the available magic functions, use %lsmagic. For a description
+of any of them, type %magic_name?, e.g. '%cd?'.
+
+Currently the magic system has the following functions:""",
+ magic_docs,
+ "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
+ str(self.lsmagic()),
+ ]
+ page.page('\n'.join(out))
+
+
+ @line_magic
+ def page(self, parameter_s=''):
+ """Pretty print the object and display it through a pager.
+
+ %page [options] OBJECT
+
+ If no object is given, use _ (last output).
+
+ Options:
+
+ -r: page str(object), don't pretty-print it."""
+
+ # After a function contributed by Olivier Aubert, slightly modified.
+
+ # Process options/args
+ opts, args = self.parse_options(parameter_s, 'r')
+ raw = 'r' in opts
+
+        oname = args if args else '_'
+ info = self.shell._ofind(oname)
+ if info.found:
+ if raw:
+ txt = str(info.obj)
+ else:
+ txt = pformat(info.obj)
+ page.page(txt)
+ else:
+ print('Object `%s` not found' % oname)
+
+ @line_magic
+ def pprint(self, parameter_s=''):
+ """Toggle pretty printing on/off."""
+ ptformatter = self.shell.display_formatter.formatters['text/plain']
+        ptformatter.pprint = not ptformatter.pprint
+ print('Pretty printing has been turned',
+ ['OFF','ON'][ptformatter.pprint])
+
+ @line_magic
+ def colors(self, parameter_s=''):
+ """Switch color scheme for prompts, info system and exception handlers.
+
+ Currently implemented schemes: NoColor, Linux, LightBG.
+
+ Color scheme names are not case-sensitive.
+
+ Examples
+ --------
+ To get a plain black and white terminal::
+
+ %colors nocolor
+ """
+ def color_switch_err(name):
+ warn('Error changing %s color schemes.\n%s' %
+ (name, sys.exc_info()[1]), stacklevel=2)
+
+
+ new_scheme = parameter_s.strip()
+ if not new_scheme:
+ raise UsageError(
+ "%colors: you must specify a color scheme. See '%colors?'")
+ # local shortcut
+ shell = self.shell
+
+ # Set shell colour scheme
+ try:
+ shell.colors = new_scheme
+ shell.refresh_style()
+ except:
+ color_switch_err('shell')
+
+ # Set exception colors
+ try:
+ shell.InteractiveTB.set_colors(scheme = new_scheme)
+ shell.SyntaxTB.set_colors(scheme = new_scheme)
+ except:
+ color_switch_err('exception')
+
+ # Set info (for 'object?') colors
+ if shell.color_info:
+ try:
+ shell.inspector.set_active_scheme(new_scheme)
+ except:
+ color_switch_err('object inspector')
+ else:
+ shell.inspector.set_active_scheme('NoColor')
+
+ @line_magic
+ def xmode(self, parameter_s=''):
+ """Switch modes for the exception handlers.
+
+ Valid modes: Plain, Context, Verbose, and Minimal.
+
+ If called without arguments, acts as a toggle.
+
+        When in verbose mode, passing `--show` (or `--hide`) will respectively
+        show (or hide) frames with ``__tracebackhide__ = True`` set.
+ """
+
+ def xmode_switch_err(name):
+ warn('Error changing %s exception modes.\n%s' %
+ (name,sys.exc_info()[1]))
+
+ shell = self.shell
+ if parameter_s.strip() == "--show":
+ shell.InteractiveTB.skip_hidden = False
+ return
+ if parameter_s.strip() == "--hide":
+ shell.InteractiveTB.skip_hidden = True
+ return
+
+ new_mode = parameter_s.strip().capitalize()
+ try:
+ shell.InteractiveTB.set_mode(mode=new_mode)
+ print('Exception reporting mode:',shell.InteractiveTB.mode)
+ except:
+ xmode_switch_err('user')
+
+ @line_magic
+ def quickref(self, arg):
+ """ Show a quick reference sheet """
+ from IPython.core.usage import quick_reference
+ qr = quick_reference + self._magic_docs(brief=True)
+ page.page(qr)
+
+ @line_magic
+ def doctest_mode(self, parameter_s=''):
+ """Toggle doctest mode on and off.
+
+ This mode is intended to make IPython behave as much as possible like a
+ plain Python shell, from the perspective of how its prompts, exceptions
+ and output look. This makes it easy to copy and paste parts of a
+ session into doctests. It does so by:
+
+ - Changing the prompts to the classic ``>>>`` ones.
+ - Changing the exception reporting mode to 'Plain'.
+ - Disabling pretty-printing of output.
+
+ Note that IPython also supports the pasting of code snippets that have
+ leading '>>>' and '...' prompts in them. This means that you can paste
+ doctests from files or docstrings (even if they have leading
+ whitespace), and the code will execute correctly. You can then use
+ '%history -t' to see the translated history; this will give you the
+ input after removal of all the leading prompts and whitespace, which
+ can be pasted back into an editor.
+
+ With these features, you can switch into this mode easily whenever you
+ need to do testing and changes to doctests, without having to leave
+ your existing IPython session.
+ """
+
+ # Shorthands
+ shell = self.shell
+ meta = shell.meta
+ disp_formatter = self.shell.display_formatter
+ ptformatter = disp_formatter.formatters['text/plain']
+ # dstore is a data store kept in the instance metadata bag to track any
+ # changes we make, so we can undo them later.
+ dstore = meta.setdefault('doctest_mode',Struct())
+ save_dstore = dstore.setdefault
+
+ # save a few values we'll need to recover later
+ mode = save_dstore('mode',False)
+ save_dstore('rc_pprint',ptformatter.pprint)
+ save_dstore('xmode',shell.InteractiveTB.mode)
+ save_dstore('rc_separate_out',shell.separate_out)
+ save_dstore('rc_separate_out2',shell.separate_out2)
+ save_dstore('rc_separate_in',shell.separate_in)
+ save_dstore('rc_active_types',disp_formatter.active_types)
+
+ if not mode:
+ # turn on
+
+ # Prompt separators like plain python
+ shell.separate_in = ''
+ shell.separate_out = ''
+ shell.separate_out2 = ''
+
+
+ ptformatter.pprint = False
+ disp_formatter.active_types = ['text/plain']
+
+ shell.magic('xmode Plain')
+ else:
+ # turn off
+ shell.separate_in = dstore.rc_separate_in
+
+ shell.separate_out = dstore.rc_separate_out
+ shell.separate_out2 = dstore.rc_separate_out2
+
+ ptformatter.pprint = dstore.rc_pprint
+ disp_formatter.active_types = dstore.rc_active_types
+
+ shell.magic('xmode ' + dstore.xmode)
+
+ # mode here is the state before we switch; switch_doctest_mode takes
+ # the mode we're switching to.
+ shell.switch_doctest_mode(not mode)
+
+ # Store new mode and inform
+ dstore.mode = bool(not mode)
+ mode_label = ['OFF','ON'][dstore.mode]
+ print('Doctest mode is:', mode_label)
+
+ @line_magic
+ def gui(self, parameter_s=''):
+ """Enable or disable IPython GUI event loop integration.
+
+ %gui [GUINAME]
+
+ This magic replaces IPython's threaded shells that were activated
+ using the (pylab/wthread/etc.) command line flags. GUI toolkits
+ can now be enabled at runtime and keyboard
+ interrupts should work without any problems. The following toolkits
+ are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
+
+ %gui wx # enable wxPython event loop integration
+ %gui qt # enable PyQt/PySide event loop integration
+ # with the latest version available.
+ %gui qt6 # enable PyQt6/PySide6 event loop integration
+ %gui qt5 # enable PyQt5/PySide2 event loop integration
+ %gui gtk # enable PyGTK event loop integration
+ %gui gtk3 # enable Gtk3 event loop integration
+ %gui gtk4 # enable Gtk4 event loop integration
+ %gui tk # enable Tk event loop integration
+ %gui osx # enable Cocoa event loop integration
+ # (requires %matplotlib 1.1)
+ %gui # disable all event loop integration
+
+ WARNING: after any of these has been called you can simply create
+ an application object, but DO NOT start the event loop yourself, as
+ we have already handled that.
+ """
+ opts, arg = self.parse_options(parameter_s, '')
+ if arg=='': arg = None
+ try:
+ return self.shell.enable_gui(arg)
+ except Exception as e:
+ # print simple error message, rather than traceback if we can't
+ # hook up the GUI
+ error(str(e))
+
+ @skip_doctest
+ @line_magic
+ def precision(self, s=''):
+ """Set floating point precision for pretty printing.
+
+ Can set either integer precision or a format string.
+
+ If numpy has been imported and precision is an int,
+ numpy display precision will also be set, via ``numpy.set_printoptions``.
+
+ If no argument is given, defaults will be restored.
+
+ Examples
+ --------
+ ::
+
+ In [1]: from math import pi
+
+ In [2]: %precision 3
+ Out[2]: u'%.3f'
+
+ In [3]: pi
+ Out[3]: 3.142
+
+ In [4]: %precision %i
+ Out[4]: u'%i'
+
+ In [5]: pi
+ Out[5]: 3
+
+ In [6]: %precision %e
+ Out[6]: u'%e'
+
+ In [7]: pi**10
+ Out[7]: 9.364805e+04
+
+ In [8]: %precision
+ Out[8]: u'%r'
+
+ In [9]: pi**10
+ Out[9]: 93648.047476082982
+ """
+ ptformatter = self.shell.display_formatter.formatters['text/plain']
+ ptformatter.float_precision = s
+ return ptformatter.float_format
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ 'filename', type=str,
+ help='Notebook name or filename'
+ )
+ @line_magic
+ def notebook(self, s):
+ """Export and convert IPython notebooks.
+
+ This function can export the current IPython history to a notebook file.
+ For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
+ """
+ args = magic_arguments.parse_argstring(self.notebook, s)
+ outfname = os.path.expanduser(args.filename)
+
+ from nbformat import write, v4
+
+ cells = []
+ hist = list(self.shell.history_manager.get_range())
+ if len(hist) <= 1:
+ raise ValueError('History is empty, cannot export')
+ for session, execution_count, source in hist[:-1]:
+ cells.append(v4.new_code_cell(
+ execution_count=execution_count,
+ source=source
+ ))
+ nb = v4.new_notebook(cells=cells)
+ with io.open(outfname, "w", encoding="utf-8") as f:
+ write(nb, f, version=4)
+
+@magics_class
+class AsyncMagics(BasicMagics):
+
+ @line_magic
+ def autoawait(self, parameter_s):
+ """
+ Allow changing the status of the autoawait option.
+
+ This allows you to set a specific asynchronous code runner.
+
+ If no value is passed, print the currently used asynchronous integration
+ and whether it is activated.
+
+ It can take a number of values, evaluated in the following order:
+
+ - False/false/off deactivate autoawait integration
+ - True/true/on activate autoawait integration using the configured default
+ loop
+ - asyncio/curio/trio activate autoawait integration and use the integration
+ for said library.
+
+ - `sync` turn on the pseudo-sync integration (mostly used for
+ `IPython.embed()`, which does not run IPython with a real event loop)
+ and deactivate running asynchronous code. Turning on asynchronous code
+ with the pseudo-sync loop is undefined behavior and may lead IPython to
+ crash.
+
+ If the passed parameter does not match any of the above and is a Python
+ identifier, get said object from the user namespace, set it as the
+ runner, and activate autoawait.
+
+ If the object is a fully qualified object name, attempt to import it,
+ set it as the runner, and activate autoawait.
+
+ The exact behavior of autoawait is experimental and subject to change
+ across versions of IPython and Python.
+ """
+
+ param = parameter_s.strip()
+ d = {True: "on", False: "off"}
+
+ if not param:
+ print("IPython autoawait is `{}`, and set to use `{}`".format(
+ d[self.shell.autoawait],
+ self.shell.loop_runner
+ ))
+ return None
+
+ if param.lower() in ('false', 'off'):
+ self.shell.autoawait = False
+ return None
+ if param.lower() in ('true', 'on'):
+ self.shell.autoawait = True
+ return None
+
+ if param in self.shell.loop_runner_map:
+ self.shell.loop_runner, self.shell.autoawait = self.shell.loop_runner_map[param]
+ return None
+
+ if param in self.shell.user_ns :
+ self.shell.loop_runner = self.shell.user_ns[param]
+ self.shell.autoawait = True
+ return None
+
+ runner = import_item(param)
+
+ self.shell.loop_runner = runner
+ self.shell.autoawait = True
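+
+
+# --- Illustrative sketch (not part of upstream IPython) ----------------------
+# The %autoawait docstring above says that a plain Python identifier is looked
+# up in the user namespace and installed as the loop runner. A runner is
+# assumed here to be a callable taking a single coroutine and running it to
+# completion; the exact contract may vary between IPython versions, so treat
+# this only as a hedged example.
+def _demo_loop_runner(coro):
+    """Run *coro* on a fresh asyncio event loop (hypothetical example)."""
+    import asyncio
+
+    loop = asyncio.new_event_loop()
+    try:
+        return loop.run_until_complete(coro)
+    finally:
+        loop.close()
+# In an IPython session one could then activate it with:
+#   %autoawait _demo_loop_runner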
diff --git a/contrib/python/ipython/py3/IPython/core/magics/code.py b/contrib/python/ipython/py3/IPython/core/magics/code.py
new file mode 100644
index 0000000000..65ba52b8bb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/code.py
@@ -0,0 +1,755 @@
+"""Implementation of code management magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import inspect
+import io
+import os
+import re
+import sys
+import ast
+from itertools import chain
+from urllib.request import Request, urlopen
+from urllib.parse import urlencode
+from pathlib import Path
+
+# Our own packages
+from IPython.core.error import TryNext, StdinNotImplementedError, UsageError
+from IPython.core.macro import Macro
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.oinspect import find_file, find_source_lines
+from IPython.core.release import version
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.path import get_py_filename
+from warnings import warn
+from logging import error
+from IPython.utils.text import get_text_list
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+# Used for exception handling in magic_edit
+class MacroToEdit(ValueError): pass
+
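+# Matches the synthetic filenames that IPython gives to interactively defined
+# code, e.g. "<ipython-input-3-9a8bc0ffd7d2>"; group 1 is the prompt number,
+# which %edit uses below to raise InteractivelyDefined.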
+ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
+
+# To match, e.g. 8-10 1:5 :10 3-
+range_re = re.compile(r"""
+(?P<start>\d+)?
+((?P<sep>[\-:])
+ (?P<end>\d+)?)?
+$""", re.VERBOSE)
+
+
+def extract_code_ranges(ranges_str):
+ """Turn a string of range for %%load into 2-tuples of (start, stop)
+ ready to use as a slice of the content split by lines.
+
+ Examples
+ --------
+ list(extract_input_ranges("5-10 2"))
+ [(4, 10), (1, 2)]
+ """
+ for range_str in ranges_str.split():
+ rmatch = range_re.match(range_str)
+ if not rmatch:
+ continue
+ sep = rmatch.group("sep")
+ start = rmatch.group("start")
+ end = rmatch.group("end")
+
+ if sep == '-':
+ start = int(start) - 1 if start else None
+ end = int(end) if end else None
+ elif sep == ':':
+ start = int(start) - 1 if start else None
+ end = int(end) - 1 if end else None
+ else:
+ end = int(start)
+ start = int(start) - 1
+ yield (start, end)
+
+
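+# --- Illustrative sketch (not part of upstream IPython) ----------------------
+# A hypothetical helper showing how the range syntax accepted by ``%load -r``
+# maps onto Python slices via extract_code_ranges() above.
+def _demo_extract_code_ranges():
+    ranges = list(extract_code_ranges("5-10 2"))
+    # "5-10" is an inclusive range, "2" a single line: [(4, 10), (1, 2)]
+    lines = ["line %d" % i for i in range(1, 21)]
+    # Each (start, stop) pair is used directly as a slice of the split content.
+    return [lines[slice(*rng)] for rng in ranges]
+
+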
+def extract_symbols(code, symbols):
+ """
+ Return a tuple (blocks, not_found)
+ where ``blocks`` is a list of code fragments
+ for each symbol parsed from code, and ``not_found`` are
+ symbols not found in the code.
+
+ For example::
+
+ In [1]: code = '''a = 10
+ ...: def b(): return 42
+ ...: class A: pass'''
+
+ In [2]: extract_symbols(code, 'A,b,z')
+ Out[2]: (['class A: pass\\n', 'def b(): return 42\\n'], ['z'])
+ """
+ symbols = symbols.split(',')
+
+ # this will raise SyntaxError if code isn't valid Python
+ py_code = ast.parse(code)
+
+ marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
+ code = code.split('\n')
+
+ symbols_lines = {}
+
+ # we already know the start_lineno of each symbol (marks).
+ # To find each end_lineno, we traverse in reverse order, skipping
+ # trailing blank lines, until we reach the last non-blank line.
+ end = len(code)
+ for name, start in reversed(marks):
+ while not code[end - 1].strip():
+ end -= 1
+ if name:
+ symbols_lines[name] = (start - 1, end)
+ end = start - 1
+
+ # Now symbols_lines is a map
+ # {'symbol_name': (start_lineno, end_lineno), ...}
+
+ # fill a list with chunks of code for each requested symbol
+ blocks = []
+ not_found = []
+ for symbol in symbols:
+ if symbol in symbols_lines:
+ start, end = symbols_lines[symbol]
+ blocks.append('\n'.join(code[start:end]) + '\n')
+ else:
+ not_found.append(symbol)
+
+ return blocks, not_found
+
+def strip_initial_indent(lines):
+ """For %load, strip indent from lines until finding an unindented line.
+
+ https://github.com/ipython/ipython/issues/9775
+ """
+ indent_re = re.compile(r'\s+')
+
+ it = iter(lines)
+ first_line = next(it)
+ indent_match = indent_re.match(first_line)
+
+ if indent_match:
+ # First line was indented
+ indent = indent_match.group()
+ yield first_line[len(indent):]
+
+ for line in it:
+ if line.startswith(indent):
+ yield line[len(indent):]
+ else:
+ # Less indented than the first line - stop dedenting
+ yield line
+ break
+ else:
+ yield first_line
+
+ # Pass the remaining lines through without dedenting
+ for line in it:
+ yield line
+
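+
+# --- Illustrative sketch (not part of upstream IPython) ----------------------
+# A hypothetical helper showing how strip_initial_indent() dedents a snippet
+# that was copied from inside a function body, as %load does.
+def _demo_strip_initial_indent():
+    src = [
+        "    x = 1",
+        "    y = x + 1",
+        "print(y)",
+    ]
+    # -> ['x = 1', 'y = x + 1', 'print(y)']
+    return list(strip_initial_indent(src))
+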
+
+class InteractivelyDefined(Exception):
+ """Exception for interactively defined variable in magic_edit"""
+ def __init__(self, index):
+ self.index = index
+
+
+@magics_class
+class CodeMagics(Magics):
+ """Magics related to code management (loading, saving, editing, ...)."""
+
+ def __init__(self, *args, **kwargs):
+ self._knowntemps = set()
+ super(CodeMagics, self).__init__(*args, **kwargs)
+
+ @line_magic
+ def save(self, parameter_s=''):
+ """Save a set of lines or a macro to a given filename.
+
+ Usage:\\
+ %save [options] filename [history]
+
+ Options:
+
+ -r: use 'raw' input. By default, the 'processed' history is used,
+ so that magics are loaded in their transformed version to valid
+ Python. If this option is given, the raw input as typed at the
+ command line is used instead.
+
+ -f: force overwrite. If file exists, %save will prompt for overwrite
+ unless -f is given.
+
+ -a: append to the file instead of overwriting it.
+
+ The history argument uses the same syntax as %history for input ranges,
+ then saves the lines to the filename you specify.
+
+ If no ranges are specified, saves history of the current session up to
+ this point.
+
+ It adds a '.py' extension to the file if you don't do so yourself, and
+ it asks for confirmation before overwriting existing files.
+
+ If `-r` option is used, the default extension is `.ipy`.
+ """
+
+ opts,args = self.parse_options(parameter_s,'fra',mode='list')
+ if not args:
+ raise UsageError('Missing filename.')
+ raw = 'r' in opts
+ force = 'f' in opts
+ append = 'a' in opts
+ mode = 'a' if append else 'w'
+ ext = '.ipy' if raw else '.py'
+ fname, codefrom = args[0], " ".join(args[1:])
+ if not fname.endswith(('.py','.ipy')):
+ fname += ext
+ fname = os.path.expanduser(fname)
+ file_exists = os.path.isfile(fname)
+ if file_exists and not force and not append:
+ try:
+ overwrite = self.shell.ask_yes_no('File `%s` exists. Overwrite (y/[N])? ' % fname, default='n')
+ except StdinNotImplementedError:
+ print("File `%s` exists. Use `%%save -f %s` to force overwrite" % (fname, parameter_s))
+ return
+ if not overwrite :
+ print('Operation cancelled.')
+ return
+ try:
+ cmds = self.shell.find_user_code(codefrom,raw)
+ except (TypeError, ValueError) as e:
+ print(e.args[0])
+ return
+ with io.open(fname, mode, encoding="utf-8") as f:
+ if not file_exists or not append:
+ f.write("# coding: utf-8\n")
+ f.write(cmds)
+ # make sure we end on a newline
+ if not cmds.endswith('\n'):
+ f.write('\n')
+ print('The following commands were written to file `%s`:' % fname)
+ print(cmds)
+
+ @line_magic
+ def pastebin(self, parameter_s=''):
+ """Upload code to dpaste.com, returning the URL.
+
+ Usage:\\
+ %pastebin [-d "Custom description"][-e 24] 1-7
+
+ The argument can be an input history range, a filename, or the name of a
+ string or macro.
+
+ If no arguments are given, uploads the history of this session up to
+ this point.
+
+ Options:
+
+ -d: Pass a custom description. The default will say
+ "Pasted from IPython".
+ -e: Pass the number of days after which the link expires.
+ The default is 7 days.
+ """
+ opts, args = self.parse_options(parameter_s, "d:e:")
+
+ try:
+ code = self.shell.find_user_code(args)
+ except (ValueError, TypeError) as e:
+ print(e.args[0])
+ return
+
+ expiry_days = 7
+ try:
+ expiry_days = int(opts.get("e", 7))
+ except ValueError as e:
+ print(e.args[0].capitalize())
+ return
+ if expiry_days < 1 or expiry_days > 365:
+ print("Expiry days should be in range of 1 to 365")
+ return
+
+ post_data = urlencode(
+ {
+ "title": opts.get("d", "Pasted from IPython"),
+ "syntax": "python",
+ "content": code,
+ "expiry_days": expiry_days,
+ }
+ ).encode("utf-8")
+
+ request = Request(
+ "https://dpaste.com/api/v2/",
+ headers={"User-Agent": "IPython v{}".format(version)},
+ )
+ response = urlopen(request, post_data)
+ return response.headers.get('Location')
+
+ @line_magic
+ def loadpy(self, arg_s):
+ """Alias of `%load`
+
+ `%loadpy` has gained some flexibility and dropped the requirement of a `.py`
+ extension, so it has been renamed to simply `%load`. You can look at
+ `%load`'s docstring for more info.
+ """
+ self.load(arg_s)
+
+ @line_magic
+ def load(self, arg_s):
+ """Load code into the current frontend.
+
+ Usage:\\
+ %load [options] source
+
+ where source can be a filename, URL, input history range, macro, or
+ element in the user namespace
+
+ If no arguments are given, loads the history of this session up to this
+ point.
+
+ Options:
+
+ -r <lines>: Specify lines or ranges of lines to load from the source.
+ Ranges can be specified as x-y (x..y) or in python-style x:y
+ (x..(y-1)). Both limits x and y can be left blank (meaning the
+ beginning and end of the file, respectively).
+
+ -s <symbols>: Specify function or classes to load from python source.
+
+ -y : Don't ask for confirmation when loading source above 200 000 characters.
+
+ -n : Include the user's namespace when searching for source code.
+
+ This magic command can take a local filename, a URL, a history
+ range (see %history) or a macro as argument. It will prompt for
+ confirmation before loading source with more than 200 000 characters, unless
+ the -y flag is passed or the frontend does not support raw_input::
+
+ %load
+ %load myscript.py
+ %load 7-27
+ %load myMacro
+ %load http://www.example.com/myscript.py
+ %load -r 5-10 myscript.py
+ %load -r 10-20,30,40: foo.py
+ %load -s MyClass,wonder_function myscript.py
+ %load -n MyClass
+ %load -n my_module.wonder_function
+ """
+ opts,args = self.parse_options(arg_s,'yns:r:')
+ search_ns = 'n' in opts
+ contents = self.shell.find_user_code(args, search_ns=search_ns)
+
+ if 's' in opts:
+ try:
+ blocks, not_found = extract_symbols(contents, opts['s'])
+ except SyntaxError:
+ # non python code
+ error("Unable to parse the input as valid Python code")
+ return
+
+ if len(not_found) == 1:
+ warn('The symbol `%s` was not found' % not_found[0])
+ elif len(not_found) > 1:
+ warn('The symbols %s were not found' % get_text_list(not_found,
+ wrap_item_with='`')
+ )
+
+ contents = '\n'.join(blocks)
+
+ if 'r' in opts:
+ ranges = opts['r'].replace(',', ' ')
+ lines = contents.split('\n')
+ slices = extract_code_ranges(ranges)
+ contents = [lines[slice(*slc)] for slc in slices]
+ contents = '\n'.join(strip_initial_indent(chain.from_iterable(contents)))
+
+ l = len(contents)
+
+ # 200 000 is ~ 2500 full 80 character lines
+ # so on average, more than 5000 lines
+ if l > 200000 and 'y' not in opts:
+ try:
+ ans = self.shell.ask_yes_no(("The text you're trying to load seems pretty big"\
+ " (%d characters). Continue (y/[N]) ?" % l), default='n' )
+ except StdinNotImplementedError:
+ # assume yes if raw input is not implemented
+ ans = True
+
+ if ans is False :
+ print('Operation cancelled.')
+ return
+
+ contents = "# %load {}\n".format(arg_s) + contents
+
+ self.shell.set_next_input(contents, replace=True)
+
+ @staticmethod
+ def _find_edit_target(shell, args, opts, last_call):
+ """Utility method used by magic_edit to find what to edit."""
+
+ def make_filename(arg):
+ "Make a filename from the given args"
+ try:
+ filename = get_py_filename(arg)
+ except IOError:
+ # If it ends with .py but doesn't already exist, assume we want
+ # a new file.
+ if arg.endswith('.py'):
+ filename = arg
+ else:
+ filename = None
+ return filename
+
+ # Set a few locals from the options for convenience:
+ opts_prev = 'p' in opts
+ opts_raw = 'r' in opts
+
+ # custom exceptions
+ class DataIsObject(Exception): pass
+
+ # Default line number value
+ lineno = opts.get('n',None)
+
+ if opts_prev:
+ args = '_%s' % last_call[0]
+ if args not in shell.user_ns:
+ args = last_call[1]
+
+ # by default this is done with temp files, except when the given
+ # arg is a filename
+ use_temp = True
+
+ data = ''
+
+ # First, see if the arguments should be a filename.
+ filename = make_filename(args)
+ if filename:
+ use_temp = False
+ elif args:
+ # Mode where user specifies ranges of lines, like in %macro.
+ data = shell.extract_input_lines(args, opts_raw)
+ if not data:
+ try:
+ # Load the parameter given as a variable. If not a string,
+ # process it as an object instead (below)
+
+ #print '*** args',args,'type',type(args) # dbg
+ data = eval(args, shell.user_ns)
+ if not isinstance(data, str):
+ raise DataIsObject
+
+ except (NameError,SyntaxError):
+ # given argument is not a variable, try as a filename
+ filename = make_filename(args)
+ if filename is None:
+ warn("Argument given (%s) can't be found as a variable "
+ "or as a filename." % args)
+ return (None, None, None)
+ use_temp = False
+
+ except DataIsObject as e:
+ # macros have a special edit function
+ if isinstance(data, Macro):
+ raise MacroToEdit(data) from e
+
+ # For objects, try to edit the file where they are defined
+ filename = find_file(data)
+ if filename:
+ if 'fakemodule' in filename.lower() and \
+ inspect.isclass(data):
+ # class created by %edit? Try to find source
+ # by looking for method definitions instead, the
+ # __module__ in those classes is FakeModule.
+ attrs = [getattr(data, aname) for aname in dir(data)]
+ for attr in attrs:
+ if not inspect.ismethod(attr):
+ continue
+ filename = find_file(attr)
+ if filename and \
+ 'fakemodule' not in filename.lower():
+ # change the attribute to be the edit
+ # target instead
+ data = attr
+ break
+
+ m = ipython_input_pat.match(os.path.basename(filename))
+ if m:
+ raise InteractivelyDefined(int(m.groups()[0])) from e
+
+ datafile = 1
+ if filename is None:
+ filename = make_filename(args)
+ datafile = 1
+ if filename is not None:
+ # only warn about this if we get a real name
+ warn('Could not find file where `%s` is defined.\n'
+ 'Opening a file named `%s`' % (args, filename))
+ # Now, make sure we can actually read the source (if it was
+ # in a temp file it's gone by now).
+ if datafile:
+ if lineno is None:
+ lineno = find_source_lines(data)
+ if lineno is None:
+ filename = make_filename(args)
+ if filename is None:
+ warn('The file where `%s` was defined '
+ 'cannot be read or found.' % data)
+ return (None, None, None)
+ use_temp = False
+
+ if use_temp:
+ filename = shell.mktempfile(data)
+ print('IPython will make a temporary file named:',filename)
+
+ # use last_call to remember the state of the previous call, but don't
+ # let it be clobbered by successive '-p' calls.
+ try:
+ last_call[0] = shell.displayhook.prompt_count
+ if not opts_prev:
+ last_call[1] = args
+ except:
+ pass
+
+
+ return filename, lineno, use_temp
+
+ def _edit_macro(self,mname,macro):
+ """open an editor with the macro data in a file"""
+ filename = self.shell.mktempfile(macro.value)
+ self.shell.hooks.editor(filename)
+
+ # and make a new macro object, to replace the old one
+ mvalue = Path(filename).read_text(encoding="utf-8")
+ self.shell.user_ns[mname] = Macro(mvalue)
+
+ @skip_doctest
+ @line_magic
+ def edit(self, parameter_s='',last_call=['','']):
+ """Bring up an editor and execute the resulting code.
+
+ Usage:
+ %edit [options] [args]
+
+ %edit runs IPython's editor hook. The default version of this hook is
+ set to call the editor specified by your $EDITOR environment variable.
+ If this isn't found, it will default to vi under Linux/Unix and to
+ notepad under Windows. See the end of this docstring for how to change
+ the editor hook.
+
+ You can also set the value of this editor via the
+ ``TerminalInteractiveShell.editor`` option in your configuration file.
+ This is useful if you wish to use a different editor from your typical
+ default with IPython (and for Windows users who typically don't set
+ environment variables).
+
+ This command allows you to conveniently edit multi-line code right in
+ your IPython session.
+
+ If called without arguments, %edit opens up an empty editor with a
+ temporary file and will execute the contents of this file when you
+ close it (don't forget to save it!).
+
+
+ Options:
+
+ -n <number>: open the editor at a specified line number. By default,
+ the IPython editor hook uses the unix syntax 'editor +N filename', but
+ you can configure this by providing your own modified hook if your
+ favorite editor supports line-number specifications with a different
+ syntax.
+
+ -p: this will call the editor with the same data as the previous time
+ it was used, regardless of how long ago (in your current session) it
+ was.
+
+ -r: use 'raw' input. This option only applies to input taken from the
+ user's history. By default, the 'processed' history is used, so that
+ magics are loaded in their transformed version to valid Python. If
+ this option is given, the raw input as typed at the command line is
+ used instead. When you exit the editor, it will be executed by
+ IPython's own processor.
+
+ -x: do not execute the edited code immediately upon exit. This is
+ mainly useful if you are editing programs which need to be called with
+ command line arguments, which you can then do using %run.
+
+
+ Arguments:
+
+ If arguments are given, the following possibilities exist:
+
+ - If the argument is a filename, IPython will load that into the
+ editor. It will execute its contents with execfile() when you exit,
+ loading any code in the file into your interactive namespace.
+
+ - The arguments are ranges of input history, e.g. "7 ~1/4-6".
+ The syntax is the same as in the %history magic.
+
+ - If the argument is a string variable, its contents are loaded
+ into the editor. You can thus edit any string which contains
+ python code (including the result of previous edits).
+
+ - If the argument is the name of an object (other than a string),
+ IPython will try to locate the file where it was defined and open the
+ editor at the point where it is defined. You can use `%edit function`
+ to load an editor exactly at the point where 'function' is defined,
+ edit it and have the file be executed automatically.
+
+ - If the object is a macro (see %macro for details), this opens up your
+ specified editor with a temporary file containing the macro's data.
+ Upon exit, the macro is reloaded with the contents of the file.
+
+ Note: opening at an exact line is only supported under Unix, and some
+ editors (like kedit and gedit up to Gnome 2.8) do not understand the
+ '+NUMBER' parameter necessary for this feature. Good editors like
+ (X)Emacs, vi, jed, pico and joe all do.
+
+ After executing your code, %edit will return as output the code you
+ typed in the editor (except when it was an existing file). This way
+ you can reload the code in further invocations of %edit as a variable,
+ via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
+ the output.
+
+ Note that %edit is also available through the alias %ed.
+
+ This is an example of creating a simple function inside the editor and
+ then modifying it. First, start up the editor::
+
+ In [1]: edit
+ Editing... done. Executing edited code...
+ Out[1]: 'def foo():\\n print "foo() was defined in an editing
+ session"\\n'
+
+ We can then call the function foo()::
+
+ In [2]: foo()
+ foo() was defined in an editing session
+
+ Now we edit foo. IPython automatically loads the editor with the
+ (temporary) file where foo() was previously defined::
+
+ In [3]: edit foo
+ Editing... done. Executing edited code...
+
+ And if we call foo() again we get the modified version::
+
+ In [4]: foo()
+ foo() has now been changed!
+
+ Here is an example of how to edit a code snippet several
+ times in succession. First we call the editor::
+
+ In [5]: edit
+ Editing... done. Executing edited code...
+ hello
+ Out[5]: "print 'hello'\\n"
+
+ Now we call it again with the previous output (stored in _)::
+
+ In [6]: edit _
+ Editing... done. Executing edited code...
+ hello world
+ Out[6]: "print 'hello world'\\n"
+
+ Now we call it with the output #6 (stored in _6, also as Out[6])::
+
+ In [7]: edit _6
+ Editing... done. Executing edited code...
+ hello again
+ Out[7]: "print 'hello again'\\n"
+
+
+ Changing the default editor hook:
+
+ If you wish to write your own editor hook, you can put it in a
+ configuration file which you load at startup time. The default hook
+ is defined in the IPython.core.hooks module, and you can use that as a
+ starting example for further modifications. That file also has
+ general instructions on how to set a new hook for use once you've
+ defined it."""
+ opts,args = self.parse_options(parameter_s,'prxn:')
+
+ try:
+ filename, lineno, is_temp = self._find_edit_target(self.shell,
+ args, opts, last_call)
+ except MacroToEdit as e:
+ self._edit_macro(args, e.args[0])
+ return
+ except InteractivelyDefined as e:
+ print("Editing In[%i]" % e.index)
+ args = str(e.index)
+ filename, lineno, is_temp = self._find_edit_target(self.shell,
+ args, opts, last_call)
+ if filename is None:
+ # nothing was found, warnings have already been issued,
+ # just give up.
+ return
+
+ if is_temp:
+ self._knowntemps.add(filename)
+ elif (filename in self._knowntemps):
+ is_temp = True
+
+
+ # do actual editing here
+ print('Editing...', end=' ')
+ sys.stdout.flush()
+ filepath = Path(filename)
+ try:
+ # Quote filenames that may have spaces in them when opening
+ # the editor
+ quoted = filename = str(filepath.absolute())
+ if " " in quoted:
+ quoted = "'%s'" % quoted
+ self.shell.hooks.editor(quoted, lineno)
+ except TryNext:
+ warn('Could not open editor')
+ return
+
+ # XXX TODO: should this be generalized for all string vars?
+ # For now, this is special-cased to blocks created by cpaste
+ if args.strip() == "pasted_block":
+ self.shell.user_ns["pasted_block"] = filepath.read_text(encoding="utf-8")
+
+ if 'x' in opts: # -x prevents actual execution
+ print()
+ else:
+ print('done. Executing edited code...')
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ if not is_temp:
+ self.shell.user_ns["__file__"] = filename
+ if "r" in opts: # Untranslated IPython code
+ source = filepath.read_text(encoding="utf-8")
+ self.shell.run_cell(source, store_history=False)
+ else:
+ self.shell.safe_execfile(filename, self.shell.user_ns,
+ self.shell.user_ns)
+
+ if is_temp:
+ try:
+ return filepath.read_text(encoding="utf-8")
+ except IOError as msg:
+ if Path(msg.filename) == filepath:
+ warn('File not found. Did you forget to save?')
+ return
+ else:
+ self.shell.showtraceback()
diff --git a/contrib/python/ipython/py3/IPython/core/magics/config.py b/contrib/python/ipython/py3/IPython/core/magics/config.py
new file mode 100644
index 0000000000..9e1cb38c25
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/config.py
@@ -0,0 +1,140 @@
+"""Implementation of configuration-related magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import re
+
+# Our own packages
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
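+# Matches a bare "Class.trait" query with no "=value" (e.g. "%config
+# IPCompleter.greedy"), in which case %config returns the trait's current
+# value instead of setting it.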
+reg = re.compile(r'^\w+\.\w+$')
+@magics_class
+class ConfigMagics(Magics):
+
+ def __init__(self, shell):
+ super(ConfigMagics, self).__init__(shell)
+ self.configurables = []
+
+ @line_magic
+ def config(self, s):
+ """configure IPython
+
+ %config Class[.trait=value]
+
+ This magic exposes most of the IPython config system. Any
+ Configurable class should be able to be configured with the simple
+ line::
+
+ %config Class.trait=value
+
+ Where `value` will be resolved in the user's namespace, if it is an
+ expression or variable name.
+
+ Examples
+ --------
+
+ To see what classes are available for config, pass no arguments::
+
+ In [1]: %config
+ Available objects for config:
+ AliasManager
+ DisplayFormatter
+ HistoryManager
+ IPCompleter
+ LoggingMagics
+ MagicsManager
+ OSMagics
+ PrefilterManager
+ ScriptMagics
+ TerminalInteractiveShell
+
+ To view what is configurable on a given class, just pass the class
+ name::
+
+ In [2]: %config LoggingMagics
+ LoggingMagics(Magics) options
+ -----------------------------
+ LoggingMagics.quiet=<Bool>
+ Suppress output of log state when logging is enabled
+ Current: False
+
+ but the real use is in setting values::
+
+ In [3]: %config LoggingMagics.quiet = True
+
+ and these values are read from the user_ns if they are variables::
+
+ In [4]: feeling_quiet=False
+
+ In [5]: %config LoggingMagics.quiet = feeling_quiet
+
+ """
+ from traitlets.config.loader import Config
+ # some IPython objects are Configurable, but do not yet have
+ # any configurable traits. Exclude them from the effects of
+ # this magic, as their presence is just noise:
+ configurables = sorted(set([ c for c in self.shell.configurables
+ if c.__class__.class_traits(config=True)
+ ]), key=lambda x: x.__class__.__name__)
+ classnames = [ c.__class__.__name__ for c in configurables ]
+
+ line = s.strip()
+ if not line:
+ # print available configurable names
+ print("Available objects for config:")
+ for name in classnames:
+ print(" ", name)
+ return
+ elif line in classnames:
+ # `%config TerminalInteractiveShell` will print trait info for
+ # TerminalInteractiveShell
+ c = configurables[classnames.index(line)]
+ cls = c.__class__
+ help = cls.class_get_help(c)
+ # strip leading '--' from cl-args:
+ help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
+ print(help)
+ return
+ elif reg.match(line):
+ cls, attr = line.split('.')
+ return getattr(configurables[classnames.index(cls)],attr)
+ elif '=' not in line:
+ msg = "Invalid config statement: %r, "\
+ "should be `Class.trait = value`."
+
+ ll = line.lower()
+ for classname in classnames:
+ if ll == classname.lower():
+ msg = msg + '\nDid you mean %s (note the case)?' % classname
+ break
+
+ raise UsageError( msg % line)
+
+ # otherwise, assume we are setting configurables.
+ # leave quotes on args when splitting, because we want
+ # unquoted args to eval in user_ns
+ cfg = Config()
+ exec("cfg."+line, self.shell.user_ns, locals())
+
+ for configurable in configurables:
+ try:
+ configurable.update_config(cfg)
+ except Exception as e:
+ error(e)
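+
+
+# --- Illustrative sketch (not part of upstream IPython) ----------------------
+# %config builds an empty traitlets Config and exec()s "cfg.<line>" against it,
+# so that "Class.trait = value" becomes a nested config section which is then
+# pushed to every matching configurable. A minimal standalone equivalent,
+# assuming the usual traitlets behaviour of auto-creating sub-sections:
+def _demo_build_config():
+    from traitlets.config.loader import Config
+
+    cfg = Config()
+    line = "LoggingMagics.quiet = True"
+    exec("cfg." + line, {}, {"cfg": cfg})
+    # cfg.LoggingMagics.quiet is now True; %config would pass this cfg to
+    # configurable.update_config(cfg) for each registered configurable.
+    return cfg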
diff --git a/contrib/python/ipython/py3/IPython/core/magics/display.py b/contrib/python/ipython/py3/IPython/core/magics/display.py
new file mode 100644
index 0000000000..6c0eff6884
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/display.py
@@ -0,0 +1,93 @@
+"""Simple magics for display formats"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from IPython.display import display, Javascript, Latex, SVG, HTML, Markdown
+from IPython.core.magic import (
+ Magics, magics_class, cell_magic
+)
+from IPython.core import magic_arguments
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+
+@magics_class
+class DisplayMagics(Magics):
+ """Magics for displaying various output types with literals
+
+ Defines javascript/latex/svg/html cell magics for writing
+ blocks in those languages, to be rendered in the frontend.
+ """
+
+ @cell_magic
+ def js(self, line, cell):
+ """Run the cell block of Javascript code
+
+ Alias of `%%javascript`
+
+ Starting with IPython 8.0, %%javascript is pending deprecation, to be
+ replaced by a more flexible system.
+
+ Please see https://github.com/ipython/ipython/issues/13376
+ """
+ self.javascript(line, cell)
+
+ @cell_magic
+ def javascript(self, line, cell):
+ """Run the cell block of Javascript code
+
+ Starting with IPython 8.0, %%javascript is pending deprecation, to be
+ replaced by a more flexible system.
+
+ Please see https://github.com/ipython/ipython/issues/13376
+ """
+ display(Javascript(cell))
+
+
+ @cell_magic
+ def latex(self, line, cell):
+ """Render the cell as a block of LaTeX
+
+ The subset of LaTeX which is supported depends on the implementation in
+ the client. In the Jupyter Notebook, this magic only renders the subset
+ of LaTeX defined by MathJax
+ [here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
+ display(Latex(cell))
+
+ @cell_magic
+ def svg(self, line, cell):
+ """Render the cell as an SVG literal"""
+ display(SVG(cell))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '--isolated', action='store_true', default=False,
+ help="""Annotate the cell as 'isolated'.
+Isolated cells are rendered inside their own <iframe> tag"""
+ )
+ @cell_magic
+ def html(self, line, cell):
+ """Render the cell as a block of HTML"""
+ args = magic_arguments.parse_argstring(self.html, line)
+ html = HTML(cell)
+ if args.isolated:
+ display(html, metadata={'text/html':{'isolated':True}})
+ else:
+ display(html)
+
+ @cell_magic
+ def markdown(self, line, cell):
+ """Render the cell as Markdown text block"""
+ display(Markdown(cell))
diff --git a/contrib/python/ipython/py3/IPython/core/magics/execution.py b/contrib/python/ipython/py3/IPython/core/magics/execution.py
new file mode 100644
index 0000000000..228cbd9da7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/execution.py
@@ -0,0 +1,1522 @@
+# -*- coding: utf-8 -*-
+"""Implementation of execution-related magic functions."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import ast
+import bdb
+import builtins as builtin_mod
+import cProfile as profile
+import gc
+import itertools
+import math
+import os
+import pstats
+import re
+import shlex
+import sys
+import time
+import timeit
+from ast import Module
+from io import StringIO
+from logging import error
+from pathlib import Path
+from pdb import Restart
+from warnings import warn
+
+from IPython.core import magic_arguments, oinspect, page
+from IPython.core.error import UsageError
+from IPython.core.macro import Macro
+from IPython.core.magic import (
+ Magics,
+ cell_magic,
+ line_cell_magic,
+ line_magic,
+ magics_class,
+ needs_local_scope,
+ no_var_expand,
+ output_can_be_silenced,
+ on_off,
+)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.capture import capture_output
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.ipstruct import Struct
+from IPython.utils.module_paths import find_mod
+from IPython.utils.path import get_py_filename, shellglob
+from IPython.utils.timing import clock, clock2
+from IPython.core.displayhook import DisplayHook
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+
+class TimeitResult(object):
+ """
+ Object returned by the timeit magic with info about the run.
+
+ Contains the following attributes:
+
+ loops: (int) number of loops done per measurement
+ repeat: (int) number of times the measurement has been repeated
+ best: (float) best execution time per loop, i.e. best run time / loops
+ worst: (float) worst execution time per loop, i.e. worst run time / loops
+ all_runs: (list of float) execution time of each run (in s)
+ compile_time: (float) time of statement compilation (s)
+
+ """
+ def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
+ self.loops = loops
+ self.repeat = repeat
+ self.best = best
+ self.worst = worst
+ self.all_runs = all_runs
+ self.compile_time = compile_time
+ self._precision = precision
+ self.timings = [ dt / self.loops for dt in all_runs]
+
+ @property
+ def average(self):
+ return math.fsum(self.timings) / len(self.timings)
+
+ @property
+ def stdev(self):
+ mean = self.average
+ return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
+
+ def __str__(self):
+ pm = '+-'
+ if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+ try:
+ u'\xb1'.encode(sys.stdout.encoding)
+ pm = u'\xb1'
+ except:
+ pass
+ return "{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+ pm=pm,
+ runs=self.repeat,
+ loops=self.loops,
+ loop_plural="" if self.loops == 1 else "s",
+ run_plural="" if self.repeat == 1 else "s",
+ mean=_format_time(self.average, self._precision),
+ std=_format_time(self.stdev, self._precision),
+ )
+
+ def _repr_pretty_(self, p , cycle):
+ unic = self.__str__()
+ p.text(u'<TimeitResult : '+unic+u'>')
+
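+
+# --- Illustrative sketch (not part of upstream IPython) ----------------------
+# A hypothetical demo constructing a TimeitResult by hand, to show how the
+# per-loop timings, mean and standard deviation are derived from the raw
+# per-repeat run times.
+def _demo_timeit_result():
+    all_runs = [0.11, 0.10, 0.12]  # total seconds for each repeat
+    loops = 100                    # loops executed per repeat
+    res = TimeitResult(
+        loops=loops,
+        repeat=len(all_runs),
+        best=min(all_runs) / loops,
+        worst=max(all_runs) / loops,
+        all_runs=all_runs,
+        compile_time=0.0,
+        precision=3,
+    )
+    # res.timings is roughly [0.0011, 0.0010, 0.0012] seconds per loop.
+    return res.average, res.stdev
+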
+
+class TimeitTemplateFiller(ast.NodeTransformer):
+ """Fill in the AST template for timing execution.
+
+ This is quite closely tied to the template definition, which is in
+ :meth:`ExecutionMagics.timeit`.
+ """
+ def __init__(self, ast_setup, ast_stmt):
+ self.ast_setup = ast_setup
+ self.ast_stmt = ast_stmt
+
+ def visit_FunctionDef(self, node):
+ "Fill in the setup statement"
+ self.generic_visit(node)
+ if node.name == "inner":
+ node.body[:1] = self.ast_setup.body
+
+ return node
+
+ def visit_For(self, node):
+ "Fill in the statement to be timed"
+ if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
+ node.body = self.ast_stmt.body
+ return node
+
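+
+# --- Illustrative sketch (not part of upstream IPython) ----------------------
+# The real timing template lives in ExecutionMagics.timeit; this hypothetical
+# demo fills a simplified version of it to show what the transformer above
+# does: the bare names `setup` and `stmt` act as placeholders that are swapped
+# for the parsed setup and statement bodies.
+def _demo_timeit_template_filler():
+    template = (
+        "def inner(_it, _timer):\n"
+        "    setup\n"
+        "    _t0 = _timer()\n"
+        "    for _i in _it:\n"
+        "        stmt\n"
+        "    _t1 = _timer()\n"
+        "    return _t1 - _t0\n"
+    )
+    tree = ast.parse(template)
+    ast_setup = ast.parse("x = list(range(10))")
+    ast_stmt = ast.parse("sorted(x)")
+    tree = TimeitTemplateFiller(ast_setup, ast_stmt).visit(tree)
+    ast.fix_missing_locations(tree)
+    # The resulting module can be compiled and exec'd to obtain inner().
+    return compile(tree, "<timeit-demo>", "exec")
+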
+
+class Timer(timeit.Timer):
+ """Timer class that explicitly uses self.inner
+
+ which is an undocumented implementation detail of CPython,
+ not shared by PyPy.
+ """
+ # Timer.timeit copied from CPython 3.4.2
+ def timeit(self, number=timeit.default_number):
+ """Time 'number' executions of the main statement.
+
+ To be precise, this executes the setup statement once, and
+ then returns the time it takes to execute the main statement
+ a number of times, as a float measured in seconds. The
+ argument is the number of times through the loop, defaulting
+ to one million. The main statement, the setup statement and
+ the timer function to be used are passed to the constructor.
+ """
+ it = itertools.repeat(None, number)
+ gcold = gc.isenabled()
+ gc.disable()
+ try:
+ timing = self.inner(it, self.timer)
+ finally:
+ if gcold:
+ gc.enable()
+ return timing
+
+
+@magics_class
+class ExecutionMagics(Magics):
+ """Magics related to code execution, debugging, profiling, etc.
+
+ """
+
+ def __init__(self, shell):
+ super(ExecutionMagics, self).__init__(shell)
+ # Default execution function used to actually run user code.
+ self.default_runner = None
+
+ @skip_doctest
+ @no_var_expand
+ @line_cell_magic
+ def prun(self, parameter_s='', cell=None):
+
+ """Run a statement through the python code profiler.
+
+ Usage, in line mode:
+ %prun [options] statement
+
+ Usage, in cell mode:
+ %%prun [options] [statement]
+ code...
+ code...
+
+ In cell mode, the additional code lines are appended to the (possibly
+ empty) statement in the first line. Cell mode allows you to easily
+ profile multiline blocks without having to put them in a separate
+ function.
+
+ The given statement (which doesn't require quote marks) is run via the
+ python profiler in a manner similar to the profile.run() function.
+ Namespaces are internally managed to work correctly; profile.run
+ cannot be used in IPython because it makes certain assumptions about
+ namespaces which do not hold under IPython.
+
+ Options:
+
+ -l <limit>
+ you can place restrictions on what or how much of the
+ profile gets printed. The limit value can be:
+
+ * A string: only information for function names containing this string
+ is printed.
+
+ * An integer: only this many lines are printed.
+
+ * A float (between 0 and 1): this fraction of the report is printed
+ (for example, use a limit of 0.4 to see the topmost 40% only).
+
+ You can combine several limits with repeated use of the option. For
+ example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
+ information about class constructors.
+
+ -r
+ return the pstats.Stats object generated by the profiling. This
+ object has all the information about the profile in it, and you can
+ later use it for further analysis or in other functions.
+
+ -s <key>
+ sort profile by given key. You can provide more than one key
+ by using the option several times: '-s key1 -s key2 -s key3...'. The
+ default sorting key is 'time'.
+
+ The following is copied verbatim from the profile documentation
+ referenced below:
+
+ When more than one key is provided, additional keys are used as
+ secondary criteria when there is equality in all keys selected
+ before them.
+
+ Abbreviations can be used for any key names, as long as the
+ abbreviation is unambiguous. The following are the keys currently
+ defined:
+
+ ============ =====================
+ Valid Arg Meaning
+ ============ =====================
+ "calls" call count
+ "cumulative" cumulative time
+ "file" file name
+ "module" file name
+ "pcalls" primitive call count
+ "line" line number
+ "name" function name
+ "nfl" name/file/line
+ "stdname" standard name
+ "time" internal time
+ ============ =====================
+
+ Note that all sorts on statistics are in descending order (placing
+ most time consuming items first), whereas name, file, and line number
+ searches are in ascending order (i.e., alphabetical). The subtle
+ distinction between "nfl" and "stdname" is that the standard name is a
+ sort of the name as printed, which means that the embedded line
+ numbers get compared in an odd way. For example, lines 3, 20, and 40
+ would (if the file names were the same) appear in the string order
+ "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
+ line numbers. In fact, sort_stats("nfl") is the same as
+ sort_stats("name", "file", "line").
+
+ -T <filename>
+ save profile results as shown on screen to a text
+ file. The profile is still shown on screen.
+
+ -D <filename>
+ save (via dump_stats) profile statistics to given
+ filename. This data is in a format understood by the pstats module, and
+ is generated by a call to the dump_stats() method of profile
+ objects. The profile is still shown on screen.
+
+ -q
+ suppress output to the pager. Best used with -T and/or -D above.
+
+ If you want to run complete programs under the profiler's control, use
+ ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
+ contains profiler specific options as described here.
+
+ You can read the complete documentation for the profile module with::
+
+ In [1]: import profile; profile.help()
+
+ .. versionchanged:: 7.3
+ User variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ """
+ opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
+ list_all=True, posix=False)
+ if cell is not None:
+ arg_str += '\n' + cell
+ arg_str = self.shell.transform_cell(arg_str)
+ return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
+
+ def _run_with_profiler(self, code, opts, namespace):
+ """
+ Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
+
+ Parameters
+ ----------
+ code : str
+ Code to be executed.
+ opts : Struct
+ Options parsed by `self.parse_options`.
+ namespace : dict
+ A dictionary for Python namespace (e.g., `self.shell.user_ns`).
+
+ """
+
+ # Fill default values for unspecified options:
+ opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
+
+ prof = profile.Profile()
+ try:
+ prof = prof.runctx(code, namespace, namespace)
+ sys_exit = ''
+ except SystemExit:
+ sys_exit = """*** SystemExit exception caught in code being profiled."""
+
+ stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
+
+ lims = opts.l
+ if lims:
+ lims = [] # rebuild lims with ints/floats/strings
+ for lim in opts.l:
+ try:
+ lims.append(int(lim))
+ except ValueError:
+ try:
+ lims.append(float(lim))
+ except ValueError:
+ lims.append(lim)
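+ # e.g. ``-l __init__ -l 5`` gives lims == ['__init__', 5], which
+ # print_stats() applies as a name filter plus a line-count cap.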
+
+ # Trap output.
+ stdout_trap = StringIO()
+ stats_stream = stats.stream
+ try:
+ stats.stream = stdout_trap
+ stats.print_stats(*lims)
+ finally:
+ stats.stream = stats_stream
+
+ output = stdout_trap.getvalue()
+ output = output.rstrip()
+
+ if 'q' not in opts:
+ page.page(output)
+ print(sys_exit, end=' ')
+
+ dump_file = opts.D[0]
+ text_file = opts.T[0]
+ if dump_file:
+ prof.dump_stats(dump_file)
+ print(
+ f"\n*** Profile stats marshalled to file {repr(dump_file)}.{sys_exit}"
+ )
+ if text_file:
+ pfile = Path(text_file)
+ pfile.touch(exist_ok=True)
+ pfile.write_text(output, encoding="utf-8")
+
+ print(
+ f"\n*** Profile printout saved to text file {repr(text_file)}.{sys_exit}"
+ )
+
+ if 'r' in opts:
+ return stats
+
+ return None
+
+ @line_magic
+ def pdb(self, parameter_s=''):
+ """Control the automatic calling of the pdb interactive debugger.
+
+ Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
+ argument it works as a toggle.
+
+ When an exception is triggered, IPython can optionally call the
+ interactive pdb debugger after the traceback printout. %pdb toggles
+ this feature on and off.
+
+ The initial state of this feature is set in your configuration
+ file (the option is ``InteractiveShell.pdb``).
+
+ If you want to just activate the debugger AFTER an exception has fired,
+ without having to type '%pdb on' and rerunning your code, you can use
+ the %debug magic."""
+
+ par = parameter_s.strip().lower()
+
+ if par:
+ try:
+ new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
+ except KeyError:
+ print ('Incorrect argument. Use on/1, off/0, '
+ 'or nothing for a toggle.')
+ return
+ else:
+ # toggle
+ new_pdb = not self.shell.call_pdb
+
+ # set on the shell
+ self.shell.call_pdb = new_pdb
+ print('Automatic pdb calling has been turned',on_off(new_pdb))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
+ help="""
+ Set break point at LINE in FILE.
+ """
+ )
+ @magic_arguments.argument('statement', nargs='*',
+ help="""
+ Code to run in debugger.
+ You can omit this in cell magic mode.
+ """
+ )
+ @no_var_expand
+ @line_cell_magic
+ @needs_local_scope
+ def debug(self, line="", cell=None, local_ns=None):
+ """Activate the interactive debugger.
+
+ This magic command supports two ways of activating the debugger.
+ One is to activate the debugger before executing code. This way, you
+ can set a breakpoint and step through the code from that point.
+ You can use this mode by giving statements to execute and optionally
+ a breakpoint.
+
+ The other one is to activate the debugger in post-mortem mode. You can
+ activate this mode by simply running %debug without any argument.
+ If an exception has just occurred, this lets you inspect its stack
+ frames interactively. Note that this will always work only on the last
+ traceback that occurred, so you must call this quickly after an
+ exception that you wish to inspect has fired, because if another one
+ occurs, it clobbers the previous one.
+
+ If you want IPython to automatically do this on every exception, see
+ the %pdb magic for more details.
+
+ .. versionchanged:: 7.3
+ When running code, user variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ """
+ args = magic_arguments.parse_argstring(self.debug, line)
+
+ if not (args.breakpoint or args.statement or cell):
+ self._debug_post_mortem()
+ elif not (args.breakpoint or cell):
+ # If there are no breakpoints, the line is just code to execute
+ self._debug_exec(line, None, local_ns)
+ else:
+ # Here we try to reconstruct the code from the output of
+ # parse_argstring. This might not work if the code has spaces;
+ # for example, this fails for `print("a b")`.
+ code = "\n".join(args.statement)
+ if cell:
+ code += "\n" + cell
+ self._debug_exec(code, args.breakpoint, local_ns)
+
+ def _debug_post_mortem(self):
+ self.shell.debugger(force=True)
+
+ def _debug_exec(self, code, breakpoint, local_ns=None):
+ if breakpoint:
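+ # e.g. --breakpoint script.py:40 -> filename "script.py", bp_line 40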
+ (filename, bp_line) = breakpoint.rsplit(':', 1)
+ bp_line = int(bp_line)
+ else:
+ (filename, bp_line) = (None, None)
+ self._run_with_debugger(
+ code, self.shell.user_ns, filename, bp_line, local_ns=local_ns
+ )
+
+ @line_magic
+ def tb(self, s):
+ """Print the last traceback.
+
+ Optionally, specify an exception reporting mode, tuning the
+ verbosity of the traceback. By default the currently-active exception
+ mode is used. See %xmode for changing exception reporting modes.
+
+ Valid modes: Plain, Context, Verbose, and Minimal.
+ """
+ interactive_tb = self.shell.InteractiveTB
+ if s:
+ # Switch exception reporting mode for this one call.
+ # Ensure it is switched back.
+ def xmode_switch_err(name):
+ warn('Error changing %s exception modes.\n%s' %
+ (name,sys.exc_info()[1]))
+
+ new_mode = s.strip().capitalize()
+ original_mode = interactive_tb.mode
+ try:
+ try:
+ interactive_tb.set_mode(mode=new_mode)
+ except Exception:
+ xmode_switch_err('user')
+ else:
+ self.shell.showtraceback()
+ finally:
+ interactive_tb.set_mode(mode=original_mode)
+ else:
+ self.shell.showtraceback()
+
+ @skip_doctest
+ @line_magic
+ def run(self, parameter_s='', runner=None,
+ file_finder=get_py_filename):
+ """Run the named file inside IPython as a program.
+
+ Usage::
+
+ %run [-n -i -e -G]
+ [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
+ ( -m mod | filename ) [args]
+
+ The filename argument should be either a pure Python script (with
+ extension ``.py``), or a file with custom IPython syntax (such as
+ magics). If the latter, the file can be either a script with ``.ipy``
+ extension, or a Jupyter notebook with ``.ipynb`` extension. When running
+ a Jupyter notebook, the output from print statements and other
+ displayed objects will appear in the terminal (even matplotlib figures
+ will open, if a terminal-compliant backend is being used). Note that,
+ at the system command line, the ``jupyter run`` command offers similar
+ functionality for executing notebooks (albeit currently with some
+ differences in supported options).
+
+ Parameters after the filename are passed as command-line arguments to
+ the program (put in sys.argv). Then, control returns to IPython's
+ prompt.
+
+ This is similar to running at a system prompt ``python file args``,
+ but with the advantage of giving you IPython's tracebacks, and of
+ loading all variables into your interactive namespace for further use
+ (unless -p is used, see below).
+
+ The file is executed in a namespace initially consisting only of
+ ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
+ sees its environment as if it were being run as a stand-alone program
+ (except for sharing global objects such as previously imported
+ modules). But after execution, the IPython interactive namespace gets
+ updated with all variables defined in the program (except for __name__
+ and sys.argv). This allows for very convenient loading of code for
+ interactive work, while giving each program a 'clean sheet' to run in.
+
+ Arguments are expanded using shell-like glob match. Patterns
+ '*', '?', '[seq]' and '[!seq]' can be used. Additionally,
+ tilde '~' will be expanded into user's home directory. Unlike
+ real shells, quotation does not suppress expansions. Use
+ *two* back slashes (e.g. ``\\\\*``) to suppress expansions.
+ To completely disable these expansions, you can use -G flag.
+
+ On Windows systems, the use of single quotes `'` when specifying
+ a file is not supported. Use double quotes `"`.
+
+ Options:
+
+ -n
+ __name__ is NOT set to '__main__', but to the running file's name
+ without extension (as python does under import). This allows running
+ scripts and reloading the definitions in them without calling code
+ protected by an ``if __name__ == "__main__"`` clause.
+
+ -i
+ run the file in IPython's namespace instead of an empty one. This
+ is useful if you are experimenting with code written in a text editor
+ which depends on variables defined interactively.
+
+ -e
+ ignore sys.exit() calls or SystemExit exceptions in the script
+ being run. This is particularly useful if IPython is being used to
+ run unittests, which always exit with a sys.exit() call. In such
+ cases you are interested in the output of the test results, not in
+ seeing a traceback of the unittest module.
+
+ -t
+ print timing information at the end of the run. IPython will give
+ you an estimated CPU time consumption for your script, which under
+ Unix uses the resource module to avoid the wraparound problems of
+ time.clock(). Under Unix, an estimate of time spent on system tasks
+ is also given (for Windows platforms this is reported as 0.0).
+
+ If -t is given, an additional ``-N<N>`` option can be given, where <N>
+ must be an integer indicating how many times you want the script to
+ run. The final timing report will include total and per run results.
+
+ For example (testing the script uniq_stable.py)::
+
+ In [1]: run -t uniq_stable
+
+ IPython CPU timings (estimated):
+ User : 0.19597 s.
+ System: 0.0 s.
+
+ In [2]: run -t -N5 uniq_stable
+
+ IPython CPU timings (estimated):
+ Total runs performed: 5
+ Times : Total Per run
+ User : 0.910862 s, 0.1821724 s.
+ System: 0.0 s, 0.0 s.
+
+ -d
+ run your program under the control of pdb, the Python debugger.
+ This allows you to execute your program step by step, watch variables,
+ etc. Internally, what IPython does is similar to calling::
+
+ pdb.run('execfile("YOURFILENAME")')
+
+ with a breakpoint set on line 1 of your file. You can change the line
+ number for this automatic breakpoint to be <N> by using the -bN option
+ (where N must be an integer). For example::
+
+ %run -d -b40 myscript
+
+ will set the first breakpoint at line 40 in myscript.py. Note that
+ the first breakpoint must be set on a line which actually does
+ something (not a comment or docstring) for it to stop execution.
+
+ Or you can specify a breakpoint in a different file::
+
+ %run -d -b myotherfile.py:20 myscript
+
+ When the pdb debugger starts, you will see a (Pdb) prompt. You must
+ first enter 'c' (without quotes) to start execution up to the first
+ breakpoint.
+
+ Entering 'help' gives information about the use of the debugger. You
+ can easily see pdb's full documentation with "import pdb;pdb.help()"
+ at a prompt.
+
+ -p
+ run program under the control of the Python profiler module (which
+ prints a detailed report of execution times, function calls, etc).
+
+ You can pass other options after -p which affect the behavior of the
+ profiler itself. See the docs for %prun for details.
+
+ In this mode, the program's variables do NOT propagate back to the
+ IPython interactive namespace (because they remain in the namespace
+ where the profiler executes them).
+
+ Internally this triggers a call to %prun, see its documentation for
+ details on the options available specifically for profiling.
+
+ There is one special usage for which the text above doesn't apply:
+ if the filename ends with .ipy[nb], the file is run as ipython script,
+ just as if the commands were written on IPython prompt.
+
+ -m
+ specify module name to load instead of script path. Similar to
+ the -m option for the python interpreter. Use this option last if you
+ want to combine with other %run options. Unlike the python interpreter,
+ only source modules are allowed; no .pyc or .pyo files.
+ For example::
+
+ %run -m example
+
+ will run the example module.
+
+ -G
+ disable shell-like glob expansion of arguments.
+
+ """
+
+ # Logic to handle issue #3664
+ # Add '--' after '-m <module_name>' to ignore additional args passed to a module.
+ if '-m' in parameter_s and '--' not in parameter_s:
+ argv = shlex.split(parameter_s, posix=(os.name == 'posix'))
+ for idx, arg in enumerate(argv):
+ if arg and arg.startswith('-') and arg != '-':
+ if arg == '-m':
+ argv.insert(idx + 2, '--')
+ break
+ else:
+ # Positional arg, break
+ break
+ parameter_s = ' '.join(shlex.quote(arg) for arg in argv)
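+ # e.g. "%run -m example --verbose" is rewritten to "-m example -- --verbose",
+ # so "--verbose" reaches the module via sys.argv instead of being parsed
+ # as an option of %run itself.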
+
+ # get arguments and set sys.argv for program to be run.
+ opts, arg_lst = self.parse_options(parameter_s,
+ 'nidtN:b:pD:l:rs:T:em:G',
+ mode='list', list_all=1)
+ if "m" in opts:
+ modulename = opts["m"][0]
+ modpath = find_mod(modulename)
+ if modpath is None:
+ msg = '%r is not a valid modulename on sys.path'%modulename
+ raise Exception(msg)
+ arg_lst = [modpath] + arg_lst
+ try:
+ fpath = None # initialize to make sure fpath is in scope later
+ fpath = arg_lst[0]
+ filename = file_finder(fpath)
+ except IndexError as e:
+ msg = 'you must provide at least a filename.'
+ raise Exception(msg) from e
+ except IOError as e:
+ try:
+ msg = str(e)
+ except UnicodeError:
+ msg = e.message
+ if os.name == 'nt' and re.match(r"^'.*'$",fpath):
+ warn('For Windows, use double quotes to wrap a filename: %run "mypath\\myfile.py"')
+ raise Exception(msg) from e
+ except TypeError:
+ if fpath in sys.meta_path:
+ filename = ""
+ else:
+ raise
+
+ if filename.lower().endswith(('.ipy', '.ipynb')):
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns['__file__'] = filename
+ self.shell.safe_execfile_ipy(filename, raise_exceptions=True)
+ return
+
+ # Control the response to exit() calls made by the script being run
+ exit_ignore = 'e' in opts
+
+ # Make sure that the running script gets a proper sys.argv as if it
+ # were run from a system shell.
+ save_argv = sys.argv # save it for later restoring
+
+ if 'G' in opts:
+ args = arg_lst[1:]
+ else:
+ # tilde and glob expansion
+ args = shellglob(map(os.path.expanduser, arg_lst[1:]))
+
+ sys.argv = [filename] + args # put in the proper filename
+
+ if 'n' in opts:
+ name = Path(filename).stem
+ else:
+ name = '__main__'
+
+ if 'i' in opts:
+ # Run in user's interactive namespace
+ prog_ns = self.shell.user_ns
+ __name__save = self.shell.user_ns['__name__']
+ prog_ns['__name__'] = name
+ main_mod = self.shell.user_module
+
+ # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
+ # set the __file__ global in the script's namespace
+ # TK: Is this necessary in interactive mode?
+ prog_ns['__file__'] = filename
+ else:
+ # Run in a fresh, empty namespace
+
+ # The shell MUST hold a reference to prog_ns so after %run
+ # exits, the python deletion mechanism doesn't zero it out
+ # (leaving dangling references). See interactiveshell for details
+ main_mod = self.shell.new_main_mod(filename, name)
+ prog_ns = main_mod.__dict__
+
+ # pickle fix. See interactiveshell for an explanation. But we need to
+ # make sure that, if we overwrite __main__, we replace it at the end
+ main_mod_name = prog_ns['__name__']
+
+ if main_mod_name == '__main__':
+ restore_main = sys.modules['__main__']
+ else:
+ restore_main = False
+
+ # This needs to be undone at the end to prevent holding references to
+ # every single object ever created.
+ sys.modules[main_mod_name] = main_mod
+
+ if 'p' in opts or 'd' in opts:
+ if 'm' in opts:
+ code = 'run_module(modulename, prog_ns)'
+ code_ns = {
+ 'run_module': self.shell.safe_run_module,
+ 'prog_ns': prog_ns,
+ 'modulename': modulename,
+ }
+ else:
+ if 'd' in opts:
+ # allow exceptions to raise in debug mode
+ code = 'execfile(filename, prog_ns, raise_exceptions=True)'
+ else:
+ code = 'execfile(filename, prog_ns)'
+ code_ns = {
+ 'execfile': self.shell.safe_execfile,
+ 'prog_ns': prog_ns,
+ 'filename': get_py_filename(filename),
+ }
+
+ try:
+ stats = None
+ if 'p' in opts:
+ stats = self._run_with_profiler(code, opts, code_ns)
+ else:
+ if 'd' in opts:
+ bp_file, bp_line = parse_breakpoint(
+ opts.get('b', ['1'])[0], filename)
+ self._run_with_debugger(
+ code, code_ns, filename, bp_line, bp_file)
+ else:
+ if 'm' in opts:
+ def run():
+ self.shell.safe_run_module(modulename, prog_ns)
+ else:
+ if runner is None:
+ runner = self.default_runner
+ if runner is None:
+ runner = self.shell.safe_execfile
+
+ def run():
+ runner(filename, prog_ns, prog_ns,
+ exit_ignore=exit_ignore)
+
+ if 't' in opts:
+ # timed execution
+ try:
+ nruns = int(opts['N'][0])
+ if nruns < 1:
+ error('Number of runs must be >=1')
+ return
+                        except KeyError:
+ nruns = 1
+ self._run_with_timing(run, nruns)
+ else:
+ # regular execution
+ run()
+
+ if 'i' in opts:
+ self.shell.user_ns['__name__'] = __name__save
+ else:
+ # update IPython interactive namespace
+
+ # Some forms of read errors on the file may mean the
+ # __name__ key was never set; using pop we don't have to
+ # worry about a possible KeyError.
+ prog_ns.pop('__name__', None)
+
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns.update(prog_ns)
+ finally:
+ # It's a bit of a mystery why, but __builtins__ can change from
+ # being a module to becoming a dict missing some key data after
+ # %run. As best I can see, this is NOT something IPython is doing
+ # at all, and similar problems have been reported before:
+ # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
+ # Since this seems to be done by the interpreter itself, the best
+ # we can do is to at least restore __builtins__ for the user on
+ # exit.
+ self.shell.user_ns['__builtins__'] = builtin_mod
+
+ # Ensure key global structures are restored
+ sys.argv = save_argv
+ if restore_main:
+ sys.modules['__main__'] = restore_main
+ if '__mp_main__' in sys.modules:
+ sys.modules['__mp_main__'] = restore_main
+ else:
+ # Remove from sys.modules the reference to main_mod we'd
+ # added. Otherwise it will trap references to objects
+ # contained therein.
+ del sys.modules[main_mod_name]
+
+ return stats
+
+ def _run_with_debugger(
+ self, code, code_ns, filename=None, bp_line=None, bp_file=None, local_ns=None
+ ):
+ """
+ Run `code` in debugger with a break point.
+
+ Parameters
+ ----------
+ code : str
+ Code to execute.
+ code_ns : dict
+ A namespace in which `code` is executed.
+ filename : str
+            `code` is run as if it were in `filename`.
+ bp_line : int, optional
+ Line number of the break point.
+ bp_file : str, optional
+ Path to the file in which break point is specified.
+ `filename` is used if not given.
+ local_ns : dict, optional
+ A local namespace in which `code` is executed.
+
+ Raises
+ ------
+ UsageError
+ If the break point given by `bp_line` is not valid.
+
+ """
+ deb = self.shell.InteractiveTB.pdb
+ if not deb:
+ self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
+ deb = self.shell.InteractiveTB.pdb
+
+ # deb.checkline() fails if deb.curframe exists but is None; it can
+ # handle it not existing. https://github.com/ipython/ipython/issues/10028
+ if hasattr(deb, 'curframe'):
+ del deb.curframe
+
+ # reset Breakpoint state, which is moronically kept
+ # in a class
+ bdb.Breakpoint.next = 1
+ bdb.Breakpoint.bplist = {}
+ bdb.Breakpoint.bpbynumber = [None]
+ deb.clear_all_breaks()
+ if bp_line is not None:
+ # Set an initial breakpoint to stop execution
+ maxtries = 10
+ bp_file = bp_file or filename
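+            # Illustrative: with "-b40" pointing at a comment line, checkline()
+            # rejects it and the loop below probes lines 41..50 for a line pdb
+            # considers breakable before giving up with a UsageError.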
+ checkline = deb.checkline(bp_file, bp_line)
+ if not checkline:
+ for bp in range(bp_line + 1, bp_line + maxtries + 1):
+ if deb.checkline(bp_file, bp):
+ break
+ else:
+ msg = ("\nI failed to find a valid line to set "
+ "a breakpoint\n"
+ "after trying up to line: %s.\n"
+ "Please set a valid breakpoint manually "
+ "with the -b option." % bp)
+ raise UsageError(msg)
+ # if we find a good linenumber, set the breakpoint
+ deb.do_break('%s:%s' % (bp_file, bp_line))
+
+ if filename:
+ # Mimic Pdb._runscript(...)
+ deb._wait_for_mainpyfile = True
+ deb.mainpyfile = deb.canonic(filename)
+
+ # Start file run
+ print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
+ try:
+ if filename:
+ # save filename so it can be used by methods on the deb object
+ deb._exec_filename = filename
+ while True:
+ try:
+ trace = sys.gettrace()
+ deb.run(code, code_ns, local_ns)
+ except Restart:
+ print("Restarting")
+ if filename:
+ deb._wait_for_mainpyfile = True
+ deb.mainpyfile = deb.canonic(filename)
+ continue
+ else:
+ break
+ finally:
+ sys.settrace(trace)
+
+ except:
+ etype, value, tb = sys.exc_info()
+ # Skip three frames in the traceback: the %run one,
+ # one inside bdb.py, and the command-line typed by the
+ # user (run by exec in pdb itself).
+ self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
+
+ @staticmethod
+ def _run_with_timing(run, nruns):
+ """
+ Run function `run` and print timing information.
+
+ Parameters
+ ----------
+ run : callable
+ Any callable object which takes no argument.
+ nruns : int
+ Number of times to execute `run`.
+
+ """
+ twall0 = time.perf_counter()
+ if nruns == 1:
+ t0 = clock2()
+ run()
+ t1 = clock2()
+ t_usr = t1[0] - t0[0]
+ t_sys = t1[1] - t0[1]
+ print("\nIPython CPU timings (estimated):")
+ print(" User : %10.2f s." % t_usr)
+ print(" System : %10.2f s." % t_sys)
+ else:
+ runs = range(nruns)
+ t0 = clock2()
+ for nr in runs:
+ run()
+ t1 = clock2()
+ t_usr = t1[0] - t0[0]
+ t_sys = t1[1] - t0[1]
+ print("\nIPython CPU timings (estimated):")
+ print("Total runs performed:", nruns)
+ print(" Times : %10s %10s" % ('Total', 'Per run'))
+ print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
+ print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
+ twall1 = time.perf_counter()
+ print("Wall time: %10.2f s." % (twall1 - twall0))
+
+ @skip_doctest
+ @no_var_expand
+ @line_cell_magic
+ @needs_local_scope
+ def timeit(self, line='', cell=None, local_ns=None):
+ """Time execution of a Python statement or expression
+
+ Usage, in line mode:
+ %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
+ or in cell mode:
+ %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
+ code
+ code...
+
+ Time execution of a Python statement or expression using the timeit
+ module. This function can be used both as a line and cell magic:
+
+ - In line mode you can time a single-line statement (though multiple
+          ones can be chained using semicolons).
+
+ - In cell mode, the statement in the first line is used as setup code
+ (executed but not timed) and the body of the cell is timed. The cell
+ body has access to any variables created in the setup code.
+
+ Options:
+ -n<N>: execute the given statement <N> times in a loop. If <N> is not
+ provided, <N> is determined so as to get sufficient accuracy.
+
+ -r<R>: number of repeats <R>, each consisting of <N> loops, and take the
+ best result.
+ Default: 7
+
+ -t: use time.time to measure the time, which is the default on Unix.
+ This function measures wall time.
+
+ -c: use time.clock to measure the time, which is the default on
+ Windows and measures wall time. On Unix, resource.getrusage is used
+ instead and returns the CPU user time.
+
+ -p<P>: use a precision of <P> digits to display the timing result.
+ Default: 3
+
+ -q: Quiet, do not print result.
+
+ -o: return a TimeitResult that can be stored in a variable to inspect
+        the result in more detail.
+
+ .. versionchanged:: 7.3
+ User variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %timeit pass
+ 8.26 ns ± 0.12 ns per loop (mean ± std. dev. of 7 runs, 100000000 loops each)
+
+ In [2]: u = None
+
+ In [3]: %timeit u is None
+ 29.9 ns ± 0.643 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
+
+ In [4]: %timeit -r 4 u == None
+
+ In [5]: import time
+
+ In [6]: %timeit -n1 time.sleep(2)
+
+ The times reported by %timeit will be slightly higher than those
+ reported by the timeit.py script when variables are accessed. This is
+ due to the fact that %timeit executes the statement in the namespace
+ of the shell, compared with timeit.py, which uses a single setup
+ statement to import function or create variables. Generally, the bias
+ does not matter as long as results from timeit.py are not mixed with
+ those from %timeit."""
+
+ opts, stmt = self.parse_options(
+ line, "n:r:tcp:qo", posix=False, strict=False, preserve_non_opts=True
+ )
+ if stmt == "" and cell is None:
+ return
+
+ timefunc = timeit.default_timer
+ number = int(getattr(opts, "n", 0))
+ default_repeat = 7 if timeit.default_repeat < 7 else timeit.default_repeat
+ repeat = int(getattr(opts, "r", default_repeat))
+ precision = int(getattr(opts, "p", 3))
+ quiet = 'q' in opts
+ return_result = 'o' in opts
+ if hasattr(opts, "t"):
+ timefunc = time.time
+ if hasattr(opts, "c"):
+ timefunc = clock
+
+ timer = Timer(timer=timefunc)
+        # this code has tight coupling to the inner workings of timeit.Timer,
+        # but is there a better way to ensure that the code in stmt has
+        # access to the shell namespace?
+ transform = self.shell.transform_cell
+
+ if cell is None:
+ # called as line magic
+ ast_setup = self.shell.compile.ast_parse("pass")
+ ast_stmt = self.shell.compile.ast_parse(transform(stmt))
+ else:
+ ast_setup = self.shell.compile.ast_parse(transform(stmt))
+ ast_stmt = self.shell.compile.ast_parse(transform(cell))
+
+ ast_setup = self.shell.transform_ast(ast_setup)
+ ast_stmt = self.shell.transform_ast(ast_stmt)
+
+ # Check that these compile to valid Python code *outside* the timer func
+ # Invalid code may become valid when put inside the function & loop,
+ # which messes up error messages.
+ # https://github.com/ipython/ipython/issues/10636
+ self.shell.compile(ast_setup, "<magic-timeit-setup>", "exec")
+ self.shell.compile(ast_stmt, "<magic-timeit-stmt>", "exec")
+
+ # This codestring is taken from timeit.template - we fill it in as an
+ # AST, so that we can apply our AST transformations to the user code
+ # without affecting the timing code.
+ timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
+ ' setup\n'
+ ' _t0 = _timer()\n'
+ ' for _i in _it:\n'
+ ' stmt\n'
+ ' _t1 = _timer()\n'
+ ' return _t1 - _t0\n')
+
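+        # TimeitTemplateFiller (defined elsewhere in this file) is expected to
+        # swap the bare `setup` and `stmt` placeholder statements above for the
+        # user's parsed setup/statement ASTs.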
+ timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
+ timeit_ast = ast.fix_missing_locations(timeit_ast)
+
+ # Track compilation time so it can be reported if too long
+ # Minimum time above which compilation time will be reported
+ tc_min = 0.1
+
+ t0 = clock()
+ code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
+ tc = clock()-t0
+
+ ns = {}
+ glob = self.shell.user_ns
+ # handles global vars with same name as local vars. We store them in conflict_globs.
+ conflict_globs = {}
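+        # Illustrative: if a function-local variable `x` shadows a global `x`,
+        # the global value is stashed in conflict_globs while local_ns is merged
+        # into glob for the timing run, and restored afterwards.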
+ if local_ns and cell is None:
+ for var_name, var_val in glob.items():
+ if var_name in local_ns:
+ conflict_globs[var_name] = var_val
+ glob.update(local_ns)
+
+ exec(code, glob, ns)
+ timer.inner = ns["inner"]
+
+ # This is used to check if there is a huge difference between the
+ # best and worst timings.
+ # Issue: https://github.com/ipython/ipython/issues/6471
+ if number == 0:
+ # determine number so that 0.2 <= total time < 2.0
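+            # Illustrative: if one loop takes ~1 ms, this scan settles on
+            # number=1000, the first power of ten whose total time is >= 0.2 s.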
+ for index in range(0, 10):
+ number = 10 ** index
+ time_number = timer.timeit(number)
+ if time_number >= 0.2:
+ break
+
+ all_runs = timer.repeat(repeat, number)
+ best = min(all_runs) / number
+ worst = max(all_runs) / number
+ timeit_result = TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
+
+ # Restore global vars from conflict_globs
+ if conflict_globs:
+ glob.update(conflict_globs)
+
+        if not quiet:
+            # Check best timing is greater than zero to avoid a
+            # ZeroDivisionError.
+            # In cases where the slowest timing is less than a microsecond
+            # we assume that it does not really matter if the fastest
+            # timing is 4 times faster than the slowest timing or not.
+ if worst > 4 * best and best > 0 and worst > 1e-6:
+ print("The slowest run took %0.2f times longer than the "
+ "fastest. This could mean that an intermediate result "
+ "is being cached." % (worst / best))
+
+            print(timeit_result)
+
+ if tc > tc_min:
+ print("Compiler time: %.2f s" % tc)
+ if return_result:
+ return timeit_result
+
+ @skip_doctest
+ @no_var_expand
+ @needs_local_scope
+ @line_cell_magic
+ @output_can_be_silenced
+ def time(self,line='', cell=None, local_ns=None):
+ """Time execution of a Python statement or expression.
+
+ The CPU and wall clock times are printed, and the value of the
+ expression (if any) is returned. Note that under Win32, system time
+ is always reported as 0, since it can not be measured.
+
+ This function can be used both as a line and cell magic:
+
+ - In line mode you can time a single-line statement (though multiple
+          ones can be chained using semicolons).
+
+ - In cell mode, you can time the cell body (a directly
+ following statement raises an error).
+
+ This function provides very basic timing functionality. Use the timeit
+ magic for more control over the measurement.
+
+ .. versionchanged:: 7.3
+ User variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %time 2**128
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00
+          Out[1]: 340282366920938463463374607431768211456
+
+          In [2]: n = 1000000
+
+          In [3]: %time sum(range(n))
+          CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
+          Wall time: 1.37
+          Out[3]: 499999500000
+
+          In [4]: %time print('hello world')
+ hello world
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00
+
+ .. note::
+ The time needed by Python to compile the given expression will be
+ reported if it is more than 0.1s.
+
+ In the example below, the actual exponentiation is done by Python
+ at compilation time, so while the expression can take a noticeable
+ amount of time to compute, that time is purely due to the
+ compilation::
+
+ In [5]: %time 3**9999;
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00 s
+
+ In [6]: %time 3**999999;
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00 s
+ Compiler : 0.78 s
+ """
+ # fail immediately if the given expression can't be compiled
+
+ if line and cell:
+ raise UsageError("Can't use statement directly after '%%time'!")
+
+ if cell:
+ expr = self.shell.transform_cell(cell)
+ else:
+ expr = self.shell.transform_cell(line)
+
+ # Minimum time above which parse time will be reported
+ tp_min = 0.1
+
+ t0 = clock()
+ expr_ast = self.shell.compile.ast_parse(expr)
+ tp = clock()-t0
+
+ # Apply AST transformations
+ expr_ast = self.shell.transform_ast(expr_ast)
+
+ # Minimum time above which compilation time will be reported
+ tc_min = 0.1
+
+ expr_val=None
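+        # Illustrative: "%time 2**10" is a single expression and goes through
+        # the 'eval' branch (its value is returned); a multi-statement %%time
+        # cell goes through 'exec', with any trailing expression compiled and
+        # evaluated separately so its value can still be returned.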
+ if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
+ mode = 'eval'
+ source = '<timed eval>'
+ expr_ast = ast.Expression(expr_ast.body[0].value)
+ else:
+ mode = 'exec'
+ source = '<timed exec>'
+ # multi-line %%time case
+ if len(expr_ast.body) > 1 and isinstance(expr_ast.body[-1], ast.Expr):
+ expr_val= expr_ast.body[-1]
+ expr_ast = expr_ast.body[:-1]
+ expr_ast = Module(expr_ast, [])
+ expr_val = ast.Expression(expr_val.value)
+
+ t0 = clock()
+ code = self.shell.compile(expr_ast, source, mode)
+ tc = clock()-t0
+
+ # skew measurement as little as possible
+ glob = self.shell.user_ns
+ wtime = time.time
+ # time execution
+ wall_st = wtime()
+ if mode=='eval':
+ st = clock2()
+ try:
+ out = eval(code, glob, local_ns)
+ except:
+ self.shell.showtraceback()
+ return
+ end = clock2()
+ else:
+ st = clock2()
+ try:
+ exec(code, glob, local_ns)
+ out=None
+ # multi-line %%time case
+ if expr_val is not None:
+ code_2 = self.shell.compile(expr_val, source, 'eval')
+ out = eval(code_2, glob, local_ns)
+ except:
+ self.shell.showtraceback()
+ return
+ end = clock2()
+
+ wall_end = wtime()
+ # Compute actual times and report
+ wall_time = wall_end - wall_st
+ cpu_user = end[0] - st[0]
+ cpu_sys = end[1] - st[1]
+ cpu_tot = cpu_user + cpu_sys
+ # On windows cpu_sys is always zero, so only total is displayed
+ if sys.platform != "win32":
+ print(
+ f"CPU times: user {_format_time(cpu_user)}, sys: {_format_time(cpu_sys)}, total: {_format_time(cpu_tot)}"
+ )
+ else:
+ print(f"CPU times: total: {_format_time(cpu_tot)}")
+ print(f"Wall time: {_format_time(wall_time)}")
+ if tc > tc_min:
+ print(f"Compiler : {_format_time(tc)}")
+ if tp > tp_min:
+ print(f"Parser : {_format_time(tp)}")
+ return out
+
+ @skip_doctest
+ @line_magic
+ def macro(self, parameter_s=''):
+ """Define a macro for future re-execution. It accepts ranges of history,
+ filenames or string objects.
+
+ Usage:\\
+ %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
+
+ Options:
+
+ -r: use 'raw' input. By default, the 'processed' history is used,
+ so that magics are loaded in their transformed version to valid
+ Python. If this option is given, the raw input as typed at the
+ command line is used instead.
+
+ -q: quiet macro definition. By default, a tag line is printed
+ to indicate the macro has been created, and then the contents of
+ the macro are printed. If this option is given, then no printout
+ is produced once the macro is created.
+
+ This will define a global variable called `name` which is a string
+        made by joining the slices and lines you specify (n1,n2,... numbers
+ above) from your input history into a single string. This variable
+ acts like an automatic function which re-executes those lines as if
+ you had typed them. You just type 'name' at the prompt and the code
+ executes.
+
+ The syntax for indicating input ranges is described in %history.
+
+ Note: as a 'hidden' feature, you can also use traditional python slice
+ notation, where N:M means numbers N through M-1.
+
+        For example, if your history contains (print using %hist -n)::
+
+          44: x=1
+          45: y=3
+          46: z=x+y
+          47: print(x)
+          48: a=5
+          49: print('x', x, 'y', y)
+
+ you can create a macro with lines 44 through 47 (included) and line 49
+ called my_macro with::
+
+ In [55]: %macro my_macro 44-47 49
+
+ Now, typing `my_macro` (without quotes) will re-execute all this code
+ in one pass.
+
+ You don't need to give the line-numbers in order, and any given line
+ number can appear multiple times. You can assemble macros with any
+ lines from your input history in any order.
+
+ The macro is a simple object which holds its value in an attribute,
+ but IPython's display system checks for macros and executes them as
+ code instead of printing them when you type their name.
+
+        You can view a macro's contents by explicitly printing it with::
+
+          print(macro_name)
+
+ """
+ opts,args = self.parse_options(parameter_s,'rq',mode='list')
+ if not args: # List existing macros
+ return sorted(k for k,v in self.shell.user_ns.items() if isinstance(v, Macro))
+ if len(args) == 1:
+ raise UsageError(
+                "%macro insufficient args; usage: '%macro name n1-n2 n3-n4 ...'")
+ name, codefrom = args[0], " ".join(args[1:])
+
+ #print 'rng',ranges # dbg
+ try:
+ lines = self.shell.find_user_code(codefrom, 'r' in opts)
+ except (ValueError, TypeError) as e:
+ print(e.args[0])
+ return
+ macro = Macro(lines)
+ self.shell.define_macro(name, macro)
+ if not ( 'q' in opts) :
+ print('Macro `%s` created. To execute, type its name (without quotes).' % name)
+ print('=== Macro contents: ===')
+ print(macro, end=' ')
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('output', type=str, default='', nargs='?',
+ help="""The name of the variable in which to store output.
+ This is a utils.io.CapturedIO object with stdout/err attributes
+ for the text of the captured output.
+
+ CapturedOutput also has a show() method for displaying the output,
+ and __call__ as well, so you can use that to quickly display the
+ output.
+
+ If unspecified, captured output is discarded.
+ """
+ )
+ @magic_arguments.argument('--no-stderr', action="store_true",
+ help="""Don't capture stderr."""
+ )
+ @magic_arguments.argument('--no-stdout', action="store_true",
+ help="""Don't capture stdout."""
+ )
+ @magic_arguments.argument('--no-display', action="store_true",
+ help="""Don't capture IPython's rich display."""
+ )
+ @cell_magic
+ def capture(self, line, cell):
+ """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
+ args = magic_arguments.parse_argstring(self.capture, line)
+ out = not args.no_stdout
+ err = not args.no_stderr
+ disp = not args.no_display
+ with capture_output(out, err, disp) as io:
+ self.shell.run_cell(cell)
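+        # Illustrative: ending the cell with ";" suppresses storing the capture
+        # and removes any existing variable of that name, mirroring how a
+        # trailing semicolon suppresses output display.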
+ if DisplayHook.semicolon_at_end_of_expression(cell):
+ if args.output in self.shell.user_ns:
+ del self.shell.user_ns[args.output]
+ elif args.output:
+ self.shell.user_ns[args.output] = io
+
+def parse_breakpoint(text, current_file):
+ '''Returns (file, line) for file:line and (current_file, line) for line'''
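+    # Illustrative (hypothetical filenames):
+    #   parse_breakpoint("other.py:20", "main.py") -> ("other.py", 20)
+    #   parse_breakpoint("40", "main.py")          -> ("main.py", 40)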
+ colon = text.find(':')
+ if colon == -1:
+ return current_file, int(text)
+ else:
+ return text[:colon], int(text[colon+1:])
+
+def _format_time(timespan, precision=3):
+ """Formats the timespan in a human readable form"""
+
+ if timespan >= 60.0:
+ # we have more than a minute, format that in a human readable form
+ # Idea from http://snipplr.com/view/5713/
+ parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
+ time = []
+ leftover = timespan
+ for suffix, length in parts:
+ value = int(leftover / length)
+ if value > 0:
+ leftover = leftover % length
+ time.append(u'%s%s' % (str(value), suffix))
+ if leftover < 1:
+ break
+ return " ".join(time)
+
+
+ # Unfortunately the unicode 'micro' symbol can cause problems in
+ # certain terminals.
+ # See bug: https://bugs.launchpad.net/ipython/+bug/348466
+    # Try to prevent crashes by being more defensive than strictly necessary.
+    # E.g. Eclipse is able to print a µ, but has no sys.stdout.encoding set.
+    units = [u"s", u"ms", u"us", u"ns"]  # the safe fallback values
+ if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+ try:
+ u'\xb5'.encode(sys.stdout.encoding)
+ units = [u"s", u"ms",u'\xb5s',"ns"]
+ except:
+ pass
+ scaling = [1, 1e3, 1e6, 1e9]
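+    # Illustrative: 0.00042 s gives order 2, so it is reported as "420 us"
+    # (or "420 µs" when the terminal encoding supports the micro sign).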
+
+ if timespan > 0.0:
+ order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
+ else:
+ order = 3
+ return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
diff --git a/contrib/python/ipython/py3/IPython/core/magics/extension.py b/contrib/python/ipython/py3/IPython/core/magics/extension.py
new file mode 100644
index 0000000000..2bc76b2d55
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/extension.py
@@ -0,0 +1,63 @@
+"""Implementation of magic functions for the extension machinery.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+
+# Our own packages
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class ExtensionMagics(Magics):
+ """Magics to manage the IPython extensions system."""
+
+ @line_magic
+ def load_ext(self, module_str):
+ """Load an IPython extension by its module name."""
+ if not module_str:
+ raise UsageError('Missing module name.')
+ res = self.shell.extension_manager.load_extension(module_str)
+
+ if res == 'already loaded':
+ print("The %s extension is already loaded. To reload it, use:" % module_str)
+ print(" %reload_ext", module_str)
+ elif res == 'no load function':
+ print("The %s module is not an IPython extension." % module_str)
+
+ @line_magic
+ def unload_ext(self, module_str):
+ """Unload an IPython extension by its module name.
+
+ Not all extensions can be unloaded, only those which define an
+ ``unload_ipython_extension`` function.
+ """
+ if not module_str:
+ raise UsageError('Missing module name.')
+
+ res = self.shell.extension_manager.unload_extension(module_str)
+
+ if res == 'no unload function':
+ print("The %s extension doesn't define how to unload it." % module_str)
+ elif res == "not loaded":
+ print("The %s extension is not loaded." % module_str)
+
+ @line_magic
+ def reload_ext(self, module_str):
+ """Reload an IPython extension by its module name."""
+ if not module_str:
+ raise UsageError('Missing module name.')
+ self.shell.extension_manager.reload_extension(module_str)
diff --git a/contrib/python/ipython/py3/IPython/core/magics/history.py b/contrib/python/ipython/py3/IPython/core/magics/history.py
new file mode 100644
index 0000000000..faa4335faa
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/history.py
@@ -0,0 +1,338 @@
+"""Implementation of magic functions related to History.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import os
+import sys
+from io import open as io_open
+import fnmatch
+
+# Our own packages
+from IPython.core.error import StdinNotImplementedError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import io
+
+#-----------------------------------------------------------------------------
+# Magics class implementation
+#-----------------------------------------------------------------------------
+
+
+_unspecified = object()
+
+
+@magics_class
+class HistoryMagics(Magics):
+
+ @magic_arguments()
+ @argument(
+ '-n', dest='print_nums', action='store_true', default=False,
+ help="""
+ print line numbers for each input.
+ This feature is only available if numbered prompts are in use.
+ """)
+ @argument(
+ '-o', dest='get_output', action='store_true', default=False,
+ help="also print outputs for each input.")
+ @argument(
+ '-p', dest='pyprompts', action='store_true', default=False,
+ help="""
+ print classic '>>>' python prompts before each input.
+ This is useful for making documentation, and in conjunction
+ with -o, for producing doctest-ready output.
+ """)
+ @argument(
+ '-t', dest='raw', action='store_false', default=True,
+ help="""
+ print the 'translated' history, as IPython understands it.
+ IPython filters your input and converts it all into valid Python
+ source before executing it (things like magics or aliases are turned
+ into function calls, for example). With this option, you'll see the
+ native history instead of the user-entered version: '%%cd /' will be
+ seen as 'get_ipython().run_line_magic("cd", "/")' instead of '%%cd /'.
+ """)
+ @argument(
+ '-f', dest='filename',
+ help="""
+ FILENAME: instead of printing the output to the screen, redirect
+ it to the given file. The file is always overwritten, though *when
+ it can*, IPython asks for confirmation first. In particular, running
+ the command 'history -f FILENAME' from the IPython Notebook
+ interface will replace FILENAME even if it already exists *without*
+ confirmation.
+ """)
+ @argument(
+ '-g', dest='pattern', nargs='*', default=None,
+ help="""
+ treat the arg as a glob pattern to search for in (full) history.
+ This includes the saved history (almost all commands ever written).
+ The pattern may contain '?' to match one unknown character and '*'
+ to match any number of unknown characters. Use '%%hist -g' to show
+ full saved history (may be very long).
+ """)
+ @argument(
+ '-l', dest='limit', type=int, nargs='?', default=_unspecified,
+ help="""
+ get the last n lines from all sessions. Specify n as a single
+ arg, or the default is the last 10 lines.
+ """)
+ @argument(
+ '-u', dest='unique', action='store_true',
+ help="""
+ when searching history using `-g`, show only unique history.
+ """)
+ @argument('range', nargs='*')
+ @skip_doctest
+ @line_magic
+ def history(self, parameter_s = ''):
+ """Print input history (_i<n> variables), with most recent last.
+
+ By default, input history is printed without line numbers so it can be
+ directly pasted into an editor. Use -n to show them.
+
+ By default, all input history from the current session is displayed.
+ Ranges of history can be indicated using the syntax:
+
+ ``4``
+ Line 4, current session
+ ``4-6``
+ Lines 4-6, current session
+ ``243/1-5``
+ Lines 1-5, session 243
+ ``~2/7``
+ Line 7, session 2 before current
+ ``~8/1-~6/5``
+ From the first line of 8 sessions ago, to the fifth line of 6
+ sessions ago.
+
+ Multiple ranges can be entered, separated by spaces
+
+ The same syntax is used by %macro, %save, %edit, %rerun
+
+ Examples
+ --------
+ ::
+
+ In [6]: %history -n 4-6
+ 4:a = 12
+ 5:print a**2
+ 6:%history -n 4-6
+
+ """
+
+ args = parse_argstring(self.history, parameter_s)
+
+ # For brevity
+ history_manager = self.shell.history_manager
+
+ def _format_lineno(session, line):
+ """Helper function to format line numbers properly."""
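+            # Illustrative: line 12 of the current session -> "12";
+            # line 12 of session 243 -> "243/12".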
+ if session in (0, history_manager.session_number):
+ return str(line)
+ return "%s/%s" % (session, line)
+
+ # Check if output to specific file was requested.
+ outfname = args.filename
+ if not outfname:
+ outfile = sys.stdout # default
+ # We don't want to close stdout at the end!
+ close_at_end = False
+ else:
+ outfname = os.path.expanduser(outfname)
+ if os.path.exists(outfname):
+ try:
+ ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Aborting.')
+ return
+ print("Overwriting file.")
+ outfile = io_open(outfname, 'w', encoding='utf-8')
+ close_at_end = True
+
+ print_nums = args.print_nums
+ get_output = args.get_output
+ pyprompts = args.pyprompts
+ raw = args.raw
+
+ pattern = None
+ limit = None if args.limit is _unspecified else args.limit
+
+ range_pattern = False
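+        # Dispatch (illustrative): "-g foo" searches all sessions for *foo*,
+        # "-l 5" tails the last 5 lines across sessions, and plain ranges such
+        # as "4-6" go through get_range_by_str (optionally filtered by -g below).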
+ if args.pattern is not None and not args.range:
+ if args.pattern:
+ pattern = "*" + " ".join(args.pattern) + "*"
+ else:
+ pattern = "*"
+ hist = history_manager.search(pattern, raw=raw, output=get_output,
+ n=limit, unique=args.unique)
+ print_nums = True
+ elif args.limit is not _unspecified:
+ n = 10 if limit is None else limit
+ hist = history_manager.get_tail(n, raw=raw, output=get_output)
+ else:
+ if args.pattern:
+ range_pattern = "*" + " ".join(args.pattern) + "*"
+ print_nums = True
+ hist = history_manager.get_range_by_str(
+ " ".join(args.range), raw, get_output
+ )
+
+ # We could be displaying the entire history, so let's not try to pull
+ # it into a list in memory. Anything that needs more space will just
+ # misalign.
+ width = 4
+
+ for session, lineno, inline in hist:
+ # Print user history with tabs expanded to 4 spaces. The GUI
+ # clients use hard tabs for easier usability in auto-indented code,
+ # but we want to produce PEP-8 compliant history for safe pasting
+ # into an editor.
+ if get_output:
+ inline, output = inline
+ if range_pattern:
+ if not fnmatch.fnmatch(inline, range_pattern):
+ continue
+ inline = inline.expandtabs(4).rstrip()
+
+ multiline = "\n" in inline
+ line_sep = '\n' if multiline else ' '
+ if print_nums:
+ print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
+ line_sep), file=outfile, end=u'')
+ if pyprompts:
+ print(u">>> ", end=u"", file=outfile)
+ if multiline:
+ inline = "\n... ".join(inline.splitlines()) + "\n..."
+ print(inline, file=outfile)
+ if get_output and output:
+ print(output, file=outfile)
+
+ if close_at_end:
+ outfile.close()
+
+ @line_magic
+ def recall(self, arg):
+ r"""Repeat a command, or get command to input line for editing.
+
+ %recall and %rep are equivalent.
+
+ - %recall (no arguments):
+
+ Place a string version of last computation result (stored in the
+ special '_' variable) to the next input prompt. Allows you to create
+ elaborate command lines without using copy-paste::
+
+ In[1]: l = ["hei", "vaan"]
+ In[2]: "".join(l)
+ Out[2]: heivaan
+ In[3]: %recall
+ In[4]: heivaan_ <== cursor blinking
+
+ %recall 45
+
+ Place history line 45 on the next input prompt. Use %hist to find
+ out the number.
+
+ %recall 1-4
+
+ Combine the specified lines into one cell, and place it on the next
+ input prompt. See %history for the slice syntax.
+
+ %recall foo+bar
+
+ If foo+bar can be evaluated in the user namespace, the result is
+ placed at the next input prompt. Otherwise, the history is searched
+ for lines which contain that substring, and the most recent one is
+ placed at the next input prompt.
+ """
+ if not arg: # Last output
+ self.shell.set_next_input(str(self.shell.user_ns["_"]))
+ return
+ # Get history range
+ histlines = self.shell.history_manager.get_range_by_str(arg)
+ cmd = "\n".join(x[2] for x in histlines)
+ if cmd:
+ self.shell.set_next_input(cmd.rstrip())
+ return
+
+ try: # Variable in user namespace
+ cmd = str(eval(arg, self.shell.user_ns))
+ except Exception: # Search for term in history
+ histlines = self.shell.history_manager.search("*"+arg+"*")
+ for h in reversed([x[2] for x in histlines]):
+ if 'recall' in h or 'rep' in h:
+ continue
+ self.shell.set_next_input(h.rstrip())
+ return
+ else:
+ self.shell.set_next_input(cmd.rstrip())
+ return
+ print("Couldn't evaluate or find in history:", arg)
+
+ @line_magic
+ def rerun(self, parameter_s=''):
+ """Re-run previous input
+
+ By default, you can specify ranges of input history to be repeated
+ (as with %history). With no arguments, it will repeat the last line.
+
+ Options:
+
+ -l <n> : Repeat the last n lines of input, not including the
+ current command.
+
+ -g foo : Repeat the most recent line which contains foo
+ """
+ opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')
+ if "l" in opts: # Last n lines
+ try:
+ n = int(opts["l"])
+ except ValueError:
+ print("Number of lines must be an integer")
+ return
+
+ if n == 0:
+ print("Requested 0 last lines - nothing to run")
+ return
+ elif n < 0:
+ print("Number of lines to rerun cannot be negative")
+ return
+
+ hist = self.shell.history_manager.get_tail(n)
+ elif "g" in opts: # Search
+ p = "*"+opts['g']+"*"
+ hist = list(self.shell.history_manager.search(p))
+ for l in reversed(hist):
+ if "rerun" not in l[2]:
+ hist = [l] # The last match which isn't a %rerun
+ break
+ else:
+ hist = [] # No matches except %rerun
+ elif args: # Specify history ranges
+ hist = self.shell.history_manager.get_range_by_str(args)
+ else: # Last line
+ hist = self.shell.history_manager.get_tail(1)
+ hist = [x[2] for x in hist]
+ if not hist:
+ print("No lines in history match specification")
+ return
+ histlines = "\n".join(hist)
+ print("=== Executing: ===")
+ print(histlines)
+ print("=== Output: ===")
+ self.shell.run_cell("\n".join(hist), store_history=False)
diff --git a/contrib/python/ipython/py3/IPython/core/magics/logging.py b/contrib/python/ipython/py3/IPython/core/magics/logging.py
new file mode 100644
index 0000000000..b6b8d8a5af
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/logging.py
@@ -0,0 +1,195 @@
+"""Implementation of magic functions for IPython's own logging.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import os
+import sys
+
+# Our own packages
+from IPython.core.magic import Magics, magics_class, line_magic
+from warnings import warn
+from traitlets import Bool
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class LoggingMagics(Magics):
+ """Magics related to all logging machinery."""
+
+ quiet = Bool(False, help=
+ """
+ Suppress output of log state when logging is enabled
+ """
+ ).tag(config=True)
+
+ @line_magic
+ def logstart(self, parameter_s=''):
+ """Start logging anywhere in a session.
+
+ %logstart [-o|-r|-t|-q] [log_name [log_mode]]
+
+ If no name is given, it defaults to a file named 'ipython_log.py' in your
+ current directory, in 'rotate' mode (see below).
+
+ '%logstart name' saves to file 'name' in 'backup' mode. It saves your
+ history up to that point and then continues logging.
+
+ %logstart takes a second optional parameter: logging mode. This can be one
+ of (note that the modes are given unquoted):
+
+ append
+ Keep logging at the end of any existing file.
+
+ backup
+ Rename any existing file to name~ and start name.
+
+ global
+ Append to a single logfile in your home directory.
+
+ over
+ Overwrite any existing log.
+
+ rotate
+ Create rotating logs: name.1~, name.2~, etc.
+
+ Options:
+
+ -o
+ log also IPython's output. In this mode, all commands which
+ generate an Out[NN] prompt are recorded to the logfile, right after
+ their corresponding input line. The output lines are always
+ prepended with a '#[Out]# ' marker, so that the log remains valid
+ Python code.
+
+ Since this marker is always the same, filtering only the output from
+ a log is very easy, using for example a simple awk call::
+
+ awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py
+
+ -r
+ log 'raw' input. Normally, IPython's logs contain the processed
+ input, so that user lines are logged in their final form, converted
+ into valid Python. For example, %Exit is logged as
+ _ip.magic("Exit"). If the -r flag is given, all input is logged
+ exactly as typed, with no transformations applied.
+
+ -t
+ put timestamps before each input line logged (these are put in
+ comments).
+
+ -q
+ suppress output of logstate message when logging is invoked
+ """
+
+ opts,par = self.parse_options(parameter_s,'ortq')
+ log_output = 'o' in opts
+ log_raw_input = 'r' in opts
+ timestamp = 't' in opts
+ quiet = 'q' in opts
+
+ logger = self.shell.logger
+
+ # if no args are given, the defaults set in the logger constructor by
+ # ipython remain valid
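+        # Illustrative: "%logstart mylog.py append" -> logfname='mylog.py',
+        # logmode='append'; a bare "%logstart mylog.py" falls back to 'backup'.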
+ if par:
+ try:
+ logfname,logmode = par.split()
+ except:
+ logfname = par
+ logmode = 'backup'
+ else:
+ logfname = logger.logfname
+ logmode = logger.logmode
+        # put logfname into rc struct as if it had been called on the command
+        # line, so it ends up saved in the log header. Save it in case we need
+        # to restore it...
+ old_logfile = self.shell.logfile
+ if logfname:
+ logfname = os.path.expanduser(logfname)
+ self.shell.logfile = logfname
+
+ loghead = u'# IPython log file\n\n'
+ try:
+ logger.logstart(logfname, loghead, logmode, log_output, timestamp,
+ log_raw_input)
+ except:
+ self.shell.logfile = old_logfile
+ warn("Couldn't start log: %s" % sys.exc_info()[1])
+ else:
+ # log input history up to this point, optionally interleaving
+ # output if requested
+
+ if timestamp:
+ # disable timestamping for the previous history, since we've
+ # lost those already (no time machine here).
+ logger.timestamp = False
+
+ if log_raw_input:
+ input_hist = self.shell.history_manager.input_hist_raw
+ else:
+ input_hist = self.shell.history_manager.input_hist_parsed
+
+ if log_output:
+ log_write = logger.log_write
+ output_hist = self.shell.history_manager.output_hist
+ for n in range(1,len(input_hist)-1):
+ log_write(input_hist[n].rstrip() + u'\n')
+ if n in output_hist:
+ log_write(repr(output_hist[n]),'output')
+ else:
+ logger.log_write(u'\n'.join(input_hist[1:]))
+ logger.log_write(u'\n')
+ if timestamp:
+ # re-enable timestamping
+ logger.timestamp = True
+
+ if not (self.quiet or quiet):
+ print ('Activating auto-logging. '
+ 'Current session state plus future input saved.')
+ logger.logstate()
+
+ @line_magic
+ def logstop(self, parameter_s=''):
+ """Fully stop logging and close log file.
+
+ In order to start logging again, a new %logstart call needs to be made,
+ possibly (though not necessarily) with a new filename, mode and other
+ options."""
+ self.shell.logger.logstop()
+
+ @line_magic
+ def logoff(self, parameter_s=''):
+ """Temporarily stop logging.
+
+ You must have previously started logging."""
+ self.shell.logger.switch_log(0)
+
+ @line_magic
+ def logon(self, parameter_s=''):
+ """Restart logging.
+
+ This function is for restarting logging which you've temporarily
+ stopped with %logoff. For starting logging for the first time, you
+ must use the %logstart function, which allows you to specify an
+ optional log filename."""
+
+ self.shell.logger.switch_log(1)
+
+ @line_magic
+ def logstate(self, parameter_s=''):
+ """Print the status of the logging system."""
+
+ self.shell.logger.logstate()
diff --git a/contrib/python/ipython/py3/IPython/core/magics/namespace.py b/contrib/python/ipython/py3/IPython/core/magics/namespace.py
new file mode 100644
index 0000000000..5da8f7161a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/namespace.py
@@ -0,0 +1,711 @@
+"""Implementation of namespace-related magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import gc
+import re
+import sys
+
+# Our own packages
+from IPython.core import page
+from IPython.core.error import StdinNotImplementedError, UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.encoding import DEFAULT_ENCODING
+from IPython.utils.openpy import read_py_file
+from IPython.utils.path import get_py_filename
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class NamespaceMagics(Magics):
+ """Magics to manage various aspects of the user's namespace.
+
+ These include listing variables, introspecting into them, etc.
+ """
+
+ @line_magic
+ def pinfo(self, parameter_s='', namespaces=None):
+ """Provide detailed information about an object.
+
+ '%pinfo object' is just a synonym for object? or ?object."""
+
+ #print 'pinfo par: <%s>' % parameter_s # dbg
+ # detail_level: 0 -> obj? , 1 -> obj??
+ detail_level = 0
+ # We need to detect if we got called as 'pinfo pinfo foo', which can
+ # happen if the user types 'pinfo foo?' at the cmd line.
+ pinfo,qmark1,oname,qmark2 = \
+ re.match(r'(pinfo )?(\?*)(.*?)(\??$)',parameter_s).groups()
+ if pinfo or qmark1 or qmark2:
+ detail_level = 1
+ if "*" in oname:
+ self.psearch(oname)
+ else:
+ self.shell._inspect('pinfo', oname, detail_level=detail_level,
+ namespaces=namespaces)
+
+ @line_magic
+ def pinfo2(self, parameter_s='', namespaces=None):
+ """Provide extra detailed information about an object.
+
+ '%pinfo2 object' is just a synonym for object?? or ??object."""
+ self.shell._inspect('pinfo', parameter_s, detail_level=1,
+ namespaces=namespaces)
+
+ @skip_doctest
+ @line_magic
+ def pdef(self, parameter_s='', namespaces=None):
+ """Print the call signature for any callable object.
+
+ If the object is a class, print the constructor information.
+
+ Examples
+ --------
+ ::
+
+ In [3]: %pdef urllib.urlopen
+ urllib.urlopen(url, data=None, proxies=None)
+ """
+ self.shell._inspect('pdef',parameter_s, namespaces)
+
+ @line_magic
+ def pdoc(self, parameter_s='', namespaces=None):
+ """Print the docstring for an object.
+
+ If the given object is a class, it will print both the class and the
+ constructor docstrings."""
+ self.shell._inspect('pdoc',parameter_s, namespaces)
+
+ @line_magic
+ def psource(self, parameter_s='', namespaces=None):
+ """Print (or run through pager) the source code for an object."""
+ if not parameter_s:
+ raise UsageError('Missing object name.')
+ self.shell._inspect('psource',parameter_s, namespaces)
+
+ @line_magic
+ def pfile(self, parameter_s='', namespaces=None):
+ """Print (or run through pager) the file where an object is defined.
+
+ The file opens at the line where the object definition begins. IPython
+ will honor the environment variable PAGER if set, and otherwise will
+ do its best to print the file in a convenient form.
+
+ If the given argument is not an object currently defined, IPython will
+ try to interpret it as a filename (automatically adding a .py extension
+ if needed). You can thus use %pfile as a syntax highlighting code
+ viewer."""
+
+ # first interpret argument as an object name
+ out = self.shell._inspect('pfile',parameter_s, namespaces)
+ # if not, try the input as a filename
+ if out == 'not found':
+ try:
+ filename = get_py_filename(parameter_s)
+ except IOError as msg:
+ print(msg)
+ return
+ page.page(self.shell.pycolorize(read_py_file(filename, skip_encoding_cookie=False)))
+
+ @line_magic
+ def psearch(self, parameter_s=''):
+ """Search for object in namespaces by wildcard.
+
+ %psearch [options] PATTERN [OBJECT TYPE]
+
+ Note: ? can be used as a synonym for %psearch, at the beginning or at
+ the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
+ rest of the command line must be unchanged (options come first), so
+ for example the following forms are equivalent
+
+ %psearch -i a* function
+ -i a* function?
+ ?-i a* function
+
+ Arguments:
+
+ PATTERN
+
+ where PATTERN is a string containing * as a wildcard similar to its
+ use in a shell. The pattern is matched in all namespaces on the
+ search path. By default objects starting with a single _ are not
+        matched; many IPython-generated objects have a single
+ underscore. The default is case insensitive matching. Matching is
+ also done on the attributes of objects and not only on the objects
+ in a module.
+
+ [OBJECT TYPE]
+
+ Is the name of a python type from the types module. The name is
+ given in lowercase without the ending type, ex. StringType is
+ written string. By adding a type here only objects matching the
+ given type are matched. Using all here makes the pattern match all
+ types (this is the default).
+
+ Options:
+
+ -a: makes the pattern match even objects whose names start with a
+ single underscore. These names are normally omitted from the
+ search.
+
+ -i/-c: make the pattern case insensitive/sensitive. If neither of
+ these options are given, the default is read from your configuration
+ file, with the option ``InteractiveShell.wildcards_case_sensitive``.
+ If this option is not specified in your configuration file, IPython's
+ internal default is to do a case sensitive search.
+
+ -e/-s NAMESPACE: exclude/search a given namespace. The pattern you
+ specify can be searched in any of the following namespaces:
+ 'builtin', 'user', 'user_global','internal', 'alias', where
+ 'builtin' and 'user' are the search defaults. Note that you should
+ not use quotes when specifying namespaces.
+
+ -l: List all available object types for object matching. This function
+ can be used without arguments.
+
+        'Builtin' contains the python builtins module, 'user' contains all
+        user data, 'alias' contains only the shell aliases and no python
+ objects, 'internal' contains objects used by IPython. The
+ 'user_global' namespace is only used by embedded IPython instances,
+ and it contains module-level globals. You can add namespaces to the
+ search with -s or exclude them with -e (these options can be given
+ more than once).
+
+ Examples
+ --------
+ ::
+
+ %psearch a* -> objects beginning with an a
+ %psearch -e builtin a* -> objects NOT in the builtin space starting in a
+ %psearch a* function -> all functions beginning with an a
+ %psearch re.e* -> objects beginning with an e in module re
+ %psearch r*.e* -> objects that start with e in modules starting in r
+ %psearch r*.* string -> all strings in modules beginning with r
+
+ Case sensitive search::
+
+          %psearch -c a*        list all objects beginning with lower case a
+
+ Show objects beginning with a single _::
+
+ %psearch -a _* list objects beginning with a single underscore
+
+ List available objects::
+
+ %psearch -l list all available object types
+ """
+ # default namespaces to be searched
+ def_search = ['user_local', 'user_global', 'builtin']
+
+ # Process options/args
+ opts,args = self.parse_options(parameter_s,'cias:e:l',list_all=True)
+ opt = opts.get
+ shell = self.shell
+ psearch = shell.inspector.psearch
+
+ # select list object types
+ list_types = False
+ if 'l' in opts:
+ list_types = True
+
+ # select case options
+ if 'i' in opts:
+ ignore_case = True
+ elif 'c' in opts:
+ ignore_case = False
+ else:
+ ignore_case = not shell.wildcards_case_sensitive
+
+ # Build list of namespaces to search from user options
+ def_search.extend(opt('s',[]))
+        ns_exclude = opt('e', [])
+ ns_search = [nm for nm in def_search if nm not in ns_exclude]
+
+ # Call the actual search
+ try:
+ psearch(args,shell.ns_table,ns_search,
+ show_all=opt('a'),ignore_case=ignore_case, list_types=list_types)
+ except:
+ shell.showtraceback()
+
+ @skip_doctest
+ @line_magic
+ def who_ls(self, parameter_s=''):
+ """Return a sorted list of all interactive variables.
+
+ If arguments are given, only variables of types matching these
+ arguments are returned.
+
+ Examples
+ --------
+ Define two variables and list them with who_ls::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %who_ls
+ Out[3]: ['alpha', 'beta']
+
+ In [4]: %who_ls int
+ Out[4]: ['alpha']
+
+ In [5]: %who_ls str
+ Out[5]: ['beta']
+ """
+
+ user_ns = self.shell.user_ns
+ user_ns_hidden = self.shell.user_ns_hidden
+ nonmatching = object() # This can never be in user_ns
+ out = [ i for i in user_ns
+ if not i.startswith('_') \
+ and (user_ns[i] is not user_ns_hidden.get(i, nonmatching)) ]
+
+ typelist = parameter_s.split()
+ if typelist:
+ typeset = set(typelist)
+ out = [i for i in out if type(user_ns[i]).__name__ in typeset]
+
+ out.sort()
+ return out
+
+ @skip_doctest
+ @line_magic
+ def who(self, parameter_s=''):
+ """Print all interactive variables, with some minimal formatting.
+
+ If any arguments are given, only variables whose type matches one of
+ these are printed. For example::
+
+ %who function str
+
+ will only list functions and strings, excluding all other types of
+ variables. To find the proper type names, simply use type(var) at a
+ command line to see how python prints type names. For example:
+
+ ::
+
+          In [1]: type('hello')
+          Out[1]: str
+
+ indicates that the type name for strings is 'str'.
+
+ ``%who`` always excludes executed names loaded through your configuration
+ file and things which are internal to IPython.
+
+ This is deliberate, as typically you may load many modules and the
+ purpose of %who is to show you only what you've manually defined.
+
+ Examples
+ --------
+
+ Define two variables and list them with who::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %who
+ alpha beta
+
+ In [4]: %who int
+ alpha
+
+ In [5]: %who str
+ beta
+ """
+
+ varlist = self.who_ls(parameter_s)
+ if not varlist:
+ if parameter_s:
+ print('No variables match your requested type.')
+ else:
+ print('Interactive namespace is empty.')
+ return
+
+ # if we have variables, move on...
+ count = 0
+ for i in varlist:
+ print(i+'\t', end=' ')
+ count += 1
+ if count > 8:
+ count = 0
+ print()
+ print()
+
+ @skip_doctest
+ @line_magic
+ def whos(self, parameter_s=''):
+ """Like %who, but gives some extra information about each variable.
+
+ The same type filtering of %who can be applied here.
+
+ For all variables, the type is printed. Additionally it prints:
+
+ - For {},[],(): their length.
+
+ - For numpy arrays, a summary with shape, number of
+ elements, typecode and size in memory.
+
+ - Everything else: a string representation, snipping their middle if
+ too long.
+
+ Examples
+ --------
+ Define two variables and list them with whos::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %whos
+ Variable Type Data/Info
+ --------------------------------
+ alpha int 123
+ beta str test
+ """
+
+ varnames = self.who_ls(parameter_s)
+ if not varnames:
+ if parameter_s:
+ print('No variables match your requested type.')
+ else:
+ print('Interactive namespace is empty.')
+ return
+
+ # if we have variables, move on...
+
+ # for these types, show len() instead of data:
+ seq_types = ['dict', 'list', 'tuple']
+
+ # for numpy arrays, display summary info
+ ndarray_type = None
+ if 'numpy' in sys.modules:
+ try:
+ from numpy import ndarray
+ except ImportError:
+ pass
+ else:
+ ndarray_type = ndarray.__name__
+
+ # Find all variable names and types so we can figure out column sizes
+
+ # some types are well known and can be shorter
+ abbrevs = {'IPython.core.macro.Macro' : 'Macro'}
+ def type_name(v):
+ tn = type(v).__name__
+ return abbrevs.get(tn,tn)
+
+ varlist = [self.shell.user_ns[n] for n in varnames]
+
+ typelist = []
+ for vv in varlist:
+ tt = type_name(vv)
+
+ if tt=='instance':
+ typelist.append( abbrevs.get(str(vv.__class__),
+ str(vv.__class__)))
+ else:
+ typelist.append(tt)
+
+ # column labels and # of spaces as separator
+ varlabel = 'Variable'
+ typelabel = 'Type'
+ datalabel = 'Data/Info'
+ colsep = 3
+ # variable format strings
+ vformat = "{0:<{varwidth}}{1:<{typewidth}}"
+ aformat = "%s: %s elems, type `%s`, %s bytes"
+ # find the size of the columns to format the output nicely
+ varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
+ typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
+ # table header
+ print(varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
+ ' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1))
+ # and the table itself
+ kb = 1024
+ Mb = 1048576 # kb**2
+ for vname,var,vtype in zip(varnames,varlist,typelist):
+ print(vformat.format(vname, vtype, varwidth=varwidth, typewidth=typewidth), end=' ')
+ if vtype in seq_types:
+ print("n="+str(len(var)))
+ elif vtype == ndarray_type:
+ # numpy array: summarize shape, element count, dtype and memory size
+ vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
+ vsize = var.size
+ vbytes = vsize*var.itemsize
+ vdtype = var.dtype
+
+ if vbytes < 100000:
+ print(aformat % (vshape, vsize, vdtype, vbytes))
+ else:
+ print(aformat % (vshape, vsize, vdtype, vbytes), end=' ')
+ if vbytes < Mb:
+ print('(%s kb)' % (vbytes/kb,))
+ else:
+ print('(%s Mb)' % (vbytes/Mb,))
+ else:
+ try:
+ vstr = str(var)
+ except UnicodeEncodeError:
+ vstr = var.encode(DEFAULT_ENCODING,
+ 'backslashreplace')
+ except:
+ vstr = "<object with id %d (str() failed)>" % id(var)
+ vstr = vstr.replace('\n', '\\n')
+ if len(vstr) < 50:
+ print(vstr)
+ else:
+ print(vstr[:25] + "<...>" + vstr[-25:])
+
+ @line_magic
+ def reset(self, parameter_s=''):
+ """Resets the namespace by removing all names defined by the user, if
+ called without arguments, or by removing some types of objects, such
+ as everything currently in IPython's In[] and Out[] containers (see
+ the parameters for details).
+
+ Parameters
+ ----------
+ -f
+ force reset without asking for confirmation.
+ -s
+ 'Soft' reset: Only clears your namespace, leaving history intact.
+ References to objects may be kept. By default (without this option),
+ we do a 'hard' reset, giving you a new session and removing all
+ references to objects from the current session.
+ --aggressive
+ Try to aggressively remove modules from sys.modules; this
+ may allow you to reimport Python modules that have been updated and
+ pick up changes, but can have unintended consequences.
+
+ in
+ reset input history
+ out
+ reset output history
+ dhist
+ reset directory history
+ array
+ reset only variables that are NumPy arrays
+
+ See Also
+ --------
+ reset_selective : invoked as ``%reset_selective``
+
+ Examples
+ --------
+ ::
+
+ In [6]: a = 1
+
+ In [7]: a
+ Out[7]: 1
+
+ In [8]: 'a' in get_ipython().user_ns
+ Out[8]: True
+
+ In [9]: %reset -f
+
+ In [1]: 'a' in get_ipython().user_ns
+ Out[1]: False
+
+ In [2]: %reset -f in
+ Flushing input history
+
+ In [3]: %reset -f dhist in
+ Flushing directory history
+ Flushing input history
+
+ Notes
+ -----
+ Calling this magic from clients that do not implement standard input,
+ such as the ipython notebook interface, will reset the namespace
+ without confirmation.
+ """
+ opts, args = self.parse_options(parameter_s, "sf", "aggressive", mode="list")
+ if "f" in opts:
+ ans = True
+ else:
+ try:
+ ans = self.shell.ask_yes_no(
+ "Once deleted, variables cannot be recovered. Proceed (y/[n])?",
+ default='n')
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Nothing done.')
+ return
+
+ if 's' in opts: # Soft reset
+ user_ns = self.shell.user_ns
+ for i in self.who_ls():
+ del(user_ns[i])
+ elif len(args) == 0: # Hard reset
+ self.shell.reset(new_session=False, aggressive=("aggressive" in opts))
+
+ # reset in/out/dhist/array: previously in extensions/clearcmd.py
+ ip = self.shell
+ user_ns = self.shell.user_ns # local lookup, heavily used
+
+ for target in args:
+ target = target.lower() # make matches case insensitive
+ if target == 'out':
+ print("Flushing output cache (%d entries)" % len(user_ns['_oh']))
+ self.shell.displayhook.flush()
+
+ elif target == 'in':
+ print("Flushing input history")
+ pc = self.shell.displayhook.prompt_count + 1
+ for n in range(1, pc):
+ key = '_i'+repr(n)
+ user_ns.pop(key,None)
+ user_ns.update(dict(_i=u'',_ii=u'',_iii=u''))
+ hm = ip.history_manager
+ # don't delete these, as %save and %macro depend on the length
+ # of these lists being preserved
+ hm.input_hist_parsed[:] = [''] * pc
+ hm.input_hist_raw[:] = [''] * pc
+ # hm has internal machinery for _i,_ii,_iii, clear it out
+ hm._i = hm._ii = hm._iii = hm._i00 = u''
+
+ elif target == 'array':
+ # Support cleaning up numpy arrays
+ try:
+ from numpy import ndarray
+ # This must be done with items and not iteritems because
+ # we're going to modify the dict in-place.
+ for x,val in list(user_ns.items()):
+ if isinstance(val,ndarray):
+ del user_ns[x]
+ except ImportError:
+ print("reset array only works if Numpy is available.")
+
+ elif target == 'dhist':
+ print("Flushing directory history")
+ del user_ns['_dh'][:]
+
+ else:
+ print("Don't know how to reset ", end=' ')
+ print(target + ", please run `%reset?` for details")
+
+ gc.collect()
+
+ @line_magic
+ def reset_selective(self, parameter_s=''):
+ """Resets the namespace by removing names defined by the user.
+
+ Input/Output history are left around in case you need them.
+
+ %reset_selective [-f] regex
+
+ No action is taken if the regex is not provided.
+
+ Options
+ -f : force reset without asking for confirmation.
+
+ See Also
+ --------
+ reset : invoked as ``%reset``
+
+ Examples
+ --------
+ We first fully reset the namespace so your output looks identical to
+ this example for pedagogical reasons; in practice you do not need a
+ full reset::
+
+ In [1]: %reset -f
+
+ Now, with a clean namespace we can make a few variables and use
+ ``%reset_selective`` to only delete names that match our regexp::
+
+ In [2]: a=1; b=2; c=3; b1m=4; b2m=5; b3m=6; b4m=7; b2s=8
+
+ In [3]: who_ls
+ Out[3]: ['a', 'b', 'b1m', 'b2m', 'b2s', 'b3m', 'b4m', 'c']
+
+ In [4]: %reset_selective -f b[2-3]m
+
+ In [5]: who_ls
+ Out[5]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
+
+ In [6]: %reset_selective -f d
+
+ In [7]: who_ls
+ Out[7]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
+
+ In [8]: %reset_selective -f c
+
+ In [9]: who_ls
+ Out[9]: ['a', 'b', 'b1m', 'b2s', 'b4m']
+
+ In [10]: %reset_selective -f b
+
+ In [11]: who_ls
+ Out[11]: ['a']
+
+ Notes
+ -----
+ Calling this magic from clients that do not implement standard input,
+ such as the ipython notebook interface, will reset the namespace
+ without confirmation.
+ """
+
+ opts, regex = self.parse_options(parameter_s,'f')
+
+ if 'f' in opts:
+ ans = True
+ else:
+ try:
+ ans = self.shell.ask_yes_no(
+ "Once deleted, variables cannot be recovered. Proceed (y/[n])? ",
+ default='n')
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Nothing done.')
+ return
+ user_ns = self.shell.user_ns
+ if not regex:
+ print('No regex pattern specified. Nothing done.')
+ return
+ else:
+ try:
+ m = re.compile(regex)
+ except TypeError as e:
+ raise TypeError('regex must be a string or compiled pattern') from e
+ for i in self.who_ls():
+ if m.search(i):
+ del(user_ns[i])
+
+ @line_magic
+ def xdel(self, parameter_s=''):
+ """Delete a variable, trying to clear it from anywhere that
+ IPython's machinery has references to it. By default, this uses
+ the identity of the named object in the user namespace to remove
+ references held under other names. The object is also removed
+ from the output history.
+
+ Options
+ -n : Delete the specified name from all namespaces, without
+ checking their identity.
+ """
+ opts, varname = self.parse_options(parameter_s,'n')
+ try:
+ self.shell.del_var(varname, ('n' in opts))
+ except (NameError, ValueError) as e:
+ print(type(e).__name__ +": "+ str(e))
diff --git a/contrib/python/ipython/py3/IPython/core/magics/osm.py b/contrib/python/ipython/py3/IPython/core/magics/osm.py
new file mode 100644
index 0000000000..f64f1bce6a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/osm.py
@@ -0,0 +1,855 @@
+"""Implementation of magic functions for interaction with the OS.
+
+Note: this module is named 'osm' instead of 'os' to avoid a collision with the
+builtin.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import io
+import os
+import pathlib
+import re
+import sys
+from pprint import pformat
+
+from IPython.core import magic_arguments
+from IPython.core import oinspect
+from IPython.core import page
+from IPython.core.alias import AliasError, Alias
+from IPython.core.error import UsageError
+from IPython.core.magic import (
+ Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic
+)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.openpy import source_to_unicode
+from IPython.utils.process import abbrev_cwd
+from IPython.utils.terminal import set_term_title
+from traitlets import Bool
+from warnings import warn
+
+
+@magics_class
+class OSMagics(Magics):
+ """Magics to interact with the underlying OS (shell-type functionality).
+ """
+
+ cd_force_quiet = Bool(False,
+ help="Force %cd magic to be quiet even if -q is not passed."
+ ).tag(config=True)
+
+ def __init__(self, shell=None, **kwargs):
+
+ # Now define isexec in a cross platform manner.
+ self.is_posix = False
+ self.execre = None
+ if os.name == 'posix':
+ self.is_posix = True
+ else:
+ try:
+ winext = os.environ['pathext'].replace(';','|').replace('.','')
+ except KeyError:
+ winext = 'exe|com|bat|py'
+ try:
+ self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
+ except re.error:
+ warn("Seems like your pathext environmental "
+ "variable is malformed. Please check it to "
+ "enable a proper handle of file extensions "
+ "managed for your system")
+ winext = 'exe|com|bat|py'
+ self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
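+ # Illustrative example (value is hypothetical): with PATHEXT=".COM;.EXE;.BAT",
+ # winext becomes "COM|EXE|BAT", so execre matches names such as "python.exe"
+ # or "run.bat" case-insensitively.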
+
+ # call up the chain
+ super().__init__(shell=shell, **kwargs)
+
+
+ def _isexec_POSIX(self, file):
+ """
+ Test for executable on a POSIX system
+ """
+ if os.access(file.path, os.X_OK):
+ # will fail on macOS if access is not X_OK
+ return file.is_file()
+ return False
+
+
+
+ def _isexec_WIN(self, file):
+ """
+ Test for an executable file on a non-POSIX (Windows) system
+ """
+ return file.is_file() and self.execre.match(file.name) is not None
+
+ def isexec(self, file):
+ """
+ Test for an executable file, dispatching to the platform-specific check
+ """
+ if self.is_posix:
+ return self._isexec_POSIX(file)
+ else:
+ return self._isexec_WIN(file)
+
+
+ @skip_doctest
+ @line_magic
+ def alias(self, parameter_s=''):
+ """Define an alias for a system command.
+
+ '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
+
+ Then, typing 'alias_name params' will execute the system command 'cmd
+ params' (from your underlying operating system).
+
+ Aliases have lower precedence than magic functions and Python normal
+ variables, so if 'foo' is both a Python variable and an alias, the
+ alias can not be executed until 'del foo' removes the Python variable.
+
+ You can use the %l specifier in an alias definition to represent the
+ whole line when the alias is called. For example::
+
+ In [2]: alias bracket echo "Input in brackets: <%l>"
+ In [3]: bracket hello world
+ Input in brackets: <hello world>
+
+ You can also define aliases with parameters using %s specifiers (one
+ per parameter)::
+
+ In [1]: alias parts echo first %s second %s
+ In [2]: %parts A B
+ first A second B
+ In [3]: %parts A
+ Incorrect number of arguments: 2 expected.
+ parts is an alias to: 'echo first %s second %s'
+
+ Note that %l and %s are mutually exclusive. You can only use one or
+ the other in your aliases.
+
+ Aliases expand Python variables just like system calls using ! or !!
+ do: all expressions prefixed with '$' get expanded. For details of
+ the semantic rules, see PEP-215:
+ https://peps.python.org/pep-0215/, whose rules IPython follows for
+ variable expansion. If you want to access a true shell
+ variable, an extra $ is necessary to prevent its expansion by
+ IPython::
+
+ In [6]: alias show echo
+ In [7]: PATH='A Python string'
+ In [8]: show $PATH
+ A Python string
+ In [9]: show $$PATH
+ /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
+
+ You can use the alias facility to access all of $PATH. See the %rehashx
+ function, which automatically creates aliases for the contents of your
+ $PATH.
+
+ If called with no parameters, %alias prints the current alias table
+ for your system. For posix systems, the default aliases are 'cat',
+ 'cp', 'mv', 'rm', 'rmdir', and 'mkdir', and other platform-specific
+ aliases are added. For windows-based systems, the default aliases are
+ 'copy', 'ddir', 'echo', 'ls', 'ldir', 'mkdir', 'ren', and 'rmdir'.
+
+ You can see the definition of an alias by adding a question mark at the
+ end::
+
+ In [1]: cat?
+ Repr: <alias cat for 'cat'>"""
+
+ par = parameter_s.strip()
+ if not par:
+ aliases = sorted(self.shell.alias_manager.aliases)
+ # stored = self.shell.db.get('stored_aliases', {} )
+ # for k, v in stored:
+ # atab.append(k, v[0])
+
+ print("Total number of aliases:", len(aliases))
+ sys.stdout.flush()
+ return aliases
+
+ # Now try to define a new one
+ try:
+ alias,cmd = par.split(None, 1)
+ except ValueError:
+ print(oinspect.getdoc(self.alias))
+ return
+
+ try:
+ self.shell.alias_manager.define_alias(alias, cmd)
+ except AliasError as e:
+ print(e)
+ # end magic_alias
+
+ @line_magic
+ def unalias(self, parameter_s=''):
+ """Remove an alias"""
+
+ aname = parameter_s.strip()
+ try:
+ self.shell.alias_manager.undefine_alias(aname)
+ except ValueError as e:
+ print(e)
+ return
+
+ stored = self.shell.db.get('stored_aliases', {} )
+ if aname in stored:
+ print("Removing %stored alias",aname)
+ del stored[aname]
+ self.shell.db['stored_aliases'] = stored
+
+ @line_magic
+ def rehashx(self, parameter_s=''):
+ """Update the alias table with all executable files in $PATH.
+
+ rehashx explicitly checks that every entry in $PATH is a file
+ with execute access (os.X_OK).
+
+ Under Windows, it checks executability as a match against a
+ '|'-separated string of extensions, stored in the IPython config
+ variable win_exec_ext. This defaults to 'exe|com|bat'.
+
+ This function also resets the root module cache of module completer,
+ used on slow filesystems.
+ """
+ from IPython.core.alias import InvalidAliasError
+
+ # for the benefit of module completer in ipy_completers.py
+ del self.shell.db['rootmodules_cache']
+
+ path = [os.path.abspath(os.path.expanduser(p)) for p in
+ os.environ.get('PATH','').split(os.pathsep)]
+
+ syscmdlist = []
+ savedir = os.getcwd()
+
+ # Now walk the paths looking for executables to alias.
+ try:
+ # write the whole loop for posix/Windows so we don't have an if in
+ # the innermost part
+ if self.is_posix:
+ for pdir in path:
+ try:
+ os.chdir(pdir)
+ except OSError:
+ continue
+
+ # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
+ dirlist = os.scandir(path=pdir)
+ for ff in dirlist:
+ if self.isexec(ff):
+ fname = ff.name
+ try:
+ # Removes dots from the name since ipython
+ # will assume names with dots to be python.
+ if not self.shell.alias_manager.is_alias(fname):
+ self.shell.alias_manager.define_alias(
+ fname.replace('.',''), fname)
+ except InvalidAliasError:
+ pass
+ else:
+ syscmdlist.append(fname)
+ else:
+ no_alias = Alias.blacklist
+ for pdir in path:
+ try:
+ os.chdir(pdir)
+ except OSError:
+ continue
+
+ # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
+ dirlist = os.scandir(pdir)
+ for ff in dirlist:
+ fname = ff.name
+ base, ext = os.path.splitext(fname)
+ if self.isexec(ff) and base.lower() not in no_alias:
+ if ext.lower() == '.exe':
+ fname = base
+ try:
+ # Removes dots from the name since ipython
+ # will assume names with dots to be python.
+ self.shell.alias_manager.define_alias(
+ base.lower().replace('.',''), fname)
+ except InvalidAliasError:
+ pass
+ syscmdlist.append(fname)
+
+ self.shell.db['syscmdlist'] = syscmdlist
+ finally:
+ os.chdir(savedir)
+
+ @skip_doctest
+ @line_magic
+ def pwd(self, parameter_s=''):
+ """Return the current working directory path.
+
+ Examples
+ --------
+ ::
+
+ In [9]: pwd
+ Out[9]: '/home/tsuser/sprint/ipython'
+ """
+ try:
+ return os.getcwd()
+ except FileNotFoundError as e:
+ raise UsageError("CWD no longer exists - please use %cd to change directory.") from e
+
+ @skip_doctest
+ @line_magic
+ def cd(self, parameter_s=''):
+ """Change the current working directory.
+
+ This command automatically maintains an internal list of directories
+ you visit during your IPython session, in the variable ``_dh``. The
+ command :magic:`%dhist` shows this history nicely formatted. You can
+ also do ``cd -<tab>`` to see directory history conveniently.
+ Usage:
+
+ - ``cd 'dir'``: changes to directory 'dir'.
+ - ``cd -``: changes to the last visited directory.
+ - ``cd -<n>``: changes to the n-th directory in the directory history.
+ - ``cd --foo``: change to directory that matches 'foo' in history
+ - ``cd -b <bookmark_name>``: jump to a bookmark set by %bookmark
+ - Hitting a tab key after ``cd -b`` allows you to tab-complete
+ bookmark names.
+
+ .. note::
+ ``cd <bookmark_name>`` is enough if there is no directory
+ ``<bookmark_name>``, but a bookmark with that name exists.
+
+ Options:
+
+ -q Be quiet. Do not print the working directory after the
+ cd command is executed. By default IPython's cd
+ command does print this directory, since the default
+ prompts do not display path information.
+
+ .. note::
+ ``!cd`` doesn't work for this purpose because the shell
+ where ``!command`` runs is immediately discarded after executing
+ 'command'.
+
+ Examples
+ --------
+ ::
+
+ In [10]: cd parent/child
+ /home/tsuser/parent/child
+ """
+
+ try:
+ oldcwd = os.getcwd()
+ except FileNotFoundError:
+ # Happens if the CWD has been deleted.
+ oldcwd = None
+
+ numcd = re.match(r'(-)(\d+)$',parameter_s)
+ # jump in directory history by number
+ if numcd:
+ nn = int(numcd.group(2))
+ try:
+ ps = self.shell.user_ns['_dh'][nn]
+ except IndexError:
+ print('The requested directory does not exist in history.')
+ return
+ else:
+ opts = {}
+ elif parameter_s.startswith('--'):
+ ps = None
+ fallback = None
+ pat = parameter_s[2:]
+ dh = self.shell.user_ns['_dh']
+ # first search only by basename (last component)
+ for ent in reversed(dh):
+ if pat in os.path.basename(ent) and os.path.isdir(ent):
+ ps = ent
+ break
+
+ if fallback is None and pat in ent and os.path.isdir(ent):
+ fallback = ent
+
+ # if we have no last part match, pick the first full path match
+ if ps is None:
+ ps = fallback
+
+ if ps is None:
+ print("No matching entry in directory history")
+ return
+ else:
+ opts = {}
+
+
+ else:
+ opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
+ # jump to previous
+ if ps == '-':
+ try:
+ ps = self.shell.user_ns['_dh'][-2]
+ except IndexError as e:
+ raise UsageError('%cd -: No previous directory to change to.') from e
+ # jump to bookmark if needed
+ else:
+ if not os.path.isdir(ps) or 'b' in opts:
+ bkms = self.shell.db.get('bookmarks', {})
+
+ if ps in bkms:
+ target = bkms[ps]
+ print('(bookmark:%s) -> %s' % (ps, target))
+ ps = target
+ else:
+ if 'b' in opts:
+ raise UsageError("Bookmark '%s' not found. "
+ "Use '%%bookmark -l' to see your bookmarks." % ps)
+
+ # at this point ps should point to the target dir
+ if ps:
+ try:
+ os.chdir(os.path.expanduser(ps))
+ if hasattr(self.shell, 'term_title') and self.shell.term_title:
+ set_term_title(self.shell.term_title_format.format(cwd=abbrev_cwd()))
+ except OSError:
+ print(sys.exc_info()[1])
+ else:
+ cwd = pathlib.Path.cwd()
+ dhist = self.shell.user_ns['_dh']
+ if oldcwd != cwd:
+ dhist.append(cwd)
+ self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
+
+ else:
+ os.chdir(self.shell.home_dir)
+ if hasattr(self.shell, 'term_title') and self.shell.term_title:
+ set_term_title(self.shell.term_title_format.format(cwd="~"))
+ cwd = pathlib.Path.cwd()
+ dhist = self.shell.user_ns['_dh']
+
+ if oldcwd != cwd:
+ dhist.append(cwd)
+ self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
+ if 'q' not in opts and not self.cd_force_quiet and self.shell.user_ns['_dh']:
+ print(self.shell.user_ns['_dh'][-1])
+
+ @line_magic
+ def env(self, parameter_s=''):
+ """Get, set, or list environment variables.
+
+ Usage:\\
+
+ :``%env``: lists all environment variables/values
+ :``%env var``: get value for var
+ :``%env var val``: set value for var
+ :``%env var=val``: set value for var
+ :``%env var=$val``: set value for var, using python expansion if possible
+ """
+ if parameter_s.strip():
+ split = '=' if '=' in parameter_s else ' '
+ bits = parameter_s.split(split)
+ if len(bits) == 1:
+ key = parameter_s.strip()
+ if key in os.environ:
+ return os.environ[key]
+ else:
+ err = "Environment does not have key: {0}".format(key)
+ raise UsageError(err)
+ if len(bits) > 1:
+ return self.set_env(parameter_s)
+ env = dict(os.environ)
+ # hide likely secrets when printing the whole environment
+ for key in list(env):
+ if any(s in key.lower() for s in ('key', 'token', 'secret')):
+ env[key] = '<hidden>'
+
+ return env
+
+ @line_magic
+ def set_env(self, parameter_s):
+ """Set environment variables. Assumptions are that either "val" is a
+ name in the user namespace, or val is something that evaluates to a
+ string.
+
+ Usage:\\
+ :``%set_env var val``: set value for var
+ :``%set_env var=val``: set value for var
+ :``%set_env var=$val``: set value for var, using python expansion if possible
+ """
+ split = '=' if '=' in parameter_s else ' '
+ bits = parameter_s.split(split, 1)
+ if not parameter_s.strip() or len(bits)<2:
+ raise UsageError("usage is 'set_env var=val'")
+ var = bits[0].strip()
+ val = bits[1].strip()
+ if re.match(r'.*\s.*', var):
+ # an environment variable with whitespace is almost certainly
+ # not what the user intended. what's more likely is the wrong
+ # split was chosen, ie for "set_env cmd_args A=B", we chose
+ # '=' for the split and should have chosen ' '. to get around
+ # this, users should just assign directly to os.environ or use
+ # standard magic {var} expansion.
+ err = "refusing to set env var with whitespace: '{0}'"
+ err = err.format(var)
+ raise UsageError(err)
+ os.environ[var] = val
+ print('env: {0}={1}'.format(var,val))
+
+ @line_magic
+ def pushd(self, parameter_s=''):
+ """Place the current dir on stack and change directory.
+
+ Usage:\\
+ %pushd ['dirname']
+ """
+
+ dir_s = self.shell.dir_stack
+ tgt = os.path.expanduser(parameter_s)
+ cwd = os.getcwd().replace(self.shell.home_dir,'~')
+ if tgt:
+ self.cd(parameter_s)
+ dir_s.insert(0,cwd)
+ return self.shell.run_line_magic('dirs', '')
+
+ @line_magic
+ def popd(self, parameter_s=''):
+ """Change to directory popped off the top of the stack.
+ """
+ if not self.shell.dir_stack:
+ raise UsageError("%popd on empty stack")
+ top = self.shell.dir_stack.pop(0)
+ self.cd(top)
+ print("popd ->",top)
+
+ @line_magic
+ def dirs(self, parameter_s=''):
+ """Return the current directory stack."""
+
+ return self.shell.dir_stack
+
+ @line_magic
+ def dhist(self, parameter_s=''):
+ """Print your history of visited directories.
+
+ %dhist -> print full history\\
+ %dhist n -> print last n entries only\\
+ %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\
+
+ This history is automatically maintained by the %cd command, and
+ always available as the global list variable _dh. You can use %cd -<n>
+ to go to directory number <n>.
+
+ Note that most of the time, you should view directory history by entering
+ cd -<TAB>.
+
+ """
+
+ dh = self.shell.user_ns['_dh']
+ if parameter_s:
+ try:
+ args = list(map(int, parameter_s.split()))
+ except:
+ self.arg_err(self.dhist)
+ return
+ if len(args) == 1:
+ ini,fin = max(len(dh)-(args[0]),0),len(dh)
+ elif len(args) == 2:
+ ini,fin = args
+ fin = min(fin, len(dh))
+ else:
+ self.arg_err(self.dhist)
+ return
+ else:
+ ini,fin = 0,len(dh)
+ print('Directory history (kept in _dh)')
+ for i in range(ini, fin):
+ print("%d: %s" % (i, dh[i]))
+
+ @skip_doctest
+ @line_magic
+ def sc(self, parameter_s=''):
+ """Shell capture - run shell command and capture output (DEPRECATED use !).
+
+ DEPRECATED. Suboptimal, retained for backwards compatibility.
+
+ You should use the form 'var = !command' instead. Example:
+
+ "%sc -l myfiles = ls ~" should now be written as
+
+ "myfiles = !ls ~"
+
+ myfiles.s, myfiles.l and myfiles.n still apply as documented
+ below.
+
+ --
+ %sc [options] varname=command
+
+ IPython will run the given command using commands.getoutput(), and
+ will then update the user's interactive namespace with a variable
+ called varname, containing the value of the call. Your command can
+ contain shell wildcards, pipes, etc.
+
+ The '=' sign in the syntax is mandatory, and the variable name you
+ supply must follow Python's standard conventions for valid names.
+
+ (A special format without variable name exists for internal use)
+
+ Options:
+
+ -l: list output. Split the output on newlines into a list before
+ assigning it to the given variable. By default the output is stored
+ as a single string.
+
+ -v: verbose. Print the contents of the variable.
+
+ In most cases you should not need to split as a list, because the
+ returned value is a special type of string which can automatically
+ provide its contents either as a list (split on newlines) or as a
+ space-separated string. These are convenient, respectively, either
+ for sequential processing or to be passed to a shell command.
+
+ For example::
+
+ # Capture into variable a
+ In [1]: sc a=ls *py
+
+ # a is a string with embedded newlines
+ In [2]: a
+ Out[2]: 'setup.py\\nwin32_manual_post_install.py'
+
+ # which can be seen as a list:
+ In [3]: a.l
+ Out[3]: ['setup.py', 'win32_manual_post_install.py']
+
+ # or as a whitespace-separated string:
+ In [4]: a.s
+ Out[4]: 'setup.py win32_manual_post_install.py'
+
+ # a.s is useful to pass as a single command line:
+ In [5]: !wc -l $a.s
+ 146 setup.py
+ 130 win32_manual_post_install.py
+ 276 total
+
+ # while the list form is useful to loop over:
+ In [6]: for f in a.l:
+ ...: !wc -l $f
+ ...:
+ 146 setup.py
+ 130 win32_manual_post_install.py
+
+ Similarly, the lists returned by the -l option are also special, in
+ the sense that you can equally invoke the .s attribute on them to
+ automatically get a whitespace-separated string from their contents::
+
+ In [7]: sc -l b=ls *py
+
+ In [8]: b
+ Out[8]: ['setup.py', 'win32_manual_post_install.py']
+
+ In [9]: b.s
+ Out[9]: 'setup.py win32_manual_post_install.py'
+
+ In summary, both the lists and strings used for output capture have
+ the following special attributes::
+
+ .l (or .list) : value as list.
+ .n (or .nlstr): value as newline-separated string.
+ .s (or .spstr): value as space-separated string.
+ """
+
+ opts,args = self.parse_options(parameter_s, 'lv')
+ # Try to get a variable name and command to run
+ try:
+ # the variable name must be obtained from the parse_options
+ # output, which uses shlex.split to strip options out.
+ var,_ = args.split('=', 1)
+ var = var.strip()
+ # But the command has to be extracted from the original input
+ # parameter_s, not on what parse_options returns, to avoid the
+ # quote stripping which shlex.split performs on it.
+ _,cmd = parameter_s.split('=', 1)
+ except ValueError:
+ var,cmd = '',''
+ # If all looks ok, proceed
+ split = 'l' in opts
+ out = self.shell.getoutput(cmd, split=split)
+ if 'v' in opts:
+ print('%s ==\n%s' % (var, pformat(out)))
+ if var:
+ self.shell.user_ns.update({var:out})
+ else:
+ return out
+
+ @line_cell_magic
+ def sx(self, line='', cell=None):
+ """Shell execute - run shell command and capture output (!! is short-hand).
+
+ %sx command
+
+ IPython will run the given command using commands.getoutput(), and
+ return the result formatted as a list (split on '\\n'). Since the
+ output is _returned_, it will be stored in ipython's regular output
+ cache Out[N] and in the '_N' automatic variables.
+
+ Notes:
+
+ 1) If an input line begins with '!!', then %sx is automatically
+ invoked. That is, while::
+
+ !ls
+
+ causes ipython to simply issue system('ls'), typing::
+
+ !!ls
+
+ is a shorthand equivalent to::
+
+ %sx ls
+
+ 2) %sx differs from %sc in that %sx automatically splits into a list,
+ like '%sc -l'. The reason for this is to make it as easy as possible
+ to process line-oriented shell output via further python commands.
+ %sc is meant to provide much finer control, but requires more
+ typing.
+
+ 3) Just like %sc -l, this is a list with special attributes:
+ ::
+
+ .l (or .list) : value as list.
+ .n (or .nlstr): value as newline-separated string.
+ .s (or .spstr): value as whitespace-separated string.
+
+ This is very useful when trying to use such lists as arguments to
+ system commands."""
+
+ if cell is None:
+ # line magic
+ return self.shell.getoutput(line)
+ else:
+ opts,args = self.parse_options(line, '', 'out=')
+ output = self.shell.getoutput(cell)
+ out_name = opts.get('out', opts.get('o'))
+ if out_name:
+ self.shell.user_ns[out_name] = output
+ else:
+ return output
+
+ system = line_cell_magic('system')(sx)
+ bang = cell_magic('!')(sx)
+
+ @line_magic
+ def bookmark(self, parameter_s=''):
+ """Manage IPython's bookmark system.
+
+ %bookmark <name> - set bookmark to current dir
+ %bookmark <name> <dir> - set bookmark to <dir>
+ %bookmark -l - list all bookmarks
+ %bookmark -d <name> - remove bookmark
+ %bookmark -r - remove all bookmarks
+
+ You can later on access a bookmarked folder with::
+
+ %cd -b <name>
+
+ or simply '%cd <name>' if there is no directory called <name> AND
+ there is such a bookmark defined.
+
+ Your bookmarks persist through IPython sessions, but they are
+ associated with each profile."""
+
+ opts,args = self.parse_options(parameter_s,'drl',mode='list')
+ if len(args) > 2:
+ raise UsageError("%bookmark: too many arguments")
+
+ bkms = self.shell.db.get('bookmarks',{})
+
+ if 'd' in opts:
+ try:
+ todel = args[0]
+ except IndexError as e:
+ raise UsageError(
+ "%bookmark -d: must provide a bookmark to delete") from e
+ else:
+ try:
+ del bkms[todel]
+ except KeyError as e:
+ raise UsageError(
+ "%%bookmark -d: Can't delete bookmark '%s'" % todel) from e
+
+ elif 'r' in opts:
+ bkms = {}
+ elif 'l' in opts:
+ bks = sorted(bkms)
+ if bks:
+ size = max(map(len, bks))
+ else:
+ size = 0
+ fmt = '%-'+str(size)+'s -> %s'
+ print('Current bookmarks:')
+ for bk in bks:
+ print(fmt % (bk, bkms[bk]))
+ else:
+ if not args:
+ raise UsageError("%bookmark: You must specify the bookmark name")
+ elif len(args)==1:
+ bkms[args[0]] = os.getcwd()
+ elif len(args)==2:
+ bkms[args[0]] = args[1]
+ self.shell.db['bookmarks'] = bkms
+
+ @line_magic
+ def pycat(self, parameter_s=''):
+ """Show a syntax-highlighted file through a pager.
+
+ This magic is similar to the cat utility, but it will assume the file
+ to be Python source and will show it with syntax highlighting.
+
+ This magic command can either take a local filename, an url,
+ an history range (see %history) or a macro as argument.
+
+ If no parameter is given, prints out history of current session up to
+ this point. ::
+
+ %pycat myscript.py
+ %pycat 7-27
+ %pycat myMacro
+ %pycat http://www.example.com/myscript.py
+ """
+ try:
+ cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
+ except (ValueError, IOError):
+ print("Error: no such file, variable, URL, history range or macro")
+ return
+
+ page.page(self.shell.pycolorize(source_to_unicode(cont)))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-a', '--append', action='store_true', default=False,
+ help='Append contents of the cell to an existing file. '
+ 'The file will be created if it does not exist.'
+ )
+ @magic_arguments.argument(
+ 'filename', type=str,
+ help='file to write'
+ )
+ @cell_magic
+ def writefile(self, line, cell):
+ """Write the contents of the cell to a file.
+
+ The file will be overwritten unless the -a (--append) flag is specified.
+ """
+ args = magic_arguments.parse_argstring(self.writefile, line)
+ if re.match(r'^(\'.*\'|".*")$', args.filename):
+ filename = os.path.expanduser(args.filename[1:-1])
+ else:
+ filename = os.path.expanduser(args.filename)
+
+ if os.path.exists(filename):
+ if args.append:
+ print("Appending to %s" % filename)
+ else:
+ print("Overwriting %s" % filename)
+ else:
+ print("Writing %s" % filename)
+
+ mode = 'a' if args.append else 'w'
+ with io.open(filename, mode, encoding='utf-8') as f:
+ f.write(cell)
diff --git a/contrib/python/ipython/py3/IPython/core/magics/packaging.py b/contrib/python/ipython/py3/IPython/core/magics/packaging.py
new file mode 100644
index 0000000000..2f7652c169
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/packaging.py
@@ -0,0 +1,112 @@
+"""Implementation of packaging-related magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2018 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import re
+import shlex
+import sys
+from pathlib import Path
+
+from IPython.core.magic import Magics, magics_class, line_magic
+
+
+def _is_conda_environment():
+ """Return True if the current Python executable is in a conda env"""
+ # TODO: does this need to change on windows?
+ return Path(sys.prefix, "conda-meta", "history").exists()
+
+
+def _get_conda_executable():
+ """Find the path to the conda executable"""
+ # Check if there is a conda executable in the same directory as the Python executable.
+ # This is the case within conda's root environment.
+ conda = Path(sys.executable).parent / "conda"
+ if conda.is_file():
+ return str(conda)
+
+ # Otherwise, attempt to extract the executable from conda history.
+ # This applies in any conda environment.
+ history = Path(sys.prefix, "conda-meta", "history").read_text(encoding="utf-8")
+ match = re.search(
+ r"^#\s*cmd:\s*(?P<command>.*conda)\s[create|install]",
+ history,
+ flags=re.MULTILINE,
+ )
+ if match:
+ return match.groupdict()["command"]
+
+ # Fallback: assume conda is available on the system path.
+ return "conda"
+
+
+CONDA_COMMANDS_REQUIRING_PREFIX = {
+ 'install', 'list', 'remove', 'uninstall', 'update', 'upgrade',
+}
+CONDA_COMMANDS_REQUIRING_YES = {
+ 'install', 'remove', 'uninstall', 'update', 'upgrade',
+}
+CONDA_ENV_FLAGS = {'-p', '--prefix', '-n', '--name'}
+CONDA_YES_FLAGS = {'-y', '--y'}
+
+
+@magics_class
+class PackagingMagics(Magics):
+ """Magics related to packaging & installation"""
+
+ @line_magic
+ def pip(self, line):
+ """Run the pip package manager within the current kernel.
+
+ Usage:
+ %pip install [pkgs]
+ """
+ python = sys.executable
+ if sys.platform == "win32":
+ python = '"' + python + '"'
+ else:
+ python = shlex.quote(python)
+
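+ # Illustrative example (package name is hypothetical): `%pip install requests`
+ # ends up running roughly "<python> -m pip install requests" through the
+ # kernel's system() call below.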
+ self.shell.system(" ".join([python, "-m", "pip", line]))
+
+ print("Note: you may need to restart the kernel to use updated packages.")
+
+ @line_magic
+ def conda(self, line):
+ """Run the conda package manager within the current kernel.
+
+ Usage:
+ %conda install [pkgs]
+ """
+ if not _is_conda_environment():
+ raise ValueError("The python kernel does not appear to be a conda environment. "
+ "Please use ``%pip install`` instead.")
+
+ conda = _get_conda_executable()
+ args = shlex.split(line)
+ command = args[0] if len(args) > 0 else ""
+ args = args[1:] if len(args) > 1 else [""]
+
+ extra_args = []
+
+ # When the subprocess does not allow us to respond "yes" during the installation,
+ # we need to insert --yes in the argument list for some commands
+ stdin_disabled = getattr(self.shell, 'kernel', None) is not None
+ needs_yes = command in CONDA_COMMANDS_REQUIRING_YES
+ has_yes = set(args).intersection(CONDA_YES_FLAGS)
+ if stdin_disabled and needs_yes and not has_yes:
+ extra_args.append("--yes")
+
+ # Add --prefix to point conda installation to the current environment
+ needs_prefix = command in CONDA_COMMANDS_REQUIRING_PREFIX
+ has_prefix = set(args).intersection(CONDA_ENV_FLAGS)
+ if needs_prefix and not has_prefix:
+ extra_args.extend(["--prefix", sys.prefix])
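+ # Illustrative example (package name is hypothetical): in a kernel without
+ # stdin and outside the root environment, `%conda install scipy` roughly runs
+ # "<conda> install --yes --prefix <sys.prefix> scipy".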
+
+ self.shell.system(' '.join([conda, command] + extra_args + args))
+ print("\nNote: you may need to restart the kernel to use updated packages.")
diff --git a/contrib/python/ipython/py3/IPython/core/magics/pylab.py b/contrib/python/ipython/py3/IPython/core/magics/pylab.py
new file mode 100644
index 0000000000..2a69453ac9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/pylab.py
@@ -0,0 +1,169 @@
+"""Implementation of magic functions for matplotlib/pylab support.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from traitlets.config.application import Application
+from IPython.core import magic_arguments
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from warnings import warn
+from IPython.core.pylabtools import backends
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+magic_gui_arg = magic_arguments.argument(
+ 'gui', nargs='?',
+ help="""Name of the matplotlib backend to use %s.
+ If given, the corresponding matplotlib backend is used,
+ otherwise it will be matplotlib's default
+ (which you can set in your matplotlib config file).
+ """ % str(tuple(sorted(backends.keys())))
+)
+
+
+@magics_class
+class PylabMagics(Magics):
+ """Magics related to matplotlib's pylab support"""
+
+ @skip_doctest
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('-l', '--list', action='store_true',
+ help='Show available matplotlib backends')
+ @magic_gui_arg
+ def matplotlib(self, line=''):
+ """Set up matplotlib to work interactively.
+
+ This function lets you activate matplotlib interactive support
+ at any point during an IPython session. It does not import anything
+ into the interactive namespace.
+
+ If you are using the inline matplotlib backend in the IPython Notebook
+ you can set which figure formats are enabled using the following::
+
+ In [1]: from matplotlib_inline.backend_inline import set_matplotlib_formats
+
+ In [2]: set_matplotlib_formats('pdf', 'svg')
+
+ The default for inline figures sets `bbox_inches` to 'tight'. This can
+ cause discrepancies between the displayed image and the identical
+ image created using `savefig`. This behavior can be disabled using the
+ `%config` magic::
+
+ In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
+
+ In addition, see the docstrings of
+ `matplotlib_inline.backend_inline.set_matplotlib_formats` and
+ `matplotlib_inline.backend_inline.set_matplotlib_close` for more information on
+ changing additional behaviors of the inline backend.
+
+ Examples
+ --------
+ To enable the inline backend for usage with the IPython Notebook::
+
+ In [1]: %matplotlib inline
+
+ In this case, where the matplotlib default is TkAgg::
+
+ In [2]: %matplotlib
+ Using matplotlib backend: TkAgg
+
+ But you can explicitly request a different GUI backend::
+
+ In [3]: %matplotlib qt
+
+ You can list the available backends using the -l/--list option::
+
+ In [4]: %matplotlib --list
+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'gtk4', 'notebook', 'wx', 'qt', 'nbagg',
+ 'gtk', 'tk', 'inline']
+ """
+ args = magic_arguments.parse_argstring(self.matplotlib, line)
+ if args.list:
+ backends_list = list(backends.keys())
+ print("Available matplotlib backends: %s" % backends_list)
+ else:
+ gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
+ self._show_matplotlib_backend(args.gui, backend)
+
+ @skip_doctest
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '--no-import-all', action='store_true', default=None,
+ help="""Prevent IPython from performing ``import *`` into the interactive namespace.
+
+ You can govern the default behavior of this flag with the
+ InteractiveShellApp.pylab_import_all configurable.
+ """
+ )
+ @magic_gui_arg
+ def pylab(self, line=''):
+ """Load numpy and matplotlib to work interactively.
+
+ This function lets you activate pylab (matplotlib, numpy and
+ interactive support) at any point during an IPython session.
+
+ %pylab makes the following imports::
+
+ import numpy
+ import matplotlib
+ from matplotlib import pylab, mlab, pyplot
+ np = numpy
+ plt = pyplot
+
+ from IPython.display import display
+ from IPython.core.pylabtools import figsize, getfigs
+
+ from pylab import *
+ from numpy import *
+
+ If you pass `--no-import-all`, the last two `*` imports will be excluded.
+
+ See the %matplotlib magic for more details about activating matplotlib
+ without affecting the interactive namespace.
+ """
+ args = magic_arguments.parse_argstring(self.pylab, line)
+ if args.no_import_all is None:
+ # get default from Application
+ if Application.initialized():
+ app = Application.instance()
+ try:
+ import_all = app.pylab_import_all
+ except AttributeError:
+ import_all = True
+ else:
+ # nothing specified, no app - default True
+ import_all = True
+ else:
+ # invert no-import flag
+ import_all = not args.no_import_all
+
+ gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
+ self._show_matplotlib_backend(args.gui, backend)
+ print(
+ "%pylab is deprecated, use %matplotlib inline and import the required libraries."
+ )
+ print("Populating the interactive namespace from numpy and matplotlib")
+ if clobbered:
+ warn("pylab import has clobbered these variables: %s" % clobbered +
+ "\n`%matplotlib` prevents importing * from pylab and numpy"
+ )
+
+ def _show_matplotlib_backend(self, gui, backend):
+ """show matplotlib message backend message"""
+ if not gui or gui == 'auto':
+ print("Using matplotlib backend: %s" % backend)
diff --git a/contrib/python/ipython/py3/IPython/core/magics/script.py b/contrib/python/ipython/py3/IPython/core/magics/script.py
new file mode 100644
index 0000000000..a858c6489c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/script.py
@@ -0,0 +1,371 @@
+"""Magic functions for running cells in various scripts."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import asyncio
+import asyncio.exceptions
+import atexit
+import errno
+import os
+import signal
+import sys
+import time
+from subprocess import CalledProcessError
+from threading import Thread
+
+from traitlets import Any, Dict, List, default
+
+from IPython.core import magic_arguments
+from IPython.core.async_helpers import _AsyncIOProxy
+from IPython.core.magic import Magics, cell_magic, line_magic, magics_class
+from IPython.utils.process import arg_split
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+def script_args(f):
+ """single decorator for adding script args"""
+ args = [
+ magic_arguments.argument(
+ '--out', type=str,
+ help="""The variable in which to store stdout from the script.
+ If the script is backgrounded, this will be the stdout *pipe*,
+ instead of the stdout text itself and will not be auto closed.
+ """
+ ),
+ magic_arguments.argument(
+ '--err', type=str,
+ help="""The variable in which to store stderr from the script.
+ If the script is backgrounded, this will be the stderr *pipe*,
+ instead of the stderr text itself and will not be autoclosed.
+ """
+ ),
+ magic_arguments.argument(
+ '--bg', action="store_true",
+ help="""Whether to run the script in the background.
+ If given, the only way to see the output of the command is
+ with --out/err.
+ """
+ ),
+ magic_arguments.argument(
+ '--proc', type=str,
+ help="""The variable in which to store Popen instance.
+ This is used only when --bg option is given.
+ """
+ ),
+ magic_arguments.argument(
+ '--no-raise-error', action="store_false", dest='raise_error',
+ help="""Whether you should raise an error message in addition to
+ a stream on stderr if you get a nonzero exit code.
+ """,
+ ),
+ ]
+ for arg in args:
+ f = arg(f)
+ return f
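+# Illustrative usage (the variable name is hypothetical): stacking @script_args on
+# a cell magic adds --out/--err/--bg/--proc/--no-raise-error, so a cell starting
+# with `%%script bash --out captured` stores the script's stdout in the user
+# variable `captured`.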
+
+
+@magics_class
+class ScriptMagics(Magics):
+ """Magics for talking to scripts
+
+ This defines a base `%%script` cell magic for running a cell
+ with a program in a subprocess, and registers a few top-level
+ magics that call %%script with common interpreters.
+ """
+
+ event_loop = Any(
+ help="""
+ The event loop on which to run subprocesses
+
+ Not the main event loop,
+ because we want to be able to make blocking calls
+ and have certain requirements we don't want to impose on the main loop.
+ """
+ )
+
+ script_magics = List(
+ help="""Extra script cell magics to define
+
+ This generates simple wrappers of `%%script foo` as `%%foo`.
+
+ If you want to add script magics that aren't on your path,
+ specify them in script_paths
+ """,
+ ).tag(config=True)
+ @default('script_magics')
+ def _script_magics_default(self):
+ """default to a common list of programs"""
+
+ defaults = [
+ 'sh',
+ 'bash',
+ 'perl',
+ 'ruby',
+ 'python',
+ 'python2',
+ 'python3',
+ 'pypy',
+ ]
+ if os.name == 'nt':
+ defaults.extend([
+ 'cmd',
+ ])
+
+ return defaults
+
+ script_paths = Dict(
+ help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
+
+ Only necessary for items in script_magics where the default path will not
+ find the right interpreter.
+ """
+ ).tag(config=True)
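+ # Illustrative configuration (path taken from the help text above):
+ # c.ScriptMagics.script_paths = {'ruby': '/opt/secret/bin/ruby'}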
+
+ def __init__(self, shell=None):
+ super(ScriptMagics, self).__init__(shell=shell)
+ self._generate_script_magics()
+ self.bg_processes = []
+ atexit.register(self.kill_bg_processes)
+
+ def __del__(self):
+ self.kill_bg_processes()
+
+ def _generate_script_magics(self):
+ cell_magics = self.magics['cell']
+ for name in self.script_magics:
+ cell_magics[name] = self._make_script_magic(name)
+
+ def _make_script_magic(self, name):
+ """make a named magic, that calls %%script with a particular program"""
+ # expand to explicit path if necessary:
+ script = self.script_paths.get(name, name)
+
+ @magic_arguments.magic_arguments()
+ @script_args
+ def named_script_magic(line, cell):
+ # if line, add it as cl-flags
+ if line:
+ line = "%s %s" % (script, line)
+ else:
+ line = script
+ return self.shebang(line, cell)
+
+ # write a basic docstring:
+ named_script_magic.__doc__ = \
+ """%%{name} script magic
+
+ Run cells with {script} in a subprocess.
+
+ This is a shortcut for `%%script {script}`
+ """.format(**locals())
+
+ return named_script_magic
+
+ @magic_arguments.magic_arguments()
+ @script_args
+ @cell_magic("script")
+ def shebang(self, line, cell):
+ """Run a cell via a shell command
+
+ The `%%script` line is like the #! line of a script,
+ specifying a program (bash, perl, ruby, etc.) with which to run.
+
+ The rest of the cell is run by that program.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %%script bash
+ ...: for i in 1 2 3; do
+ ...: echo $i
+ ...: done
+ 1
+ 2
+ 3
+ """
+
+ # Create the event loop in which to run script magics
+ # this operates on a background thread
+ if self.event_loop is None:
+ if sys.platform == "win32":
+ # don't override the current policy,
+ # just create an event loop
+ event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop()
+ else:
+ event_loop = asyncio.new_event_loop()
+ self.event_loop = event_loop
+
+ # start the loop in a background thread
+ asyncio_thread = Thread(target=event_loop.run_forever, daemon=True)
+ asyncio_thread.start()
+ else:
+ event_loop = self.event_loop
+
+ def in_thread(coro):
+ """Call a coroutine on the asyncio thread"""
+ return asyncio.run_coroutine_threadsafe(coro, event_loop).result()
+
+ async def _readchunk(stream):
+ try:
+ return await stream.readuntil(b"\n")
+ except asyncio.exceptions.IncompleteReadError as e:
+ return e.partial
+ except asyncio.exceptions.LimitOverrunError as e:
+ return await stream.read(e.consumed)
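+ # Note: _readchunk returns b"" only at end-of-stream; partial final lines and
+ # over-long lines are still returned as data, so _handle_stream below keeps
+ # draining output until the pipe actually closes.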
+
+ async def _handle_stream(stream, stream_arg, file_object):
+ while True:
+ chunk = (await _readchunk(stream)).decode("utf8", errors="replace")
+ if not chunk:
+ break
+ if stream_arg:
+ self.shell.user_ns[stream_arg] = chunk
+ else:
+ file_object.write(chunk)
+ file_object.flush()
+
+ async def _stream_communicate(process, cell):
+ process.stdin.write(cell)
+ process.stdin.close()
+ stdout_task = asyncio.create_task(
+ _handle_stream(process.stdout, args.out, sys.stdout)
+ )
+ stderr_task = asyncio.create_task(
+ _handle_stream(process.stderr, args.err, sys.stderr)
+ )
+ await asyncio.wait([stdout_task, stderr_task])
+ await process.wait()
+
+ argv = arg_split(line, posix=not sys.platform.startswith("win"))
+ args, cmd = self.shebang.parser.parse_known_args(argv)
+
+ try:
+ p = in_thread(
+ asyncio.create_subprocess_exec(
+ *cmd,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ stdin=asyncio.subprocess.PIPE,
+ )
+ )
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ print("Couldn't find program: %r" % cmd[0])
+ return
+ else:
+ raise
+
+ if not cell.endswith('\n'):
+ cell += '\n'
+ cell = cell.encode('utf8', 'replace')
+ if args.bg:
+ self.bg_processes.append(p)
+ self._gc_bg_processes()
+ to_close = []
+ if args.out:
+ self.shell.user_ns[args.out] = _AsyncIOProxy(p.stdout, event_loop)
+ else:
+ to_close.append(p.stdout)
+ if args.err:
+ self.shell.user_ns[args.err] = _AsyncIOProxy(p.stderr, event_loop)
+ else:
+ to_close.append(p.stderr)
+ event_loop.call_soon_threadsafe(
+ lambda: asyncio.Task(self._run_script(p, cell, to_close))
+ )
+ if args.proc:
+ proc_proxy = _AsyncIOProxy(p, event_loop)
+ proc_proxy.stdout = _AsyncIOProxy(p.stdout, event_loop)
+ proc_proxy.stderr = _AsyncIOProxy(p.stderr, event_loop)
+ self.shell.user_ns[args.proc] = proc_proxy
+ return
+
+ try:
+ in_thread(_stream_communicate(p, cell))
+ except KeyboardInterrupt:
+ try:
+ p.send_signal(signal.SIGINT)
+ in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
+ if p.returncode is not None:
+ print("Process is interrupted.")
+ return
+ p.terminate()
+ in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
+ if p.returncode is not None:
+ print("Process is terminated.")
+ return
+ p.kill()
+ print("Process is killed.")
+ except OSError:
+ pass
+ except Exception as e:
+ print("Error while terminating subprocess (pid=%i): %s" % (p.pid, e))
+ return
+
+ if args.raise_error and p.returncode != 0:
+ # If we get here and p.returncode is still None, we must have
+ # killed it but not yet seen its return code. We don't wait for it,
+ # in case it's stuck in uninterruptible sleep. -9 = SIGKILL
+ rc = p.returncode or -9
+ raise CalledProcessError(rc, cell)
+
+ shebang.__skip_doctest__ = os.name != "posix"
+
+ async def _run_script(self, p, cell, to_close):
+ """callback for running the script in the background"""
+
+ p.stdin.write(cell)
+ await p.stdin.drain()
+ p.stdin.close()
+ await p.stdin.wait_closed()
+ await p.wait()
+ # asyncio read pipes have no close
+ # but we should drain the data anyway
+ for s in to_close:
+ await s.read()
+ self._gc_bg_processes()
+
+ @line_magic("killbgscripts")
+ def killbgscripts(self, _nouse_=''):
+ """Kill all BG processes started by %%script and its family."""
+ self.kill_bg_processes()
+ print("All background processes were killed.")
+
+ def kill_bg_processes(self):
+ """Kill all BG processes which are still running."""
+ if not self.bg_processes:
+ return
+ for p in self.bg_processes:
+ if p.returncode is None:
+ try:
+ p.send_signal(signal.SIGINT)
+ except:
+ pass
+ time.sleep(0.1)
+ self._gc_bg_processes()
+ if not self.bg_processes:
+ return
+ for p in self.bg_processes:
+ if p.returncode is None:
+ try:
+ p.terminate()
+ except:
+ pass
+ time.sleep(0.1)
+ self._gc_bg_processes()
+ if not self.bg_processes:
+ return
+ for p in self.bg_processes:
+ if p.returncode is None:
+ try:
+ p.kill()
+ except:
+ pass
+ self._gc_bg_processes()
+
+ def _gc_bg_processes(self):
+ self.bg_processes = [p for p in self.bg_processes if p.returncode is None]
diff --git a/contrib/python/ipython/py3/IPython/core/oinspect.py b/contrib/python/ipython/py3/IPython/core/oinspect.py
new file mode 100644
index 0000000000..ef6a0d02d7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/oinspect.py
@@ -0,0 +1,1171 @@
+# -*- coding: utf-8 -*-
+"""Tools for inspecting Python objects.
+
+Uses syntax highlighting for presenting the various information elements.
+
+Similar in spirit to the inspect module, but all calls take a name argument to
+reference the name under which an object is being read.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+__all__ = ['Inspector','InspectColors']
+
+# stdlib modules
+from dataclasses import dataclass
+from inspect import signature
+from textwrap import dedent
+import ast
+import html
+import inspect
+import io as stdlib_io
+import linecache
+import os
+import sys
+import types
+import warnings
+
+from typing import Any, Optional, Dict, Union, List, Tuple
+
+if sys.version_info <= (3, 10):
+ from typing_extensions import TypeAlias
+else:
+ from typing import TypeAlias
+
+# IPython's own
+from IPython.core import page
+from IPython.lib.pretty import pretty
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import PyColorize
+from IPython.utils import openpy
+from IPython.utils.dir2 import safe_hasattr
+from IPython.utils.path import compress_user
+from IPython.utils.text import indent
+from IPython.utils.wildcard import list_namespace
+from IPython.utils.wildcard import typestr2type
+from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
+from IPython.utils.py3compat import cast_unicode
+from IPython.utils.colorable import Colorable
+from IPython.utils.decorators import undoc
+
+from pygments import highlight
+from pygments.lexers import PythonLexer
+from pygments.formatters import HtmlFormatter
+
+HOOK_NAME = "__custom_documentations__"
+
+
+UnformattedBundle: TypeAlias = Dict[str, List[Tuple[str, str]]] # List of (title, body)
+Bundle: TypeAlias = Dict[str, str]
+
+
+@dataclass
+class OInfo:
+ ismagic: bool
+ isalias: bool
+ found: bool
+ namespace: Optional[str]
+ parent: Any
+ obj: Any
+
+ def get(self, field):
+ """Get a field from the object for backward compatibility with before 8.12
+
+ see https://github.com/h5py/h5py/issues/2253
+ """
+ # We need to deprecate this at some point, but the warning will show in completion.
+ # Let's comment this for now and uncomment end of 2023 ish
+ # warnings.warn(
+ # f"OInfo dataclass with fields access since IPython 8.12 please use OInfo.{field} instead."
+ # "OInfo used to be a dict but a dataclass provide static fields verification with mypy."
+ # "This warning and backward compatibility `get()` method were added in 8.13.",
+ # DeprecationWarning,
+ # stacklevel=2,
+ # )
+ return getattr(self, field)
+
+
+def pylight(code):
+ return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))
+
+# builtin docstrings to ignore
+_func_call_docstring = types.FunctionType.__call__.__doc__
+_object_init_docstring = object.__init__.__doc__
+_builtin_type_docstrings = {
+ inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
+ types.FunctionType, property)
+}
+
+_builtin_func_type = type(all)
+_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
+#****************************************************************************
+# Builtin color schemes
+
+Colors = TermColors # just a shorthand
+
+InspectColors = PyColorize.ANSICodeColors
+
+#****************************************************************************
+# Auxiliary functions and objects
+
+# See the messaging spec for the definition of all these fields. This list
+# effectively defines the order of display
+info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
+ 'length', 'file', 'definition', 'docstring', 'source',
+ 'init_definition', 'class_docstring', 'init_docstring',
+ 'call_def', 'call_docstring',
+ # These won't be printed but will be used to determine how to
+ # format the object
+ 'ismagic', 'isalias', 'isclass', 'found', 'name'
+ ]
+
+
+def object_info(**kw):
+ """Make an object info dict with all fields present."""
+ infodict = {k:None for k in info_fields}
+ infodict.update(kw)
+ return infodict
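+# Illustrative example: object_info(name='x', found=True) returns a dict that has
+# every key listed in info_fields, with the unspecified entries left as None.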
+
+
+def get_encoding(obj):
+ """Get encoding for python source file defining obj
+
+ Returns None if obj is not defined in a sourcefile.
+ """
+ ofile = find_file(obj)
+ # Only attempt encoding detection if the object is defined in a real,
+ # non-binary source file on the filesystem.
+ if ofile is None:
+ return None
+ elif ofile.endswith(('.so', '.dll', '.pyd')):
+ return None
+ elif not os.path.isfile(ofile):
+ return None
+ else:
+ # Read the file in binary mode so the encoding-detection machinery
+ # can look for a coding cookie (or BOM).
+ with stdlib_io.open(ofile, 'rb') as buffer:
+ encoding, lines = openpy.detect_encoding(buffer.readline)
+ return encoding
+
+def getdoc(obj) -> Union[str,None]:
+ """Stable wrapper around inspect.getdoc.
+
+ This can't crash because of attribute problems.
+
+ It also attempts to call a getdoc() method on the given object. This
+ allows objects which provide their docstrings via non-standard mechanisms
+ (like Pyro proxies) to still be inspected by ipython's ? system.
+ """
+ # Allow objects to offer customized documentation via a getdoc method:
+ try:
+ ds = obj.getdoc()
+ except Exception:
+ pass
+ else:
+ if isinstance(ds, str):
+ return inspect.cleandoc(ds)
+ docstr = inspect.getdoc(obj)
+ return docstr
+
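+# Illustrative sketch of the custom-documentation hook described above: any
+# object may define a ``getdoc`` method and have it honoured by the ``?``
+# system. The class below is a hypothetical example.
+#
+#     >>> class _RemoteProxy:
+#     ...     def getdoc(self):
+#     ...         return "Docs for a proxied object."
+#     >>> getdoc(_RemoteProxy())
+#     'Docs for a proxied object.'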
+
+def getsource(obj, oname='') -> Union[str,None]:
+ """Wrapper around inspect.getsource.
+
+ This can be modified by other projects to provide customized source
+ extraction.
+
+ Parameters
+ ----------
+ obj : object
+ an object whose source code we will attempt to extract
+ oname : str
+ (optional) a name under which the object is known
+
+ Returns
+ -------
+ src : str or None
+
+ """
+
+ if isinstance(obj, property):
+ sources = []
+ for attrname in ['fget', 'fset', 'fdel']:
+ fn = getattr(obj, attrname)
+ if fn is not None:
+ encoding = get_encoding(fn)
+ oname_prefix = ('%s.' % oname) if oname else ''
+ sources.append(''.join(('# ', oname_prefix, attrname)))
+ if inspect.isfunction(fn):
+ _src = getsource(fn)
+ if _src:
+ # assert _src is not None, "please mypy"
+ sources.append(dedent(_src))
+ else:
+ # Default str/repr only prints function name,
+ # pretty.pretty prints module name too.
+ sources.append(
+ '%s%s = %s\n' % (oname_prefix, attrname, pretty(fn))
+ )
+ if sources:
+ return '\n'.join(sources)
+ else:
+ return None
+
+ else:
+ # Get source for non-property objects.
+
+ obj = _get_wrapped(obj)
+
+ try:
+ src = inspect.getsource(obj)
+ except TypeError:
+ # The object itself provided no meaningful source, try looking for
+ # its class definition instead.
+ try:
+ src = inspect.getsource(obj.__class__)
+ except (OSError, TypeError):
+ return None
+ except OSError:
+ return None
+
+ return src
+
+
+def is_simple_callable(obj):
+ """True if obj is a function ()"""
+ return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
+ isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
+
+@undoc
+def getargspec(obj):
+ """Wrapper around :func:`inspect.getfullargspec`
+
+ In addition to functions and methods, this can also handle objects with a
+ ``__call__`` attribute.
+
+ DEPRECATED since IPython 7.10: do not use; it will be removed in a future version.
+ """
+
+ warnings.warn('`getargspec` function is deprecated as of IPython 7.10 '
+ 'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
+
+ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
+ obj = obj.__call__
+
+ return inspect.getfullargspec(obj)
+
+@undoc
+def format_argspec(argspec):
+ """Format argspect, convenience wrapper around inspect's.
+
+ This takes a dict instead of ordered arguments and calls
+ inspect.format_argspec with the arguments in the necessary order.
+
+ DEPRECATED (since 7.10): Do not use; will be removed in future versions.
+ """
+
+ warnings.warn('`format_argspec` function is deprecated as of IPython 7.10 '
+ 'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
+
+
+ return inspect.formatargspec(argspec['args'], argspec['varargs'],
+ argspec['varkw'], argspec['defaults'])
+
+@undoc
+def call_tip(oinfo, format_call=True):
+ """DEPRECATED since 6.0. Extract call tip data from an oinfo dict."""
+ warnings.warn(
+ "`call_tip` function is deprecated as of IPython 6.0"
+ "and will be removed in future versions.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ # Get call definition
+ argspec = oinfo.get('argspec')
+ if argspec is None:
+ call_line = None
+ else:
+ # Callable objects will have 'self' as their first argument, prune
+ # it out if it's there for clarity (since users do *not* pass an
+ # extra first argument explicitly).
+ try:
+ has_self = argspec['args'][0] == 'self'
+ except (KeyError, IndexError):
+ pass
+ else:
+ if has_self:
+ argspec['args'] = argspec['args'][1:]
+
+ call_line = oinfo['name']+format_argspec(argspec)
+
+ # Now get docstring.
+ # The priority is: call docstring, constructor docstring, main one.
+ doc = oinfo.get('call_docstring')
+ if doc is None:
+ doc = oinfo.get('init_docstring')
+ if doc is None:
+ doc = oinfo.get('docstring','')
+
+ return call_line, doc
+
+
+def _get_wrapped(obj):
+ """Get the original object if wrapped in one or more @decorators
+
+ Some objects automatically construct similar objects on any unrecognised
+ attribute access (e.g. unittest.mock.call). To protect against infinite loops,
+ this will arbitrarily cut off after 100 levels of obj.__wrapped__
+ attribute access. --TK, Jan 2016
+ """
+ orig_obj = obj
+ i = 0
+ while safe_hasattr(obj, '__wrapped__'):
+ obj = obj.__wrapped__
+ i += 1
+ if i > 100:
+ # __wrapped__ is probably a lie, so return the thing we started with
+ return orig_obj
+ return obj
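+
+# Illustrative sketch: ``functools.wraps`` sets ``__wrapped__``, so decorated
+# callables are unwound back to the original function.
+#
+#     >>> import functools
+#     >>> def original():
+#     ...     pass
+#     >>> @functools.wraps(original)
+#     ... def wrapper():
+#     ...     pass
+#     >>> _get_wrapped(wrapper) is original
+#     True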
+
+def find_file(obj) -> Optional[str]:
+ """Find the absolute path to the file where an object was defined.
+
+ This is essentially a robust wrapper around `inspect.getabsfile`.
+
+ Returns None if no file can be found.
+
+ Parameters
+ ----------
+ obj : any Python object
+
+ Returns
+ -------
+ fname : str or None
+ The absolute path to the file where the object was defined.
+ """
+ obj = _get_wrapped(obj)
+
+ fname = None
+ try:
+ fname = inspect.getabsfile(obj)
+ except TypeError:
+ # For an instance, the file that matters is where its class was
+ # declared.
+ try:
+ fname = inspect.getabsfile(obj.__class__)
+ except (OSError, TypeError):
+ # Can happen for builtins
+ pass
+ except OSError:
+ pass
+
+ return cast_unicode(fname)
+
+
+def find_source_lines(obj):
+ """Find the line number in a file where an object was defined.
+
+ This is essentially a robust wrapper around `inspect.getsourcelines`.
+
+ Returns None if no file can be found.
+
+ Parameters
+ ----------
+ obj : any Python object
+
+ Returns
+ -------
+ lineno : int
+ The line number where the object definition starts.
+ """
+ obj = _get_wrapped(obj)
+
+ try:
+ lineno = inspect.getsourcelines(obj)[1]
+ except TypeError:
+ # For instances, try the class object like getsource() does
+ try:
+ lineno = inspect.getsourcelines(obj.__class__)[1]
+ except (OSError, TypeError):
+ return None
+ except OSError:
+ return None
+
+ return lineno
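+
+# Illustrative sketch: together, ``find_file`` and ``find_source_lines`` locate
+# where an object (or, for instances, its class) is defined. The exact path
+# depends on the interpreter installation, hence the skipped output.
+#
+#     >>> import textwrap
+#     >>> find_file(textwrap.dedent)              # doctest: +SKIP
+#     '/usr/lib/python3.11/textwrap.py'
+#     >>> isinstance(find_source_lines(textwrap.dedent), int)
+#     True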
+
+class Inspector(Colorable):
+
+ def __init__(self, color_table=InspectColors,
+ code_color_table=PyColorize.ANSICodeColors,
+ scheme=None,
+ str_detail_level=0,
+ parent=None, config=None):
+ super(Inspector, self).__init__(parent=parent, config=config)
+ self.color_table = color_table
+ self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
+ self.format = self.parser.format
+ self.str_detail_level = str_detail_level
+ self.set_active_scheme(scheme)
+
+ def _getdef(self,obj,oname='') -> Union[str,None]:
+ """Return the call signature for any callable object.
+
+ If any exception is generated, None is returned instead and the
+ exception is suppressed."""
+ try:
+ return _render_signature(signature(obj), oname)
+ except:
+ return None
+
+ def __head(self,h) -> str:
+ """Return a header string with proper colors."""
+ return '%s%s%s' % (self.color_table.active_colors.header,h,
+ self.color_table.active_colors.normal)
+
+ def set_active_scheme(self, scheme):
+ if scheme is not None:
+ self.color_table.set_active_scheme(scheme)
+ self.parser.color_table.set_active_scheme(scheme)
+
+ def noinfo(self, msg, oname):
+ """Generic message when no information is found."""
+ print('No %s found' % msg, end=' ')
+ if oname:
+ print('for %s' % oname)
+ else:
+ print()
+
+ def pdef(self, obj, oname=''):
+ """Print the call signature for any callable object.
+
+ If the object is a class, print the constructor information."""
+
+ if not callable(obj):
+ print('Object is not callable.')
+ return
+
+ header = ''
+
+ if inspect.isclass(obj):
+ header = self.__head('Class constructor information:\n')
+
+
+ output = self._getdef(obj,oname)
+ if output is None:
+ self.noinfo('definition header',oname)
+ else:
+ print(header,self.format(output), end=' ')
+
+ # In Python 3, all classes are new-style, so they all have __init__.
+ @skip_doctest
+ def pdoc(self, obj, oname='', formatter=None):
+ """Print the docstring for any object.
+
+ Optional:
+ - formatter: a function to run the docstring through for specially
+ formatted docstrings.
+
+ Examples
+ --------
+ In [1]: class NoInit:
+ ...: pass
+
+ In [2]: class NoDoc:
+ ...: def __init__(self):
+ ...: pass
+
+ In [3]: %pdoc NoDoc
+ No documentation found for NoDoc
+
+ In [4]: %pdoc NoInit
+ No documentation found for NoInit
+
+ In [5]: obj = NoInit()
+
+ In [6]: %pdoc obj
+ No documentation found for obj
+
+ In [5]: obj2 = NoDoc()
+
+ In [6]: %pdoc obj2
+ No documentation found for obj2
+ """
+
+ head = self.__head # For convenience
+ lines = []
+ ds = getdoc(obj)
+ if formatter:
+ ds = formatter(ds).get('text/plain', ds)
+ if ds:
+ lines.append(head("Class docstring:"))
+ lines.append(indent(ds))
+ if inspect.isclass(obj) and hasattr(obj, '__init__'):
+ init_ds = getdoc(obj.__init__)
+ if init_ds is not None:
+ lines.append(head("Init docstring:"))
+ lines.append(indent(init_ds))
+ elif hasattr(obj,'__call__'):
+ call_ds = getdoc(obj.__call__)
+ if call_ds:
+ lines.append(head("Call docstring:"))
+ lines.append(indent(call_ds))
+
+ if not lines:
+ self.noinfo('documentation',oname)
+ else:
+ page.page('\n'.join(lines))
+
+ def psource(self, obj, oname=''):
+ """Print the source code for an object."""
+
+ # Flush the source cache because inspect can return out-of-date source
+ linecache.checkcache()
+ try:
+ src = getsource(obj, oname=oname)
+ except Exception:
+ src = None
+
+ if src is None:
+ self.noinfo('source', oname)
+ else:
+ page.page(self.format(src))
+
+ def pfile(self, obj, oname=''):
+ """Show the whole file where an object was defined."""
+
+ lineno = find_source_lines(obj)
+ if lineno is None:
+ self.noinfo('file', oname)
+ return
+
+ ofile = find_file(obj)
+ # run contents of file through pager starting at line where the object
+ # is defined, as long as the file isn't binary and is actually on the
+ # filesystem.
+ if ofile.endswith(('.so', '.dll', '.pyd')):
+ print('File %r is binary, not printing.' % ofile)
+ elif not os.path.isfile(ofile):
+ print('File %r does not exist, not printing.' % ofile)
+ else:
+ # Print only text files, not extension binaries. Note that
+ # getsourcelines returns lineno with 1-offset and page() uses
+ # 0-offset, so we must adjust.
+ page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
+
+
+ def _mime_format(self, text:str, formatter=None) -> dict:
+ """Return a mime bundle representation of the input text.
+
+ - if `formatter` is None, the returned mime bundle has a ``text/plain``
+ field with the input text and a ``text/html`` field with a ``<pre>`` tag
+ containing the input text.
+
+ - if ``formatter`` is not None, it must be a callable transforming the
+ input text into a mime bundle. Default values for ``text/plain`` and
+ ``text/html`` representations are the ones described above.
+
+ Note:
+
+ Formatters returning strings are supported but this behavior is deprecated.
+
+ """
+ defaults = {
+ "text/plain": text,
+ "text/html": f"<pre>{html.escape(text)}</pre>",
+ }
+
+ if formatter is None:
+ return defaults
+ else:
+ formatted = formatter(text)
+
+ if not isinstance(formatted, dict):
+ # Handle the deprecated behavior of a formatter returning
+ # a string instead of a mime bundle.
+ return {"text/plain": formatted, "text/html": f"<pre>{formatted}</pre>"}
+
+ else:
+ return dict(defaults, **formatted)
+
+ def format_mime(self, bundle: UnformattedBundle) -> Bundle:
+ """Format a mimebundle being created by _make_info_unformatted into a real mimebundle"""
+ # Format text/plain mimetype
+ assert isinstance(bundle["text/plain"], list)
+ for item in bundle["text/plain"]:
+ assert isinstance(item, tuple)
+
+ new_b: Bundle = {}
+ lines = []
+ _len = max(len(h) for h, _ in bundle["text/plain"])
+
+ for head, body in bundle["text/plain"]:
+ body = body.strip("\n")
+ delim = "\n" if "\n" in body else " "
+ lines.append(
+ f"{self.__head(head+':')}{(_len - len(head))*' '}{delim}{body}"
+ )
+
+ new_b["text/plain"] = "\n".join(lines)
+
+ if "text/html" in bundle:
+ assert isinstance(bundle["text/html"], list)
+ for item in bundle["text/html"]:
+ assert isinstance(item, tuple)
+ # Format the text/html mimetype
+ if isinstance(bundle["text/html"], (list, tuple)):
+ # bundle['text/html'] is a list of (head, formatted body) pairs
+ new_b["text/html"] = "\n".join(
+ (f"<h1>{head}</h1>\n{body}" for (head, body) in bundle["text/html"])
+ )
+
+ for k in bundle.keys():
+ if k in ("text/html", "text/plain"):
+ continue
+ else:
+ new_b[k] = bundle[k] # type:ignore
+ return new_b
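+
+ # Illustrative sketch: the unformatted bundle holds (title, body) pairs per
+ # mime type; format_mime collapses them into single strings, colorizing and
+ # aligning the plain-text titles and wrapping HTML sections in <h1> headers.
+ # For example (ignoring color escapes and alignment spaces),
+ # {"text/plain": [("Type", "int"), ("Length", "3")], "text/html": []}
+ # becomes roughly {"text/plain": "Type: int\nLength: 3", "text/html": ""}.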
+
+ def _append_info_field(
+ self,
+ bundle: UnformattedBundle,
+ title: str,
+ key: str,
+ info,
+ omit_sections,
+ formatter,
+ ):
+ """Append an info value to the unformatted mimebundle being constructed by _make_info_unformatted"""
+ if title in omit_sections or key in omit_sections:
+ return
+ field = info[key]
+ if field is not None:
+ formatted_field = self._mime_format(field, formatter)
+ bundle["text/plain"].append((title, formatted_field["text/plain"]))
+ bundle["text/html"].append((title, formatted_field["text/html"]))
+
+ def _make_info_unformatted(
+ self, obj, info, formatter, detail_level, omit_sections
+ ) -> UnformattedBundle:
+ """Assemble the mimebundle as unformatted lists of information"""
+ bundle: UnformattedBundle = {
+ "text/plain": [],
+ "text/html": [],
+ }
+
+ # A convenience function to simplify calls below
+ def append_field(
+ bundle: UnformattedBundle, title: str, key: str, formatter=None
+ ):
+ self._append_info_field(
+ bundle,
+ title=title,
+ key=key,
+ info=info,
+ omit_sections=omit_sections,
+ formatter=formatter,
+ )
+
+ def code_formatter(text) -> Bundle:
+ return {
+ 'text/plain': self.format(text),
+ 'text/html': pylight(text)
+ }
+
+ if info["isalias"]:
+ append_field(bundle, "Repr", "string_form")
+
+ elif info['ismagic']:
+ if detail_level > 0:
+ append_field(bundle, "Source", "source", code_formatter)
+ else:
+ append_field(bundle, "Docstring", "docstring", formatter)
+ append_field(bundle, "File", "file")
+
+ elif info['isclass'] or is_simple_callable(obj):
+ # Functions, methods, classes
+ append_field(bundle, "Signature", "definition", code_formatter)
+ append_field(bundle, "Init signature", "init_definition", code_formatter)
+ append_field(bundle, "Docstring", "docstring", formatter)
+ if detail_level > 0 and info["source"]:
+ append_field(bundle, "Source", "source", code_formatter)
+ else:
+ append_field(bundle, "Init docstring", "init_docstring", formatter)
+
+ append_field(bundle, "File", "file")
+ append_field(bundle, "Type", "type_name")
+ append_field(bundle, "Subclasses", "subclasses")
+
+ else:
+ # General Python objects
+ append_field(bundle, "Signature", "definition", code_formatter)
+ append_field(bundle, "Call signature", "call_def", code_formatter)
+ append_field(bundle, "Type", "type_name")
+ append_field(bundle, "String form", "string_form")
+
+ # Namespace
+ if info["namespace"] != "Interactive":
+ append_field(bundle, "Namespace", "namespace")
+
+ append_field(bundle, "Length", "length")
+ append_field(bundle, "File", "file")
+
+ # Source or docstring, depending on detail level and whether
+ # source found.
+ if detail_level > 0 and info["source"]:
+ append_field(bundle, "Source", "source", code_formatter)
+ else:
+ append_field(bundle, "Docstring", "docstring", formatter)
+
+ append_field(bundle, "Class docstring", "class_docstring", formatter)
+ append_field(bundle, "Init docstring", "init_docstring", formatter)
+ append_field(bundle, "Call docstring", "call_docstring", formatter)
+ return bundle
+
+
+ def _get_info(
+ self,
+ obj: Any,
+ oname: str = "",
+ formatter=None,
+ info: Optional[OInfo] = None,
+ detail_level=0,
+ omit_sections=(),
+ ) -> Bundle:
+ """Retrieve an info dict and format it.
+
+ Parameters
+ ----------
+ obj : any
+ Object to inspect and return info from
+ oname : str (default: ''):
+ Name of the variable pointing to `obj`.
+ formatter : callable, optional
+ A callable used to format docstrings (see `pinfo`).
+ info : OInfo, optional
+ Already computed information.
+ detail_level : integer
+ Granularity of detail; if set to 1, more information is given.
+ omit_sections : container[str]
+ Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
+ """
+
+ info_dict = self.info(obj, oname=oname, info=info, detail_level=detail_level)
+ bundle = self._make_info_unformatted(
+ obj,
+ info_dict,
+ formatter,
+ detail_level=detail_level,
+ omit_sections=omit_sections,
+ )
+ return self.format_mime(bundle)
+
+ def pinfo(
+ self,
+ obj,
+ oname="",
+ formatter=None,
+ info: Optional[OInfo] = None,
+ detail_level=0,
+ enable_html_pager=True,
+ omit_sections=(),
+ ):
+ """Show detailed information about an object.
+
+ Optional arguments:
+
+ - oname: name of the variable pointing to the object.
+
+ - formatter: callable (optional)
+ A special formatter for docstrings.
+
+ The formatter is a callable that takes a string as an input
+ and returns either a formatted string or a mime type bundle
+ in the form of a dictionary.
+
+ Note that support for a custom formatter returning a plain string
+ instead of a mime type bundle is deprecated.
+
+ - info: a structure with some information fields which may have been
+ precomputed already.
+
+ - detail_level: if set to 1, more information is given.
+
+ - omit_sections: set of section keys and titles to omit
+ """
+ assert info is not None
+ info_b: Bundle = self._get_info(
+ obj, oname, formatter, info, detail_level, omit_sections=omit_sections
+ )
+ if not enable_html_pager:
+ del info_b["text/html"]
+ page.page(info_b)
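+
+ # Illustrative sketch: a ``formatter`` for pinfo receives a docstring and
+ # should return a mime bundle (returning a bare string still works but is
+ # deprecated). ``doc_formatter`` below is a hypothetical example.
+ #
+ #     >>> def doc_formatter(text):
+ #     ...     return {"text/plain": text,
+ #     ...             "text/html": "<pre class='doc'>%s</pre>" % html.escape(text)}
+ #     >>> oinfo = OInfo(ismagic=False, isalias=False, found=True,
+ #     ...               namespace=None, parent=None, obj=len)
+ #     >>> Inspector().pinfo(len, oname="len", formatter=doc_formatter,
+ #     ...                   info=oinfo)                      # doctest: +SKIP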
+
+ def _info(self, obj, oname="", info=None, detail_level=0):
+ """
+ Inspector.info() was likely improperly marked as deprecated
+ while only a parameter was deprecated. We "un-deprecate" it.
+ """
+
+ warnings.warn(
+ "The `Inspector.info()` method has been un-deprecated as of 8.0 "
+ "and the `formatter=` keyword removed. `Inspector._info` is now "
+ "an alias, and you can just call `.info()` directly.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.info(obj, oname=oname, info=info, detail_level=detail_level)
+
+ def info(self, obj, oname="", info=None, detail_level=0) -> Dict[str, Any]:
+ """Compute a dict with detailed information about an object.
+
+ Parameters
+ ----------
+ obj : any
+ An object to find information about
+ oname : str (default: '')
+ Name of the variable pointing to `obj`.
+ info : (default: None)
+ A struct (dict like with attr access) with some information fields
+ which may have been precomputed already.
+ detail_level : int (default:0)
+ If set to 1, more information is given.
+
+ Returns
+ -------
+ An object info dict with known fields from `info_fields`. Keys are
+ strings, values are strings or None.
+ """
+
+ if info is None:
+ ismagic = False
+ isalias = False
+ ospace = ''
+ else:
+ ismagic = info.ismagic
+ isalias = info.isalias
+ ospace = info.namespace
+
+ # Get docstring, special-casing aliases:
+ att_name = oname.split(".")[-1]
+ parents_docs = None
+ prelude = ""
+ if info and info.parent is not None and hasattr(info.parent, HOOK_NAME):
+ parents_docs_dict = getattr(info.parent, HOOK_NAME)
+ parents_docs = parents_docs_dict.get(att_name, None)
+ out = dict(
+ name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None
+ )
+
+ if parents_docs:
+ ds = parents_docs
+ elif isalias:
+ if not callable(obj):
+ try:
+ ds = "Alias to the system command:\n %s" % obj[1]
+ except:
+ ds = "Alias: " + str(obj)
+ else:
+ ds = "Alias to " + str(obj)
+ if obj.__doc__:
+ ds += "\nDocstring:\n" + obj.__doc__
+ else:
+ ds_or_None = getdoc(obj)
+ if ds_or_None is None:
+ ds = '<no docstring>'
+ else:
+ ds = ds_or_None
+
+ ds = prelude + ds
+
+ # store output in a dict, we initialize it here and fill it as we go
+
+ string_max = 200 # max size of strings to show (snipped if longer)
+ shalf = int((string_max - 5) / 2)
+
+ if ismagic:
+ out['type_name'] = 'Magic function'
+ elif isalias:
+ out['type_name'] = 'System alias'
+ else:
+ out['type_name'] = type(obj).__name__
+
+ try:
+ bclass = obj.__class__
+ out['base_class'] = str(bclass)
+ except:
+ pass
+
+ # String form, but snip if too long in ? form (full in ??)
+ if detail_level >= self.str_detail_level:
+ try:
+ ostr = str(obj)
+ str_head = 'string_form'
+ if not detail_level and len(ostr)>string_max:
+ ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
+ ostr = ("\n" + " " * len(str_head.expandtabs())).\
+ join(q.strip() for q in ostr.split("\n"))
+ out[str_head] = ostr
+ except:
+ pass
+
+ if ospace:
+ out['namespace'] = ospace
+
+ # Length (for strings and lists)
+ try:
+ out['length'] = str(len(obj))
+ except Exception:
+ pass
+
+ # Filename where object was defined
+ binary_file = False
+ fname = find_file(obj)
+ if fname is None:
+ # if anything goes wrong, we don't want to show source, so it's as
+ # if the file was binary
+ binary_file = True
+ else:
+ if fname.endswith(('.so', '.dll', '.pyd')):
+ binary_file = True
+ elif fname.endswith('<string>'):
+ fname = 'Dynamically generated function. No source code available.'
+ out['file'] = compress_user(fname)
+
+ # Original source code for a callable, class or property.
+ if detail_level:
+ # Flush the source cache because inspect can return out-of-date
+ # source
+ linecache.checkcache()
+ try:
+ if isinstance(obj, property) or not binary_file:
+ src = getsource(obj, oname)
+ if src is not None:
+ src = src.rstrip()
+ out['source'] = src
+
+ except Exception:
+ pass
+
+ # Add docstring only if no source is to be shown (avoid repetitions).
+ if ds and not self._source_contains_docstring(out.get('source'), ds):
+ out['docstring'] = ds
+
+ # Constructor docstring for classes
+ if inspect.isclass(obj):
+ out['isclass'] = True
+
+ # get the init signature:
+ try:
+ init_def = self._getdef(obj, oname)
+ except AttributeError:
+ init_def = None
+
+ # get the __init__ docstring
+ try:
+ obj_init = obj.__init__
+ except AttributeError:
+ init_ds = None
+ else:
+ if init_def is None:
+ # Get signature from init if top-level sig failed.
+ # Can happen for built-in types (list, etc.).
+ try:
+ init_def = self._getdef(obj_init, oname)
+ except AttributeError:
+ pass
+ init_ds = getdoc(obj_init)
+ # Skip Python's auto-generated docstrings
+ if init_ds == _object_init_docstring:
+ init_ds = None
+
+ if init_def:
+ out['init_definition'] = init_def
+
+ if init_ds:
+ out['init_docstring'] = init_ds
+
+ names = [sub.__name__ for sub in type.__subclasses__(obj)]
+ if len(names) < 10:
+ all_names = ', '.join(names)
+ else:
+ all_names = ', '.join(names[:10]+['...'])
+ out['subclasses'] = all_names
+ # and class docstring for instances:
+ else:
+ # reconstruct the function definition and print it:
+ defln = self._getdef(obj, oname)
+ if defln:
+ out['definition'] = defln
+
+ # First, check whether the instance docstring is identical to the
+ # class one, and print it separately if they don't coincide. In
+ # most cases they will, but it's nice to print all the info for
+ # objects which use instance-customized docstrings.
+ if ds:
+ try:
+ cls = getattr(obj,'__class__')
+ except:
+ class_ds = None
+ else:
+ class_ds = getdoc(cls)
+ # Skip Python's auto-generated docstrings
+ if class_ds in _builtin_type_docstrings:
+ class_ds = None
+ if class_ds and ds != class_ds:
+ out['class_docstring'] = class_ds
+
+ # Next, try to show constructor docstrings
+ try:
+ init_ds = getdoc(obj.__init__)
+ # Skip Python's auto-generated docstrings
+ if init_ds == _object_init_docstring:
+ init_ds = None
+ except AttributeError:
+ init_ds = None
+ if init_ds:
+ out['init_docstring'] = init_ds
+
+ # Call form docstring for callable instances
+ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
+ call_def = self._getdef(obj.__call__, oname)
+ if call_def and (call_def != out.get('definition')):
+ # it may never be the case that call def and definition differ,
+ # but don't include the same signature twice
+ out['call_def'] = call_def
+ call_ds = getdoc(obj.__call__)
+ # Skip Python's auto-generated docstrings
+ if call_ds == _func_call_docstring:
+ call_ds = None
+ if call_ds:
+ out['call_docstring'] = call_ds
+
+ return object_info(**out)
+
+ @staticmethod
+ def _source_contains_docstring(src, doc):
+ """
+ Check whether the source *src* contains the docstring *doc*.
+
+ This is a helper function to skip displaying the docstring if the
+ source already contains it, avoiding repetition of information.
+ """
+ try:
+ (def_node,) = ast.parse(dedent(src)).body
+ return ast.get_docstring(def_node) == doc # type: ignore[arg-type]
+ except Exception:
+ # The source can become invalid or even non-existent (because it
+ # is re-fetched from the source file), so the above code can fail in
+ # arbitrary ways.
+ return False
+
+ def psearch(self,pattern,ns_table,ns_search=[],
+ ignore_case=False,show_all=False, *, list_types=False):
+ """Search namespaces with wildcards for objects.
+
+ Arguments:
+
+ - pattern: string containing shell-like wildcards to use in namespace
+ searches and optionally a type specification to narrow the search to
+ objects of that type.
+
+ - ns_table: dict of name->namespaces for search.
+
+ Optional arguments:
+
+ - ns_search: list of namespace names to include in search.
+
+ - ignore_case(False): make the search case-insensitive.
+
+ - show_all(False): show all names, including those starting with
+ underscores.
+
+ - list_types(False): list all available object types for object matching.
+ """
+ #print 'ps pattern:<%r>' % pattern # dbg
+
+ # defaults
+ type_pattern = 'all'
+ filter = ''
+
+ # list all object types
+ if list_types:
+ page.page('\n'.join(sorted(typestr2type)))
+ return
+
+ cmds = pattern.split()
+ len_cmds = len(cmds)
+ if len_cmds == 1:
+ # Only filter pattern given
+ filter = cmds[0]
+ elif len_cmds == 2:
+ # Both filter and type specified
+ filter,type_pattern = cmds
+ else:
+ raise ValueError('invalid argument string for psearch: <%s>' %
+ pattern)
+
+ # filter search namespaces
+ for name in ns_search:
+ if name not in ns_table:
+ raise ValueError('invalid namespace <%s>. Valid names: %s' %
+ (name,ns_table.keys()))
+
+ #print 'type_pattern:',type_pattern # dbg
+ search_result, namespaces_seen = set(), set()
+ for ns_name in ns_search:
+ ns = ns_table[ns_name]
+ # Normally, locals and globals are the same, so we just check one.
+ if id(ns) in namespaces_seen:
+ continue
+ namespaces_seen.add(id(ns))
+ tmp_res = list_namespace(ns, type_pattern, filter,
+ ignore_case=ignore_case, show_all=show_all)
+ search_result.update(tmp_res)
+
+ page.page('\n'.join(sorted(search_result)))
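+
+ # Illustrative sketch of the pattern grammar accepted above,
+ # "<name wildcard> [<type name>]":
+ #
+ # pattern="a*" -> all names beginning with "a"
+ # pattern="a* function" -> only those that are functions
+ # list_types=True -> page the recognised type names instead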
+
+
+def _render_signature(obj_signature, obj_name) -> str:
+ """
+ This was mostly taken from inspect.Signature.__str__.
+ Look there for the comments.
+ The only change is to add linebreaks when this gets too long.
+ """
+ result = []
+ pos_only = False
+ kw_only = True
+ for param in obj_signature.parameters.values():
+ if param.kind == inspect.Parameter.POSITIONAL_ONLY:
+ pos_only = True
+ elif pos_only:
+ result.append('/')
+ pos_only = False
+
+ if param.kind == inspect.Parameter.VAR_POSITIONAL:
+ kw_only = False
+ elif param.kind == inspect.Parameter.KEYWORD_ONLY and kw_only:
+ result.append('*')
+ kw_only = False
+
+ result.append(str(param))
+
+ if pos_only:
+ result.append('/')
+
+ # add up name, parameters, braces (2), and commas
+ if len(obj_name) + sum(len(r) + 2 for r in result) > 75:
+ # This doesn’t fit behind “Signature: ” in an inspect window.
+ rendered = '{}(\n{})'.format(obj_name, ''.join(
+ ' {},\n'.format(r) for r in result)
+ )
+ else:
+ rendered = '{}({})'.format(obj_name, ', '.join(result))
+
+ if obj_signature.return_annotation is not inspect._empty:
+ anno = inspect.formatannotation(obj_signature.return_annotation)
+ rendered += ' -> {}'.format(anno)
+
+ return rendered
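+
+
+# Illustrative sketch: short signatures stay on one line; long ones are split
+# one parameter per line. ``_f`` below is a hypothetical example.
+#
+#     >>> import inspect
+#     >>> def _f(alpha, beta=2, *args, gamma, **kwargs):
+#     ...     pass
+#     >>> _render_signature(inspect.signature(_f), "_f")
+#     '_f(alpha, beta=2, *args, gamma, **kwargs)'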
diff --git a/contrib/python/ipython/py3/IPython/core/page.py b/contrib/python/ipython/py3/IPython/core/page.py
new file mode 100644
index 0000000000..d3e6a9eef5
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/page.py
@@ -0,0 +1,348 @@
+# encoding: utf-8
+"""
+Paging capabilities for IPython.core
+
+Notes
+-----
+
+For now this uses IPython hooks, so it can't be in IPython.utils. If we can get
+rid of that dependency, we could move it there.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import os
+import io
+import re
+import sys
+import tempfile
+import subprocess
+
+from io import UnsupportedOperation
+from pathlib import Path
+
+from IPython import get_ipython
+from IPython.display import display
+from IPython.core.error import TryNext
+from IPython.utils.data import chop
+from IPython.utils.process import system
+from IPython.utils.terminal import get_terminal_size
+from IPython.utils import py3compat
+
+
+def display_page(strng, start=0, screen_lines=25):
+ """Just display, no paging. screen_lines is ignored."""
+ if isinstance(strng, dict):
+ data = strng
+ else:
+ if start:
+ strng = u'\n'.join(strng.splitlines()[start:])
+ data = { 'text/plain': strng }
+ display(data, raw=True)
+
+
+def as_hook(page_func):
+ """Wrap a pager func to strip the `self` arg
+
+ so it can be called as a hook.
+ """
+ return lambda self, *args, **kwargs: page_func(*args, **kwargs)
+
+
+esc_re = re.compile(r"(\x1b[^m]+m)")
+
+def page_dumb(strng, start=0, screen_lines=25):
+ """Very dumb 'pager' in Python, for when nothing else works.
+
+ Only moves forward, same interface as page(), except for pager_cmd and
+ mode.
+ """
+ if isinstance(strng, dict):
+ strng = strng.get('text/plain', '')
+ out_ln = strng.splitlines()[start:]
+ screens = chop(out_ln,screen_lines-1)
+ if len(screens) == 1:
+ print(os.linesep.join(screens[0]))
+ else:
+ last_escape = ""
+ for scr in screens[0:-1]:
+ hunk = os.linesep.join(scr)
+ print(last_escape + hunk)
+ if not page_more():
+ return
+ esc_list = esc_re.findall(hunk)
+ if len(esc_list) > 0:
+ last_escape = esc_list[-1]
+ print(last_escape + os.linesep.join(screens[-1]))
+
+def _detect_screen_size(screen_lines_def):
+ """Attempt to work out the number of lines on the screen.
+
+ This is called by page(). It can raise an error (e.g. when run in the
+ test suite), so it's separated out so it can easily be called in a try block.
+ """
+ TERM = os.environ.get('TERM',None)
+ if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
+ # curses causes problems on many terminals other than xterm, and
+ # some termios calls lock up on Sun OS5.
+ return screen_lines_def
+
+ try:
+ import termios
+ import curses
+ except ImportError:
+ return screen_lines_def
+
+ # There is a bug in curses, where *sometimes* it fails to properly
+ # initialize, and then after the endwin() call is made, the
+ # terminal is left in an unusable state. Rather than trying to
+ # check every time for this (by requesting and comparing termios
+ # flags each time), we just save the initial terminal state and
+ # unconditionally reset it every time. It's cheaper than making
+ # the checks.
+ try:
+ term_flags = termios.tcgetattr(sys.stdout)
+ except termios.error as err:
+ # can fail on Linux 2.6, pager_page will catch the TypeError
+ raise TypeError('termios error: {0}'.format(err)) from err
+
+ try:
+ scr = curses.initscr()
+ except AttributeError:
+ # Curses on Solaris may not be complete, so we can't use it there
+ return screen_lines_def
+
+ screen_lines_real,screen_cols = scr.getmaxyx()
+ curses.endwin()
+
+ # Restore terminal state in case endwin() didn't.
+ termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
+ # Now we have what we needed: the screen size in rows/columns
+ return screen_lines_real
+ #print '***Screen size:',screen_lines_real,'lines x',\
+ #screen_cols,'columns.' # dbg
+
+def pager_page(strng, start=0, screen_lines=0, pager_cmd=None):
+ """Display a string, piping through a pager after a certain length.
+
+ strng can be a mime-bundle dict, supplying multiple representations,
+ keyed by mime-type.
+
+ The screen_lines parameter specifies the number of *usable* lines of your
+ terminal screen (total lines minus lines you need to reserve to show other
+ information).
+
+ If you set screen_lines to a number <=0, page() will try to auto-determine
+ your screen size and will only use up to (screen_size+screen_lines) for
+ printing, paging after that. That is, if you want auto-detection but need
+ to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
+ auto-detection without any lines reserved simply use screen_lines = 0.
+
+ If a string won't fit in the allowed lines, it is sent through the
+ specified pager command. If none given, look for PAGER in the environment,
+ and ultimately default to less.
+
+ If no system pager works, the string is sent through a very simplistic
+ 'dumb pager' written in Python.
+ """
+
+ # for compatibility with mime-bundle form:
+ if isinstance(strng, dict):
+ strng = strng['text/plain']
+
+ # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
+ TERM = os.environ.get('TERM','dumb')
+ if TERM in ['dumb','emacs'] and os.name != 'nt':
+ print(strng)
+ return
+ # chop off the topmost part of the string we don't want to see
+ str_lines = strng.splitlines()[start:]
+ str_toprint = os.linesep.join(str_lines)
+ num_newlines = len(str_lines)
+ len_str = len(str_toprint)
+
+ # Dumb heuristics to guesstimate number of on-screen lines the string
+ # takes. Very basic, but good enough for docstrings in reasonable
+ # terminals. If someone later feels like refining it, it's not hard.
+ numlines = max(num_newlines,int(len_str/80)+1)
+
+ screen_lines_def = get_terminal_size()[1]
+
+ # auto-determine screen size
+ if screen_lines <= 0:
+ try:
+ screen_lines += _detect_screen_size(screen_lines_def)
+ except (TypeError, UnsupportedOperation):
+ print(str_toprint)
+ return
+
+ #print 'numlines',numlines,'screenlines',screen_lines # dbg
+ if numlines <= screen_lines :
+ #print '*** normal print' # dbg
+ print(str_toprint)
+ else:
+ # Try to open pager and default to internal one if that fails.
+ # All failure modes are tagged as 'retval=1', to match the return
+ # value of a failed system command. If any intermediate attempt
+ # sets retval to 1, at the end we resort to our own page_dumb() pager.
+ pager_cmd = get_pager_cmd(pager_cmd)
+ pager_cmd += ' ' + get_pager_start(pager_cmd,start)
+ if os.name == 'nt':
+ if pager_cmd.startswith('type'):
+ # The default WinXP 'type' command is failing on complex strings.
+ retval = 1
+ else:
+ fd, tmpname = tempfile.mkstemp('.txt')
+ tmppath = Path(tmpname)
+ try:
+ os.close(fd)
+ with tmppath.open("wt", encoding="utf-8") as tmpfile:
+ tmpfile.write(strng)
+ cmd = "%s < %s" % (pager_cmd, tmppath)
+ # tmpfile needs to be closed for windows
+ if os.system(cmd):
+ retval = 1
+ else:
+ retval = None
+ finally:
+ Path.unlink(tmppath)
+ else:
+ try:
+ retval = None
+ # Emulate os.popen, but redirect stderr
+ proc = subprocess.Popen(
+ pager_cmd,
+ shell=True,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.DEVNULL,
+ )
+ pager = os._wrap_close(
+ io.TextIOWrapper(proc.stdin, encoding="utf-8"), proc
+ )
+ try:
+ pager_encoding = pager.encoding or sys.stdout.encoding
+ pager.write(strng)
+ finally:
+ retval = pager.close()
+ except IOError as msg: # broken pipe when user quits
+ if msg.args == (32, 'Broken pipe'):
+ retval = None
+ else:
+ retval = 1
+ except OSError:
+ # Other strange problems, sometimes seen in Win2k/cygwin
+ retval = 1
+ if retval is not None:
+ page_dumb(strng,screen_lines=screen_lines)
+
+
+def page(data, start=0, screen_lines=0, pager_cmd=None):
+ """Display content in a pager, piping through a pager after a certain length.
+
+ data can be a mime-bundle dict, supplying multiple representations,
+ keyed by mime-type, or text.
+
+ Pager is dispatched via the `show_in_pager` IPython hook.
+ If no hook is registered, `pager_page` will be used.
+ """
+ # Some routines may auto-compute start offsets incorrectly and pass a
+ # negative value. Offset to 0 for robustness.
+ start = max(0, start)
+
+ # first, try the hook
+ ip = get_ipython()
+ if ip:
+ try:
+ ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines)
+ return
+ except TryNext:
+ pass
+
+ # fallback on default pager
+ return pager_page(data, start, screen_lines, pager_cmd)
+
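+# Illustrative sketch: a custom pager can be installed through the
+# ``show_in_pager`` hook that ``page`` tries first; ``my_pager`` below is a
+# hypothetical example.
+#
+#     >>> def my_pager(data, start=0, screen_lines=0):
+#     ...     text = data["text/plain"] if isinstance(data, dict) else data
+#     ...     print(text)
+#     >>> ip = get_ipython()                                  # doctest: +SKIP
+#     >>> ip.set_hook("show_in_pager", as_hook(my_pager))     # doctest: +SKIP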
+
+def page_file(fname, start=0, pager_cmd=None):
+ """Page a file, using an optional pager command and starting line.
+ """
+
+ pager_cmd = get_pager_cmd(pager_cmd)
+ pager_cmd += ' ' + get_pager_start(pager_cmd,start)
+
+ try:
+ if os.environ['TERM'] in ['emacs','dumb']:
+ raise EnvironmentError
+ system(pager_cmd + ' ' + fname)
+ except:
+ try:
+ if start > 0:
+ start -= 1
+ page(open(fname, encoding="utf-8").read(), start)
+ except:
+ print('Unable to show file',repr(fname))
+
+
+def get_pager_cmd(pager_cmd=None):
+ """Return a pager command.
+
+ Makes some attempts at finding an OS-correct one.
+ """
+ if os.name == 'posix':
+ default_pager_cmd = 'less -R' # -R for color control sequences
+ elif os.name in ['nt','dos']:
+ default_pager_cmd = 'type'
+
+ if pager_cmd is None:
+ try:
+ pager_cmd = os.environ['PAGER']
+ except:
+ pager_cmd = default_pager_cmd
+
+ if pager_cmd == 'less' and '-r' not in os.environ.get('LESS', '').lower():
+ pager_cmd += ' -R'
+
+ return pager_cmd
+
+
+def get_pager_start(pager, start):
+ """Return the string for paging files with an offset.
+
+ This is the '+N' argument which less and more (under Unix) accept.
+ """
+
+ if pager in ['less','more']:
+ if start:
+ start_string = '+' + str(start)
+ else:
+ start_string = ''
+ else:
+ start_string = ''
+ return start_string
+
+
+# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
+if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
+ import msvcrt
+ def page_more():
+ """ Smart pausing between pages
+
+ @return: True if need print more lines, False if quit
+ """
+ sys.stdout.write('---Return to continue, q to quit--- ')
+ ans = msvcrt.getwch()
+ if ans in ("q", "Q"):
+ result = False
+ else:
+ result = True
+ sys.stdout.write("\b"*37 + " "*37 + "\b"*37)
+ return result
+else:
+ def page_more():
+ ans = py3compat.input('---Return to continue, q to quit--- ')
+ if ans.lower().startswith('q'):
+ return False
+ else:
+ return True
diff --git a/contrib/python/ipython/py3/IPython/core/payload.py b/contrib/python/ipython/py3/IPython/core/payload.py
new file mode 100644
index 0000000000..6818be1537
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/payload.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+"""Payload system for IPython.
+
+Authors:
+
+* Fernando Perez
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from traitlets.config.configurable import Configurable
+from traitlets import List
+
+#-----------------------------------------------------------------------------
+# Main payload class
+#-----------------------------------------------------------------------------
+
+class PayloadManager(Configurable):
+
+ _payload = List([])
+
+ def write_payload(self, data, single=True):
+ """Include or update the specified `data` payload in the PayloadManager.
+
+ If a previous payload with the same source exists and `single` is True,
+ it will be overwritten with the new one.
+ """
+
+ if not isinstance(data, dict):
+ raise TypeError('Each payload write must be a dict, got: %r' % data)
+
+ if single and 'source' in data:
+ source = data['source']
+ for i, pl in enumerate(self._payload):
+ if 'source' in pl and pl['source'] == source:
+ self._payload[i] = data
+ return
+
+ self._payload.append(data)
+
+ def read_payload(self):
+ return self._payload
+
+ def clear_payload(self):
+ self._payload = []
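+
+
+# Illustrative sketch: payloads that share a ``source`` replace one another
+# when ``single=True`` (the default), so repeated pages from the same source
+# do not pile up.
+#
+#     >>> pm = PayloadManager()
+#     >>> pm.write_payload({"source": "page", "data": {"text/plain": "first"}})
+#     >>> pm.write_payload({"source": "page", "data": {"text/plain": "second"}})
+#     >>> len(pm.read_payload())
+#     1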
diff --git a/contrib/python/ipython/py3/IPython/core/payloadpage.py b/contrib/python/ipython/py3/IPython/core/payloadpage.py
new file mode 100644
index 0000000000..4958108076
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/payloadpage.py
@@ -0,0 +1,51 @@
+# encoding: utf-8
+"""A payload based version of page."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import warnings
+from IPython.core.getipython import get_ipython
+
+
+def page(strng, start=0, screen_lines=0, pager_cmd=None):
+ """Print a string, piping through a pager.
+
+ This version ignores the screen_lines and pager_cmd arguments and uses
+ IPython's payload system instead.
+
+ Parameters
+ ----------
+ strng : str or mime-dict
+ Text to page, or a mime-type keyed dict of already formatted data.
+ start : int
+ Starting line at which to place the display.
+ """
+
+ # Some routines may auto-compute start offsets incorrectly and pass a
+ # negative value. Offset to 0 for robustness.
+ start = max(0, start)
+ shell = get_ipython()
+
+ if isinstance(strng, dict):
+ data = strng
+ else:
+ data = {'text/plain' : strng}
+ payload = dict(
+ source='page',
+ data=data,
+ start=start,
+ )
+ shell.payload_manager.write_payload(payload)
+
+
+def install_payload_page():
+ """DEPRECATED, use show_in_pager hook
+
+ Install this version of page as IPython.core.page.page.
+ """
+ warnings.warn("""install_payload_page is deprecated.
+ Use `ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))`
+ """)
+ from IPython.core import page as corepage
+ corepage.page = page
diff --git a/contrib/python/ipython/py3/IPython/core/prefilter.py b/contrib/python/ipython/py3/IPython/core/prefilter.py
new file mode 100644
index 0000000000..e7e82e3377
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/prefilter.py
@@ -0,0 +1,700 @@
+# encoding: utf-8
+"""
+Prefiltering components.
+
+Prefilters transform user input before it is exec'd by Python. These
+transforms are used to implement additional syntax such as !ls and %magic.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from keyword import iskeyword
+import re
+
+from .autocall import IPyAutocall
+from traitlets.config.configurable import Configurable
+from .inputtransformer2 import (
+ ESC_MAGIC,
+ ESC_QUOTE,
+ ESC_QUOTE2,
+ ESC_PAREN,
+)
+from .macro import Macro
+from .splitinput import LineInfo
+
+from traitlets import (
+ List, Integer, Unicode, Bool, Instance, CRegExp
+)
+
+#-----------------------------------------------------------------------------
+# Global utilities, errors and constants
+#-----------------------------------------------------------------------------
+
+
+class PrefilterError(Exception):
+ pass
+
+
+# RegExp to identify potential function names
+re_fun_name = re.compile(r'[^\W\d]([\w.]*) *$')
+
+# RegExp to exclude strings with this start from autocalling. In
+# particular, all binary operators should be excluded, so that if foo is
+# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
+# characters '!=()' don't need to be checked for, as the checkPythonChars
+# routine explicitly does so, to catch direct calls and rebindings of
+# existing names.
+
+# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
+# it affects the rest of the group in square brackets.
+re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
+ r'|^is |^not |^in |^and |^or ')
+
+# try to catch also methods for stuff in lists/tuples/dicts: off
+# (experimental). For this to work, the line_split regexp would need
+# to be modified so it wouldn't break things at '['. That line is
+# nasty enough that I shouldn't change it until I can test it _well_.
+#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
+
+
+# Handler Check Utilities
+def is_shadowed(identifier, ip):
+ """Is the given identifier defined in one of the namespaces which shadow
+ the alias and magic namespaces? Note that an identifier is different
+ from ifun, because it cannot contain a '.' character."""
+ # This is much safer than calling ofind, which can change state
+ return (identifier in ip.user_ns \
+ or identifier in ip.user_global_ns \
+ or identifier in ip.ns_table['builtin']\
+ or iskeyword(identifier))
+
+
+#-----------------------------------------------------------------------------
+# Main Prefilter manager
+#-----------------------------------------------------------------------------
+
+
+class PrefilterManager(Configurable):
+ """Main prefilter component.
+
+ The IPython prefilter is run on all user input before it is executed. The
+ prefilter consumes lines of input and produces transformed lines of
+ input.
+
+ The implementation consists of two phases:
+
+ 1. Transformers
+ 2. Checkers and handlers
+
+ Over time, we plan on deprecating the checkers and handlers and doing
+ everything in the transformers.
+
+ The transformers are instances of :class:`PrefilterTransformer` and have
+ a single method :meth:`transform` that takes a line and returns a
+ transformed line. The transformation can be accomplished using any
+ tool, but our current ones use regular expressions for speed.
+
+ After all the transformers have been run, the line is fed to the checkers,
+ which are instances of :class:`PrefilterChecker`. The line is passed to
+ the :meth:`check` method, which either returns `None` or a
+ :class:`PrefilterHandler` instance. If `None` is returned, the other
+ checkers are tried. If a :class:`PrefilterHandler` instance is returned,
+ the line is passed to the :meth:`handle` method of the returned
+ handler and no further checkers are tried.
+
+ Both transformers and checkers have a `priority` attribute, that determines
+ the order in which they are called. Smaller priorities are tried first.
+
+ Both transformers and checkers also have an `enabled` attribute, which is
+ a boolean that determines if the instance is used.
+
+ Users or developers can change the priority or enabled attribute of
+ transformers or checkers, but they must call the :meth:`sort_checkers`
+ or :meth:`sort_transformers` method after changing the priority.
+ """
+
+ multi_line_specials = Bool(True).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(PrefilterManager, self).__init__(shell=shell, **kwargs)
+ self.shell = shell
+ self._transformers = []
+ self.init_handlers()
+ self.init_checkers()
+
+ #-------------------------------------------------------------------------
+ # API for managing transformers
+ #-------------------------------------------------------------------------
+
+ def sort_transformers(self):
+ """Sort the transformers by priority.
+
+ This must be called after the priority of a transformer is changed.
+ The :meth:`register_transformer` method calls this automatically.
+ """
+ self._transformers.sort(key=lambda x: x.priority)
+
+ @property
+ def transformers(self):
+ """Return a list of checkers, sorted by priority."""
+ return self._transformers
+
+ def register_transformer(self, transformer):
+ """Register a transformer instance."""
+ if transformer not in self._transformers:
+ self._transformers.append(transformer)
+ self.sort_transformers()
+
+ def unregister_transformer(self, transformer):
+ """Unregister a transformer instance."""
+ if transformer in self._transformers:
+ self._transformers.remove(transformer)
+
+ #-------------------------------------------------------------------------
+ # API for managing checkers
+ #-------------------------------------------------------------------------
+
+ def init_checkers(self):
+ """Create the default checkers."""
+ self._checkers = []
+ for checker in _default_checkers:
+ checker(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ def sort_checkers(self):
+ """Sort the checkers by priority.
+
+ This must be called after the priority of a checker is changed.
+ The :meth:`register_checker` method calls this automatically.
+ """
+ self._checkers.sort(key=lambda x: x.priority)
+
+ @property
+ def checkers(self):
+ """Return a list of checkers, sorted by priority."""
+ return self._checkers
+
+ def register_checker(self, checker):
+ """Register a checker instance."""
+ if checker not in self._checkers:
+ self._checkers.append(checker)
+ self.sort_checkers()
+
+ def unregister_checker(self, checker):
+ """Unregister a checker instance."""
+ if checker in self._checkers:
+ self._checkers.remove(checker)
+
+ #-------------------------------------------------------------------------
+ # API for managing handlers
+ #-------------------------------------------------------------------------
+
+ def init_handlers(self):
+ """Create the default handlers."""
+ self._handlers = {}
+ self._esc_handlers = {}
+ for handler in _default_handlers:
+ handler(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ @property
+ def handlers(self):
+ """Return a dict of all the handlers."""
+ return self._handlers
+
+ def register_handler(self, name, handler, esc_strings):
+ """Register a handler instance by name with esc_strings."""
+ self._handlers[name] = handler
+ for esc_str in esc_strings:
+ self._esc_handlers[esc_str] = handler
+
+ def unregister_handler(self, name, handler, esc_strings):
+ """Unregister a handler instance by name with esc_strings."""
+ try:
+ del self._handlers[name]
+ except KeyError:
+ pass
+ for esc_str in esc_strings:
+ h = self._esc_handlers.get(esc_str)
+ if h is handler:
+ del self._esc_handlers[esc_str]
+
+ def get_handler_by_name(self, name):
+ """Get a handler by its name."""
+ return self._handlers.get(name)
+
+ def get_handler_by_esc(self, esc_str):
+ """Get a handler by its escape string."""
+ return self._esc_handlers.get(esc_str)
+
+ #-------------------------------------------------------------------------
+ # Main prefiltering API
+ #-------------------------------------------------------------------------
+
+ def prefilter_line_info(self, line_info):
+ """Prefilter a line that has been converted to a LineInfo object.
+
+ This implements the checker/handler part of the prefilter pipe.
+ """
+ # print "prefilter_line_info: ", line_info
+ handler = self.find_handler(line_info)
+ return handler.handle(line_info)
+
+ def find_handler(self, line_info):
+ """Find a handler for the line_info by trying checkers."""
+ for checker in self.checkers:
+ if checker.enabled:
+ handler = checker.check(line_info)
+ if handler:
+ return handler
+ return self.get_handler_by_name('normal')
+
+ def transform_line(self, line, continue_prompt):
+ """Calls the enabled transformers in order of increasing priority."""
+ for transformer in self.transformers:
+ if transformer.enabled:
+ line = transformer.transform(line, continue_prompt)
+ return line
+
+ def prefilter_line(self, line, continue_prompt=False):
+ """Prefilter a single input line as text.
+
+ This method prefilters a single line of text by calling the
+ transformers and then the checkers/handlers.
+ """
+
+ # print "prefilter_line: ", line, continue_prompt
+ # All handlers *must* return a value, even if it's blank ('').
+
+ # save the line away in case we crash, so the post-mortem handler can
+ # record it
+ self.shell._last_input_line = line
+
+ if not line:
+ # Return immediately on purely empty lines, so that if the user
+ # previously typed some whitespace that started a continuation
+ # prompt, he can break out of that loop with just an empty line.
+ # This is how the default python prompt works.
+ return ''
+
+ # At this point, we invoke our transformers.
+ if not continue_prompt or (continue_prompt and self.multi_line_specials):
+ line = self.transform_line(line, continue_prompt)
+
+ # Now we compute line_info for the checkers and handlers
+ line_info = LineInfo(line, continue_prompt)
+
+ # the input history needs to track even empty lines
+ stripped = line.strip()
+
+ normal_handler = self.get_handler_by_name('normal')
+ if not stripped:
+ return normal_handler.handle(line_info)
+
+ # special handlers are only allowed for single line statements
+ if continue_prompt and not self.multi_line_specials:
+ return normal_handler.handle(line_info)
+
+ prefiltered = self.prefilter_line_info(line_info)
+ # print "prefiltered line: %r" % prefiltered
+ return prefiltered
+
+ def prefilter_lines(self, lines, continue_prompt=False):
+ """Prefilter multiple input lines of text.
+
+ This is the main entry point for prefiltering multiple lines of
+ input. This simply calls :meth:`prefilter_line` for each line of
+ input.
+
+ This covers cases where there are multiple lines in the user entry,
+ which is the case when the user goes back to a multiline history
+ entry and presses enter.
+ """
+ llines = lines.rstrip('\n').split('\n')
+ # We can get multiple lines in one shot, where multiline input 'blends'
+ # into one line, in cases like recalling from the readline history
+ # buffer. We need to make sure that in such cases, we correctly
+ # communicate downstream which line is first and which are continuation
+ # ones.
+ if len(llines) > 1:
+ out = '\n'.join([self.prefilter_line(line, lnum>0)
+ for lnum, line in enumerate(llines) ])
+ else:
+ out = self.prefilter_line(llines[0], continue_prompt)
+
+ return out
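+
+# Illustrative sketch: the checker/handler pipeline described above can be
+# extended by subclassing PrefilterChecker (defined below) and instantiating
+# it against a live shell. ``ip`` and ``ShoutChecker`` are hypothetical.
+#
+#     >>> class ShoutChecker(PrefilterChecker):               # doctest: +SKIP
+#     ...     priority = Integer(50).tag(config=True)
+#     ...     def check(self, line_info):
+#     ...         if line_info.line.endswith("!!"):
+#     ...             return self.prefilter_manager.get_handler_by_name("normal")
+#     ...         return None
+#     >>> ShoutChecker(shell=ip, prefilter_manager=ip.prefilter_manager)  # doctest: +SKIP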
+
+#-----------------------------------------------------------------------------
+# Prefilter transformers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterTransformer(Configurable):
+ """Transform a line of user input."""
+
+ priority = Integer(100).tag(config=True)
+ # Transformers don't currently use shell or prefilter_manager, but as we
+ # move away from checkers and handlers, they will need them.
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ enabled = Bool(True).tag(config=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterTransformer, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_transformer(self)
+
+ def transform(self, line, continue_prompt):
+ """Transform a line, returning the new one."""
+ return None
+
+ def __repr__(self):
+ return "<%s(priority=%r, enabled=%r)>" % (
+ self.__class__.__name__, self.priority, self.enabled)
+
+
+#-----------------------------------------------------------------------------
+# Prefilter checkers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterChecker(Configurable):
+ """Inspect an input line and return a handler for that line."""
+
+ priority = Integer(100).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ enabled = Bool(True).tag(config=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterChecker, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_checker(self)
+
+ def check(self, line_info):
+ """Inspect line_info and return a handler instance or None."""
+ return None
+
+ def __repr__(self):
+ return "<%s(priority=%r, enabled=%r)>" % (
+ self.__class__.__name__, self.priority, self.enabled)
+
+
+class EmacsChecker(PrefilterChecker):
+
+ priority = Integer(100).tag(config=True)
+ enabled = Bool(False).tag(config=True)
+
+ def check(self, line_info):
+ "Emacs ipython-mode tags certain input lines."
+ if line_info.line.endswith('# PYTHON-MODE'):
+ return self.prefilter_manager.get_handler_by_name('emacs')
+ else:
+ return None
+
+
+class MacroChecker(PrefilterChecker):
+
+ priority = Integer(250).tag(config=True)
+
+ def check(self, line_info):
+ obj = self.shell.user_ns.get(line_info.ifun)
+ if isinstance(obj, Macro):
+ return self.prefilter_manager.get_handler_by_name('macro')
+ else:
+ return None
+
+
+class IPyAutocallChecker(PrefilterChecker):
+
+ priority = Integer(300).tag(config=True)
+
+ def check(self, line_info):
+ "Instances of IPyAutocall in user_ns get autocalled immediately"
+ obj = self.shell.user_ns.get(line_info.ifun, None)
+ if isinstance(obj, IPyAutocall):
+ obj.set_ip(self.shell)
+ return self.prefilter_manager.get_handler_by_name('auto')
+ else:
+ return None
+
+
+class AssignmentChecker(PrefilterChecker):
+
+ priority = Integer(600).tag(config=True)
+
+ def check(self, line_info):
+ """Check to see if user is assigning to a var for the first time, in
+ which case we want to avoid any sort of automagic / autocall games.
+
+        This allows users to assign true python variables to alias or magic
+        names (the magic/alias systems always take second seat to true
+ python code). E.g. ls='hi', or ls,that=1,2"""
+ if line_info.the_rest:
+ if line_info.the_rest[0] in '=,':
+ return self.prefilter_manager.get_handler_by_name('normal')
+ else:
+ return None
+
+
+class AutoMagicChecker(PrefilterChecker):
+
+ priority = Integer(700).tag(config=True)
+
+ def check(self, line_info):
+ """If the ifun is magic, and automagic is on, run it. Note: normal,
+ non-auto magic would already have been triggered via '%' in
+ check_esc_chars. This just checks for automagic. Also, before
+ triggering the magic handler, make sure that there is nothing in the
+ user namespace which could shadow it."""
+ if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
+ return None
+
+ # We have a likely magic method. Make sure we should actually call it.
+ if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
+ return None
+
+ head = line_info.ifun.split('.',1)[0]
+ if is_shadowed(head, self.shell):
+ return None
+
+ return self.prefilter_manager.get_handler_by_name('magic')
+
+
+class PythonOpsChecker(PrefilterChecker):
+
+ priority = Integer(900).tag(config=True)
+
+ def check(self, line_info):
+ """If the 'rest' of the line begins with a function call or pretty much
+ any python operator, we should simply execute the line (regardless of
+ whether or not there's a possible autocall expansion). This avoids
+        spurious (and very confusing) getattr() accesses."""
+ if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
+ return self.prefilter_manager.get_handler_by_name('normal')
+ else:
+ return None
+
+
+class AutocallChecker(PrefilterChecker):
+
+ priority = Integer(1000).tag(config=True)
+
+ function_name_regexp = CRegExp(re_fun_name,
+ help="RegExp to identify potential function names."
+ ).tag(config=True)
+ exclude_regexp = CRegExp(re_exclude_auto,
+ help="RegExp to exclude strings with this start from autocalling."
+ ).tag(config=True)
+
+ def check(self, line_info):
+ "Check if the initial word/function is callable and autocall is on."
+ if not self.shell.autocall:
+ return None
+
+ oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
+ if not oinfo.found:
+ return None
+
+ ignored_funs = ['b', 'f', 'r', 'u', 'br', 'rb', 'fr', 'rf']
+ ifun = line_info.ifun
+ line = line_info.line
+ if ifun.lower() in ignored_funs and (line.startswith(ifun + "'") or line.startswith(ifun + '"')):
+ return None
+
+ if (
+ callable(oinfo.obj)
+ and (not self.exclude_regexp.match(line_info.the_rest))
+ and self.function_name_regexp.match(line_info.ifun)
+ ):
+ return self.prefilter_manager.get_handler_by_name("auto")
+ else:
+ return None
+
+
+#-----------------------------------------------------------------------------
+# Prefilter handlers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterHandler(Configurable):
+
+ handler_name = Unicode('normal')
+ esc_strings = List([])
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterHandler, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_handler(
+ self.handler_name,
+ self,
+ self.esc_strings
+ )
+
+ def handle(self, line_info):
+        """Handle normal input lines. Use as a template for handlers."""
+        # print "normal: ", line_info
+
+ # With autoindent on, we need some way to exit the input loop, and I
+ # don't want to force the user to have to backspace all the way to
+ # clear the line. The rule will be in this case, that either two
+ # lines of pure whitespace in a row, or a line of pure whitespace but
+ # of a size different to the indent level, will exit the input loop.
+ line = line_info.line
+ continue_prompt = line_info.continue_prompt
+
+ if (continue_prompt and
+ self.shell.autoindent and
+ line.isspace() and
+ 0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
+ line = ''
+
+ return line
+
+ def __str__(self):
+ return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
+
+
+class MacroHandler(PrefilterHandler):
+ handler_name = Unicode("macro")
+
+ def handle(self, line_info):
+ obj = self.shell.user_ns.get(line_info.ifun)
+ pre_space = line_info.pre_whitespace
+ line_sep = "\n" + pre_space
+ return pre_space + line_sep.join(obj.value.splitlines())
+
+
+class MagicHandler(PrefilterHandler):
+
+ handler_name = Unicode('magic')
+ esc_strings = List([ESC_MAGIC])
+
+ def handle(self, line_info):
+ """Execute magic functions."""
+ ifun = line_info.ifun
+ the_rest = line_info.the_rest
+        # Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_arg_s = ifun + " " + the_rest
+ t_magic_name, _, t_magic_arg_s = t_arg_s.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ cmd = '%sget_ipython().run_line_magic(%r, %r)' % (line_info.pre_whitespace, t_magic_name, t_magic_arg_s)
+ return cmd
+
+
+class AutoHandler(PrefilterHandler):
+
+ handler_name = Unicode('auto')
+ esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
+
+ def handle(self, line_info):
+ """Handle lines which can be auto-executed, quoting if requested."""
+ line = line_info.line
+ ifun = line_info.ifun
+ the_rest = line_info.the_rest
+ esc = line_info.esc
+ continue_prompt = line_info.continue_prompt
+ obj = line_info.ofind(self.shell).obj
+
+ # This should only be active for single-line input!
+ if continue_prompt:
+ return line
+
+ force_auto = isinstance(obj, IPyAutocall)
+
+ # User objects sometimes raise exceptions on attribute access other
+ # than AttributeError (we've seen it in the past), so it's safest to be
+ # ultra-conservative here and catch all.
+ try:
+ auto_rewrite = obj.rewrite
+ except Exception:
+ auto_rewrite = True
+
+ if esc == ESC_QUOTE:
+ # Auto-quote splitting on whitespace
+ newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
+ elif esc == ESC_QUOTE2:
+ # Auto-quote whole string
+ newcmd = '%s("%s")' % (ifun,the_rest)
+ elif esc == ESC_PAREN:
+ newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
+ else:
+ # Auto-paren.
+ if force_auto:
+ # Don't rewrite if it is already a call.
+ do_rewrite = not the_rest.startswith('(')
+ else:
+ if not the_rest:
+ # We only apply it to argument-less calls if the autocall
+ # parameter is set to 2.
+ do_rewrite = (self.shell.autocall >= 2)
+ elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
+ # Don't autocall in this case: item access for an object
+ # which is BOTH callable and implements __getitem__.
+ do_rewrite = False
+ else:
+ do_rewrite = True
+
+ # Figure out the rewritten command
+ if do_rewrite:
+ if the_rest.endswith(';'):
+ newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
+ else:
+ newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
+ else:
+ normal_handler = self.prefilter_manager.get_handler_by_name('normal')
+ return normal_handler.handle(line_info)
+
+ # Display the rewritten call
+ if auto_rewrite:
+ self.shell.auto_rewrite_input(newcmd)
+
+ return newcmd
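+
+    # Editorial illustration (not part of upstream IPython): assuming autocall
+    # is active and `f` is callable, the rewrites built above turn input such
+    # as the following into plain Python (the ESC_* names are the escape
+    # prefixes imported at the top of this module):
+    #
+    #     /f 1 2    ->  f(1,2)         (ESC_PAREN: words joined with commas)
+    #     ,f a b    ->  f("a", "b")    (ESC_QUOTE: each word quoted)
+    #     ;f a b    ->  f("a b")       (ESC_QUOTE2: whole rest quoted)
+    #     f 1, 2    ->  f(1, 2)        (plain autocall: rest wrapped as-is)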
+
+
+class EmacsHandler(PrefilterHandler):
+
+ handler_name = Unicode('emacs')
+ esc_strings = List([])
+
+ def handle(self, line_info):
+ """Handle input lines marked by python-mode."""
+
+ # Currently, nothing is done. Later more functionality can be added
+ # here if needed.
+
+ # The input cache shouldn't be updated
+ return line_info.line
+
+
+#-----------------------------------------------------------------------------
+# Defaults
+#-----------------------------------------------------------------------------
+
+
+_default_checkers = [
+ EmacsChecker,
+ MacroChecker,
+ IPyAutocallChecker,
+ AssignmentChecker,
+ AutoMagicChecker,
+ PythonOpsChecker,
+ AutocallChecker
+]
+
+_default_handlers = [
+ PrefilterHandler,
+ MacroHandler,
+ MagicHandler,
+ AutoHandler,
+ EmacsHandler
+]
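+
+
+# --- Editorial sketch (not part of upstream IPython) -------------------------
+# A minimal illustration of how the checker/handler machinery above composes:
+# a checker inspects a LineInfo and names a handler, and the handler rewrites
+# the line. The names ShoutChecker/ShoutHandler and the 'shout' handler are
+# hypothetical; registering them needs a live InteractiveShell (e.g.
+# ShoutHandler(shell=ip, prefilter_manager=ip.prefilter_manager)), so the
+# classes are only defined when this file is run directly.
+if __name__ == "__main__":
+
+    class ShoutHandler(PrefilterHandler):
+        handler_name = Unicode("shout")
+
+        def handle(self, line_info):
+            # Toy rewrite: echo the original line upper-cased.
+            return "print(%r)" % line_info.line.upper()
+
+    class ShoutChecker(PrefilterChecker):
+        priority = Integer(50).tag(config=True)
+
+        def check(self, line_info):
+            if line_info.line.startswith("shout "):
+                return self.prefilter_manager.get_handler_by_name("shout")
+            return None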
diff --git a/contrib/python/ipython/py3/IPython/core/profile/README_STARTUP b/contrib/python/ipython/py3/IPython/core/profile/README_STARTUP
new file mode 100644
index 0000000000..61d4700042
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/profile/README_STARTUP
@@ -0,0 +1,11 @@
+This is the IPython startup directory
+
+.py and .ipy files in this directory will be run *prior* to any code or files specified
+via the exec_lines or exec_files configurables whenever you load this profile.
+
+Files will be run in lexicographical order, so you can control the execution order of files
+with a prefix, e.g.::
+
+ 00-first.py
+ 50-middle.py
+ 99-last.ipy
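+
+For example, a hypothetical startup file named 00-imports.py placed in this
+directory (the name and contents below are purely illustrative) could pre-load
+a few modules for every session that uses this profile::
+
+    # 00-imports.py -- illustrative example, not shipped with IPython
+    import json
+    import pathlib
+    print("profile startup file loaded")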
diff --git a/contrib/python/ipython/py3/IPython/core/profileapp.py b/contrib/python/ipython/py3/IPython/core/profileapp.py
new file mode 100644
index 0000000000..9a1bae55ac
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/profileapp.py
@@ -0,0 +1,312 @@
+# encoding: utf-8
+"""
+An application for managing IPython profiles.
+
+To be invoked as the `ipython profile` subcommand.
+
+Authors:
+
+* Min RK
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+
+from traitlets.config.application import Application
+from IPython.core.application import (
+ BaseIPythonApplication, base_flags
+)
+from IPython.core.profiledir import ProfileDir
+from IPython.utils.importstring import import_item
+from IPython.paths import get_ipython_dir, get_ipython_package_dir
+from traitlets import Unicode, Bool, Dict, observe
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+create_help = """Create an IPython profile by name
+
+Create an ipython profile directory by its name or
+profile directory path. Profile directories contain
+configuration, log and security related files and are named
+using the convention 'profile_<name>'. By default they are
+located in your ipython directory. Once created, you can edit the
+configuration files in the profile
+directory to configure IPython. Most users will create a
+profile directory by name,
+`ipython profile create myprofile`, which will put the directory
+in `<ipython_dir>/profile_myprofile`.
+"""
+list_help = """List available IPython profiles
+
+List all available profiles, by profile location, that can
+be found in the current working directory or in the ipython
+directory. Profile directories are named using the convention
+'profile_<profile>'.
+"""
+profile_help = """Manage IPython profiles
+
+Profile directories contain
+configuration, log and security related files and are named
+using the convention 'profile_<name>'. By default they are
+located in your ipython directory. You can create profiles
+with `ipython profile create <name>`, or see the profiles you
+already have with `ipython profile list`
+
+To get started configuring IPython, simply do:
+
+$> ipython profile create
+
+and IPython will create the default profile in <ipython_dir>/profile_default,
+where you can edit ipython_config.py to start configuring IPython.
+
+"""
+
+_list_examples = "ipython profile list # list all profiles"
+
+_create_examples = """
+ipython profile create foo # create profile foo w/ default config files
+ipython profile create foo --reset # restage default config files over current
+ipython profile create foo --parallel # also stage parallel config files
+"""
+
+_main_examples = """
+ipython profile create -h # show the help string for the create subcommand
+ipython profile list -h # show the help string for the list subcommand
+
+ipython locate profile foo # print the path to the directory for profile 'foo'
+"""
+
+#-----------------------------------------------------------------------------
+# Profile Application Class (for `ipython profile` subcommand)
+#-----------------------------------------------------------------------------
+
+
+def list_profiles_in(path):
+ """list profiles in a given root directory"""
+ profiles = []
+
+ # for python 3.6+ rewrite to: with os.scandir(path) as dirlist:
+ files = os.scandir(path)
+ for f in files:
+ if f.is_dir() and f.name.startswith('profile_'):
+ profiles.append(f.name.split('_', 1)[-1])
+ return profiles
+
+
+def list_bundled_profiles():
+ """list profiles that are bundled with IPython."""
+ path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
+ profiles = []
+
+ # for python 3.6+ rewrite to: with os.scandir(path) as dirlist:
+ files = os.scandir(path)
+ for profile in files:
+ if profile.is_dir() and profile.name != "__pycache__":
+ profiles.append(profile.name)
+ return profiles
+
+
+class ProfileLocate(BaseIPythonApplication):
+ description = """print the path to an IPython profile dir"""
+
+ def parse_command_line(self, argv=None):
+ super(ProfileLocate, self).parse_command_line(argv)
+ if self.extra_args:
+ self.profile = self.extra_args[0]
+
+ def start(self):
+ print(self.profile_dir.location)
+
+
+class ProfileList(Application):
+ name = u'ipython-profile'
+ description = list_help
+ examples = _list_examples
+
+ aliases = Dict({
+ 'ipython-dir' : 'ProfileList.ipython_dir',
+ 'log-level' : 'Application.log_level',
+ })
+ flags = Dict(dict(
+ debug = ({'Application' : {'log_level' : 0}},
+ "Set Application.log_level to 0, maximizing log output."
+ )
+ ))
+
+ ipython_dir = Unicode(get_ipython_dir(),
+ help="""
+ The name of the IPython directory. This directory is used for logging
+ configuration (through profiles), history storage, etc. The default
+        is usually $HOME/.ipython. This option can also be specified through
+ the environment variable IPYTHONDIR.
+ """
+ ).tag(config=True)
+
+
+ def _print_profiles(self, profiles):
+ """print list of profiles, indented."""
+ for profile in profiles:
+ print(' %s' % profile)
+
+ def list_profile_dirs(self):
+ profiles = list_bundled_profiles()
+ if profiles:
+ print()
+ print("Available profiles in IPython:")
+ self._print_profiles(profiles)
+ print()
+ print(" The first request for a bundled profile will copy it")
+ print(" into your IPython directory (%s)," % self.ipython_dir)
+ print(" where you can customize it.")
+
+ profiles = list_profiles_in(self.ipython_dir)
+ if profiles:
+ print()
+ print("Available profiles in %s:" % self.ipython_dir)
+ self._print_profiles(profiles)
+
+ profiles = list_profiles_in(os.getcwd())
+ if profiles:
+ print()
+ print(
+                "Profiles from CWD have been removed for security reasons, see CVE-2022-21699:"
+ )
+
+ print()
+ print("To use any of the above profiles, start IPython with:")
+ print(" ipython --profile=<name>")
+ print()
+
+ def start(self):
+ self.list_profile_dirs()
+
+
+create_flags = {}
+create_flags.update(base_flags)
+# don't include '--init' flag, which implies running profile create in other apps
+create_flags.pop('init')
+create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
+ "reset config files in this profile to the defaults.")
+create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
+ "Include the config files for parallel "
+ "computing apps (ipengine, ipcontroller, etc.)")
+
+
+class ProfileCreate(BaseIPythonApplication):
+ name = u'ipython-profile'
+ description = create_help
+ examples = _create_examples
+ auto_create = Bool(True)
+ def _log_format_default(self):
+ return "[%(name)s] %(message)s"
+
+ def _copy_config_files_default(self):
+ return True
+
+ parallel = Bool(False,
+ help="whether to include parallel computing config files"
+ ).tag(config=True)
+
+ @observe('parallel')
+ def _parallel_changed(self, change):
+ parallel_files = [ 'ipcontroller_config.py',
+ 'ipengine_config.py',
+ 'ipcluster_config.py'
+ ]
+ if change['new']:
+ for cf in parallel_files:
+ self.config_files.append(cf)
+ else:
+ for cf in parallel_files:
+ if cf in self.config_files:
+ self.config_files.remove(cf)
+
+ def parse_command_line(self, argv):
+ super(ProfileCreate, self).parse_command_line(argv)
+ # accept positional arg as profile name
+ if self.extra_args:
+ self.profile = self.extra_args[0]
+
+ flags = Dict(create_flags)
+
+ classes = [ProfileDir]
+
+ def _import_app(self, app_path):
+ """import an app class"""
+ app = None
+ name = app_path.rsplit('.', 1)[-1]
+ try:
+ app = import_item(app_path)
+ except ImportError:
+ self.log.info("Couldn't import %s, config file will be excluded", name)
+ except Exception:
+ self.log.warning('Unexpected error importing %s', name, exc_info=True)
+ return app
+
+ def init_config_files(self):
+ super(ProfileCreate, self).init_config_files()
+ # use local imports, since these classes may import from here
+ from IPython.terminal.ipapp import TerminalIPythonApp
+ apps = [TerminalIPythonApp]
+ for app_path in (
+ 'ipykernel.kernelapp.IPKernelApp',
+ ):
+ app = self._import_app(app_path)
+ if app is not None:
+ apps.append(app)
+ if self.parallel:
+ from ipyparallel.apps.ipcontrollerapp import IPControllerApp
+ from ipyparallel.apps.ipengineapp import IPEngineApp
+ from ipyparallel.apps.ipclusterapp import IPClusterStart
+ apps.extend([
+ IPControllerApp,
+ IPEngineApp,
+ IPClusterStart,
+ ])
+ for App in apps:
+ app = App()
+ app.config.update(self.config)
+ app.log = self.log
+ app.overwrite = self.overwrite
+ app.copy_config_files=True
+ app.ipython_dir=self.ipython_dir
+ app.profile_dir=self.profile_dir
+ app.init_config_files()
+
+ def stage_default_config_file(self):
+ pass
+
+
+class ProfileApp(Application):
+ name = u'ipython profile'
+ description = profile_help
+ examples = _main_examples
+
+ subcommands = Dict(dict(
+ create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
+ list = (ProfileList, ProfileList.description.splitlines()[0]),
+ locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
+ ))
+
+ def start(self):
+ if self.subapp is None:
+ print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
+ print()
+ self.print_description()
+ self.print_subcommands()
+ self.exit(1)
+ else:
+ return self.subapp.start()
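+
+
+# --- Editorial sketch (not part of upstream IPython) -------------------------
+# ProfileApp is normally reached via the `ipython profile` subcommand, but as a
+# traitlets Application it can also be launched programmatically. This assumes
+# the standard Application.launch_instance entry point and will inspect real
+# profile directories on disk, so it only runs when executed directly.
+if __name__ == "__main__":
+    # Roughly equivalent to running `ipython profile list` from a shell.
+    ProfileApp.launch_instance(argv=["list"])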
diff --git a/contrib/python/ipython/py3/IPython/core/profiledir.py b/contrib/python/ipython/py3/IPython/core/profiledir.py
new file mode 100644
index 0000000000..cb4d39339a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/profiledir.py
@@ -0,0 +1,223 @@
+# encoding: utf-8
+"""An object for managing IPython profile directories."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import shutil
+import errno
+from pathlib import Path
+
+from traitlets.config.configurable import LoggingConfigurable
+from ..paths import get_ipython_package_dir
+from ..utils.path import expand_path, ensure_dir_exists
+from traitlets import Unicode, Bool, observe
+
+#-----------------------------------------------------------------------------
+# Module errors
+#-----------------------------------------------------------------------------
+
+class ProfileDirError(Exception):
+ pass
+
+
+#-----------------------------------------------------------------------------
+# Class for managing profile directories
+#-----------------------------------------------------------------------------
+
+class ProfileDir(LoggingConfigurable):
+ """An object to manage the profile directory and its resources.
+
+ The profile directory is used by all IPython applications, to manage
+ configuration, logging and security.
+
+ This object knows how to find, create and manage these directories. This
+ should be used by any code that wants to handle profiles.
+ """
+
+ security_dir_name = Unicode('security')
+ log_dir_name = Unicode('log')
+ startup_dir_name = Unicode('startup')
+ pid_dir_name = Unicode('pid')
+ static_dir_name = Unicode('static')
+ security_dir = Unicode(u'')
+ log_dir = Unicode(u'')
+ startup_dir = Unicode(u'')
+ pid_dir = Unicode(u'')
+ static_dir = Unicode(u'')
+
+ location = Unicode(u'',
+ help="""Set the profile location directly. This overrides the logic used by the
+ `profile` option.""",
+ ).tag(config=True)
+
+ _location_isset = Bool(False) # flag for detecting multiply set location
+ @observe('location')
+ def _location_changed(self, change):
+ if self._location_isset:
+ raise RuntimeError("Cannot set profile location more than once.")
+ self._location_isset = True
+ new = change['new']
+ ensure_dir_exists(new)
+
+ # ensure config files exist:
+ self.security_dir = os.path.join(new, self.security_dir_name)
+ self.log_dir = os.path.join(new, self.log_dir_name)
+ self.startup_dir = os.path.join(new, self.startup_dir_name)
+ self.pid_dir = os.path.join(new, self.pid_dir_name)
+ self.static_dir = os.path.join(new, self.static_dir_name)
+ self.check_dirs()
+
+ def _mkdir(self, path, mode=None):
+ """ensure a directory exists at a given path
+
+ This is a version of os.mkdir, with the following differences:
+
+ - returns True if it created the directory, False otherwise
+ - ignores EEXIST, protecting against race conditions where
+ the dir may have been created in between the check and
+ the creation
+ - sets permissions if requested and the dir already exists
+ """
+ if os.path.exists(path):
+ if mode and os.stat(path).st_mode != mode:
+ try:
+ os.chmod(path, mode)
+ except OSError:
+ self.log.warning(
+ "Could not set permissions on %s",
+ path
+ )
+ return False
+ try:
+ if mode:
+ os.mkdir(path, mode)
+ else:
+ os.mkdir(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ return False
+ else:
+ raise
+
+ return True
+
+ @observe('log_dir')
+ def check_log_dir(self, change=None):
+ self._mkdir(self.log_dir)
+
+ @observe('startup_dir')
+ def check_startup_dir(self, change=None):
+ self._mkdir(self.startup_dir)
+
+ readme = os.path.join(self.startup_dir, 'README')
+
+ if not os.path.exists(readme):
+ import pkgutil
+ with open(readme, 'wb') as f:
+ f.write(pkgutil.get_data(__name__, 'profile/README_STARTUP'))
+
+ @observe('security_dir')
+ def check_security_dir(self, change=None):
+ self._mkdir(self.security_dir, 0o40700)
+
+ @observe('pid_dir')
+ def check_pid_dir(self, change=None):
+ self._mkdir(self.pid_dir, 0o40700)
+
+ def check_dirs(self):
+ self.check_security_dir()
+ self.check_log_dir()
+ self.check_pid_dir()
+ self.check_startup_dir()
+
+ def copy_config_file(self, config_file: str, path: Path, overwrite=False) -> bool:
+ """Copy a default config file into the active profile directory.
+
+ Default configuration files are kept in :mod:`IPython.core.profile`.
+ This function moves these from that location to the working profile
+        This function copies them from that location into the working profile
+ """
+ dst = Path(os.path.join(self.location, config_file))
+ if dst.exists() and not overwrite:
+ return False
+ if path is None:
+ path = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'default')
+ assert isinstance(path, Path)
+ src = path / config_file
+ shutil.copy(src, dst)
+ return True
+
+ @classmethod
+ def create_profile_dir(cls, profile_dir, config=None):
+ """Create a new profile directory given a full path.
+
+ Parameters
+ ----------
+ profile_dir : str
+ The full path to the profile directory. If it does exist, it will
+ be used. If not, it will be created.
+ """
+ return cls(location=profile_dir, config=config)
+
+ @classmethod
+ def create_profile_dir_by_name(cls, path, name=u'default', config=None):
+ """Create a profile dir by profile name and path.
+
+ Parameters
+ ----------
+ path : unicode
+ The path (directory) to put the profile directory in.
+ name : unicode
+ The name of the profile. The name of the profile directory will
+            be "profile_<name>".
+ """
+ if not os.path.isdir(path):
+ raise ProfileDirError('Directory not found: %s' % path)
+ profile_dir = os.path.join(path, u'profile_' + name)
+ return cls(location=profile_dir, config=config)
+
+ @classmethod
+ def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
+ """Find an existing profile dir by profile name, return its ProfileDir.
+
+ This searches through a sequence of paths for a profile dir. If it
+ is not found, a :class:`ProfileDirError` exception will be raised.
+
+ The search path algorithm is:
+        1. ``os.getcwd()`` -- removed for security reasons (see CVE-2022-21699)
+ 2. ``ipython_dir``
+
+ Parameters
+ ----------
+ ipython_dir : unicode or str
+ The IPython directory to use.
+ name : unicode or str
+ The name of the profile. The name of the profile directory
+            will be "profile_<name>".
+ """
+ dirname = u'profile_' + name
+ paths = [ipython_dir]
+ for p in paths:
+ profile_dir = os.path.join(p, dirname)
+ if os.path.isdir(profile_dir):
+ return cls(location=profile_dir, config=config)
+ else:
+ raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
+
+ @classmethod
+ def find_profile_dir(cls, profile_dir, config=None):
+ """Find/create a profile dir and return its ProfileDir.
+
+ This will create the profile directory if it doesn't exist.
+
+ Parameters
+ ----------
+ profile_dir : unicode or str
+ The path of the profile directory.
+ """
+ profile_dir = expand_path(profile_dir)
+ if not os.path.isdir(profile_dir):
+ raise ProfileDirError('Profile directory not found: %s' % profile_dir)
+ return cls(location=profile_dir, config=config)
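+
+
+# --- Editorial sketch (not part of upstream IPython) -------------------------
+# A small demonstration of the classmethod constructors above, kept inside a
+# temporary directory so no real IPython profile is touched. Setting `location`
+# triggers check_dirs(), which creates the security/log/startup/pid subdirs.
+if __name__ == "__main__":
+    import tempfile
+
+    with tempfile.TemporaryDirectory() as tmp:
+        pd = ProfileDir.create_profile_dir_by_name(tmp, u'demo')
+        print(pd.location)                    # .../profile_demo
+        print(sorted(os.listdir(pd.location)))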
diff --git a/contrib/python/ipython/py3/IPython/core/prompts.py b/contrib/python/ipython/py3/IPython/core/prompts.py
new file mode 100644
index 0000000000..7fd218d37a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/prompts.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+"""Being removed
+"""
+
+class LazyEvaluate(object):
+ """This is used for formatting strings with values that need to be updated
+ at that time, such as the current time or working directory."""
+ def __init__(self, func, *args, **kwargs):
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ def __call__(self, **kwargs):
+ self.kwargs.update(kwargs)
+ return self.func(*self.args, **self.kwargs)
+
+ def __str__(self):
+ return str(self())
+
+ def __format__(self, format_spec):
+ return format(self(), format_spec)
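+
+
+# --- Editorial sketch (not part of upstream IPython) -------------------------
+# LazyEvaluate defers calling `func` until the value is rendered, so each
+# str()/format() reflects the state at render time. A small guarded demo:
+if __name__ == "__main__":
+    import time
+
+    clock = LazyEvaluate(time.strftime, "%H:%M:%S")
+    print("first:  {}".format(clock))    # evaluated now
+    time.sleep(1)
+    print("second: {}".format(clock))    # re-evaluated one second later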
diff --git a/contrib/python/ipython/py3/IPython/core/pylabtools.py b/contrib/python/ipython/py3/IPython/core/pylabtools.py
new file mode 100644
index 0000000000..deadf038ea
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/pylabtools.py
@@ -0,0 +1,425 @@
+# -*- coding: utf-8 -*-
+"""Pylab (matplotlib) support utilities."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from io import BytesIO
+from binascii import b2a_base64
+from functools import partial
+import warnings
+
+from IPython.core.display import _pngxy
+from IPython.utils.decorators import flag_calls
+
+# If user specifies a GUI, that dictates the backend, otherwise we read the
+# user's mpl default from the mpl rc structure
+backends = {
+ "tk": "TkAgg",
+ "gtk": "GTKAgg",
+ "gtk3": "GTK3Agg",
+ "gtk4": "GTK4Agg",
+ "wx": "WXAgg",
+ "qt4": "Qt4Agg",
+ "qt5": "Qt5Agg",
+ "qt6": "QtAgg",
+ "qt": "Qt5Agg",
+ "osx": "MacOSX",
+ "nbagg": "nbAgg",
+ "webagg": "WebAgg",
+ "notebook": "nbAgg",
+ "agg": "agg",
+ "svg": "svg",
+ "pdf": "pdf",
+ "ps": "ps",
+ "inline": "module://matplotlib_inline.backend_inline",
+ "ipympl": "module://ipympl.backend_nbagg",
+ "widget": "module://ipympl.backend_nbagg",
+}
+
+# We also need a reverse backends2guis mapping that will properly choose which
+# GUI support to activate based on the desired matplotlib backend. For the
+# most part it's just a reverse of the above dict, but we also need to add a
+# few others that map to the same GUI manually:
+backend2gui = dict(zip(backends.values(), backends.keys()))
+# In the reverse mapping, there are a few extra valid matplotlib backends that
+# map to the same GUI support
+backend2gui["GTK"] = backend2gui["GTKCairo"] = "gtk"
+backend2gui["GTK3Cairo"] = "gtk3"
+backend2gui["GTK4Cairo"] = "gtk4"
+backend2gui["WX"] = "wx"
+backend2gui["CocoaAgg"] = "osx"
+# There needs to be a hysteresis here as the new QtAgg Matplotlib backend
+# supports either Qt5 or Qt6, while the IPython qt event loop supports Qt4, Qt5,
+# and Qt6.
+backend2gui["QtAgg"] = "qt"
+backend2gui["Qt4Agg"] = "qt"
+backend2gui["Qt5Agg"] = "qt"
+
+# And some backends that don't need GUI integration
+del backend2gui["nbAgg"]
+del backend2gui["agg"]
+del backend2gui["svg"]
+del backend2gui["pdf"]
+del backend2gui["ps"]
+del backend2gui["module://matplotlib_inline.backend_inline"]
+del backend2gui["module://ipympl.backend_nbagg"]
+
+#-----------------------------------------------------------------------------
+# Matplotlib utilities
+#-----------------------------------------------------------------------------
+
+
+def getfigs(*fig_nums):
+ """Get a list of matplotlib figures by figure numbers.
+
+ If no arguments are given, all available figures are returned. If the
+ argument list contains references to invalid figures, a warning is printed
+    but the function continues processing further figures.
+
+ Parameters
+ ----------
+    fig_nums : tuple
+ A tuple of ints giving the figure numbers of the figures to return.
+ """
+ from matplotlib._pylab_helpers import Gcf
+ if not fig_nums:
+ fig_managers = Gcf.get_all_fig_managers()
+ return [fm.canvas.figure for fm in fig_managers]
+ else:
+ figs = []
+ for num in fig_nums:
+ f = Gcf.figs.get(num)
+ if f is None:
+ print('Warning: figure %s not available.' % num)
+ else:
+ figs.append(f.canvas.figure)
+ return figs
+
+
+def figsize(sizex, sizey):
+ """Set the default figure size to be [sizex, sizey].
+
+    This is just an easy-to-remember convenience wrapper that sets::
+
+ matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
+ """
+ import matplotlib
+ matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
+
+
+def print_figure(fig, fmt="png", bbox_inches="tight", base64=False, **kwargs):
+ """Print a figure to an image, and return the resulting file data
+
+ Returned data will be bytes unless ``fmt='svg'``,
+ in which case it will be unicode.
+
+ Any keyword args are passed to fig.canvas.print_figure,
+ such as ``quality`` or ``bbox_inches``.
+
+ If `base64` is True, return base64-encoded str instead of raw bytes
+ for binary-encoded image formats
+
+ .. versionadded:: 7.29
+ base64 argument
+ """
+ # When there's an empty figure, we shouldn't return anything, otherwise we
+ # get big blank areas in the qt console.
+ if not fig.axes and not fig.lines:
+ return
+
+ dpi = fig.dpi
+ if fmt == 'retina':
+ dpi = dpi * 2
+ fmt = 'png'
+
+ # build keyword args
+ kw = {
+ "format":fmt,
+ "facecolor":fig.get_facecolor(),
+ "edgecolor":fig.get_edgecolor(),
+ "dpi":dpi,
+ "bbox_inches":bbox_inches,
+ }
+ # **kwargs get higher priority
+ kw.update(kwargs)
+
+ bytes_io = BytesIO()
+ if fig.canvas is None:
+ from matplotlib.backend_bases import FigureCanvasBase
+ FigureCanvasBase(fig)
+
+ fig.canvas.print_figure(bytes_io, **kw)
+ data = bytes_io.getvalue()
+ if fmt == 'svg':
+ data = data.decode('utf-8')
+ elif base64:
+ data = b2a_base64(data, newline=False).decode("ascii")
+ return data
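+
+# Editorial illustration (not upstream): typical use, assuming matplotlib is
+# installed and the Agg backend is acceptable.
+#
+#     >>> import matplotlib
+#     >>> matplotlib.use("Agg")
+#     >>> import matplotlib.pyplot as plt
+#     >>> fig, ax = plt.subplots()
+#     >>> _ = ax.plot([1, 2, 3])       # an axes-less figure would return None
+#     >>> png_bytes = print_figure(fig, fmt="png")
+#     >>> png_b64 = print_figure(fig, fmt="png", base64=True)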
+
+def retina_figure(fig, base64=False, **kwargs):
+ """format a figure as a pixel-doubled (retina) PNG
+
+ If `base64` is True, return base64-encoded str instead of raw bytes
+ for binary-encoded image formats
+
+ .. versionadded:: 7.29
+ base64 argument
+ """
+ pngdata = print_figure(fig, fmt="retina", base64=False, **kwargs)
+ # Make sure that retina_figure acts just like print_figure and returns
+ # None when the figure is empty.
+ if pngdata is None:
+ return
+ w, h = _pngxy(pngdata)
+ metadata = {"width": w//2, "height":h//2}
+ if base64:
+ pngdata = b2a_base64(pngdata, newline=False).decode("ascii")
+ return pngdata, metadata
+
+
+# We need a little factory function here to create the closure where
+# safe_execfile can live.
+def mpl_runner(safe_execfile):
+ """Factory to return a matplotlib-enabled runner for %run.
+
+ Parameters
+ ----------
+ safe_execfile : function
+ This must be a function with the same interface as the
+ :meth:`safe_execfile` method of IPython.
+
+ Returns
+ -------
+ A function suitable for use as the ``runner`` argument of the %run magic
+ function.
+ """
+
+ def mpl_execfile(fname,*where,**kw):
+ """matplotlib-aware wrapper around safe_execfile.
+
+ Its interface is identical to that of the :func:`execfile` builtin.
+
+ This is ultimately a call to execfile(), but wrapped in safeties to
+ properly handle interactive rendering."""
+
+ import matplotlib
+ import matplotlib.pyplot as plt
+
+ #print '*** Matplotlib runner ***' # dbg
+ # turn off rendering until end of script
+ is_interactive = matplotlib.rcParams['interactive']
+ matplotlib.interactive(False)
+ safe_execfile(fname,*where,**kw)
+ matplotlib.interactive(is_interactive)
+ # make rendering call now, if the user tried to do it
+ if plt.draw_if_interactive.called:
+ plt.draw()
+ plt.draw_if_interactive.called = False
+
+ # re-draw everything that is stale
+ try:
+ da = plt.draw_all
+ except AttributeError:
+ pass
+ else:
+ da()
+
+ return mpl_execfile
+
+
+def _reshow_nbagg_figure(fig):
+ """reshow an nbagg figure"""
+ try:
+ reshow = fig.canvas.manager.reshow
+ except AttributeError as e:
+ raise NotImplementedError() from e
+ else:
+ reshow()
+
+
+def select_figure_formats(shell, formats, **kwargs):
+ """Select figure formats for the inline backend.
+
+ Parameters
+ ----------
+ shell : InteractiveShell
+ The main IPython instance.
+ formats : str or set
+ One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
+ **kwargs : any
+ Extra keyword arguments to be passed to fig.canvas.print_figure.
+ """
+ import matplotlib
+ from matplotlib.figure import Figure
+
+ svg_formatter = shell.display_formatter.formatters['image/svg+xml']
+ png_formatter = shell.display_formatter.formatters['image/png']
+ jpg_formatter = shell.display_formatter.formatters['image/jpeg']
+ pdf_formatter = shell.display_formatter.formatters['application/pdf']
+
+ if isinstance(formats, str):
+ formats = {formats}
+ # cast in case of list / tuple
+ formats = set(formats)
+
+ [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
+ mplbackend = matplotlib.get_backend().lower()
+ if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
+ formatter = shell.display_formatter.ipython_display_formatter
+ formatter.for_type(Figure, _reshow_nbagg_figure)
+
+ supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
+ bad = formats.difference(supported)
+ if bad:
+ bs = "%s" % ','.join([repr(f) for f in bad])
+ gs = "%s" % ','.join([repr(f) for f in supported])
+ raise ValueError("supported formats are: %s not %s" % (gs, bs))
+
+ if "png" in formats:
+ png_formatter.for_type(
+ Figure, partial(print_figure, fmt="png", base64=True, **kwargs)
+ )
+ if "retina" in formats or "png2x" in formats:
+ png_formatter.for_type(Figure, partial(retina_figure, base64=True, **kwargs))
+ if "jpg" in formats or "jpeg" in formats:
+ jpg_formatter.for_type(
+ Figure, partial(print_figure, fmt="jpg", base64=True, **kwargs)
+ )
+ if "svg" in formats:
+ svg_formatter.for_type(Figure, partial(print_figure, fmt="svg", **kwargs))
+ if "pdf" in formats:
+ pdf_formatter.for_type(
+ Figure, partial(print_figure, fmt="pdf", base64=True, **kwargs)
+ )
+
+#-----------------------------------------------------------------------------
+# Code for initializing matplotlib and importing pylab
+#-----------------------------------------------------------------------------
+
+
+def find_gui_and_backend(gui=None, gui_select=None):
+ """Given a gui string return the gui and mpl backend.
+
+ Parameters
+ ----------
+ gui : str
+ Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
+ gui_select : str
+ Can be one of ('tk','gtk','wx','qt','qt4','inline').
+ This is any gui already selected by the shell.
+
+ Returns
+ -------
+ A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
+ 'WXAgg','Qt4Agg','module://matplotlib_inline.backend_inline','agg').
+ """
+
+ import matplotlib
+
+ if gui and gui != 'auto':
+ # select backend based on requested gui
+ backend = backends[gui]
+ if gui == 'agg':
+ gui = None
+ else:
+ # We need to read the backend from the original data structure, *not*
+ # from mpl.rcParams, since a prior invocation of %matplotlib may have
+ # overwritten that.
+ # WARNING: this assumes matplotlib 1.1 or newer!!
+ backend = matplotlib.rcParamsOrig['backend']
+ # In this case, we need to find what the appropriate gui selection call
+ # should be for IPython, so we can activate inputhook accordingly
+ gui = backend2gui.get(backend, None)
+
+    # If a gui was already active, only that gui (and 'inline') are allowed
+    # from here on, so fall back to the previously selected one.
+ if gui_select and gui != gui_select:
+ gui = gui_select
+ backend = backends[gui]
+
+ return gui, backend
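+
+# Editorial illustration (not upstream): with the `backends` table above,
+# find_gui_and_backend("qt") returns ('qt', 'Qt5Agg') and
+# find_gui_and_backend("agg") returns (None, 'agg'); when gui is None the
+# backend is read from matplotlib.rcParamsOrig instead.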
+
+
+def activate_matplotlib(backend):
+ """Activate the given backend and set interactive to True."""
+
+ import matplotlib
+ matplotlib.interactive(True)
+
+ # Matplotlib had a bug where even switch_backend could not force
+ # the rcParam to update. This needs to be set *before* the module
+ # magic of switch_backend().
+ matplotlib.rcParams['backend'] = backend
+
+ # Due to circular imports, pyplot may be only partially initialised
+ # when this function runs.
+ # So avoid needing matplotlib attribute-lookup to access pyplot.
+ from matplotlib import pyplot as plt
+
+ plt.switch_backend(backend)
+
+ plt.show._needmain = False
+ # We need to detect at runtime whether show() is called by the user.
+ # For this, we wrap it into a decorator which adds a 'called' flag.
+ plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
+
+
+def import_pylab(user_ns, import_all=True):
+ """Populate the namespace with pylab-related values.
+
+ Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
+
+ Also imports a few names from IPython (figsize, display, getfigs)
+
+ """
+
+    # Importing numpy as np and pyplot as plt are conventions we're trying to
+ # somewhat standardize on. Making them available to users by default
+ # will greatly help this.
+ s = ("import numpy\n"
+ "import matplotlib\n"
+ "from matplotlib import pylab, mlab, pyplot\n"
+ "np = numpy\n"
+ "plt = pyplot\n"
+ )
+ exec(s, user_ns)
+
+ if import_all:
+ s = ("from matplotlib.pylab import *\n"
+ "from numpy import *\n")
+ exec(s, user_ns)
+
+ # IPython symbols to add
+ user_ns['figsize'] = figsize
+ from IPython.display import display
+ # Add display and getfigs to the user's namespace
+ user_ns['display'] = display
+ user_ns['getfigs'] = getfigs
+
+
+def configure_inline_support(shell, backend):
+ """
+ .. deprecated:: 7.23
+
+ use `matplotlib_inline.backend_inline.configure_inline_support()`
+
+ Configure an IPython shell object for matplotlib use.
+
+ Parameters
+ ----------
+ shell : InteractiveShell instance
+ backend : matplotlib backend
+ """
+ warnings.warn(
+ "`configure_inline_support` is deprecated since IPython 7.23, directly "
+ "use `matplotlib_inline.backend_inline.configure_inline_support()`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from matplotlib_inline.backend_inline import (
+ configure_inline_support as configure_inline_support_orig,
+ )
+
+ configure_inline_support_orig(shell, backend)
diff --git a/contrib/python/ipython/py3/IPython/core/release.py b/contrib/python/ipython/py3/IPython/core/release.py
new file mode 100644
index 0000000000..50080642ee
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/release.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+"""Release data for the IPython project."""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2008, IPython Development Team.
+# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
+# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+# IPython version information. An empty _version_extra corresponds to a full
+# release. 'dev' as a _version_extra string means this is a development
+# version
+_version_major = 8
+_version_minor = 14
+_version_patch = 0
+_version_extra = ".dev"
+# _version_extra = "rc1"
+_version_extra = "" # Uncomment this for full releases
+
+# Construct full version string from these.
+_ver = [_version_major, _version_minor, _version_patch]
+
+__version__ = '.'.join(map(str, _ver))
+if _version_extra:
+ __version__ = __version__ + _version_extra
+
+version = __version__ # backwards compatibility name
+version_info = (_version_major, _version_minor, _version_patch, _version_extra)
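+
+# Editorial note (illustration, not upstream): with the values above
+# (_version_extra == ""), __version__ == "8.14.0" and
+# version_info == (8, 14, 0, "").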
+
+# Change this when incrementing the kernel protocol version
+kernel_protocol_version_info = (5, 0)
+kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
+
+license = "BSD-3-Clause"
+
+authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
+ 'Janko' : ('Janko Hauser','jhauser@zscout.de'),
+ 'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
+ 'Ville' : ('Ville Vainio','vivainio@gmail.com'),
+ 'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
+ 'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
+ 'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
+ 'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
+ 'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
+ }
+
+author = 'The IPython Development Team'
+
+author_email = 'ipython-dev@python.org'
diff --git a/contrib/python/ipython/py3/IPython/core/shellapp.py b/contrib/python/ipython/py3/IPython/core/shellapp.py
new file mode 100644
index 0000000000..29325a0ad2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/shellapp.py
@@ -0,0 +1,451 @@
+# encoding: utf-8
+"""
+A mixin for :class:`~IPython.core.application.Application` classes that
+launch InteractiveShell instances, load extensions, etc.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import glob
+from itertools import chain
+import os
+import sys
+
+from traitlets.config.application import boolean_flag
+from traitlets.config.configurable import Configurable
+from traitlets.config.loader import Config
+from IPython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
+from IPython.core import pylabtools
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.path import filefind
+from traitlets import (
+ Unicode, Instance, List, Bool, CaselessStrEnum, observe,
+ DottedObjectName,
+)
+from IPython.terminal import pt_inputhooks
+
+#-----------------------------------------------------------------------------
+# Aliases and Flags
+#-----------------------------------------------------------------------------
+
+gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
+
+backend_keys = sorted(pylabtools.backends.keys())
+backend_keys.insert(0, 'auto')
+
+shell_flags = {}
+
+addflag = lambda *args: shell_flags.update(boolean_flag(*args))
+addflag('autoindent', 'InteractiveShell.autoindent',
+ 'Turn on autoindenting.', 'Turn off autoindenting.'
+)
+addflag('automagic', 'InteractiveShell.automagic',
+ """Turn on the auto calling of magic commands. Type %%magic at the
+ IPython prompt for more information.""",
+ 'Turn off the auto calling of magic commands.'
+)
+addflag('pdb', 'InteractiveShell.pdb',
+ "Enable auto calling the pdb debugger after every exception.",
+ "Disable auto calling the pdb debugger after every exception."
+)
+addflag('pprint', 'PlainTextFormatter.pprint',
+ "Enable auto pretty printing of results.",
+ "Disable auto pretty printing of results."
+)
+addflag('color-info', 'InteractiveShell.color_info',
+ """IPython can display information about objects via a set of functions,
+    and can optionally use colors for this, for syntax-highlighting
+    source code, and for various other elements. This is on by default, but can cause
+ problems with some pagers. If you see such problems, you can disable the
+ colours.""",
+ "Disable using colors for info related things."
+)
+addflag('ignore-cwd', 'InteractiveShellApp.ignore_cwd',
+ "Exclude the current working directory from sys.path",
+ "Include the current working directory in sys.path",
+)
+nosep_config = Config()
+nosep_config.InteractiveShell.separate_in = ''
+nosep_config.InteractiveShell.separate_out = ''
+nosep_config.InteractiveShell.separate_out2 = ''
+
+shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
+shell_flags['pylab'] = (
+ {'InteractiveShellApp' : {'pylab' : 'auto'}},
+ """Pre-load matplotlib and numpy for interactive use with
+ the default matplotlib backend."""
+)
+shell_flags['matplotlib'] = (
+ {'InteractiveShellApp' : {'matplotlib' : 'auto'}},
+ """Configure matplotlib for interactive use with
+ the default matplotlib backend."""
+)
+
+# it's possible we don't want short aliases for *all* of these:
+shell_aliases = dict(
+ autocall='InteractiveShell.autocall',
+ colors='InteractiveShell.colors',
+ logfile='InteractiveShell.logfile',
+ logappend='InteractiveShell.logappend',
+ c='InteractiveShellApp.code_to_run',
+ m='InteractiveShellApp.module_to_run',
+ ext="InteractiveShellApp.extra_extensions",
+ gui='InteractiveShellApp.gui',
+ pylab='InteractiveShellApp.pylab',
+ matplotlib='InteractiveShellApp.matplotlib',
+)
+shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
+
+#-----------------------------------------------------------------------------
+# Main classes and functions
+#-----------------------------------------------------------------------------
+
+class InteractiveShellApp(Configurable):
+ """A Mixin for applications that start InteractiveShell instances.
+
+ Provides configurables for loading extensions and executing files
+ as part of configuring a Shell environment.
+
+ The following methods should be called by the :meth:`initialize` method
+ of the subclass:
+
+ - :meth:`init_path`
+ - :meth:`init_shell` (to be implemented by the subclass)
+ - :meth:`init_gui_pylab`
+ - :meth:`init_extensions`
+ - :meth:`init_code`
+ """
+ extensions = List(Unicode(),
+ help="A list of dotted module names of IPython extensions to load."
+ ).tag(config=True)
+
+ extra_extensions = List(
+ DottedObjectName(),
+ help="""
+ Dotted module name(s) of one or more IPython extensions to load.
+
+ For specifying extra extensions to load on the command-line.
+
+ .. versionadded:: 7.10
+ """,
+ ).tag(config=True)
+
+ reraise_ipython_extension_failures = Bool(False,
+ help="Reraise exceptions encountered loading IPython extensions?",
+ ).tag(config=True)
+
+ # Extensions that are always loaded (not configurable)
+ default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
+
+ hide_initial_ns = Bool(True,
+ help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
+ be hidden from tools like %who?"""
+ ).tag(config=True)
+
+ exec_files = List(Unicode(),
+ help="""List of files to run at IPython startup."""
+ ).tag(config=True)
+ exec_PYTHONSTARTUP = Bool(True,
+ help="""Run the file referenced by the PYTHONSTARTUP environment
+ variable at IPython startup."""
+ ).tag(config=True)
+ file_to_run = Unicode('',
+ help="""A file to be run""").tag(config=True)
+
+ exec_lines = List(Unicode(),
+ help="""lines of code to run at IPython startup."""
+ ).tag(config=True)
+ code_to_run = Unicode('',
+ help="Execute the given command string."
+ ).tag(config=True)
+ module_to_run = Unicode('',
+ help="Run the module as a script."
+ ).tag(config=True)
+ gui = CaselessStrEnum(gui_keys, allow_none=True,
+ help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
+ ).tag(config=True)
+ matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
+ help="""Configure matplotlib for interactive use with
+ the default matplotlib backend."""
+ ).tag(config=True)
+ pylab = CaselessStrEnum(backend_keys, allow_none=True,
+ help="""Pre-load matplotlib and numpy for interactive use,
+ selecting a particular matplotlib backend and loop integration.
+ """
+ ).tag(config=True)
+ pylab_import_all = Bool(True,
+ help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
+    and an ``import *`` is done from numpy and pylab when using pylab mode.
+
+ When False, pylab mode should not import any names into the user namespace.
+ """
+ ).tag(config=True)
+ ignore_cwd = Bool(
+ False,
+ help="""If True, IPython will not add the current working directory to sys.path.
+ When False, the current working directory is added to sys.path, allowing imports
+ of modules defined in the current directory."""
+ ).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ # whether interact-loop should start
+ interact = Bool(True)
+
+ user_ns = Instance(dict, args=None, allow_none=True)
+ @observe('user_ns')
+ def _user_ns_changed(self, change):
+ if self.shell is not None:
+ self.shell.user_ns = change['new']
+ self.shell.init_user_ns()
+
+ def init_path(self):
+ """Add current working directory, '', to sys.path
+
+ Unlike Python's default, we insert before the first `site-packages`
+ or `dist-packages` directory,
+ so that it is after the standard library.
+
+ .. versionchanged:: 7.2
+ Try to insert after the standard library, instead of first.
+ .. versionchanged:: 8.0
+ Allow optionally not including the current directory in sys.path
+ """
+ if '' in sys.path or self.ignore_cwd:
+ return
+ for idx, path in enumerate(sys.path):
+ parent, last_part = os.path.split(path)
+ if last_part in {'site-packages', 'dist-packages'}:
+ break
+ else:
+ # no site-packages or dist-packages found (?!)
+ # back to original behavior of inserting at the front
+ idx = 0
+ sys.path.insert(idx, '')
+
+ def init_shell(self):
+ raise NotImplementedError("Override in subclasses")
+
+ def init_gui_pylab(self):
+ """Enable GUI event loop integration, taking pylab into account."""
+ enable = False
+ shell = self.shell
+ if self.pylab:
+ enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
+ key = self.pylab
+ elif self.matplotlib:
+ enable = shell.enable_matplotlib
+ key = self.matplotlib
+ elif self.gui:
+ enable = shell.enable_gui
+ key = self.gui
+
+ if not enable:
+ return
+
+ try:
+ r = enable(key)
+ except ImportError:
+ self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
+ self.shell.showtraceback()
+ return
+ except Exception:
+ self.log.warning("GUI event loop or pylab initialization failed")
+ self.shell.showtraceback()
+ return
+
+ if isinstance(r, tuple):
+ gui, backend = r[:2]
+ self.log.info("Enabling GUI event loop integration, "
+ "eventloop=%s, matplotlib=%s", gui, backend)
+ if key == "auto":
+ print("Using matplotlib backend: %s" % backend)
+ else:
+ gui = r
+ self.log.info("Enabling GUI event loop integration, "
+ "eventloop=%s", gui)
+
+ def init_extensions(self):
+ """Load all IPython extensions in IPythonApp.extensions.
+
+ This uses the :meth:`ExtensionManager.load_extensions` to load all
+ the extensions listed in ``self.extensions``.
+ """
+ try:
+ self.log.debug("Loading IPython extensions...")
+ extensions = (
+ self.default_extensions + self.extensions + self.extra_extensions
+ )
+ for ext in extensions:
+ try:
+ self.log.info("Loading IPython extension: %s", ext)
+ self.shell.extension_manager.load_extension(ext)
+ except:
+ if self.reraise_ipython_extension_failures:
+ raise
+ msg = ("Error in loading extension: {ext}\n"
+ "Check your config files in {location}".format(
+ ext=ext,
+ location=self.profile_dir.location
+ ))
+ self.log.warning(msg, exc_info=True)
+ except:
+ if self.reraise_ipython_extension_failures:
+ raise
+ self.log.warning("Unknown error in loading extensions:", exc_info=True)
+
+ def init_code(self):
+ """run the pre-flight code, specified via exec_lines"""
+ self._run_startup_files()
+ self._run_exec_lines()
+ self._run_exec_files()
+
+ # Hide variables defined here from %who etc.
+ if self.hide_initial_ns:
+ self.shell.user_ns_hidden.update(self.shell.user_ns)
+
+ # command-line execution (ipython -i script.py, ipython -m module)
+ # should *not* be excluded from %whos
+ self._run_cmd_line_code()
+ self._run_module()
+
+        # flush output, so it won't be attached to the first cell
+ sys.stdout.flush()
+ sys.stderr.flush()
+ self.shell._sys_modules_keys = set(sys.modules.keys())
+
+ def _run_exec_lines(self):
+ """Run lines of code in IPythonApp.exec_lines in the user's namespace."""
+ if not self.exec_lines:
+ return
+ try:
+ self.log.debug("Running code from IPythonApp.exec_lines...")
+ for line in self.exec_lines:
+ try:
+ self.log.info("Running code in user namespace: %s" %
+ line)
+ self.shell.run_cell(line, store_history=False)
+ except:
+ self.log.warning("Error in executing line in user "
+ "namespace: %s" % line)
+ self.shell.showtraceback()
+ except:
+ self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
+ self.shell.showtraceback()
+
+ def _exec_file(self, fname, shell_futures=False):
+ try:
+ full_filename = filefind(fname, [u'.', self.ipython_dir])
+ except IOError:
+ self.log.warning("File not found: %r"%fname)
+ return
+ # Make sure that the running script gets a proper sys.argv as if it
+ # were run from a system shell.
+ save_argv = sys.argv
+ sys.argv = [full_filename] + self.extra_args[1:]
+ try:
+ if os.path.isfile(full_filename):
+ self.log.info("Running file in user namespace: %s" %
+ full_filename)
+ # Ensure that __file__ is always defined to match Python
+ # behavior.
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns['__file__'] = fname
+ if full_filename.endswith('.ipy') or full_filename.endswith('.ipynb'):
+ self.shell.safe_execfile_ipy(full_filename,
+ shell_futures=shell_futures)
+ else:
+ # default to python, even without extension
+ self.shell.safe_execfile(full_filename,
+ self.shell.user_ns,
+ shell_futures=shell_futures,
+ raise_exceptions=True)
+ finally:
+ sys.argv = save_argv
+
+ def _run_startup_files(self):
+ """Run files from profile startup directory"""
+ startup_dirs = [self.profile_dir.startup_dir] + [
+ os.path.join(p, 'startup') for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
+ ]
+ startup_files = []
+
+ if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
+ not (self.file_to_run or self.code_to_run or self.module_to_run):
+ python_startup = os.environ['PYTHONSTARTUP']
+ self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
+ try:
+ self._exec_file(python_startup)
+ except:
+ self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
+ self.shell.showtraceback()
+ for startup_dir in startup_dirs[::-1]:
+ startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
+ startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
+ if not startup_files:
+ return
+
+ self.log.debug("Running startup files from %s...", startup_dir)
+ try:
+ for fname in sorted(startup_files):
+ self._exec_file(fname)
+ except:
+ self.log.warning("Unknown error in handling startup files:")
+ self.shell.showtraceback()
+
+ def _run_exec_files(self):
+ """Run files from IPythonApp.exec_files"""
+ if not self.exec_files:
+ return
+
+ self.log.debug("Running files in IPythonApp.exec_files...")
+ try:
+ for fname in self.exec_files:
+ self._exec_file(fname)
+ except:
+ self.log.warning("Unknown error in handling IPythonApp.exec_files:")
+ self.shell.showtraceback()
+
+ def _run_cmd_line_code(self):
+ """Run code or file specified at the command-line"""
+ if self.code_to_run:
+ line = self.code_to_run
+ try:
+ self.log.info("Running code given at command line (c=): %s" %
+ line)
+ self.shell.run_cell(line, store_history=False)
+ except:
+ self.log.warning("Error in executing line in user namespace: %s" %
+ line)
+ self.shell.showtraceback()
+ if not self.interact:
+ self.exit(1)
+
+ # Like Python itself, ignore the second if the first of these is present
+ elif self.file_to_run:
+ fname = self.file_to_run
+ if os.path.isdir(fname):
+ fname = os.path.join(fname, "__main__.py")
+ if not os.path.exists(fname):
+ self.log.warning("File '%s' doesn't exist", fname)
+ if not self.interact:
+ self.exit(2)
+ try:
+ self._exec_file(fname, shell_futures=True)
+ except:
+ self.shell.showtraceback(tb_offset=4)
+ if not self.interact:
+ self.exit(1)
+
+ def _run_module(self):
+ """Run module specified at the command-line."""
+ if self.module_to_run:
+ # Make sure that the module gets a proper sys.argv as if it were
+ # run using `python -m`.
+ save_argv = sys.argv
+ sys.argv = [sys.executable] + self.extra_args
+ try:
+ self.shell.safe_run_module(self.module_to_run,
+ self.shell.user_ns)
+ finally:
+ sys.argv = save_argv
diff --git a/contrib/python/ipython/py3/IPython/core/splitinput.py b/contrib/python/ipython/py3/IPython/core/splitinput.py
new file mode 100644
index 0000000000..5bc3e32542
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/splitinput.py
@@ -0,0 +1,138 @@
+# encoding: utf-8
+"""
+Simple utility for splitting user input. This is used by both inputsplitter and
+prefilter.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import re
+import sys
+
+from IPython.utils import py3compat
+from IPython.utils.encoding import get_stream_enc
+from IPython.core.oinspect import OInfo
+
+#-----------------------------------------------------------------------------
+# Main function
+#-----------------------------------------------------------------------------
+
+# RegExp for splitting line contents into pre-char//first word-method//rest.
+# For clarity, each group is on one line.
+
+# WARNING: update the regexp if the escapes in interactiveshell are changed, as
+# they are hardwired in.
+
+# Although it's not solely driven by the regex, note that:
+# ,;/% only trigger if they are the first character on the line
+# ! and !! trigger if they are first char(s) *or* follow an indent
+# ? triggers as first or last char.
+
+line_split = re.compile(r"""
+ ^(\s*) # any leading space
+ ([,;/%]|!!?|\?\??)? # escape character or characters
+ \s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading %
+ # to correctly treat things like '?%magic'
+ (.*?$|$) # rest of line
+ """, re.VERBOSE)
+
+
+def split_user_input(line, pattern=None):
+ """Split user input into initial whitespace, escape character, function part
+ and the rest.
+ """
+ # We need to ensure that the rest of this routine deals only with unicode
+ encoding = get_stream_enc(sys.stdin, 'utf-8')
+ line = py3compat.cast_unicode(line, encoding)
+
+ if pattern is None:
+ pattern = line_split
+ match = pattern.match(line)
+ if not match:
+ # print "match failed for line '%s'" % line
+ try:
+ ifun, the_rest = line.split(None,1)
+ except ValueError:
+ # print "split failed for line '%s'" % line
+ ifun, the_rest = line, u''
+ pre = re.match(r'^(\s*)(.*)',line).groups()[0]
+ esc = ""
+ else:
+ pre, esc, ifun, the_rest = match.groups()
+
+ #print 'line:<%s>' % line # dbg
+ #print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg
+ return pre, esc or '', ifun.strip(), the_rest.lstrip()
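+
+# A few illustrative calls (a sketch of the expected results under the default
+# `line_split` pattern above; not a doctest):
+#
+#   split_user_input('%matplotlib inline')  ->  ('', '%', 'matplotlib', 'inline')
+#   split_user_input('  !!ls -l')           ->  ('  ', '!!', 'ls', '-l')
+#   split_user_input('x.method?')           ->  ('', '', 'x.method', '?')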
+
+
+class LineInfo(object):
+ """A single line of input and associated info.
+
+ Includes the following as properties:
+
+ line
+ The original, raw line
+
+ continue_prompt
+ Is this line a continuation in a sequence of multiline input?
+
+ pre
+ Any leading whitespace.
+
+ esc
+ The escape character(s) in pre or the empty string if there isn't one.
+ Note that '!!' and '??' are possible values for esc. Otherwise it will
+ always be a single character.
+
+ ifun
+ The 'function part', which is basically the maximal initial sequence
+ of valid python identifiers and the '.' character. This is what is
+ checked for alias and magic transformations, used for auto-calling,
+ etc. In contrast to Python identifiers, it may start with "%" and contain
+ "*".
+
+ the_rest
+ Everything else on the line.
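+
+    As a rough example (an illustrative sketch, not a doctest),
+    ``LineInfo('%time x=1')`` should give ``pre=''``, ``esc='%'``,
+    ``ifun='time'`` and ``the_rest='x=1'``.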
+ """
+ def __init__(self, line, continue_prompt=False):
+ self.line = line
+ self.continue_prompt = continue_prompt
+ self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line)
+
+ self.pre_char = self.pre.strip()
+ if self.pre_char:
+ self.pre_whitespace = '' # No whitespace allowed before esc chars
+ else:
+ self.pre_whitespace = self.pre
+
+ def ofind(self, ip) -> OInfo:
+ """Do a full, attribute-walking lookup of the ifun in the various
+ namespaces for the given IPython InteractiveShell instance.
+
+        Return an OInfo instance describing the result (fields include
+        found, obj, namespace and ismagic).
+
+ Note: can cause state changes because of calling getattr, but should
+ only be run if autocall is on and if the line hasn't matched any
+ other, less dangerous handlers.
+
+ Does cache the results of the call, so can be called multiple times
+ without worrying about *further* damaging state.
+ """
+ return ip._ofind(self.ifun)
+
+ def __str__(self):
+ return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)
diff --git a/contrib/python/ipython/py3/IPython/core/ultratb.py b/contrib/python/ipython/py3/IPython/core/ultratb.py
new file mode 100644
index 0000000000..61b5939398
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/ultratb.py
@@ -0,0 +1,1518 @@
+# -*- coding: utf-8 -*-
+"""
+Verbose and colourful traceback formatting.
+
+**ColorTB**
+
+I've always found it a bit hard to visually parse tracebacks in Python. The
+ColorTB class is a solution to that problem. It colors the different parts of a
+traceback in a manner similar to what you would expect from a syntax-highlighting
+text editor.
+
+Installation instructions for ColorTB::
+
+    import sys
+    from IPython.core import ultratb
+    sys.excepthook = ultratb.ColorTB()
+
+**VerboseTB**
+
+I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
+of useful info when a traceback occurs. Ping originally had it spit out HTML
+and intended it for CGI programmers, but why should they have all the fun? I
+altered it to spit out colored text to the terminal. It's a bit overwhelming,
+but kind of neat, and maybe useful for long-running programs that you believe
+are bug-free. If a crash *does* occur in that type of program you want details.
+Give it a shot--you'll love it or you'll hate it.
+
+.. note::
+
+ The Verbose mode prints the variables currently visible where the exception
+ happened (shortening their strings if too long). This can potentially be
+ very slow, if you happen to have a huge data structure whose string
+ representation is complex to compute. Your computer may appear to freeze for
+ a while with cpu usage at 100%. If this occurs, you can cancel the traceback
+ with Ctrl-C (maybe hitting it more than once).
+
+ If you encounter this kind of situation often, you may want to use the
+ Verbose_novars mode instead of the regular Verbose, which avoids formatting
+ variables (but otherwise includes the information and context given by
+ Verbose).
+
+.. note::
+
+    The verbose mode prints all variables in the stack, which means it can
+    potentially leak sensitive information such as access keys or unencrypted
+    passwords.
+
+Installation instructions for VerboseTB::
+
+    import sys
+    from IPython.core import ultratb
+    sys.excepthook = ultratb.VerboseTB()
+
+Note: Much of the code in this module was lifted verbatim from the standard
+library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
+
+Color schemes
+-------------
+
+The colors are defined in the class TBTools through the use of the
+ColorSchemeTable class. Currently the following exist:
+
+ - NoColor: allows all of this module to be used in any terminal (the color
+ escapes are just dummy blank strings).
+
+ - Linux: is meant to look good in a terminal like the Linux console (black
+ or very dark background).
+
+ - LightBG: similar to Linux but swaps dark/light colors to be more readable
+ in light background terminals.
+
+ - Neutral: a neutral color scheme that should be readable on both light and
+ dark background
+
+You can implement other color schemes easily; the syntax is fairly
+self-explanatory. Please send any new schemes you develop back to the author
+for possible inclusion in future releases.
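+
+For example, to install the hook with an explicit scheme (a sketch that only
+assumes the ``ColorTB`` class and the scheme names listed above)::
+
+    import sys
+    from IPython.core import ultratb
+    sys.excepthook = ultratb.ColorTB(color_scheme='LightBG')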
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.ultratb
+ :parts: 3
+"""
+
+#*****************************************************************************
+# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
+# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+
+from collections.abc import Sequence
+import functools
+import inspect
+import linecache
+import pydoc
+import sys
+import time
+import traceback
+import types
+from types import TracebackType
+from typing import Any, List, Optional, Tuple
+
+import stack_data
+from pygments.formatters.terminal256 import Terminal256Formatter
+from pygments.styles import get_style_by_name
+
+import IPython.utils.colorable as colorable
+# IPython's own modules
+from IPython import get_ipython
+from IPython.core import debugger
+from IPython.core.display_trap import DisplayTrap
+from IPython.core.excolors import exception_colors
+from IPython.utils import PyColorize
+from IPython.utils import path as util_path
+from IPython.utils import py3compat
+from IPython.utils.terminal import get_terminal_size
+
+# Globals
+# amount of space to put line numbers before verbose tracebacks
+INDENT_SIZE = 8
+
+# Default color scheme. This is used, for example, by the traceback
+# formatter. When running in an actual IPython instance, the user's rc.colors
+# value is used, but having a module global makes this functionality available
+# to users of ultratb who are NOT running inside ipython.
+DEFAULT_SCHEME = 'NoColor'
+FAST_THRESHOLD = 10_000
+
+# ---------------------------------------------------------------------------
+# Code begins
+
+# Helper function -- largely belongs to VerboseTB, but we need the same
+# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
+# can be recognized properly by ipython.el's py-traceback-line-re
+# (SyntaxErrors have to be treated specially because they have no traceback)
+
+
+@functools.lru_cache()
+def count_lines_in_py_file(filename: str) -> int:
+ """
+ Given a filename, returns the number of lines in the file
+ if it ends with the extension ".py". Otherwise, returns 0.
+ """
+ if not filename.endswith(".py"):
+ return 0
+ else:
+ try:
+ with open(filename, "r") as file:
+ s = sum(1 for line in file)
+ except UnicodeError:
+ return 0
+ return s
+
+
+def get_line_number_of_frame(frame: types.FrameType) -> int:
+ """
+ Given a frame object, returns the total number of lines in the file
+ containing the frame's code object, or the number of lines in the
+ frame's source code if the file is not available.
+
+ Parameters
+ ----------
+ frame : FrameType
+ The frame object whose line number is to be determined.
+
+ Returns
+ -------
+ int
+ The total number of lines in the file containing the frame's
+ code object, or the number of lines in the frame's source code
+ if the file is not available.
+ """
+ filename = frame.f_code.co_filename
+ if filename is None:
+ print("No file....")
+ lines, first = inspect.getsourcelines(frame)
+ return first + len(lines)
+ return count_lines_in_py_file(filename)
+
+
+def _safe_string(value, what, func=str):
+ # Copied from cpython/Lib/traceback.py
+ try:
+ return func(value)
+ except:
+ return f"<{what} {func.__name__}() failed>"
+
+
+def _format_traceback_lines(lines, Colors, has_colors: bool, lvals):
+ """
+    Format traceback lines with a pointing arrow and leading line numbers.
+
+    Parameters
+    ----------
+    lines : list[Line]
+    Colors
+        ColorScheme used.
+    has_colors : bool
+        Whether to render the source lines with pygments highlighting.
+    lvals : str
+        Values of local variables, already colored, to inject just after the error line.
+ """
+ numbers_width = INDENT_SIZE - 1
+ res = []
+
+ for stack_line in lines:
+ if stack_line is stack_data.LINE_GAP:
+ res.append('%s (...)%s\n' % (Colors.linenoEm, Colors.Normal))
+ continue
+
+ line = stack_line.render(pygmented=has_colors).rstrip('\n') + '\n'
+ lineno = stack_line.lineno
+ if stack_line.is_current:
+ # This is the line with the error
+ pad = numbers_width - len(str(lineno))
+ num = '%s%s' % (debugger.make_arrow(pad), str(lineno))
+ start_color = Colors.linenoEm
+ else:
+ num = '%*s' % (numbers_width, lineno)
+ start_color = Colors.lineno
+
+ line = '%s%s%s %s' % (start_color, num, Colors.Normal, line)
+
+ res.append(line)
+ if lvals and stack_line.is_current:
+ res.append(lvals + '\n')
+ return res
+
+def _simple_format_traceback_lines(lnum, index, lines, Colors, lvals, _line_format):
+ """
+    Format traceback lines with a pointing arrow and leading line numbers.
+
+    Parameters
+    ----------
+    lnum : int
+        Number of the target line of code.
+    index : int
+        Which line in the list should be highlighted.
+    lines : list[str]
+    Colors
+        ColorScheme used.
+    lvals : str
+        Values of local variables, already colored, to inject just after the error line.
+    _line_format : f (str) -> (str, bool)
+        Return (colorized version of str, whether colorizing failed).
+ """
+ numbers_width = INDENT_SIZE - 1
+ res = []
+ for i, line in enumerate(lines, lnum - index):
+ # assert isinstance(line, str)
+ line = py3compat.cast_unicode(line)
+
+ new_line, err = _line_format(line, "str")
+ if not err:
+ line = new_line
+
+ if i == lnum:
+ # This is the line with the error
+ pad = numbers_width - len(str(i))
+ num = "%s%s" % (debugger.make_arrow(pad), str(lnum))
+ line = "%s%s%s %s%s" % (
+ Colors.linenoEm,
+ num,
+ Colors.line,
+ line,
+ Colors.Normal,
+ )
+ else:
+ num = "%*s" % (numbers_width, i)
+ line = "%s%s%s %s" % (Colors.lineno, num, Colors.Normal, line)
+
+ res.append(line)
+ if lvals and i == lnum:
+ res.append(lvals + "\n")
+ return res
+
+
+def _format_filename(file, ColorFilename, ColorNormal, *, lineno=None):
+ """
+    Format a filename line, using the caching compiler's custom code name when
+    available, or the default ``File *.py`` form otherwise.
+
+    Parameters
+    ----------
+    file : str
+    ColorFilename
+        ColorScheme's filename coloring to be used.
+    ColorNormal
+        ColorScheme's normal coloring to be used.
+    lineno : int, optional
+        Line number to include after the filename, if any.
+ """
+ ipinst = get_ipython()
+ if (
+ ipinst is not None
+ and (data := ipinst.compile.format_code_name(file)) is not None
+ ):
+ label, name = data
+ if lineno is None:
+ tpl_link = f"{{label}} {ColorFilename}{{name}}{ColorNormal}"
+ else:
+ tpl_link = (
+ f"{{label}} {ColorFilename}{{name}}, line {{lineno}}{ColorNormal}"
+ )
+ else:
+ label = "File"
+ name = util_path.compress_user(
+ py3compat.cast_unicode(file, util_path.fs_encoding)
+ )
+ if lineno is None:
+ tpl_link = f"{{label}} {ColorFilename}{{name}}{ColorNormal}"
+ else:
+ # can we make this the more friendly ", line {{lineno}}", or do we need to preserve the formatting with the colon?
+ tpl_link = f"{{label}} {ColorFilename}{{name}}:{{lineno}}{ColorNormal}"
+
+ return tpl_link.format(label=label, name=name, lineno=lineno)
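+
+# Illustrative results (colors omitted; a rough sketch only): a plain on-disk
+# file falls through to the default branch and renders roughly as
+#
+#     File ~/project/script.py:42
+#
+# while code compiled through IPython's caching compiler can be relabelled,
+# e.g. "Cell In[3], line 42".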
+
+#---------------------------------------------------------------------------
+# Module classes
+class TBTools(colorable.Colorable):
+ """Basic tools used by all traceback printer classes."""
+
+ # Number of frames to skip when reporting tracebacks
+ tb_offset = 0
+
+ def __init__(
+ self,
+ color_scheme="NoColor",
+ call_pdb=False,
+ ostream=None,
+ parent=None,
+ config=None,
+ *,
+ debugger_cls=None,
+ ):
+ # Whether to call the interactive pdb debugger after printing
+ # tracebacks or not
+ super(TBTools, self).__init__(parent=parent, config=config)
+ self.call_pdb = call_pdb
+
+ # Output stream to write to. Note that we store the original value in
+ # a private attribute and then make the public ostream a property, so
+ # that we can delay accessing sys.stdout until runtime. The way
+ # things are written now, the sys.stdout object is dynamically managed
+ # so a reference to it should NEVER be stored statically. This
+ # property approach confines this detail to a single location, and all
+ # subclasses can simply access self.ostream for writing.
+ self._ostream = ostream
+
+ # Create color table
+ self.color_scheme_table = exception_colors()
+
+ self.set_colors(color_scheme)
+ self.old_scheme = color_scheme # save initial value for toggles
+ self.debugger_cls = debugger_cls or debugger.Pdb
+
+ if call_pdb:
+ self.pdb = self.debugger_cls()
+ else:
+ self.pdb = None
+
+ def _get_ostream(self):
+ """Output stream that exceptions are written to.
+
+ Valid values are:
+
+ - None: the default, which means that IPython will dynamically resolve
+ to sys.stdout. This ensures compatibility with most tools, including
+ Windows (where plain stdout doesn't recognize ANSI escapes).
+
+ - Any object with 'write' and 'flush' attributes.
+ """
+ return sys.stdout if self._ostream is None else self._ostream
+
+ def _set_ostream(self, val):
+ assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
+ self._ostream = val
+
+ ostream = property(_get_ostream, _set_ostream)
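+
+    # Example (a sketch, not part of the API contract): redirect formatted
+    # tracebacks into an in-memory buffer instead of sys.stdout, e.g. with the
+    # FormattedTB subclass defined later in this module:
+    #
+    #     import io
+    #     tb = FormattedTB(mode='Plain', color_scheme='NoColor')
+    #     tb.ostream = io.StringIO()
+    #     # ... let tb handle an exception ...
+    #     report = tb.ostream.getvalue()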
+
+ @staticmethod
+ def _get_chained_exception(exception_value):
+ cause = getattr(exception_value, "__cause__", None)
+ if cause:
+ return cause
+ if getattr(exception_value, "__suppress_context__", False):
+ return None
+ return getattr(exception_value, "__context__", None)
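+
+    # The precedence above follows PEP 3134: an explicit `raise X from Y` sets
+    # __cause__ and is reported first; `raise X from None` sets
+    # __suppress_context__ and hides the implicit __context__. A quick sketch:
+    #
+    #     try:
+    #         raise KeyError("inner")
+    #     except KeyError as exc:
+    #         raise ValueError("outer") from exc   # __cause__ -> reported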
+
+ def get_parts_of_chained_exception(
+ self, evalue
+ ) -> Optional[Tuple[type, BaseException, TracebackType]]:
+ chained_evalue = self._get_chained_exception(evalue)
+
+ if chained_evalue:
+ return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
+ return None
+
+ def prepare_chained_exception_message(self, cause) -> List[Any]:
+ direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
+ exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
+
+ if cause:
+ message = [[direct_cause]]
+ else:
+ message = [[exception_during_handling]]
+ return message
+
+ @property
+ def has_colors(self) -> bool:
+ return self.color_scheme_table.active_scheme_name.lower() != "nocolor"
+
+ def set_colors(self, *args, **kw):
+ """Shorthand access to the color table scheme selector method."""
+
+ # Set own color table
+ self.color_scheme_table.set_active_scheme(*args, **kw)
+ # for convenience, set Colors to the active scheme
+ self.Colors = self.color_scheme_table.active_colors
+ # Also set colors of debugger
+ if hasattr(self, 'pdb') and self.pdb is not None:
+ self.pdb.set_colors(*args, **kw)
+
+ def color_toggle(self):
+ """Toggle between the currently active color scheme and NoColor."""
+
+ if self.color_scheme_table.active_scheme_name == 'NoColor':
+ self.color_scheme_table.set_active_scheme(self.old_scheme)
+ self.Colors = self.color_scheme_table.active_colors
+ else:
+ self.old_scheme = self.color_scheme_table.active_scheme_name
+ self.color_scheme_table.set_active_scheme('NoColor')
+ self.Colors = self.color_scheme_table.active_colors
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return '\n'.join(stb)
+
+ def text(self, etype, value, tb, tb_offset: Optional[int] = None, context=5):
+ """Return formatted traceback.
+
+ Subclasses may override this if they add extra arguments.
+ """
+ tb_list = self.structured_traceback(etype, value, tb,
+ tb_offset, context)
+ return self.stb2text(tb_list)
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ number_of_lines_of_context: int = 5,
+ ):
+ """Return a list of traceback frames.
+
+ Must be implemented by each class.
+ """
+ raise NotImplementedError()
+
+
+#---------------------------------------------------------------------------
+class ListTB(TBTools):
+ """Print traceback information from a traceback list, with optional color.
+
+ Calling requires 3 arguments: (etype, evalue, elist)
+ as would be obtained by::
+
+ etype, evalue, tb = sys.exc_info()
+ if tb:
+ elist = traceback.extract_tb(tb)
+ else:
+ elist = None
+
+ It can thus be used by programs which need to process the traceback before
+ printing (such as console replacements based on the code module from the
+ standard library).
+
+ Because they are meant to be called without a full traceback (only a
+ list), instances of this class can't call the interactive pdb debugger."""
+
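+    # A minimal usage sketch, following the sys.exc_info() convention shown in
+    # the class docstring (variable names are illustrative):
+    #
+    #     ltb = ListTB(color_scheme='NoColor')
+    #     try:
+    #         1 / 0
+    #     except ZeroDivisionError:
+    #         etype, evalue, tb = sys.exc_info()
+    #         ltb(etype, evalue, traceback.extract_tb(tb))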
+
+ def __call__(self, etype, value, elist):
+ self.ostream.flush()
+ self.ostream.write(self.text(etype, value, elist))
+ self.ostream.write('\n')
+
+ def _extract_tb(self, tb):
+ if tb:
+ return traceback.extract_tb(tb)
+ else:
+ return None
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ context=5,
+ ):
+ """Return a color formatted string with the traceback info.
+
+ Parameters
+ ----------
+ etype : exception type
+ Type of the exception raised.
+ evalue : object
+ Data stored in the exception
+ etb : list | TracebackType | None
+ If list: List of frames, see class docstring for details.
+ If Traceback: Traceback of the exception.
+ tb_offset : int, optional
+ Number of frames in the traceback to skip. If not given, the
+            instance value is used (set in constructor).
+ context : int, optional
+ Number of lines of context information to print.
+
+ Returns
+ -------
+ String with formatted exception.
+ """
+ # This is a workaround to get chained_exc_ids in recursive calls
+ # etb should not be a tuple if structured_traceback is not recursive
+ if isinstance(etb, tuple):
+ etb, chained_exc_ids = etb
+ else:
+ chained_exc_ids = set()
+
+ if isinstance(etb, list):
+ elist = etb
+ elif etb is not None:
+ elist = self._extract_tb(etb)
+ else:
+ elist = []
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ assert isinstance(tb_offset, int)
+ Colors = self.Colors
+ out_list = []
+ if elist:
+
+ if tb_offset and len(elist) > tb_offset:
+ elist = elist[tb_offset:]
+
+ out_list.append('Traceback %s(most recent call last)%s:' %
+ (Colors.normalEm, Colors.Normal) + '\n')
+ out_list.extend(self._format_list(elist))
+ # The exception info should be a single entry in the list.
+ lines = ''.join(self._format_exception_only(etype, evalue))
+ out_list.append(lines)
+
+ exception = self.get_parts_of_chained_exception(evalue)
+
+ if exception and (id(exception[1]) not in chained_exc_ids):
+ chained_exception_message = (
+ self.prepare_chained_exception_message(evalue.__cause__)[0]
+ if evalue is not None
+ else ""
+ )
+ etype, evalue, etb = exception
+ # Trace exception to avoid infinite 'cause' loop
+ chained_exc_ids.add(id(exception[1]))
+ chained_exceptions_tb_offset = 0
+ out_list = (
+ self.structured_traceback(
+ etype,
+ evalue,
+ (etb, chained_exc_ids), # type: ignore
+ chained_exceptions_tb_offset,
+ context,
+ )
+ + chained_exception_message
+ + out_list)
+
+ return out_list
+
+ def _format_list(self, extracted_list):
+ """Format a list of traceback entry tuples for printing.
+
+ Given a list of tuples as returned by extract_tb() or
+ extract_stack(), return a list of strings ready for printing.
+ Each string in the resulting list corresponds to the item with the
+ same index in the argument list. Each string ends in a newline;
+ the strings may contain internal newlines as well, for those items
+ whose source text line is not None.
+
+ Lifted almost verbatim from traceback.py
+ """
+
+ Colors = self.Colors
+ output_list = []
+ for ind, (filename, lineno, name, line) in enumerate(extracted_list):
+ normalCol, nameCol, fileCol, lineCol = (
+ # Emphasize the last entry
+ (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line)
+ if ind == len(extracted_list) - 1
+ else (Colors.Normal, Colors.name, Colors.filename, "")
+ )
+
+ fns = _format_filename(filename, fileCol, normalCol, lineno=lineno)
+ item = f"{normalCol} {fns}"
+
+ if name != "<module>":
+ item += f" in {nameCol}{name}{normalCol}\n"
+ else:
+ item += "\n"
+ if line:
+ item += f"{lineCol} {line.strip()}{normalCol}\n"
+ output_list.append(item)
+
+ return output_list
+
+ def _format_exception_only(self, etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.exc_info()[:2]. The return value is a list of strings, each ending
+ in a newline. Normally, the list contains a single string; however,
+ for SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax error
+        occurred. The message indicating which exception occurred is always
+        the last string in the list.
+
+ Also lifted nearly verbatim from traceback.py
+ """
+ have_filedata = False
+ Colors = self.Colors
+ output_list = []
+ stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal)
+ if value is None:
+ # Not sure if this can still happen in Python 2.6 and above
+ output_list.append(stype + "\n")
+ else:
+ if issubclass(etype, SyntaxError):
+ have_filedata = True
+ if not value.filename: value.filename = "<string>"
+ if value.lineno:
+ lineno = value.lineno
+ textline = linecache.getline(value.filename, value.lineno)
+ else:
+ lineno = "unknown"
+ textline = ""
+ output_list.append(
+ "%s %s%s\n"
+ % (
+ Colors.normalEm,
+ _format_filename(
+ value.filename,
+ Colors.filenameEm,
+ Colors.normalEm,
+ lineno=(None if lineno == "unknown" else lineno),
+ ),
+ Colors.Normal,
+ )
+ )
+ if textline == "":
+ textline = py3compat.cast_unicode(value.text, "utf-8")
+
+ if textline is not None:
+ i = 0
+ while i < len(textline) and textline[i].isspace():
+ i += 1
+ output_list.append(
+ "%s %s%s\n" % (Colors.line, textline.strip(), Colors.Normal)
+ )
+ if value.offset is not None:
+ s = ' '
+ for c in textline[i:value.offset - 1]:
+ if c.isspace():
+ s += c
+ else:
+ s += " "
+ output_list.append(
+ "%s%s^%s\n" % (Colors.caret, s, Colors.Normal)
+ )
+
+ try:
+ s = value.msg
+ except Exception:
+ s = self._some_str(value)
+ if s:
+ output_list.append(
+ "%s%s:%s %s\n" % (stype, Colors.excName, Colors.Normal, s)
+ )
+ else:
+ output_list.append("%s\n" % stype)
+
+ # PEP-678 notes
+ output_list.extend(f"{x}\n" for x in getattr(value, "__notes__", []))
+
+ # sync with user hooks
+ if have_filedata:
+ ipinst = get_ipython()
+ if ipinst is not None:
+ ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
+
+ return output_list
+
+ def get_exception_only(self, etype, value):
+ """Only print the exception type and message, without a traceback.
+
+ Parameters
+ ----------
+ etype : exception type
+ value : exception value
+ """
+ return ListTB.structured_traceback(self, etype, value)
+
+ def show_exception_only(self, etype, evalue):
+ """Only print the exception type and message, without a traceback.
+
+ Parameters
+ ----------
+ etype : exception type
+ evalue : exception value
+ """
+ # This method needs to use __call__ from *this* class, not the one from
+ # a subclass whose signature or behavior may be different
+ ostream = self.ostream
+ ostream.flush()
+ ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
+ ostream.flush()
+
+ def _some_str(self, value):
+ # Lifted from traceback.py
+ try:
+ return py3compat.cast_unicode(str(value))
+ except:
+ return u'<unprintable %s object>' % type(value).__name__
+
+
+class FrameInfo:
+ """
+ Mirror of stack data's FrameInfo, but so that we can bypass highlighting on
+ really long frames.
+ """
+
+ description: Optional[str]
+ filename: Optional[str]
+ lineno: Tuple[int]
+ # number of context lines to use
+ context: Optional[int]
+
+ @classmethod
+ def _from_stack_data_FrameInfo(cls, frame_info):
+ return cls(
+ getattr(frame_info, "description", None),
+ getattr(frame_info, "filename", None), # type: ignore[arg-type]
+ getattr(frame_info, "lineno", None), # type: ignore[arg-type]
+ getattr(frame_info, "frame", None),
+ getattr(frame_info, "code", None),
+ sd=frame_info,
+ context=None,
+ )
+
+ def __init__(
+ self,
+ description: Optional[str],
+ filename: str,
+ lineno: Tuple[int],
+ frame,
+ code,
+ *,
+ sd=None,
+ context=None,
+ ):
+ self.description = description
+ self.filename = filename
+ self.lineno = lineno
+ self.frame = frame
+ self.code = code
+ self._sd = sd
+ self.context = context
+
+ # self.lines = []
+ if sd is None:
+ ix = inspect.getsourcelines(frame)
+ self.raw_lines = ix[0]
+
+ @property
+ def variables_in_executing_piece(self):
+ if self._sd:
+ return self._sd.variables_in_executing_piece
+ else:
+ return []
+
+ @property
+ def lines(self):
+ return self._sd.lines
+
+ @property
+ def executing(self):
+ if self._sd:
+ return self._sd.executing
+ else:
+ return None
+
+
+# ----------------------------------------------------------------------------
+class VerboseTB(TBTools):
+ """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
+ of HTML. Requires inspect and pydoc. Crazy, man.
+
+ Modified version which optionally strips the topmost entries from the
+ traceback, to be used with alternate interpreters (because their own code
+ would appear in the traceback)."""
+
+ _tb_highlight = ""
+
+ def __init__(
+ self,
+ color_scheme: str = "Linux",
+ call_pdb: bool = False,
+ ostream=None,
+ tb_offset: int = 0,
+ long_header: bool = False,
+ include_vars: bool = True,
+ check_cache=None,
+ debugger_cls=None,
+ parent=None,
+ config=None,
+ ):
+ """Specify traceback offset, headers and color scheme.
+
+ Define how many frames to drop from the tracebacks. Calling it with
+ tb_offset=1 allows use of this handler in interpreters which will have
+ their own code at the top of the traceback (VerboseTB will first
+ remove that frame before printing the traceback info)."""
+ TBTools.__init__(
+ self,
+ color_scheme=color_scheme,
+ call_pdb=call_pdb,
+ ostream=ostream,
+ parent=parent,
+ config=config,
+ debugger_cls=debugger_cls,
+ )
+ self.tb_offset = tb_offset
+ self.long_header = long_header
+ self.include_vars = include_vars
+ # By default we use linecache.checkcache, but the user can provide a
+ # different check_cache implementation. This was formerly used by the
+ # IPython kernel for interactive code, but is no longer necessary.
+ if check_cache is None:
+ check_cache = linecache.checkcache
+ self.check_cache = check_cache
+
+ self.skip_hidden = True
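+
+        # Frames whose f_locals define `__tracebackhide__` (a convention also
+        # used by tools like pytest) are omitted from the formatted traceback
+        # when skip_hidden is True; e.g. a helper can opt out with (a sketch):
+        #
+        #     def _internal_helper():
+        #         __tracebackhide__ = True
+        #         ...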
+
+ def format_record(self, frame_info: FrameInfo):
+ """Format a single stack frame"""
+ assert isinstance(frame_info, FrameInfo)
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+
+ if isinstance(frame_info._sd, stack_data.RepeatedFrames):
+ return ' %s[... skipping similar frames: %s]%s\n' % (
+ Colors.excName, frame_info.description, ColorsNormal)
+
+ indent = " " * INDENT_SIZE
+ em_normal = "%s\n%s%s" % (Colors.valEm, indent, ColorsNormal)
+ tpl_call = f"in {Colors.vName}{{file}}{Colors.valEm}{{scope}}{ColorsNormal}"
+ tpl_call_fail = "in %s%%s%s(***failed resolving arguments***)%s" % (
+ Colors.vName,
+ Colors.valEm,
+ ColorsNormal,
+ )
+ tpl_name_val = "%%s %s= %%s%s" % (Colors.valEm, ColorsNormal)
+
+ link = _format_filename(
+ frame_info.filename,
+ Colors.filenameEm,
+ ColorsNormal,
+ lineno=frame_info.lineno,
+ )
+ args, varargs, varkw, locals_ = inspect.getargvalues(frame_info.frame)
+ if frame_info.executing is not None:
+ func = frame_info.executing.code_qualname()
+ else:
+ func = "?"
+ if func == "<module>":
+ call = ""
+ else:
+ # Decide whether to include variable details or not
+ var_repr = eqrepr if self.include_vars else nullrepr
+ try:
+ scope = inspect.formatargvalues(
+ args, varargs, varkw, locals_, formatvalue=var_repr
+ )
+ call = tpl_call.format(file=func, scope=scope)
+ except KeyError:
+ # This happens in situations like errors inside generator
+ # expressions, where local variables are listed in the
+ # line, but can't be extracted from the frame. I'm not
+ # 100% sure this isn't actually a bug in inspect itself,
+ # but since there's no info for us to compute with, the
+ # best we can do is report the failure and move on. Here
+ # we must *not* call any traceback construction again,
+ # because that would mess up use of %debug later on. So we
+ # simply report the failure and move on. The only
+ # limitation will be that this frame won't have locals
+ # listed in the call signature. Quite subtle problem...
+ # I can't think of a good way to validate this in a unit
+ # test, but running a script consisting of:
+ # dict( (k,v.strip()) for (k,v) in range(10) )
+ # will illustrate the error, if this exception catch is
+ # disabled.
+ call = tpl_call_fail % func
+
+ lvals = ''
+ lvals_list = []
+ if self.include_vars:
+ try:
+ # we likely want to fix stackdata at some point, but
+ # still need a workaround.
+ fibp = frame_info.variables_in_executing_piece
+ for var in fibp:
+ lvals_list.append(tpl_name_val % (var.name, repr(var.value)))
+ except Exception:
+ lvals_list.append(
+ "Exception trying to inspect frame. No more locals available."
+ )
+ if lvals_list:
+ lvals = '%s%s' % (indent, em_normal.join(lvals_list))
+
+ result = f'{link}{", " if call else ""}{call}\n'
+ if frame_info._sd is None:
+ # fast fallback if file is too long
+ tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
+ link = tpl_link % util_path.compress_user(frame_info.filename)
+ level = "%s %s\n" % (link, call)
+ _line_format = PyColorize.Parser(
+ style=self.color_scheme_table.active_scheme_name, parent=self
+ ).format2
+ first_line = frame_info.code.co_firstlineno
+ current_line = frame_info.lineno[0]
+ raw_lines = frame_info.raw_lines
+ index = current_line - first_line
+
+ if index >= frame_info.context:
+ start = max(index - frame_info.context, 0)
+ stop = index + frame_info.context
+ index = frame_info.context
+ else:
+ start = 0
+ stop = index + frame_info.context
+ raw_lines = raw_lines[start:stop]
+
+ return "%s%s" % (
+ level,
+ "".join(
+ _simple_format_traceback_lines(
+ current_line,
+ index,
+ raw_lines,
+ Colors,
+ lvals,
+ _line_format,
+ )
+ ),
+ )
+ # result += "\n".join(frame_info.raw_lines)
+ else:
+ result += "".join(
+ _format_traceback_lines(
+ frame_info.lines, Colors, self.has_colors, lvals
+ )
+ )
+ return result
+
+ def prepare_header(self, etype: str, long_version: bool = False):
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
+ width = min(75, get_terminal_size()[0])
+ if long_version:
+ # Header with the exception type, python version, and date
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+
+ head = "%s%s%s\n%s%s%s\n%s" % (
+ colors.topline,
+ "-" * width,
+ colorsnormal,
+ exc,
+ " " * (width - len(etype) - len(pyver)),
+ pyver,
+ date.rjust(width),
+ )
+ head += (
+ "\nA problem occurred executing Python code. Here is the sequence of function"
+ "\ncalls leading up to the error, with the most recent (innermost) call last."
+ )
+ else:
+ # Simplified header
+ head = "%s%s" % (
+ exc,
+ "Traceback (most recent call last)".rjust(width - len(etype)),
+ )
+
+ return head
+
+ def format_exception(self, etype, evalue):
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ # Get (safely) a string form of the exception info
+ try:
+ etype_str, evalue_str = map(str, (etype, evalue))
+ except:
+ # User exception is improperly defined.
+ etype, evalue = str, sys.exc_info()[:2]
+ etype_str, evalue_str = map(str, (etype, evalue))
+
+ # PEP-678 notes
+ notes = getattr(evalue, "__notes__", [])
+ if not isinstance(notes, Sequence) or isinstance(notes, (str, bytes)):
+ notes = [_safe_string(notes, "__notes__", func=repr)]
+
+ # ... and format it
+ return [
+ "{}{}{}: {}".format(
+ colors.excName,
+ etype_str,
+ colorsnormal,
+ py3compat.cast_unicode(evalue_str),
+ ),
+ *(
+ "{}{}".format(
+ colorsnormal, _safe_string(py3compat.cast_unicode(n), "note")
+ )
+ for n in notes
+ ),
+ ]
+
+ def format_exception_as_a_whole(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType],
+ number_of_lines_of_context,
+ tb_offset: Optional[int],
+ ):
+ """Formats the header, traceback and exception message for a single exception.
+
+ This may be called multiple times by Python 3 exception chaining
+ (PEP 3134).
+ """
+ # some locals
+ orig_etype = etype
+ try:
+ etype = etype.__name__ # type: ignore
+ except AttributeError:
+ pass
+
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ assert isinstance(tb_offset, int)
+ head = self.prepare_header(str(etype), self.long_header)
+ records = (
+ self.get_records(etb, number_of_lines_of_context, tb_offset) if etb else []
+ )
+
+ frames = []
+ skipped = 0
+ lastrecord = len(records) - 1
+ for i, record in enumerate(records):
+ if (
+ not isinstance(record._sd, stack_data.RepeatedFrames)
+ and self.skip_hidden
+ ):
+ if (
+ record.frame.f_locals.get("__tracebackhide__", 0)
+ and i != lastrecord
+ ):
+ skipped += 1
+ continue
+ if skipped:
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+ frames.append(
+ " %s[... skipping hidden %s frame]%s\n"
+ % (Colors.excName, skipped, ColorsNormal)
+ )
+ skipped = 0
+ frames.append(self.format_record(record))
+ if skipped:
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+ frames.append(
+ " %s[... skipping hidden %s frame]%s\n"
+ % (Colors.excName, skipped, ColorsNormal)
+ )
+
+ formatted_exception = self.format_exception(etype, evalue)
+ if records:
+ frame_info = records[-1]
+ ipinst = get_ipython()
+ if ipinst is not None:
+ ipinst.hooks.synchronize_with_editor(frame_info.filename, frame_info.lineno, 0)
+
+ return [[head] + frames + formatted_exception]
+
+ def get_records(
+ self, etb: TracebackType, number_of_lines_of_context: int, tb_offset: int
+ ):
+ assert etb is not None
+ context = number_of_lines_of_context - 1
+ after = context // 2
+ before = context - after
+ if self.has_colors:
+ style = get_style_by_name("default")
+ style = stack_data.style_with_executing_node(style, self._tb_highlight)
+ formatter = Terminal256Formatter(style=style)
+ else:
+ formatter = None
+ options = stack_data.Options(
+ before=before,
+ after=after,
+ pygments_formatter=formatter,
+ )
+
+ # Let's estimate the amount of code we will have to parse/highlight.
+ cf: Optional[TracebackType] = etb
+ max_len = 0
+ tbs = []
+ while cf is not None:
+ try:
+ mod = inspect.getmodule(cf.tb_frame)
+ if mod is not None:
+ mod_name = mod.__name__
+ root_name, *_ = mod_name.split(".")
+ if root_name == "IPython":
+ cf = cf.tb_next
+ continue
+ max_len = get_line_number_of_frame(cf.tb_frame)
+
+ except OSError:
+ max_len = 0
+ max_len = max(max_len, max_len)
+ tbs.append(cf)
+ cf = getattr(cf, "tb_next", None)
+
+ if max_len > FAST_THRESHOLD:
+ FIs = []
+ for tb in tbs:
+ frame = tb.tb_frame # type: ignore
+ lineno = (frame.f_lineno,)
+ code = frame.f_code
+ filename = code.co_filename
+ # TODO: Here we need to use before/after/
+ FIs.append(
+ FrameInfo(
+ "Raw frame", filename, lineno, frame, code, context=context
+ )
+ )
+ return FIs
+ res = list(stack_data.FrameInfo.stack_data(etb, options=options))[tb_offset:]
+ res = [FrameInfo._from_stack_data_FrameInfo(r) for r in res]
+ return res
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ number_of_lines_of_context: int = 5,
+ ):
+ """Return a nice text document describing the traceback."""
+ formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
+ tb_offset)
+
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
+ structured_traceback_parts = [head]
+ chained_exceptions_tb_offset = 0
+ lines_of_context = 3
+ formatted_exceptions = formatted_exception
+ exception = self.get_parts_of_chained_exception(evalue)
+ if exception:
+ assert evalue is not None
+ formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
+ etype, evalue, etb = exception
+ else:
+ evalue = None
+ chained_exc_ids = set()
+ while evalue:
+ formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
+ chained_exceptions_tb_offset)
+ exception = self.get_parts_of_chained_exception(evalue)
+
+ if exception and not id(exception[1]) in chained_exc_ids:
+ chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
+ formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
+ etype, evalue, etb = exception
+ else:
+ evalue = None
+
+ # we want to see exceptions in a reversed order:
+ # the first exception should be on top
+ for formatted_exception in reversed(formatted_exceptions):
+ structured_traceback_parts += formatted_exception
+
+ return structured_traceback_parts
+
+ def debugger(self, force: bool = False):
+ """Call up the pdb debugger if desired, always clean up the tb
+ reference.
+
+ Keywords:
+
+ - force(False): by default, this routine checks the instance call_pdb
+ flag and does not actually invoke the debugger if the flag is false.
+ The 'force' option forces the debugger to activate even if the flag
+ is false.
+
+ If the call_pdb flag is set, the pdb interactive debugger is
+ invoked. In all cases, the self.tb reference to the current traceback
+ is deleted to prevent lingering references which hamper memory
+ management.
+
+ Note that each call to pdb() does an 'import readline', so if your app
+ requires a special setup for the readline completers, you'll have to
+ fix that by hand after invoking the exception handler."""
+
+ if force or self.call_pdb:
+ if self.pdb is None:
+ self.pdb = self.debugger_cls()
+ # the system displayhook may have changed, restore the original
+ # for pdb
+ display_trap = DisplayTrap(hook=sys.__displayhook__)
+ with display_trap:
+ self.pdb.reset()
+ # Find the right frame so we don't pop up inside ipython itself
+ if hasattr(self, "tb") and self.tb is not None: # type: ignore[has-type]
+ etb = self.tb # type: ignore[has-type]
+ else:
+ etb = self.tb = sys.last_traceback
+ while self.tb is not None and self.tb.tb_next is not None:
+ assert self.tb.tb_next is not None
+ self.tb = self.tb.tb_next
+ if etb and etb.tb_next:
+ etb = etb.tb_next
+ self.pdb.botframe = etb.tb_frame
+ self.pdb.interaction(None, etb)
+
+ if hasattr(self, 'tb'):
+ del self.tb
+
+ def handler(self, info=None):
+ (etype, evalue, etb) = info or sys.exc_info()
+ self.tb = etb
+ ostream = self.ostream
+ ostream.flush()
+ ostream.write(self.text(etype, evalue, etb))
+ ostream.write('\n')
+ ostream.flush()
+
+ # Changed so an instance can just be called as VerboseTB_inst() and print
+ # out the right info on its own.
+ def __call__(self, etype=None, evalue=None, etb=None):
+ """This hook can replace sys.excepthook (for Python 2.1 or higher)."""
+ if etb is None:
+ self.handler()
+ else:
+ self.handler((etype, evalue, etb))
+ try:
+ self.debugger()
+ except KeyboardInterrupt:
+ print("\nKeyboardInterrupt")
+
+
+#----------------------------------------------------------------------------
+class FormattedTB(VerboseTB, ListTB):
+ """Subclass ListTB but allow calling with a traceback.
+
+ It can thus be used as a sys.excepthook for Python > 2.1.
+
+ Also adds 'Context' and 'Verbose' modes, not available in ListTB.
+
+ Allows a tb_offset to be specified. This is useful for situations where
+ one needs to remove a number of topmost frames from the traceback (such as
+ occurs with python programs that themselves execute other python code,
+ like Python shells). """
+
+ mode: str
+
+ def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
+ ostream=None,
+ tb_offset=0, long_header=False, include_vars=False,
+ check_cache=None, debugger_cls=None,
+ parent=None, config=None):
+
+ # NEVER change the order of this list. Put new modes at the end:
+ self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal']
+ self.verbose_modes = self.valid_modes[1:3]
+
+ VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
+ ostream=ostream, tb_offset=tb_offset,
+ long_header=long_header, include_vars=include_vars,
+ check_cache=check_cache, debugger_cls=debugger_cls,
+ parent=parent, config=config)
+
+ # Different types of tracebacks are joined with different separators to
+ # form a single string. They are taken from this dict
+ self._join_chars = dict(Plain='', Context='\n', Verbose='\n',
+ Minimal='')
+ # set_mode also sets the tb_join_char attribute
+ self.set_mode(mode)
+
+ def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ mode = self.mode
+ if mode in self.verbose_modes:
+ # Verbose modes need a full traceback
+ return VerboseTB.structured_traceback(
+ self, etype, value, tb, tb_offset, number_of_lines_of_context
+ )
+ elif mode == 'Minimal':
+ return ListTB.get_exception_only(self, etype, value)
+ else:
+ # We must check the source cache because otherwise we can print
+ # out-of-date source code.
+ self.check_cache()
+ # Now we can extract and format the exception
+ return ListTB.structured_traceback(
+ self, etype, value, tb, tb_offset, number_of_lines_of_context
+ )
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return self.tb_join_char.join(stb)
+
+ def set_mode(self, mode: Optional[str] = None):
+ """Switch to the desired mode.
+
+ If mode is not specified, cycles through the available modes."""
+
+ if not mode:
+ new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
+ len(self.valid_modes)
+ self.mode = self.valid_modes[new_idx]
+ elif mode not in self.valid_modes:
+ raise ValueError(
+ "Unrecognized mode in FormattedTB: <" + mode + ">\n"
+ "Valid modes: " + str(self.valid_modes)
+ )
+ else:
+ assert isinstance(mode, str)
+ self.mode = mode
+ # include variable details only in 'Verbose' mode
+ self.include_vars = (self.mode == self.valid_modes[2])
+ # Set the join character for generating text tracebacks
+ self.tb_join_char = self._join_chars[self.mode]
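+
+    # Runtime mode switching, as a rough sketch ('Plain', 'Context', 'Verbose'
+    # and 'Minimal' are the entries of `valid_modes` above):
+    #
+    #     ftb = FormattedTB(mode='Plain', color_scheme='NoColor')
+    #     ftb.set_mode('Verbose')   # full tracebacks with local variables
+    #     ftb.set_mode()            # no argument: cycle to the next mode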
+
+ # some convenient shortcuts
+ def plain(self):
+ self.set_mode(self.valid_modes[0])
+
+ def context(self):
+ self.set_mode(self.valid_modes[1])
+
+ def verbose(self):
+ self.set_mode(self.valid_modes[2])
+
+ def minimal(self):
+ self.set_mode(self.valid_modes[3])
+
+
+#----------------------------------------------------------------------------
+class AutoFormattedTB(FormattedTB):
+ """A traceback printer which can be called on the fly.
+
+ It will find out about exceptions by itself.
+
+ A brief example::
+
+        AutoTB = AutoFormattedTB(mode='Verbose', color_scheme='Linux')
+ try:
+ ...
+ except:
+ AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
+ """
+
+ def __call__(self, etype=None, evalue=None, etb=None,
+ out=None, tb_offset=None):
+ """Print out a formatted exception traceback.
+
+ Optional arguments:
+ - out: an open file-like object to direct output to.
+
+ - tb_offset: the number of frames to skip over in the stack, on a
+          per-call basis (this temporarily overrides the instance's tb_offset
+          given at initialization time)."""
+
+ if out is None:
+ out = self.ostream
+ out.flush()
+ out.write(self.text(etype, evalue, etb, tb_offset))
+ out.write('\n')
+ out.flush()
+ # FIXME: we should remove the auto pdb behavior from here and leave
+ # that to the clients.
+ try:
+ self.debugger()
+ except KeyboardInterrupt:
+ print("\nKeyboardInterrupt")
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ number_of_lines_of_context: int = 5,
+ ):
+        # tb: TracebackType or tuple of tb types?
+ if etype is None:
+ etype, evalue, etb = sys.exc_info()
+ if isinstance(etb, tuple):
+ # tb is a tuple if this is a chained exception.
+ self.tb = etb[0]
+ else:
+ self.tb = etb
+ return FormattedTB.structured_traceback(
+ self, etype, evalue, etb, tb_offset, number_of_lines_of_context
+ )
+
+
+#---------------------------------------------------------------------------
+
+# A simple class to preserve Nathan's original functionality.
+class ColorTB(FormattedTB):
+ """Shorthand to initialize a FormattedTB in Linux colors mode."""
+
+ def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
+ FormattedTB.__init__(self, color_scheme=color_scheme,
+ call_pdb=call_pdb, **kwargs)
+
+
+class SyntaxTB(ListTB):
+ """Extension which holds some state: the last exception value"""
+
+ def __init__(self, color_scheme='NoColor', parent=None, config=None):
+ ListTB.__init__(self, color_scheme, parent=parent, config=config)
+ self.last_syntax_error = None
+
+ def __call__(self, etype, value, elist):
+ self.last_syntax_error = value
+
+ ListTB.__call__(self, etype, value, elist)
+
+ def structured_traceback(self, etype, value, elist, tb_offset=None,
+ context=5):
+ # If the source file has been edited, the line in the syntax error can
+ # be wrong (retrieved from an outdated cache). This replaces it with
+ # the current value.
+ if isinstance(value, SyntaxError) \
+ and isinstance(value.filename, str) \
+ and isinstance(value.lineno, int):
+ linecache.checkcache(value.filename)
+ newtext = linecache.getline(value.filename, value.lineno)
+ if newtext:
+ value.text = newtext
+ self.last_syntax_error = value
+ return super(SyntaxTB, self).structured_traceback(etype, value, elist,
+ tb_offset=tb_offset, context=context)
+
+ def clear_err_state(self):
+ """Return the current error state and clear it"""
+ e = self.last_syntax_error
+ self.last_syntax_error = None
+ return e
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return ''.join(stb)
+
+
+# some internal-use functions
+def text_repr(value):
+ """Hopefully pretty robust repr equivalent."""
+ # this is pretty horrible but should always return *something*
+ try:
+ return pydoc.text.repr(value) # type: ignore[call-arg]
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ return repr(value)
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ # all still in an except block so we catch
+ # getattr raising
+ name = getattr(value, '__name__', None)
+ if name:
+ # ick, recursion
+ return text_repr(name)
+ klass = getattr(value, '__class__', None)
+ if klass:
+ return '%s instance' % text_repr(klass)
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 'UNRECOVERABLE REPR FAILURE'
+
+
+def eqrepr(value, repr=text_repr):
+ return '=%s' % repr(value)
+
+
+def nullrepr(value, repr=text_repr):
+ return ''
diff --git a/contrib/python/ipython/py3/IPython/core/usage.py b/contrib/python/ipython/py3/IPython/core/usage.py
new file mode 100644
index 0000000000..53219bceb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/usage.py
@@ -0,0 +1,341 @@
+# -*- coding: utf-8 -*-
+"""Usage information for the main IPython applications.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import sys
+from IPython.core import release
+
+cl_usage = """\
+=========
+ IPython
+=========
+
+Tools for Interactive Computing in Python
+=========================================
+
+ A Python shell with automatic history (input and output), dynamic object
+ introspection, easier configuration, command completion, access to the
+ system shell and more. IPython can also be embedded in running programs.
+
+
+Usage
+
+ ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
+
+ If invoked with no options, it executes the file and exits, passing the
+ remaining arguments to the script, just as if you had specified the same
+ command with python. You may need to specify `--` before args to be passed
+ to the script, to prevent IPython from attempting to parse them. If you
+ specify the option `-i` before the filename, it will enter an interactive
+ IPython session after running the script, rather than exiting. Files ending
+ in .py will be treated as normal Python, but files ending in .ipy can
+ contain special IPython syntax (magic commands, shell expansions, etc.).
+
+ Almost all configuration in IPython is available via the command-line. Do
+ `ipython --help-all` to see all available options. For persistent
+ configuration, look into your `ipython_config.py` configuration file for
+ details.
+
+ This file is typically installed in the `IPYTHONDIR` directory, and there
+ is a separate configuration directory for each profile. The default profile
+ directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
+    defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
+ C:\\Users\\YourUserName in most instances.
+
+ To initialize a profile with the default configuration file, do::
+
+ $> ipython profile create
+
+ and start editing `IPYTHONDIR/profile_default/ipython_config.py`
+
+ In IPython's documentation, we will refer to this directory as
+    `IPYTHONDIR`; you can change its default location by creating an
+ environment variable with this name and setting it to the desired path.
+
+ For more information, see the manual available in HTML and PDF in your
+ installation, or online at https://ipython.org/documentation.html.
+"""
+
+interactive_usage = """
+IPython -- An enhanced Interactive Python
+=========================================
+
+IPython offers a fully compatible replacement for the standard Python
+interpreter, with convenient shell features, special commands, command
+history mechanism and output results caching.
+
+At your system command line, type 'ipython -h' to see the command line
+options available. This document only describes interactive features.
+
+GETTING HELP
+------------
+
+Within IPython you have various ways to access help:
+
+ ? -> Introduction and overview of IPython's features (this screen).
+ object? -> Details about 'object'.
+ object?? -> More detailed, verbose information about 'object'.
+ %quickref -> Quick reference of all IPython specific syntax and magics.
+ help -> Access Python's own help system.
+
+If you are in terminal IPython you can quit this screen by pressing `q`.
+
+
+MAIN FEATURES
+-------------
+
+* Access to the standard Python help with object docstrings and the Python
+ manuals. Simply type 'help' (no quotes) to invoke it.
+
+* Magic commands: type %magic for information on the magic subsystem.
+
+* System command aliases, via the %alias command or the configuration file(s).
+
+* Dynamic object information:
+
+ Typing ?word or word? prints detailed information about an object. Certain
+ long strings (code, etc.) get snipped in the center for brevity.
+
+ Typing ??word or word?? gives access to the full information without
+ snipping long strings. Strings that are longer than the screen are printed
+ through the less pager.
+
+ The ?/?? system gives access to the full source code for any object (if
+ available), shows function prototypes and other useful information.
+
+ If you just want to see an object's docstring, type '%pdoc object' (without
+ quotes, and without % if you have automagic on).
+
+* Tab completion in the local namespace:
+
+ At any time, hitting tab will complete any available python commands or
+ variable names, and show you a list of the possible completions if there's
+ no unambiguous one. It will also complete filenames in the current directory.
+
+* Search previous command history in multiple ways:
+
+ - Start typing, and then use arrow keys up/down or (Ctrl-p/Ctrl-n) to search
+ through the history items that match what you've typed so far.
+
+ - Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
+ your history for lines that match what you've typed so far, completing as
+ much as it can.
+
+ - %hist: search history by index.
+
+* Persistent command history across sessions.
+
+* Logging of input with the ability to save and restore a working session.
+
+* System shell with !. Typing !ls will run 'ls' in the current directory.
+
+* The reload command does a 'deep' reload of a module: changes made to the
+  module since you imported it will actually be available without having to exit.
+
+* Verbose and colored exception traceback printouts. See the %xmode and
+  %colors magics for details (just type %magic).
+
+* Input caching system:
+
+ IPython offers numbered prompts (In/Out) with input and output caching. All
+ input is saved and can be retrieved as variables (besides the usual arrow
+ key recall).
+
+ The following GLOBAL variables always exist (so don't overwrite them!):
+ _i: stores previous input.
+ _ii: next previous.
+ _iii: next-next previous.
+  _ih : a list of all inputs; _ih[n] is the input from line n.
+
+ Additionally, global variables named _i<n> are dynamically created (<n>
+ being the prompt counter), such that _i<n> == _ih[<n>]
+
+ For example, what you typed at prompt 14 is available as _i14 and _ih[14].
+
+ You can create macros which contain multiple input lines from this history,
+ for later re-execution, with the %macro function.
+
+ The history function %hist allows you to see any part of your input history
+ by printing a range of the _i variables. Note that inputs which contain
+ magic functions (%) appear in the history with a prepended comment. This is
+ because they aren't really valid Python code, so you can't exec them.
+
+* Output caching system:
+
+ For output that is returned from actions, a system similar to the input
+ cache exists but using _ instead of _i. Only actions that produce a result
+ (NOT assignments, for example) are cached. If you are familiar with
+ Mathematica, IPython's _ variables behave exactly like Mathematica's %
+ variables.
+
+ The following GLOBAL variables always exist (so don't overwrite them!):
+ _ (one underscore): previous output.
+ __ (two underscores): next previous.
+ ___ (three underscores): next-next previous.
+
+ Global variables named _<n> are dynamically created (<n> being the prompt
+ counter), such that the result of output <n> is always available as _<n>.
+
+ Finally, a global dictionary named _oh exists with entries for all lines
+ which generated output.
+
+* Directory history:
+
+ Your history of visited directories is kept in the global list _dh, and the
+ magic %cd command can be used to go to any entry in that list.
+
+* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
+
+ 1. Auto-parentheses
+
+ Callable objects (i.e. functions, methods, etc) can be invoked like
+ this (notice the commas between the arguments)::
+
+ In [1]: callable_ob arg1, arg2, arg3
+
+ and the input will be translated to this::
+
+ callable_ob(arg1, arg2, arg3)
+
+ This feature is off by default (in rare cases it can produce
+ undesirable side-effects), but you can activate it at the command-line
+ by starting IPython with `--autocall 1`, set it permanently in your
+ configuration file, or turn on at runtime with `%autocall 1`.
+
+ You can force auto-parentheses by using '/' as the first character
+ of a line. For example::
+
+ In [1]: /globals # becomes 'globals()'
+
+ Note that the '/' MUST be the first character on the line! This
+ won't work::
+
+ In [2]: print /globals # syntax error
+
+ In most cases the automatic algorithm should work, so you should
+ rarely need to explicitly invoke /. One notable exception is if you
+ are trying to call a function with a list of tuples as arguments (the
+ parenthesis will confuse IPython)::
+
+ In [1]: zip (1,2,3),(4,5,6) # won't work
+
+ but this will work::
+
+ In [2]: /zip (1,2,3),(4,5,6)
+ ------> zip ((1,2,3),(4,5,6))
+ Out[2]= [(1, 4), (2, 5), (3, 6)]
+
+ IPython tells you that it has altered your command line by
+ displaying the new command line preceded by -->. e.g.::
+
+ In [18]: callable list
+ -------> callable (list)
+
+ 2. Auto-Quoting
+
+ You can force auto-quoting of a function's arguments by using ',' as
+ the first character of a line. For example::
+
+ In [1]: ,my_function /home/me # becomes my_function("/home/me")
+
+ If you use ';' instead, the whole argument is quoted as a single
+ string (while ',' splits on whitespace)::
+
+ In [2]: ,my_function a b c # becomes my_function("a","b","c")
+ In [3]: ;my_function a b c # becomes my_function("a b c")
+
+ Note that the ',' MUST be the first character on the line! This
+ won't work::
+
+ In [4]: x = ,my_function /home/me # syntax error
+"""
+
+interactive_usage_min = """\
+An enhanced console for Python.
+Some of its features are:
+- Tab completion in the local namespace.
+- Logging of input, see command-line options.
+- System shell escape via !, e.g. !ls.
+- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
+- Keeps track of locally defined variables via %who, %whos.
+- Show object information with a ?, e.g. ?x or x? (use ?? for more info).
+"""
+
+quick_reference = r"""
+IPython -- An enhanced Interactive Python - Quick Reference Card
+================================================================
+
+obj?, obj?? : Get help, or more help for object (also works as
+ ?obj, ??obj).
+?foo.*abc* : List names in 'foo' containing 'abc' in them.
+%magic : Information about IPython's 'magic' % functions.
+
+Magic functions are prefixed by % or %%, and typically take their arguments
+without parentheses, quotes or even commas for convenience. Line magics take a
+single % and cell magics are prefixed with two %%.
+
+Example magic function calls:
+
+%alias d ls -F : 'd' is now an alias for 'ls -F'
+alias d ls -F : Works if 'alias' not a python name
+alist = %alias : Get list of aliases to 'alist'
+cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
+%cd?? : See help AND source for magic %cd
+%timeit x=10 : time the 'x=10' statement with high precision.
+%%timeit x=2**100
+x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
+ counted. This is an example of a cell magic.
+
+System commands:
+
+!cp a.txt b/ : System command escape, calls os.system()
+cp a.txt b/ : after %rehashx, most system commands work without !
+cp ${f}.txt $bar : Variable expansion in magics and system commands
+files = !ls /usr : Capture system command output
+files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
+
+History:
+
+_i, _ii, _iii : Previous, next previous, next next previous input
+_i4, _ih[2:5] : Input history line 4, lines 2-4
+exec(_i81) : Execute input history line #81 again
+%rep 81 : Edit input history line #81
+_, __, ___ : previous, next previous, next next previous output
+_dh : Directory history
+_oh : Output history
+%hist : Command history of current session.
+%hist -g foo : Search command history of (almost) all sessions for 'foo'.
+%hist -g : Command history of (almost) all sessions.
+%hist 1/2-8 : Command history containing lines 2-8 of session 1.
+%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
+%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
+ line 5 of 6 sessions ago.
+%edit 0/ : Open editor to execute code with history of current session.
+
+Autocall:
+
+f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
+/f 1,2 : f(1,2) (forced autoparen)
+,f 1 2 : f("1","2")
+;f 1 2 : f("1 2")
+
+Remember: TAB completion works in many contexts, not just file names
+or python names.
+
+The following magic functions are currently available:
+
+"""
+
+default_banner_parts = ["Python %s\n"%sys.version.split("\n")[0],
+ "Type 'copyright', 'credits' or 'license' for more information\n" ,
+ "IPython {version} -- An enhanced Interactive Python. Type '?' for help.\n".format(version=release.version),
+]
+
+default_banner = ''.join(default_banner_parts)
diff --git a/contrib/python/ipython/py3/IPython/display.py b/contrib/python/ipython/py3/IPython/display.py
new file mode 100644
index 0000000000..b7f64f25c9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/display.py
@@ -0,0 +1,44 @@
+"""Public API for display tools in IPython.
+"""
+
+# -----------------------------------------------------------------------------
+# Copyright (C) 2012 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# Imports
+# -----------------------------------------------------------------------------
+
+from IPython.core.display_functions import *
+from IPython.core.display import (
+ display_pretty,
+ display_html,
+ display_markdown,
+ display_svg,
+ display_png,
+ display_jpeg,
+ display_latex,
+ display_json,
+ display_javascript,
+ display_pdf,
+ DisplayObject,
+ TextDisplayObject,
+ Pretty,
+ HTML,
+ Markdown,
+ Math,
+ Latex,
+ SVG,
+ ProgressBar,
+ JSON,
+ GeoJSON,
+ Javascript,
+ Image,
+ set_matplotlib_formats,
+ set_matplotlib_close,
+ Video,
+)
+from IPython.lib.display import *
diff --git a/contrib/python/ipython/py3/IPython/extensions/__init__.py b/contrib/python/ipython/py3/IPython/extensions/__init__.py
new file mode 100644
index 0000000000..db7f79fca6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/extensions/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+"""This directory is meant for IPython extensions."""
diff --git a/contrib/python/ipython/py3/IPython/extensions/autoreload.py b/contrib/python/ipython/py3/IPython/extensions/autoreload.py
new file mode 100644
index 0000000000..0025ad519f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/extensions/autoreload.py
@@ -0,0 +1,727 @@
+"""IPython extension to reload modules before executing user code.
+
+``autoreload`` reloads modules automatically before entering the execution of
+code typed at the IPython prompt.
+
+This makes for example the following workflow possible:
+
+.. sourcecode:: ipython
+
+ In [1]: %load_ext autoreload
+
+ In [2]: %autoreload 2
+
+ In [3]: from foo import some_function
+
+ In [4]: some_function()
+ Out[4]: 42
+
+ In [5]: # open foo.py in an editor and change some_function to return 43
+
+ In [6]: some_function()
+ Out[6]: 43
+
+The module was reloaded without reloading it explicitly, and the object
+imported with ``from foo import ...`` was also updated.
+
+Usage
+=====
+
+The following magic commands are provided:
+
+``%autoreload``, ``%autoreload now``
+
+ Reload all modules (except those excluded by ``%aimport``)
+ automatically now.
+
+``%autoreload 0``, ``%autoreload off``
+
+ Disable automatic reloading.
+
+``%autoreload 1``, ``%autoreload explicit``
+
+ Reload all modules imported with ``%aimport`` every time before
+ executing the Python code typed.
+
+``%autoreload 2``, ``%autoreload all``
+
+ Reload all modules (except those excluded by ``%aimport``) every
+ time before executing the Python code typed.
+
+``%autoreload 3``, ``%autoreload complete``
+
+ Same as 2/all, but also adds any new objects in the module. See
+ unit test at IPython/extensions/tests/test_autoreload.py::test_autoload_newly_added_objects
+
+ Adding ``--print`` or ``-p`` to the ``%autoreload`` line will print autoreload activity to
+ standard out. ``--log`` or ``-l`` will do it to the log at INFO level; both can be used
+ simultaneously.
+
+``%aimport``
+
+ List modules which are to be automatically imported or not to be imported.
+
+``%aimport foo``
+
+ Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
+
+``%aimport foo, bar``
+
+ Import modules 'foo', 'bar' and mark them to be autoreloaded for ``%autoreload 1``
+
+``%aimport -foo``
+
+ Mark module 'foo' to not be autoreloaded.
+
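+A short session sketch combining the two magics (``foo`` is a hypothetical
+module name used only for illustration)::
+
+    %autoreload 1
+    %aimport foo
+    foo.do_work()   # foo is reloaded first if its source changed on disk
+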
+Caveats
+=======
+
+Reloading Python modules in a reliable way is in general difficult,
+and unexpected things may occur. ``%autoreload`` tries to work around
+common pitfalls by replacing function code objects and parts of
+classes previously in the module with new versions. This makes the
+following things work:
+
+- Functions and classes imported via 'from xxx import foo' are upgraded
+ to new versions when 'xxx' is reloaded.
+
+- Methods and properties of classes are upgraded on reload, so that
+ calling 'c.foo()' on an object 'c' created before the reload causes
+ the new code for 'foo' to be executed.
+
+Some of the known remaining caveats are:
+
+- Replacing code objects does not always succeed: changing a @property
+ in a class to an ordinary method or a method to a member variable
+ can cause problems (but in old objects only).
+
+- Functions that are removed (e.g. via monkey-patching) from a module
+ before it is reloaded are not upgraded.
+
+- C extension modules cannot be reloaded, and so cannot be autoreloaded.
+
+- When comparing Enum and Flag members, the 'is' identity operator is used
+  (even where '==' was written), similar to how comparisons with 'None' work.
+
+- Reloading a module, or importing the same module by a different name, creates new Enums. These may look the same, but are not.
+"""
+
+from IPython.core import magic_arguments
+from IPython.core.magic import Magics, magics_class, line_magic
+
+__skip_doctest__ = True
+
+# -----------------------------------------------------------------------------
+# Copyright (C) 2000 Thomas Heller
+# Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
+# Copyright (C) 2012 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+# -----------------------------------------------------------------------------
+#
+# This IPython module is written by Pauli Virtanen, based on the autoreload
+# code by Thomas Heller.
+
+# -----------------------------------------------------------------------------
+# Imports
+# -----------------------------------------------------------------------------
+
+import os
+import sys
+import traceback
+import types
+import weakref
+import gc
+import logging
+from importlib import import_module, reload
+from importlib.util import source_from_cache
+
+# ------------------------------------------------------------------------------
+# Autoreload functionality
+# ------------------------------------------------------------------------------
+
+
+class ModuleReloader:
+ enabled = False
+ """Whether this reloader is enabled"""
+
+ check_all = True
+ """Autoreload all modules, not just those listed in 'modules'"""
+
+ autoload_obj = False
+ """Autoreload all modules AND autoload all new objects"""
+
+ def __init__(self, shell=None):
+ # Modules that failed to reload: {module: mtime-on-failed-reload, ...}
+ self.failed = {}
+ # Modules specially marked as autoreloadable.
+ self.modules = {}
+ # Modules specially marked as not autoreloadable.
+ self.skip_modules = {}
+ # (module-name, name) -> weakref, for replacing old code objects
+ self.old_objects = {}
+ # Module modification timestamps
+ self.modules_mtimes = {}
+ self.shell = shell
+
+ # Reporting callable for verbosity
+ self._report = lambda msg: None # by default, be quiet.
+
+ # Cache module modification times
+ self.check(check_all=True, do_reload=False)
+
+ # To hide autoreload errors
+ self.hide_errors = False
+
+ def mark_module_skipped(self, module_name):
+ """Skip reloading the named module in the future"""
+ try:
+ del self.modules[module_name]
+ except KeyError:
+ pass
+ self.skip_modules[module_name] = True
+
+ def mark_module_reloadable(self, module_name):
+ """Reload the named module in the future (if it is imported)"""
+ try:
+ del self.skip_modules[module_name]
+ except KeyError:
+ pass
+ self.modules[module_name] = True
+
+ def aimport_module(self, module_name):
+ """Import a module, and mark it reloadable
+
+ Returns
+ -------
+        top_module : module
+            The imported module if it is top-level, otherwise the
+            top-level package containing it.
+        top_name : str
+            Name of top_module
+
+ """
+ self.mark_module_reloadable(module_name)
+
+ import_module(module_name)
+ top_name = module_name.split(".")[0]
+ top_module = sys.modules[top_name]
+ return top_module, top_name
+
+ def filename_and_mtime(self, module):
+ if not hasattr(module, "__file__") or module.__file__ is None:
+ return None, None
+
+ if getattr(module, "__name__", None) in [None, "__mp_main__", "__main__"]:
+ # we cannot reload(__main__) or reload(__mp_main__)
+ return None, None
+
+ filename = module.__file__
+ path, ext = os.path.splitext(filename)
+
+ if ext.lower() == ".py":
+ py_filename = filename
+ else:
+ try:
+ py_filename = source_from_cache(filename)
+ except ValueError:
+ return None, None
+
+ try:
+ pymtime = os.stat(py_filename).st_mtime
+ except OSError:
+ return None, None
+
+ return py_filename, pymtime
+
+ def check(self, check_all=False, do_reload=True):
+ """Check whether some modules need to be reloaded."""
+
+ if not self.enabled and not check_all:
+ return
+
+ if check_all or self.check_all:
+ modules = list(sys.modules.keys())
+ else:
+ modules = list(self.modules.keys())
+
+ for modname in modules:
+ m = sys.modules.get(modname, None)
+
+ if modname in self.skip_modules:
+ continue
+
+ py_filename, pymtime = self.filename_and_mtime(m)
+ if py_filename is None:
+ continue
+
+ try:
+ if pymtime <= self.modules_mtimes[modname]:
+ continue
+ except KeyError:
+ self.modules_mtimes[modname] = pymtime
+ continue
+ else:
+ if self.failed.get(py_filename, None) == pymtime:
+ continue
+
+ self.modules_mtimes[modname] = pymtime
+
+ # If we've reached this point, we should try to reload the module
+ if do_reload:
+ self._report(f"Reloading '{modname}'.")
+ try:
+ if self.autoload_obj:
+ superreload(m, reload, self.old_objects, self.shell)
+ else:
+ superreload(m, reload, self.old_objects)
+ if py_filename in self.failed:
+ del self.failed[py_filename]
+ except:
+ if not self.hide_errors:
+ print(
+ "[autoreload of {} failed: {}]".format(
+ modname, traceback.format_exc(10)
+ ),
+ file=sys.stderr,
+ )
+ self.failed[py_filename] = pymtime
+
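+# A minimal sketch of how the reloader is driven (``mypkg`` is a hypothetical
+# module; this mirrors what AutoreloadMagics does below, it is not extra API):
+#
+#     import mypkg
+#     reloader = ModuleReloader()    # caches current modification times
+#     reloader.enabled = True
+#     # ... edit mypkg's source on disk ...
+#     reloader.check()               # superreload()s mypkg in place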
+
+# ------------------------------------------------------------------------------
+# superreload
+# ------------------------------------------------------------------------------
+
+
+func_attrs = [
+ "__code__",
+ "__defaults__",
+ "__doc__",
+ "__closure__",
+ "__globals__",
+ "__dict__",
+]
+
+
+def update_function(old, new):
+ """Upgrade the code object of a function"""
+ for name in func_attrs:
+ try:
+ setattr(old, name, getattr(new, name))
+ except (AttributeError, TypeError):
+ pass
+
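+# Sketch of the effect on two hypothetical functions (not part of this
+# module): after update_function(), calls through the old reference execute
+# the new code object:
+#
+#     def old_f(): return 1
+#     def new_f(): return 2
+#     update_function(old_f, new_f)
+#     assert old_f() == 2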
+
+def update_instances(old, new):
+ """Use garbage collector to find all instances that refer to the old
+ class definition and update their __class__ to point to the new class
+ definition"""
+
+ refs = gc.get_referrers(old)
+
+ for ref in refs:
+ if type(ref) is old:
+ object.__setattr__(ref, "__class__", new)
+
+
+def update_class(old, new):
+ """Replace stuff in the __dict__ of a class, and upgrade
+ method code objects, and add new methods, if any"""
+ for key in list(old.__dict__.keys()):
+ old_obj = getattr(old, key)
+ try:
+ new_obj = getattr(new, key)
+ # explicitly checking that comparison returns True to handle
+ # cases where `==` doesn't return a boolean.
+ if (old_obj == new_obj) is True:
+ continue
+ except AttributeError:
+ # obsolete attribute: remove it
+ try:
+ delattr(old, key)
+ except (AttributeError, TypeError):
+ pass
+ continue
+ except ValueError:
+ # can't compare nested structures containing
+ # numpy arrays using `==`
+ pass
+
+ if update_generic(old_obj, new_obj):
+ continue
+
+ try:
+ setattr(old, key, getattr(new, key))
+ except (AttributeError, TypeError):
+ pass # skip non-writable attributes
+
+ for key in list(new.__dict__.keys()):
+ if key not in list(old.__dict__.keys()):
+ try:
+ setattr(old, key, getattr(new, key))
+ except (AttributeError, TypeError):
+ pass # skip non-writable attributes
+
+ # update all instances of class
+ update_instances(old, new)
+
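+# Sketch of the effect on a hypothetical class (not part of this module):
+# method code objects are swapped in place and update_instances() re-points
+# existing instances at the new class object:
+#
+#     class A:                      # "old" definition
+#         def f(self): return 1
+#     a = A()
+#     class A2:                     # "new" definition of the same class
+#         def f(self): return 2
+#     update_class(A, A2)
+#     assert a.f() == 2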
+
+def update_property(old, new):
+ """Replace get/set/del functions of a property"""
+ update_generic(old.fdel, new.fdel)
+ update_generic(old.fget, new.fget)
+ update_generic(old.fset, new.fset)
+
+
+def isinstance2(a, b, typ):
+ return isinstance(a, typ) and isinstance(b, typ)
+
+
+UPDATE_RULES = [
+ (lambda a, b: isinstance2(a, b, type), update_class),
+ (lambda a, b: isinstance2(a, b, types.FunctionType), update_function),
+ (lambda a, b: isinstance2(a, b, property), update_property),
+]
+UPDATE_RULES.extend(
+ [
+ (
+ lambda a, b: isinstance2(a, b, types.MethodType),
+ lambda a, b: update_function(a.__func__, b.__func__),
+ ),
+ ]
+)
+
+
+def update_generic(a, b):
+ for type_check, update in UPDATE_RULES:
+ if type_check(a, b):
+ update(a, b)
+ return True
+ return False
+
+
+class StrongRef:
+ def __init__(self, obj):
+ self.obj = obj
+
+ def __call__(self):
+ return self.obj
+
+
+mod_attrs = [
+ "__name__",
+ "__doc__",
+ "__package__",
+ "__loader__",
+ "__spec__",
+ "__file__",
+ "__cached__",
+ "__builtins__",
+]
+
+
+def append_obj(module, d, name, obj, autoload=False):
+ in_module = hasattr(obj, "__module__") and obj.__module__ == module.__name__
+ if autoload:
+ # check needed for module global built-ins
+ if not in_module and name in mod_attrs:
+ return False
+ else:
+ if not in_module:
+ return False
+
+ key = (module.__name__, name)
+ try:
+ d.setdefault(key, []).append(weakref.ref(obj))
+ except TypeError:
+ pass
+ return True
+
+
+def superreload(module, reload=reload, old_objects=None, shell=None):
+ """Enhanced version of the builtin reload function.
+
+ superreload remembers objects previously in the module, and
+
+ - upgrades the class dictionary of every old class in the module
+ - upgrades the code object of every old function and method
+ - clears the module's namespace before reloading
+
+ """
+ if old_objects is None:
+ old_objects = {}
+
+ # collect old objects in the module
+ for name, obj in list(module.__dict__.items()):
+ if not append_obj(module, old_objects, name, obj):
+ continue
+ key = (module.__name__, name)
+ try:
+ old_objects.setdefault(key, []).append(weakref.ref(obj))
+ except TypeError:
+ pass
+
+ # reload module
+ try:
+ # clear namespace first from old cruft
+ old_dict = module.__dict__.copy()
+ old_name = module.__name__
+ module.__dict__.clear()
+ module.__dict__["__name__"] = old_name
+ module.__dict__["__loader__"] = old_dict["__loader__"]
+ except (TypeError, AttributeError, KeyError):
+ pass
+
+ try:
+ module = reload(module)
+ except:
+ # restore module dictionary on failed reload
+ module.__dict__.update(old_dict)
+ raise
+
+ # iterate over all objects and update functions & classes
+ for name, new_obj in list(module.__dict__.items()):
+ key = (module.__name__, name)
+ if key not in old_objects:
+ # here 'shell' acts both as a flag and as an output var
+ if (
+ shell is None
+ or name == "Enum"
+ or not append_obj(module, old_objects, name, new_obj, True)
+ ):
+ continue
+ shell.user_ns[name] = new_obj
+
+ new_refs = []
+ for old_ref in old_objects[key]:
+ old_obj = old_ref()
+ if old_obj is None:
+ continue
+ new_refs.append(old_ref)
+ update_generic(old_obj, new_obj)
+
+ if new_refs:
+ old_objects[key] = new_refs
+ else:
+ del old_objects[key]
+
+ return module
+
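+# superreload() can also be called directly as a drop-in for importlib.reload
+# (a sketch; ``mypkg`` is a hypothetical already-imported module):
+#
+#     import mypkg
+#     from mypkg import some_function
+#     mypkg = superreload(mypkg)
+#     some_function()                # now runs the updated code object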
+
+# ------------------------------------------------------------------------------
+# IPython connectivity
+# ------------------------------------------------------------------------------
+
+
+@magics_class
+class AutoreloadMagics(Magics):
+ def __init__(self, *a, **kw):
+ super().__init__(*a, **kw)
+ self._reloader = ModuleReloader(self.shell)
+ self._reloader.check_all = False
+ self._reloader.autoload_obj = False
+ self.loaded_modules = set(sys.modules)
+
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ "mode",
+ type=str,
+ default="now",
+ nargs="?",
+ help="""blank or 'now' - Reload all modules (except those excluded by %%aimport)
+ automatically now.
+
+ '0' or 'off' - Disable automatic reloading.
+
+ '1' or 'explicit' - Reload only modules imported with %%aimport every
+ time before executing the Python code typed.
+
+ '2' or 'all' - Reload all modules (except those excluded by %%aimport)
+ every time before executing the Python code typed.
+
+            '3' or 'complete' - Same as 2/all, but also adds any new
+ objects in the module.
+ """,
+ )
+ @magic_arguments.argument(
+ "-p",
+ "--print",
+ action="store_true",
+ default=False,
+ help="Show autoreload activity using `print` statements",
+ )
+ @magic_arguments.argument(
+ "-l",
+ "--log",
+ action="store_true",
+ default=False,
+ help="Show autoreload activity using the logger",
+ )
+ @magic_arguments.argument(
+ "--hide-errors",
+ action="store_true",
+ default=False,
+ help="Hide autoreload errors",
+ )
+ def autoreload(self, line=""):
+ r"""%autoreload => Reload modules automatically
+
+ %autoreload or %autoreload now
+ Reload all modules (except those excluded by %aimport) automatically
+ now.
+
+ %autoreload 0 or %autoreload off
+ Disable automatic reloading.
+
+ %autoreload 1 or %autoreload explicit
+ Reload only modules imported with %aimport every time before executing
+ the Python code typed.
+
+ %autoreload 2 or %autoreload all
+ Reload all modules (except those excluded by %aimport) every time
+ before executing the Python code typed.
+
+ %autoreload 3 or %autoreload complete
+            Same as 2/all, but also adds any new objects in the module. See
+ unit test at IPython/extensions/tests/test_autoreload.py::test_autoload_newly_added_objects
+
+ The optional arguments --print and --log control display of autoreload activity. The default
+ is to act silently; --print (or -p) will print out the names of modules that are being
+ reloaded, and --log (or -l) outputs them to the log at INFO level.
+
+ The optional argument --hide-errors hides any errors that can happen when trying to
+ reload code.
+
+ Reloading Python modules in a reliable way is in general
+ difficult, and unexpected things may occur. %autoreload tries to
+ work around common pitfalls by replacing function code objects and
+ parts of classes previously in the module with new versions. This
+        makes the following things work:
+
+ - Functions and classes imported via 'from xxx import foo' are upgraded
+ to new versions when 'xxx' is reloaded.
+
+ - Methods and properties of classes are upgraded on reload, so that
+ calling 'c.foo()' on an object 'c' created before the reload causes
+ the new code for 'foo' to be executed.
+
+ Some of the known remaining caveats are:
+
+ - Replacing code objects does not always succeed: changing a @property
+ in a class to an ordinary method or a method to a member variable
+ can cause problems (but in old objects only).
+
+        - Functions that are removed (e.g. via monkey-patching) from a module
+ before it is reloaded are not upgraded.
+
+ - C extension modules cannot be reloaded, and so cannot be
+ autoreloaded.
+
+ """
+ args = magic_arguments.parse_argstring(self.autoreload, line)
+ mode = args.mode.lower()
+
+ p = print
+
+ logger = logging.getLogger("autoreload")
+
+ l = logger.info
+
+ def pl(msg):
+ p(msg)
+ l(msg)
+
+ if args.print is False and args.log is False:
+ self._reloader._report = lambda msg: None
+ elif args.print is True:
+ if args.log is True:
+ self._reloader._report = pl
+ else:
+ self._reloader._report = p
+ elif args.log is True:
+ self._reloader._report = l
+
+ self._reloader.hide_errors = args.hide_errors
+
+ if mode == "" or mode == "now":
+ self._reloader.check(True)
+ elif mode == "0" or mode == "off":
+ self._reloader.enabled = False
+ elif mode == "1" or mode == "explicit":
+ self._reloader.enabled = True
+ self._reloader.check_all = False
+ self._reloader.autoload_obj = False
+ elif mode == "2" or mode == "all":
+ self._reloader.enabled = True
+ self._reloader.check_all = True
+ self._reloader.autoload_obj = False
+ elif mode == "3" or mode == "complete":
+ self._reloader.enabled = True
+ self._reloader.check_all = True
+ self._reloader.autoload_obj = True
+ else:
+ raise ValueError(f'Unrecognized autoreload mode "{mode}".')
+
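+    # Illustrative invocations of the magic above (a sketch of the mode
+    # dispatch, not additional API):
+    #
+    #     %autoreload 2 --print   # enable, check all modules, report via print
+    #     %autoreload off         # disable automatic reloading
+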
+ @line_magic
+ def aimport(self, parameter_s="", stream=None):
+ """%aimport => Import modules for automatic reloading.
+
+ %aimport
+ List modules to automatically import and not to import.
+
+ %aimport foo
+ Import module 'foo' and mark it to be autoreloaded for %autoreload explicit
+
+ %aimport foo, bar
+ Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload explicit
+
+ %aimport -foo, bar
+ Mark module 'foo' to not be autoreloaded for %autoreload explicit, all, or complete, and 'bar'
+ to be autoreloaded for mode explicit.
+ """
+ modname = parameter_s
+ if not modname:
+ to_reload = sorted(self._reloader.modules.keys())
+ to_skip = sorted(self._reloader.skip_modules.keys())
+ if stream is None:
+ stream = sys.stdout
+ if self._reloader.check_all:
+ stream.write("Modules to reload:\nall-except-skipped\n")
+ else:
+ stream.write("Modules to reload:\n%s\n" % " ".join(to_reload))
+ stream.write("\nModules to skip:\n%s\n" % " ".join(to_skip))
+ else:
+ for _module in [_.strip() for _ in modname.split(",")]:
+ if _module.startswith("-"):
+ _module = _module[1:].strip()
+ self._reloader.mark_module_skipped(_module)
+ else:
+ top_module, top_name = self._reloader.aimport_module(_module)
+
+ # Inject module to user namespace
+ self.shell.push({top_name: top_module})
+
+ def pre_run_cell(self):
+ if self._reloader.enabled:
+ try:
+ self._reloader.check()
+ except:
+ pass
+
+ def post_execute_hook(self):
+ """Cache the modification times of any modules imported in this execution"""
+ newly_loaded_modules = set(sys.modules) - self.loaded_modules
+ for modname in newly_loaded_modules:
+ _, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
+ if pymtime is not None:
+ self._reloader.modules_mtimes[modname] = pymtime
+
+ self.loaded_modules.update(newly_loaded_modules)
+
+
+def load_ipython_extension(ip):
+ """Load the extension in IPython."""
+ auto_reload = AutoreloadMagics(ip)
+ ip.register_magics(auto_reload)
+ ip.events.register("pre_run_cell", auto_reload.pre_run_cell)
+ ip.events.register("post_execute", auto_reload.post_execute_hook)
diff --git a/contrib/python/ipython/py3/IPython/extensions/storemagic.py b/contrib/python/ipython/py3/IPython/extensions/storemagic.py
new file mode 100644
index 0000000000..d9d00f14b9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/extensions/storemagic.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+"""
+%store magic for lightweight persistence.
+
+Stores variables, aliases and macros in IPython's database.
+
+To automatically restore stored variables at startup, add this to your
+:file:`ipython_config.py` file::
+
+ c.StoreMagics.autorestore = True
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import inspect, os, sys, textwrap
+
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from traitlets import Bool
+
+
+def restore_aliases(ip, alias=None):
+ staliases = ip.db.get('stored_aliases', {})
+ if alias is None:
+ for k,v in staliases.items():
+ #print "restore alias",k,v # dbg
+ #self.alias_table[k] = v
+ ip.alias_manager.define_alias(k,v)
+ else:
+ ip.alias_manager.define_alias(alias, staliases[alias])
+
+
+def refresh_variables(ip):
+ db = ip.db
+ for key in db.keys('autorestore/*'):
+ # strip autorestore
+ justkey = os.path.basename(key)
+ try:
+ obj = db[key]
+ except KeyError:
+ print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
+ print("The error was:", sys.exc_info()[0])
+ else:
+ #print "restored",justkey,"=",obj #dbg
+ ip.user_ns[justkey] = obj
+
+
+def restore_dhist(ip):
+ ip.user_ns['_dh'] = ip.db.get('dhist',[])
+
+
+def restore_data(ip):
+ refresh_variables(ip)
+ restore_aliases(ip)
+ restore_dhist(ip)
+
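+# These helpers back the ``c.StoreMagics.autorestore = True`` option shown in
+# the module docstring. A manual equivalent from a live session would be
+# (a sketch, assuming an active InteractiveShell):
+#
+#     from IPython import get_ipython
+#     restore_data(get_ipython())   # re-populate user_ns, aliases and _dh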
+
+@magics_class
+class StoreMagics(Magics):
+ """Lightweight persistence for python variables.
+
+ Provides the %store magic."""
+
+ autorestore = Bool(False, help=
+ """If True, any %store-d variables will be automatically restored
+ when IPython starts.
+ """
+ ).tag(config=True)
+
+ def __init__(self, shell):
+ super(StoreMagics, self).__init__(shell=shell)
+ self.shell.configurables.append(self)
+ if self.autorestore:
+ restore_data(self.shell)
+
+ @skip_doctest
+ @line_magic
+ def store(self, parameter_s=''):
+ """Lightweight persistence for python variables.
+
+ Example::
+
+ In [1]: l = ['hello',10,'world']
+ In [2]: %store l
+ Stored 'l' (list)
+ In [3]: exit
+
+ (IPython session is closed and started again...)
+
+ ville@badger:~$ ipython
+ In [1]: l
+ NameError: name 'l' is not defined
+ In [2]: %store -r
+ In [3]: l
+ Out[3]: ['hello', 10, 'world']
+
+ Usage:
+
+ * ``%store`` - Show list of all variables and their current
+ values
+ * ``%store spam bar`` - Store the *current* value of the variables spam
+ and bar to disk
+ * ``%store -d spam`` - Remove the variable and its value from storage
+ * ``%store -z`` - Remove all variables from storage
+ * ``%store -r`` - Refresh all variables, aliases and directory history
+ from store (overwrite current vals)
+ * ``%store -r spam bar`` - Refresh specified variables and aliases from store
+ (delete current val)
+ * ``%store foo >a.txt`` - Store value of foo to new file a.txt
+ * ``%store foo >>a.txt`` - Append value of foo to file a.txt
+
+ It should be noted that if you change the value of a variable, you
+ need to %store it again if you want to persist the new value.
+
+ Note also that the variables will need to be pickleable; most basic
+ python types can be safely %store'd.
+
+        Aliases can also be %store'd across sessions.
+ To remove an alias from the storage, use the %unalias magic.
+ """
+
+ opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
+ args = argsl.split()
+ ip = self.shell
+ db = ip.db
+ # delete
+ if 'd' in opts:
+ try:
+ todel = args[0]
+ except IndexError as e:
+ raise UsageError('You must provide the variable to forget') from e
+ else:
+ try:
+ del db['autorestore/' + todel]
+ except BaseException as e:
+ raise UsageError("Can't delete variable '%s'" % todel) from e
+ # reset
+ elif 'z' in opts:
+ for k in db.keys('autorestore/*'):
+ del db[k]
+
+ elif 'r' in opts:
+ if args:
+ for arg in args:
+ try:
+ obj = db['autorestore/' + arg]
+ except KeyError:
+ try:
+ restore_aliases(ip, alias=arg)
+ except KeyError:
+ print("no stored variable or alias %s" % arg)
+ else:
+ ip.user_ns[arg] = obj
+ else:
+ restore_data(ip)
+
+ # run without arguments -> list variables & values
+ elif not args:
+ vars = db.keys('autorestore/*')
+ vars.sort()
+ if vars:
+ size = max(map(len, vars))
+ else:
+ size = 0
+
+ print('Stored variables and their in-db values:')
+ fmt = '%-'+str(size)+'s -> %s'
+ get = db.get
+ for var in vars:
+ justkey = os.path.basename(var)
+                # print the first 50 characters of each value's repr
+ print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
+
+ # default action - store the variable
+ else:
+ # %store foo >file.txt or >>file.txt
+ if len(args) > 1 and args[1].startswith(">"):
+ fnam = os.path.expanduser(args[1].lstrip(">").lstrip())
+ if args[1].startswith(">>"):
+ fil = open(fnam, "a", encoding="utf-8")
+ else:
+ fil = open(fnam, "w", encoding="utf-8")
+ with fil:
+ obj = ip.ev(args[0])
+ print("Writing '%s' (%s) to file '%s'." % (args[0],
+ obj.__class__.__name__, fnam))
+
+ if not isinstance (obj, str):
+ from pprint import pprint
+ pprint(obj, fil)
+ else:
+ fil.write(obj)
+ if not obj.endswith('\n'):
+ fil.write('\n')
+
+ return
+
+ # %store foo
+ for arg in args:
+ try:
+ obj = ip.user_ns[arg]
+ except KeyError:
+ # it might be an alias
+ name = arg
+ try:
+ cmd = ip.alias_manager.retrieve_alias(name)
+ except ValueError as e:
+ raise UsageError("Unknown variable '%s'" % name) from e
+
+ staliases = db.get('stored_aliases',{})
+ staliases[name] = cmd
+ db['stored_aliases'] = staliases
+ print("Alias stored: %s (%s)" % (name, cmd))
+ return
+
+ else:
+ modname = getattr(inspect.getmodule(obj), '__name__', '')
+ if modname == '__main__':
+ print(textwrap.dedent("""\
+ Warning:%s is %s
+ Proper storage of interactively declared classes (or instances
+ of those classes) is not possible! Only instances
+ of classes in real modules on file system can be %%store'd.
+ """ % (arg, obj) ))
+ return
+ #pickled = pickle.dumps(obj)
+ db[ 'autorestore/' + arg ] = obj
+ print("Stored '%s' (%s)" % (arg, obj.__class__.__name__))
+
+
+def load_ipython_extension(ip):
+ """Load the extension in IPython."""
+ ip.register_magics(StoreMagics)
+
diff --git a/contrib/python/ipython/py3/IPython/external/__init__.py b/contrib/python/ipython/py3/IPython/external/__init__.py
new file mode 100644
index 0000000000..eedc338eb8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/external/__init__.py
@@ -0,0 +1,7 @@
+"""
+This package contains all third-party modules bundled with IPython.
+"""
+
+from typing import List
+
+__all__: List[str] = []
diff --git a/contrib/python/ipython/py3/IPython/external/qt_for_kernel.py b/contrib/python/ipython/py3/IPython/external/qt_for_kernel.py
new file mode 100644
index 0000000000..11e88625d1
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/external/qt_for_kernel.py
@@ -0,0 +1,124 @@
+""" Import Qt in a manner suitable for an IPython kernel.
+
+This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
+
+Import Priority:
+
+if Qt has been imported anywhere else:
+ use that
+
+if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
+ use PyQt4 @v1
+
+Next, ask QT_API env variable
+
+if QT_API not set:
+ ask matplotlib what it's using. If Qt4Agg or Qt5Agg, then use the
+ version matplotlib is configured with
+
+ else: (matplotlib said nothing)
+ # this is the default path - nobody told us anything
+ try in this order:
+    PyQt default version, PyQt6, PySide6, PyQt5, PySide2
+else:
+ use what QT_API says
+
+ Note that %gui's implementation will always set a `QT_API`, see
+ `IPython.terminal.pt_inputhooks.get_inputhook_name_and_func`
+
+"""
+# NOTE: This is no longer an external, third-party module, and should be
+# considered part of IPython. For compatibility however, it is being kept in
+# IPython/external.
+
+import os
+import sys
+
+from IPython.external.qt_loaders import (
+ load_qt,
+ loaded_api,
+ enum_factory,
+ # QT6
+ QT_API_PYQT6,
+ QT_API_PYSIDE6,
+ # QT5
+ QT_API_PYQT5,
+ QT_API_PYSIDE2,
+ # QT4
+ QT_API_PYQT,
+ QT_API_PYSIDE,
+ # default
+ QT_API_PYQT_DEFAULT,
+)
+
+_qt_apis = (
+ # QT6
+ QT_API_PYQT6,
+ QT_API_PYSIDE6,
+ # QT5
+ QT_API_PYQT5,
+ QT_API_PYSIDE2,
+ # default
+ QT_API_PYQT_DEFAULT,
+)
+
+
+def matplotlib_options(mpl):
+ """Constraints placed on an imported matplotlib."""
+ if mpl is None:
+ return
+ backend = mpl.rcParams.get('backend', None)
+ if backend == 'Qt4Agg':
+ mpqt = mpl.rcParams.get('backend.qt4', None)
+ if mpqt is None:
+ return None
+ if mpqt.lower() == 'pyside':
+ return [QT_API_PYSIDE]
+ elif mpqt.lower() == 'pyqt4':
+ return [QT_API_PYQT_DEFAULT]
+ elif mpqt.lower() == 'pyqt4v2':
+ return [QT_API_PYQT]
+ raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
+ mpqt)
+ elif backend == 'Qt5Agg':
+ mpqt = mpl.rcParams.get('backend.qt5', None)
+ if mpqt is None:
+ return None
+ if mpqt.lower() == 'pyqt5':
+ return [QT_API_PYQT5]
+ raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
+ mpqt)
+
+def get_options():
+ """Return a list of acceptable QT APIs, in decreasing order of preference."""
+ #already imported Qt somewhere. Use that
+ loaded = loaded_api()
+ if loaded is not None:
+ return [loaded]
+
+ mpl = sys.modules.get("matplotlib", None)
+
+ if mpl is not None and tuple(mpl.__version__.split(".")) < ("1", "0", "2"):
+ # 1.0.1 only supports PyQt4 v1
+ return [QT_API_PYQT_DEFAULT]
+
+ qt_api = os.environ.get('QT_API', None)
+ if qt_api is None:
+ #no ETS variable. Ask mpl, then use default fallback path
+ return matplotlib_options(mpl) or [
+ QT_API_PYQT_DEFAULT,
+ QT_API_PYQT6,
+ QT_API_PYSIDE6,
+ QT_API_PYQT5,
+ QT_API_PYSIDE2,
+ ]
+ elif qt_api not in _qt_apis:
+ raise RuntimeError("Invalid Qt API %r, valid values are: %r" %
+ (qt_api, ', '.join(_qt_apis)))
+ else:
+ return [qt_api]
+
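+# The selection can be pinned from the environment before this module is
+# imported (a sketch; any key listed in ``_qt_apis`` is accepted):
+#
+#     import os
+#     os.environ["QT_API"] = "pyqt5"   # get_options() then returns ["pyqt5"]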
+
+api_opts = get_options()
+QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
+enum_helper = enum_factory(QT_API, QtCore)
diff --git a/contrib/python/ipython/py3/IPython/external/qt_loaders.py b/contrib/python/ipython/py3/IPython/external/qt_loaders.py
new file mode 100644
index 0000000000..1486cf9d77
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/external/qt_loaders.py
@@ -0,0 +1,410 @@
+"""
+This module contains factory functions that attempt
+to return Qt submodules from the various python Qt bindings.
+
+It also protects against double-importing Qt with different
+bindings, which is unstable and likely to crash
+
+This is used primarily by qt and qt_for_kernel, and shouldn't
+be accessed directly from the outside
+"""
+import importlib.abc
+import sys
+import os
+import types
+from functools import partial, lru_cache
+import operator
+
+# ### Available APIs.
+# Qt6
+QT_API_PYQT6 = "pyqt6"
+QT_API_PYSIDE6 = "pyside6"
+
+# Qt5
+QT_API_PYQT5 = 'pyqt5'
+QT_API_PYSIDE2 = 'pyside2'
+
+# Qt4
+# NOTE: Here for legacy matplotlib compatibility, but not really supported on the IPython side.
+QT_API_PYQT = "pyqt" # Force version 2
+QT_API_PYQTv1 = "pyqtv1"  # Force version 1
+QT_API_PYSIDE = "pyside"
+
+QT_API_PYQT_DEFAULT = "pyqtdefault" # use system default for version 1 vs. 2
+
+api_to_module = {
+ # Qt6
+ QT_API_PYQT6: "PyQt6",
+ QT_API_PYSIDE6: "PySide6",
+ # Qt5
+ QT_API_PYQT5: "PyQt5",
+ QT_API_PYSIDE2: "PySide2",
+ # Qt4
+ QT_API_PYSIDE: "PySide",
+ QT_API_PYQT: "PyQt4",
+ QT_API_PYQTv1: "PyQt4",
+ # default
+ QT_API_PYQT_DEFAULT: "PyQt6",
+}
+
+
+class ImportDenier(importlib.abc.MetaPathFinder):
+ """Import Hook that will guard against bad Qt imports
+ once IPython commits to a specific binding
+ """
+
+ def __init__(self):
+ self.__forbidden = set()
+
+ def forbid(self, module_name):
+ sys.modules.pop(module_name, None)
+ self.__forbidden.add(module_name)
+
+ def find_spec(self, fullname, path, target=None):
+ if path:
+ return
+ if fullname in self.__forbidden:
+ raise ImportError(
+ """
+ Importing %s disabled by IPython, which has
+ already imported an Incompatible QT Binding: %s
+ """
+ % (fullname, loaded_api())
+ )
+
+
+ID = ImportDenier()
+sys.meta_path.insert(0, ID)
+
+
+def commit_api(api):
+ """Commit to a particular API, and trigger ImportErrors on subsequent
+ dangerous imports"""
+ modules = set(api_to_module.values())
+
+ modules.remove(api_to_module[api])
+ for mod in modules:
+ ID.forbid(mod)
+
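+# Sketch of the guard in action (assuming PyQt5 is the binding in use):
+#
+#     commit_api(QT_API_PYQT5)
+#     import PySide2        # now raises ImportError via ImportDenier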
+
+def loaded_api():
+ """Return which API is loaded, if any
+
+ If this returns anything besides None,
+ importing any other Qt binding is unsafe.
+
+ Returns
+ -------
+ None, 'pyside6', 'pyqt6', 'pyside2', 'pyside', 'pyqt', 'pyqt5', 'pyqtv1'
+ """
+ if sys.modules.get("PyQt6.QtCore"):
+ return QT_API_PYQT6
+ elif sys.modules.get("PySide6.QtCore"):
+ return QT_API_PYSIDE6
+ elif sys.modules.get("PyQt5.QtCore"):
+ return QT_API_PYQT5
+ elif sys.modules.get("PySide2.QtCore"):
+ return QT_API_PYSIDE2
+ elif sys.modules.get("PyQt4.QtCore"):
+ if qtapi_version() == 2:
+ return QT_API_PYQT
+ else:
+ return QT_API_PYQTv1
+ elif sys.modules.get("PySide.QtCore"):
+ return QT_API_PYSIDE
+
+ return None
+
+
+def has_binding(api):
+ """Safely check for PyQt4/5, PySide or PySide2, without importing submodules
+
+ Parameters
+ ----------
+ api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyside' | 'pyside2' | 'pyqtdefault']
+ Which module to check for
+
+ Returns
+ -------
+ True if the relevant module appears to be importable
+ """
+ module_name = api_to_module[api]
+ from importlib.util import find_spec
+
+ required = ['QtCore', 'QtGui', 'QtSvg']
+ if api in (QT_API_PYQT5, QT_API_PYSIDE2, QT_API_PYQT6, QT_API_PYSIDE6):
+ # QT5 requires QtWidgets too
+ required.append('QtWidgets')
+
+ for submod in required:
+ try:
+ spec = find_spec('%s.%s' % (module_name, submod))
+ except ImportError:
+ # Package (e.g. PyQt5) not found
+ return False
+ else:
+ if spec is None:
+ # Submodule (e.g. PyQt5.QtCore) not found
+ return False
+
+ if api == QT_API_PYSIDE:
+ # We can also safely check PySide version
+ import PySide
+
+ return PySide.__version_info__ >= (1, 0, 3)
+
+ return True
+
+
+def qtapi_version():
+ """Return which QString API has been set, if any
+
+ Returns
+ -------
+ The QString API version (1 or 2), or None if not set
+ """
+ try:
+ import sip
+ except ImportError:
+ # as of PyQt5 5.11, sip is no longer available as a top-level
+ # module and needs to be imported from the PyQt5 namespace
+ try:
+ from PyQt5 import sip
+ except ImportError:
+ return
+ try:
+ return sip.getapi('QString')
+ except ValueError:
+ return
+
+
+def can_import(api):
+ """Safely query whether an API is importable, without importing it"""
+ if not has_binding(api):
+ return False
+
+ current = loaded_api()
+ if api == QT_API_PYQT_DEFAULT:
+ return current in [QT_API_PYQT6, None]
+ else:
+ return current in [api, None]
+
+
+def import_pyqt4(version=2):
+ """
+ Import PyQt4
+
+ Parameters
+ ----------
+ version : 1, 2, or None
+ Which QString/QVariant API to use. Set to None to use the system
+ default
+ ImportErrors raised within this function are non-recoverable
+ """
+ # The new-style string API (version=2) automatically
+ # converts QStrings to Unicode Python strings. Also, automatically unpacks
+ # QVariants to their underlying objects.
+ import sip
+
+ if version is not None:
+ sip.setapi('QString', version)
+ sip.setapi('QVariant', version)
+
+ from PyQt4 import QtGui, QtCore, QtSvg
+
+ if QtCore.PYQT_VERSION < 0x040700:
+ raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
+ QtCore.PYQT_VERSION_STR)
+
+ # Alias PyQt-specific functions for PySide compatibility.
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+
+ # query for the API version (in case version == None)
+ version = sip.getapi('QString')
+ api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
+ return QtCore, QtGui, QtSvg, api
+
+
+def import_pyqt5():
+ """
+ Import PyQt5
+
+ ImportErrors raised within this function are non-recoverable
+ """
+
+ from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
+
+ # Alias PyQt-specific functions for PySide compatibility.
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+
+ # Join QtGui and QtWidgets for Qt4 compatibility.
+ QtGuiCompat = types.ModuleType('QtGuiCompat')
+ QtGuiCompat.__dict__.update(QtGui.__dict__)
+ QtGuiCompat.__dict__.update(QtWidgets.__dict__)
+
+ api = QT_API_PYQT5
+ return QtCore, QtGuiCompat, QtSvg, api
+
+
+def import_pyqt6():
+ """
+ Import PyQt6
+
+ ImportErrors raised within this function are non-recoverable
+ """
+
+ from PyQt6 import QtCore, QtSvg, QtWidgets, QtGui
+
+ # Alias PyQt-specific functions for PySide compatibility.
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+
+ # Join QtGui and QtWidgets for Qt4 compatibility.
+ QtGuiCompat = types.ModuleType("QtGuiCompat")
+ QtGuiCompat.__dict__.update(QtGui.__dict__)
+ QtGuiCompat.__dict__.update(QtWidgets.__dict__)
+
+ api = QT_API_PYQT6
+ return QtCore, QtGuiCompat, QtSvg, api
+
+
+def import_pyside():
+ """
+ Import PySide
+
+ ImportErrors raised within this function are non-recoverable
+ """
+ from PySide import QtGui, QtCore, QtSvg
+ return QtCore, QtGui, QtSvg, QT_API_PYSIDE
+
+def import_pyside2():
+ """
+ Import PySide2
+
+ ImportErrors raised within this function are non-recoverable
+ """
+ from PySide2 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
+
+ # Join QtGui and QtWidgets for Qt4 compatibility.
+ QtGuiCompat = types.ModuleType('QtGuiCompat')
+ QtGuiCompat.__dict__.update(QtGui.__dict__)
+ QtGuiCompat.__dict__.update(QtWidgets.__dict__)
+ QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
+
+ return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE2
+
+
+def import_pyside6():
+ """
+ Import PySide6
+
+ ImportErrors raised within this function are non-recoverable
+ """
+ from PySide6 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
+
+ # Join QtGui and QtWidgets for Qt4 compatibility.
+ QtGuiCompat = types.ModuleType("QtGuiCompat")
+ QtGuiCompat.__dict__.update(QtGui.__dict__)
+ QtGuiCompat.__dict__.update(QtWidgets.__dict__)
+ QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
+
+ return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE6
+
+
+def load_qt(api_options):
+ """
+ Attempt to import Qt, given a preference list
+ of permissible bindings
+
+ It is safe to call this function multiple times.
+
+ Parameters
+ ----------
+ api_options : List of strings
+        The order of APIs to try. Valid items are 'pyside', 'pyside2',
+        'pyside6', 'pyqt', 'pyqt5', 'pyqt6', 'pyqtv1' and 'pyqtdefault'
+
+ Returns
+ -------
+ A tuple of QtCore, QtGui, QtSvg, QT_API
+ The first three are the Qt modules. The last is the
+ string indicating which module was loaded.
+
+ Raises
+ ------
+ ImportError, if it isn't possible to import any requested
+ bindings (either because they aren't installed, or because
+ an incompatible library has already been installed)
+ """
+ loaders = {
+ # Qt6
+ QT_API_PYQT6: import_pyqt6,
+ QT_API_PYSIDE6: import_pyside6,
+ # Qt5
+ QT_API_PYQT5: import_pyqt5,
+ QT_API_PYSIDE2: import_pyside2,
+ # Qt4
+ QT_API_PYSIDE: import_pyside,
+ QT_API_PYQT: import_pyqt4,
+ QT_API_PYQTv1: partial(import_pyqt4, version=1),
+ # default
+ QT_API_PYQT_DEFAULT: import_pyqt6,
+ }
+
+ for api in api_options:
+
+ if api not in loaders:
+ raise RuntimeError(
+ "Invalid Qt API %r, valid values are: %s" %
+ (api, ", ".join(["%r" % k for k in loaders.keys()])))
+
+ if not can_import(api):
+ continue
+
+ #cannot safely recover from an ImportError during this
+ result = loaders[api]()
+ api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
+ commit_api(api)
+ return result
+ else:
+ # Clear the environment variable since it doesn't work.
+ if "QT_API" in os.environ:
+ del os.environ["QT_API"]
+
+ raise ImportError(
+ """
+ Could not load requested Qt binding. Please ensure that
+ PyQt4 >= 4.7, PyQt5, PyQt6, PySide >= 1.0.3, PySide2, or
+ PySide6 is available, and only one is imported per session.
+
+ Currently-imported Qt library: %r
+ PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
+ PyQt6 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
+ PySide2 installed: %s
+ PySide6 installed: %s
+ Tried to load: %r
+ """
+ % (
+ loaded_api(),
+ has_binding(QT_API_PYQT5),
+ has_binding(QT_API_PYQT6),
+ has_binding(QT_API_PYSIDE2),
+ has_binding(QT_API_PYSIDE6),
+ api_options,
+ )
+ )
+
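+# Typical call, mirroring qt_for_kernel (a sketch; the preference list can be
+# any subset of the loader keys above):
+#
+#     QtCore, QtGui, QtSvg, QT_API = load_qt([QT_API_PYQT5, QT_API_PYSIDE2])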
+
+def enum_factory(QT_API, QtCore):
+ """Construct an enum helper to account for PyQt5 <-> PyQt6 changes."""
+
+ @lru_cache(None)
+ def _enum(name):
+ # foo.bar.Enum.Entry (PyQt6) <=> foo.bar.Entry (non-PyQt6).
+ return operator.attrgetter(
+ name if QT_API == QT_API_PYQT6 else name.rpartition(".")[0]
+ )(sys.modules[QtCore.__package__])
+
+ return _enum
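+
+
+# Illustrative use (a sketch; the enum path is just an example):
+#
+#     enum_helper = enum_factory(QT_API, QtCore)
+#     modifier = enum_helper("QtCore.Qt.KeyboardModifier").ControlModifier
+#     # resolves Qt.KeyboardModifier.ControlModifier under PyQt6, and
+#     # Qt.ControlModifier under the other bindings.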
diff --git a/contrib/python/ipython/py3/IPython/lib/__init__.py b/contrib/python/ipython/py3/IPython/lib/__init__.py
new file mode 100644
index 0000000000..94b8ade4ec
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/__init__.py
@@ -0,0 +1,11 @@
+# encoding: utf-8
+"""
+Extra capabilities for IPython
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
diff --git a/contrib/python/ipython/py3/IPython/lib/backgroundjobs.py b/contrib/python/ipython/py3/IPython/lib/backgroundjobs.py
new file mode 100644
index 0000000000..e7ad51eb67
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/backgroundjobs.py
@@ -0,0 +1,491 @@
+# -*- coding: utf-8 -*-
+"""Manage background (threaded) jobs conveniently from an interactive shell.
+
+This module provides a BackgroundJobManager class. This is the main class
+meant for public usage, it implements an object which can create and manage
+new background jobs.
+
+It also provides the actual job classes managed by these BackgroundJobManager
+objects, see their docstrings below.
+
+
+This system was inspired by discussions with B. Granger and the
+BackgroundCommand class described in the book Python Scripting for
+Computational Science, by H. P. Langtangen:
+
+http://folk.uio.no/hpl/scripting
+
+(although ultimately no code from this text was used, as IPython's system is a
+separate implementation).
+
+An example notebook is provided in our documentation illustrating interactive
+use of the system.
+"""
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+# Code begins
+import sys
+import threading
+
+from IPython import get_ipython
+from IPython.core.ultratb import AutoFormattedTB
+from logging import error, debug
+
+
+class BackgroundJobManager(object):
+ """Class to manage a pool of backgrounded threaded jobs.
+
+ Below, we assume that 'jobs' is a BackgroundJobManager instance.
+
+ Usage summary (see the method docstrings for details):
+
+ jobs.new(...) -> start a new job
+
+ jobs() or jobs.status() -> print status summary of all jobs
+
+ jobs[N] -> returns job number N.
+
+ foo = jobs[N].result -> assign to variable foo the result of job N
+
+ jobs[N].traceback() -> print the traceback of dead job N
+
+ jobs.remove(N) -> remove (finished) job N
+
+ jobs.flush() -> remove all finished jobs
+
+ As a convenience feature, BackgroundJobManager instances provide the
+ utility result and traceback methods which retrieve the corresponding
+ information from the jobs list:
+
+ jobs.result(N) <--> jobs[N].result
+ jobs.traceback(N) <--> jobs[N].traceback()
+
+ While this appears minor, it allows you to use tab completion
+ interactively on the job manager instance.
+ """
+
+ def __init__(self):
+ # Lists for job management, accessed via a property to ensure they're
+        # up to date.
+ self._running = []
+ self._completed = []
+ self._dead = []
+ # A dict of all jobs, so users can easily access any of them
+ self.all = {}
+ # For reporting
+ self._comp_report = []
+ self._dead_report = []
+ # Store status codes locally for fast lookups
+ self._s_created = BackgroundJobBase.stat_created_c
+ self._s_running = BackgroundJobBase.stat_running_c
+ self._s_completed = BackgroundJobBase.stat_completed_c
+ self._s_dead = BackgroundJobBase.stat_dead_c
+ self._current_job_id = 0
+
+ @property
+ def running(self):
+ self._update_status()
+ return self._running
+
+ @property
+ def dead(self):
+ self._update_status()
+ return self._dead
+
+ @property
+ def completed(self):
+ self._update_status()
+ return self._completed
+
+ def new(self, func_or_exp, *args, **kwargs):
+ """Add a new background job and start it in a separate thread.
+
+ There are two types of jobs which can be created:
+
+ 1. Jobs based on expressions which can be passed to an eval() call.
+ The expression must be given as a string. For example:
+
+ job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
+
+ The given expression is passed to eval(), along with the optional
+ global/local dicts provided. If no dicts are given, they are
+ extracted automatically from the caller's frame.
+
+ A Python statement is NOT a valid eval() expression. Basically, you
+ can only use as an eval() argument something which can go on the right
+ of an '=' sign and be assigned to a variable.
+
+ For example,"print 'hello'" is not valid, but '2+3' is.
+
+ 2. Jobs given a function object, optionally passing additional
+ positional arguments:
+
+ job_manager.new(myfunc, x, y)
+
+ The function is called with the given arguments.
+
+ If you need to pass keyword arguments to your function, you must
+ supply them as a dict named kw:
+
+ job_manager.new(myfunc, x, y, kw=dict(z=1))
+
+ The reason for this asymmetry is that the new() method needs to
+ maintain access to its own keywords, and this prevents name collisions
+ between arguments to new() and arguments to your own functions.
+
+ In both cases, the result is stored in the job.result field of the
+ background job object.
+
+ You can set `daemon` attribute of the thread by giving the keyword
+ argument `daemon`.
+
+ Notes and caveats:
+
+ 1. All threads running share the same standard output. Thus, if your
+ background jobs generate output, it will come out on top of whatever
+ you are currently writing. For this reason, background jobs are best
+ used with silent functions which simply return their output.
+
+ 2. Threads also all work within the same global namespace, and this
+ system does not lock interactive variables. So if you send a job to the
+ background which operates on a mutable object for a long time, and
+ start modifying that same mutable object interactively (or in another
+ backgrounded job), all sorts of bizarre behaviour will occur.
+
+ 3. If a background job is spending a lot of time inside a C extension
+ module which does not release the Python Global Interpreter Lock
+ (GIL), this will block the IPython prompt. This is simply because the
+ Python interpreter can only switch between threads at Python
+ bytecodes. While the execution is inside C code, the interpreter must
+ simply wait unless the extension module releases the GIL.
+
+ 4. There is no way, due to limitations in the Python threads library,
+ to kill a thread once it has started."""
+
+ if callable(func_or_exp):
+ kw = kwargs.get('kw',{})
+ job = BackgroundJobFunc(func_or_exp,*args,**kw)
+ elif isinstance(func_or_exp, str):
+ if not args:
+ frame = sys._getframe(1)
+ glob, loc = frame.f_globals, frame.f_locals
+ elif len(args)==1:
+ glob = loc = args[0]
+ elif len(args)==2:
+ glob,loc = args
+ else:
+ raise ValueError(
+ 'Expression jobs take at most 2 args (globals,locals)')
+ job = BackgroundJobExpr(func_or_exp, glob, loc)
+ else:
+ raise TypeError('invalid args for new job')
+
+ if kwargs.get('daemon', False):
+ job.daemon = True
+ job.num = self._current_job_id
+ self._current_job_id += 1
+ self.running.append(job)
+ self.all[job.num] = job
+ debug('Starting job # %s in a separate thread.' % job.num)
+ job.start()
+ return job
+
+ def __getitem__(self, job_key):
+ num = job_key if isinstance(job_key, int) else job_key.num
+ return self.all[num]
+
+ def __call__(self):
+ """An alias to self.status(),
+
+ This allows you to simply call a job manager instance much like the
+ Unix `jobs` shell command."""
+
+ return self.status()
+
+ def _update_status(self):
+ """Update the status of the job lists.
+
+ This method moves finished jobs to one of two lists:
+ - self.completed: jobs which completed successfully
+ - self.dead: jobs which finished but died.
+
+ It also copies those jobs to corresponding _report lists. These lists
+ are used to report jobs completed/dead since the last update, and are
+ then cleared by the reporting function after each call."""
+
+ # Status codes
+ srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
+ # State lists, use the actual lists b/c the public names are properties
+ # that call this very function on access
+ running, completed, dead = self._running, self._completed, self._dead
+
+ # Now, update all state lists
+ for num, job in enumerate(running):
+ stat = job.stat_code
+ if stat == srun:
+ continue
+ elif stat == scomp:
+ completed.append(job)
+ self._comp_report.append(job)
+ running[num] = False
+ elif stat == sdead:
+ dead.append(job)
+ self._dead_report.append(job)
+ running[num] = False
+ # Remove dead/completed jobs from running list
+ running[:] = filter(None, running)
+
+ def _group_report(self,group,name):
+ """Report summary for a given job group.
+
+ Return True if the group had any elements."""
+
+ if group:
+ print('%s jobs:' % name)
+ for job in group:
+ print('%s : %s' % (job.num,job))
+ print()
+ return True
+
+ def _group_flush(self,group,name):
+ """Flush a given job group
+
+ Return True if the group had any elements."""
+
+ njobs = len(group)
+ if njobs:
+ plural = '' if njobs == 1 else 's'
+ print('Flushing %s %s job%s.' % (njobs,name,plural))
+ group[:] = []
+ return True
+
+ def _status_new(self):
+ """Print the status of newly finished jobs.
+
+ Return True if any new jobs are reported.
+
+ This call resets its own state every time, so it only reports jobs
+ which have finished since the last time it was called."""
+
+ self._update_status()
+ new_comp = self._group_report(self._comp_report, 'Completed')
+ new_dead = self._group_report(self._dead_report,
+ 'Dead, call jobs.traceback() for details')
+ self._comp_report[:] = []
+ self._dead_report[:] = []
+ return new_comp or new_dead
+
+ def status(self,verbose=0):
+ """Print a status of all jobs currently being managed."""
+
+ self._update_status()
+ self._group_report(self.running,'Running')
+ self._group_report(self.completed,'Completed')
+ self._group_report(self.dead,'Dead')
+ # Also flush the report queues
+ self._comp_report[:] = []
+ self._dead_report[:] = []
+
+ def remove(self,num):
+ """Remove a finished (completed or dead) job."""
+
+ try:
+ job = self.all[num]
+ except KeyError:
+ error('Job #%s not found' % num)
+ else:
+ stat_code = job.stat_code
+ if stat_code == self._s_running:
+ error('Job #%s is still running, it can not be removed.' % num)
+ return
+ elif stat_code == self._s_completed:
+ self.completed.remove(job)
+ elif stat_code == self._s_dead:
+ self.dead.remove(job)
+
+ def flush(self):
+ """Flush all finished jobs (completed and dead) from lists.
+
+ Running jobs are never flushed.
+
+ The completed and dead lists are refreshed (via the corresponding
+ properties) before flushing, so jobs that have finished since the last
+ status check are flushed as well."""
+
+ # Remove the finished jobs from the master dict
+ alljobs = self.all
+ for job in self.completed+self.dead:
+ del(alljobs[job.num])
+
+ # Now flush these lists completely
+ fl_comp = self._group_flush(self.completed, 'Completed')
+ fl_dead = self._group_flush(self.dead, 'Dead')
+ if not (fl_comp or fl_dead):
+ print('No jobs to flush.')
+
+ def result(self,num):
+ """result(N) -> return the result of job N."""
+ try:
+ return self.all[num].result
+ except KeyError:
+ error('Job #%s not found' % num)
+
+ def _traceback(self, job):
+ num = job if isinstance(job, int) else job.num
+ try:
+ self.all[num].traceback()
+ except KeyError:
+ error('Job #%s not found' % num)
+
+ def traceback(self, job=None):
+ if job is None:
+ self._update_status()
+ for deadjob in self.dead:
+ print("Traceback for: %r" % deadjob)
+ self._traceback(deadjob)
+ print()
+ else:
+ self._traceback(job)
+
+
+class BackgroundJobBase(threading.Thread):
+ """Base class to build BackgroundJob classes.
+
+ The derived classes must implement:
+
+ - Their own __init__, since the one here raises NotImplementedError. The
+ derived constructor must call self._init() at the end, to provide common
+ initialization.
+
+ - A strform attribute used in calls to __str__.
+
+ - A call() method, which will make the actual execution call and must
+ return a value to be held in the 'result' field of the job object.
+ """
+
+ # Class constants for status, in string and as numerical codes (when
+ # updating jobs lists, we don't want to do string comparisons). This will
+ # be done at every user prompt, so it has to be as fast as possible
+ stat_created = 'Created'; stat_created_c = 0
+ stat_running = 'Running'; stat_running_c = 1
+ stat_completed = 'Completed'; stat_completed_c = 2
+ stat_dead = 'Dead (Exception), call jobs.traceback() for details'
+ stat_dead_c = -1
+
+ def __init__(self):
+ """Must be implemented in subclasses.
+
+ Subclasses must call :meth:`_init` for standard initialisation.
+ """
+ raise NotImplementedError("This class can not be instantiated directly.")
+
+ def _init(self):
+ """Common initialization for all BackgroundJob objects"""
+
+ for attr in ['call','strform']:
+ assert hasattr(self,attr), "Missing attribute <%s>" % attr
+
+ # The num tag can be set by an external job manager
+ self.num = None
+
+ self.status = BackgroundJobBase.stat_created
+ self.stat_code = BackgroundJobBase.stat_created_c
+ self.finished = False
+ self.result = '<BackgroundJob has not completed>'
+
+ # reuse the ipython traceback handler if we can get to it, otherwise
+ # make a new one
+ try:
+ make_tb = get_ipython().InteractiveTB.text
+ except:
+ make_tb = AutoFormattedTB(mode = 'Context',
+ color_scheme='NoColor',
+ tb_offset = 1).text
+ # Note that the actual API for text() requires the three args to be
+ # passed in, so we wrap it in a simple lambda.
+ self._make_tb = lambda : make_tb(None, None, None)
+
+ # Hold a formatted traceback if one is generated.
+ self._tb = None
+
+ threading.Thread.__init__(self)
+
+ def __str__(self):
+ return self.strform
+
+ def __repr__(self):
+ return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
+
+ def traceback(self):
+ print(self._tb)
+
+ def run(self):
+ try:
+ self.status = BackgroundJobBase.stat_running
+ self.stat_code = BackgroundJobBase.stat_running_c
+ self.result = self.call()
+ except:
+ self.status = BackgroundJobBase.stat_dead
+ self.stat_code = BackgroundJobBase.stat_dead_c
+ self.finished = None
+ self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
+ self._tb = self._make_tb()
+ else:
+ self.status = BackgroundJobBase.stat_completed
+ self.stat_code = BackgroundJobBase.stat_completed_c
+ self.finished = True
+
+
+class BackgroundJobExpr(BackgroundJobBase):
+ """Evaluate an expression as a background job (uses a separate thread)."""
+
+ def __init__(self, expression, glob=None, loc=None):
+ """Create a new job from a string which can be fed to eval().
+
+ global/locals dicts can be provided, which will be passed to the eval
+ call."""
+
+ # fail immediately if the given expression can't be compiled
+ self.code = compile(expression,'<BackgroundJob compilation>','eval')
+
+ glob = {} if glob is None else glob
+ loc = {} if loc is None else loc
+ self.expression = self.strform = expression
+ self.glob = glob
+ self.loc = loc
+ self._init()
+
+ def call(self):
+ return eval(self.code,self.glob,self.loc)
+
+
+class BackgroundJobFunc(BackgroundJobBase):
+ """Run a function call as a background job (uses a separate thread)."""
+
+ def __init__(self, func, *args, **kwargs):
+ """Create a new job from a callable object.
+
+ Any positional arguments and keyword args given to this constructor
+ after the initial callable are passed directly to it."""
+
+ if not callable(func):
+ raise TypeError(
+ 'first argument to BackgroundJobFunc must be callable')
+
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+ # The string form will only include the function passed, because
+ # generating string representations of the arguments is a potentially
+ # _very_ expensive operation (e.g. with large arrays).
+ self.strform = str(func)
+ self._init()
+
+ def call(self):
+ return self.func(*self.args, **self.kwargs)
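+
+
+if __name__ == "__main__":
+    # Illustrative usage sketch, not part of the upstream module: submit one
+    # function job and one expression job, wait for them, and inspect results.
+    # The helper ``slow_add`` below is hypothetical and exists only for this demo.
+    import time
+
+    def slow_add(a, b, delay=0.1):
+        time.sleep(delay)
+        return a + b
+
+    jobs = BackgroundJobManager()
+    func_job = jobs.new(slow_add, 2, 3)   # function job with positional args
+    expr_job = jobs.new("2 + 3")          # expression job, eval'd in the caller's frame
+    func_job.join()
+    expr_job.join()
+    jobs.status()                         # prints the Running/Completed/Dead summary
+    print("function job result:", func_job.result)
+    print("expression job result:", expr_job.result)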
diff --git a/contrib/python/ipython/py3/IPython/lib/clipboard.py b/contrib/python/ipython/py3/IPython/lib/clipboard.py
new file mode 100644
index 0000000000..1d691a7ea6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/clipboard.py
@@ -0,0 +1,101 @@
+""" Utilities for accessing the platform's clipboard.
+"""
+import os
+import subprocess
+
+from IPython.core.error import TryNext
+import IPython.utils.py3compat as py3compat
+
+
+class ClipboardEmpty(ValueError):
+ pass
+
+
+def win32_clipboard_get():
+ """ Get the current clipboard's text on Windows.
+
+ Requires Mark Hammond's pywin32 extensions.
+ """
+ try:
+ import win32clipboard
+ except ImportError as e:
+ raise TryNext("Getting text from the clipboard requires the pywin32 "
+ "extensions: http://sourceforge.net/projects/pywin32/") from e
+ win32clipboard.OpenClipboard()
+ try:
+ text = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
+ except (TypeError, win32clipboard.error):
+ try:
+ text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
+ text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
+ except (TypeError, win32clipboard.error) as e:
+ raise ClipboardEmpty from e
+ finally:
+ win32clipboard.CloseClipboard()
+ return text
+
+
+def osx_clipboard_get() -> str:
+ """ Get the clipboard's text on OS X.
+ """
+ p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
+ stdout=subprocess.PIPE)
+ bytes_, stderr = p.communicate()
+ # Text comes in with old Mac \r line endings. Change them to \n.
+ bytes_ = bytes_.replace(b'\r', b'\n')
+ text = py3compat.decode(bytes_)
+ return text
+
+
+def tkinter_clipboard_get():
+ """ Get the clipboard's text using Tkinter.
+
+ This is the default on systems that are not Windows or OS X. It may
+ interfere with other UI toolkits and should be replaced with an
+ implementation that uses that toolkit.
+ """
+ try:
+ from tkinter import Tk, TclError
+ except ImportError as e:
+ raise TryNext("Getting text from the clipboard on this platform requires tkinter.") from e
+
+ root = Tk()
+ root.withdraw()
+ try:
+ text = root.clipboard_get()
+ except TclError as e:
+ raise ClipboardEmpty from e
+ finally:
+ root.destroy()
+ text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
+ return text
+
+
+def wayland_clipboard_get():
+ """Get the clipboard's text under Wayland using wl-paste command.
+
+ This requires Wayland and wl-clipboard installed and running.
+ """
+ if os.environ.get("XDG_SESSION_TYPE") != "wayland":
+ raise TryNext("wayland is not detected")
+
+ try:
+ with subprocess.Popen(["wl-paste"], stdout=subprocess.PIPE) as p:
+ raw, err = p.communicate()
+ if p.wait():
+ raise TryNext(err)
+ except FileNotFoundError as e:
+ raise TryNext(
+ "Getting text from the clipboard under Wayland requires the wl-clipboard "
+ "extension: https://github.com/bugaevc/wl-clipboard"
+ ) from e
+
+ if not raw:
+ raise ClipboardEmpty
+
+ try:
+ text = py3compat.decode(raw)
+ except UnicodeDecodeError as e:
+ raise ClipboardEmpty from e
+
+ return text
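+
+
+if __name__ == "__main__":
+    # Illustrative sketch, not part of the upstream module: try the getters that
+    # make sense for the current platform, treating TryNext as "this backend does
+    # not apply here" and ClipboardEmpty as a valid (empty) clipboard.
+    import sys
+
+    if sys.platform == "win32":
+        getters = [win32_clipboard_get]
+    elif sys.platform == "darwin":
+        getters = [osx_clipboard_get]
+    else:
+        getters = [wayland_clipboard_get, tkinter_clipboard_get]
+
+    for getter in getters:
+        try:
+            print("%s -> %r" % (getter.__name__, getter()))
+            break
+        except TryNext as exc:
+            print("skipping %s: %s" % (getter.__name__, exc))
+        except ClipboardEmpty:
+            print("clipboard is empty")
+            break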
diff --git a/contrib/python/ipython/py3/IPython/lib/deepreload.py b/contrib/python/ipython/py3/IPython/lib/deepreload.py
new file mode 100644
index 0000000000..aaedab2425
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/deepreload.py
@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+"""
+Provides a reload() function that acts recursively.
+
+Python's normal :func:`python:reload` function only reloads the module that it's
+passed. The :func:`reload` function in this module also reloads everything
+imported from that module, which is useful when you're changing files deep
+inside a package.
+
+To use this as your default reload function, type this::
+
+ import builtins
+ from IPython.lib import deepreload
+ builtins.reload = deepreload.reload
+
+A reference to the original :func:`python:reload` is stored in this module as
+:data:`original_reload`, so you can restore it later.
+
+This code is almost entirely based on knee.py, which is a Python
+re-implementation of hierarchical module import.
+"""
+#*****************************************************************************
+# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import builtins as builtin_mod
+from contextlib import contextmanager
+import importlib
+import sys
+
+from types import ModuleType
+from warnings import warn
+import types
+
+original_import = builtin_mod.__import__
+
+@contextmanager
+def replace_import_hook(new_import):
+ saved_import = builtin_mod.__import__
+ builtin_mod.__import__ = new_import
+ try:
+ yield
+ finally:
+ builtin_mod.__import__ = saved_import
+
+def get_parent(globals, level):
+ """
+ parent, name = get_parent(globals, level)
+
+ Return the package that an import is being performed in. If globals comes
+ from the module foo.bar.bat (not itself a package), this returns the
+ sys.modules entry for foo.bar. If globals is from a package's __init__.py,
+ the package's entry in sys.modules is returned.
+
+ If globals doesn't come from a package or a module in a package, or a
+ corresponding entry is not found in sys.modules, None is returned.
+ """
+ orig_level = level
+
+ if not level or not isinstance(globals, dict):
+ return None, ''
+
+ pkgname = globals.get('__package__', None)
+
+ if pkgname is not None:
+ # __package__ is set, so use it
+ if not hasattr(pkgname, 'rindex'):
+ raise ValueError('__package__ set to non-string')
+ if len(pkgname) == 0:
+ if level > 0:
+ raise ValueError('Attempted relative import in non-package')
+ return None, ''
+ name = pkgname
+ else:
+ # __package__ not set, so figure it out and set it
+ if '__name__' not in globals:
+ return None, ''
+ modname = globals['__name__']
+
+ if '__path__' in globals:
+ # __path__ is set, so modname is already the package name
+ globals['__package__'] = name = modname
+ else:
+ # Normal module, so work out the package name if any
+ lastdot = modname.rfind('.')
+ if lastdot < 0 < level:
+ raise ValueError("Attempted relative import in non-package")
+ if lastdot < 0:
+ globals['__package__'] = None
+ return None, ''
+ globals['__package__'] = name = modname[:lastdot]
+
+ dot = len(name)
+ for x in range(level, 1, -1):
+ try:
+ dot = name.rindex('.', 0, dot)
+ except ValueError as e:
+ raise ValueError("attempted relative import beyond top-level "
+ "package") from e
+ name = name[:dot]
+
+ try:
+ parent = sys.modules[name]
+ except BaseException as e:
+ if orig_level < 1:
+ warn("Parent module '%.200s' not found while handling absolute "
+ "import" % name)
+ parent = None
+ else:
+ raise SystemError("Parent module '%.200s' not loaded, cannot "
+ "perform relative import" % name) from e
+
+ # We expect, but can't guarantee, if parent != None, that:
+ # - parent.__name__ == name
+ # - parent.__dict__ is globals
+ # If this is violated... Who cares?
+ return parent, name
+
+def load_next(mod, altmod, name, buf):
+ """
+ mod, name, buf = load_next(mod, altmod, name, buf)
+
+ altmod is either None or same as mod
+ """
+
+ if len(name) == 0:
+ # completely empty module name should only happen in
+ # 'from . import' (or '__import__("")')
+ return mod, None, buf
+
+ dot = name.find('.')
+ if dot == 0:
+ raise ValueError('Empty module name')
+
+ if dot < 0:
+ subname = name
+ next = None
+ else:
+ subname = name[:dot]
+ next = name[dot+1:]
+
+ if buf != '':
+ buf += '.'
+ buf += subname
+
+ result = import_submodule(mod, subname, buf)
+ if result is None and mod != altmod:
+ result = import_submodule(altmod, subname, subname)
+ if result is not None:
+ buf = subname
+
+ if result is None:
+ raise ImportError("No module named %.200s" % name)
+
+ return result, next, buf
+
+
+# Need to keep track of what we've already reloaded to prevent cyclic evil
+found_now = {}
+
+def import_submodule(mod, subname, fullname):
+ """m = import_submodule(mod, subname, fullname)"""
+ # Require:
+ # if mod == None: subname == fullname
+ # else: mod.__name__ + "." + subname == fullname
+
+ global found_now
+ if fullname in found_now and fullname in sys.modules:
+ m = sys.modules[fullname]
+ else:
+ print('Reloading', fullname)
+ found_now[fullname] = 1
+ oldm = sys.modules.get(fullname, None)
+ try:
+ if oldm is not None:
+ m = importlib.reload(oldm)
+ else:
+ m = importlib.import_module(subname, mod)
+ except:
+ # load_module probably removed name from modules because of
+ # the error. Put back the original module object.
+ if oldm:
+ sys.modules[fullname] = oldm
+ raise
+
+ add_submodule(mod, m, fullname, subname)
+
+ return m
+
+def add_submodule(mod, submod, fullname, subname):
+ """mod.{subname} = submod"""
+ if mod is None:
+ return #Nothing to do here.
+
+ if submod is None:
+ submod = sys.modules[fullname]
+
+ setattr(mod, subname, submod)
+
+ return
+
+def ensure_fromlist(mod, fromlist, buf, recursive):
+ """Handle 'from module import a, b, c' imports."""
+ if not hasattr(mod, '__path__'):
+ return
+ for item in fromlist:
+ if not hasattr(item, 'rindex'):
+ raise TypeError("Item in ``from list'' not a string")
+ if item == '*':
+ if recursive:
+ continue # avoid endless recursion
+ try:
+ all = mod.__all__
+ except AttributeError:
+ pass
+ else:
+ ret = ensure_fromlist(mod, all, buf, 1)
+ if not ret:
+ return 0
+ elif not hasattr(mod, item):
+ import_submodule(mod, item, buf + '.' + item)
+
+def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
+ """Replacement for __import__()"""
+ parent, buf = get_parent(globals, level)
+
+ head, name, buf = load_next(parent, None if level < 0 else parent, name, buf)
+
+ tail = head
+ while name:
+ tail, name, buf = load_next(tail, tail, name, buf)
+
+ # If tail is None, both get_parent and load_next found
+ # an empty module name: someone called __import__("") or
+ # doctored faulty bytecode
+ if tail is None:
+ raise ValueError('Empty module name')
+
+ if not fromlist:
+ return head
+
+ ensure_fromlist(tail, fromlist, buf, 0)
+ return tail
+
+modules_reloading = {}
+
+def deep_reload_hook(m):
+ """Replacement for reload()."""
+ # Hardcode this one as it would raise a NotImplementedError from the
+ # bowels of Python and screw up the import machinery after.
+ # unlike other imports the `exclude` list already in place is not enough.
+
+ if m is types:
+ return m
+ if not isinstance(m, ModuleType):
+ raise TypeError("reload() argument must be module")
+
+ name = m.__name__
+
+ if name not in sys.modules:
+ raise ImportError("reload(): module %.200s not in sys.modules" % name)
+
+ global modules_reloading
+ try:
+ return modules_reloading[name]
+ except:
+ modules_reloading[name] = m
+
+ try:
+ newm = importlib.reload(m)
+ except:
+ sys.modules[name] = m
+ raise
+ finally:
+ modules_reloading.clear()
+ return newm
+
+# Save the original hooks
+original_reload = importlib.reload
+
+# Replacement for reload()
+def reload(
+ module,
+ exclude=(
+ *sys.builtin_module_names,
+ "sys",
+ "os.path",
+ "builtins",
+ "__main__",
+ "numpy",
+ "numpy._globals",
+ ),
+):
+ """Recursively reload all modules used in the given module. Optionally
+ takes a list of modules to exclude from reloading. The default exclude
+ list contains the modules listed in sys.builtin_module_names, plus sys,
+ os.path, builtins, __main__, numpy and numpy._globals, to prevent, e.g., resetting
+ display, exception, and io hooks.
+ """
+ global found_now
+ for i in exclude:
+ found_now[i] = 1
+ try:
+ with replace_import_hook(deep_import_hook):
+ return deep_reload_hook(module)
+ finally:
+ found_now = {}
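+
+# Illustrative usage sketch (not part of the upstream module). ``mypkg`` is a
+# hypothetical package of your own; everything it imports (minus the exclude
+# list) gets reloaded along with it:
+#
+#     import mypkg
+#     from IPython.lib import deepreload
+#
+#     deepreload.reload(mypkg)
+#     # or, with extra modules shielded from reloading:
+#     deepreload.reload(mypkg, exclude=("sys", "os.path", "builtins", "__main__", "mypkg.config"))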
diff --git a/contrib/python/ipython/py3/IPython/lib/demo.py b/contrib/python/ipython/py3/IPython/lib/demo.py
new file mode 100644
index 0000000000..ebffd54abd
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/demo.py
@@ -0,0 +1,672 @@
+"""Module for interactive demos using IPython.
+
+This module implements a few classes for running Python scripts interactively
+in IPython for demonstrations. With very simple markup (a few tags in
+comments), you can control points where the script stops executing and returns
+control to IPython.
+
+
+Provided classes
+----------------
+
+The classes are (see their docstrings for further details):
+
+ - Demo: pure python demos
+
+ - IPythonDemo: demos with input to be processed by IPython as if it had been
+ typed interactively (so magics work, as well as any other special syntax you
+ may have added via input prefilters).
+
+ - LineDemo: single-line version of the Demo class. These demos are executed
+ one line at a time, and require no markup.
+
+ - IPythonLineDemo: IPython version of the LineDemo class (the demo is
+ executed a line at a time, but processed via IPython).
+
+ - ClearMixin: mixin to make Demo classes with less visual clutter. It
+ declares an empty marquee and a pre_cmd that clears the screen before each
+ block (see Subclassing below).
+
+ - ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
+ classes.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.lib.demo
+ :parts: 3
+
+Subclassing
+-----------
+
+The classes here all include a few methods meant to make customization by
+subclassing more convenient. Their docstrings below have some more details:
+
+ - highlight(): format every block and optionally highlight comments and
+ docstring content.
+
+ - marquee(): generates a marquee to provide visible on-screen markers at each
+ block start and end.
+
+ - pre_cmd(): run right before the execution of each block.
+
+ - post_cmd(): run right after the execution of each block. If the block
+ raises an exception, this is NOT called.
+
+
+Operation
+---------
+
+The file is run in its own empty namespace (though you can pass it a string of
+arguments as if in a command line environment, and it will see those as
+sys.argv). But at each stop, the global IPython namespace is updated with the
+current internal demo namespace, so you can work interactively with the data
+accumulated so far.
+
+By default, each block of code is printed (with syntax highlighting) before
+executing it and you have to confirm execution. This is intended to show the
+code to an audience first so you can discuss it, and only proceed with
+execution once you agree. There are a few tags which allow you to modify this
+behavior.
+
+The supported tags are:
+
+# <demo> stop
+
+ Defines block boundaries, the points where IPython stops execution of the
+ file and returns to the interactive prompt.
+
+ You can optionally mark the stop tag with extra dashes before and after the
+ word 'stop', to help visually distinguish the blocks in a text editor:
+
+ # <demo> --- stop ---
+
+
+# <demo> silent
+
+ Make a block execute silently (and hence automatically). Typically used in
+ cases where you have some boilerplate or initialization code which you need
+ executed but do not want to be seen in the demo.
+
+# <demo> auto
+
+ Make a block execute automatically, while still being printed. Useful for
+ simple code which does not warrant discussion, since it avoids the extra
+ manual confirmation.
+
+# <demo> auto_all
+
+ This tag can _only_ be in the first block, and if given it overrides the
+ individual auto tags to make the whole demo fully automatic (no block asks
+ for confirmation). It can also be given at creation time (or the attribute
+ set later) to override what's in the file.
+
+While _any_ python file can be run as a Demo instance, if there are no stop
+ tags the whole file will run in a single block (no different than calling
+first %pycat and then %run). The minimal markup to make this useful is to
+place a set of stop tags; the other tags are only there to let you fine-tune
+the execution.
+
+This is probably best explained with the simple example file below. You can
+copy this into a file named ex_demo.py, and try running it via::
+
+ from IPython.lib.demo import Demo
+ d = Demo('ex_demo.py')
+ d()
+
+Each time you call the demo object, it runs the next block. The demo object
+has a few useful methods for navigation, like again(), edit(), jump(), seek()
+and back(). It can be reset for a new run via reset() or reloaded from disk
+(in case you've edited the source) via reload(). See their docstrings below.
+
+Note: To make this simpler to explore, a file called "demo-exercizer.py" has
+been added to the "docs/examples/core" directory. Just cd to this directory in
+an IPython session, and type::
+
+ %run demo-exercizer.py
+
+and then follow the directions.
+
+Example
+-------
+
+The following is a very simple example of a valid demo file.
+
+::
+
+ #################### EXAMPLE DEMO <ex_demo.py> ###############################
+ '''A simple interactive demo to illustrate the use of IPython's Demo class.'''
+
+ print('Hello, welcome to an interactive IPython demo.')
+
+ # The mark below defines a block boundary, which is a point where IPython will
+ # stop execution and return to the interactive prompt. The dashes are actually
+ # optional and used only as a visual aid to clearly separate blocks while
+ # editing the demo code.
+ # <demo> stop
+
+ x = 1
+ y = 2
+
+ # <demo> stop
+
+ # the mark below marks this block as silent
+ # <demo> silent
+
+ print('This is a silent block, which gets executed but not printed.')
+
+ # <demo> stop
+ # <demo> auto
+ print('This is an automatic block.')
+ print('It is executed without asking for confirmation, but printed.')
+ z = x + y
+
+ print('z =', z)
+
+ # <demo> stop
+ # This is just another normal block.
+ print('z is now:', z)
+
+ print('bye!')
+ ################### END EXAMPLE DEMO <ex_demo.py> ############################
+"""
+
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#
+#*****************************************************************************
+
+import os
+import re
+import shlex
+import sys
+import pygments
+from pathlib import Path
+
+from IPython.utils.text import marquee
+from IPython.utils import openpy
+from IPython.utils import py3compat
+__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
+
+class DemoError(Exception): pass
+
+def re_mark(mark):
+ return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
+
+class Demo(object):
+
+ re_stop = re_mark(r'-*\s?stop\s?-*')
+ re_silent = re_mark('silent')
+ re_auto = re_mark('auto')
+ re_auto_all = re_mark('auto_all')
+
+ def __init__(self,src,title='',arg_str='',auto_all=None, format_rst=False,
+ formatter='terminal', style='default'):
+ """Make a new demo object. To run the demo, simply call the object.
+
+ See the module docstring for full details and an example (you can use
+ IPython.Demo? in IPython to see it).
+
+ Inputs:
+
+ - src is either a file, or file-like object, or a
+ string that can be resolved to a filename.
+
+ Optional inputs:
+
+ - title: a string to use as the demo name. Of most use when the demo
+ you are making comes from an object that has no filename, or if you
+ want an alternate denotation distinct from the filename.
+
+ - arg_str(''): a string of arguments, internally converted to a list
+ just like sys.argv, so the demo script can see a similar
+ environment.
+
+ - auto_all(None): global flag to run all blocks automatically without
+ confirmation. This attribute overrides the block-level tags and
+ applies to the whole demo. It is an attribute of the object, and
+ can be changed at runtime simply by reassigning it to a boolean
+ value.
+
+ - format_rst(False): a bool to enable comments and doc strings
+ formatting with pygments rst lexer
+
+ - formatter('terminal'): a string of pygments formatter name to be
+ used. Useful values for terminals: terminal, terminal256,
+ terminal16m
+
+ - style('default'): a string of pygments style name to be used.
+ """
+ if hasattr(src, "read"):
+ # It seems to be a file or a file-like object
+ self.fname = "from a file-like object"
+ if title == '':
+ self.title = "from a file-like object"
+ else:
+ self.title = title
+ else:
+ # Assume it's a string or something that can be converted to one
+ self.fname = src
+ if title == '':
+ (filepath, filename) = os.path.split(src)
+ self.title = filename
+ else:
+ self.title = title
+ self.sys_argv = [src] + shlex.split(arg_str)
+ self.auto_all = auto_all
+ self.src = src
+
+ try:
+ ip = get_ipython() # this is in builtins whenever IPython is running
+ self.inside_ipython = True
+ except NameError:
+ self.inside_ipython = False
+
+ if self.inside_ipython:
+ # get a few things from ipython. While it's a bit ugly design-wise,
+ # it ensures that things like color scheme and the like are always in
+ # sync with the ipython mode being used. This class is only meant to
+ # be used inside ipython anyways, so it's OK.
+ self.ip_ns = ip.user_ns
+ self.ip_colorize = ip.pycolorize
+ self.ip_showtb = ip.showtraceback
+ self.ip_run_cell = ip.run_cell
+ self.shell = ip
+
+ self.formatter = pygments.formatters.get_formatter_by_name(formatter,
+ style=style)
+ self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
+ self.format_rst = format_rst
+ if format_rst:
+ self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")
+
+ # load user data and initialize data structures
+ self.reload()
+
+ def fload(self):
+ """Load file object."""
+ # read data and parse into blocks
+ if hasattr(self, 'fobj') and self.fobj is not None:
+ self.fobj.close()
+ if hasattr(self.src, "read"):
+ # It seems to be a file or a file-like object
+ self.fobj = self.src
+ else:
+ # Assume it's a string or something that can be converted to one
+ self.fobj = openpy.open(self.fname)
+
+ def reload(self):
+ """Reload source from disk and initialize state."""
+ self.fload()
+
+ self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
+ src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
+ self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
+ self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
+
+ # if auto_all is not given (def. None), we read it from the file
+ if self.auto_all is None:
+ self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
+ else:
+ self.auto_all = bool(self.auto_all)
+
+ # Clean the sources from all markup so it doesn't get displayed when
+ # running the demo
+ src_blocks = []
+ auto_strip = lambda s: self.re_auto.sub('',s)
+ for i,b in enumerate(src_b):
+ if self._auto[i]:
+ src_blocks.append(auto_strip(b))
+ else:
+ src_blocks.append(b)
+ # remove the auto_all marker
+ src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
+
+ self.nblocks = len(src_blocks)
+ self.src_blocks = src_blocks
+
+ # also build syntax-highlighted source
+ self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
+
+ # ensure clean namespace and seek offset
+ self.reset()
+
+ def reset(self):
+ """Reset the namespace and seek pointer to restart the demo"""
+ self.user_ns = {}
+ self.finished = False
+ self.block_index = 0
+
+ def _validate_index(self,index):
+ if index<0 or index>=self.nblocks:
+ raise ValueError('invalid block index %s' % index)
+
+ def _get_index(self,index):
+ """Get the current block index, validating and checking status.
+
+ Returns None if the demo is finished"""
+
+ if index is None:
+ if self.finished:
+ print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
+ return None
+ index = self.block_index
+ else:
+ self._validate_index(index)
+ return index
+
+ def seek(self,index):
+ """Move the current seek pointer to the given block.
+
+ You can use negative indices to seek from the end, with identical
+ semantics to those of Python lists."""
+ if index<0:
+ index = self.nblocks + index
+ self._validate_index(index)
+ self.block_index = index
+ self.finished = False
+
+ def back(self,num=1):
+ """Move the seek pointer back num blocks (default is 1)."""
+ self.seek(self.block_index-num)
+
+ def jump(self,num=1):
+ """Jump a given number of blocks relative to the current one.
+
+ The offset can be positive or negative, defaults to 1."""
+ self.seek(self.block_index+num)
+
+ def again(self):
+ """Move the seek pointer back one block and re-execute."""
+ self.back(1)
+ self()
+
+ def edit(self,index=None):
+ """Edit a block.
+
+ If no number is given, use the last block executed.
+
+ This edits the in-memory copy of the demo, it does NOT modify the
+ original source file. If you want to do that, simply open the file in
+ an editor and use reload() when you make changes to the file. This
+ method is meant to let you change a block during a demonstration for
+ explanatory purposes, without damaging your original script."""
+
+ index = self._get_index(index)
+ if index is None:
+ return
+ # decrease the index by one (unless we're at the very beginning), so
+ # that the default demo.edit() call opens up the block we last ran
+ if index>0:
+ index -= 1
+
+ filename = self.shell.mktempfile(self.src_blocks[index])
+ self.shell.hooks.editor(filename, 1)
+ with open(Path(filename), "r", encoding="utf-8") as f:
+ new_block = f.read()
+ # update the source and colored block
+ self.src_blocks[index] = new_block
+ self.src_blocks_colored[index] = self.highlight(new_block)
+ self.block_index = index
+ # call to run with the newly edited index
+ self()
+
+ def show(self,index=None):
+ """Show a single block on screen"""
+
+ index = self._get_index(index)
+ if index is None:
+ return
+
+ print(self.marquee('<%s> block # %s (%s remaining)' %
+ (self.title,index,self.nblocks-index-1)))
+ print(self.src_blocks_colored[index])
+ sys.stdout.flush()
+
+ def show_all(self):
+ """Show entire demo on screen, block by block"""
+
+ fname = self.title
+ title = self.title
+ nblocks = self.nblocks
+ silent = self._silent
+ marquee = self.marquee
+ for index,block in enumerate(self.src_blocks_colored):
+ if silent[index]:
+ print(marquee('<%s> SILENT block # %s (%s remaining)' %
+ (title,index,nblocks-index-1)))
+ else:
+ print(marquee('<%s> block # %s (%s remaining)' %
+ (title,index,nblocks-index-1)))
+ print(block, end=' ')
+ sys.stdout.flush()
+
+ def run_cell(self,source):
+ """Execute a string with one or more lines of code"""
+
+ exec(source, self.user_ns)
+
+ def __call__(self,index=None):
+ """run a block of the demo.
+
+ If index is given, it should be an integer >=1 and <= nblocks. This
+ means that the calling convention is one off from typical Python
+ lists. The reason for the inconsistency is that the demo always
+ prints 'Block n/N', and N is the total, so it would be very odd to use
+ zero-indexing here."""
+
+ index = self._get_index(index)
+ if index is None:
+ return
+ try:
+ marquee = self.marquee
+ next_block = self.src_blocks[index]
+ self.block_index += 1
+ if self._silent[index]:
+ print(marquee('Executing silent block # %s (%s remaining)' %
+ (index,self.nblocks-index-1)))
+ else:
+ self.pre_cmd()
+ self.show(index)
+ if self.auto_all or self._auto[index]:
+ print(marquee('output:'))
+ else:
+ print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
+ ans = py3compat.input().strip()
+ if ans:
+ print(marquee('Block NOT executed'))
+ return
+ try:
+ save_argv = sys.argv
+ sys.argv = self.sys_argv
+ self.run_cell(next_block)
+ self.post_cmd()
+ finally:
+ sys.argv = save_argv
+
+ except:
+ if self.inside_ipython:
+ self.ip_showtb(filename=self.fname)
+ else:
+ if self.inside_ipython:
+ self.ip_ns.update(self.user_ns)
+
+ if self.block_index == self.nblocks:
+ mq1 = self.marquee('END OF DEMO')
+ if mq1:
+ # avoid spurious print if empty marquees are used
+ print()
+ print(mq1)
+ print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
+ self.finished = True
+
+ # These methods are meant to be overridden by subclasses who may wish to
+ # customize the behavior of their demos.
+ def marquee(self,txt='',width=78,mark='*'):
+ """Return the input string centered in a 'marquee'."""
+ return marquee(txt,width,mark)
+
+ def pre_cmd(self):
+ """Method called before executing each block."""
+ pass
+
+ def post_cmd(self):
+ """Method called after executing each block."""
+ pass
+
+ def highlight(self, block):
+ """Method called on each block to highlight it content"""
+ tokens = pygments.lex(block, self.python_lexer)
+ if self.format_rst:
+ from pygments.token import Token
+ toks = []
+ for token in tokens:
+ if token[0] == Token.String.Doc and len(token[1]) > 6:
+ toks += pygments.lex(token[1][:3], self.python_lexer)
+ # parse doc string content by rst lexer
+ toks += pygments.lex(token[1][3:-3], self.rst_lexer)
+ toks += pygments.lex(token[1][-3:], self.python_lexer)
+ elif token[0] == Token.Comment.Single:
+ toks.append((Token.Comment.Single, token[1][0]))
+ # parse comment content by rst lexer
+ # remove the extra newline added by rst lexer
+ toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
+ else:
+ toks.append(token)
+ tokens = toks
+ return pygments.format(tokens, self.formatter)
+
+
+class IPythonDemo(Demo):
+ """Class for interactive demos with IPython's input processing applied.
+
+ This subclasses Demo, but instead of executing each block by the Python
+ interpreter (via exec), it actually calls IPython on it, so that any input
+ filters which may be in place are applied to the input block.
+
+ If you have an interactive environment which exposes special input
+ processing, you can use this class instead to write demo scripts which
+ operate exactly as if you had typed them interactively. The default Demo
+ class requires the input to be valid, pure Python code.
+ """
+
+ def run_cell(self,source):
+ """Execute a string with one or more lines of code"""
+
+ self.shell.run_cell(source)
+
+class LineDemo(Demo):
+ """Demo where each line is executed as a separate block.
+
+ The input script should be valid Python code.
+
+ This class doesn't require any markup at all, and it's meant for simple
+ scripts (with no nesting or any kind of indentation) which consist of
+ multiple lines of input to be executed, one at a time, as if they had been
+ typed in the interactive prompt.
+
+ Note: the input cannot have *any* indentation, which means that only
+ single lines of input are accepted; not even function definitions are
+ valid. A usage sketch follows this class."""
+
+ def reload(self):
+ """Reload source from disk and initialize state."""
+ # read data and parse into blocks
+ self.fload()
+ lines = self.fobj.readlines()
+ src_b = [l for l in lines if l.strip()]
+ nblocks = len(src_b)
+ self.src = ''.join(lines)
+ self._silent = [False]*nblocks
+ self._auto = [True]*nblocks
+ self.auto_all = True
+ self.nblocks = nblocks
+ self.src_blocks = src_b
+
+ # also build syntax-highlighted source
+ self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
+
+ # ensure clean namespace and seek offset
+ self.reset()
+
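+# Illustrative usage sketch (not part of the upstream module). LineDemo needs no
+# <demo> markup: each non-empty line of the script becomes its own automatically
+# executed block. The file name ``lines_demo.py`` below is hypothetical.
+#
+#     from IPython.lib.demo import LineDemo
+#     d = LineDemo('lines_demo.py')
+#     d()   # runs the first non-empty line
+#     d()   # runs the next line, and so on until d.finished is True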
+
+class IPythonLineDemo(IPythonDemo,LineDemo):
+ """Variant of the LineDemo class whose input is processed by IPython."""
+ pass
+
+
+class ClearMixin(object):
+ """Use this mixin to make Demo classes with less visual clutter.
+
+ Demos using this mixin will clear the screen before every block and use
+ blank marquees.
+
+ Note that in order for the methods defined here to actually override those
+ of the classes it's mixed with, it must go /first/ in the inheritance
+ tree. For example:
+
+ class ClearIPDemo(ClearMixin,IPythonDemo): pass
+
+ will provide an IPythonDemo class with the mixin's features.
+ """
+
+ def marquee(self,txt='',width=78,mark='*'):
+ """Blank marquee that returns '' no matter what the input."""
+ return ''
+
+ def pre_cmd(self):
+ """Method called before executing each block.
+
+ This one simply clears the screen."""
+ from IPython.utils.terminal import _term_clear
+ _term_clear()
+
+class ClearDemo(ClearMixin,Demo):
+ pass
+
+
+class ClearIPDemo(ClearMixin,IPythonDemo):
+ pass
+
+
+def slide(file_path, noclear=False, format_rst=True, formatter="terminal",
+ style="native", auto_all=False, delimiter='...'):
+ if noclear:
+ demo_class = Demo
+ else:
+ demo_class = ClearDemo
+ demo = demo_class(file_path, format_rst=format_rst, formatter=formatter,
+ style=style, auto_all=auto_all)
+ while not demo.finished:
+ demo()
+ try:
+ py3compat.input('\n' + delimiter)
+ except KeyboardInterrupt:
+ exit(1)
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser(description='Run python demos')
+ parser.add_argument('--noclear', '-C', action='store_true',
+ help='Do not clear terminal on each slide')
+ parser.add_argument('--rst', '-r', action='store_true',
+ help='Highlight comments and docstrings as rst')
+ parser.add_argument('--formatter', '-f', default='terminal',
+ help='pygments formatter name could be: terminal, '
+ 'terminal256, terminal16m')
+ parser.add_argument('--style', '-s', default='default',
+ help='pygments style name')
+ parser.add_argument('--auto', '-a', action='store_true',
+ help='Run all blocks automatically without '
+ 'confirmation')
+ parser.add_argument('--delimiter', '-d', default='...',
+ help='slides delimiter added after each slide run')
+ parser.add_argument('file', nargs=1,
+ help='python demo file')
+ args = parser.parse_args()
+ slide(args.file[0], noclear=args.noclear, format_rst=args.rst,
+ formatter=args.formatter, style=args.style, auto_all=args.auto,
+ delimiter=args.delimiter)
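+
+# Example invocation of the command-line runner defined above (illustrative;
+# ``ex_demo.py`` is the sample demo file described in the module docstring):
+#
+#     python -m IPython.lib.demo --rst --auto ex_demo.py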
diff --git a/contrib/python/ipython/py3/IPython/lib/display.py b/contrib/python/ipython/py3/IPython/lib/display.py
new file mode 100644
index 0000000000..f39f389f98
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/display.py
@@ -0,0 +1,677 @@
+"""Various display related classes.
+
+Authors : MinRK, gregcaporaso, dannystaple
+"""
+from html import escape as html_escape
+from os.path import exists, isfile, splitext, abspath, join, isdir
+from os import walk, sep, fsdecode
+
+from IPython.core.display import DisplayObject, TextDisplayObject
+
+from typing import Tuple, Iterable, Optional
+
+__all__ = ['Audio', 'IFrame', 'YouTubeVideo', 'VimeoVideo', 'ScribdDocument',
+ 'FileLink', 'FileLinks', 'Code']
+
+
+class Audio(DisplayObject):
+ """Create an audio object.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in Audio controls being displayed
+ in the frontend (only works in the notebook).
+
+ Parameters
+ ----------
+ data : numpy array, list, unicode, str or bytes
+ Can be one of
+
+ * Numpy 1d array containing the desired waveform (mono)
+ * Numpy 2d array containing waveforms for each channel.
+ Shape=(NCHAN, NSAMPLES). For the standard channel order, see
+ http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
+ * List of float or integer representing the waveform (mono)
+ * String containing the filename
+ * Bytestring containing raw PCM data or
+ * URL pointing to a file on the web.
+
+ If the array option is used, the waveform will be normalized.
+
+ If a filename or url is used, the format support will be browser
+ dependent.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ embed : boolean
+ Should the audio data be embedded using a data URI (True) or should
+ the original source be referenced (False)? Set this to True if you want
+ the audio to be playable later with no internet connection in the notebook.
+
+ Default is `True`, unless the keyword argument `url` is set, then
+ default value is `False`.
+ rate : integer
+ The sampling rate of the raw data.
+ Only required when data parameter is being used as an array
+ autoplay : bool
+ Set to True if the audio should immediately start playing.
+ Default is `False`.
+ normalize : bool
+ Whether audio should be normalized (rescaled) to the maximum possible
+ range. Default is `True`. When set to `False`, `data` must be between
+ -1 and 1 (inclusive), otherwise an error is raised.
+ Applies only when `data` is a list or array of samples; other types of
+ audio are never normalized.
+
+ Examples
+ --------
+
+ >>> import pytest
+ >>> np = pytest.importorskip("numpy")
+
+ Generate a sound
+
+ >>> import numpy as np
+ >>> framerate = 44100
+ >>> t = np.linspace(0,5,framerate*5)
+ >>> data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
+ >>> Audio(data, rate=framerate)
+ <IPython.lib.display.Audio object>
+
+ Can also do stereo or more channels
+
+ >>> dataleft = np.sin(2*np.pi*220*t)
+ >>> dataright = np.sin(2*np.pi*224*t)
+ >>> Audio([dataleft, dataright], rate=framerate)
+ <IPython.lib.display.Audio object>
+
+ From URL:
+
+ >>> Audio("http://www.nch.com.au/acm/8k16bitpcm.wav") # doctest: +SKIP
+ >>> Audio(url="http://www.w3schools.com/html/horse.ogg") # doctest: +SKIP
+
+ From a File:
+
+ >>> Audio('IPython/lib/tests/test.wav') # doctest: +SKIP
+ >>> Audio(filename='IPython/lib/tests/test.wav') # doctest: +SKIP
+
+ From Bytes:
+
+ >>> Audio(b'RAW_WAV_DATA..') # doctest: +SKIP
+ >>> Audio(data=b'RAW_WAV_DATA..') # doctest: +SKIP
+
+ See Also
+ --------
+ ipywidgets.Audio
+
+ Audio widget with more flexibility and options.
+
+ """
+ _read_flags = 'rb'
+
+ def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False, normalize=True, *,
+ element_id=None):
+ if filename is None and url is None and data is None:
+ raise ValueError("No audio data found. Expecting filename, url, or data.")
+ if embed is False and url is None:
+ raise ValueError("No url found. Expecting url when embed=False")
+
+ if url is not None and embed is not True:
+ self.embed = False
+ else:
+ self.embed = True
+ self.autoplay = autoplay
+ self.element_id = element_id
+ super(Audio, self).__init__(data=data, url=url, filename=filename)
+
+ if self.data is not None and not isinstance(self.data, bytes):
+ if rate is None:
+ raise ValueError("rate must be specified when data is a numpy array or list of audio samples.")
+ self.data = Audio._make_wav(data, rate, normalize)
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ import mimetypes
+ if self.embed:
+ super(Audio, self).reload()
+
+ if self.filename is not None:
+ self.mimetype = mimetypes.guess_type(self.filename)[0]
+ elif self.url is not None:
+ self.mimetype = mimetypes.guess_type(self.url)[0]
+ else:
+ self.mimetype = "audio/wav"
+
+ @staticmethod
+ def _make_wav(data, rate, normalize):
+ """ Transform a numpy array to a PCM bytestring """
+ from io import BytesIO
+ import wave
+
+ try:
+ scaled, nchan = Audio._validate_and_normalize_with_numpy(data, normalize)
+ except ImportError:
+ scaled, nchan = Audio._validate_and_normalize_without_numpy(data, normalize)
+
+ fp = BytesIO()
+ waveobj = wave.open(fp,mode='wb')
+ waveobj.setnchannels(nchan)
+ waveobj.setframerate(rate)
+ waveobj.setsampwidth(2)
+ waveobj.setcomptype('NONE','NONE')
+ waveobj.writeframes(scaled)
+ val = fp.getvalue()
+ waveobj.close()
+
+ return val
+
+ @staticmethod
+ def _validate_and_normalize_with_numpy(data, normalize) -> Tuple[bytes, int]:
+ import numpy as np
+
+ data = np.array(data, dtype=float)
+ if len(data.shape) == 1:
+ nchan = 1
+ elif len(data.shape) == 2:
+ # In wave files, channels are interleaved. E.g.,
+ # "L1R1L2R2..." for stereo. See
+ # http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
+ # for channel ordering
+ nchan = data.shape[0]
+ data = data.T.ravel()
+ else:
+ raise ValueError('Array audio input must be a 1D or 2D array')
+
+ max_abs_value = np.max(np.abs(data))
+ normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
+ scaled = data / normalization_factor * 32767
+ return scaled.astype("<h").tobytes(), nchan
+
+ @staticmethod
+ def _validate_and_normalize_without_numpy(data, normalize):
+ import array
+ import sys
+
+ data = array.array('f', data)
+
+ try:
+ max_abs_value = float(max([abs(x) for x in data]))
+ except TypeError as e:
+ raise TypeError('Only lists of mono audio are '
+ 'supported if numpy is not installed') from e
+
+ normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
+ scaled = array.array('h', [int(x / normalization_factor * 32767) for x in data])
+ if sys.byteorder == 'big':
+ scaled.byteswap()
+ nchan = 1
+ return scaled.tobytes(), nchan
+
+ @staticmethod
+ def _get_normalization_factor(max_abs_value, normalize):
+ if not normalize and max_abs_value > 1:
+ raise ValueError('Audio data must be between -1 and 1 when normalize=False.')
+ return max_abs_value if normalize else 1
+
+ def _data_and_metadata(self):
+ """shortcut for returning metadata with url information, if defined"""
+ md = {}
+ if self.url:
+ md['url'] = self.url
+ if md:
+ return self.data, md
+ else:
+ return self.data
+
+ def _repr_html_(self):
+ src = """
+ <audio {element_id} controls="controls" {autoplay}>
+ <source src="{src}" type="{type}" />
+ Your browser does not support the audio element.
+ </audio>
+ """
+ return src.format(src=self.src_attr(), type=self.mimetype, autoplay=self.autoplay_attr(),
+ element_id=self.element_id_attr())
+
+ def src_attr(self):
+ import base64
+ if self.embed and (self.data is not None):
+ data = base64.b64encode(self.data).decode('ascii')
+ return """data:{type};base64,{base64}""".format(type=self.mimetype,
+ base64=data)
+ elif self.url is not None:
+ return self.url
+ else:
+ return ""
+
+ def autoplay_attr(self):
+ if(self.autoplay):
+ return 'autoplay="autoplay"'
+ else:
+ return ''
+
+ def element_id_attr(self):
+ if (self.element_id):
+ return 'id="{element_id}"'.format(element_id=self.element_id)
+ else:
+ return ''
+
+class IFrame(object):
+ """
+ Generic class to embed an iframe in an IPython notebook
+ """
+
+ iframe = """
+ <iframe
+ width="{width}"
+ height="{height}"
+ src="{src}{params}"
+ frameborder="0"
+ allowfullscreen
+ {extras}
+ ></iframe>
+ """
+
+ def __init__(
+ self, src, width, height, extras: Optional[Iterable[str]] = None, **kwargs
+ ):
+ if extras is None:
+ extras = []
+
+ self.src = src
+ self.width = width
+ self.height = height
+ self.extras = extras
+ self.params = kwargs
+
+ def _repr_html_(self):
+ """return the embed iframe"""
+ if self.params:
+ from urllib.parse import urlencode
+ params = "?" + urlencode(self.params)
+ else:
+ params = ""
+ return self.iframe.format(
+ src=self.src,
+ width=self.width,
+ height=self.height,
+ params=params,
+ extras=" ".join(self.extras),
+ )
+
+
+class YouTubeVideo(IFrame):
+ """Class for embedding a YouTube Video in an IPython session, based on its video id.
+
+ e.g. to embed the video from https://www.youtube.com/watch?v=foo , you would
+ do::
+
+ vid = YouTubeVideo("foo")
+ display(vid)
+
+ To start from 30 seconds::
+
+ vid = YouTubeVideo("abc", start=30)
+ display(vid)
+
+ To calculate seconds from time as hours, minutes, seconds use
+ :class:`datetime.timedelta`::
+
+ start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
+
+ Other parameters can be provided as documented at
+ https://developers.google.com/youtube/player_parameters#Parameters
+
+ When converting the notebook using nbconvert, a jpeg representation of the video
+ will be inserted in the document.
+ """
+
+ def __init__(self, id, width=400, height=300, allow_autoplay=False, **kwargs):
+ self.id=id
+ src = "https://www.youtube.com/embed/{0}".format(id)
+ if allow_autoplay:
+ extras = list(kwargs.get("extras", [])) + ['allow="autoplay"']
+ kwargs.update(autoplay=1, extras=extras)
+ super(YouTubeVideo, self).__init__(src, width, height, **kwargs)
+
+ def _repr_jpeg_(self):
+ # Deferred import
+ from urllib.request import urlopen
+
+ try:
+ return urlopen("https://img.youtube.com/vi/{id}/hqdefault.jpg".format(id=self.id)).read()
+ except IOError:
+ return None
+
+class VimeoVideo(IFrame):
+ """
+ Class for embedding a Vimeo video in an IPython session, based on its video id.
+ """
+
+ def __init__(self, id, width=400, height=300, **kwargs):
+ src="https://player.vimeo.com/video/{0}".format(id)
+ super(VimeoVideo, self).__init__(src, width, height, **kwargs)
+
+class ScribdDocument(IFrame):
+ """
+ Class for embedding a Scribd document in an IPython session
+
+ Use the start_page param to specify a starting page in the document.
+ Use the view_mode param to specify the display type, one of: scroll | slideshow | book.
+
+ e.g. to display Wes' foundational paper about pandas in book mode, starting from page 3:
+
+ ScribdDocument(71048089, width=800, height=400, start_page=3, view_mode="book")
+ """
+
+ def __init__(self, id, width=400, height=300, **kwargs):
+ src="https://www.scribd.com/embeds/{0}/content".format(id)
+ super(ScribdDocument, self).__init__(src, width, height, **kwargs)
+
+class FileLink(object):
+ """Class for embedding a local file link in an IPython session, based on path
+
+ e.g. to embed a link that was generated in the IPython notebook as
+ my/data.txt, you would do::
+
+ local_file = FileLink("my/data.txt")
+ display(local_file)
+
+ or in the HTML notebook, just::
+
+ FileLink("my/data.txt")
+ """
+
+ html_link_str = "<a href='%s' target='_blank'>%s</a>"
+
+ def __init__(self,
+ path,
+ url_prefix='',
+ result_html_prefix='',
+ result_html_suffix='<br>'):
+ """
+ Parameters
+ ----------
+ path : str
+ path to the file or directory that should be formatted
+ url_prefix : str
+ prefix to be prepended to all files to form a working link [default:
+ '']
+ result_html_prefix : str
+ text to prepend to the link [default: '']
+ result_html_suffix : str
+ text to append at the end of link [default: '<br>']
+ """
+ if isdir(path):
+ raise ValueError("Cannot display a directory using FileLink. "
+ "Use FileLinks to display '%s'." % path)
+ self.path = fsdecode(path)
+ self.url_prefix = url_prefix
+ self.result_html_prefix = result_html_prefix
+ self.result_html_suffix = result_html_suffix
+
+ def _format_path(self):
+ fp = ''.join([self.url_prefix, html_escape(self.path)])
+ return ''.join([self.result_html_prefix,
+ self.html_link_str % \
+ (fp, html_escape(self.path, quote=False)),
+ self.result_html_suffix])
+
+ def _repr_html_(self):
+ """return html link to file
+ """
+ if not exists(self.path):
+ return ("Path (<tt>%s</tt>) doesn't exist. "
+ "It may still be in the process of "
+ "being generated, or you may have the "
+ "incorrect path." % self.path)
+
+ return self._format_path()
+
+ def __repr__(self):
+ """return absolute path to file
+ """
+ return abspath(self.path)
+
+class FileLinks(FileLink):
+ """Class for embedding local file links in an IPython session, based on path
+
+ e.g. to embed links to files that were generated in the IPython notebook
+ under ``my/data``, you would do::
+
+ local_files = FileLinks("my/data")
+ display(local_files)
+
+ or in the HTML notebook, just::
+
+ FileLinks("my/data")
+ """
+ def __init__(self,
+ path,
+ url_prefix='',
+ included_suffixes=None,
+ result_html_prefix='',
+ result_html_suffix='<br>',
+ notebook_display_formatter=None,
+ terminal_display_formatter=None,
+ recursive=True):
+ """
+ See :class:`FileLink` for the ``path``, ``url_prefix``,
+ ``result_html_prefix`` and ``result_html_suffix`` parameters.
+
+ included_suffixes : list
+ Filename suffixes to include when formatting output [default: include
+ all files]
+
+ notebook_display_formatter : function
+ Used to format links for display in the notebook. See discussion of
+ formatter functions below.
+
+ terminal_display_formatter : function
+ Used to format links for display in the terminal. See discussion of
+ formatter functions below.
+
+ recursive : boolean
+ Whether to recurse into subdirectories. Default is True.
+
+ Formatter functions must be of the form::
+
+ f(dirname, fnames, included_suffixes)
+
+ dirname : str
+ The name of a directory
+ fnames : list
+ The files in that directory
+ included_suffixes : list
+ The file suffixes that should be included in the output (passing None
+ means to include all suffixes in the output in the built-in formatters)
+
+ The function should return a list of lines that will be printed in the
+ notebook (if passing notebook_display_formatter) or the terminal (if
+ passing terminal_display_formatter). This function is iterated over for
+ each directory in self.path. Default formatters are in place; alternatives
+ can be passed here to support custom formatting.
+
+ """
+ if isfile(path):
+ raise ValueError("Cannot display a file using FileLinks. "
+ "Use FileLink to display '%s'." % path)
+ self.included_suffixes = included_suffixes
+ # remove trailing slashes for more consistent output formatting
+ path = path.rstrip('/')
+
+ self.path = path
+ self.url_prefix = url_prefix
+ self.result_html_prefix = result_html_prefix
+ self.result_html_suffix = result_html_suffix
+
+ self.notebook_display_formatter = \
+ notebook_display_formatter or self._get_notebook_display_formatter()
+ self.terminal_display_formatter = \
+ terminal_display_formatter or self._get_terminal_display_formatter()
+
+ self.recursive = recursive
+
+ def _get_display_formatter(
+ self, dirname_output_format, fname_output_format, fp_format, fp_cleaner=None
+ ):
+ """generate built-in formatter function
+
+ this is used to define both the notebook and terminal built-in
+ formatters as they only differ by some wrapper text for each entry
+
+ dirname_output_format: string to use for formatting directory
+ names, dirname will be substituted for a single "%s" which
+ must appear in this string
+ fname_output_format: string to use for formatting file names,
+ if a single "%s" appears in the string, fname will be substituted
+ if two "%s" appear in the string, the path to fname will be
+ substituted for the first and fname will be substituted for the
+ second
+ fp_format: string to use for formatting filepaths, must contain
+ exactly two "%s" and the dirname will be substituted for the first
+ and fname will be substituted for the second
+ """
+ def f(dirname, fnames, included_suffixes=None):
+ result = []
+ # begin by figuring out which filenames, if any,
+ # are going to be displayed
+ display_fnames = []
+ for fname in fnames:
+ if (isfile(join(dirname,fname)) and
+ (included_suffixes is None or
+ splitext(fname)[1] in included_suffixes)):
+ display_fnames.append(fname)
+
+ if len(display_fnames) == 0:
+ # if there are no filenames to display, don't print anything
+ # (not even the directory name)
+ pass
+ else:
+ # otherwise print the formatted directory name followed by
+ # the formatted filenames
+ dirname_output_line = dirname_output_format % dirname
+ result.append(dirname_output_line)
+ for fname in display_fnames:
+ fp = fp_format % (dirname,fname)
+ if fp_cleaner is not None:
+ fp = fp_cleaner(fp)
+ try:
+ # output can include both a filepath and a filename...
+ fname_output_line = fname_output_format % (fp, fname)
+ except TypeError:
+ # ... or just a single filepath
+ fname_output_line = fname_output_format % fname
+ result.append(fname_output_line)
+ return result
+ return f
+
+ def _get_notebook_display_formatter(self,
+ spacer="&nbsp;&nbsp;"):
+ """ generate function to use for notebook formatting
+ """
+ dirname_output_format = \
+ self.result_html_prefix + "%s/" + self.result_html_suffix
+ fname_output_format = \
+ self.result_html_prefix + spacer + self.html_link_str + self.result_html_suffix
+ fp_format = self.url_prefix + '%s/%s'
+ if sep == "\\":
+ # Working on a platform where the path separator is "\", so
+ # must convert these to "/" for generating a URI
+ def fp_cleaner(fp):
+ # Replace all occurrences of backslash ("\") with a forward
+ # slash ("/") - this is necessary on windows when a path is
+ # provided as input, but we must link to a URI
+ return fp.replace('\\','/')
+ else:
+ fp_cleaner = None
+
+ return self._get_display_formatter(dirname_output_format,
+ fname_output_format,
+ fp_format,
+ fp_cleaner)
+
+ def _get_terminal_display_formatter(self,
+ spacer=" "):
+ """ generate function to use for terminal formatting
+ """
+ dirname_output_format = "%s/"
+ fname_output_format = spacer + "%s"
+ fp_format = '%s/%s'
+
+ return self._get_display_formatter(dirname_output_format,
+ fname_output_format,
+ fp_format)
+
+ def _format_path(self):
+ result_lines = []
+ if self.recursive:
+ walked_dir = list(walk(self.path))
+ else:
+ walked_dir = [next(walk(self.path))]
+ walked_dir.sort()
+ for dirname, subdirs, fnames in walked_dir:
+ result_lines += self.notebook_display_formatter(dirname, fnames, self.included_suffixes)
+ return '\n'.join(result_lines)
+
+ def __repr__(self):
+ """return newline-separated absolute paths
+ """
+ result_lines = []
+ if self.recursive:
+ walked_dir = list(walk(self.path))
+ else:
+ walked_dir = [next(walk(self.path))]
+ walked_dir.sort()
+ for dirname, subdirs, fnames in walked_dir:
+ result_lines += self.terminal_display_formatter(dirname, fnames, self.included_suffixes)
+ return '\n'.join(result_lines)
+
+
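+# Usage sketch (illustrative only): a custom terminal formatter for FileLinks
+# following the f(dirname, fnames, included_suffixes) contract described in
+# FileLinks.__init__; it lists matching file names without a directory header.
+def _example_filelinks_custom_formatter(path="my/data"):
+    def names_only(dirname, fnames, included_suffixes):
+        return [fname for fname in fnames
+                if included_suffixes is None
+                or splitext(fname)[1] in included_suffixes]
+    return FileLinks(path, terminal_display_formatter=names_only)
+
+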
+class Code(TextDisplayObject):
+ """Display syntax-highlighted source code.
+
+ This uses Pygments to highlight the code for HTML and Latex output.
+
+ Parameters
+ ----------
+ data : str
+ The code as a string
+ url : str
+ A URL to fetch the code from
+ filename : str
+ A local filename to load the code from
+ language : str
+ The short name of a Pygments lexer to use for highlighting.
+ If not specified, it will guess the lexer based on the filename
+ or the code. Available lexers: http://pygments.org/docs/lexers/
+ """
+ def __init__(self, data=None, url=None, filename=None, language=None):
+ self.language = language
+ super().__init__(data=data, url=url, filename=filename)
+
+ def _get_lexer(self):
+ if self.language:
+ from pygments.lexers import get_lexer_by_name
+ return get_lexer_by_name(self.language)
+ elif self.filename:
+ from pygments.lexers import get_lexer_for_filename
+ return get_lexer_for_filename(self.filename)
+ else:
+ from pygments.lexers import guess_lexer
+ return guess_lexer(self.data)
+
+ def __repr__(self):
+ return self.data
+
+ def _repr_html_(self):
+ from pygments import highlight
+ from pygments.formatters import HtmlFormatter
+ fmt = HtmlFormatter()
+ style = '<style>{}</style>'.format(fmt.get_style_defs('.output_html'))
+ return style + highlight(self.data, self._get_lexer(), fmt)
+
+ def _repr_latex_(self):
+ from pygments import highlight
+ from pygments.formatters import LatexFormatter
+ return highlight(self.data, self._get_lexer(), LatexFormatter())
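+
+
+# Usage sketch (illustrative only): display a highlighted snippet with the
+# Code object above; assumes Pygments is installed and an active IPython
+# session where rich reprs are rendered.
+def _example_code_display():
+    from IPython.display import display
+    snippet = Code(data="print('hello')", language="python")
+    display(snippet)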
diff --git a/contrib/python/ipython/py3/IPython/lib/editorhooks.py b/contrib/python/ipython/py3/IPython/lib/editorhooks.py
new file mode 100644
index 0000000000..d8bd6ac81b
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/editorhooks.py
@@ -0,0 +1,127 @@
+""" 'editor' hooks for common editors that work well with ipython
+
+They should honor the line number argument, at least.
+
+Contributions are *very* welcome.
+"""
+
+import os
+import shlex
+import subprocess
+import sys
+
+from IPython import get_ipython
+from IPython.core.error import TryNext
+from IPython.utils import py3compat
+
+
+def install_editor(template, wait=False):
+ """Installs the editor that is called by IPython for the %edit magic.
+
+ This overrides the default editor, which is generally set by your EDITOR
+ environment variable or is notepad (Windows) or vi (Linux). By supplying a
+ template string, you can control how the editor is invoked
+ by IPython -- e.g. the format in which it accepts command line options.
+
+ Parameters
+ ----------
+ template : str
+ `template` acts as a template for how your editor is invoked by
+ the shell. It should contain '{filename}', which will be replaced on
+ invocation with the file name, and '{line}', which will be replaced
+ by the line number (or 0) at which to open the file.
+ wait : bool
+ If `wait` is true, wait until the user presses enter before returning,
+ to facilitate non-blocking editors that exit immediately after
+ the call.
+ """
+
+ # not all editors support $line, so we'll leave out this check
+ # for substitution in ['$file', '$line']:
+ # if not substitution in run_template:
+ # raise ValueError(('run_template should contain %s'
+ # ' for string substitution. You supplied "%s"' % (substitution,
+ # run_template)))
+
+ def call_editor(self, filename, line=0):
+ if line is None:
+ line = 0
+ cmd = template.format(filename=shlex.quote(filename), line=line)
+ print(">", cmd)
+ # shlex.quote doesn't work right on Windows, but it does after splitting
+ if sys.platform.startswith('win'):
+ cmd = shlex.split(cmd)
+ proc = subprocess.Popen(cmd, shell=True)
+ if proc.wait() != 0:
+ raise TryNext()
+ if wait:
+ py3compat.input("Press Enter when done editing:")
+
+ get_ipython().set_hook('editor', call_editor)
+ get_ipython().editor = template
+
+
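+# Usage sketch (illustrative only): wire a hypothetical `myeditor` command into
+# %edit using the template syntax documented above; any CLI accepting an
+# invocation like `myeditor +LINE FILE` can be installed the same way.
+def _example_install_custom_editor():
+    install_editor(u'myeditor +{line} {filename}', wait=True)
+
+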
+# in these, exe is always the path/name of the executable. Useful
+# if you don't have the editor directory in your path
+def komodo(exe=u'komodo'):
+ """ Activestate Komodo [Edit] """
+ install_editor(exe + u' -l {line} {filename}', wait=True)
+
+
+def scite(exe=u"scite"):
+ """ SciTE or Sc1 """
+ install_editor(exe + u' {filename} -goto:{line}')
+
+
+def notepadplusplus(exe=u'notepad++'):
+ """ Notepad++ http://notepad-plus.sourceforge.net """
+ install_editor(exe + u' -n{line} {filename}')
+
+
+def jed(exe=u'jed'):
+ """ JED, the lightweight emacsish editor """
+ install_editor(exe + u' +{line} {filename}')
+
+
+def idle(exe=u'idle'):
+ """ Idle, the editor bundled with python
+
+ Parameters
+ ----------
+ exe : str, None
+ If None, the idle.py script bundled with idlelib is located and used.
+ """
+ if exe is None:
+ import idlelib
+ p = os.path.dirname(idlelib.__file__)
+ # i'm not sure if this actually works. Is this idle.py script
+ # guaranteed to be executable?
+ exe = os.path.join(p, 'idle.py')
+ install_editor(exe + u' {filename}')
+
+
+def mate(exe=u'mate'):
+ """ TextMate, the missing editor"""
+ # wait=True is not required since we're using the -w flag to mate
+ install_editor(exe + u' -w -l {line} {filename}')
+
+
+# ##########################################
+# these are untested, report any problems
+# ##########################################
+
+
+def emacs(exe=u'emacs'):
+ install_editor(exe + u' +{line} {filename}')
+
+
+def gnuclient(exe=u'gnuclient'):
+ install_editor(exe + u' -nw +{line} {filename}')
+
+
+def crimson_editor(exe=u'cedt.exe'):
+ install_editor(exe + u' /L:{line} {filename}')
+
+
+def kate(exe=u'kate'):
+ install_editor(exe + u' -u -l {line} {filename}')
diff --git a/contrib/python/ipython/py3/IPython/lib/guisupport.py b/contrib/python/ipython/py3/IPython/lib/guisupport.py
new file mode 100644
index 0000000000..4d532d0f4d
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/guisupport.py
@@ -0,0 +1,155 @@
+# coding: utf-8
+"""
+Support for creating GUI apps and starting event loops.
+
+IPython's GUI integration allows interactive plotting and GUI usage in an
+IPython session. IPython has two different types of GUI integration:
+
+1. The terminal based IPython supports GUI event loops through Python's
+ PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
+ whenever raw_input is waiting for a user to type code. We implement GUI
+ support in the terminal by setting PyOS_InputHook to a function that
+ iterates the event loop for a short while. It is important to note that
+ in this situation, the real GUI event loop is NOT run in the normal
+ manner, so you can't use the normal means to detect that it is running.
+2. In the two process IPython kernel/frontend, the GUI event loop is run in
+ the kernel. In this case, the event loop is run in the normal manner by
+ calling the function or method of the GUI toolkit that starts the event
+ loop.
+
+In addition to starting the GUI event loops in one of these two ways, IPython
+will *always* create an appropriate GUI application object when GUI
+integration is enabled.
+
+If you want your GUI apps to run in IPython you need to do two things:
+
+1. Test to see if there is already an existing main application object. If
+ there is, you should use it. If there is not an existing application object
+ you should create one.
+2. Test to see if the GUI event loop is running. If it is, you should not
+ start it. If the event loop is not running you may start it.
+
+This module contains functions for each toolkit that perform these things
+in a consistent manner. Because of how PyOS_InputHook runs the event loop
+you cannot detect if the event loop is running using the traditional calls
+(such as ``wx.GetApp().IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
+set, these methods will return a false negative. That is, they will say the
+event loop is not running when it actually is. To work around this limitation
+we propose the following informal protocol:
+
+* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
+ attribute of the main application object to ``True``. This should be done
+ regardless of how the event loop is actually run.
+* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
+ attribute of the main application object to ``False``.
+* If you want to see if the event loop is running, you *must* use ``hasattr``
+ to see if ``_in_event_loop`` attribute has been set. If it is set, you
+ *must* use its value. If it has not been set, you can query the toolkit
+ in the normal manner.
+* If you want GUI support and no one else has created an application or
+ started the event loop you *must* do this. We don't want projects to
+ attempt to defer these things to someone else if they themselves need it.
+
+The functions below implement this logic for each GUI toolkit. If you need
+to create custom application subclasses, you will likely have to modify this
+code for your own purposes. This code can be copied into your own project
+so you don't have to depend on IPython.
+
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from IPython.core.getipython import get_ipython
+
+#-----------------------------------------------------------------------------
+# wx
+#-----------------------------------------------------------------------------
+
+def get_app_wx(*args, **kwargs):
+ """Create a new wx app or return an exiting one."""
+ import wx
+ app = wx.GetApp()
+ if app is None:
+ if 'redirect' not in kwargs:
+ kwargs['redirect'] = False
+ app = wx.PySimpleApp(*args, **kwargs)
+ return app
+
+def is_event_loop_running_wx(app=None):
+ """Is the wx event loop running."""
+ # New way: check attribute on shell instance
+ ip = get_ipython()
+ if ip is not None:
+ if ip.active_eventloop and ip.active_eventloop == 'wx':
+ return True
+ # Fall through to checking the application, because Wx has a native way
+ # to check if the event loop is running, unlike Qt.
+
+ # Old way: check Wx application
+ if app is None:
+ app = get_app_wx()
+ if hasattr(app, '_in_event_loop'):
+ return app._in_event_loop
+ else:
+ return app.IsMainLoopRunning()
+
+def start_event_loop_wx(app=None):
+ """Start the wx event loop in a consistent manner."""
+ if app is None:
+ app = get_app_wx()
+ if not is_event_loop_running_wx(app):
+ app._in_event_loop = True
+ app.MainLoop()
+ app._in_event_loop = False
+ else:
+ app._in_event_loop = True
+
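+# Usage sketch (illustrative only): follow the informal protocol above by
+# reusing any existing wx application and starting its main loop only when it
+# is not already running; a real program would create and show windows first.
+def _example_run_wx():
+    app = get_app_wx()
+    start_event_loop_wx(app)
+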
+#-----------------------------------------------------------------------------
+# Qt
+#-----------------------------------------------------------------------------
+
+def get_app_qt4(*args, **kwargs):
+ """Create a new Qt app or return an existing one."""
+ from IPython.external.qt_for_kernel import QtGui
+ app = QtGui.QApplication.instance()
+ if app is None:
+ if not args:
+ args = ([""],)
+ app = QtGui.QApplication(*args, **kwargs)
+ return app
+
+def is_event_loop_running_qt4(app=None):
+ """Is the qt event loop running."""
+ # New way: check attribute on shell instance
+ ip = get_ipython()
+ if ip is not None:
+ return ip.active_eventloop and ip.active_eventloop.startswith('qt')
+
+ # Old way: check attribute on QApplication singleton
+ if app is None:
+ app = get_app_qt4([""])
+ if hasattr(app, '_in_event_loop'):
+ return app._in_event_loop
+ else:
+ # Does qt provide another way to detect this?
+ return False
+
+def start_event_loop_qt4(app=None):
+ """Start the qt event loop in a consistent manner."""
+ if app is None:
+ app = get_app_qt4([""])
+ if not is_event_loop_running_qt4(app):
+ app._in_event_loop = True
+ app.exec_()
+ app._in_event_loop = False
+ else:
+ app._in_event_loop = True
+
+#-----------------------------------------------------------------------------
+# Tk
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# gtk
+#-----------------------------------------------------------------------------
diff --git a/contrib/python/ipython/py3/IPython/lib/latextools.py b/contrib/python/ipython/py3/IPython/lib/latextools.py
new file mode 100644
index 0000000000..f2aa572884
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/latextools.py
@@ -0,0 +1,257 @@
+# -*- coding: utf-8 -*-
+"""Tools for handling LaTeX."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from io import BytesIO, open
+import os
+import tempfile
+import shutil
+import subprocess
+from base64 import encodebytes
+import textwrap
+
+from pathlib import Path
+
+from IPython.utils.process import find_cmd, FindCmdError
+from traitlets.config import get_config
+from traitlets.config.configurable import SingletonConfigurable
+from traitlets import List, Bool, Unicode
+from IPython.utils.py3compat import cast_unicode
+
+
+class LaTeXTool(SingletonConfigurable):
+ """An object to store configuration of the LaTeX tool."""
+ def _config_default(self):
+ return get_config()
+
+ backends = List(
+ Unicode(), ["matplotlib", "dvipng"],
+ help="Preferred backend to draw LaTeX math equations. "
+ "Backends in the list are checked one by one and the first "
+ "usable one is used. Note that `matplotlib` backend "
+ "is usable only for inline style equations. To draw "
+ "display style equations, `dvipng` backend must be specified. ",
+ # It is a List instead of Enum, to make configuration more
+ # flexible. For example, to use matplotlib mainly but dvipng
+ # for display style, the default ["matplotlib", "dvipng"] can
+ # be used. To NOT use dvipng so that other repr such as
+ # unicode pretty printing is used, you can use ["matplotlib"].
+ ).tag(config=True)
+
+ use_breqn = Bool(
+ True,
+ help="Use breqn.sty to automatically break long equations. "
+ "This configuration takes effect only for dvipng backend.",
+ ).tag(config=True)
+
+ packages = List(
+ ['amsmath', 'amsthm', 'amssymb', 'bm'],
+ help="A list of packages to use for dvipng backend. "
+ "'breqn' will be automatically appended when use_breqn=True.",
+ ).tag(config=True)
+
+ preamble = Unicode(
+ help="Additional preamble to use when generating LaTeX source "
+ "for dvipng backend.",
+ ).tag(config=True)
+
+
+def latex_to_png(s, encode=False, backend=None, wrap=False, color='Black',
+ scale=1.0):
+ """Render a LaTeX string to PNG.
+
+ Parameters
+ ----------
+ s : str
+ The raw string containing valid inline LaTeX.
+ encode : bool, optional
+ Whether the PNG data should be base64 encoded to make it JSON-able.
+ backend : {matplotlib, dvipng}
+ Backend for producing PNG data.
+ wrap : bool
+ If true, automatically wrap `s` as a LaTeX equation.
+ color : string
+ Foreground color name among dvipsnames, e.g. 'Maroon', or in hex RGB
+ format, e.g. '#AA20FA'.
+ scale : float
+ Scale factor for the resulting PNG.
+
+ None is returned when the backend cannot be used.
+
+ """
+ s = cast_unicode(s)
+ allowed_backends = LaTeXTool.instance().backends
+ if backend is None:
+ backend = allowed_backends[0]
+ if backend not in allowed_backends:
+ return None
+ if backend == 'matplotlib':
+ f = latex_to_png_mpl
+ elif backend == 'dvipng':
+ f = latex_to_png_dvipng
+ if color.startswith('#'):
+ # Convert hex RGB color to LaTeX RGB color.
+ if len(color) == 7:
+ try:
+ color = "RGB {}".format(" ".join([str(int(x, 16)) for x in
+ textwrap.wrap(color[1:], 2)]))
+ except ValueError as e:
+ raise ValueError('Invalid color specification {}.'.format(color)) from e
+ else:
+ raise ValueError('Invalid color specification {}.'.format(color))
+ else:
+ raise ValueError('No such backend {0}'.format(backend))
+ bin_data = f(s, wrap, color, scale)
+ if encode and bin_data:
+ bin_data = encodebytes(bin_data)
+ return bin_data
+
+
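+# Usage sketch (illustrative only): render an inline equation to PNG bytes,
+# letting the configured backends decide; None means no usable backend.
+def _example_render_equation():
+    png = latex_to_png(r"\int_0^1 x^2\,dx", wrap=True, scale=2.0)
+    return png  # raw PNG bytes, or None if neither backend is available
+
+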
+def latex_to_png_mpl(s, wrap, color='Black', scale=1.0):
+ try:
+ from matplotlib import figure, font_manager, mathtext
+ from matplotlib.backends import backend_agg
+ from pyparsing import ParseFatalException
+ except ImportError:
+ return None
+
+ # mpl mathtext doesn't support display math, force inline
+ s = s.replace('$$', '$')
+ if wrap:
+ s = u'${0}$'.format(s)
+
+ try:
+ prop = font_manager.FontProperties(size=12)
+ dpi = 120 * scale
+ buffer = BytesIO()
+
+ # Adapted from mathtext.math_to_image
+ parser = mathtext.MathTextParser("path")
+ width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
+ fig = figure.Figure(figsize=(width / 72, height / 72))
+ fig.text(0, depth / height, s, fontproperties=prop, color=color)
+ backend_agg.FigureCanvasAgg(fig)
+ fig.savefig(buffer, dpi=dpi, format="png", transparent=True)
+ return buffer.getvalue()
+ except (ValueError, RuntimeError, ParseFatalException):
+ return None
+
+
+def latex_to_png_dvipng(s, wrap, color='Black', scale=1.0):
+ try:
+ find_cmd('latex')
+ find_cmd('dvipng')
+ except FindCmdError:
+ return None
+
+ startupinfo = None
+ if os.name == "nt":
+ # prevent popup-windows
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+
+ try:
+ workdir = Path(tempfile.mkdtemp())
+ tmpfile = "tmp.tex"
+ dvifile = "tmp.dvi"
+ outfile = "tmp.png"
+
+ with workdir.joinpath(tmpfile).open("w", encoding="utf8") as f:
+ f.writelines(genelatex(s, wrap))
+
+ subprocess.check_call(
+ ["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
+ cwd=workdir,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ startupinfo=startupinfo,
+ )
+
+ resolution = round(150 * scale)
+ subprocess.check_call(
+ [
+ "dvipng",
+ "-T",
+ "tight",
+ "-D",
+ str(resolution),
+ "-z",
+ "9",
+ "-bg",
+ "Transparent",
+ "-o",
+ outfile,
+ dvifile,
+ "-fg",
+ color,
+ ],
+ cwd=workdir,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ startupinfo=startupinfo,
+ )
+
+ with workdir.joinpath(outfile).open("rb") as f:
+ return f.read()
+ except subprocess.CalledProcessError:
+ return None
+ finally:
+ shutil.rmtree(workdir)
+
+
+def kpsewhich(filename):
+ """Invoke kpsewhich command with an argument `filename`."""
+ try:
+ find_cmd("kpsewhich")
+ proc = subprocess.Popen(
+ ["kpsewhich", filename],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ return stdout.strip().decode('utf8', 'replace')
+ except FindCmdError:
+ pass
+
+
+def genelatex(body, wrap):
+ """Generate LaTeX document for dvipng backend."""
+ lt = LaTeXTool.instance()
+ breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
+ yield r'\documentclass{article}'
+ packages = lt.packages
+ if breqn:
+ packages = packages + ['breqn']
+ for pack in packages:
+ yield r'\usepackage{{{0}}}'.format(pack)
+ yield r'\pagestyle{empty}'
+ if lt.preamble:
+ yield lt.preamble
+ yield r'\begin{document}'
+ if breqn:
+ yield r'\begin{dmath*}'
+ yield body
+ yield r'\end{dmath*}'
+ elif wrap:
+ yield u'$${0}$$'.format(body)
+ else:
+ yield body
+ yield u'\\end{document}'
+
+
+_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
+
+def latex_to_html(s, alt='image'):
+ """Render LaTeX to HTML with embedded PNG data using data URIs.
+
+ Parameters
+ ----------
+ s : str
+ The raw string containing valid inline LaTeX.
+ alt : str
+ The alt text to use for the HTML.
+ """
+ bin_data = latex_to_png(s, encode=True)
+ if bin_data:
+ return _data_uri_template_png % (bin_data.decode('ascii'), alt)
+
+
diff --git a/contrib/python/ipython/py3/IPython/lib/lexers.py b/contrib/python/ipython/py3/IPython/lib/lexers.py
new file mode 100644
index 0000000000..42d5b7a87c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/lexers.py
@@ -0,0 +1,540 @@
+# -*- coding: utf-8 -*-
+"""
+Defines a variety of Pygments lexers for highlighting IPython code.
+
+This includes:
+
+ IPythonLexer, IPython3Lexer
+ Lexers for pure IPython (python + magic/shell commands)
+
+ IPythonPartialTracebackLexer, IPythonTracebackLexer
+ Supports 2.x and 3.x via keyword `python3`. The partial traceback
+ lexer reads everything but the Python code appearing in a traceback.
+ The full lexer combines the partial lexer with an IPython lexer.
+
+ IPythonConsoleLexer
+ A lexer for IPython console sessions, with support for tracebacks.
+
+ IPyLexer
+ A friendly lexer which examines the first line of text and from it,
+ decides whether to use an IPython lexer or an IPython console lexer.
+ This is probably the only lexer that needs to be explicitly added
+ to Pygments.
+
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, the IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+# Standard library
+import re
+
+# Third party
+from pygments.lexers import (
+ BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer,
+ Python3Lexer, TexLexer)
+from pygments.lexer import (
+ Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
+)
+from pygments.token import (
+ Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
+)
+from pygments.util import get_bool_opt
+
+# Local
+
+line_re = re.compile('.*?\n')
+
+__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
+ 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
+ 'IPythonConsoleLexer', 'IPyLexer']
+
+
+def build_ipy_lexer(python3):
+ """Builds IPython lexers depending on the value of `python3`.
+
+ The lexer inherits from an appropriate Python lexer and then adds
+ information about IPython specific keywords (i.e. magic commands,
+ shell commands, etc.)
+
+ Parameters
+ ----------
+ python3 : bool
+ If `True`, then build an IPython lexer from a Python 3 lexer.
+
+ """
+ # It would be nice to have a single IPython lexer class which takes
+ # a boolean `python3`. But since there are two Python lexer classes,
+ # we will also have two IPython lexer classes.
+ if python3:
+ PyLexer = Python3Lexer
+ name = 'IPython3'
+ aliases = ['ipython3']
+ doc = """IPython3 Lexer"""
+ else:
+ PyLexer = PythonLexer
+ name = 'IPython'
+ aliases = ['ipython2', 'ipython']
+ doc = """IPython Lexer"""
+
+ ipython_tokens = [
+ (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))),
+ (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
+ (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
+ (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))),
+ (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))),
+ (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))),
+ (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))),
+ (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))),
+ (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
+ (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
+ (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
+ (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
+ (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
+ (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
+ using(BashLexer), Text)),
+ (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
+ (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
+ (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
+ (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
+ (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
+ ]
+
+ tokens = PyLexer.tokens.copy()
+ tokens['root'] = ipython_tokens + tokens['root']
+
+ attrs = {'name': name, 'aliases': aliases, 'filenames': [],
+ '__doc__': doc, 'tokens': tokens}
+
+ return type(name, (PyLexer,), attrs)
+
+
+IPython3Lexer = build_ipy_lexer(python3=True)
+IPythonLexer = build_ipy_lexer(python3=False)
+
+
+class IPythonPartialTracebackLexer(RegexLexer):
+ """
+ Partial lexer for IPython tracebacks.
+
+ Handles all the non-python output.
+
+ """
+ name = 'IPython Partial Traceback'
+
+ tokens = {
+ 'root': [
+ # Tracebacks for syntax errors have a different style.
+ # For both types of tracebacks, we mark the first line with
+ # Generic.Traceback. For syntax errors, we mark the filename
+ # as we mark the filenames for non-syntax tracebacks.
+ #
+ # These two regexps define how IPythonConsoleLexer finds a
+ # traceback.
+ #
+ ## Non-syntax traceback
+ (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
+ ## Syntax traceback
+ (r'^( File)(.*)(, line )(\d+\n)',
+ bygroups(Generic.Traceback, Name.Namespace,
+ Generic.Traceback, Literal.Number.Integer)),
+
+ # (Exception Identifier)(Whitespace)(Traceback Message)
+ (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
+ bygroups(Name.Exception, Generic.Whitespace, Text)),
+ # (Module/Filename)(Text)(Callee)(Function Signature)
+ # Better options for callee and function signature?
+ (r'(.*)( in )(.*)(\(.*\)\n)',
+ bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
+ # Regular line: (Whitespace)(Line Number)(Python Code)
+ (r'(\s*?)(\d+)(.*?\n)',
+ bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
+ # Emphasized line: (Arrow)(Line Number)(Python Code)
+ # Using Exception token so arrow color matches the Exception.
+ (r'(-*>?\s?)(\d+)(.*?\n)',
+ bygroups(Name.Exception, Literal.Number.Integer, Other)),
+ # (Exception Identifier)(Message)
+ (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
+ bygroups(Name.Exception, Text)),
+ # Tag everything else as Other, will be handled later.
+ (r'.*\n', Other),
+ ],
+ }
+
+
+class IPythonTracebackLexer(DelegatingLexer):
+ """
+ IPython traceback lexer.
+
+ For doctests, the tracebacks can be snipped as much as desired with the
+ exception to the lines that designate a traceback. For non-syntax error
+ tracebacks, this is the line of hyphens. For syntax error tracebacks,
+ this is the line which lists the File and line number.
+
+ """
+ # The lexer inherits from DelegatingLexer. The "root" lexer is an
+ # appropriate IPython lexer, which depends on the value of the boolean
+ # `python3`. First, we parse with the partial IPython traceback lexer.
+ # Then, any code marked with the "Other" token is delegated to the root
+ # lexer.
+ #
+ name = 'IPython Traceback'
+ aliases = ['ipythontb']
+
+ def __init__(self, **options):
+ """
+ A subclass of `DelegatingLexer` which delegates to the appropriate to either IPyLexer,
+ IPythonPartialTracebackLexer.
+ """
+ # note we need a __init__ doc, as otherwise it inherits the doc from the super class
+ # which will fail the documentation build as it references section of the pygments docs that
+ # do not exists when building IPython's docs.
+ self.python3 = get_bool_opt(options, 'python3', False)
+ if self.python3:
+ self.aliases = ['ipython3tb']
+ else:
+ self.aliases = ['ipython2tb', 'ipythontb']
+
+ if self.python3:
+ IPyLexer = IPython3Lexer
+ else:
+ IPyLexer = IPythonLexer
+
+ DelegatingLexer.__init__(self, IPyLexer,
+ IPythonPartialTracebackLexer, **options)
+
+class IPythonConsoleLexer(Lexer):
+ """
+ An IPython console lexer for IPython code-blocks and doctests, such as:
+
+ .. code-block:: rst
+
+ .. code-block:: ipythonconsole
+
+ In [1]: a = 'foo'
+
+ In [2]: a
+ Out[2]: 'foo'
+
+ In [3]: print(a)
+ foo
+
+
+ Support is also provided for IPython exceptions:
+
+ .. code-block:: rst
+
+ .. code-block:: ipythonconsole
+
+ In [1]: raise Exception
+ Traceback (most recent call last):
+ ...
+ Exception
+
+ """
+ name = 'IPython console session'
+ aliases = ['ipythonconsole']
+ mimetypes = ['text/x-ipython-console']
+
+ # The regexps used to determine what is input and what is output.
+ # The default prompts for IPython are:
+ #
+ # in = 'In [#]: '
+ # continuation = ' .D.: '
+ # template = 'Out[#]: '
+ #
+ # Where '#' is the 'prompt number' or 'execution count' and 'D'
+ # is a number of dots matching the width of the execution count.
+ #
+ in1_regex = r'In \[[0-9]+\]: '
+ in2_regex = r' \.\.+\.: '
+ out_regex = r'Out\[[0-9]+\]: '
+
+ #: The regex to determine when a traceback starts.
+ ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
+
+ def __init__(self, **options):
+ """Initialize the IPython console lexer.
+
+ Parameters
+ ----------
+ python3 : bool
+ If `True`, then the console inputs are parsed using a Python 3
+ lexer. Otherwise, they are parsed using a Python 2 lexer.
+ in1_regex : RegexObject
+ The compiled regular expression used to detect the start
+ of inputs. Although the IPython configuration setting may have a
+ trailing whitespace, do not include it in the regex. If `None`,
+ then the default input prompt is assumed.
+ in2_regex : RegexObject
+ The compiled regular expression used to detect the continuation
+ of inputs. Although the IPython configuration setting may have a
+ trailing whitespace, do not include it in the regex. If `None`,
+ then the default input prompt is assumed.
+ out_regex : RegexObject
+ The compiled regular expression used to detect outputs. If `None`,
+ then the default output prompt is assumed.
+
+ """
+ self.python3 = get_bool_opt(options, 'python3', False)
+ if self.python3:
+ self.aliases = ['ipython3console']
+ else:
+ self.aliases = ['ipython2console', 'ipythonconsole']
+
+ in1_regex = options.get('in1_regex', self.in1_regex)
+ in2_regex = options.get('in2_regex', self.in2_regex)
+ out_regex = options.get('out_regex', self.out_regex)
+
+ # So that we can work with input and output prompts which have been
+ # rstrip'd (possibly by editors) we also need rstrip'd variants. If
+ # we do not do this, then such prompts will be tagged as 'output'.
+ # The reason we can't just use the rstrip'd variants instead is that
+ # we want any whitespace associated with the prompt to be inserted
+ # with the token. This allows formatted code to be modified so as to hide
+ # the appearance of prompts, with the whitespace included. One example
+ # use of this is in copybutton.js from the standard lib Python docs.
+ in1_regex_rstrip = in1_regex.rstrip() + '\n'
+ in2_regex_rstrip = in2_regex.rstrip() + '\n'
+ out_regex_rstrip = out_regex.rstrip() + '\n'
+
+ # Compile and save them all.
+ attrs = ['in1_regex', 'in2_regex', 'out_regex',
+ 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
+ for attr in attrs:
+ self.__setattr__(attr, re.compile(locals()[attr]))
+
+ Lexer.__init__(self, **options)
+
+ if self.python3:
+ pylexer = IPython3Lexer
+ tblexer = IPythonTracebackLexer
+ else:
+ pylexer = IPythonLexer
+ tblexer = IPythonTracebackLexer
+
+ self.pylexer = pylexer(**options)
+ self.tblexer = tblexer(**options)
+
+ self.reset()
+
+ def reset(self):
+ self.mode = 'output'
+ self.index = 0
+ self.buffer = u''
+ self.insertions = []
+
+ def buffered_tokens(self):
+ """
+ Generator of unprocessed tokens after doing insertions and before
+ changing to a new state.
+
+ """
+ if self.mode == 'output':
+ tokens = [(0, Generic.Output, self.buffer)]
+ elif self.mode == 'input':
+ tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
+ else: # traceback
+ tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
+
+ for i, t, v in do_insertions(self.insertions, tokens):
+ # All token indexes are relative to the buffer.
+ yield self.index + i, t, v
+
+ # Clear it all
+ self.index += len(self.buffer)
+ self.buffer = u''
+ self.insertions = []
+
+ def get_mci(self, line):
+ """
+ Parses the line and returns a 3-tuple: (mode, code, insertion).
+
+ `mode` is the next mode (or state) of the lexer, and is always equal
+ to 'input', 'output', or 'tb'.
+
+ `code` is a portion of the line that should be added to the buffer
+ corresponding to the next mode and eventually lexed by another lexer.
+ For example, `code` could be Python code if `mode` were 'input'.
+
+ `insertion` is a 3-tuple (index, token, text) representing an
+ unprocessed "token" that will be inserted into the stream of tokens
+ that are created from the buffer once we change modes. This is usually
+ the input or output prompt.
+
+ In general, the next mode depends on current mode and on the contents
+ of `line`.
+
+ """
+ # To reduce the number of regex match checks, we have multiple
+ # 'if' blocks instead of 'if-elif' blocks.
+
+ # Check for possible end of input
+ in2_match = self.in2_regex.match(line)
+ in2_match_rstrip = self.in2_regex_rstrip.match(line)
+ if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
+ in2_match_rstrip:
+ end_input = True
+ else:
+ end_input = False
+ if end_input and self.mode != 'tb':
+ # Only look for an end of input when not in tb mode.
+ # An ellipsis could appear within the traceback.
+ mode = 'output'
+ code = u''
+ insertion = (0, Generic.Prompt, line)
+ return mode, code, insertion
+
+ # Check for output prompt
+ out_match = self.out_regex.match(line)
+ out_match_rstrip = self.out_regex_rstrip.match(line)
+ if out_match or out_match_rstrip:
+ mode = 'output'
+ if out_match:
+ idx = out_match.end()
+ else:
+ idx = out_match_rstrip.end()
+ code = line[idx:]
+ # Use the 'heading' token for output. We cannot use Generic.Error
+ # since it would conflict with exceptions.
+ insertion = (0, Generic.Heading, line[:idx])
+ return mode, code, insertion
+
+
+ # Check for input or continuation prompt (non stripped version)
+ in1_match = self.in1_regex.match(line)
+ if in1_match or (in2_match and self.mode != 'tb'):
+ # New input or when not in tb, continued input.
+ # We do not check for continued input when in tb since it is
+ # allowable to replace a long stack with an ellipsis.
+ mode = 'input'
+ if in1_match:
+ idx = in1_match.end()
+ else: # in2_match
+ idx = in2_match.end()
+ code = line[idx:]
+ insertion = (0, Generic.Prompt, line[:idx])
+ return mode, code, insertion
+
+ # Check for input or continuation prompt (stripped version)
+ in1_match_rstrip = self.in1_regex_rstrip.match(line)
+ if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
+ # New input or when not in tb, continued input.
+ # We do not check for continued input when in tb since it is
+ # allowable to replace a long stack with an ellipsis.
+ mode = 'input'
+ if in1_match_rstrip:
+ idx = in1_match_rstrip.end()
+ else: # in2_match
+ idx = in2_match_rstrip.end()
+ code = line[idx:]
+ insertion = (0, Generic.Prompt, line[:idx])
+ return mode, code, insertion
+
+ # Check for traceback
+ if self.ipytb_start.match(line):
+ mode = 'tb'
+ code = line
+ insertion = None
+ return mode, code, insertion
+
+ # All other stuff...
+ if self.mode in ('input', 'output'):
+ # We assume all other text is output. Multiline input that
+ # does not use the continuation marker cannot be detected.
+ # For example, the 3 in the following is clearly output:
+ #
+ # In [1]: print 3
+ # 3
+ #
+ # But the following second line is part of the input:
+ #
+ # In [2]: while True:
+ # print True
+ #
+ # In both cases, the 2nd line will be 'output'.
+ #
+ mode = 'output'
+ else:
+ mode = 'tb'
+
+ code = line
+ insertion = None
+
+ return mode, code, insertion
+
+ def get_tokens_unprocessed(self, text):
+ self.reset()
+ for match in line_re.finditer(text):
+ line = match.group()
+ mode, code, insertion = self.get_mci(line)
+
+ if mode != self.mode:
+ # Yield buffered tokens before transitioning to new mode.
+ for token in self.buffered_tokens():
+ yield token
+ self.mode = mode
+
+ if insertion:
+ self.insertions.append((len(self.buffer), [insertion]))
+ self.buffer += code
+
+ for token in self.buffered_tokens():
+ yield token
+
+class IPyLexer(Lexer):
+ r"""
+ Primary lexer for all IPython-like code.
+
+ This is a simple helper lexer. If the first line of the text begins with
+ "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
+ lexer. If not, then the entire text is parsed with an IPython lexer.
+
+ The goal is to reduce the number of lexers that are registered
+ with Pygments.
+
+ """
+ name = 'IPy session'
+ aliases = ['ipy']
+
+ def __init__(self, **options):
+ """
+ Create a new IPyLexer instance which dispatch to either an
+ IPythonCOnsoleLexer (if In prompts are present) or and IPythonLexer (if
+ In prompts are not present).
+ """
+ # init docstring is necessary for docs not to fail to build do to parent
+ # docs referenceing a section in pygments docs.
+ self.python3 = get_bool_opt(options, 'python3', False)
+ if self.python3:
+ self.aliases = ['ipy3']
+ else:
+ self.aliases = ['ipy2', 'ipy']
+
+ Lexer.__init__(self, **options)
+
+ self.IPythonLexer = IPythonLexer(**options)
+ self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
+
+ def get_tokens_unprocessed(self, text):
+ # Search for the input prompt anywhere...this allows code blocks to
+ # begin with comments as well.
+ if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
+ lex = self.IPythonConsoleLexer
+ else:
+ lex = self.IPythonLexer
+ for token in lex.get_tokens_unprocessed(text):
+ yield token
+
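+
+# Usage sketch (illustrative only): highlight an IPython console transcript
+# with the IPyLexer defined above, using Pygments' stock TerminalFormatter.
+def _example_highlight_transcript():
+    from pygments import highlight
+    from pygments.formatters import TerminalFormatter
+    session = "In [1]: %timeit sum(range(10))\n"
+    return highlight(session, IPyLexer(python3=True), TerminalFormatter())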
diff --git a/contrib/python/ipython/py3/IPython/lib/pretty.py b/contrib/python/ipython/py3/IPython/lib/pretty.py
new file mode 100644
index 0000000000..3486450786
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/lib/pretty.py
@@ -0,0 +1,953 @@
+# -*- coding: utf-8 -*-
+"""
+Python advanced pretty printer. This pretty printer is intended to
+replace the old `pprint` python module which does not allow developers
+to provide their own pretty print callbacks.
+
+This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
+
+
+Example Usage
+-------------
+
+To directly print the representation of an object use `pprint`::
+
+ from pretty import pprint
+ pprint(complex_object)
+
+To get a string of the output use `pretty`::
+
+ from pretty import pretty
+ string = pretty(complex_object)
+
+
+Extending
+---------
+
+The pretty library allows developers to add pretty printing rules for their
+own objects. This process is straightforward. All you have to do is to
+add a `_repr_pretty_` method to your object and call the methods on the
+pretty printer passed::
+
+ class MyObject(object):
+
+ def _repr_pretty_(self, p, cycle):
+ ...
+
+Here's an example for a class with a simple constructor::
+
+ class MySimpleObject:
+
+ def __init__(self, a, b, *, c=None):
+ self.a = a
+ self.b = b
+ self.c = c
+
+ def _repr_pretty_(self, p, cycle):
+ ctor = CallExpression.factory(self.__class__.__name__)
+ if self.c is None:
+ p.pretty(ctor(self.a, self.b))
+ else:
+ p.pretty(ctor(self.a, self.b, c=self.c))
+
+Here is an example implementation of a `_repr_pretty_` method for a list
+subclass::
+
+ class MyList(list):
+
+ def _repr_pretty_(self, p, cycle):
+ if cycle:
+ p.text('MyList(...)')
+ else:
+ with p.group(8, 'MyList([', '])'):
+ for idx, item in enumerate(self):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(item)
+
+The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
+react to that or the result is an infinite loop. `p.text()` just adds
+non-breaking text to the output, `p.breakable()` either adds a whitespace
+or breaks here. If you pass it an argument it's used instead of the
+default space. `p.pretty` prettyprints another object using the pretty print
+method.
+
+The first parameter to the `group` function specifies the extra indentation
+of the next line. In this example the next item will either be on the same
+line (if the items are short enough) or aligned with the right edge of the
+opening bracket of `MyList`.
+
+If you just want to indent something you can use the group function
+without open / close parameters. You can also use this code::
+
+ with p.indent(2):
+ ...
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.lib.pretty
+ :parts: 3
+
+:copyright: 2007 by Armin Ronacher.
+ Portions (c) 2009 by Robert Kern.
+:license: BSD License.
+"""
+
+from contextlib import contextmanager
+import datetime
+import os
+import re
+import sys
+import types
+from collections import deque
+from inspect import signature
+from io import StringIO
+from warnings import warn
+
+from IPython.utils.decorators import undoc
+from IPython.utils.py3compat import PYPY
+
+__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
+ 'for_type', 'for_type_by_name', 'RawText', 'RawStringLiteral', 'CallExpression']
+
+
+MAX_SEQ_LENGTH = 1000
+_re_pattern_type = type(re.compile(''))
+
+def _safe_getattr(obj, attr, default=None):
+ """Safe version of getattr.
+
+ Same as getattr, but will return ``default`` on any Exception,
+ rather than raising.
+ """
+ try:
+ return getattr(obj, attr, default)
+ except Exception:
+ return default
+
+@undoc
+class CUnicodeIO(StringIO):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ warn(("CUnicodeIO is deprecated since IPython 6.0. "
+ "Please use io.StringIO instead."),
+ DeprecationWarning, stacklevel=2)
+
+def _sorted_for_pprint(items):
+ """
+ Sort the given items for pretty printing. Since some predictable
+ sorting is better than no sorting at all, we sort on the string
+ representation if normal sorting fails.
+ """
+ items = list(items)
+ try:
+ return sorted(items)
+ except Exception:
+ try:
+ return sorted(items, key=str)
+ except Exception:
+ return items
+
+def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
+ """
+ Pretty print the object's representation.
+ """
+ stream = StringIO()
+ printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length)
+ printer.pretty(obj)
+ printer.flush()
+ return stream.getvalue()
+
+
+def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
+ """
+ Like `pretty` but print to stdout.
+ """
+ printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length)
+ printer.pretty(obj)
+ printer.flush()
+ sys.stdout.write(newline)
+ sys.stdout.flush()
+
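+# Usage sketch (illustrative only): `pretty` returns the formatted text while
+# `pprint` writes the same output to stdout; both honor `max_width`.
+def _example_pretty_vs_pprint():
+    data = {"values": list(range(30))}
+    text = pretty(data, max_width=40)
+    pprint(data, max_width=40)
+    return text
+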
+class _PrettyPrinterBase(object):
+
+ @contextmanager
+ def indent(self, indent):
+ """with statement support for indenting/dedenting."""
+ self.indentation += indent
+ try:
+ yield
+ finally:
+ self.indentation -= indent
+
+ @contextmanager
+ def group(self, indent=0, open='', close=''):
+ """like begin_group / end_group but for the with statement."""
+ self.begin_group(indent, open)
+ try:
+ yield
+ finally:
+ self.end_group(indent, close)
+
+class PrettyPrinter(_PrettyPrinterBase):
+ """
+ Base class for the `RepresentationPrinter` prettyprinter that is used to
+ generate pretty reprs of objects. Unlike the `RepresentationPrinter`,
+ this printer knows nothing about the default pprinters or the `_repr_pretty_`
+ callback method.
+ """
+
+ def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
+ self.output = output
+ self.max_width = max_width
+ self.newline = newline
+ self.max_seq_length = max_seq_length
+ self.output_width = 0
+ self.buffer_width = 0
+ self.buffer = deque()
+
+ root_group = Group(0)
+ self.group_stack = [root_group]
+ self.group_queue = GroupQueue(root_group)
+ self.indentation = 0
+
+ def _break_one_group(self, group):
+ while group.breakables:
+ x = self.buffer.popleft()
+ self.output_width = x.output(self.output, self.output_width)
+ self.buffer_width -= x.width
+ while self.buffer and isinstance(self.buffer[0], Text):
+ x = self.buffer.popleft()
+ self.output_width = x.output(self.output, self.output_width)
+ self.buffer_width -= x.width
+
+ def _break_outer_groups(self):
+ while self.max_width < self.output_width + self.buffer_width:
+ group = self.group_queue.deq()
+ if not group:
+ return
+ self._break_one_group(group)
+
+ def text(self, obj):
+ """Add literal text to the output."""
+ width = len(obj)
+ if self.buffer:
+ text = self.buffer[-1]
+ if not isinstance(text, Text):
+ text = Text()
+ self.buffer.append(text)
+ text.add(obj, width)
+ self.buffer_width += width
+ self._break_outer_groups()
+ else:
+ self.output.write(obj)
+ self.output_width += width
+
+ def breakable(self, sep=' '):
+ """
+ Add a breakable separator to the output. This does not mean that it
+ will automatically break here. If no break takes place at this position,
+ the `sep` is inserted, which defaults to a single space.
+ """
+ width = len(sep)
+ group = self.group_stack[-1]
+ if group.want_break:
+ self.flush()
+ self.output.write(self.newline)
+ self.output.write(' ' * self.indentation)
+ self.output_width = self.indentation
+ self.buffer_width = 0
+ else:
+ self.buffer.append(Breakable(sep, width, self))
+ self.buffer_width += width
+ self._break_outer_groups()
+
+ def break_(self):
+ """
+ Explicitly insert a newline into the output, maintaining correct indentation.
+ """
+ group = self.group_queue.deq()
+ if group:
+ self._break_one_group(group)
+ self.flush()
+ self.output.write(self.newline)
+ self.output.write(' ' * self.indentation)
+ self.output_width = self.indentation
+ self.buffer_width = 0
+
+
+ def begin_group(self, indent=0, open=''):
+ """
+ Begin a group.
+ The first parameter specifies the indentation for the next line (usually
+ the width of the opening text), the second the opening text. All
+ parameters are optional.
+ """
+ if open:
+ self.text(open)
+ group = Group(self.group_stack[-1].depth + 1)
+ self.group_stack.append(group)
+ self.group_queue.enq(group)
+ self.indentation += indent
+
+ def _enumerate(self, seq):
+ """like enumerate, but with an upper limit on the number of items"""
+ for idx, x in enumerate(seq):
+ if self.max_seq_length and idx >= self.max_seq_length:
+ self.text(',')
+ self.breakable()
+ self.text('...')
+ return
+ yield idx, x
+
+ def end_group(self, dedent=0, close=''):
+ """End a group. See `begin_group` for more details."""
+ self.indentation -= dedent
+ group = self.group_stack.pop()
+ if not group.breakables:
+ self.group_queue.remove(group)
+ if close:
+ self.text(close)
+
+ def flush(self):
+ """Flush data that is left in the buffer."""
+ for data in self.buffer:
+ self.output_width += data.output(self.output, self.output_width)
+ self.buffer.clear()
+ self.buffer_width = 0
+
+
+def _get_mro(obj_class):
+ """ Get a reasonable method resolution order of a class and its superclasses
+ for both old-style and new-style classes.
+ """
+ if not hasattr(obj_class, '__mro__'):
+ # Old-style class. Mix in object to make a fake new-style class.
+ try:
+ obj_class = type(obj_class.__name__, (obj_class, object), {})
+ except TypeError:
+ # Old-style extension type that does not descend from object.
+ # FIXME: try to construct a more thorough MRO.
+ mro = [obj_class]
+ else:
+ mro = obj_class.__mro__[1:-1]
+ else:
+ mro = obj_class.__mro__
+ return mro
+
+
+class RepresentationPrinter(PrettyPrinter):
+ """
+ Special pretty printer that has a `pretty` method that calls the pretty
+ printer for a python object.
+
+ This class stores processing data on `self` so you must *never* use
+ this class in a threaded environment. Always lock it or reinstantiate
+ it.
+
+ Instances also have a verbose flag callbacks can access to control their
+ output. For example the default instance repr prints all attributes and
+ methods that are not prefixed by an underscore if the printer is in
+ verbose mode.
+ """
+
+ def __init__(self, output, verbose=False, max_width=79, newline='\n',
+ singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
+ max_seq_length=MAX_SEQ_LENGTH):
+
+ PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
+ self.verbose = verbose
+ self.stack = []
+ if singleton_pprinters is None:
+ singleton_pprinters = _singleton_pprinters.copy()
+ self.singleton_pprinters = singleton_pprinters
+ if type_pprinters is None:
+ type_pprinters = _type_pprinters.copy()
+ self.type_pprinters = type_pprinters
+ if deferred_pprinters is None:
+ deferred_pprinters = _deferred_type_pprinters.copy()
+ self.deferred_pprinters = deferred_pprinters
+
+ def pretty(self, obj):
+ """Pretty print the given object."""
+ obj_id = id(obj)
+ cycle = obj_id in self.stack
+ self.stack.append(obj_id)
+ self.begin_group()
+ try:
+ obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
+ # First try to find registered singleton printers for the type.
+ try:
+ printer = self.singleton_pprinters[obj_id]
+ except (TypeError, KeyError):
+ pass
+ else:
+ return printer(obj, self, cycle)
+ # Next walk the mro and check for either:
+ # 1) a registered printer
+ # 2) a _repr_pretty_ method
+ for cls in _get_mro(obj_class):
+ if cls in self.type_pprinters:
+ # printer registered in self.type_pprinters
+ return self.type_pprinters[cls](obj, self, cycle)
+ else:
+ # deferred printer
+ printer = self._in_deferred_types(cls)
+ if printer is not None:
+ return printer(obj, self, cycle)
+ else:
+ # Finally look for special method names.
+ # Some objects automatically create any requested
+ # attribute. Try to ignore most of them by checking for
+ # callability.
+ if '_repr_pretty_' in cls.__dict__:
+ meth = cls._repr_pretty_
+ if callable(meth):
+ return meth(obj, self, cycle)
+ if cls is not object \
+ and callable(cls.__dict__.get('__repr__')):
+ return _repr_pprint(obj, self, cycle)
+
+ return _default_pprint(obj, self, cycle)
+ finally:
+ self.end_group()
+ self.stack.pop()
+
+ def _in_deferred_types(self, cls):
+ """
+ Check if the given class is specified in the deferred type registry.
+
+ Returns the printer from the registry if it exists, and None if the
+ class is not in the registry. Successful matches will be moved to the
+ regular type registry for future use.
+ """
+ mod = _safe_getattr(cls, '__module__', None)
+ name = _safe_getattr(cls, '__name__', None)
+ key = (mod, name)
+ printer = None
+ if key in self.deferred_pprinters:
+ # Move the printer over to the regular registry.
+ printer = self.deferred_pprinters.pop(key)
+ self.type_pprinters[cls] = printer
+ return printer
+
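+
+# A minimal usage sketch (illustrative only, not part of the public API): the
+# printer keeps processing state on ``self``, so a fresh instance is created
+# per pretty-printing pass, which is what the module-level ``pprint()`` helper
+# does as well.
+def _example_representation_printer():  # pragma: no cover - illustrative sketch
+    from io import StringIO
+    stream = StringIO()
+    printer = RepresentationPrinter(stream, verbose=False, max_width=79)
+    printer.pretty({'numbers': [1, 2, 3], 'nested': {'a': (1,)}})
+    printer.flush()
+    return stream.getvalue()
+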
+
+class Printable(object):
+
+ def output(self, stream, output_width):
+ return output_width
+
+
+class Text(Printable):
+
+ def __init__(self):
+ self.objs = []
+ self.width = 0
+
+ def output(self, stream, output_width):
+ for obj in self.objs:
+ stream.write(obj)
+ return output_width + self.width
+
+ def add(self, obj, width):
+ self.objs.append(obj)
+ self.width += width
+
+
+class Breakable(Printable):
+
+ def __init__(self, seq, width, pretty):
+ self.obj = seq
+ self.width = width
+ self.pretty = pretty
+ self.indentation = pretty.indentation
+ self.group = pretty.group_stack[-1]
+ self.group.breakables.append(self)
+
+ def output(self, stream, output_width):
+ self.group.breakables.popleft()
+ if self.group.want_break:
+ stream.write(self.pretty.newline)
+ stream.write(' ' * self.indentation)
+ return self.indentation
+ if not self.group.breakables:
+ self.pretty.group_queue.remove(self.group)
+ stream.write(self.obj)
+ return output_width + self.width
+
+
+class Group(Printable):
+
+ def __init__(self, depth):
+ self.depth = depth
+ self.breakables = deque()
+ self.want_break = False
+
+
+class GroupQueue(object):
+
+ def __init__(self, *groups):
+ self.queue = []
+ for group in groups:
+ self.enq(group)
+
+ def enq(self, group):
+ depth = group.depth
+ while depth > len(self.queue) - 1:
+ self.queue.append([])
+ self.queue[depth].append(group)
+
+ def deq(self):
+ for stack in self.queue:
+ for idx, group in enumerate(reversed(stack)):
+ if group.breakables:
+ del stack[idx]
+ group.want_break = True
+ return group
+ for group in stack:
+ group.want_break = True
+ del stack[:]
+
+ def remove(self, group):
+ try:
+ self.queue[group.depth].remove(group)
+ except ValueError:
+ pass
+
+
+class RawText:
+ """ Object such that ``p.pretty(RawText(value))`` is the same as ``p.text(value)``.
+
+ An example usage of this would be to show a list as binary numbers, using
+ ``p.pretty([RawText(bin(i)) for i in integers])``.
+ """
+ def __init__(self, value):
+ self.value = value
+
+ def _repr_pretty_(self, p, cycle):
+ p.text(self.value)
+
+
+class CallExpression:
+ """ Object which emits a line-wrapped call expression in the form `__name(*args, **kwargs)` """
+ def __init__(__self, __name, *args, **kwargs):
+        # dunders are used to avoid clashes with kwargs, as Python's name
+        # mangling will kick in.
+ self = __self
+ self.name = __name
+ self.args = args
+ self.kwargs = kwargs
+
+ @classmethod
+ def factory(cls, name):
+ def inner(*args, **kwargs):
+ return cls(name, *args, **kwargs)
+ return inner
+
+ def _repr_pretty_(self, p, cycle):
+        # dunders are used to avoid clashes with kwargs, as Python's name
+        # mangling will kick in.
+
+ started = False
+ def new_item():
+ nonlocal started
+ if started:
+ p.text(",")
+ p.breakable()
+ started = True
+
+ prefix = self.name + "("
+ with p.group(len(prefix), prefix, ")"):
+ for arg in self.args:
+ new_item()
+ p.pretty(arg)
+ for arg_name, arg in self.kwargs.items():
+ new_item()
+ arg_prefix = arg_name + "="
+ with p.group(len(arg_prefix), arg_prefix):
+ p.pretty(arg)
+
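+
+# Illustrative sketch: ``CallExpression`` is what several printers below (e.g.
+# ``_re_pattern_pprint`` and ``_exception_pprint``) use to render
+# constructor-style output. The name ``mymod.make`` is hypothetical.
+def _example_call_expression(p):  # pragma: no cover - illustrative sketch
+    make = CallExpression.factory('mymod.make')
+    # Renders roughly as ``mymod.make(1, 2, flag=True)``, wrapping across
+    # lines when the expression exceeds the printer's max_width.
+    p.pretty(make(1, 2, flag=True))
+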
+
+class RawStringLiteral:
+    """ Wrapper that shows a string with an `r` prefix """
+ def __init__(self, value):
+ self.value = value
+
+ def _repr_pretty_(self, p, cycle):
+ base_repr = repr(self.value)
+ if base_repr[:1] in 'uU':
+ base_repr = base_repr[1:]
+ prefix = 'ur'
+ else:
+ prefix = 'r'
+ base_repr = prefix + base_repr.replace('\\\\', '\\')
+ p.text(base_repr)
+
+
+def _default_pprint(obj, p, cycle):
+ """
+    The default print function. Used if an object does not provide a printer
+    of its own and it is not one of the builtin objects.
+ """
+ klass = _safe_getattr(obj, '__class__', None) or type(obj)
+ if _safe_getattr(klass, '__repr__', None) is not object.__repr__:
+ # A user-provided repr. Find newlines and replace them with p.break_()
+ _repr_pprint(obj, p, cycle)
+ return
+ p.begin_group(1, '<')
+ p.pretty(klass)
+ p.text(' at 0x%x' % id(obj))
+ if cycle:
+ p.text(' ...')
+ elif p.verbose:
+ first = True
+ for key in dir(obj):
+ if not key.startswith('_'):
+ try:
+ value = getattr(obj, key)
+ except AttributeError:
+ continue
+ if isinstance(value, types.MethodType):
+ continue
+ if not first:
+ p.text(',')
+ p.breakable()
+ p.text(key)
+ p.text('=')
+ step = len(key) + 1
+ p.indentation += step
+ p.pretty(value)
+ p.indentation -= step
+ first = False
+ p.end_group(1, '>')
+
+
+def _seq_pprinter_factory(start, end):
+ """
+ Factory that returns a pprint function useful for sequences. Used by
+ the default pprint for tuples and lists.
+ """
+ def inner(obj, p, cycle):
+ if cycle:
+ return p.text(start + '...' + end)
+ step = len(start)
+ p.begin_group(step, start)
+ for idx, x in p._enumerate(obj):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(x)
+ if len(obj) == 1 and isinstance(obj, tuple):
+ # Special case for 1-item tuples.
+ p.text(',')
+ p.end_group(step, end)
+ return inner
+
+
+def _set_pprinter_factory(start, end):
+ """
+ Factory that returns a pprint function useful for sets and frozensets.
+ """
+ def inner(obj, p, cycle):
+ if cycle:
+ return p.text(start + '...' + end)
+ if len(obj) == 0:
+ # Special case.
+ p.text(type(obj).__name__ + '()')
+ else:
+ step = len(start)
+ p.begin_group(step, start)
+ # Like dictionary keys, we will try to sort the items if there aren't too many
+ if not (p.max_seq_length and len(obj) >= p.max_seq_length):
+ items = _sorted_for_pprint(obj)
+ else:
+ items = obj
+ for idx, x in p._enumerate(items):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(x)
+ p.end_group(step, end)
+ return inner
+
+
+def _dict_pprinter_factory(start, end):
+ """
+ Factory that returns a pprint function used by the default pprint of
+ dicts and dict proxies.
+ """
+ def inner(obj, p, cycle):
+ if cycle:
+ return p.text('{...}')
+ step = len(start)
+ p.begin_group(step, start)
+ keys = obj.keys()
+ for idx, key in p._enumerate(keys):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(key)
+ p.text(': ')
+ p.pretty(obj[key])
+ p.end_group(step, end)
+ return inner
+
+
+def _super_pprint(obj, p, cycle):
+ """The pprint for the super type."""
+ p.begin_group(8, '<super: ')
+ p.pretty(obj.__thisclass__)
+ p.text(',')
+ p.breakable()
+ if PYPY: # In PyPy, super() objects don't have __self__ attributes
+ dself = obj.__repr__.__self__
+ p.pretty(None if dself is obj else dself)
+ else:
+ p.pretty(obj.__self__)
+ p.end_group(8, '>')
+
+
+
+class _ReFlags:
+ def __init__(self, value):
+ self.value = value
+
+ def _repr_pretty_(self, p, cycle):
+ done_one = False
+ for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
+ 'UNICODE', 'VERBOSE', 'DEBUG'):
+ if self.value & getattr(re, flag):
+ if done_one:
+ p.text('|')
+ p.text('re.' + flag)
+ done_one = True
+
+
+def _re_pattern_pprint(obj, p, cycle):
+ """The pprint function for regular expression patterns."""
+ re_compile = CallExpression.factory('re.compile')
+ if obj.flags:
+ p.pretty(re_compile(RawStringLiteral(obj.pattern), _ReFlags(obj.flags)))
+ else:
+ p.pretty(re_compile(RawStringLiteral(obj.pattern)))
+
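+# Note (illustrative): with the two wrappers above, a compiled pattern such as
+# ``re.compile(r'\s+', re.MULTILINE)`` pretty-prints as a line-wrappable
+# ``re.compile(...)`` call expression, with the pattern shown as a raw-string
+# literal and any flags OR'ed together (e.g. ``re.MULTILINE|re.UNICODE``).
+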
+
+def _types_simplenamespace_pprint(obj, p, cycle):
+ """The pprint function for types.SimpleNamespace."""
+ namespace = CallExpression.factory('namespace')
+ if cycle:
+ p.pretty(namespace(RawText("...")))
+ else:
+ p.pretty(namespace(**obj.__dict__))
+
+
+def _type_pprint(obj, p, cycle):
+ """The pprint for classes and types."""
+ # Heap allocated types might not have the module attribute,
+ # and others may set it to None.
+
+ # Checks for a __repr__ override in the metaclass. Can't compare the
+ # type(obj).__repr__ directly because in PyPy the representation function
+ # inherited from type isn't the same type.__repr__
+ if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]:
+ _repr_pprint(obj, p, cycle)
+ return
+
+ mod = _safe_getattr(obj, '__module__', None)
+ try:
+ name = obj.__qualname__
+ if not isinstance(name, str):
+ # This can happen if the type implements __qualname__ as a property
+ # or other descriptor in Python 2.
+ raise Exception("Try __name__")
+ except Exception:
+ name = obj.__name__
+ if not isinstance(name, str):
+ name = '<unknown type>'
+
+ if mod in (None, '__builtin__', 'builtins', 'exceptions'):
+ p.text(name)
+ else:
+ p.text(mod + '.' + name)
+
+
+def _repr_pprint(obj, p, cycle):
+ """A pprint that just redirects to the normal repr function."""
+ # Find newlines and replace them with p.break_()
+ output = repr(obj)
+ lines = output.splitlines()
+ with p.group():
+ for idx, output_line in enumerate(lines):
+ if idx:
+ p.break_()
+ p.text(output_line)
+
+
+def _function_pprint(obj, p, cycle):
+ """Base pprint for all functions and builtin functions."""
+ name = _safe_getattr(obj, '__qualname__', obj.__name__)
+ mod = obj.__module__
+ if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
+ name = mod + '.' + name
+ try:
+ func_def = name + str(signature(obj))
+ except ValueError:
+ func_def = name
+ p.text('<function %s>' % func_def)
+
+
+def _exception_pprint(obj, p, cycle):
+ """Base pprint for all exceptions."""
+ name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
+ if obj.__class__.__module__ not in ('exceptions', 'builtins'):
+ name = '%s.%s' % (obj.__class__.__module__, name)
+
+ p.pretty(CallExpression(name, *getattr(obj, 'args', ())))
+
+
+#: the exception base
+try:
+ _exception_base = BaseException
+except NameError:
+ _exception_base = Exception
+
+
+#: printers for builtin types
+_type_pprinters = {
+ int: _repr_pprint,
+ float: _repr_pprint,
+ str: _repr_pprint,
+ tuple: _seq_pprinter_factory('(', ')'),
+ list: _seq_pprinter_factory('[', ']'),
+ dict: _dict_pprinter_factory('{', '}'),
+ set: _set_pprinter_factory('{', '}'),
+ frozenset: _set_pprinter_factory('frozenset({', '})'),
+ super: _super_pprint,
+ _re_pattern_type: _re_pattern_pprint,
+ type: _type_pprint,
+ types.FunctionType: _function_pprint,
+ types.BuiltinFunctionType: _function_pprint,
+ types.MethodType: _repr_pprint,
+ types.SimpleNamespace: _types_simplenamespace_pprint,
+ datetime.datetime: _repr_pprint,
+ datetime.timedelta: _repr_pprint,
+ _exception_base: _exception_pprint
+}
+
+# render os.environ like a dict
+_env_type = type(os.environ)
+# future-proof in case os.environ becomes a plain dict?
+if _env_type is not dict:
+ _type_pprinters[_env_type] = _dict_pprinter_factory('environ{', '}')
+
+try:
+ # In PyPy, types.DictProxyType is dict, setting the dictproxy printer
+ # using dict.setdefault avoids overwriting the dict printer
+ _type_pprinters.setdefault(types.DictProxyType,
+ _dict_pprinter_factory('dict_proxy({', '})'))
+ _type_pprinters[types.ClassType] = _type_pprint
+ _type_pprinters[types.SliceType] = _repr_pprint
+except AttributeError: # Python 3
+ _type_pprinters[types.MappingProxyType] = \
+ _dict_pprinter_factory('mappingproxy({', '})')
+ _type_pprinters[slice] = _repr_pprint
+
+_type_pprinters[range] = _repr_pprint
+_type_pprinters[bytes] = _repr_pprint
+
+#: printers for types specified by name
+_deferred_type_pprinters = {
+}
+
+def for_type(typ, func):
+ """
+ Add a pretty printer for a given type.
+ """
+ oldfunc = _type_pprinters.get(typ, None)
+ if func is not None:
+ # To support easy restoration of old pprinters, we need to ignore Nones.
+ _type_pprinters[typ] = func
+ return oldfunc
+
+def for_type_by_name(type_module, type_name, func):
+ """
+ Add a pretty printer for a type specified by the module and name of a type
+ rather than the type object itself.
+ """
+ key = (type_module, type_name)
+ oldfunc = _deferred_type_pprinters.get(key, None)
+ if func is not None:
+ # To support easy restoration of old pprinters, we need to ignore Nones.
+ _deferred_type_pprinters[key] = func
+ return oldfunc
+
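+
+# Illustrative sketch: registering a printer for a type identified by module
+# and class name; the registration is deferred until the type is first seen.
+# Both helpers return the previously registered printer (or None), which can
+# later be passed back in to restore it (None values are ignored, see above).
+def _example_register_fraction_printer():  # pragma: no cover - illustrative sketch
+    def _fraction_pprint(obj, p, cycle):
+        p.text('%s/%s' % (obj.numerator, obj.denominator))
+
+    return for_type_by_name('fractions', 'Fraction', _fraction_pprint)
+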
+
+#: printers for the default singletons
+_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
+ NotImplemented]), _repr_pprint)
+
+
+def _defaultdict_pprint(obj, p, cycle):
+ cls_ctor = CallExpression.factory(obj.__class__.__name__)
+ if cycle:
+ p.pretty(cls_ctor(RawText("...")))
+ else:
+ p.pretty(cls_ctor(obj.default_factory, dict(obj)))
+
+def _ordereddict_pprint(obj, p, cycle):
+ cls_ctor = CallExpression.factory(obj.__class__.__name__)
+ if cycle:
+ p.pretty(cls_ctor(RawText("...")))
+ elif len(obj):
+ p.pretty(cls_ctor(list(obj.items())))
+ else:
+ p.pretty(cls_ctor())
+
+def _deque_pprint(obj, p, cycle):
+ cls_ctor = CallExpression.factory(obj.__class__.__name__)
+ if cycle:
+ p.pretty(cls_ctor(RawText("...")))
+ elif obj.maxlen is not None:
+ p.pretty(cls_ctor(list(obj), maxlen=obj.maxlen))
+ else:
+ p.pretty(cls_ctor(list(obj)))
+
+def _counter_pprint(obj, p, cycle):
+ cls_ctor = CallExpression.factory(obj.__class__.__name__)
+ if cycle:
+ p.pretty(cls_ctor(RawText("...")))
+ elif len(obj):
+ p.pretty(cls_ctor(dict(obj.most_common())))
+ else:
+ p.pretty(cls_ctor())
+
+
+def _userlist_pprint(obj, p, cycle):
+ cls_ctor = CallExpression.factory(obj.__class__.__name__)
+ if cycle:
+ p.pretty(cls_ctor(RawText("...")))
+ else:
+ p.pretty(cls_ctor(obj.data))
+
+
+for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
+for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
+for_type_by_name('collections', 'deque', _deque_pprint)
+for_type_by_name('collections', 'Counter', _counter_pprint)
+for_type_by_name("collections", "UserList", _userlist_pprint)
+
+if __name__ == '__main__':
+ from random import randrange
+ class Foo(object):
+ def __init__(self):
+ self.foo = 1
+ self.bar = re.compile(r'\s+')
+ self.blub = dict.fromkeys(range(30), randrange(1, 40))
+ self.hehe = 23424.234234
+ self.list = ["blub", "blah", self]
+
+ def get_foo(self):
+ print("foo")
+
+ pprint(Foo(), verbose=True)
diff --git a/contrib/python/ipython/py3/IPython/paths.py b/contrib/python/ipython/py3/IPython/paths.py
new file mode 100644
index 0000000000..cc6408ca43
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/paths.py
@@ -0,0 +1,125 @@
+"""Find files and directories which IPython uses.
+"""
+import os.path
+import tempfile
+from warnings import warn
+
+import IPython
+from IPython.utils.importstring import import_item
+from IPython.utils.path import (
+ get_home_dir,
+ get_xdg_dir,
+ get_xdg_cache_dir,
+ compress_user,
+ _writable_dir,
+ ensure_dir_exists,
+)
+
+
+def get_ipython_dir() -> str:
+ """Get the IPython directory for this platform and user.
+
+ This uses the logic in `get_home_dir` to find the home directory
+ and then adds .ipython to the end of the path.
+ """
+
+ env = os.environ
+ pjoin = os.path.join
+
+
+ ipdir_def = '.ipython'
+
+ home_dir = get_home_dir()
+ xdg_dir = get_xdg_dir()
+
+ if 'IPYTHON_DIR' in env:
+ warn('The environment variable IPYTHON_DIR is deprecated since IPython 3.0. '
+ 'Please use IPYTHONDIR instead.', DeprecationWarning)
+ ipdir = env.get('IPYTHONDIR', env.get('IPYTHON_DIR', None))
+ if ipdir is None:
+ # not set explicitly, use ~/.ipython
+ ipdir = pjoin(home_dir, ipdir_def)
+ if xdg_dir:
+ # Several IPython versions (up to 1.x) defaulted to .config/ipython
+ # on Linux. We have decided to go back to using .ipython everywhere
+ xdg_ipdir = pjoin(xdg_dir, 'ipython')
+
+ if _writable_dir(xdg_ipdir):
+ cu = compress_user
+ if os.path.exists(ipdir):
+ warn(('Ignoring {0} in favour of {1}. Remove {0} to '
+ 'get rid of this message').format(cu(xdg_ipdir), cu(ipdir)))
+ elif os.path.islink(xdg_ipdir):
+ warn(('{0} is deprecated. Move link to {1} to '
+ 'get rid of this message').format(cu(xdg_ipdir), cu(ipdir)))
+ else:
+ ipdir = xdg_ipdir
+
+ ipdir = os.path.normpath(os.path.expanduser(ipdir))
+
+ if os.path.exists(ipdir) and not _writable_dir(ipdir):
+ # ipdir exists, but is not writable
+ warn("IPython dir '{0}' is not a writable location,"
+ " using a temp directory.".format(ipdir))
+ ipdir = tempfile.mkdtemp()
+ elif not os.path.exists(ipdir):
+ parent = os.path.dirname(ipdir)
+ if not _writable_dir(parent):
+ # ipdir does not exist and parent isn't writable
+ warn("IPython parent '{0}' is not a writable location,"
+ " using a temp directory.".format(parent))
+ ipdir = tempfile.mkdtemp()
+ else:
+ os.makedirs(ipdir, exist_ok=True)
+    assert isinstance(ipdir, str), "all path manipulation should be str(unicode), but it is not."
+ return ipdir
+
+
+def get_ipython_cache_dir() -> str:
+    """Get the cache directory; it is created if it does not exist."""
+ xdgdir = get_xdg_cache_dir()
+ if xdgdir is None:
+ return get_ipython_dir()
+ ipdir = os.path.join(xdgdir, "ipython")
+ if not os.path.exists(ipdir) and _writable_dir(xdgdir):
+ ensure_dir_exists(ipdir)
+ elif not _writable_dir(xdgdir):
+ return get_ipython_dir()
+
+ return ipdir
+
+
+def get_ipython_package_dir() -> str:
+ """Get the base directory where IPython itself is installed."""
+ ipdir = os.path.dirname(IPython.__file__)
+ assert isinstance(ipdir, str)
+ return ipdir
+
+
+def get_ipython_module_path(module_str):
+ """Find the path to an IPython module in this version of IPython.
+
+ This will always find the version of the module that is in this importable
+ IPython package. This will always return the path to the ``.py``
+ version of the module.
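+
+    For example (the resolved location depends on where IPython is installed)::
+
+        get_ipython_module_path('IPython.core.debugger')
+        # returns something like '<site-packages>/IPython/core/debugger.py'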
+ """
+ if module_str == 'IPython':
+ return os.path.join(get_ipython_package_dir(), '__init__.py')
+ mod = import_item(module_str)
+ the_path = mod.__file__.replace('.pyc', '.py')
+ the_path = the_path.replace('.pyo', '.py')
+ return the_path
+
+
+def locate_profile(profile='default'):
+ """Find the path to the folder associated with a given profile.
+
+ I.e. find $IPYTHONDIR/profile_whatever.
+ """
+ from IPython.core.profiledir import ProfileDir, ProfileDirError
+ try:
+ pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
+ except ProfileDirError as e:
+ # IOError makes more sense when people are expecting a path
+ raise IOError("Couldn't find profile %r" % profile) from e
+ return pd.location
diff --git a/contrib/python/ipython/py3/IPython/py.typed b/contrib/python/ipython/py3/IPython/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/py.typed
diff --git a/contrib/python/ipython/py3/IPython/sphinxext/__init__.py b/contrib/python/ipython/py3/IPython/sphinxext/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/sphinxext/__init__.py
diff --git a/contrib/python/ipython/py3/IPython/sphinxext/custom_doctests.py b/contrib/python/ipython/py3/IPython/sphinxext/custom_doctests.py
new file mode 100644
index 0000000000..75c2a25ccb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/sphinxext/custom_doctests.py
@@ -0,0 +1,155 @@
+"""
+Handlers for IPythonDirective's @doctest pseudo-decorator.
+
+The Sphinx extension that provides support for embedded IPython code provides
+a pseudo-decorator @doctest, which treats the input/output block as a
+doctest, raising a RuntimeError during doc generation if the actual output
+(after running the input) does not match the expected output.
+
+An example usage is:
+
+.. code-block:: rst
+
+ .. ipython::
+
+ In [1]: x = 1
+
+ @doctest
+ In [2]: x + 2
+ Out[3]: 3
+
+One can also provide arguments to the decorator. The first argument should be
+the name of a custom handler. The specification of any other arguments is
+determined by the handler. For example,
+
+.. code-block:: rst
+
+ .. ipython::
+
+ @doctest float
+ In [154]: 0.1 + 0.2
+ Out[154]: 0.3
+
+allows the actual output ``0.30000000000000004`` to match the expected output
+due to a comparison with `np.allclose`.
+
+This module contains handlers for the @doctest pseudo-decorator. Handlers
+should have the following function signature::
+
+ handler(sphinx_shell, args, input_lines, found, submitted)
+
+where `sphinx_shell` is the embedded Sphinx shell, `args` contains the list
+of arguments that follow: '@doctest handler_name', `input_lines` contains
+a list of the lines relevant to the current doctest, `found` is a string
+containing the output from the IPython shell, and `submitted` is a string
+containing the expected output from the IPython shell.
+
+Handlers must be registered in the `doctests` dict at the end of this module.
+
+"""
+
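+# Illustrative sketch of a handler with the signature described above. It is
+# not registered below (only 'float' is); to activate it, one would add, e.g.,
+# ``doctests['exact'] = _example_exact_doctest`` to the dict at the end of
+# this module.
+def _example_exact_doctest(sphinx_shell, args, input_lines, found, submitted):  # pragma: no cover
+    """Fail unless the actual and expected outputs match exactly."""
+    if found.strip() != submitted.strip():
+        raise RuntimeError(
+            "exact doctest failure on input line(s):\n%s" % "\n".join(input_lines)
+        )
+
+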
+def str_to_array(s):
+ """
+ Simplistic converter of strings from repr to float NumPy arrays.
+
+ If the repr representation has ellipsis in it, then this will fail.
+
+ Parameters
+ ----------
+ s : str
+ The repr version of a NumPy array.
+
+ Examples
+ --------
+ >>> s = "array([ 0.3, inf, nan])"
+ >>> a = str_to_array(s)
+
+ """
+ import numpy as np
+
+ # Need to make sure eval() knows about inf and nan.
+ # This also assumes default printoptions for NumPy.
+ from numpy import inf, nan
+
+ if s.startswith(u'array'):
+ # Remove array( and )
+ s = s[6:-1]
+
+ if s.startswith(u'['):
+ a = np.array(eval(s), dtype=float)
+ else:
+        # Assume it's a regular float. Force 1D so we can index into it.
+ a = np.atleast_1d(float(s))
+ return a
+
+def float_doctest(sphinx_shell, args, input_lines, found, submitted):
+ """
+    Doctest handler that allows the actual output to vary slightly from the
+    expected output.
+
+ Here is how it might appear in an rst file:
+
+ .. code-block:: rst
+
+ .. ipython::
+
+ @doctest float
+ In [1]: 0.1 + 0.2
+ Out[1]: 0.3
+
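+    The relative and absolute tolerances can also be passed explicitly as two
+    extra arguments, e.g. ``@doctest float 1e-05 1e-08``; if either is given,
+    both must be.
+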
+ """
+ import numpy as np
+
+ if len(args) == 2:
+ rtol = 1e-05
+ atol = 1e-08
+ else:
+ # Both must be specified if any are specified.
+ try:
+ rtol = float(args[2])
+ atol = float(args[3])
+        except IndexError as e:
+            # Keep the original exception for chaining; do not rebind ``e``.
+            msg = ("Both `rtol` and `atol` must be specified "
+                   "if either is specified: {0}".format(args))
+            raise IndexError(msg) from e
+
+ try:
+ submitted = str_to_array(submitted)
+ found = str_to_array(found)
+ except:
+ # For example, if the array is huge and there are ellipsis in it.
+ error = True
+ else:
+ found_isnan = np.isnan(found)
+ submitted_isnan = np.isnan(submitted)
+ error = not np.allclose(found_isnan, submitted_isnan)
+ error |= not np.allclose(found[~found_isnan],
+ submitted[~submitted_isnan],
+ rtol=rtol, atol=atol)
+
+ TAB = ' ' * 4
+ directive = sphinx_shell.directive
+ if directive is None:
+ source = 'Unavailable'
+ content = 'Unavailable'
+ else:
+ source = directive.state.document.current_source
+ # Add tabs and make into a single string.
+ content = '\n'.join([TAB + line for line in directive.content])
+
+ if error:
+
+ e = ('doctest float comparison failure\n\n'
+ 'Document source: {0}\n\n'
+ 'Raw content: \n{1}\n\n'
+ 'On input line(s):\n{TAB}{2}\n\n'
+ 'we found output:\n{TAB}{3}\n\n'
+ 'instead of the expected:\n{TAB}{4}\n\n')
+ e = e.format(source, content, '\n'.join(input_lines), repr(found),
+ repr(submitted), TAB=TAB)
+ raise RuntimeError(e)
+
+# dict of allowable doctest handlers. The key represents the first argument
+# that must be given to @doctest in order to activate the handler.
+doctests = {
+ 'float': float_doctest,
+}
diff --git a/contrib/python/ipython/py3/IPython/sphinxext/ipython_console_highlighting.py b/contrib/python/ipython/py3/IPython/sphinxext/ipython_console_highlighting.py
new file mode 100644
index 0000000000..b93a151fb3
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/sphinxext/ipython_console_highlighting.py
@@ -0,0 +1,28 @@
+"""
+reST directive for syntax-highlighting ipython interactive sessions.
+
+"""
+
+from sphinx import highlighting
+from IPython.lib.lexers import IPyLexer
+
+def setup(app):
+ """Setup as a sphinx extension."""
+
+    # This is only a lexer, so registering it below with Sphinx's highlighting
+    # module appears sufficient. But if somebody knows what the right API usage
+    # should be to do that via sphinx, by all means fix it here. At least
+    # having this setup() function suppresses the sphinx warning we'd get
+    # without it.
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
+
+# Register the lexers with Sphinx's syntax highlighting machinery.
+# Alternatively, we could register them with pygments itself; that would
+# require using setuptools entrypoints: http://pygments.org/docs/plugins
+
+ipy2 = IPyLexer(python3=False)
+ipy3 = IPyLexer(python3=True)
+
+highlighting.lexers['ipython'] = ipy2
+highlighting.lexers['ipython2'] = ipy2
+highlighting.lexers['ipython3'] = ipy3
diff --git a/contrib/python/ipython/py3/IPython/sphinxext/ipython_directive.py b/contrib/python/ipython/py3/IPython/sphinxext/ipython_directive.py
new file mode 100644
index 0000000000..c428e7917f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/sphinxext/ipython_directive.py
@@ -0,0 +1,1272 @@
+# -*- coding: utf-8 -*-
+"""
+Sphinx directive to support embedded IPython code.
+
+IPython provides an extension for `Sphinx <http://www.sphinx-doc.org/>`_ to
+highlight and run code.
+
+This directive allows pasting of entire interactive IPython sessions, prompts
+and all, and their code will actually get re-executed at doc build time, with
+all prompts renumbered sequentially. It also allows you to provide code as
+pure Python input by giving the argument ``python`` to the directive. The
+output looks like an interactive IPython session.
+
+Here is an example of how the IPython directive can
+**run** Python code at build time.
+
+.. ipython::
+
+ In [1]: 1+1
+
+ In [1]: import datetime
+ ...: datetime.date.fromisoformat('2022-02-22')
+
+It supports IPython constructs that plain
+Python does not understand (like magics):
+
+.. ipython::
+
+ In [0]: import time
+
+ In [0]: %pdoc time.sleep
+
+This will also support top-level async when using IPython 7.0+
+
+.. ipython::
+
+ In [2]: import asyncio
+ ...: print('before')
+ ...: await asyncio.sleep(1)
+ ...: print('after')
+
+
+The namespace will persist across multiple code chunks. Let's define a variable:
+
+.. ipython::
+
+ In [0]: who = "World"
+
+And now say hello:
+
+.. ipython::
+
+ In [0]: print('Hello,', who)
+
+If the current section raises an exception, you can add the ``:okexcept:`` flag
+to the current block; otherwise the build will fail.
+
+.. ipython::
+ :okexcept:
+
+ In [1]: 1/0
+
+IPython Sphinx directive module
+===============================
+
+To enable this directive, simply list it in your Sphinx ``conf.py`` file
+(making sure the directory where you placed it is visible to sphinx, as is
+needed for all Sphinx directives). For example, to enable syntax highlighting
+and the IPython directive::
+
+ extensions = ['IPython.sphinxext.ipython_console_highlighting',
+ 'IPython.sphinxext.ipython_directive']
+
+The IPython directive outputs code-blocks with the language 'ipython'. So
+if you do not have the syntax highlighting extension enabled as well, then
+all rendered code-blocks will be uncolored. By default this directive assumes
+that your prompts are unchanged IPython ones, but this can be customized.
+The configurable options that can be placed in conf.py are:
+
+ipython_savefig_dir:
+ The directory in which to save the figures. This is relative to the
+    Sphinx source directory. The default is ``'savefig'``.
+ipython_rgxin:
+ The compiled regular expression to denote the start of IPython input
+ lines. The default is ``re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*')``. You
+ shouldn't need to change this.
+ipython_warning_is_error: [default: True]
+    Fail the build if something unexpected happens, for example if a block
+    raises an exception but does not have the `:okexcept:` flag. The exact
+    behavior of what is considered strict may change between versions of the
+    directive.
+ipython_rgxout:
+ The compiled regular expression to denote the start of IPython output
+ lines. The default is ``re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*')``. You
+ shouldn't need to change this.
+ipython_promptin:
+ The string to represent the IPython input prompt in the generated ReST.
+ The default is ``'In [%d]:'``. This expects that the line numbers are used
+ in the prompt.
+ipython_promptout:
+    The string to represent the IPython output prompt in the generated ReST.
+    The default is ``'Out[%d]:'``. This expects that the line numbers are used
+ in the prompt.
+ipython_mplbackend:
+ The string which specifies if the embedded Sphinx shell should import
+ Matplotlib and set the backend. The value specifies a backend that is
+ passed to `matplotlib.use()` before any lines in `ipython_execlines` are
+ executed. If not specified in conf.py, then the default value of 'agg' is
+ used. To use the IPython directive without matplotlib as a dependency, set
+ the value to `None`. It may end up that matplotlib is still imported
+ if the user specifies so in `ipython_execlines` or makes use of the
+ @savefig pseudo decorator.
+ipython_execlines:
+ A list of strings to be exec'd in the embedded Sphinx shell. Typical
+ usage is to make certain packages always available. Set this to an empty
+ list if you wish to have no imports always available. If specified in
+ ``conf.py`` as `None`, then it has the effect of making no imports available.
+ If omitted from conf.py altogether, then the default value of
+ ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
+ipython_holdcount:
+ When the @suppress pseudo-decorator is used, the execution count can be
+ incremented or not. The default behavior is to hold the execution count,
+ corresponding to a value of `True`. Set this to `False` to increment
+ the execution count after each suppressed command.
+
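+A ``conf.py`` fragment combining several of these options might look like the
+following (the values shown here are purely illustrative)::
+
+    ipython_warning_is_error = False
+    ipython_execlines = ["import numpy as np"]
+    ipython_holdcount = True
+    ipython_mplbackend = "agg"
+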
+As an example, to use the IPython directive when `matplotlib` is not available,
+one sets the backend to `None`::
+
+ ipython_mplbackend = None
+
+An example usage of the directive is:
+
+.. code-block:: rst
+
+ .. ipython::
+
+ In [1]: x = 1
+
+ In [2]: y = x**2
+
+ In [3]: print(y)
+
+See http://matplotlib.org/sampledoc/ipython_directive.html for additional
+documentation.
+
+Pseudo-Decorators
+=================
+
+Note: Only one decorator is supported per input. If more than one decorator
+is specified, then only the last one is used.
+
+In addition to the Pseudo-Decorators/options described at the above link,
+several enhancements have been made. The directive will emit a message to the
+console at build-time if code-execution resulted in an exception or warning.
+You can suppress these on a per-block basis by specifying the :okexcept:
+or :okwarning: options:
+
+.. code-block:: rst
+
+ .. ipython::
+ :okexcept:
+ :okwarning:
+
+ In [1]: 1/0
+ In [2]: # raise warning.
+
+To Do
+=====
+
+- Turn the ad-hoc test() function into a real test suite.
+- Break up ipython-specific functionality from matplotlib stuff into better
+ separated code.
+
+"""
+
+# Authors
+# =======
+#
+# - John D Hunter: original author.
+# - Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
+# - Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
+# - Skipper Seabold: refactoring, cleanups, pure python addition
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import atexit
+import errno
+import os
+import pathlib
+import re
+import sys
+import tempfile
+import ast
+import warnings
+import shutil
+from io import StringIO
+
+# Third-party
+from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive
+from sphinx.util import logging
+
+# Our own
+from traitlets.config import Config
+from IPython import InteractiveShell
+from IPython.core.profiledir import ProfileDir
+
+use_matplotlib = False
+try:
+ import matplotlib
+ use_matplotlib = True
+except Exception:
+ pass
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+# for tokenizing blocks
+COMMENT, INPUT, OUTPUT = range(3)
+
+PSEUDO_DECORATORS = ["suppress", "verbatim", "savefig", "doctest"]
+
+#-----------------------------------------------------------------------------
+# Functions and class declarations
+#-----------------------------------------------------------------------------
+
+def block_parser(part, rgxin, rgxout, fmtin, fmtout):
+ """
+ part is a string of ipython text, comprised of at most one
+ input, one output, comments, and blank lines. The block parser
+ parses the text into a list of::
+
+ blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
+
+ where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
+ data is, depending on the type of token::
+
+ COMMENT : the comment string
+
+ INPUT: the (DECORATOR, INPUT_LINE, REST) where
+ DECORATOR: the input decorator (or None)
+ INPUT_LINE: the input as string (possibly multi-line)
+ REST : any stdout generated by the input line (not OUTPUT)
+
+ OUTPUT: the output string, possibly multi-line
+
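+    For example, a part such as::
+
+        # a comment
+        In [1]: 1 + 1
+        Out[1]: 2
+
+    parses (roughly) to
+    ``[(COMMENT, '# a comment'), (INPUT, (None, '1 + 1', '')), (OUTPUT, '2')]``.
+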
+ """
+ block = []
+ lines = part.split('\n')
+ N = len(lines)
+ i = 0
+ decorator = None
+ while 1:
+
+ if i==N:
+ # nothing left to parse -- the last line
+ break
+
+ line = lines[i]
+ i += 1
+ line_stripped = line.strip()
+ if line_stripped.startswith('#'):
+ block.append((COMMENT, line))
+ continue
+
+ if any(
+ line_stripped.startswith("@" + pseudo_decorator)
+ for pseudo_decorator in PSEUDO_DECORATORS
+ ):
+ if decorator:
+ raise RuntimeError(
+ "Applying multiple pseudo-decorators on one line is not supported"
+ )
+ else:
+ decorator = line_stripped
+ continue
+
+ # does this look like an input line?
+ matchin = rgxin.match(line)
+ if matchin:
+ lineno, inputline = int(matchin.group(1)), matchin.group(2)
+
+ # the ....: continuation string
+ continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
+ Nc = len(continuation)
+ # input lines can continue on for more than one line, if
+ # we have a '\' line continuation char or a function call
+ # echo line 'print'. The input line can only be
+ # terminated by the end of the block or an output line, so
+ # we parse out the rest of the input line if it is
+ # multiline as well as any echo text
+
+ rest = []
+ while i<N:
+
+ # look ahead; if the next line is blank, or a comment, or
+ # an output line, we're done
+
+ nextline = lines[i]
+ matchout = rgxout.match(nextline)
+ #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
+ if matchout or nextline.startswith('#'):
+ break
+ elif nextline.startswith(continuation):
+ # The default ipython_rgx* treat the space following the colon as optional.
+ # However, If the space is there we must consume it or code
+ # employing the cython_magic extension will fail to execute.
+ #
+ # This works with the default ipython_rgx* patterns,
+ # If you modify them, YMMV.
+ nextline = nextline[Nc:]
+ if nextline and nextline[0] == ' ':
+ nextline = nextline[1:]
+
+ inputline += '\n' + nextline
+ else:
+ rest.append(nextline)
+ i+= 1
+
+ block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
+ continue
+
+ # if it looks like an output line grab all the text to the end
+ # of the block
+ matchout = rgxout.match(line)
+ if matchout:
+ lineno, output = int(matchout.group(1)), matchout.group(2)
+ if i<N-1:
+ output = '\n'.join([output] + lines[i:])
+
+ block.append((OUTPUT, output))
+ break
+
+ return block
+
+
+class EmbeddedSphinxShell(object):
+ """An embedded IPython instance to run inside Sphinx"""
+
+ def __init__(self, exec_lines=None):
+
+ self.cout = StringIO()
+
+ if exec_lines is None:
+ exec_lines = []
+
+ # Create config object for IPython
+ config = Config()
+ config.HistoryManager.hist_file = ':memory:'
+ config.InteractiveShell.autocall = False
+ config.InteractiveShell.autoindent = False
+ config.InteractiveShell.colors = 'NoColor'
+
+ # create a profile so instance history isn't saved
+ tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
+ profname = 'auto_profile_sphinx_build'
+ pdir = os.path.join(tmp_profile_dir,profname)
+ profile = ProfileDir.create_profile_dir(pdir)
+
+ # Create and initialize global ipython, but don't start its mainloop.
+ # This will persist across different EmbeddedSphinxShell instances.
+ IP = InteractiveShell.instance(config=config, profile_dir=profile)
+ atexit.register(self.cleanup)
+
+ # Store a few parts of IPython we'll need.
+ self.IP = IP
+ self.user_ns = self.IP.user_ns
+ self.user_global_ns = self.IP.user_global_ns
+
+ self.input = ''
+ self.output = ''
+ self.tmp_profile_dir = tmp_profile_dir
+
+ self.is_verbatim = False
+ self.is_doctest = False
+ self.is_suppress = False
+
+ # Optionally, provide more detailed information to shell.
+        # This is assigned by the setup() method of IPythonDirective
+        # to point at itself.
+ #
+ # So, you can access handy things at self.directive.state
+ self.directive = None
+
+ # on the first call to the savefig decorator, we'll import
+ # pyplot as plt so we can make a call to the plt.gcf().savefig
+ self._pyplot_imported = False
+
+ # Prepopulate the namespace.
+ for line in exec_lines:
+ self.process_input_line(line, store_history=False)
+
+ def cleanup(self):
+ shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
+
+ def clear_cout(self):
+ self.cout.seek(0)
+ self.cout.truncate(0)
+
+ def process_input_line(self, line, store_history):
+ return self.process_input_lines([line], store_history=store_history)
+
+ def process_input_lines(self, lines, store_history=True):
+ """process the input, capturing stdout"""
+ stdout = sys.stdout
+ source_raw = '\n'.join(lines)
+ try:
+ sys.stdout = self.cout
+ self.IP.run_cell(source_raw, store_history=store_history)
+ finally:
+ sys.stdout = stdout
+
+ def process_image(self, decorator):
+ """
+        Build an image directive like::
+
+            .. image:: somefile.png
+               :width: 4in
+
+        from an input like::
+
+            savefig somefile.png width=4in
+ """
+ savefig_dir = self.savefig_dir
+ source_dir = self.source_dir
+ saveargs = decorator.split(' ')
+ filename = saveargs[1]
+ # insert relative path to image file in source
+ # as absolute path for Sphinx
+ # sphinx expects a posix path, even on Windows
+ path = pathlib.Path(savefig_dir, filename)
+ outfile = '/' + path.relative_to(source_dir).as_posix()
+
+ imagerows = ['.. image:: %s' % outfile]
+
+ for kwarg in saveargs[2:]:
+ arg, val = kwarg.split('=')
+ arg = arg.strip()
+ val = val.strip()
+ imagerows.append(' :%s: %s'%(arg, val))
+
+ image_file = os.path.basename(outfile) # only return file name
+ image_directive = '\n'.join(imagerows)
+ return image_file, image_directive
+
+ # Callbacks for each type of token
+ def process_input(self, data, input_prompt, lineno):
+ """
+ Process data block for INPUT token.
+
+ """
+ decorator, input, rest = data
+ image_file = None
+ image_directive = None
+
+ is_verbatim = decorator=='@verbatim' or self.is_verbatim
+ is_doctest = (decorator is not None and \
+ decorator.startswith('@doctest')) or self.is_doctest
+ is_suppress = decorator=='@suppress' or self.is_suppress
+ is_okexcept = decorator=='@okexcept' or self.is_okexcept
+ is_okwarning = decorator=='@okwarning' or self.is_okwarning
+ is_savefig = decorator is not None and \
+ decorator.startswith('@savefig')
+
+ input_lines = input.split('\n')
+ if len(input_lines) > 1:
+ if input_lines[-1] != "":
+ input_lines.append('') # make sure there's a blank line
+ # so splitter buffer gets reset
+
+ continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
+
+ if is_savefig:
+ image_file, image_directive = self.process_image(decorator)
+
+ ret = []
+ is_semicolon = False
+
+ # Hold the execution count, if requested to do so.
+ if is_suppress and self.hold_count:
+ store_history = False
+ else:
+ store_history = True
+
+ # Note: catch_warnings is not thread safe
+ with warnings.catch_warnings(record=True) as ws:
+ if input_lines[0].endswith(';'):
+ is_semicolon = True
+ #for i, line in enumerate(input_lines):
+
+ # process the first input line
+ if is_verbatim:
+ self.process_input_lines([''])
+ self.IP.execution_count += 1 # increment it anyway
+ else:
+ # only submit the line in non-verbatim mode
+ self.process_input_lines(input_lines, store_history=store_history)
+
+ if not is_suppress:
+ for i, line in enumerate(input_lines):
+ if i == 0:
+ formatted_line = '%s %s'%(input_prompt, line)
+ else:
+ formatted_line = '%s %s'%(continuation, line)
+ ret.append(formatted_line)
+
+ if not is_suppress and len(rest.strip()) and is_verbatim:
+ # The "rest" is the standard output of the input. This needs to be
+ # added when in verbatim mode. If there is no "rest", then we don't
+ # add it, as the new line will be added by the processed output.
+ ret.append(rest)
+
+ # Fetch the processed output. (This is not the submitted output.)
+ self.cout.seek(0)
+ processed_output = self.cout.read()
+ if not is_suppress and not is_semicolon:
+ #
+ # In IPythonDirective.run, the elements of `ret` are eventually
+ # combined such that '' entries correspond to newlines. So if
+            # `processed_output` is equal to '', then adding it to `ret`
+ # ensures that there is a blank line between consecutive inputs
+ # that have no outputs, as in:
+ #
+ # In [1]: x = 4
+ #
+ # In [2]: x = 5
+ #
+ # When there is processed output, it has a '\n' at the tail end. So
+ # adding the output to `ret` will provide the necessary spacing
+ # between consecutive input/output blocks, as in:
+ #
+ # In [1]: x
+ # Out[1]: 5
+ #
+ # In [2]: x
+ # Out[2]: 5
+ #
+ # When there is stdout from the input, it also has a '\n' at the
+ # tail end, and so this ensures proper spacing as well. E.g.:
+ #
+ # In [1]: print x
+ # 5
+ #
+ # In [2]: x = 5
+ #
+ # When in verbatim mode, `processed_output` is empty (because
+            # nothing was passed to IP). Sometimes the submitted code block has
+ # an Out[] portion and sometimes it does not. When it does not, we
+ # need to ensure proper spacing, so we have to add '' to `ret`.
+ # However, if there is an Out[] in the submitted code, then we do
+ # not want to add a newline as `process_output` has stuff to add.
+ # The difficulty is that `process_input` doesn't know if
+ # `process_output` will be called---so it doesn't know if there is
+ # Out[] in the code block. The requires that we include a hack in
+ # `process_block`. See the comments there.
+            # Out[] in the code block. This requires that we include a hack in
+ ret.append(processed_output)
+ elif is_semicolon:
+ # Make sure there is a newline after the semicolon.
+ ret.append('')
+
+ # context information
+ filename = "Unknown"
+ lineno = 0
+ if self.directive.state:
+ filename = self.directive.state.document.current_source
+ lineno = self.directive.state.document.current_line
+
+ # Use sphinx logger for warnings
+ logger = logging.getLogger(__name__)
+
+ # output any exceptions raised during execution to stdout
+ # unless :okexcept: has been specified.
+ if not is_okexcept and (
+ ("Traceback" in processed_output) or ("SyntaxError" in processed_output)
+ ):
+ s = "\n>>>" + ("-" * 73) + "\n"
+ s += "Exception in %s at block ending on line %s\n" % (filename, lineno)
+ s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
+ s += processed_output + "\n"
+ s += "<<<" + ("-" * 73)
+ logger.warning(s)
+ if self.warning_is_error:
+                raise RuntimeError('Unexpected exception in `{}` line {}'.format(filename, lineno))
+
+ # output any warning raised during execution to stdout
+ # unless :okwarning: has been specified.
+ if not is_okwarning:
+ for w in ws:
+ s = "\n>>>" + ("-" * 73) + "\n"
+ s += "Warning in %s at block ending on line %s\n" % (filename, lineno)
+ s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
+ s += ("-" * 76) + "\n"
+ s += warnings.formatwarning(
+ w.message, w.category, w.filename, w.lineno, w.line
+ )
+ s += "<<<" + ("-" * 73)
+ logger.warning(s)
+ if self.warning_is_error:
+                    raise RuntimeError('Unexpected warning in `{}` line {}'.format(filename, lineno))
+
+ self.clear_cout()
+ return (ret, input_lines, processed_output,
+ is_doctest, decorator, image_file, image_directive)
+
+
+ def process_output(self, data, output_prompt, input_lines, output,
+ is_doctest, decorator, image_file):
+ """
+ Process data block for OUTPUT token.
+
+ """
+ # Recall: `data` is the submitted output, and `output` is the processed
+ # output from `input_lines`.
+
+ TAB = ' ' * 4
+
+ if is_doctest and output is not None:
+
+ found = output # This is the processed output
+ found = found.strip()
+ submitted = data.strip()
+
+ if self.directive is None:
+ source = 'Unavailable'
+ content = 'Unavailable'
+ else:
+ source = self.directive.state.document.current_source
+ content = self.directive.content
+ # Add tabs and join into a single string.
+ content = '\n'.join([TAB + line for line in content])
+
+ # Make sure the output contains the output prompt.
+ ind = found.find(output_prompt)
+ if ind < 0:
+ e = ('output does not contain output prompt\n\n'
+ 'Document source: {0}\n\n'
+ 'Raw content: \n{1}\n\n'
+ 'Input line(s):\n{TAB}{2}\n\n'
+ 'Output line(s):\n{TAB}{3}\n\n')
+ e = e.format(source, content, '\n'.join(input_lines),
+ repr(found), TAB=TAB)
+ raise RuntimeError(e)
+ found = found[len(output_prompt):].strip()
+
+ # Handle the actual doctest comparison.
+ if decorator.strip() == '@doctest':
+ # Standard doctest
+ if found != submitted:
+ e = ('doctest failure\n\n'
+ 'Document source: {0}\n\n'
+ 'Raw content: \n{1}\n\n'
+ 'On input line(s):\n{TAB}{2}\n\n'
+ 'we found output:\n{TAB}{3}\n\n'
+ 'instead of the expected:\n{TAB}{4}\n\n')
+ e = e.format(source, content, '\n'.join(input_lines),
+ repr(found), repr(submitted), TAB=TAB)
+ raise RuntimeError(e)
+ else:
+ self.custom_doctest(decorator, input_lines, found, submitted)
+
+ # When in verbatim mode, this holds additional submitted output
+ # to be written in the final Sphinx output.
+ # https://github.com/ipython/ipython/issues/5776
+ out_data = []
+
+ is_verbatim = decorator=='@verbatim' or self.is_verbatim
+ if is_verbatim and data.strip():
+ # Note that `ret` in `process_block` has '' as its last element if
+ # the code block was in verbatim mode. So if there is no submitted
+ # output, then we will have proper spacing only if we do not add
+ # an additional '' to `out_data`. This is why we condition on
+ # `and data.strip()`.
+
+ # The submitted output has no output prompt. If we want the
+ # prompt and the code to appear, we need to join them now
+ # instead of adding them separately---as this would create an
+ # undesired newline. How we do this ultimately depends on the
+ # format of the output regex. I'll do what works for the default
+ # prompt for now, and we might have to adjust if it doesn't work
+ # in other cases. Finally, the submitted output does not have
+ # a trailing newline, so we must add it manually.
+ out_data.append("{0} {1}\n".format(output_prompt, data))
+
+ return out_data
+
+ def process_comment(self, data):
+        """Process data block for COMMENT token."""
+ if not self.is_suppress:
+ return [data]
+
+ def save_image(self, image_file):
+ """
+ Saves the image file to disk.
+ """
+ self.ensure_pyplot()
+ command = 'plt.gcf().savefig("%s")'%image_file
+ #print 'SAVEFIG', command # dbg
+ self.process_input_line('bookmark ipy_thisdir', store_history=False)
+ self.process_input_line('cd -b ipy_savedir', store_history=False)
+ self.process_input_line(command, store_history=False)
+ self.process_input_line('cd -b ipy_thisdir', store_history=False)
+ self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
+ self.clear_cout()
+
+ def process_block(self, block):
+ """
+        Process a block from the block_parser and return a list of processed lines.
+ """
+ ret = []
+ output = None
+ input_lines = None
+ lineno = self.IP.execution_count
+
+ input_prompt = self.promptin % lineno
+ output_prompt = self.promptout % lineno
+ image_file = None
+ image_directive = None
+
+ found_input = False
+ for token, data in block:
+ if token == COMMENT:
+ out_data = self.process_comment(data)
+ elif token == INPUT:
+ found_input = True
+ (out_data, input_lines, output, is_doctest,
+ decorator, image_file, image_directive) = \
+ self.process_input(data, input_prompt, lineno)
+ elif token == OUTPUT:
+ if not found_input:
+
+ TAB = ' ' * 4
+ linenumber = 0
+ source = 'Unavailable'
+ content = 'Unavailable'
+ if self.directive:
+ linenumber = self.directive.state.document.current_line
+ source = self.directive.state.document.current_source
+ content = self.directive.content
+ # Add tabs and join into a single string.
+ content = '\n'.join([TAB + line for line in content])
+
+ e = ('\n\nInvalid block: Block contains an output prompt '
+ 'without an input prompt.\n\n'
+ 'Document source: {0}\n\n'
+ 'Content begins at line {1}: \n\n{2}\n\n'
+ 'Problematic block within content: \n\n{TAB}{3}\n\n')
+ e = e.format(source, linenumber, content, block, TAB=TAB)
+
+ # Write, rather than include in exception, since Sphinx
+ # will truncate tracebacks.
+ sys.stdout.write(e)
+ raise RuntimeError('An invalid block was detected.')
+ out_data = \
+ self.process_output(data, output_prompt, input_lines,
+ output, is_doctest, decorator,
+ image_file)
+ if out_data:
+ # Then there was user submitted output in verbatim mode.
+ # We need to remove the last element of `ret` that was
+ # added in `process_input`, as it is '' and would introduce
+ # an undesirable newline.
+ assert(ret[-1] == '')
+ del ret[-1]
+
+ if out_data:
+ ret.extend(out_data)
+
+ # save the image files
+ if image_file is not None:
+ self.save_image(image_file)
+
+ return ret, image_directive
+
+ def ensure_pyplot(self):
+ """
+ Ensures that pyplot has been imported into the embedded IPython shell.
+
+ Also, makes sure to set the backend appropriately if not set already.
+
+ """
+        # We are here if the @savefig pseudo decorator was used. Thus, it's
+        # possible that we could be here even if ipython_mplbackend were set to
+ # `None`. That's also strange and perhaps worthy of raising an
+ # exception, but for now, we just set the backend to 'agg'.
+
+ if not self._pyplot_imported:
+ if 'matplotlib.backends' not in sys.modules:
+ # Then ipython_matplotlib was set to None but there was a
+                # Then ipython_mplbackend was set to None but there was a
+                # call to the @savefig decorator (and ipython_execlines did
+                # not set a backend).
+                #raise Exception("No backend was set, but @savefig was used!")
+ matplotlib.use('agg')
+
+ # Always import pyplot into embedded shell.
+ self.process_input_line('import matplotlib.pyplot as plt',
+ store_history=False)
+ self._pyplot_imported = True
+
+ def process_pure_python(self, content):
+ """
+        ``content`` is a list of strings; it is unedited directive content.
+
+        This runs it line by line in the InteractiveShell, prepending
+        prompts as needed and capturing stderr and stdout, then returns
+        the content as a list as if it were IPython code.
+ """
+ output = []
+ savefig = False # keep up with this to clear figure
+ multiline = False # to handle line continuation
+ multiline_start = None
+ fmtin = self.promptin
+
+ ct = 0
+
+ for lineno, line in enumerate(content):
+
+ line_stripped = line.strip()
+ if not len(line):
+ output.append(line)
+ continue
+
+ # handle pseudo-decorators, whilst ensuring real python decorators are treated as input
+ if any(
+ line_stripped.startswith("@" + pseudo_decorator)
+ for pseudo_decorator in PSEUDO_DECORATORS
+ ):
+ output.extend([line])
+ if 'savefig' in line:
+ savefig = True # and need to clear figure
+ continue
+
+ # handle comments
+ if line_stripped.startswith('#'):
+ output.extend([line])
+ continue
+
+ # deal with lines checking for multiline
+ continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
+ if not multiline:
+ modified = u"%s %s" % (fmtin % ct, line_stripped)
+ output.append(modified)
+ ct += 1
+ try:
+ ast.parse(line_stripped)
+ output.append(u'')
+ except Exception: # on a multiline
+ multiline = True
+ multiline_start = lineno
+ else: # still on a multiline
+ modified = u'%s %s' % (continuation, line)
+ output.append(modified)
+
+ # if the next line is indented, it should be part of multiline
+ if len(content) > lineno + 1:
+ nextline = content[lineno + 1]
+ if len(nextline) - len(nextline.lstrip()) > 3:
+ continue
+ try:
+ mod = ast.parse(
+ '\n'.join(content[multiline_start:lineno+1]))
+ if isinstance(mod.body[0], ast.FunctionDef):
+ # check to see if we have the whole function
+ for element in mod.body[0].body:
+ if isinstance(element, ast.Return):
+ multiline = False
+ else:
+ output.append(u'')
+ multiline = False
+ except Exception:
+ pass
+
+ if savefig: # clear figure if plotted
+ self.ensure_pyplot()
+ self.process_input_line('plt.clf()', store_history=False)
+ self.clear_cout()
+ savefig = False
+
+ return output
+
+ def custom_doctest(self, decorator, input_lines, found, submitted):
+ """
+ Perform a specialized doctest.
+
+ """
+ from .custom_doctests import doctests
+
+ args = decorator.split()
+ doctest_type = args[1]
+ if doctest_type in doctests:
+ doctests[doctest_type](self, args, input_lines, found, submitted)
+ else:
+ e = "Invalid option to @doctest: {0}".format(doctest_type)
+ raise Exception(e)
+
+
+class IPythonDirective(Directive):
+
+ has_content = True
+ required_arguments = 0
+ optional_arguments = 4 # python, suppress, verbatim, doctest
+    final_argument_whitespace = True
+ option_spec = { 'python': directives.unchanged,
+ 'suppress' : directives.flag,
+ 'verbatim' : directives.flag,
+ 'doctest' : directives.flag,
+ 'okexcept': directives.flag,
+ 'okwarning': directives.flag
+ }
+
+ shell = None
+
+ seen_docs = set()
+
+ def get_config_options(self):
+ # contains sphinx configuration variables
+ config = self.state.document.settings.env.config
+
+ # get config variables to set figure output directory
+ savefig_dir = config.ipython_savefig_dir
+ source_dir = self.state.document.settings.env.srcdir
+ savefig_dir = os.path.join(source_dir, savefig_dir)
+
+ # get regex and prompt stuff
+ rgxin = config.ipython_rgxin
+ rgxout = config.ipython_rgxout
+        warning_is_error = config.ipython_warning_is_error
+ promptin = config.ipython_promptin
+ promptout = config.ipython_promptout
+ mplbackend = config.ipython_mplbackend
+ exec_lines = config.ipython_execlines
+ hold_count = config.ipython_holdcount
+
+ return (savefig_dir, source_dir, rgxin, rgxout,
+ promptin, promptout, mplbackend, exec_lines, hold_count, warning_is_error)
+
+ def setup(self):
+ # Get configuration values.
+ (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
+ mplbackend, exec_lines, hold_count, warning_is_error) = self.get_config_options()
+
+ try:
+ os.makedirs(savefig_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ if self.shell is None:
+ # We will be here many times. However, when the
+ # EmbeddedSphinxShell is created, its interactive shell member
+ # is the same for each instance.
+
+ if mplbackend and 'matplotlib.backends' not in sys.modules and use_matplotlib:
+ import matplotlib
+ matplotlib.use(mplbackend)
+
+ # Must be called after (potentially) importing matplotlib and
+ # setting its backend since exec_lines might import pylab.
+ self.shell = EmbeddedSphinxShell(exec_lines)
+
+ # Store IPython directive to enable better error messages
+ self.shell.directive = self
+
+ # reset the execution count if we haven't processed this doc
+ #NOTE: this may be borked if there are multiple seen_doc tmp files
+ #check time stamp?
+        if self.state.document.current_source not in self.seen_docs:
+ self.shell.IP.history_manager.reset()
+ self.shell.IP.execution_count = 1
+ self.seen_docs.add(self.state.document.current_source)
+
+ # and attach to shell so we don't have to pass them around
+ self.shell.rgxin = rgxin
+ self.shell.rgxout = rgxout
+ self.shell.promptin = promptin
+ self.shell.promptout = promptout
+ self.shell.savefig_dir = savefig_dir
+ self.shell.source_dir = source_dir
+ self.shell.hold_count = hold_count
+ self.shell.warning_is_error = warning_is_error
+
+ # setup bookmark for saving figures directory
+ self.shell.process_input_line(
+ 'bookmark ipy_savedir "%s"' % savefig_dir, store_history=False
+ )
+ self.shell.clear_cout()
+
+ return rgxin, rgxout, promptin, promptout
+
+ def teardown(self):
+ # delete last bookmark
+ self.shell.process_input_line('bookmark -d ipy_savedir',
+ store_history=False)
+ self.shell.clear_cout()
+
+ def run(self):
+ debug = False
+
+ #TODO, any reason block_parser can't be a method of embeddable shell
+ # then we wouldn't have to carry these around
+ rgxin, rgxout, promptin, promptout = self.setup()
+
+ options = self.options
+ self.shell.is_suppress = 'suppress' in options
+ self.shell.is_doctest = 'doctest' in options
+ self.shell.is_verbatim = 'verbatim' in options
+ self.shell.is_okexcept = 'okexcept' in options
+ self.shell.is_okwarning = 'okwarning' in options
+
+ # handle pure python code
+ if 'python' in self.arguments:
+ content = self.content
+ self.content = self.shell.process_pure_python(content)
+
+ # parts consists of all text within the ipython-block.
+ # Each part is an input/output block.
+ parts = '\n'.join(self.content).split('\n\n')
+
+ lines = ['.. code-block:: ipython', '']
+ figures = []
+
+ # Use sphinx logger for warnings
+ logger = logging.getLogger(__name__)
+
+ for part in parts:
+ block = block_parser(part, rgxin, rgxout, promptin, promptout)
+ if len(block):
+ rows, figure = self.shell.process_block(block)
+ for row in rows:
+ lines.extend([' {0}'.format(line)
+ for line in row.split('\n')])
+
+ if figure is not None:
+ figures.append(figure)
+ else:
+ message = 'Code input with no code at {}, line {}'\
+ .format(
+ self.state.document.current_source,
+ self.state.document.current_line)
+ if self.shell.warning_is_error:
+ raise RuntimeError(message)
+ else:
+ logger.warning(message)
+
+ for figure in figures:
+ lines.append('')
+ lines.extend(figure.split('\n'))
+ lines.append('')
+
+ if len(lines) > 2:
+ if debug:
+ print('\n'.join(lines))
+ else:
+ # This has to do with input, not output. But if we comment
+ # these lines out, then no IPython code will appear in the
+ # final output.
+ self.state_machine.insert_input(
+ lines, self.state_machine.input_lines.source(0))
+
+ # cleanup
+ self.teardown()
+
+ return []
+
+# Enable as a proper Sphinx directive
+def setup(app):
+ setup.app = app
+
+ app.add_directive('ipython', IPythonDirective)
+ app.add_config_value('ipython_savefig_dir', 'savefig', 'env')
+ app.add_config_value('ipython_warning_is_error', True, 'env')
+ app.add_config_value('ipython_rgxin',
+ re.compile(r'In \[(\d+)\]:\s?(.*)\s*'), 'env')
+ app.add_config_value('ipython_rgxout',
+ re.compile(r'Out\[(\d+)\]:\s?(.*)\s*'), 'env')
+ app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
+ app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
+
+ # We could just let matplotlib pick whatever is specified as the default
+ # backend in the matplotlibrc file, but this would cause issues if the
+ # backend didn't work in headless environments. For this reason, 'agg'
+ # is a good default backend choice.
+ app.add_config_value('ipython_mplbackend', 'agg', 'env')
+
+ # If the user sets this config value to `None`, then EmbeddedSphinxShell's
+ # __init__ method will treat it as [].
+ execlines = ['import numpy as np']
+ if use_matplotlib:
+ execlines.append('import matplotlib.pyplot as plt')
+ app.add_config_value('ipython_execlines', execlines, 'env')
+
+ app.add_config_value('ipython_holdcount', True, 'env')
+
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
+
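+# Illustrative usage sketch (not part of the upstream module): a Sphinx
+# project would typically enable and configure this directive from its
+# conf.py roughly as follows. The option values shown are assumptions given
+# purely as an example; the names mirror the config values registered in
+# setup() above.
+#
+#     extensions = ['IPython.sphinxext.ipython_directive']
+#     ipython_savefig_dir = 'savefig'
+#     ipython_warning_is_error = False
+#     ipython_execlines = ['import numpy as np']
+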
+# Simple smoke test, needs to be converted to a proper automatic test.
+def test():
+
+ examples = [
+ r"""
+In [9]: pwd
+Out[9]: '/home/jdhunter/py4science/book'
+
+In [10]: cd bookdata/
+/home/jdhunter/py4science/book/bookdata
+
+In [2]: from pylab import *
+
+In [2]: ion()
+
+In [3]: im = imread('stinkbug.png')
+
+@savefig mystinkbug.png width=4in
+In [4]: imshow(im)
+Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
+
+""",
+ r"""
+
+In [1]: x = 'hello world'
+
+# string methods can be
+# used to alter the string
+@doctest
+In [2]: x.upper()
+Out[2]: 'HELLO WORLD'
+
+@verbatim
+In [3]: x.st<TAB>
+x.startswith x.strip
+""",
+ r"""
+
+In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
+ .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
+
+In [131]: print url.split('&')
+['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
+
+In [60]: import urllib
+
+""",
+ r"""\
+
+In [133]: import numpy.random
+
+@suppress
+In [134]: numpy.random.seed(2358)
+
+@doctest
+In [135]: numpy.random.rand(10,2)
+Out[135]:
+array([[ 0.64524308, 0.59943846],
+ [ 0.47102322, 0.8715456 ],
+ [ 0.29370834, 0.74776844],
+ [ 0.99539577, 0.1313423 ],
+ [ 0.16250302, 0.21103583],
+ [ 0.81626524, 0.1312433 ],
+ [ 0.67338089, 0.72302393],
+ [ 0.7566368 , 0.07033696],
+ [ 0.22591016, 0.77731835],
+ [ 0.0072729 , 0.34273127]])
+
+""",
+
+ r"""
+In [106]: print x
+jdh
+
+In [109]: for i in range(10):
+ .....: print i
+ .....:
+ .....:
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+""",
+
+ r"""
+
+In [144]: from pylab import *
+
+In [145]: ion()
+
+# use a semicolon to suppress the output
+@savefig test_hist.png width=4in
+In [151]: hist(np.random.randn(10000), 100);
+
+
+@savefig test_plot.png width=4in
+In [151]: plot(np.random.randn(10000), 'o');
+ """,
+
+ r"""
+# use a semicolon to suppress the output
+In [151]: plt.clf()
+
+@savefig plot_simple.png width=4in
+In [151]: plot([1,2,3])
+
+@savefig hist_simple.png width=4in
+In [151]: hist(np.random.randn(10000), 100);
+
+""",
+ r"""
+# update the current fig
+In [151]: ylabel('number')
+
+In [152]: title('normal distribution')
+
+
+@savefig hist_with_text.png
+In [153]: grid(True)
+
+@doctest float
+In [154]: 0.1 + 0.2
+Out[154]: 0.3
+
+@doctest float
+In [155]: np.arange(16).reshape(4,4)
+Out[155]:
+array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+In [1]: x = np.arange(16, dtype=float).reshape(4,4)
+
+In [2]: x[0,0] = np.inf
+
+In [3]: x[0,1] = np.nan
+
+@doctest float
+In [4]: x
+Out[4]:
+array([[ inf, nan, 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [ 12., 13., 14., 15.]])
+
+
+ """,
+ ]
+ # skip the first example, which depends on a local file:
+ examples = examples[1:]
+
+ #ipython_directive.DEBUG = True # dbg
+ #options = dict(suppress=True) # dbg
+ options = {}
+ for example in examples:
+ content = example.split('\n')
+ IPythonDirective('debug', arguments=None, options=options,
+ content=content, lineno=0,
+ content_offset=None, block_text=None,
+ state=None, state_machine=None,
+ )
+
+# Run test suite as a script
+if __name__=='__main__':
+ if not os.path.isdir('_static'):
+ os.mkdir('_static')
+ test()
+ print('All OK? Check figures in _static/')
diff --git a/contrib/python/ipython/py3/IPython/terminal/__init__.py b/contrib/python/ipython/py3/IPython/terminal/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/__init__.py
diff --git a/contrib/python/ipython/py3/IPython/terminal/console.py b/contrib/python/ipython/py3/IPython/terminal/console.py
new file mode 100644
index 0000000000..65571a7572
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/console.py
@@ -0,0 +1,19 @@
+"""
+Shim to maintain backwards compatibility with old IPython.terminal.console imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
+warn("The `IPython.terminal.console` package has been deprecated since IPython 4.0. "
+ "You should import from jupyter_console instead.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.terminal.console'] = ShimModule(
+ src='IPython.terminal.console', mirror='jupyter_console')
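+
+# Illustrative effect of the shim above (a sketch, not executed here):
+#
+#     import IPython.terminal.console   # emits a ShimWarning
+#     # subsequent attribute lookups on the module are forwarded to the
+#     # jupyter_console package by ShimModule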
diff --git a/contrib/python/ipython/py3/IPython/terminal/debugger.py b/contrib/python/ipython/py3/IPython/terminal/debugger.py
new file mode 100644
index 0000000000..7a0623c847
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/debugger.py
@@ -0,0 +1,177 @@
+import asyncio
+import os
+import sys
+
+from IPython.core.debugger import Pdb
+from IPython.core.completer import IPCompleter
+from .ptutils import IPythonPTCompleter
+from .shortcuts import create_ipython_shortcuts
+from . import embed
+
+from pathlib import Path
+from pygments.token import Token
+from prompt_toolkit.shortcuts.prompt import PromptSession
+from prompt_toolkit.enums import EditingMode
+from prompt_toolkit.formatted_text import PygmentsTokens
+from prompt_toolkit.history import InMemoryHistory, FileHistory
+from concurrent.futures import ThreadPoolExecutor
+
+from prompt_toolkit import __version__ as ptk_version
+PTK3 = ptk_version.startswith('3.')
+
+
+# We want to avoid prompt_toolkit (ptk) as much as possible when using
+# subprocesses, as it issues cursor-positioning requests, strips color, etc.
+_use_simple_prompt = "IPY_TEST_SIMPLE_PROMPT" in os.environ
+
+
+class TerminalPdb(Pdb):
+ """Standalone IPython debugger."""
+
+ def __init__(self, *args, pt_session_options=None, **kwargs):
+ Pdb.__init__(self, *args, **kwargs)
+ self._ptcomp = None
+ self.pt_init(pt_session_options)
+ self.thread_executor = ThreadPoolExecutor(1)
+
+ def pt_init(self, pt_session_options=None):
+ """Initialize the prompt session and the prompt loop
+ and store them in self.pt_app and self.pt_loop.
+
+ Additional keyword arguments for the PromptSession class
+ can be specified in pt_session_options.
+ """
+ if pt_session_options is None:
+ pt_session_options = {}
+
+ def get_prompt_tokens():
+ return [(Token.Prompt, self.prompt)]
+
+ if self._ptcomp is None:
+ compl = IPCompleter(
+ shell=self.shell, namespace={}, global_namespace={}, parent=self.shell
+ )
+ # add a completer for all the do_ methods
+ methods_names = [m[3:] for m in dir(self) if m.startswith("do_")]
+
+ def gen_comp(self, text):
+ return [m for m in methods_names if m.startswith(text)]
+ import types
+ newcomp = types.MethodType(gen_comp, compl)
+ compl.custom_matchers.insert(0, newcomp)
+ # end add completer.
+
+ self._ptcomp = IPythonPTCompleter(compl)
+
+ # setup history only when we start pdb
+ if self.shell.debugger_history is None:
+ if self.shell.debugger_history_file is not None:
+ p = Path(self.shell.debugger_history_file).expanduser()
+ if not p.exists():
+ p.touch()
+ self.debugger_history = FileHistory(os.path.expanduser(str(p)))
+ else:
+ self.debugger_history = InMemoryHistory()
+ else:
+ self.debugger_history = self.shell.debugger_history
+
+ options = dict(
+ message=(lambda: PygmentsTokens(get_prompt_tokens())),
+ editing_mode=getattr(EditingMode, self.shell.editing_mode.upper()),
+ key_bindings=create_ipython_shortcuts(self.shell),
+ history=self.debugger_history,
+ completer=self._ptcomp,
+ enable_history_search=True,
+ mouse_support=self.shell.mouse_support,
+ complete_style=self.shell.pt_complete_style,
+ style=getattr(self.shell, "style", None),
+ color_depth=self.shell.color_depth,
+ )
+
+ if not PTK3:
+ options['inputhook'] = self.shell.inputhook
+ options.update(pt_session_options)
+ if not _use_simple_prompt:
+ self.pt_loop = asyncio.new_event_loop()
+ self.pt_app = PromptSession(**options)
+
+ def cmdloop(self, intro=None):
+ """Repeatedly issue a prompt, accept input, parse an initial prefix
+ off the received input, and dispatch to action methods, passing them
+ the remainder of the line as argument.
+
+ Overrides the same method from cmd.Cmd to provide a prompt_toolkit replacement.
+ """
+ if not self.use_rawinput:
+ raise ValueError('Sorry ipdb does not support use_rawinput=False')
+
+ # In order to make sure that the prompt, which uses asyncio, doesn't
+ # interfere with applications in which it's used, we always run the
+ # prompt itself in a different thread (we can't start an event loop
+ # within an event loop). This new thread won't have any event loop
+ # running, and here we run our prompt-loop.
+ self.preloop()
+
+ try:
+ if intro is not None:
+ self.intro = intro
+ if self.intro:
+ print(self.intro, file=self.stdout)
+ stop = None
+ while not stop:
+ if self.cmdqueue:
+ line = self.cmdqueue.pop(0)
+ else:
+ self._ptcomp.ipy_completer.namespace = self.curframe_locals
+ self._ptcomp.ipy_completer.global_namespace = self.curframe.f_globals
+
+ # Run the prompt in a different thread.
+ if not _use_simple_prompt:
+ try:
+ line = self.thread_executor.submit(
+ self.pt_app.prompt
+ ).result()
+ except EOFError:
+ line = "EOF"
+ else:
+ line = input("ipdb> ")
+
+ line = self.precmd(line)
+ stop = self.onecmd(line)
+ stop = self.postcmd(stop, line)
+ self.postloop()
+ except Exception:
+ raise
+
+ def do_interact(self, arg):
+ ipshell = embed.InteractiveShellEmbed(
+ config=self.shell.config,
+ banner1="*interactive*",
+ exit_msg="*exiting interactive console...*",
+ )
+ global_ns = self.curframe.f_globals
+ ipshell(
+ module=sys.modules.get(global_ns["__name__"], None),
+ local_ns=self.curframe_locals,
+ )
+
+
+def set_trace(frame=None):
+ """
+ Start debugging from `frame`.
+
+ If frame is not specified, debugging starts from caller's frame.
+ """
+ TerminalPdb().set_trace(frame or sys._getframe().f_back)
+
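+# Illustrative usage sketch (not part of the upstream module): dropping into
+# the IPython terminal debugger from user code. The function name `compute`
+# is hypothetical.
+#
+#     from IPython.terminal.debugger import set_trace
+#
+#     def compute(x):
+#         set_trace()   # opens an ipdb-style prompt in the caller's frame
+#         return x + 1
+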
+
+if __name__ == '__main__':
+ import pdb
+ # IPython.core.debugger.Pdb.trace_dispatch shall not catch
+ # bdb.BdbQuit. When started through __main__ and an exception
+ # happens after hitting "c", this is needed in order to
+ # be able to quit the debugging session (see #9950).
+ old_trace_dispatch = pdb.Pdb.trace_dispatch
+ pdb.Pdb = TerminalPdb # type: ignore
+ pdb.Pdb.trace_dispatch = old_trace_dispatch # type: ignore
+ pdb.main()
diff --git a/contrib/python/ipython/py3/IPython/terminal/embed.py b/contrib/python/ipython/py3/IPython/terminal/embed.py
new file mode 100644
index 0000000000..ce5ee01ff1
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/embed.py
@@ -0,0 +1,420 @@
+# encoding: utf-8
+"""
+An embedded IPython shell.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import warnings
+
+from IPython.core import ultratb, compilerop
+from IPython.core import magic_arguments
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.interactiveshell import DummyMod, InteractiveShell
+from IPython.terminal.interactiveshell import TerminalInteractiveShell
+from IPython.terminal.ipapp import load_default_config
+
+from traitlets import Bool, CBool, Unicode
+from IPython.utils.io import ask_yes_no
+
+from typing import Set
+
+class KillEmbedded(Exception):
+ pass
+
+# kept for backward compatibility as IPython 6 was released with
+# the typo. See https://github.com/ipython/ipython/pull/10706
+KillEmbeded = KillEmbedded
+
+# This is an additional magic that is exposed in embedded shells.
+@magics_class
+class EmbeddedMagics(Magics):
+
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('-i', '--instance', action='store_true',
+ help='Kill instance instead of call location')
+ @magic_arguments.argument('-x', '--exit', action='store_true',
+ help='Also exit the current session')
+ @magic_arguments.argument('-y', '--yes', action='store_true',
+ help='Do not ask confirmation')
+ def kill_embedded(self, parameter_s=''):
+ """%kill_embedded : deactivate for good the current embedded IPython
+
+ This function (after asking for confirmation) sets an internal flag so
+ that an embedded IPython will never activate again for the given call
+ location. This is useful to permanently disable a shell that is being
+ called inside a loop: once you've figured out what you needed from it,
+ you may then kill it, and the program will continue to run without
+ the interactive shell interfering again.
+
+ Kill Instance Option:
+
+ If for some reason you need to kill the location where the instance
+ is created rather than where it is called, for example if you create a
+ single instance in one place and debug in many locations, you can use
+ the ``--instance`` option to kill this specific instance. As with the
+ ``call location``, killing an "instance" should work even if it is
+ recreated within a loop.
+
+ .. note::
+
+ This was the default behavior before IPython 5.2
+
+ """
+
+ args = magic_arguments.parse_argstring(self.kill_embedded, parameter_s)
+ print(args)
+ if args.instance:
+ # only ask for confirmation if --yes was not given
+ if not args.yes:
+ kill = ask_yes_no(
+ "Are you sure you want to kill this embedded instance? [y/N] ", 'n')
+ else:
+ kill = True
+ if kill:
+ self.shell._disable_init_location()
+ print("This embedded IPython instance will not reactivate anymore "
+ "once you exit.")
+ else:
+ if not args.yes:
+ kill = ask_yes_no(
+ "Are you sure you want to kill this embedded call_location? [y/N] ", 'n')
+ else:
+ kill = True
+ if kill:
+ self.shell.embedded_active = False
+ print("This embedded IPython call location will not reactivate anymore "
+ "once you exit.")
+
+ if args.exit:
+ # ask_exit does not really ask; it just sets internal flags to exit
+ # on the next loop.
+ self.shell.ask_exit()
+
+
+ @line_magic
+ def exit_raise(self, parameter_s=''):
+ """%exit_raise Make the current embedded kernel exit and raise and exception.
+
+ This function sets an internal flag so that an embedded IPython will
+ raise an `IPython.terminal.embed.KillEmbedded` exception on exit, and then exit the current call. This is
+ useful to permanently exit a loop that creates IPython embed instances.
+ """
+
+ self.shell.should_raise = True
+ self.shell.ask_exit()
+
+
+class _Sentinel:
+ def __init__(self, repr):
+ assert isinstance(repr, str)
+ self.repr = repr
+
+ def __repr__(self):
+ return self.repr
+
+
+class InteractiveShellEmbed(TerminalInteractiveShell):
+
+ dummy_mode = Bool(False)
+ exit_msg = Unicode('')
+ embedded = CBool(True)
+ should_raise = CBool(False)
+ # As in the base class, display_banner is not configurable, but here it
+ # defaults to True.
+ display_banner = CBool(True)
+ exit_msg = Unicode()
+
+ # When embedding, by default we don't change the terminal title
+ term_title = Bool(False,
+ help="Automatically set the terminal title"
+ ).tag(config=True)
+
+ _inactive_locations: Set[str] = set()
+
+ def _disable_init_location(self):
+ """Disable the current Instance creation location"""
+ InteractiveShellEmbed._inactive_locations.add(self._init_location_id)
+
+ @property
+ def embedded_active(self):
+ return (self._call_location_id not in InteractiveShellEmbed._inactive_locations)\
+ and (self._init_location_id not in InteractiveShellEmbed._inactive_locations)
+
+ @embedded_active.setter
+ def embedded_active(self, value):
+ if value:
+ InteractiveShellEmbed._inactive_locations.discard(
+ self._call_location_id)
+ InteractiveShellEmbed._inactive_locations.discard(
+ self._init_location_id)
+ else:
+ InteractiveShellEmbed._inactive_locations.add(
+ self._call_location_id)
+
+ def __init__(self, **kw):
+ assert (
+ "user_global_ns" not in kw
+ ), "Key word argument `user_global_ns` has been replaced by `user_module` since IPython 4.0."
+
+ clid = kw.pop('_init_location_id', None)
+ if not clid:
+ frame = sys._getframe(1)
+ clid = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
+ self._init_location_id = clid
+
+ super(InteractiveShellEmbed,self).__init__(**kw)
+
+ # don't use the ipython crash handler so that user exceptions aren't
+ # trapped
+ sys.excepthook = ultratb.FormattedTB(color_scheme=self.colors,
+ mode=self.xmode,
+ call_pdb=self.pdb)
+
+ def init_sys_modules(self):
+ """
+ Explicitly overwrite :mod:`IPython.core.interactiveshell` to do nothing.
+ """
+ pass
+
+ def init_magics(self):
+ super(InteractiveShellEmbed, self).init_magics()
+ self.register_magics(EmbeddedMagics)
+
+ def __call__(
+ self,
+ header="",
+ local_ns=None,
+ module=None,
+ dummy=None,
+ stack_depth=1,
+ compile_flags=None,
+ **kw
+ ):
+ """Activate the interactive interpreter.
+
+ __call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
+ the interpreter shell with the given local and global namespaces, and
+ optionally print a header string at startup.
+
+ The shell can be globally activated/deactivated using the
+ dummy_mode attribute. This allows you to turn off a shell used
+ for debugging globally.
+
+ However, *each* time you call the shell you can override the current
+ state of dummy_mode with the optional keyword parameter 'dummy'. For
+ example, if you set dummy mode on with IPShell.dummy_mode = True, you
+ can still have a specific call work by calling it as IPShell(dummy=False).
+ """
+
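+ # Illustrative sketch of the dummy/dummy_mode interaction described
+ # above (assuming a hypothetical `shell = InteractiveShellEmbed()`):
+ #
+ #     shell.dummy_mode = True    # globally disable activation
+ #     shell()                    # does nothing
+ #     shell(dummy=False)         # still activates this particular call
+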
+ # We are being called; tell the underlying InteractiveShell not to exit.
+ self.keep_running = True
+
+ # If the user has turned it off, go away
+ clid = kw.pop('_call_location_id', None)
+ if not clid:
+ frame = sys._getframe(1)
+ clid = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
+ self._call_location_id = clid
+
+ if not self.embedded_active:
+ return
+
+ # Normal exits from interactive mode set this flag, so the shell can't
+ # re-enter (it checks this variable at the start of interactive mode).
+ self.exit_now = False
+
+ # Allow the dummy parameter to override the global __dummy_mode
+ if dummy or (dummy != 0 and self.dummy_mode):
+ return
+
+ # self.banner is auto computed
+ if header:
+ self.old_banner2 = self.banner2
+ self.banner2 = self.banner2 + '\n' + header + '\n'
+ else:
+ self.old_banner2 = ''
+
+ if self.display_banner:
+ self.show_banner()
+
+ # Call the embedding code with a stack depth of 1 so it can skip over
+ # our call and get the original caller's namespaces.
+ self.mainloop(
+ local_ns, module, stack_depth=stack_depth, compile_flags=compile_flags
+ )
+
+ self.banner2 = self.old_banner2
+
+ if self.exit_msg is not None:
+ print(self.exit_msg)
+
+ if self.should_raise:
+ raise KillEmbedded('Embedded IPython raising error, as user requested.')
+
+ def mainloop(
+ self,
+ local_ns=None,
+ module=None,
+ stack_depth=0,
+ compile_flags=None,
+ ):
+ """Embeds IPython into a running python program.
+
+ Parameters
+ ----------
+ local_ns, module
+ Working local namespace (a dict) and module (a module or similar
+ object). If given as None, they are automatically taken from the scope
+ where the shell was called, so that program variables become visible.
+ stack_depth : int
+ How many levels in the stack to go to looking for namespaces (when
+ local_ns or module is None). This allows an intermediate caller to
+ make sure that this function gets the namespace from the intended
+ level in the stack. By default (0) it will get its locals and globals
+ from the immediate caller.
+ compile_flags
+ A bit field identifying the __future__ features
+ that are enabled, as passed to the builtin :func:`compile` function.
+ If given as None, they are automatically taken from the scope where
+ the shell was called.
+
+ """
+
+ # Get locals and globals from caller
+ if ((local_ns is None or module is None or compile_flags is None)
+ and self.default_user_namespaces):
+ call_frame = sys._getframe(stack_depth).f_back
+
+ if local_ns is None:
+ local_ns = call_frame.f_locals
+ if module is None:
+ global_ns = call_frame.f_globals
+ try:
+ module = sys.modules[global_ns['__name__']]
+ except KeyError:
+ warnings.warn("Failed to get module %s" % \
+ global_ns.get('__name__', 'unknown module')
+ )
+ module = DummyMod()
+ module.__dict__ = global_ns
+ if compile_flags is None:
+ compile_flags = (call_frame.f_code.co_flags &
+ compilerop.PyCF_MASK)
+
+ # Save original namespace and module so we can restore them after
+ # embedding; otherwise the shell doesn't shut down correctly.
+ orig_user_module = self.user_module
+ orig_user_ns = self.user_ns
+ orig_compile_flags = self.compile.flags
+
+ # Update namespaces and fire up interpreter
+
+ # The global one is easy, we can just throw it in
+ if module is not None:
+ self.user_module = module
+
+ # But the user/local one is tricky: ipython needs it to store internal
+ # data, but we also need the locals. We'll throw our hidden variables
+ # like _ih and get_ipython() into the local namespace, but delete them
+ # later.
+ if local_ns is not None:
+ reentrant_local_ns = {k: v for (k, v) in local_ns.items() if k not in self.user_ns_hidden.keys()}
+ self.user_ns = reentrant_local_ns
+ self.init_user_ns()
+
+ # Compiler flags
+ if compile_flags is not None:
+ self.compile.flags = compile_flags
+
+ # make sure the tab-completer has the correct frame information, so it
+ # actually completes using the frame's locals/globals
+ self.set_completer_frame()
+
+ with self.builtin_trap, self.display_trap:
+ self.interact()
+
+ # now, purge out the local namespace of IPython's hidden variables.
+ if local_ns is not None:
+ local_ns.update({k: v for (k, v) in self.user_ns.items() if k not in self.user_ns_hidden.keys()})
+
+
+ # Restore original namespace so shell can shut down when we exit.
+ self.user_module = orig_user_module
+ self.user_ns = orig_user_ns
+ self.compile.flags = orig_compile_flags
+
+
+def embed(*, header="", compile_flags=None, **kwargs):
+ """Call this to embed IPython at the current point in your program.
+
+ The first invocation of this will create a :class:`terminal.embed.InteractiveShellEmbed`
+ instance and then call it. Consecutive calls just call the already
+ created instance.
+
+ If you don't want the kernel to initialize the namespace
+ from the scope of the surrounding function,
+ and/or you want to load full IPython configuration,
+ you probably want `IPython.start_ipython()` instead.
+
+ Here is a simple example::
+
+ from IPython import embed
+ a = 10
+ b = 20
+ embed(header='First time')
+ c = 30
+ d = 40
+ embed()
+
+ Parameters
+ ----------
+
+ header : str
+ Optional header string to print at startup.
+ compile_flags
+ Passed to the `compile_flags` parameter of :py:meth:`terminal.embed.InteractiveShellEmbed.mainloop()`,
+ which is called when the :class:`terminal.embed.InteractiveShellEmbed` instance is called.
+ **kwargs : various, optional
+ Any other kwargs will be passed to the :class:`terminal.embed.InteractiveShellEmbed` constructor.
+ Full customization can be done by passing a traitlets :class:`Config` in as the
+ `config` argument (see :ref:`configure_start_ipython` and :ref:`terminal_options`).
+ """
+ config = kwargs.get('config')
+ if config is None:
+ config = load_default_config()
+ config.InteractiveShellEmbed = config.TerminalInteractiveShell
+ kwargs['config'] = config
+ using = kwargs.get('using', 'sync')
+ if using :
+ kwargs['config'].update({'TerminalInteractiveShell':{'loop_runner':using, 'colors':'NoColor', 'autoawait': using!='sync'}})
+ #save ps1/ps2 if defined
+ ps1 = None
+ ps2 = None
+ try:
+ ps1 = sys.ps1
+ ps2 = sys.ps2
+ except AttributeError:
+ pass
+ #save previous instance
+ saved_shell_instance = InteractiveShell._instance
+ if saved_shell_instance is not None:
+ cls = type(saved_shell_instance)
+ cls.clear_instance()
+ frame = sys._getframe(1)
+ shell = InteractiveShellEmbed.instance(_init_location_id='%s:%s' % (
+ frame.f_code.co_filename, frame.f_lineno), **kwargs)
+ shell(header=header, stack_depth=2, compile_flags=compile_flags,
+ _call_location_id='%s:%s' % (frame.f_code.co_filename, frame.f_lineno))
+ InteractiveShellEmbed.clear_instance()
+ #restore previous instance
+ if saved_shell_instance is not None:
+ cls = type(saved_shell_instance)
+ cls.clear_instance()
+ for subclass in cls._walk_mro():
+ subclass._instance = saved_shell_instance
+ if ps1 is not None:
+ sys.ps1 = ps1
+ sys.ps2 = ps2
diff --git a/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py b/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py
new file mode 100644
index 0000000000..75cf25ea66
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py
@@ -0,0 +1,993 @@
+"""IPython terminal interface using prompt_toolkit"""
+
+import asyncio
+import os
+import sys
+from warnings import warn
+from typing import Union as UnionType
+
+from IPython.core.async_helpers import get_asyncio_loop
+from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
+from IPython.utils.py3compat import input
+from IPython.utils.terminal import toggle_set_term_title, set_term_title, restore_term_title
+from IPython.utils.process import abbrev_cwd
+from traitlets import (
+ Bool,
+ Unicode,
+ Dict,
+ Integer,
+ List,
+ observe,
+ Instance,
+ Type,
+ default,
+ Enum,
+ Union,
+ Any,
+ validate,
+ Float,
+)
+
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
+from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
+from prompt_toolkit.filters import HasFocus, Condition, IsDone
+from prompt_toolkit.formatted_text import PygmentsTokens
+from prompt_toolkit.history import History
+from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor
+from prompt_toolkit.output import ColorDepth
+from prompt_toolkit.patch_stdout import patch_stdout
+from prompt_toolkit.shortcuts import PromptSession, CompleteStyle, print_formatted_text
+from prompt_toolkit.styles import DynamicStyle, merge_styles
+from prompt_toolkit.styles.pygments import style_from_pygments_cls, style_from_pygments_dict
+from prompt_toolkit import __version__ as ptk_version
+
+from pygments.styles import get_style_by_name
+from pygments.style import Style
+from pygments.token import Token
+
+from .debugger import TerminalPdb, Pdb
+from .magics import TerminalMagics
+from .pt_inputhooks import get_inputhook_name_and_func
+from .prompts import Prompts, ClassicPrompts, RichPromptDisplayHook
+from .ptutils import IPythonPTCompleter, IPythonPTLexer
+from .shortcuts import (
+ KEY_BINDINGS,
+ create_ipython_shortcuts,
+ create_identifier,
+ RuntimeBinding,
+ add_binding,
+)
+from .shortcuts.filters import KEYBINDING_FILTERS, filter_from_string
+from .shortcuts.auto_suggest import (
+ NavigableAutoSuggestFromHistory,
+ AppendAutoSuggestionInAnyLine,
+)
+
+PTK3 = ptk_version.startswith('3.')
+
+
+class _NoStyle(Style): pass
+
+
+
+_style_overrides_light_bg = {
+ Token.Prompt: '#ansibrightblue',
+ Token.PromptNum: '#ansiblue bold',
+ Token.OutPrompt: '#ansibrightred',
+ Token.OutPromptNum: '#ansired bold',
+}
+
+_style_overrides_linux = {
+ Token.Prompt: '#ansibrightgreen',
+ Token.PromptNum: '#ansigreen bold',
+ Token.OutPrompt: '#ansibrightred',
+ Token.OutPromptNum: '#ansired bold',
+}
+
+def get_default_editor():
+ try:
+ return os.environ['EDITOR']
+ except KeyError:
+ pass
+ except UnicodeError:
+ warn("$EDITOR environment variable is not pure ASCII. Using platform "
+ "default editor.")
+
+ if os.name == 'posix':
+ return 'vi' # the only one guaranteed to be there!
+ else:
+ return 'notepad' # same in Windows!
+
+# conservatively check for tty
+# overridden streams can result in things like:
+# - sys.stdin = None
+# - no isatty method
+for _name in ('stdin', 'stdout', 'stderr'):
+ _stream = getattr(sys, _name)
+ try:
+ if not _stream or not hasattr(_stream, "isatty") or not _stream.isatty():
+ _is_tty = False
+ break
+ except ValueError:
+ # stream is closed
+ _is_tty = False
+ break
+else:
+ _is_tty = True
+
+
+_use_simple_prompt = ('IPY_TEST_SIMPLE_PROMPT' in os.environ) or (not _is_tty)
+
+def black_reformat_handler(text_before_cursor):
+ """
+ We do not need to protect against errors here; that is taken care of at
+ a higher level, where any reformatting error is ignored. Indeed, we may
+ call reformatting on incomplete code.
+ """
+ import black
+
+ formatted_text = black.format_str(text_before_cursor, mode=black.FileMode())
+ if not text_before_cursor.endswith("\n") and formatted_text.endswith("\n"):
+ formatted_text = formatted_text[:-1]
+ return formatted_text
+
+
+def yapf_reformat_handler(text_before_cursor):
+ from yapf.yapflib import file_resources
+ from yapf.yapflib import yapf_api
+
+ style_config = file_resources.GetDefaultStyleForDir(os.getcwd())
+ formatted_text, was_formatted = yapf_api.FormatCode(
+ text_before_cursor, style_config=style_config
+ )
+ if was_formatted:
+ if not text_before_cursor.endswith("\n") and formatted_text.endswith("\n"):
+ formatted_text = formatted_text[:-1]
+ return formatted_text
+ else:
+ return text_before_cursor
+
+
+class PtkHistoryAdapter(History):
+ """
+ prompt_toolkit has its own way of handling history, where it assumes it can
+ push/pull from history.
+
+ """
+
+ def __init__(self, shell):
+ super().__init__()
+ self.shell = shell
+ self._refresh()
+
+ def append_string(self, string):
+ # we rely on sql for that.
+ self._loaded = False
+ self._refresh()
+
+ def _refresh(self):
+ if not self._loaded:
+ self._loaded_strings = list(self.load_history_strings())
+
+ def load_history_strings(self):
+ last_cell = ""
+ res = []
+ for __, ___, cell in self.shell.history_manager.get_tail(
+ self.shell.history_load_length, include_latest=True
+ ):
+ # Ignore blank lines and consecutive duplicates
+ cell = cell.rstrip()
+ if cell and (cell != last_cell):
+ res.append(cell)
+ last_cell = cell
+ yield from res[::-1]
+
+ def store_string(self, string: str) -> None:
+ pass
+
+class TerminalInteractiveShell(InteractiveShell):
+ mime_renderers = Dict().tag(config=True)
+
+ space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
+ 'to reserve for the tab completion menu, '
+ 'search history, etc.; the height of '
+ 'these menus will be at most this value. '
+ 'Increase it if you prefer long and skinny '
+ 'menus, decrease it for short and wide ones.'
+ ).tag(config=True)
+
+ pt_app: UnionType[PromptSession, None] = None
+ auto_suggest: UnionType[
+ AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None
+ ] = None
+ debugger_history = None
+
+ debugger_history_file = Unicode(
+ "~/.pdbhistory", help="File in which to store and read history"
+ ).tag(config=True)
+
+ simple_prompt = Bool(_use_simple_prompt,
+ help="""Use `raw_input` for the REPL, without completion and prompt colors.
+
+ Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known usages are:
+ IPython's own testing machinery, and emacs inferior-shell integration through elpy.
+
+ This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
+ environment variable is set, or the current terminal is not a tty."""
+ ).tag(config=True)
+
+ @property
+ def debugger_cls(self):
+ return Pdb if self.simple_prompt else TerminalPdb
+
+ confirm_exit = Bool(True,
+ help="""
+ Set to confirm when you try to exit IPython with an EOF (Control-D
+ in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
+ you can force a direct exit without any confirmation.""",
+ ).tag(config=True)
+
+ editing_mode = Unicode('emacs',
+ help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
+ ).tag(config=True)
+
+ emacs_bindings_in_vi_insert_mode = Bool(
+ True,
+ help="Add shortcuts from 'emacs' insert mode to 'vi' insert mode.",
+ ).tag(config=True)
+
+ modal_cursor = Bool(
+ True,
+ help="""
+ Cursor shape changes depending on vi mode: beam in vi insert mode,
+ block in nav mode, underscore in replace mode.""",
+ ).tag(config=True)
+
+ ttimeoutlen = Float(
+ 0.01,
+ help="""The time in milliseconds that is waited for a key code
+ to complete.""",
+ ).tag(config=True)
+
+ timeoutlen = Float(
+ 0.5,
+ help="""The time in milliseconds that is waited for a mapped key
+ sequence to complete.""",
+ ).tag(config=True)
+
+ autoformatter = Unicode(
+ None,
+ help="Autoformatter to reformat Terminal code. Can be `'black'`, `'yapf'` or `None`",
+ allow_none=True
+ ).tag(config=True)
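+ # Illustrative configuration sketch (not part of the upstream module),
+ # e.g. in an ipython_config.py:
+ #
+ #     c.TerminalInteractiveShell.autoformatter = "black"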
+
+ auto_match = Bool(
+ False,
+ help="""
+ Automatically add/delete closing bracket or quote when opening bracket or quote is entered/deleted.
+ Brackets: (), [], {}
+ Quotes: '', \"\"
+ """,
+ ).tag(config=True)
+
+ mouse_support = Bool(False,
+ help="Enable mouse support in the prompt\n(Note: prevents selecting text with the mouse)"
+ ).tag(config=True)
+
+ # We don't load the list of styles for the help string, because loading
+ # Pygments plugins takes time and can cause unexpected errors.
+ highlighting_style = Union([Unicode('legacy'), Type(klass=Style)],
+ help="""The name or class of a Pygments style to use for syntax
+ highlighting. To see available styles, run `pygmentize -L styles`."""
+ ).tag(config=True)
+
+ @validate('editing_mode')
+ def _validate_editing_mode(self, proposal):
+ if proposal['value'].lower() == 'vim':
+ proposal['value']= 'vi'
+ elif proposal['value'].lower() == 'default':
+ proposal['value']= 'emacs'
+
+ if hasattr(EditingMode, proposal['value'].upper()):
+ return proposal['value'].lower()
+
+ return self.editing_mode
+
+
+ @observe('editing_mode')
+ def _editing_mode(self, change):
+ if self.pt_app:
+ self.pt_app.editing_mode = getattr(EditingMode, change.new.upper())
+
+ def _set_formatter(self, formatter):
+ if formatter is None:
+ self.reformat_handler = lambda x:x
+ elif formatter == 'black':
+ self.reformat_handler = black_reformat_handler
+ elif formatter == "yapf":
+ self.reformat_handler = yapf_reformat_handler
+ else:
+ raise ValueError
+
+ @observe("autoformatter")
+ def _autoformatter_changed(self, change):
+ formatter = change.new
+ self._set_formatter(formatter)
+
+ @observe('highlighting_style')
+ @observe('colors')
+ def _highlighting_style_changed(self, change):
+ self.refresh_style()
+
+ def refresh_style(self):
+ self._style = self._make_style_from_name_or_cls(self.highlighting_style)
+
+
+ highlighting_style_overrides = Dict(
+ help="Override highlighting format for specific tokens"
+ ).tag(config=True)
+
+ true_color = Bool(False,
+ help="""Use 24bit colors instead of 256 colors in prompt highlighting.
+ If your terminal supports true color, the following command should
+ print ``TRUECOLOR`` in orange::
+
+ printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"
+ """,
+ ).tag(config=True)
+
+ editor = Unicode(get_default_editor(),
+ help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
+ ).tag(config=True)
+
+ prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True)
+
+ prompts = Instance(Prompts)
+
+ @default('prompts')
+ def _prompts_default(self):
+ return self.prompts_class(self)
+
+# @observe('prompts')
+# def _(self, change):
+# self._update_layout()
+
+ @default('displayhook_class')
+ def _displayhook_class_default(self):
+ return RichPromptDisplayHook
+
+ term_title = Bool(True,
+ help="Automatically set the terminal title"
+ ).tag(config=True)
+
+ term_title_format = Unicode("IPython: {cwd}",
+ help="Customize the terminal title format. This is a python format string. " +
+ "Available substitutions are: {cwd}."
+ ).tag(config=True)
+
+ display_completions = Enum(('column', 'multicolumn','readlinelike'),
+ help= ( "Options for displaying tab completions, 'column', 'multicolumn', and "
+ "'readlinelike'. These options are for `prompt_toolkit`, see "
+ "`prompt_toolkit` documentation for more information."
+ ),
+ default_value='multicolumn').tag(config=True)
+
+ highlight_matching_brackets = Bool(True,
+ help="Highlight matching brackets.",
+ ).tag(config=True)
+
+ extra_open_editor_shortcuts = Bool(False,
+ help="Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. "
+ "This is in addition to the F2 binding, which is always enabled."
+ ).tag(config=True)
+
+ handle_return = Any(None,
+ help="Provide an alternative handler to be called when the user presses "
+ "Return. This is an advanced option intended for debugging, which "
+ "may be changed or removed in later releases."
+ ).tag(config=True)
+
+ enable_history_search = Bool(True,
+ help="Allows to enable/disable the prompt toolkit history search"
+ ).tag(config=True)
+
+ autosuggestions_provider = Unicode(
+ "NavigableAutoSuggestFromHistory",
+ help="Specifies from which source automatic suggestions are provided. "
+ "Can be set to ``'NavigableAutoSuggestFromHistory'`` (:kbd:`up` and "
+ ":kbd:`down` swap suggestions), ``'AutoSuggestFromHistory'``, "
+ " or ``None`` to disable automatic suggestions. "
+ "Default is `'NavigableAutoSuggestFromHistory`'.",
+ allow_none=True,
+ ).tag(config=True)
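+ # Illustrative configuration sketch (not part of the upstream module),
+ # e.g. in an ipython_config.py:
+ #
+ #     c.TerminalInteractiveShell.autosuggestions_provider = "AutoSuggestFromHistory"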
+
+ def _set_autosuggestions(self, provider):
+ # disconnect old handler
+ if self.auto_suggest and isinstance(
+ self.auto_suggest, NavigableAutoSuggestFromHistory
+ ):
+ self.auto_suggest.disconnect()
+ if provider is None:
+ self.auto_suggest = None
+ elif provider == "AutoSuggestFromHistory":
+ self.auto_suggest = AutoSuggestFromHistory()
+ elif provider == "NavigableAutoSuggestFromHistory":
+ self.auto_suggest = NavigableAutoSuggestFromHistory()
+ else:
+ raise ValueError("No valid provider.")
+ if self.pt_app:
+ self.pt_app.auto_suggest = self.auto_suggest
+
+ @observe("autosuggestions_provider")
+ def _autosuggestions_provider_changed(self, change):
+ provider = change.new
+ self._set_autosuggestions(provider)
+
+ shortcuts = List(
+ trait=Dict(
+ key_trait=Enum(
+ [
+ "command",
+ "match_keys",
+ "match_filter",
+ "new_keys",
+ "new_filter",
+ "create",
+ ]
+ ),
+ per_key_traits={
+ "command": Unicode(),
+ "match_keys": List(Unicode()),
+ "match_filter": Unicode(),
+ "new_keys": List(Unicode()),
+ "new_filter": Unicode(),
+ "create": Bool(False),
+ },
+ ),
+ help="""Add, disable or modifying shortcuts.
+
+ Each entry on the list should be a dictionary with ``command`` key
+ identifying the target function executed by the shortcut and at least
+ one of the following:
+
+ - ``match_keys``: list of keys used to match an existing shortcut,
+ - ``match_filter``: shortcut filter used to match an existing shortcut,
+ - ``new_keys``: list of keys to set,
+ - ``new_filter``: a new shortcut filter to set
+
+ The filters have to be composed of pre-defined verbs and joined by one
+ of the following conjunctions: ``&`` (and), ``|`` (or), ``~`` (not).
+ The pre-defined verbs are:
+
+ {}
+
+
+ To disable a shortcut set ``new_keys`` to an empty list.
+ To add a shortcut add key ``create`` with value ``True``.
+
+ When modifying/disabling shortcuts, ``match_keys``/``match_filter`` can
+ be omitted if the provided specification uniquely identifies a shortcut
+ to be modified/disabled. When modifying a shortcut ``new_filter`` or
+ ``new_keys`` can be omitted which will result in reuse of the existing
+ filter/keys.
+
+ Only shortcuts defined in IPython (and not default prompt-toolkit
+ shortcuts) can be modified or disabled. The full list of shortcuts,
+ command identifiers and filters is available under
+ :ref:`terminal-shortcuts-list`.
+ """.format(
+ "\n ".join([f"- `{k}`" for k in KEYBINDING_FILTERS])
+ ),
+ ).tag(config=True)
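+
+ # Illustrative configuration sketch (not part of the upstream module), e.g.
+ # in an ipython_config.py. The command identifier and keys below are
+ # assumptions shown only as an example; they must match an entry registered
+ # in KEY_BINDINGS.
+ #
+ #     c.TerminalInteractiveShell.shortcuts = [
+ #         {"command": "IPython:auto_match.skip_over", "new_keys": []},
+ #     ]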
+
+ @observe("shortcuts")
+ def _shortcuts_changed(self, change):
+ if self.pt_app:
+ self.pt_app.key_bindings = self._merge_shortcuts(user_shortcuts=change.new)
+
+ def _merge_shortcuts(self, user_shortcuts):
+ # rebuild the bindings list from scratch
+ key_bindings = create_ipython_shortcuts(self)
+
+ # for now we only allow adding shortcuts for commands which are already
+ # registered; this is a security precaution.
+ known_commands = {
+ create_identifier(binding.command): binding.command
+ for binding in KEY_BINDINGS
+ }
+ shortcuts_to_skip = []
+ shortcuts_to_add = []
+
+ for shortcut in user_shortcuts:
+ command_id = shortcut["command"]
+ if command_id not in known_commands:
+ allowed_commands = "\n - ".join(known_commands)
+ raise ValueError(
+ f"{command_id} is not a known shortcut command."
+ f" Allowed commands are: \n - {allowed_commands}"
+ )
+ old_keys = shortcut.get("match_keys", None)
+ old_filter = (
+ filter_from_string(shortcut["match_filter"])
+ if "match_filter" in shortcut
+ else None
+ )
+ matching = [
+ binding
+ for binding in KEY_BINDINGS
+ if (
+ (old_filter is None or binding.filter == old_filter)
+ and (old_keys is None or [k for k in binding.keys] == old_keys)
+ and create_identifier(binding.command) == command_id
+ )
+ ]
+
+ new_keys = shortcut.get("new_keys", None)
+ new_filter = shortcut.get("new_filter", None)
+
+ command = known_commands[command_id]
+
+ creating_new = shortcut.get("create", False)
+ modifying_existing = not creating_new and (
+ new_keys is not None or new_filter
+ )
+
+ if creating_new and new_keys == []:
+ raise ValueError("Cannot add a shortcut without keys")
+
+ if modifying_existing:
+ specification = {
+ key: shortcut[key]
+ for key in ["command", "filter"]
+ if key in shortcut
+ }
+ if len(matching) == 0:
+ raise ValueError(
+ f"No shortcuts matching {specification} found in {KEY_BINDINGS}"
+ )
+ elif len(matching) > 1:
+ raise ValueError(
+ f"Multiple shortcuts matching {specification} found,"
+ f" please add keys/filter to select one of: {matching}"
+ )
+
+ matched = matching[0]
+ old_filter = matched.filter
+ old_keys = list(matched.keys)
+ shortcuts_to_skip.append(
+ RuntimeBinding(
+ command,
+ keys=old_keys,
+ filter=old_filter,
+ )
+ )
+
+ if new_keys != []:
+ shortcuts_to_add.append(
+ RuntimeBinding(
+ command,
+ keys=new_keys or old_keys,
+ filter=filter_from_string(new_filter)
+ if new_filter is not None
+ else (
+ old_filter
+ if old_filter is not None
+ else filter_from_string("always")
+ ),
+ )
+ )
+
+ # rebuild the bindings list from scratch
+ key_bindings = create_ipython_shortcuts(self, skip=shortcuts_to_skip)
+ for binding in shortcuts_to_add:
+ add_binding(key_bindings, binding)
+
+ return key_bindings
+
+ prompt_includes_vi_mode = Bool(True,
+ help="Display the current vi mode (when using vi editing mode)."
+ ).tag(config=True)
+
+ @observe('term_title')
+ def init_term_title(self, change=None):
+ # Enable or disable the terminal title.
+ if self.term_title and _is_tty:
+ toggle_set_term_title(True)
+ set_term_title(self.term_title_format.format(cwd=abbrev_cwd()))
+ else:
+ toggle_set_term_title(False)
+
+ def restore_term_title(self):
+ if self.term_title and _is_tty:
+ restore_term_title()
+
+ def init_display_formatter(self):
+ super(TerminalInteractiveShell, self).init_display_formatter()
+ # terminal only supports plain text
+ self.display_formatter.active_types = ["text/plain"]
+
+ def init_prompt_toolkit_cli(self):
+ if self.simple_prompt:
+ # Fall back to plain non-interactive output for tests.
+ # This is very limited.
+ def prompt():
+ prompt_text = "".join(x[1] for x in self.prompts.in_prompt_tokens())
+ lines = [input(prompt_text)]
+ prompt_continuation = "".join(x[1] for x in self.prompts.continuation_prompt_tokens())
+ while self.check_complete('\n'.join(lines))[0] == 'incomplete':
+ lines.append( input(prompt_continuation) )
+ return '\n'.join(lines)
+ self.prompt_for_code = prompt
+ return
+
+ # Set up keyboard shortcuts
+ key_bindings = self._merge_shortcuts(user_shortcuts=self.shortcuts)
+
+ # Pre-populate history from IPython's history database
+ history = PtkHistoryAdapter(self)
+
+ self._style = self._make_style_from_name_or_cls(self.highlighting_style)
+ self.style = DynamicStyle(lambda: self._style)
+
+ editing_mode = getattr(EditingMode, self.editing_mode.upper())
+
+ self.pt_loop = asyncio.new_event_loop()
+ self.pt_app = PromptSession(
+ auto_suggest=self.auto_suggest,
+ editing_mode=editing_mode,
+ key_bindings=key_bindings,
+ history=history,
+ completer=IPythonPTCompleter(shell=self),
+ enable_history_search=self.enable_history_search,
+ style=self.style,
+ include_default_pygments_style=False,
+ mouse_support=self.mouse_support,
+ enable_open_in_editor=self.extra_open_editor_shortcuts,
+ color_depth=self.color_depth,
+ tempfile_suffix=".py",
+ **self._extra_prompt_options(),
+ )
+ if isinstance(self.auto_suggest, NavigableAutoSuggestFromHistory):
+ self.auto_suggest.connect(self.pt_app)
+
+ def _make_style_from_name_or_cls(self, name_or_cls):
+ """
+ Small wrapper that makes an IPython-compatible style from a style name.
+
+ We need this to add styling for the prompt, etc.
+ """
+ style_overrides = {}
+ if name_or_cls == 'legacy':
+ legacy = self.colors.lower()
+ if legacy == 'linux':
+ style_cls = get_style_by_name('monokai')
+ style_overrides = _style_overrides_linux
+ elif legacy == 'lightbg':
+ style_overrides = _style_overrides_light_bg
+ style_cls = get_style_by_name('pastie')
+ elif legacy == 'neutral':
+ # The default theme needs to be visible on both a dark background
+ # and a light background, because we can't tell what the terminal
+ # looks like. These tweaks to the default theme help with that.
+ style_cls = get_style_by_name('default')
+ style_overrides.update({
+ Token.Number: '#ansigreen',
+ Token.Operator: 'noinherit',
+ Token.String: '#ansiyellow',
+ Token.Name.Function: '#ansiblue',
+ Token.Name.Class: 'bold #ansiblue',
+ Token.Name.Namespace: 'bold #ansiblue',
+ Token.Name.Variable.Magic: '#ansiblue',
+ Token.Prompt: '#ansigreen',
+ Token.PromptNum: '#ansibrightgreen bold',
+ Token.OutPrompt: '#ansired',
+ Token.OutPromptNum: '#ansibrightred bold',
+ })
+
+ # Hack: Due to limited color support on the Windows console
+ # the prompt colors will be wrong without this
+ if os.name == 'nt':
+ style_overrides.update({
+ Token.Prompt: '#ansidarkgreen',
+ Token.PromptNum: '#ansigreen bold',
+ Token.OutPrompt: '#ansidarkred',
+ Token.OutPromptNum: '#ansired bold',
+ })
+ elif legacy == 'nocolor':
+ style_cls = _NoStyle
+ style_overrides = {}
+ else:
+ raise ValueError('Got unknown colors: ', legacy)
+ else:
+ if isinstance(name_or_cls, str):
+ style_cls = get_style_by_name(name_or_cls)
+ else:
+ style_cls = name_or_cls
+ style_overrides = {
+ Token.Prompt: '#ansigreen',
+ Token.PromptNum: '#ansibrightgreen bold',
+ Token.OutPrompt: '#ansired',
+ Token.OutPromptNum: '#ansibrightred bold',
+ }
+ style_overrides.update(self.highlighting_style_overrides)
+ style = merge_styles([
+ style_from_pygments_cls(style_cls),
+ style_from_pygments_dict(style_overrides),
+ ])
+
+ return style
+
+ @property
+ def pt_complete_style(self):
+ return {
+ 'multicolumn': CompleteStyle.MULTI_COLUMN,
+ 'column': CompleteStyle.COLUMN,
+ 'readlinelike': CompleteStyle.READLINE_LIKE,
+ }[self.display_completions]
+
+ @property
+ def color_depth(self):
+ return (ColorDepth.TRUE_COLOR if self.true_color else None)
+
+ def _extra_prompt_options(self):
+ """
+ Return the current layout option for the current Terminal InteractiveShell
+ """
+ def get_message():
+ return PygmentsTokens(self.prompts.in_prompt_tokens())
+
+ if self.editing_mode == 'emacs':
+ # With emacs mode the prompt is (usually) static, so we call the
+ # function only once. With vi mode it can toggle between [ins] and
+ # [nor], so we can't precompute it.
+ # Here we favor the default keybinding, which almost everybody uses,
+ # to decrease CPU usage.
+ # If we have issues with users with custom Prompts, we can see how to
+ # work around this.
+ get_message = get_message()
+
+ options = {
+ "complete_in_thread": False,
+ "lexer": IPythonPTLexer(),
+ "reserve_space_for_menu": self.space_for_menu,
+ "message": get_message,
+ "prompt_continuation": (
+ lambda width, lineno, is_soft_wrap: PygmentsTokens(
+ self.prompts.continuation_prompt_tokens(width)
+ )
+ ),
+ "multiline": True,
+ "complete_style": self.pt_complete_style,
+ "input_processors": [
+ # Highlight matching brackets, but only when this setting is
+ # enabled, and only when the DEFAULT_BUFFER has the focus.
+ ConditionalProcessor(
+ processor=HighlightMatchingBracketProcessor(chars="[](){}"),
+ filter=HasFocus(DEFAULT_BUFFER)
+ & ~IsDone()
+ & Condition(lambda: self.highlight_matching_brackets),
+ ),
+ # Show auto-suggestion in lines other than the last line.
+ ConditionalProcessor(
+ processor=AppendAutoSuggestionInAnyLine(),
+ filter=HasFocus(DEFAULT_BUFFER)
+ & ~IsDone()
+ & Condition(
+ lambda: isinstance(
+ self.auto_suggest, NavigableAutoSuggestFromHistory
+ )
+ ),
+ ),
+ ],
+ }
+ if not PTK3:
+ options['inputhook'] = self.inputhook
+
+ return options
+
+ def prompt_for_code(self):
+ if self.rl_next_input:
+ default = self.rl_next_input
+ self.rl_next_input = None
+ else:
+ default = ''
+
+ # In order to make sure that asyncio code written in the
+ # interactive shell doesn't interfere with the prompt, we run the
+ # prompt in a different event loop.
+ # If we don't do this, people could spawn a coroutine with a
+ # while/True loop inside that would freeze the prompt.
+
+ policy = asyncio.get_event_loop_policy()
+ old_loop = get_asyncio_loop()
+
+ # FIXME: prompt_toolkit is using the deprecated `asyncio.get_event_loop`
+ # to get the current event loop.
+ # This will probably be replaced by an attribute or input argument,
+ # at which point we can stop calling the soon-to-be-deprecated `set_event_loop` here.
+ if old_loop is not self.pt_loop:
+ policy.set_event_loop(self.pt_loop)
+ try:
+ with patch_stdout(raw=True):
+ text = self.pt_app.prompt(
+ default=default,
+ **self._extra_prompt_options())
+ finally:
+ # Restore the original event loop.
+ if old_loop is not None and old_loop is not self.pt_loop:
+ policy.set_event_loop(old_loop)
+
+ return text
+
+ def enable_win_unicode_console(self):
+ # Since IPython 7.10 no longer supports Python < 3.6, and PEP 528 makes Python use the Unicode APIs for the
+ # Windows console by default, win_unicode_console (WUC) shouldn't be needed.
+ warn("`enable_win_unicode_console` is deprecated since IPython 7.10, does not do anything and will be removed in the future",
+ DeprecationWarning,
+ stacklevel=2)
+
+ def init_io(self):
+ if sys.platform not in {'win32', 'cli'}:
+ return
+
+ import colorama
+ colorama.init()
+
+ def init_magics(self):
+ super(TerminalInteractiveShell, self).init_magics()
+ self.register_magics(TerminalMagics)
+
+ def init_alias(self):
+ # The parent class defines aliases that can be safely used with any
+ # frontend.
+ super(TerminalInteractiveShell, self).init_alias()
+
+ # Now define aliases that only make sense on the terminal, because they
+ # need direct access to the console in a way that we can't emulate in
+ # GUI or web frontend
+ if os.name == 'posix':
+ for cmd in ('clear', 'more', 'less', 'man'):
+ self.alias_manager.soft_define_alias(cmd, cmd)
+
+
+ def __init__(self, *args, **kwargs) -> None:
+ super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
+ self._set_autosuggestions(self.autosuggestions_provider)
+ self.init_prompt_toolkit_cli()
+ self.init_term_title()
+ self.keep_running = True
+ self._set_formatter(self.autoformatter)
+
+
+ def ask_exit(self):
+ self.keep_running = False
+
+ rl_next_input = None
+
+ def interact(self):
+ self.keep_running = True
+ while self.keep_running:
+ print(self.separate_in, end='')
+
+ try:
+ code = self.prompt_for_code()
+ except EOFError:
+ if (not self.confirm_exit) \
+ or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
+ self.ask_exit()
+
+ else:
+ if code:
+ self.run_cell(code, store_history=True)
+
+ def mainloop(self):
+ # An extra layer of protection in case someone mashing Ctrl-C breaks
+ # out of our internal code.
+ while True:
+ try:
+ self.interact()
+ break
+ except KeyboardInterrupt as e:
+ print("\n%s escaped interact()\n" % type(e).__name__)
+ finally:
+ # An interrupt during the eventloop will mess up the
+ # internal state of the prompt_toolkit library.
+ # Stopping the eventloop fixes this, see
+ # https://github.com/ipython/ipython/pull/9867
+ if hasattr(self, '_eventloop'):
+ self._eventloop.stop()
+
+ self.restore_term_title()
+
+ # Try to call some at-exit operations optimistically, as some things can't
+ # be done during interpreter shutdown. This is technically inaccurate as
+ # it makes mainloop not re-callable, but that should be a rare, if not
+ # nonexistent, use case.
+
+ self._atexit_once()
+
+
+ _inputhook = None
+ def inputhook(self, context):
+ if self._inputhook is not None:
+ self._inputhook(context)
+
+ active_eventloop = None
+ def enable_gui(self, gui=None):
+ if self._inputhook is None and gui is None:
+ print("No event loop hook running.")
+ return
+
+ if self._inputhook is not None and gui is not None:
+ print(
+ f"Shell is already running a gui event loop for {self.active_eventloop}. "
+ "Call with no arguments to disable the current loop."
+ )
+ return
+ if self._inputhook is not None and gui is None:
+ self.active_eventloop = self._inputhook = None
+
+ if gui and (gui not in {"inline", "webagg"}):
+ # This hook runs with each cycle of the `prompt_toolkit`'s event loop.
+ self.active_eventloop, self._inputhook = get_inputhook_name_and_func(gui)
+ else:
+ self.active_eventloop = self._inputhook = None
+
+ # For prompt_toolkit 3.0. We have to create an asyncio event loop with
+ # this inputhook.
+ if PTK3:
+ import asyncio
+ from prompt_toolkit.eventloop import new_eventloop_with_inputhook
+
+ if gui == 'asyncio':
+ # When we integrate the asyncio event loop, run the UI in the
+ # same event loop as the rest of the code. Don't use an actual
+ # input hook. (asyncio is not made for nesting event loops.)
+ self.pt_loop = get_asyncio_loop()
+ print("Installed asyncio event loop hook.")
+
+ elif self._inputhook:
+ # If an inputhook was set, create a new asyncio event loop with
+ # this inputhook for the prompt.
+ self.pt_loop = new_eventloop_with_inputhook(self._inputhook)
+ print(f"Installed {self.active_eventloop} event loop hook.")
+ else:
+ # When there's no inputhook, run the prompt in a separate
+ # asyncio event loop.
+ self.pt_loop = asyncio.new_event_loop()
+ print("GUI event loop hook disabled.")
+
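+ # Illustrative usage sketch (not part of the upstream module): at the
+ # IPython prompt this is normally driven through the %gui line magic, e.g.
+ #
+ #     %gui qt      # install the qt event loop hook
+ #     %gui         # disable the currently installed hook
+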
+ # Run !system commands directly, not through pipes, so terminal programs
+ # work correctly.
+ system = InteractiveShell.system_raw
+
+ def auto_rewrite_input(self, cmd):
+ """Overridden from the parent class to use fancy rewriting prompt"""
+ if not self.show_rewritten_input:
+ return
+
+ tokens = self.prompts.rewrite_prompt_tokens()
+ if self.pt_app:
+ print_formatted_text(PygmentsTokens(tokens), end='',
+ style=self.pt_app.app.style)
+ print(cmd)
+ else:
+ prompt = ''.join(s for t, s in tokens)
+ print(prompt, cmd, sep='')
+
+ _prompts_before = None
+ def switch_doctest_mode(self, mode):
+ """Switch prompts to classic for %doctest_mode"""
+ if mode:
+ self._prompts_before = self.prompts
+ self.prompts = ClassicPrompts(self)
+ elif self._prompts_before:
+ self.prompts = self._prompts_before
+ self._prompts_before = None
+# self._update_layout()
+
+
+InteractiveShellABC.register(TerminalInteractiveShell)
+
+if __name__ == '__main__':
+ TerminalInteractiveShell.instance().interact()
diff --git a/contrib/python/ipython/py3/IPython/terminal/ipapp.py b/contrib/python/ipython/py3/IPython/terminal/ipapp.py
new file mode 100644
index 0000000000..6280bce3b2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/ipapp.py
@@ -0,0 +1,343 @@
+#!/usr/bin/env python
+# encoding: utf-8
+"""
+The :class:`~traitlets.config.application.Application` object for the command
+line :command:`ipython` program.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+import os
+import sys
+import warnings
+
+from traitlets.config.loader import Config
+from traitlets.config.application import boolean_flag, catch_config_error
+from IPython.core import release
+from IPython.core import usage
+from IPython.core.completer import IPCompleter
+from IPython.core.crashhandler import CrashHandler
+from IPython.core.formatters import PlainTextFormatter
+from IPython.core.history import HistoryManager
+from IPython.core.application import (
+ ProfileDir, BaseIPythonApplication, base_flags, base_aliases
+)
+from IPython.core.magic import MagicsManager
+from IPython.core.magics import (
+ ScriptMagics, LoggingMagics
+)
+from IPython.core.shellapp import (
+ InteractiveShellApp, shell_flags, shell_aliases
+)
+from IPython.extensions.storemagic import StoreMagics
+from .interactiveshell import TerminalInteractiveShell
+from IPython.paths import get_ipython_dir
+from traitlets import (
+ Bool, List, default, observe, Type
+)
+
+#-----------------------------------------------------------------------------
+# Globals, utilities and helpers
+#-----------------------------------------------------------------------------
+
+_examples = """
+ipython --matplotlib # enable matplotlib integration
+ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
+
+ipython --log-level=DEBUG # set logging to DEBUG
+ipython --profile=foo # start with profile foo
+
+ipython profile create foo # create profile foo w/ default config files
+ipython help profile # show the help for the profile subcmd
+
+ipython locate # print the path to the IPython directory
+ipython locate profile foo # print the path to the directory for profile `foo`
+"""
+
+#-----------------------------------------------------------------------------
+# Crash handler for this application
+#-----------------------------------------------------------------------------
+
+class IPAppCrashHandler(CrashHandler):
+ """sys.excepthook for IPython itself, leaves a detailed report on disk."""
+
+ def __init__(self, app):
+ contact_name = release.author
+ contact_email = release.author_email
+ bug_tracker = 'https://github.com/ipython/ipython/issues'
+ super(IPAppCrashHandler,self).__init__(
+ app, contact_name, contact_email, bug_tracker
+ )
+
+ def make_report(self,traceback):
+ """Return a string containing a crash report."""
+
+ sec_sep = self.section_sep
+ # Start with parent report
+ report = [super(IPAppCrashHandler, self).make_report(traceback)]
+ # Add interactive-specific info we may have
+ rpt_add = report.append
+ try:
+ rpt_add(sec_sep+"History of session input:")
+ for line in self.app.shell.user_ns['_ih']:
+ rpt_add(line)
+ rpt_add('\n*** Last line of input (may not be in above history):\n')
+ rpt_add(self.app.shell._last_input_line+'\n')
+ except:
+ pass
+
+ return ''.join(report)
+
+#-----------------------------------------------------------------------------
+# Aliases and Flags
+#-----------------------------------------------------------------------------
+flags = dict(base_flags)
+flags.update(shell_flags)
+frontend_flags = {}
+addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
+addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
+ 'Turn on auto editing of files with syntax errors.',
+ 'Turn off auto editing of files with syntax errors.'
+)
+addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
+ "Force simple minimal prompt using `raw_input`",
+ "Use a rich interactive prompt with prompt_toolkit",
+)
+
+addflag('banner', 'TerminalIPythonApp.display_banner',
+ "Display a banner upon starting IPython.",
+ "Don't display a banner upon starting IPython."
+)
+addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
+ """Set to confirm when you try to exit IPython with an EOF (Control-D
+ in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
+ you can force a direct exit without any confirmation.""",
+ "Don't prompt the user when exiting."
+)
+addflag('term-title', 'TerminalInteractiveShell.term_title',
+ "Enable auto setting the terminal title.",
+ "Disable auto setting the terminal title."
+)
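+
+# Illustration (based on how traitlets' boolean_flag works, not part of the
+# upstream file): each addflag() call above registers a --name/--no-name pair,
+# so for example the 'banner' entry is used as:
+#
+#     ipython --banner       # TerminalIPythonApp.display_banner = True
+#     ipython --no-banner    # TerminalIPythonApp.display_banner = False
+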
+classic_config = Config()
+classic_config.InteractiveShell.cache_size = 0
+classic_config.PlainTextFormatter.pprint = False
+classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
+classic_config.InteractiveShell.separate_in = ''
+classic_config.InteractiveShell.separate_out = ''
+classic_config.InteractiveShell.separate_out2 = ''
+classic_config.InteractiveShell.colors = 'NoColor'
+classic_config.InteractiveShell.xmode = 'Plain'
+
+frontend_flags['classic']=(
+ classic_config,
+ "Gives IPython a similar feel to the classic Python prompt."
+)
+# # log doesn't make so much sense this way anymore
+# paa('--log','-l',
+# action='store_true', dest='InteractiveShell.logstart',
+# help="Start logging to the default log file (./ipython_log.py).")
+#
+# # quick is harder to implement
+frontend_flags['quick']=(
+ {'TerminalIPythonApp' : {'quick' : True}},
+ "Enable quick startup with no config files."
+)
+
+frontend_flags['i'] = (
+ {'TerminalIPythonApp' : {'force_interact' : True}},
+ """If running code from the command line, become interactive afterwards.
+ It is often useful to follow this with `--` to treat remaining flags as
+ script arguments.
+ """
+)
+flags.update(frontend_flags)
+
+aliases = dict(base_aliases)
+aliases.update(shell_aliases) # type: ignore[arg-type]
+
+#-----------------------------------------------------------------------------
+# Main classes and functions
+#-----------------------------------------------------------------------------
+
+
+class LocateIPythonApp(BaseIPythonApplication):
+ description = """print the path to the IPython dir"""
+ subcommands = dict(
+ profile=('IPython.core.profileapp.ProfileLocate',
+ "print the path to an IPython profile directory",
+ ),
+ )
+ def start(self):
+ if self.subapp is not None:
+ return self.subapp.start()
+ else:
+ print(self.ipython_dir)
+
+
+class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
+ name = u'ipython'
+ description = usage.cl_usage
+    crash_handler_class = IPAppCrashHandler  # type: ignore[assignment]
+ examples = _examples
+
+ flags = flags
+ aliases = aliases
+ classes = List()
+
+ interactive_shell_class = Type(
+        klass=object,   # use default_value otherwise, which only allows subclasses.
+ default_value=TerminalInteractiveShell,
+ help="Class to use to instantiate the TerminalInteractiveShell object. Useful for custom Frontends"
+ ).tag(config=True)
+
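+    # A minimal sketch, assuming a hypothetical class path: a custom frontend
+    # would typically point this trait at its own shell in ipython_config.py:
+    #
+    #     c.TerminalIPythonApp.interactive_shell_class = "mypkg.MyTerminalShell"
+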
+ @default('classes')
+ def _classes_default(self):
+ """This has to be in a method, for TerminalIPythonApp to be available."""
+ return [
+ InteractiveShellApp, # ShellApp comes before TerminalApp, because
+ self.__class__, # it will also affect subclasses (e.g. QtConsole)
+ TerminalInteractiveShell,
+ HistoryManager,
+ MagicsManager,
+ ProfileDir,
+ PlainTextFormatter,
+ IPCompleter,
+ ScriptMagics,
+ LoggingMagics,
+ StoreMagics,
+ ]
+
+ subcommands = dict(
+ profile = ("IPython.core.profileapp.ProfileApp",
+ "Create and manage IPython profiles."
+ ),
+ kernel = ("ipykernel.kernelapp.IPKernelApp",
+ "Start a kernel without an attached frontend."
+ ),
+ locate=('IPython.terminal.ipapp.LocateIPythonApp',
+ LocateIPythonApp.description
+ ),
+ history=('IPython.core.historyapp.HistoryApp',
+ "Manage the IPython history database."
+ ),
+ )
+
+
+ # *do* autocreate requested profile, but don't create the config file.
+ auto_create=Bool(True)
+ # configurables
+ quick = Bool(False,
+ help="""Start IPython quickly by skipping the loading of config files."""
+ ).tag(config=True)
+ @observe('quick')
+ def _quick_changed(self, change):
+ if change['new']:
+ self.load_config_file = lambda *a, **kw: None
+
+ display_banner = Bool(True,
+ help="Whether to display a banner upon starting IPython."
+ ).tag(config=True)
+
+    # if there is code or files to run from the cmd line, don't interact
+ # unless the --i flag (App.force_interact) is true.
+ force_interact = Bool(False,
+ help="""If a command or file is given via the command-line,
+ e.g. 'ipython foo.py', start an interactive shell after executing the
+ file or command."""
+ ).tag(config=True)
+ @observe('force_interact')
+ def _force_interact_changed(self, change):
+ if change['new']:
+ self.interact = True
+
+ @observe('file_to_run', 'code_to_run', 'module_to_run')
+ def _file_to_run_changed(self, change):
+ new = change['new']
+ if new:
+ self.something_to_run = True
+ if new and not self.force_interact:
+ self.interact = False
+
+ # internal, not-configurable
+ something_to_run=Bool(False)
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ """Do actions after construct, but before starting the app."""
+ super(TerminalIPythonApp, self).initialize(argv)
+ if self.subapp is not None:
+ # don't bother initializing further, starting subapp
+ return
+ # print self.extra_args
+ if self.extra_args and not self.something_to_run:
+ self.file_to_run = self.extra_args[0]
+ self.init_path()
+ # create the shell
+ self.init_shell()
+ # and draw the banner
+ self.init_banner()
+ # Now a variety of things that happen after the banner is printed.
+ self.init_gui_pylab()
+ self.init_extensions()
+ self.init_code()
+
+ def init_shell(self):
+ """initialize the InteractiveShell instance"""
+ # Create an InteractiveShell instance.
+ # shell.display_banner should always be False for the terminal
+ # based app, because we call shell.show_banner() by hand below
+ # so the banner shows *before* all extension loading stuff.
+ self.shell = self.interactive_shell_class.instance(parent=self,
+ profile_dir=self.profile_dir,
+ ipython_dir=self.ipython_dir, user_ns=self.user_ns)
+ self.shell.configurables.append(self)
+
+ def init_banner(self):
+ """optionally display the banner"""
+ if self.display_banner and self.interact:
+ self.shell.show_banner()
+ # Make sure there is a space below the banner.
+ if self.log_level <= logging.INFO: print()
+
+ def _pylab_changed(self, name, old, new):
+ """Replace --pylab='inline' with --pylab='auto'"""
+ if new == 'inline':
+ warnings.warn("'inline' not available as pylab backend, "
+ "using 'auto' instead.")
+ self.pylab = 'auto'
+
+ def start(self):
+ if self.subapp is not None:
+ return self.subapp.start()
+        # perform any pre-exec steps:
+ if self.interact:
+ self.log.debug("Starting IPython's mainloop...")
+ self.shell.mainloop()
+ else:
+ self.log.debug("IPython not interactive...")
+ self.shell.restore_term_title()
+ if not self.shell.last_execution_succeeded:
+ sys.exit(1)
+
+def load_default_config(ipython_dir=None):
+ """Load the default config file from the default ipython_dir.
+
+ This is useful for embedded shells.
+ """
+ if ipython_dir is None:
+ ipython_dir = get_ipython_dir()
+
+ profile_dir = os.path.join(ipython_dir, 'profile_default')
+ app = TerminalIPythonApp()
+ app.config_file_paths.append(profile_dir)
+ app.load_config_file()
+ return app.config
+
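+# A rough sketch of the embedded-shell use case mentioned above (illustrative
+# only, not part of the upstream file):
+#
+#     from IPython import embed
+#     from IPython.terminal.ipapp import load_default_config
+#
+#     config = load_default_config()
+#     config.InteractiveShellEmbed.colors = "Linux"   # tweak as needed
+#     embed(config=config)
+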
+launch_new_instance = TerminalIPythonApp.launch_instance
+
+
+if __name__ == '__main__':
+ launch_new_instance()
diff --git a/contrib/python/ipython/py3/IPython/terminal/magics.py b/contrib/python/ipython/py3/IPython/terminal/magics.py
new file mode 100644
index 0000000000..cea53e4a24
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/magics.py
@@ -0,0 +1,214 @@
+"""Extra magics for terminal use."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+from logging import error
+import os
+import sys
+
+from IPython.core.error import TryNext, UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.lib.clipboard import ClipboardEmpty
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.text import SList, strip_email_quotes
+from IPython.utils import py3compat
+
+def get_pasted_lines(sentinel, l_input=py3compat.input, quiet=False):
+ """ Yield pasted lines until the user enters the given sentinel value.
+ """
+ if not quiet:
+ print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
+ % sentinel)
+ prompt = ":"
+ else:
+ prompt = ""
+ while True:
+ try:
+ l = l_input(prompt)
+ if l == sentinel:
+ return
+ else:
+ yield l
+ except EOFError:
+ print('<EOF>')
+ return
+
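+# Note (illustrative, not part of the upstream file): %cpaste below consumes
+# this generator roughly as
+#
+#     block = '\n'.join(get_pasted_lines('--'))
+#
+# i.e. everything pasted before a lone '--' (or an EOF) becomes one code block.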
+
+@magics_class
+class TerminalMagics(Magics):
+ def __init__(self, shell):
+ super(TerminalMagics, self).__init__(shell)
+
+ def store_or_execute(self, block, name, store_history=False):
+ """ Execute a block, or store it in a variable, per the user's request.
+ """
+ if name:
+ # If storing it for further editing
+ self.shell.user_ns[name] = SList(block.splitlines())
+ print("Block assigned to '%s'" % name)
+ else:
+ b = self.preclean_input(block)
+ self.shell.user_ns['pasted_block'] = b
+ self.shell.using_paste_magics = True
+ try:
+ self.shell.run_cell(b, store_history)
+ finally:
+ self.shell.using_paste_magics = False
+
+ def preclean_input(self, block):
+ lines = block.splitlines()
+ while lines and not lines[0].strip():
+ lines = lines[1:]
+ return strip_email_quotes('\n'.join(lines))
+
+ def rerun_pasted(self, name='pasted_block'):
+ """ Rerun a previously pasted command.
+ """
+ b = self.shell.user_ns.get(name)
+
+ # Sanity checks
+ if b is None:
+ raise UsageError('No previous pasted block available')
+ if not isinstance(b, str):
+ raise UsageError(
+ "Variable 'pasted_block' is not a string, can't execute")
+
+ print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
+ self.shell.run_cell(b)
+
+ @line_magic
+ def autoindent(self, parameter_s = ''):
+ """Toggle autoindent on/off (deprecated)"""
+ self.shell.set_autoindent()
+ print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])
+
+ @skip_doctest
+ @line_magic
+ def cpaste(self, parameter_s=''):
+ """Paste & execute a pre-formatted code block from clipboard.
+
+        You must terminate the block with '--' (two minus-signs) or Ctrl-D
+        alone on the line. You can also provide your own sentinel with
+        '%cpaste -s %%' ('%%' is the new sentinel for this operation).
+
+        The block is dedented prior to execution to enable execution of method
+        definitions. '>' and '+' characters at the beginning of a line are
+        ignored, to allow pasting directly from e-mails, diff files and
+        doctests (the '...' continuation prompt is also stripped). The
+        executed block is also assigned to a variable named 'pasted_block' for
+        later editing with '%edit pasted_block'.
+
+        You can also pass a variable name as an argument, e.g. '%cpaste foo'.
+        This assigns the pasted block to the variable 'foo' as a string, without
+        dedenting or executing it (preceding '>>>' and '+' are still stripped).
+
+ '%cpaste -r' re-executes the block previously entered by cpaste.
+ '%cpaste -q' suppresses any additional output messages.
+
+ Do not be alarmed by garbled output on Windows (it's a readline bug).
+ Just press enter and type -- (and press enter again) and the block
+ will be what was just pasted.
+
+ Shell escapes are not supported (yet).
+
+ See Also
+ --------
+ paste : automatically pull code from clipboard.
+
+ Examples
+ --------
+ ::
+
+ In [8]: %cpaste
+ Pasting code; enter '--' alone on the line to stop.
+ :>>> a = ["world!", "Hello"]
+ :>>> print(" ".join(sorted(a)))
+ :--
+ Hello world!
+
+        ::
+
+ In [8]: %cpaste
+ Pasting code; enter '--' alone on the line to stop.
+ :>>> %alias_magic t timeit
+ :>>> %t -n1 pass
+ :--
+ Created `%t` as an alias for `%timeit`.
+ Created `%%t` as an alias for `%%timeit`.
+ 354 ns ± 224 ns per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ """
+ opts, name = self.parse_options(parameter_s, 'rqs:', mode='string')
+ if 'r' in opts:
+ self.rerun_pasted()
+ return
+
+ quiet = ('q' in opts)
+
+ sentinel = opts.get('s', u'--')
+ block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
+ self.store_or_execute(block, name, store_history=True)
+
+ @line_magic
+ def paste(self, parameter_s=''):
+ """Paste & execute a pre-formatted code block from clipboard.
+
+ The text is pulled directly from the clipboard without user
+ intervention and printed back on the screen before execution (unless
+ the -q flag is given to force quiet mode).
+
+ The block is dedented prior to execution to enable execution of method
+ definitions. '>' and '+' characters at the beginning of a line are
+ ignored, to allow pasting directly from e-mails, diff files and
+ doctests (the '...' continuation prompt is also stripped). The
+        executed block is also assigned to a variable named 'pasted_block' for
+        later editing with '%edit pasted_block'.
+
+        You can also pass a variable name as an argument, e.g. '%paste foo'.
+        This assigns the pasted block to the variable 'foo' as a string, without
+        executing it (preceding '>>>' and '+' are still stripped).
+
+ Options:
+
+ -r: re-executes the block previously entered by cpaste.
+
+ -q: quiet mode: do not echo the pasted text back to the terminal.
+
+ IPython statements (magics, shell escapes) are not supported (yet).
+
+ See Also
+ --------
+ cpaste : manually paste code into terminal until you mark its end.
+ """
+ opts, name = self.parse_options(parameter_s, 'rq', mode='string')
+ if 'r' in opts:
+ self.rerun_pasted()
+ return
+ try:
+ block = self.shell.hooks.clipboard_get()
+ except TryNext as clipboard_exc:
+ message = getattr(clipboard_exc, 'args')
+ if message:
+ error(message[0])
+ else:
+ error('Could not get text from the clipboard.')
+ return
+ except ClipboardEmpty as e:
+ raise UsageError("The clipboard appears to be empty") from e
+
+ # By default, echo back to terminal unless quiet mode is requested
+ if 'q' not in opts:
+ sys.stdout.write(self.shell.pycolorize(block))
+ if not block.endswith("\n"):
+ sys.stdout.write("\n")
+ sys.stdout.write("## -- End pasted text --\n")
+
+ self.store_or_execute(block, name, store_history=True)
+
+ # Class-level: add a '%cls' magic only on Windows
+ if sys.platform == 'win32':
+ @line_magic
+ def cls(self, s):
+ """Clear screen.
+ """
+ os.system("cls")
diff --git a/contrib/python/ipython/py3/IPython/terminal/prompts.py b/contrib/python/ipython/py3/IPython/terminal/prompts.py
new file mode 100644
index 0000000000..3f5c07b980
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/prompts.py
@@ -0,0 +1,108 @@
+"""Terminal input and output prompts."""
+
+from pygments.token import Token
+import sys
+
+from IPython.core.displayhook import DisplayHook
+
+from prompt_toolkit.formatted_text import fragment_list_width, PygmentsTokens
+from prompt_toolkit.shortcuts import print_formatted_text
+from prompt_toolkit.enums import EditingMode
+
+
+class Prompts(object):
+ def __init__(self, shell):
+ self.shell = shell
+
+ def vi_mode(self):
+ if (getattr(self.shell.pt_app, 'editing_mode', None) == EditingMode.VI
+ and self.shell.prompt_includes_vi_mode):
+ mode = str(self.shell.pt_app.app.vi_state.input_mode)
+ if mode.startswith('InputMode.'):
+ mode = mode[10:13].lower()
+ elif mode.startswith('vi-'):
+ mode = mode[3:6]
+ return '['+mode+'] '
+ return ''
+
+
+ def in_prompt_tokens(self):
+ return [
+ (Token.Prompt, self.vi_mode() ),
+ (Token.Prompt, 'In ['),
+ (Token.PromptNum, str(self.shell.execution_count)),
+ (Token.Prompt, ']: '),
+ ]
+
+ def _width(self):
+ return fragment_list_width(self.in_prompt_tokens())
+
+ def continuation_prompt_tokens(self, width=None):
+ if width is None:
+ width = self._width()
+ return [
+ (Token.Prompt, (' ' * (width - 5)) + '...: '),
+ ]
+
+ def rewrite_prompt_tokens(self):
+ width = self._width()
+ return [
+ (Token.Prompt, ('-' * (width - 2)) + '> '),
+ ]
+
+ def out_prompt_tokens(self):
+ return [
+ (Token.OutPrompt, 'Out['),
+ (Token.OutPromptNum, str(self.shell.execution_count)),
+ (Token.OutPrompt, ']: '),
+ ]
+
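+# A minimal sketch of a custom prompt class (hypothetical, for illustration,
+# not part of the upstream file): custom prompts override the *_tokens methods
+# to return lists of (Token, text) pairs and are selected via the
+# c.TerminalInteractiveShell.prompts_class configurable, e.g.
+#
+#     import os
+#
+#     class CwdPrompts(Prompts):
+#         def in_prompt_tokens(self):
+#             return [(Token.Prompt, os.getcwd() + ' >>> ')]
+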
+class ClassicPrompts(Prompts):
+ def in_prompt_tokens(self):
+ return [
+ (Token.Prompt, '>>> '),
+ ]
+
+ def continuation_prompt_tokens(self, width=None):
+ return [
+ (Token.Prompt, '... ')
+ ]
+
+ def rewrite_prompt_tokens(self):
+ return []
+
+ def out_prompt_tokens(self):
+ return []
+
+class RichPromptDisplayHook(DisplayHook):
+ """Subclass of base display hook using coloured prompt"""
+ def write_output_prompt(self):
+ sys.stdout.write(self.shell.separate_out)
+ # If we're not displaying a prompt, it effectively ends with a newline,
+ # because the output will be left-aligned.
+ self.prompt_end_newline = True
+
+ if self.do_full_cache:
+ tokens = self.shell.prompts.out_prompt_tokens()
+ prompt_txt = ''.join(s for t, s in tokens)
+ if prompt_txt and not prompt_txt.endswith('\n'):
+ # Ask for a newline before multiline output
+ self.prompt_end_newline = False
+
+ if self.shell.pt_app:
+ print_formatted_text(PygmentsTokens(tokens),
+ style=self.shell.pt_app.app.style, end='',
+ )
+ else:
+ sys.stdout.write(prompt_txt)
+
+ def write_format_data(self, format_dict, md_dict=None) -> None:
+ if self.shell.mime_renderers:
+
+ for mime, handler in self.shell.mime_renderers.items():
+ if mime in format_dict:
+ handler(format_dict[mime], None)
+ return
+
+ super().write_format_data(format_dict, md_dict)
+
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/__init__.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/__init__.py
new file mode 100644
index 0000000000..9043f15e86
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/__init__.py
@@ -0,0 +1,138 @@
+import importlib
+import os
+
+aliases = {
+ 'qt4': 'qt',
+ 'gtk2': 'gtk',
+}
+
+backends = [
+ "qt",
+ "qt5",
+ "qt6",
+ "gtk",
+ "gtk2",
+ "gtk3",
+ "gtk4",
+ "tk",
+ "wx",
+ "pyglet",
+ "glut",
+ "osx",
+ "asyncio",
+]
+
+registered = {}
+
+def register(name, inputhook):
+ """Register the function *inputhook* as an event loop integration."""
+ registered[name] = inputhook
+
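+# A sketch with hypothetical names, for illustration only: third-party
+# packages can hook their own event loop in via register(), e.g.
+#
+#     def inputhook_myloop(context):
+#         # pump the GUI until context.input_is_ready() returns True
+#         ...
+#
+#     register("myloop", inputhook_myloop)   # afterwards: %gui myloop
+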
+
+class UnknownBackend(KeyError):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return ("No event loop integration for {!r}. "
+ "Supported event loops are: {}").format(self.name,
+ ', '.join(backends + sorted(registered)))
+
+
+def set_qt_api(gui):
+ """Sets the `QT_API` environment variable if it isn't already set."""
+
+ qt_api = os.environ.get("QT_API", None)
+
+ from IPython.external.qt_loaders import (
+ QT_API_PYQT,
+ QT_API_PYQT5,
+ QT_API_PYQT6,
+ QT_API_PYSIDE,
+ QT_API_PYSIDE2,
+ QT_API_PYSIDE6,
+ QT_API_PYQTv1,
+ loaded_api,
+ )
+
+ loaded = loaded_api()
+
+ qt_env2gui = {
+ QT_API_PYSIDE: "qt4",
+ QT_API_PYQTv1: "qt4",
+ QT_API_PYQT: "qt4",
+ QT_API_PYSIDE2: "qt5",
+ QT_API_PYQT5: "qt5",
+ QT_API_PYSIDE6: "qt6",
+ QT_API_PYQT6: "qt6",
+ }
+ if loaded is not None and gui != "qt":
+ if qt_env2gui[loaded] != gui:
+ print(
+ f"Cannot switch Qt versions for this session; will use {qt_env2gui[loaded]}."
+ )
+ return qt_env2gui[loaded]
+
+ if qt_api is not None and gui != "qt":
+ if qt_env2gui[qt_api] != gui:
+ print(
+ f'Request for "{gui}" will be ignored because `QT_API` '
+ f'environment variable is set to "{qt_api}"'
+ )
+ return qt_env2gui[qt_api]
+ else:
+ if gui == "qt5":
+ try:
+ import PyQt5 # noqa
+
+ os.environ["QT_API"] = "pyqt5"
+ except ImportError:
+ try:
+ import PySide2 # noqa
+
+ os.environ["QT_API"] = "pyside2"
+ except ImportError:
+ os.environ["QT_API"] = "pyqt5"
+ elif gui == "qt6":
+ try:
+ import PyQt6 # noqa
+
+ os.environ["QT_API"] = "pyqt6"
+ except ImportError:
+ try:
+ import PySide6 # noqa
+
+ os.environ["QT_API"] = "pyside6"
+ except ImportError:
+ os.environ["QT_API"] = "pyqt6"
+ elif gui == "qt":
+ # Don't set QT_API; let IPython logic choose the version.
+ if "QT_API" in os.environ.keys():
+ del os.environ["QT_API"]
+ else:
+ print(f'Unrecognized Qt version: {gui}. Should be "qt5", "qt6", or "qt".')
+ return
+
+ # Import it now so we can figure out which version it is.
+ from IPython.external.qt_for_kernel import QT_API
+
+ return qt_env2gui[QT_API]
+
+
+def get_inputhook_name_and_func(gui):
+ if gui in registered:
+ return gui, registered[gui]
+
+ if gui not in backends:
+ raise UnknownBackend(gui)
+
+ if gui in aliases:
+ return get_inputhook_name_and_func(aliases[gui])
+
+ gui_mod = gui
+ if gui.startswith("qt"):
+ gui = set_qt_api(gui)
+ gui_mod = "qt"
+
+ mod = importlib.import_module("IPython.terminal.pt_inputhooks." + gui_mod)
+ return gui, mod.inputhook
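+
+# Illustrative resolution examples (not exhaustive, not part of the upstream
+# file): lookup goes through the aliases table and, for Qt, through
+# set_qt_api(), e.g.
+#
+#     get_inputhook_name_and_func("gtk2")  # -> ("gtk", <gtk inputhook>)
+#     get_inputhook_name_and_func("qt5")   # sets QT_API if unset, then
+#                                          #    -> ("qt5", <qt inputhook>)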
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/asyncio.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/asyncio.py
new file mode 100644
index 0000000000..d2499e11e6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/asyncio.py
@@ -0,0 +1,62 @@
+"""
+Inputhook for running the original asyncio event loop while we're waiting for
+input.
+
+By default, IPython runs the prompt with a different asyncio event loop,
+because otherwise we risk people freezing the prompt by scheduling bad
+coroutines, e.g. a coroutine that loops forever and never yields control
+back to the loop. We can't cancel that.
+
+However, sometimes we want the asyncio loop to keep running while waiting for
+a prompt.
+
+The following example will print the numbers 0 to 9 above the prompt while
+we are waiting for input. (This also works because we use prompt_toolkit's
+``patch_stdout``)::
+
+ In [1]: import asyncio
+
+ In [2]: %gui asyncio
+
+ In [3]: async def f():
+ ...: for i in range(10):
+ ...: await asyncio.sleep(1)
+ ...: print(i)
+
+
+ In [4]: asyncio.ensure_future(f())
+
+"""
+from prompt_toolkit import __version__ as ptk_version
+
+from IPython.core.async_helpers import get_asyncio_loop
+
+PTK3 = ptk_version.startswith("3.")
+
+
+def inputhook(context):
+ """
+ Inputhook for asyncio event loop integration.
+ """
+ # For prompt_toolkit 3.0, this input hook literally doesn't do anything.
+ # The event loop integration here is implemented in `interactiveshell.py`
+ # by running the prompt itself in the current asyncio loop. The main reason
+ # for this is that nesting asyncio event loops is unreliable.
+ if PTK3:
+ return
+
+ # For prompt_toolkit 2.0, we can run the current asyncio event loop,
+ # because prompt_toolkit 2.0 uses a different event loop internally.
+
+ # get the persistent asyncio event loop
+ loop = get_asyncio_loop()
+
+ def stop():
+ loop.stop()
+
+ fileno = context.fileno()
+ loop.add_reader(fileno, stop)
+ try:
+ loop.run_forever()
+ finally:
+ loop.remove_reader(fileno)
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/glut.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/glut.py
new file mode 100644
index 0000000000..835aadfc97
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/glut.py
@@ -0,0 +1,140 @@
+"""GLUT Input hook for interactive use with prompt_toolkit
+"""
+
+
+# GLUT is quite an old library and it is difficult to ensure proper
+# integration within IPython, since the original GLUT does not allow handling
+# events one by one. Instead, it requires the mainloop to be entered and
+# never returned (there is not even a function to exit the mainloop).
+# Fortunately, there are alternatives such as freeglut (available for Linux
+# and Windows), and the OSX implementation provides a glutCheckLoop()
+# function that blocks until a new event is received. This means we have to
+# set up the idle callback to ensure we get at least one event that will
+# unblock the function.
+#
+# Furthermore, it is not possible to install these handlers without first
+# creating a window. We choose to make this window invisible. This means that
+# display mode options are set at this level and the user won't be able to
+# change them later without modifying the code. This should probably be made
+# available via the IPython options system.
+
+import sys
+import time
+import signal
+import OpenGL.GLUT as glut
+import OpenGL.platform as platform
+from timeit import default_timer as clock
+
+# Frames per second: 60
+# Should probably be an IPython option
+glut_fps = 60
+
+# Display mode: double buffered + rgba + depth
+# Should probably be an IPython option
+glut_display_mode = (glut.GLUT_DOUBLE |
+ glut.GLUT_RGBA |
+ glut.GLUT_DEPTH)
+
+glutMainLoopEvent = None
+if sys.platform == 'darwin':
+ try:
+ glutCheckLoop = platform.createBaseFunction(
+ 'glutCheckLoop', dll=platform.GLUT, resultType=None,
+ argTypes=[],
+ doc='glutCheckLoop( ) -> None',
+ argNames=(),
+ )
+ except AttributeError as e:
+ raise RuntimeError(
+ '''Your glut implementation does not allow interactive sessions. '''
+ '''Consider installing freeglut.''') from e
+ glutMainLoopEvent = glutCheckLoop
+elif glut.HAVE_FREEGLUT:
+ glutMainLoopEvent = glut.glutMainLoopEvent
+else:
+ raise RuntimeError(
+ '''Your glut implementation does not allow interactive sessions. '''
+ '''Consider installing freeglut.''')
+
+
+def glut_display():
+ # Dummy display function
+ pass
+
+def glut_idle():
+ # Dummy idle function
+ pass
+
+def glut_close():
+ # Close function only hides the current window
+ glut.glutHideWindow()
+ glutMainLoopEvent()
+
+def glut_int_handler(signum, frame):
+    # Catch SIGINT and print the default KeyboardInterrupt message
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+ print('\nKeyboardInterrupt')
+ # Need to reprint the prompt at this stage
+
+# Initialisation code
+glut.glutInit( sys.argv )
+glut.glutInitDisplayMode( glut_display_mode )
+# This is specific to freeglut
+if bool(glut.glutSetOption):
+ glut.glutSetOption( glut.GLUT_ACTION_ON_WINDOW_CLOSE,
+ glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS )
+glut.glutCreateWindow( b'ipython' )
+glut.glutReshapeWindow( 1, 1 )
+glut.glutHideWindow( )
+glut.glutWMCloseFunc( glut_close )
+glut.glutDisplayFunc( glut_display )
+glut.glutIdleFunc( glut_idle )
+
+
+def inputhook(context):
+ """Run the pyglet event loop by processing pending events only.
+
+ This keeps processing pending events until stdin is ready. After
+ processing all pending events, a call to time.sleep is inserted. This is
+ needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
+ though for best performance.
+ """
+ # We need to protect against a user pressing Control-C when IPython is
+ # idle and this is running. We trap KeyboardInterrupt and pass.
+
+ signal.signal(signal.SIGINT, glut_int_handler)
+
+ try:
+ t = clock()
+
+ # Make sure the default window is set after a window has been closed
+ if glut.glutGetWindow() == 0:
+ glut.glutSetWindow( 1 )
+ glutMainLoopEvent()
+ return 0
+
+ while not context.input_is_ready():
+ glutMainLoopEvent()
+ # We need to sleep at this point to keep the idle CPU load
+            # low. However, if we sleep too long, GUI response is poor. As
+ # a compromise, we watch how often GUI events are being processed
+ # and switch between a short and long sleep time. Here are some
+ # stats useful in helping to tune this.
+ # time CPU load
+ # 0.001 13%
+ # 0.005 3%
+ # 0.01 1.5%
+ # 0.05 0.5%
+ used_time = clock() - t
+ if used_time > 10.0:
+ # print 'Sleep for 1 s' # dbg
+ time.sleep(1.0)
+ elif used_time > 0.1:
+ # Few GUI events coming in, so we can sleep longer
+ # print 'Sleep for 0.05 s' # dbg
+ time.sleep(0.05)
+ else:
+ # Many GUI events coming in, so sleep only very little
+ time.sleep(0.001)
+ except KeyboardInterrupt:
+ pass
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk.py
new file mode 100644
index 0000000000..5c201b65d7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk.py
@@ -0,0 +1,60 @@
+# Code borrowed from python-prompt-toolkit examples
+# https://github.com/jonathanslenders/python-prompt-toolkit/blob/77cdcfbc7f4b4c34a9d2f9a34d422d7152f16209/examples/inputhook.py
+
+# Copyright (c) 2014, Jonathan Slenders
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice, this
+# list of conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution.
+#
+# * Neither the name of the {organization} nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+PyGTK input hook for prompt_toolkit.
+
+Listens on the pipe prompt_toolkit sets up for a notification that it should
+return control to the terminal event loop.
+"""
+
+import gtk, gobject
+
+# Enable threading in GTK. (Otherwise, GTK will keep the GIL.)
+gtk.gdk.threads_init()
+
+
+def inputhook(context):
+ """
+ When the eventloop of prompt-toolkit is idle, call this inputhook.
+
+ This will run the GTK main loop until the file descriptor
+ `context.fileno()` becomes ready.
+
+ :param context: An `InputHookContext` instance.
+ """
+
+ def _main_quit(*a, **kw):
+ gtk.main_quit()
+ return False
+
+ gobject.io_add_watch(context.fileno(), gobject.IO_IN, _main_quit)
+ gtk.main()
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk3.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk3.py
new file mode 100644
index 0000000000..b073bd94d9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk3.py
@@ -0,0 +1,14 @@
+"""prompt_toolkit input hook for GTK 3
+"""
+
+from gi.repository import Gtk, GLib
+
+
+def _main_quit(*args, **kwargs):
+ Gtk.main_quit()
+ return False
+
+
+def inputhook(context):
+ GLib.io_add_watch(context.fileno(), GLib.PRIORITY_DEFAULT, GLib.IO_IN, _main_quit)
+ Gtk.main()
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk4.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk4.py
new file mode 100644
index 0000000000..009fbf1212
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/gtk4.py
@@ -0,0 +1,27 @@
+"""
+prompt_toolkit input hook for GTK 4.
+"""
+
+from gi.repository import GLib
+
+
+class _InputHook:
+ def __init__(self, context):
+ self._quit = False
+ GLib.io_add_watch(
+ context.fileno(), GLib.PRIORITY_DEFAULT, GLib.IO_IN, self.quit
+ )
+
+ def quit(self, *args, **kwargs):
+ self._quit = True
+ return False
+
+ def run(self):
+ context = GLib.MainContext.default()
+ while not self._quit:
+ context.iteration(True)
+
+
+def inputhook(context):
+ hook = _InputHook(context)
+ hook.run()
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/osx.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/osx.py
new file mode 100644
index 0000000000..2754820efc
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/osx.py
@@ -0,0 +1,157 @@
+"""Inputhook for OS X
+
+Calls NSApp / CoreFoundation APIs via ctypes.
+"""
+
+# obj-c boilerplate from appnope, used under BSD 2-clause
+
+import ctypes
+import ctypes.util
+from threading import Event
+
+objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("objc")) # type: ignore
+
+void_p = ctypes.c_void_p
+
+objc.objc_getClass.restype = void_p
+objc.sel_registerName.restype = void_p
+objc.objc_msgSend.restype = void_p
+objc.objc_msgSend.argtypes = [void_p, void_p]
+
+msg = objc.objc_msgSend
+
+def _utf8(s):
+ """ensure utf8 bytes"""
+ if not isinstance(s, bytes):
+ s = s.encode('utf8')
+ return s
+
+def n(name):
+ """create a selector name (for ObjC methods)"""
+ return objc.sel_registerName(_utf8(name))
+
+def C(classname):
+ """get an ObjC Class by name"""
+ return objc.objc_getClass(_utf8(classname))
+
+# end obj-c boilerplate from appnope
+
+# CoreFoundation C-API calls we will use:
+CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library("CoreFoundation")) # type: ignore
+
+CFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate
+CFFileDescriptorCreate.restype = void_p
+CFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p, void_p]
+
+CFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor
+CFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int
+CFFileDescriptorGetNativeDescriptor.argtypes = [void_p]
+
+CFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks
+CFFileDescriptorEnableCallBacks.restype = None
+CFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]
+
+CFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource
+CFFileDescriptorCreateRunLoopSource.restype = void_p
+CFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]
+
+CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
+CFRunLoopGetCurrent.restype = void_p
+
+CFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource
+CFRunLoopAddSource.restype = None
+CFRunLoopAddSource.argtypes = [void_p, void_p, void_p]
+
+CFRelease = CoreFoundation.CFRelease
+CFRelease.restype = None
+CFRelease.argtypes = [void_p]
+
+CFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate
+CFFileDescriptorInvalidate.restype = None
+CFFileDescriptorInvalidate.argtypes = [void_p]
+
+# From CFFileDescriptor.h
+kCFFileDescriptorReadCallBack = 1
+kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')
+
+
+def _NSApp():
+ """Return the global NSApplication instance (NSApp)"""
+ objc.objc_msgSend.argtypes = [void_p, void_p]
+ return msg(C('NSApplication'), n('sharedApplication'))
+
+
+def _wake(NSApp):
+ """Wake the Application"""
+ objc.objc_msgSend.argtypes = [
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ void_p,
+ ]
+ event = msg(
+ C("NSEvent"),
+ n(
+ "otherEventWithType:location:modifierFlags:"
+ "timestamp:windowNumber:context:subtype:data1:data2:"
+ ),
+ 15, # Type
+ 0, # location
+ 0, # flags
+ 0, # timestamp
+ 0, # window
+ None, # context
+ 0, # subtype
+ 0, # data1
+ 0, # data2
+ )
+ objc.objc_msgSend.argtypes = [void_p, void_p, void_p, void_p]
+ msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
+
+
+_triggered = Event()
+
+def _input_callback(fdref, flags, info):
+ """Callback to fire when there's input to be read"""
+ _triggered.set()
+ CFFileDescriptorInvalidate(fdref)
+ CFRelease(fdref)
+ NSApp = _NSApp()
+ objc.objc_msgSend.argtypes = [void_p, void_p, void_p]
+ msg(NSApp, n('stop:'), NSApp)
+ _wake(NSApp)
+
+_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)
+_c_input_callback = _c_callback_func_type(_input_callback)
+
+
+def _stop_on_read(fd):
+ """Register callback to stop eventloop when there's data on fd"""
+ _triggered.clear()
+ fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)
+ CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)
+ source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)
+ loop = CFRunLoopGetCurrent()
+ CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)
+ CFRelease(source)
+
+
+def inputhook(context):
+ """Inputhook for Cocoa (NSApp)"""
+ NSApp = _NSApp()
+ _stop_on_read(context.fileno())
+ objc.objc_msgSend.argtypes = [void_p, void_p]
+ msg(NSApp, n('run'))
+ if not _triggered.is_set():
+ # app closed without firing callback,
+ # probably due to last window being closed.
+ # Run the loop manually in this case,
+ # since there may be events still to process (#9734)
+ CoreFoundation.CFRunLoopRun()
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/pyglet.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/pyglet.py
new file mode 100644
index 0000000000..49ec86d223
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/pyglet.py
@@ -0,0 +1,66 @@
+"""Enable pyglet to be used interactively with prompt_toolkit
+"""
+
+import sys
+import time
+from timeit import default_timer as clock
+import pyglet
+
+# On linux only, window.flip() has a bug that causes an AttributeError on
+# window close. For details, see:
+# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
+
+if sys.platform.startswith('linux'):
+ def flip(window):
+ try:
+ window.flip()
+ except AttributeError:
+ pass
+else:
+ def flip(window):
+ window.flip()
+
+
+def inputhook(context):
+ """Run the pyglet event loop by processing pending events only.
+
+ This keeps processing pending events until stdin is ready. After
+ processing all pending events, a call to time.sleep is inserted. This is
+    needed because otherwise CPU usage would be at 100%. The sleep time
+    should be tuned, though, for best performance.
+ """
+ # We need to protect against a user pressing Control-C when IPython is
+ # idle and this is running. We trap KeyboardInterrupt and pass.
+ try:
+ t = clock()
+ while not context.input_is_ready():
+ pyglet.clock.tick()
+ for window in pyglet.app.windows:
+ window.switch_to()
+ window.dispatch_events()
+ window.dispatch_event('on_draw')
+ flip(window)
+
+ # We need to sleep at this point to keep the idle CPU load
+            # low. However, if we sleep too long, GUI response is poor. As
+ # a compromise, we watch how often GUI events are being processed
+ # and switch between a short and long sleep time. Here are some
+ # stats useful in helping to tune this.
+ # time CPU load
+ # 0.001 13%
+ # 0.005 3%
+ # 0.01 1.5%
+ # 0.05 0.5%
+ used_time = clock() - t
+ if used_time > 10.0:
+ # print 'Sleep for 1 s' # dbg
+ time.sleep(1.0)
+ elif used_time > 0.1:
+ # Few GUI events coming in, so we can sleep longer
+ # print 'Sleep for 0.05 s' # dbg
+ time.sleep(0.05)
+ else:
+ # Many GUI events coming in, so sleep only very little
+ time.sleep(0.001)
+ except KeyboardInterrupt:
+ pass
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/qt.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/qt.py
new file mode 100644
index 0000000000..cf6d11ea6c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/qt.py
@@ -0,0 +1,86 @@
+import sys
+import os
+from IPython.external.qt_for_kernel import QtCore, QtGui, enum_helper
+from IPython import get_ipython
+
+# If we create a QApplication, keep a reference to it so that it doesn't get
+# garbage collected.
+_appref = None
+_already_warned = False
+
+
+def _exec(obj):
+ # exec on PyQt6, exec_ elsewhere.
+ obj.exec() if hasattr(obj, "exec") else obj.exec_()
+
+
+def _reclaim_excepthook():
+ shell = get_ipython()
+ if shell is not None:
+ sys.excepthook = shell.excepthook
+
+
+def inputhook(context):
+ global _appref
+ app = QtCore.QCoreApplication.instance()
+ if not app:
+ if sys.platform == 'linux':
+ if not os.environ.get('DISPLAY') \
+ and not os.environ.get('WAYLAND_DISPLAY'):
+ import warnings
+ global _already_warned
+ if not _already_warned:
+ _already_warned = True
+ warnings.warn(
+ 'The DISPLAY or WAYLAND_DISPLAY environment variable is '
+ 'not set or empty and Qt5 requires this environment '
+ 'variable. Deactivate Qt5 code.'
+ )
+ return
+ try:
+ QtCore.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
+ except AttributeError: # Only for Qt>=5.6, <6.
+ pass
+ try:
+ QtCore.QApplication.setHighDpiScaleFactorRoundingPolicy(
+ QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
+ )
+ except AttributeError: # Only for Qt>=5.14.
+ pass
+ _appref = app = QtGui.QApplication([" "])
+
+ # "reclaim" IPython sys.excepthook after event loop starts
+ # without this, it defaults back to BaseIPythonApplication.excepthook
+ # and exceptions in the Qt event loop are rendered without traceback
+ # formatting and look like "bug in IPython".
+ QtCore.QTimer.singleShot(0, _reclaim_excepthook)
+
+ event_loop = QtCore.QEventLoop(app)
+
+ if sys.platform == 'win32':
+ # The QSocketNotifier method doesn't appear to work on Windows.
+ # Use polling instead.
+ timer = QtCore.QTimer()
+ timer.timeout.connect(event_loop.quit)
+ while not context.input_is_ready():
+ # NOTE: run the event loop, and after 50 ms, call `quit` to exit it.
+ timer.start(50) # 50 ms
+ _exec(event_loop)
+ timer.stop()
+ else:
+ # On POSIX platforms, we can use a file descriptor to quit the event
+ # loop when there is input ready to read.
+ notifier = QtCore.QSocketNotifier(
+ context.fileno(), enum_helper("QtCore.QSocketNotifier.Type").Read
+ )
+ try:
+ # connect the callback we care about before we turn it on
+            # the lambda is necessary because PyQt inspects the function signature
+            # to know what arguments to pass. See https://github.com/ipython/ipython/pull/12355
+ notifier.activated.connect(lambda: event_loop.exit())
+ notifier.setEnabled(True)
+            # only start the event loop if input is not already ready
+ if not context.input_is_ready():
+ _exec(event_loop)
+ finally:
+ notifier.setEnabled(False)
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/tk.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/tk.py
new file mode 100644
index 0000000000..2715505f1f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/tk.py
@@ -0,0 +1,90 @@
+# Code borrowed from ptpython
+# https://github.com/jonathanslenders/ptpython/blob/86b71a89626114b18898a0af463978bdb32eeb70/ptpython/eventloop.py
+
+# Copyright (c) 2015, Jonathan Slenders
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice, this
+# list of conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution.
+#
+# * Neither the name of the {organization} nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Wrapper around the eventloop that gives the Tkinter GUI some time to process
+events when it is loaded and while we are waiting for input at the REPL. This
+way we don't block the UI of, for instance, ``turtle`` and other Tk libraries.
+
+(Normally Tkinter registers its callbacks with ``PyOS_InputHook`` to integrate
+with readline. ``prompt-toolkit`` doesn't understand that input hook, but this
+will fix it for Tk.)
+"""
+import time
+
+import _tkinter
+import tkinter
+
+def inputhook(inputhook_context):
+ """
+ Inputhook for Tk.
+ Run the Tk eventloop until prompt-toolkit needs to process the next input.
+ """
+ # Get the current TK application.
+ root = tkinter._default_root
+
+ def wait_using_filehandler():
+ """
+ Run the TK eventloop until the file handler that we got from the
+ inputhook becomes readable.
+ """
+ # Add a handler that sets the stop flag when `prompt-toolkit` has input
+ # to process.
+ stop = [False]
+ def done(*a):
+ stop[0] = True
+
+ root.createfilehandler(inputhook_context.fileno(), _tkinter.READABLE, done)
+
+ # Run the TK event loop as long as we don't receive input.
+ while root.dooneevent(_tkinter.ALL_EVENTS):
+ if stop[0]:
+ break
+
+ root.deletefilehandler(inputhook_context.fileno())
+
+ def wait_using_polling():
+ """
+ Windows TK doesn't support 'createfilehandler'.
+ So, run the TK eventloop and poll until input is ready.
+ """
+ while not inputhook_context.input_is_ready():
+ while root.dooneevent(_tkinter.ALL_EVENTS | _tkinter.DONT_WAIT):
+ pass
+ # Sleep to make the CPU idle, but not too long, so that the UI
+ # stays responsive.
+ time.sleep(.01)
+
+ if root is not None:
+ if hasattr(root, 'createfilehandler'):
+ wait_using_filehandler()
+ else:
+ wait_using_polling()
diff --git a/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/wx.py b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/wx.py
new file mode 100644
index 0000000000..a0f4442c77
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/pt_inputhooks/wx.py
@@ -0,0 +1,219 @@
+"""Enable wxPython to be used interactively in prompt_toolkit
+"""
+
+import sys
+import signal
+import time
+from timeit import default_timer as clock
+import wx
+
+
+def ignore_keyboardinterrupts(func):
+ """Decorator which causes KeyboardInterrupt exceptions to be ignored during
+ execution of the decorated function.
+
+ This is used by the inputhook functions to handle the event where the user
+ presses CTRL+C while IPython is idle, and the inputhook loop is running. In
+ this case, we want to ignore interrupts.
+ """
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except KeyboardInterrupt:
+ pass
+ return wrapper
+
+
+@ignore_keyboardinterrupts
+def inputhook_wx1(context):
+ """Run the wx event loop by processing pending events only.
+
+ This approach seems to work, but its performance is not great as it
+ relies on having PyOS_InputHook called regularly.
+ """
+ app = wx.GetApp()
+ if app is not None:
+ assert wx.Thread_IsMain()
+
+ # Make a temporary event loop and process system events until
+ # there are no more waiting, then allow idle events (which
+ # will also deal with pending or posted wx events.)
+ evtloop = wx.EventLoop()
+ ea = wx.EventLoopActivator(evtloop)
+ while evtloop.Pending():
+ evtloop.Dispatch()
+ app.ProcessIdle()
+ del ea
+ return 0
+
+
+class EventLoopTimer(wx.Timer):
+
+ def __init__(self, func):
+ self.func = func
+ wx.Timer.__init__(self)
+
+ def Notify(self):
+ self.func()
+
+
+class EventLoopRunner(object):
+
+ def Run(self, time, input_is_ready):
+ self.input_is_ready = input_is_ready
+ self.evtloop = wx.EventLoop()
+ self.timer = EventLoopTimer(self.check_stdin)
+ self.timer.Start(time)
+ self.evtloop.Run()
+
+ def check_stdin(self):
+ if self.input_is_ready():
+ self.timer.Stop()
+ self.evtloop.Exit()
+
+
+@ignore_keyboardinterrupts
+def inputhook_wx2(context):
+ """Run the wx event loop, polling for stdin.
+
+ This version runs the wx eventloop for an undetermined amount of time,
+ during which it periodically checks to see if anything is ready on
+ stdin. If anything is ready on stdin, the event loop exits.
+
+ The argument to elr.Run controls how often the event loop looks at stdin.
+ This determines the responsiveness at the keyboard. A setting of 1000
+ enables a user to type at most 1 char per second. I have found that a
+ setting of 10 gives good keyboard response. We can shorten it further,
+ but eventually performance would suffer from calling select/kbhit too
+ often.
+ """
+ app = wx.GetApp()
+ if app is not None:
+ assert wx.Thread_IsMain()
+ elr = EventLoopRunner()
+ # As this time is made shorter, keyboard response improves, but idle
+ # CPU load goes up. 10 ms seems like a good compromise.
+ elr.Run(time=10, # CHANGE time here to control polling interval
+ input_is_ready=context.input_is_ready)
+ return 0
+
+
+@ignore_keyboardinterrupts
+def inputhook_wx3(context):
+ """Run the wx event loop by processing pending events only.
+
+ This is like inputhook_wx1, but it keeps processing pending events
+ until stdin is ready. After processing all pending events, a call to
+    time.sleep is inserted. This is needed because otherwise CPU usage would
+    be at 100%. The sleep time should be tuned, though, for best performance.
+ """
+ app = wx.GetApp()
+ if app is not None:
+ assert wx.Thread_IsMain()
+
+ # The import of wx on Linux sets the handler for signal.SIGINT
+ # to 0. This is a bug in wx or gtk. We fix by just setting it
+ # back to the Python default.
+ if not callable(signal.getsignal(signal.SIGINT)):
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+
+ evtloop = wx.EventLoop()
+ ea = wx.EventLoopActivator(evtloop)
+ t = clock()
+ while not context.input_is_ready():
+ while evtloop.Pending():
+ t = clock()
+ evtloop.Dispatch()
+ app.ProcessIdle()
+ # We need to sleep at this point to keep the idle CPU load
+            # low. However, if we sleep too long, GUI response is poor. As
+ # a compromise, we watch how often GUI events are being processed
+ # and switch between a short and long sleep time. Here are some
+ # stats useful in helping to tune this.
+ # time CPU load
+ # 0.001 13%
+ # 0.005 3%
+ # 0.01 1.5%
+ # 0.05 0.5%
+ used_time = clock() - t
+ if used_time > 10.0:
+ # print 'Sleep for 1 s' # dbg
+ time.sleep(1.0)
+ elif used_time > 0.1:
+ # Few GUI events coming in, so we can sleep longer
+ # print 'Sleep for 0.05 s' # dbg
+ time.sleep(0.05)
+ else:
+ # Many GUI events coming in, so sleep only very little
+ time.sleep(0.001)
+ del ea
+ return 0
+
+
+@ignore_keyboardinterrupts
+def inputhook_wxphoenix(context):
+ """Run the wx event loop until the user provides more input.
+
+ This input hook is suitable for use with wxPython >= 4 (a.k.a. Phoenix).
+
+    It uses the same approach as that used in
+ ipykernel.eventloops.loop_wx. The wx.MainLoop is executed, and a wx.Timer
+ is used to periodically poll the context for input. As soon as input is
+ ready, the wx.MainLoop is stopped.
+ """
+
+ app = wx.GetApp()
+
+ if app is None:
+ return
+
+ if context.input_is_ready():
+ return
+
+ assert wx.IsMainThread()
+
+ # Wx uses milliseconds
+ poll_interval = 100
+
+ # Use a wx.Timer to periodically check whether input is ready - as soon as
+ # it is, we exit the main loop
+ timer = wx.Timer()
+
+ def poll(ev):
+ if context.input_is_ready():
+ timer.Stop()
+ app.ExitMainLoop()
+
+ timer.Start(poll_interval)
+ timer.Bind(wx.EVT_TIMER, poll)
+
+ # The import of wx on Linux sets the handler for signal.SIGINT to 0. This
+ # is a bug in wx or gtk. We fix by just setting it back to the Python
+ # default.
+ if not callable(signal.getsignal(signal.SIGINT)):
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+
+ # The SetExitOnFrameDelete call allows us to run the wx mainloop without
+ # having a frame open.
+ app.SetExitOnFrameDelete(False)
+ app.MainLoop()
+
+
+# Get the major wx version number to figure out what input hook we should use.
+major_version = 3
+
+try:
+ major_version = int(wx.__version__[0])
+except Exception:
+ pass
+
+# Use the phoenix hook on all platforms for wxpython >= 4
+if major_version >= 4:
+ inputhook = inputhook_wxphoenix
+# On OSX, evtloop.Pending() always returns True, regardless of there being
+# any events pending. As such we can't use implementations 1 or 3 of the
+# inputhook as those depend on a pending/dispatch loop.
+elif sys.platform == 'darwin':
+ inputhook = inputhook_wx2
+else:
+ inputhook = inputhook_wx3
diff --git a/contrib/python/ipython/py3/IPython/terminal/ptutils.py b/contrib/python/ipython/py3/IPython/terminal/ptutils.py
new file mode 100644
index 0000000000..39bc2e15af
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/ptutils.py
@@ -0,0 +1,204 @@
+"""prompt-toolkit utilities
+
+Everything in this module is a private API,
+not to be used outside IPython.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import unicodedata
+from wcwidth import wcwidth
+
+from IPython.core.completer import (
+ provisionalcompleter, cursor_to_position,
+ _deduplicate_completions)
+from prompt_toolkit.completion import Completer, Completion
+from prompt_toolkit.lexers import Lexer
+from prompt_toolkit.lexers import PygmentsLexer
+from prompt_toolkit.patch_stdout import patch_stdout
+
+import pygments.lexers as pygments_lexers
+import os
+import sys
+import traceback
+
+_completion_sentinel = object()
+
+def _elide_point(string:str, *, min_elide=30)->str:
+ """
+ If a string is long enough, and has at least 3 dots,
+ replace the middle part with ellipses.
+
+ If a string naming a file is long enough, and has at least 3 slashes,
+ replace the middle part with ellipses.
+
+ If three or two consecutive dots are encountered, they are replaced by the
+ HORIZONTAL ELLIPSIS or TWO DOT LEADER unicode characters, respectively.
+ """
+ string = string.replace('...','\N{HORIZONTAL ELLIPSIS}')
+ string = string.replace('..','\N{TWO DOT LEADER}')
+ if len(string) < min_elide:
+ return string
+
+ object_parts = string.split('.')
+ file_parts = string.split(os.sep)
+ if file_parts[-1] == '':
+ file_parts.pop()
+
+ if len(object_parts) > 3:
+ return "{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}".format(
+ object_parts[0],
+ object_parts[1][:1],
+ object_parts[-2][-1:],
+ object_parts[-1],
+ )
+
+ elif len(file_parts) > 3:
+ return ("{}" + os.sep + "{}\N{HORIZONTAL ELLIPSIS}{}" + os.sep + "{}").format(
+ file_parts[0], file_parts[1][:1], file_parts[-2][-1:], file_parts[-1]
+ )
+
+ return string
+
+def _elide_typed(string:str, typed:str, *, min_elide:int=30)->str:
+ """
+ Elide the middle of a long string if the beginning has already been typed.
+ """
+
+ if len(string) < min_elide:
+ return string
+ cut_how_much = len(typed)-3
+ if cut_how_much < 7:
+ return string
+ if string.startswith(typed) and len(string)> len(typed):
+ return f"{string[:3]}\N{HORIZONTAL ELLIPSIS}{string[cut_how_much:]}"
+ return string
+
+def _elide(string:str, typed:str, min_elide=30)->str:
+ return _elide_typed(
+ _elide_point(string, min_elide=min_elide),
+ typed, min_elide=min_elide)
+
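+# Rough illustration of the helpers above (inputs are made up):
+#
+#   _elide("aaaa.bbbb.cccc.dddd.eeee.ffff.gggg", "")
+#   # -> "aaaa.b…f.gggg"
+#
+# i.e. for long dotted names only the first and last components survive in
+# full, with the middle collapsed into a single ellipsis character.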
+
+
+def _adjust_completion_text_based_on_context(text, body, offset):
+ if text.endswith('=') and len(body) > offset and body[offset] == '=':
+ return text[:-1]
+ else:
+ return text
+
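+# For example (illustrative): when completing ``f(x=`` and the character right
+# after the cursor is already ``=``, a candidate ending in ``=`` is trimmed so
+# the user does not end up with ``==`` in the buffer.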
+
+class IPythonPTCompleter(Completer):
+ """Adaptor to provide IPython completions to prompt_toolkit"""
+ def __init__(self, ipy_completer=None, shell=None):
+ if shell is None and ipy_completer is None:
+ raise TypeError("Please pass shell=an InteractiveShell instance.")
+ self._ipy_completer = ipy_completer
+ self.shell = shell
+
+ @property
+ def ipy_completer(self):
+ if self._ipy_completer:
+ return self._ipy_completer
+ else:
+ return self.shell.Completer
+
+ def get_completions(self, document, complete_event):
+ if not document.current_line.strip():
+ return
+ # Some bits of our completion system may print stuff (e.g. if a module
+ # is imported). This context manager ensures that doesn't interfere with
+ # the prompt.
+
+ with patch_stdout(), provisionalcompleter():
+ body = document.text
+ cursor_row = document.cursor_position_row
+ cursor_col = document.cursor_position_col
+ cursor_position = document.cursor_position
+ offset = cursor_to_position(body, cursor_row, cursor_col)
+ try:
+ yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)
+ except Exception as e:
+ try:
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ traceback.print_exception(exc_type, exc_value, exc_tb)
+ except AttributeError:
+ print('Unrecoverable Error in completions')
+
+ @staticmethod
+ def _get_completions(body, offset, cursor_position, ipyc):
+ """
+ Private equivalent of get_completions(); used only for unit testing.
+ """
+ debug = getattr(ipyc, 'debug', False)
+ completions = _deduplicate_completions(
+ body, ipyc.completions(body, offset))
+ for c in completions:
+ if not c.text:
+ # Guard against completion machinery giving us an empty string.
+ continue
+ text = unicodedata.normalize('NFC', c.text)
+ # When the first character of the completion has zero display width,
+ # it is probably a decomposed unicode character, e.g. as produced by
+ # the "\dot" completion. Try to compose it again with the previous
+ # character.
+ if wcwidth(text[0]) == 0:
+ if cursor_position + c.start > 0:
+ char_before = body[c.start - 1]
+ fixed_text = unicodedata.normalize(
+ 'NFC', char_before + text)
+
+ # Yield the modified completion instead, if this worked.
+ if wcwidth(text[0:1]) == 1:
+ yield Completion(fixed_text, start_position=c.start - offset - 1)
+ continue
+
+ # TODO: Use Jedi to determine meta_text
+ # (Jedi currently has a bug that results in incorrect information.)
+ # meta_text = ''
+ # yield Completion(m, start_position=start_pos,
+ # display_meta=meta_text)
+ display_text = c.text
+
+ adjusted_text = _adjust_completion_text_based_on_context(c.text, body, offset)
+ if c.type == 'function':
+ yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text+'()', body[c.start:c.end]), display_meta=c.type+c.signature)
+ else:
+ yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text, body[c.start:c.end]), display_meta=c.type)
+
+class IPythonPTLexer(Lexer):
+ """
+ Wrapper around PythonLexer and BashLexer.
+ """
+ def __init__(self):
+ l = pygments_lexers
+ self.python_lexer = PygmentsLexer(l.Python3Lexer)
+ self.shell_lexer = PygmentsLexer(l.BashLexer)
+
+ self.magic_lexers = {
+ 'HTML': PygmentsLexer(l.HtmlLexer),
+ 'html': PygmentsLexer(l.HtmlLexer),
+ 'javascript': PygmentsLexer(l.JavascriptLexer),
+ 'js': PygmentsLexer(l.JavascriptLexer),
+ 'perl': PygmentsLexer(l.PerlLexer),
+ 'ruby': PygmentsLexer(l.RubyLexer),
+ 'latex': PygmentsLexer(l.TexLexer),
+ }
+
+ def lex_document(self, document):
+ text = document.text.lstrip()
+
+ lexer = self.python_lexer
+
+ if text.startswith('!') or text.startswith('%%bash'):
+ lexer = self.shell_lexer
+
+ elif text.startswith('%%'):
+ for magic, l in self.magic_lexers.items():
+ if text.startswith('%%' + magic):
+ lexer = l
+ break
+
+ return lexer.lex_document(document)
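+
+# Dispatch sketch (illustrative): input starting with "!" or "%%bash" is lexed
+# as shell code, "%%html", "%%js", "%%latex" etc. use the matching magic lexer,
+# and everything else falls back to the Python lexer.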
diff --git a/contrib/python/ipython/py3/IPython/terminal/shortcuts/__init__.py b/contrib/python/ipython/py3/IPython/terminal/shortcuts/__init__.py
new file mode 100644
index 0000000000..12890f4ab6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/shortcuts/__init__.py
@@ -0,0 +1,630 @@
+"""
+Module to define and register Terminal IPython shortcuts with
+:mod:`prompt_toolkit`
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import signal
+import sys
+import warnings
+from dataclasses import dataclass
+from typing import Callable, Any, Optional, List
+
+from prompt_toolkit.application.current import get_app
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.key_binding.key_processor import KeyPressEvent
+from prompt_toolkit.key_binding.bindings import named_commands as nc
+from prompt_toolkit.key_binding.bindings.completion import (
+ display_completions_like_readline,
+)
+from prompt_toolkit.key_binding.vi_state import InputMode, ViState
+from prompt_toolkit.filters import Condition
+
+from IPython.core.getipython import get_ipython
+from IPython.terminal.shortcuts import auto_match as match
+from IPython.terminal.shortcuts import auto_suggest
+from IPython.terminal.shortcuts.filters import filter_from_string
+from IPython.utils.decorators import undoc
+
+from prompt_toolkit.enums import DEFAULT_BUFFER
+
+__all__ = ["create_ipython_shortcuts"]
+
+
+@dataclass
+class BaseBinding:
+ command: Callable[[KeyPressEvent], Any]
+ keys: List[str]
+
+
+@dataclass
+class RuntimeBinding(BaseBinding):
+ filter: Condition
+
+
+@dataclass
+class Binding(BaseBinding):
+ # while filter could be created by referencing variables directly (rather
+ # than created from strings), by using strings we ensure that users will
+ # be able to create filters in configuration (e.g. JSON) files too, which
+ # also benefits the documentation by enforcing human-readable filter names.
+ condition: Optional[str] = None
+
+ def __post_init__(self):
+ if self.condition:
+ self.filter = filter_from_string(self.condition)
+ else:
+ self.filter = None
+
+
+def create_identifier(handler: Callable):
+ parts = handler.__module__.split(".")
+ name = handler.__name__
+ package = parts[0]
+ if len(parts) > 1:
+ final_module = parts[-1]
+ return f"{package}:{final_module}.{name}"
+ else:
+ return f"{package}:{name}"
+
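+# For instance (illustration only), create_identifier(nc.backward_word) yields
+# something like "prompt_toolkit:named_commands.backward_word".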
+
+AUTO_MATCH_BINDINGS = [
+ *[
+ Binding(
+ cmd, [key], "focused_insert & auto_match & followed_by_closing_paren_or_end"
+ )
+ for key, cmd in match.auto_match_parens.items()
+ ],
+ *[
+ # raw string
+ Binding(cmd, [key], "focused_insert & auto_match & preceded_by_raw_str_prefix")
+ for key, cmd in match.auto_match_parens_raw_string.items()
+ ],
+ Binding(
+ match.double_quote,
+ ['"'],
+ "focused_insert"
+ " & auto_match"
+ " & not_inside_unclosed_string"
+ " & preceded_by_paired_double_quotes"
+ " & followed_by_closing_paren_or_end",
+ ),
+ Binding(
+ match.single_quote,
+ ["'"],
+ "focused_insert"
+ " & auto_match"
+ " & not_inside_unclosed_string"
+ " & preceded_by_paired_single_quotes"
+ " & followed_by_closing_paren_or_end",
+ ),
+ Binding(
+ match.docstring_double_quotes,
+ ['"'],
+ "focused_insert"
+ " & auto_match"
+ " & not_inside_unclosed_string"
+ " & preceded_by_two_double_quotes",
+ ),
+ Binding(
+ match.docstring_single_quotes,
+ ["'"],
+ "focused_insert"
+ " & auto_match"
+ " & not_inside_unclosed_string"
+ " & preceded_by_two_single_quotes",
+ ),
+ Binding(
+ match.skip_over,
+ [")"],
+ "focused_insert & auto_match & followed_by_closing_round_paren",
+ ),
+ Binding(
+ match.skip_over,
+ ["]"],
+ "focused_insert & auto_match & followed_by_closing_bracket",
+ ),
+ Binding(
+ match.skip_over,
+ ["}"],
+ "focused_insert & auto_match & followed_by_closing_brace",
+ ),
+ Binding(
+ match.skip_over, ['"'], "focused_insert & auto_match & followed_by_double_quote"
+ ),
+ Binding(
+ match.skip_over, ["'"], "focused_insert & auto_match & followed_by_single_quote"
+ ),
+ Binding(
+ match.delete_pair,
+ ["backspace"],
+ "focused_insert"
+ " & preceded_by_opening_round_paren"
+ " & auto_match"
+ " & followed_by_closing_round_paren",
+ ),
+ Binding(
+ match.delete_pair,
+ ["backspace"],
+ "focused_insert"
+ " & preceded_by_opening_bracket"
+ " & auto_match"
+ " & followed_by_closing_bracket",
+ ),
+ Binding(
+ match.delete_pair,
+ ["backspace"],
+ "focused_insert"
+ " & preceded_by_opening_brace"
+ " & auto_match"
+ " & followed_by_closing_brace",
+ ),
+ Binding(
+ match.delete_pair,
+ ["backspace"],
+ "focused_insert"
+ " & preceded_by_double_quote"
+ " & auto_match"
+ " & followed_by_double_quote",
+ ),
+ Binding(
+ match.delete_pair,
+ ["backspace"],
+ "focused_insert"
+ " & preceded_by_single_quote"
+ " & auto_match"
+ " & followed_by_single_quote",
+ ),
+]
+
+AUTO_SUGGEST_BINDINGS = [
+ # there are two reasons for re-defining bindings defined upstream:
+ # 1) prompt-toolkit does not execute autosuggestion bindings in vi mode,
+ # 2) prompt-toolkit checks if we are at the end of the text, not the end of
+ # the line, hence it does not work in the multi-line mode of the navigable
+ # provider
+ Binding(
+ auto_suggest.accept_or_jump_to_end,
+ ["end"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept_or_jump_to_end,
+ ["c-e"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept,
+ ["c-f"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept,
+ ["right"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept_word,
+ ["escape", "f"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept_token,
+ ["c-right"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.discard,
+ ["escape"],
+ # note this one is using `emacs_insert_mode`, not `emacs_like_insert_mode`
+ # as in `vi_insert_mode` we do not want `escape` to be shadowed (ever).
+ "has_suggestion & default_buffer_focused & emacs_insert_mode",
+ ),
+ Binding(
+ auto_suggest.discard,
+ ["delete"],
+ "has_suggestion & default_buffer_focused & emacs_insert_mode",
+ ),
+ Binding(
+ auto_suggest.swap_autosuggestion_up,
+ ["c-up"],
+ "navigable_suggestions"
+ " & ~has_line_above"
+ " & has_suggestion"
+ " & default_buffer_focused",
+ ),
+ Binding(
+ auto_suggest.swap_autosuggestion_down,
+ ["c-down"],
+ "navigable_suggestions"
+ " & ~has_line_below"
+ " & has_suggestion"
+ " & default_buffer_focused",
+ ),
+ Binding(
+ auto_suggest.up_and_update_hint,
+ ["c-up"],
+ "has_line_above & navigable_suggestions & default_buffer_focused",
+ ),
+ Binding(
+ auto_suggest.down_and_update_hint,
+ ["c-down"],
+ "has_line_below & navigable_suggestions & default_buffer_focused",
+ ),
+ Binding(
+ auto_suggest.accept_character,
+ ["escape", "right"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept_and_move_cursor_left,
+ ["c-left"],
+ "has_suggestion & default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.accept_and_keep_cursor,
+ ["escape", "down"],
+ "has_suggestion & default_buffer_focused & emacs_insert_mode",
+ ),
+ Binding(
+ auto_suggest.backspace_and_resume_hint,
+ ["backspace"],
+ # no `has_suggestion` here to allow resuming if no suggestion
+ "default_buffer_focused & emacs_like_insert_mode",
+ ),
+ Binding(
+ auto_suggest.resume_hinting,
+ ["right"],
+ "is_cursor_at_the_end_of_line"
+ " & default_buffer_focused"
+ " & emacs_like_insert_mode"
+ " & pass_through",
+ ),
+]
+
+
+SIMPLE_CONTROL_BINDINGS = [
+ Binding(cmd, [key], "vi_insert_mode & default_buffer_focused & ebivim")
+ for key, cmd in {
+ "c-a": nc.beginning_of_line,
+ "c-b": nc.backward_char,
+ "c-k": nc.kill_line,
+ "c-w": nc.backward_kill_word,
+ "c-y": nc.yank,
+ "c-_": nc.undo,
+ }.items()
+]
+
+
+ALT_AND_COMOBO_CONTROL_BINDINGS = [
+ Binding(cmd, list(keys), "vi_insert_mode & default_buffer_focused & ebivim")
+ for keys, cmd in {
+ # Control Combos
+ ("c-x", "c-e"): nc.edit_and_execute,
+ ("c-x", "e"): nc.edit_and_execute,
+ # Alt
+ ("escape", "b"): nc.backward_word,
+ ("escape", "c"): nc.capitalize_word,
+ ("escape", "d"): nc.kill_word,
+ ("escape", "h"): nc.backward_kill_word,
+ ("escape", "l"): nc.downcase_word,
+ ("escape", "u"): nc.uppercase_word,
+ ("escape", "y"): nc.yank_pop,
+ ("escape", "."): nc.yank_last_arg,
+ }.items()
+]
+
+
+def add_binding(bindings: KeyBindings, binding: Binding):
+ bindings.add(
+ *binding.keys,
+ **({"filter": binding.filter} if binding.filter is not None else {}),
+ )(binding.command)
+
+
+def create_ipython_shortcuts(shell, skip=None) -> KeyBindings:
+ """Set up the prompt_toolkit keyboard shortcuts for IPython.
+
+ Parameters
+ ----------
+ shell : InteractiveShell
+ The current IPython shell instance.
+ skip : List[Binding], optional
+ Bindings to skip.
+
+ Returns
+ -------
+ KeyBindings
+ The KeyBindings instance for prompt_toolkit.
+
+ """
+ kb = KeyBindings()
+ skip = skip or []
+ for binding in KEY_BINDINGS:
+ skip_this_one = False
+ for to_skip in skip:
+ if (
+ to_skip.command == binding.command
+ and to_skip.filter == binding.filter
+ and to_skip.keys == binding.keys
+ ):
+ skip_this_one = True
+ break
+ if skip_this_one:
+ continue
+ add_binding(kb, binding)
+
+ def get_input_mode(self):
+ app = get_app()
+ app.ttimeoutlen = shell.ttimeoutlen
+ app.timeoutlen = shell.timeoutlen
+
+ return self._input_mode
+
+ def set_input_mode(self, mode):
+ shape = {InputMode.NAVIGATION: 2, InputMode.REPLACE: 4}.get(mode, 6)
+ cursor = "\x1b[{} q".format(shape)
+
+ sys.stdout.write(cursor)
+ sys.stdout.flush()
+
+ self._input_mode = mode
+
+ if shell.editing_mode == "vi" and shell.modal_cursor:
+ ViState._input_mode = InputMode.INSERT # type: ignore
+ ViState.input_mode = property(get_input_mode, set_input_mode) # type: ignore
+
+ return kb
+
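+# Usage sketch (hedged; `shell` is assumed to be a TerminalInteractiveShell):
+#
+#   kb = create_ipython_shortcuts(shell)
+#   # or, skipping the Ctrl-\ quit binding:
+#   kb = create_ipython_shortcuts(shell, skip=[b for b in KEY_BINDINGS
+#                                              if b.command is quit])
+#
+# The returned KeyBindings object is then passed to the prompt_toolkit
+# application/session by the caller.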
+
+def reformat_and_execute(event):
+ """Reformat code and execute it"""
+ shell = get_ipython()
+ reformat_text_before_cursor(
+ event.current_buffer, event.current_buffer.document, shell
+ )
+ event.current_buffer.validate_and_handle()
+
+
+def reformat_text_before_cursor(buffer, document, shell):
+ text = buffer.delete_before_cursor(len(document.text[: document.cursor_position]))
+ try:
+ formatted_text = shell.reformat_handler(text)
+ buffer.insert_text(formatted_text)
+ except Exception as e:
+ buffer.insert_text(text)
+
+
+def handle_return_or_newline_or_execute(event):
+ shell = get_ipython()
+ if getattr(shell, "handle_return", None):
+ return shell.handle_return(shell)(event)
+ else:
+ return newline_or_execute_outer(shell)(event)
+
+
+def newline_or_execute_outer(shell):
+ def newline_or_execute(event):
+ """When the user presses return, insert a newline or execute the code."""
+ b = event.current_buffer
+ d = b.document
+
+ if b.complete_state:
+ cc = b.complete_state.current_completion
+ if cc:
+ b.apply_completion(cc)
+ else:
+ b.cancel_completion()
+ return
+
+ # If there's only one line, treat it as if the cursor is at the end.
+ # See https://github.com/ipython/ipython/issues/10425
+ if d.line_count == 1:
+ check_text = d.text
+ else:
+ check_text = d.text[: d.cursor_position]
+ status, indent = shell.check_complete(check_text)
+
+ # if all we have after the cursor is whitespace: reformat current text
+ # before cursor
+ after_cursor = d.text[d.cursor_position :]
+ reformatted = False
+ if not after_cursor.strip():
+ reformat_text_before_cursor(b, d, shell)
+ reformatted = True
+ if not (
+ d.on_last_line
+ or d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end()
+ ):
+ if shell.autoindent:
+ b.insert_text("\n" + indent)
+ else:
+ b.insert_text("\n")
+ return
+
+ if (status != "incomplete") and b.accept_handler:
+ if not reformatted:
+ reformat_text_before_cursor(b, d, shell)
+ b.validate_and_handle()
+ else:
+ if shell.autoindent:
+ b.insert_text("\n" + indent)
+ else:
+ b.insert_text("\n")
+
+ return newline_or_execute
+
+
+def previous_history_or_previous_completion(event):
+ """
+ Control-P in vi edit mode on readline is history next, unlike default prompt toolkit.
+
+ If the completer is open, this still selects the previous completion.
+ """
+ event.current_buffer.auto_up()
+
+
+def next_history_or_next_completion(event):
+ """
+ Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit.
+
+ If the completer is open, this still selects the next completion.
+ """
+ event.current_buffer.auto_down()
+
+
+def dismiss_completion(event):
+ """Dismiss completion"""
+ b = event.current_buffer
+ if b.complete_state:
+ b.cancel_completion()
+
+
+def reset_buffer(event):
+ """Reset buffer"""
+ b = event.current_buffer
+ if b.complete_state:
+ b.cancel_completion()
+ else:
+ b.reset()
+
+
+def reset_search_buffer(event):
+ """Reset search buffer"""
+ if event.current_buffer.document.text:
+ event.current_buffer.reset()
+ else:
+ event.app.layout.focus(DEFAULT_BUFFER)
+
+
+def suspend_to_bg(event):
+ """Suspend to background"""
+ event.app.suspend_to_background()
+
+
+def quit(event):
+ """
+ Quit application with ``SIGQUIT`` if supported or ``sys.exit`` otherwise.
+
+ On platforms that support SIGQUIT, send SIGQUIT to the current process.
+ On other platforms, just exit the process with a message.
+ """
+ sigquit = getattr(signal, "SIGQUIT", None)
+ if sigquit is not None:
+ os.kill(0, signal.SIGQUIT)
+ else:
+ sys.exit("Quit")
+
+
+def indent_buffer(event):
+ """Indent buffer"""
+ event.current_buffer.insert_text(" " * 4)
+
+
+def newline_autoindent(event):
+ """Insert a newline after the cursor indented appropriately.
+
+ Fancier version of the former ``newline_with_copy_margin``, which should
+ compute the correct indentation of the inserted line. That is to say, indent
+ by 4 extra spaces after a function definition, class definition, context
+ manager..., and dedent by 4 spaces after ``pass``, ``return``, ``raise ...``.
+ """
+ shell = get_ipython()
+ inputsplitter = shell.input_transformer_manager
+ b = event.current_buffer
+ d = b.document
+
+ if b.complete_state:
+ b.cancel_completion()
+ text = d.text[: d.cursor_position] + "\n"
+ _, indent = inputsplitter.check_complete(text)
+ b.insert_text("\n" + (" " * (indent or 0)), move_cursor=False)
+
+
+def open_input_in_editor(event):
+ """Open code from input in external editor"""
+ event.app.current_buffer.open_in_editor()
+
+
+if sys.platform == "win32":
+ from IPython.core.error import TryNext
+ from IPython.lib.clipboard import (
+ ClipboardEmpty,
+ tkinter_clipboard_get,
+ win32_clipboard_get,
+ )
+
+ @undoc
+ def win_paste(event):
+ try:
+ text = win32_clipboard_get()
+ except TryNext:
+ try:
+ text = tkinter_clipboard_get()
+ except (TryNext, ClipboardEmpty):
+ return
+ except ClipboardEmpty:
+ return
+ event.current_buffer.insert_text(text.replace("\t", " " * 4))
+
+else:
+
+ @undoc
+ def win_paste(event):
+ """Stub used on other platforms"""
+ pass
+
+
+KEY_BINDINGS = [
+ Binding(
+ handle_return_or_newline_or_execute,
+ ["enter"],
+ "default_buffer_focused & ~has_selection & insert_mode",
+ ),
+ Binding(
+ reformat_and_execute,
+ ["escape", "enter"],
+ "default_buffer_focused & ~has_selection & insert_mode & ebivim",
+ ),
+ Binding(quit, ["c-\\"]),
+ Binding(
+ previous_history_or_previous_completion,
+ ["c-p"],
+ "vi_insert_mode & default_buffer_focused",
+ ),
+ Binding(
+ next_history_or_next_completion,
+ ["c-n"],
+ "vi_insert_mode & default_buffer_focused",
+ ),
+ Binding(dismiss_completion, ["c-g"], "default_buffer_focused & has_completions"),
+ Binding(reset_buffer, ["c-c"], "default_buffer_focused"),
+ Binding(reset_search_buffer, ["c-c"], "search_buffer_focused"),
+ Binding(suspend_to_bg, ["c-z"], "supports_suspend"),
+ Binding(
+ indent_buffer,
+ ["tab"], # Ctrl+I == Tab
+ "default_buffer_focused"
+ " & ~has_selection"
+ " & insert_mode"
+ " & cursor_in_leading_ws",
+ ),
+ Binding(newline_autoindent, ["c-o"], "default_buffer_focused & emacs_insert_mode"),
+ Binding(open_input_in_editor, ["f2"], "default_buffer_focused"),
+ *AUTO_MATCH_BINDINGS,
+ *AUTO_SUGGEST_BINDINGS,
+ Binding(
+ display_completions_like_readline,
+ ["c-i"],
+ "readline_like_completions"
+ " & default_buffer_focused"
+ " & ~has_selection"
+ " & insert_mode"
+ " & ~cursor_in_leading_ws",
+ ),
+ Binding(win_paste, ["c-v"], "default_buffer_focused & ~vi_mode & is_windows_os"),
+ *SIMPLE_CONTROL_BINDINGS,
+ *ALT_AND_COMOBO_CONTROL_BINDINGS,
+]
diff --git a/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_match.py b/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_match.py
new file mode 100644
index 0000000000..6c2b1ef70c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_match.py
@@ -0,0 +1,104 @@
+"""
+Utilities function for keybinding with prompt toolkit.
+
+This will be bound to specific key press and filter modes,
+like whether we are in edit mode, and whether the completer is open.
+"""
+import re
+from prompt_toolkit.key_binding import KeyPressEvent
+
+
+def parenthesis(event: KeyPressEvent):
+ """Auto-close parenthesis"""
+ event.current_buffer.insert_text("()")
+ event.current_buffer.cursor_left()
+
+
+def brackets(event: KeyPressEvent):
+ """Auto-close brackets"""
+ event.current_buffer.insert_text("[]")
+ event.current_buffer.cursor_left()
+
+
+def braces(event: KeyPressEvent):
+ """Auto-close braces"""
+ event.current_buffer.insert_text("{}")
+ event.current_buffer.cursor_left()
+
+
+def double_quote(event: KeyPressEvent):
+ """Auto-close double quotes"""
+ event.current_buffer.insert_text('""')
+ event.current_buffer.cursor_left()
+
+
+def single_quote(event: KeyPressEvent):
+ """Auto-close single quotes"""
+ event.current_buffer.insert_text("''")
+ event.current_buffer.cursor_left()
+
+
+def docstring_double_quotes(event: KeyPressEvent):
+ """Auto-close docstring (double quotes)"""
+ event.current_buffer.insert_text('""""')
+ event.current_buffer.cursor_left(3)
+
+
+def docstring_single_quotes(event: KeyPressEvent):
+ """Auto-close docstring (single quotes)"""
+ event.current_buffer.insert_text("''''")
+ event.current_buffer.cursor_left(3)
+
+
+def raw_string_parenthesis(event: KeyPressEvent):
+ """Auto-close parenthesis in raw strings"""
+ matches = re.match(
+ r".*(r|R)[\"'](-*)",
+ event.current_buffer.document.current_line_before_cursor,
+ )
+ dashes = matches.group(2) if matches else ""
+ event.current_buffer.insert_text("()" + dashes)
+ event.current_buffer.cursor_left(len(dashes) + 1)
+
+
+def raw_string_bracket(event: KeyPressEvent):
+ """Auto-close bracker in raw strings"""
+ matches = re.match(
+ r".*(r|R)[\"'](-*)",
+ event.current_buffer.document.current_line_before_cursor,
+ )
+ dashes = matches.group(2) if matches else ""
+ event.current_buffer.insert_text("[]" + dashes)
+ event.current_buffer.cursor_left(len(dashes) + 1)
+
+
+def raw_string_braces(event: KeyPressEvent):
+ """Auto-close braces in raw strings"""
+ matches = re.match(
+ r".*(r|R)[\"'](-*)",
+ event.current_buffer.document.current_line_before_cursor,
+ )
+ dashes = matches.group(2) if matches else ""
+ event.current_buffer.insert_text("{}" + dashes)
+ event.current_buffer.cursor_left(len(dashes) + 1)
+
+
+def skip_over(event: KeyPressEvent):
+ """Skip over automatically added parenthesis/quote.
+
+ (rather than adding another parenthesis/quote)"""
+ event.current_buffer.cursor_right()
+
+
+def delete_pair(event: KeyPressEvent):
+ """Delete auto-closed parenthesis"""
+ event.current_buffer.delete()
+ event.current_buffer.delete_before_cursor()
+
+
+auto_match_parens = {"(": parenthesis, "[": brackets, "{": braces}
+auto_match_parens_raw_string = {
+ "(": raw_string_parenthesis,
+ "[": raw_string_bracket,
+ "{": raw_string_braces,
+}
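+
+# Behaviour sketch (illustrative): with auto_match enabled, typing "(" inserts
+# "()" and leaves the cursor between the pair; typing ")" right before an
+# existing ")" merely skips over it (skip_over); and backspace between a fresh
+# pair removes both characters (delete_pair).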
diff --git a/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py b/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py
new file mode 100644
index 0000000000..65f91577ce
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py
@@ -0,0 +1,401 @@
+import re
+import tokenize
+from io import StringIO
+from typing import Callable, List, Optional, Union, Generator, Tuple
+import warnings
+
+from prompt_toolkit.buffer import Buffer
+from prompt_toolkit.key_binding import KeyPressEvent
+from prompt_toolkit.key_binding.bindings import named_commands as nc
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
+from prompt_toolkit.document import Document
+from prompt_toolkit.history import History
+from prompt_toolkit.shortcuts import PromptSession
+from prompt_toolkit.layout.processors import (
+ Processor,
+ Transformation,
+ TransformationInput,
+)
+
+from IPython.core.getipython import get_ipython
+from IPython.utils.tokenutil import generate_tokens
+
+from .filters import pass_through
+
+
+def _get_query(document: Document):
+ return document.lines[document.cursor_position_row]
+
+
+class AppendAutoSuggestionInAnyLine(Processor):
+ """
+ Append the auto suggestion to lines other than the last (appending to the
+ last line is natively supported by the prompt toolkit).
+ """
+
+ def __init__(self, style: str = "class:auto-suggestion") -> None:
+ self.style = style
+
+ def apply_transformation(self, ti: TransformationInput) -> Transformation:
+ is_last_line = ti.lineno == ti.document.line_count - 1
+ is_active_line = ti.lineno == ti.document.cursor_position_row
+
+ if not is_last_line and is_active_line:
+ buffer = ti.buffer_control.buffer
+
+ if buffer.suggestion and ti.document.is_cursor_at_the_end_of_line:
+ suggestion = buffer.suggestion.text
+ else:
+ suggestion = ""
+
+ return Transformation(fragments=ti.fragments + [(self.style, suggestion)])
+ else:
+ return Transformation(fragments=ti.fragments)
+
+
+class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
+ """
+ A subclass of AutoSuggestFromHistory that allows navigating to the
+ next/previous suggestion from history. To do so it remembers the current
+ position, but its state needs to be carefully cleared on the right events.
+ """
+
+ def __init__(
+ self,
+ ):
+ self.skip_lines = 0
+ self._connected_apps = []
+
+ def reset_history_position(self, _: Buffer):
+ self.skip_lines = 0
+
+ def disconnect(self):
+ for pt_app in self._connected_apps:
+ text_insert_event = pt_app.default_buffer.on_text_insert
+ text_insert_event.remove_handler(self.reset_history_position)
+
+ def connect(self, pt_app: PromptSession):
+ self._connected_apps.append(pt_app)
+ # note: `on_text_changed` could be used for slightly different behaviour
+ # on character deletion (i.e. resetting the history position on backspace)
+ pt_app.default_buffer.on_text_insert.add_handler(self.reset_history_position)
+ pt_app.default_buffer.on_cursor_position_changed.add_handler(self._dismiss)
+
+ def get_suggestion(
+ self, buffer: Buffer, document: Document
+ ) -> Optional[Suggestion]:
+ text = _get_query(document)
+
+ if text.strip():
+ for suggestion, _ in self._find_next_match(
+ text, self.skip_lines, buffer.history
+ ):
+ return Suggestion(suggestion)
+
+ return None
+
+ def _dismiss(self, buffer, *args, **kwargs):
+ buffer.suggestion = None
+
+ def _find_match(
+ self, text: str, skip_lines: float, history: History, previous: bool
+ ) -> Generator[Tuple[str, float], None, None]:
+ """
+ text : str
+ Text content to find a match for, the user cursor is most of the
+ time at the end of this text.
+ skip_lines : float
+ Number of items to skip in the search; this indicates how far in the
+ list the user has navigated by pressing up or down.
+ The float type is used because the base value is +inf.
+ history : History
+ prompt_toolkit History instance to fetch previous entries from.
+ previous : bool
+ Direction of the search: whether we are looking for the previous
+ match (True) or the next match (False).
+
+ Yields
+ ------
+ Tuple with:
+ str:
+ the current suggestion.
+ float:
+ the matching line number; only ints are actually yielded, but the
+ value is passed back via skip_lines, which may be +inf (a float).
+
+
+ """
+ line_number = -1
+ for string in reversed(list(history.get_strings())):
+ for line in reversed(string.splitlines()):
+ line_number += 1
+ if not previous and line_number < skip_lines:
+ continue
+ # do not return empty suggestions as these
+ # close the auto-suggestion overlay (and are useless)
+ if line.startswith(text) and len(line) > len(text):
+ yield line[len(text) :], line_number
+ if previous and line_number >= skip_lines:
+ return
+
+ def _find_next_match(
+ self, text: str, skip_lines: float, history: History
+ ) -> Generator[Tuple[str, float], None, None]:
+ return self._find_match(text, skip_lines, history, previous=False)
+
+ def _find_previous_match(self, text: str, skip_lines: float, history: History):
+ return reversed(
+ list(self._find_match(text, skip_lines, history, previous=True))
+ )
+
+ def up(self, query: str, other_than: str, history: History) -> None:
+ for suggestion, line_number in self._find_next_match(
+ query, self.skip_lines, history
+ ):
+ # if user has history ['very.a', 'very', 'very.b'] and typed 'very'
+ # we want to switch from 'very.b' to 'very.a' because a) if the
+ # suggestion equals current text, prompt-toolkit aborts suggesting
+ # b) the user likely would not be interested in 'very' anyway (they
+ # already typed it).
+ if query + suggestion != other_than:
+ self.skip_lines = line_number
+ break
+ else:
+ # no matches found, cycle back to beginning
+ self.skip_lines = 0
+
+ def down(self, query: str, other_than: str, history: History) -> None:
+ for suggestion, line_number in self._find_previous_match(
+ query, self.skip_lines, history
+ ):
+ if query + suggestion != other_than:
+ self.skip_lines = line_number
+ break
+ else:
+ # no matches found, cycle to end
+ for suggestion, line_number in self._find_previous_match(
+ query, float("Inf"), history
+ ):
+ if query + suggestion != other_than:
+ self.skip_lines = line_number
+ break
+
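+# Navigation sketch (illustrative, mirroring the comment in `up` above): with
+# history ["very.a", "very", "very.b"] and "very" typed, the initial suggestion
+# completes to "very.b"; pressing the swap-up shortcut moves to "very.a",
+# skipping the entry that is identical to the typed text.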
+
+def accept_or_jump_to_end(event: KeyPressEvent):
+ """Apply autosuggestion or jump to end of line."""
+ buffer = event.current_buffer
+ d = buffer.document
+ after_cursor = d.text[d.cursor_position :]
+ lines = after_cursor.split("\n")
+ end_of_current_line = lines[0].strip()
+ suggestion = buffer.suggestion
+ if (suggestion is not None) and (suggestion.text) and (end_of_current_line == ""):
+ buffer.insert_text(suggestion.text)
+ else:
+ nc.end_of_line(event)
+
+
+def _deprected_accept_in_vi_insert_mode(event: KeyPressEvent):
+ """Accept autosuggestion or jump to end of line.
+
+ .. deprecated:: 8.12
+ Use `accept_or_jump_to_end` instead.
+ """
+ return accept_or_jump_to_end(event)
+
+
+def accept(event: KeyPressEvent):
+ """Accept autosuggestion"""
+ buffer = event.current_buffer
+ suggestion = buffer.suggestion
+ if suggestion:
+ buffer.insert_text(suggestion.text)
+ else:
+ nc.forward_char(event)
+
+
+def discard(event: KeyPressEvent):
+ """Discard autosuggestion"""
+ buffer = event.current_buffer
+ buffer.suggestion = None
+
+
+def accept_word(event: KeyPressEvent):
+ """Fill partial autosuggestion by word"""
+ buffer = event.current_buffer
+ suggestion = buffer.suggestion
+ if suggestion:
+ t = re.split(r"(\S+\s+)", suggestion.text)
+ buffer.insert_text(next((x for x in t if x), ""))
+ else:
+ nc.forward_word(event)
+
+
+def accept_character(event: KeyPressEvent):
+ """Fill partial autosuggestion by character"""
+ b = event.current_buffer
+ suggestion = b.suggestion
+ if suggestion and suggestion.text:
+ b.insert_text(suggestion.text[0])
+
+
+def accept_and_keep_cursor(event: KeyPressEvent):
+ """Accept autosuggestion and keep cursor in place"""
+ buffer = event.current_buffer
+ old_position = buffer.cursor_position
+ suggestion = buffer.suggestion
+ if suggestion:
+ buffer.insert_text(suggestion.text)
+ buffer.cursor_position = old_position
+
+
+def accept_and_move_cursor_left(event: KeyPressEvent):
+ """Accept autosuggestion and move cursor left in place"""
+ accept_and_keep_cursor(event)
+ nc.backward_char(event)
+
+
+def _update_hint(buffer: Buffer):
+ if buffer.auto_suggest:
+ suggestion = buffer.auto_suggest.get_suggestion(buffer, buffer.document)
+ buffer.suggestion = suggestion
+
+
+def backspace_and_resume_hint(event: KeyPressEvent):
+ """Resume autosuggestions after deleting last character"""
+ nc.backward_delete_char(event)
+ _update_hint(event.current_buffer)
+
+
+def resume_hinting(event: KeyPressEvent):
+ """Resume autosuggestions"""
+ pass_through.reply(event)
+ # Order matters: if the update happened first and the event reply second, the
+ # suggestion would be auto-accepted if both actions were bound to the same key.
+ _update_hint(event.current_buffer)
+
+
+def up_and_update_hint(event: KeyPressEvent):
+ """Go up and update hint"""
+ current_buffer = event.current_buffer
+
+ current_buffer.auto_up(count=event.arg)
+ _update_hint(current_buffer)
+
+
+def down_and_update_hint(event: KeyPressEvent):
+ """Go down and update hint"""
+ current_buffer = event.current_buffer
+
+ current_buffer.auto_down(count=event.arg)
+ _update_hint(current_buffer)
+
+
+def accept_token(event: KeyPressEvent):
+ """Fill partial autosuggestion by token"""
+ b = event.current_buffer
+ suggestion = b.suggestion
+
+ if suggestion:
+ prefix = _get_query(b.document)
+ text = prefix + suggestion.text
+
+ tokens: List[Optional[str]] = [None, None, None]
+ substrings = [""]
+ i = 0
+
+ for token in generate_tokens(StringIO(text).readline):
+ if token.type == tokenize.NEWLINE:
+ index = len(text)
+ else:
+ index = text.index(token[1], len(substrings[-1]))
+ substrings.append(text[:index])
+ tokenized_so_far = substrings[-1]
+ if tokenized_so_far.startswith(prefix):
+ if i == 0 and len(tokenized_so_far) > len(prefix):
+ tokens[0] = tokenized_so_far[len(prefix) :]
+ substrings.append(tokenized_so_far)
+ i += 1
+ tokens[i] = token[1]
+ if i == 2:
+ break
+ i += 1
+
+ if tokens[0]:
+ to_insert: str
+ insert_text = substrings[-2]
+ if tokens[1] and len(tokens[1]) == 1:
+ insert_text = substrings[-1]
+ to_insert = insert_text[len(prefix) :]
+ b.insert_text(to_insert)
+ return
+
+ nc.forward_word(event)
+
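+# Illustration (made-up buffer contents): with "imp" typed and a suggestion of
+# "ort numpy as np", accept_token inserts just "ort ", i.e. roughly one token
+# at a time, while accept_word and accept_character above fill by word and by
+# single character respectively.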
+
+Provider = Union[AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None]
+
+
+def _swap_autosuggestion(
+ buffer: Buffer,
+ provider: NavigableAutoSuggestFromHistory,
+ direction_method: Callable,
+):
+ """
+ We skip the most recent history entry (in either direction) if it equals the
+ current autosuggestion, because if the user cycles while an auto-suggestion
+ is shown they most likely want something other than what was suggested
+ (otherwise they would have accepted the suggestion).
+ """
+ suggestion = buffer.suggestion
+ if not suggestion:
+ return
+
+ query = _get_query(buffer.document)
+ current = query + suggestion.text
+
+ direction_method(query=query, other_than=current, history=buffer.history)
+
+ new_suggestion = provider.get_suggestion(buffer, buffer.document)
+ buffer.suggestion = new_suggestion
+
+
+def swap_autosuggestion_up(event: KeyPressEvent):
+ """Get next autosuggestion from history."""
+ shell = get_ipython()
+ provider = shell.auto_suggest
+
+ if not isinstance(provider, NavigableAutoSuggestFromHistory):
+ return
+
+ return _swap_autosuggestion(
+ buffer=event.current_buffer, provider=provider, direction_method=provider.up
+ )
+
+
+def swap_autosuggestion_down(event: KeyPressEvent):
+ """Get previous autosuggestion from history."""
+ shell = get_ipython()
+ provider = shell.auto_suggest
+
+ if not isinstance(provider, NavigableAutoSuggestFromHistory):
+ return
+
+ return _swap_autosuggestion(
+ buffer=event.current_buffer,
+ provider=provider,
+ direction_method=provider.down,
+ )
+
+
+def __getattr__(key):
+ if key == "accept_in_vi_insert_mode":
+ warnings.warn(
+ "`accept_in_vi_insert_mode` is deprecated since IPython 8.12 and "
+ "renamed to `accept_or_jump_to_end`. Please update your configuration "
+ "accordingly",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _deprected_accept_in_vi_insert_mode
+ raise AttributeError
diff --git a/contrib/python/ipython/py3/IPython/terminal/shortcuts/filters.py b/contrib/python/ipython/py3/IPython/terminal/shortcuts/filters.py
new file mode 100644
index 0000000000..7c9d6a9c41
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/terminal/shortcuts/filters.py
@@ -0,0 +1,322 @@
+"""
+Filters restricting scope of IPython Terminal shortcuts.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import ast
+import re
+import signal
+import sys
+from typing import Callable, Dict, Union
+
+from prompt_toolkit.application.current import get_app
+from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER
+from prompt_toolkit.key_binding import KeyPressEvent
+from prompt_toolkit.filters import Condition, Filter, emacs_insert_mode, has_completions
+from prompt_toolkit.filters import has_focus as has_focus_impl
+from prompt_toolkit.filters import (
+ Always,
+ Never,
+ has_selection,
+ has_suggestion,
+ vi_insert_mode,
+ vi_mode,
+)
+from prompt_toolkit.layout.layout import FocusableElement
+
+from IPython.core.getipython import get_ipython
+from IPython.core.guarded_eval import _find_dunder, BINARY_OP_DUNDERS, UNARY_OP_DUNDERS
+from IPython.terminal.shortcuts import auto_suggest
+from IPython.utils.decorators import undoc
+
+
+@undoc
+@Condition
+def cursor_in_leading_ws():
+ before = get_app().current_buffer.document.current_line_before_cursor
+ return (not before) or before.isspace()
+
+
+def has_focus(value: FocusableElement):
+ """Wrapper around has_focus adding a nice `__name__` to tester function"""
+ tester = has_focus_impl(value).func
+ tester.__name__ = f"is_focused({value})"
+ return Condition(tester)
+
+
+@undoc
+@Condition
+def has_line_below() -> bool:
+ document = get_app().current_buffer.document
+ return document.cursor_position_row < len(document.lines) - 1
+
+
+@undoc
+@Condition
+def is_cursor_at_the_end_of_line() -> bool:
+ document = get_app().current_buffer.document
+ return document.is_cursor_at_the_end_of_line
+
+
+@undoc
+@Condition
+def has_line_above() -> bool:
+ document = get_app().current_buffer.document
+ return document.cursor_position_row != 0
+
+
+@Condition
+def ebivim():
+ shell = get_ipython()
+ return shell.emacs_bindings_in_vi_insert_mode
+
+
+@Condition
+def supports_suspend():
+ return hasattr(signal, "SIGTSTP")
+
+
+@Condition
+def auto_match():
+ shell = get_ipython()
+ return shell.auto_match
+
+
+def all_quotes_paired(quote, buf):
+ paired = True
+ i = 0
+ while i < len(buf):
+ c = buf[i]
+ if c == quote:
+ paired = not paired
+ elif c == "\\":
+ i += 1
+ i += 1
+ return paired
+
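+# e.g. (illustration) all_quotes_paired('"', 'print("a")') is True, while
+# all_quotes_paired('"', 'print("a') is False; backslash-escaped quotes are
+# skipped and do not toggle the paired state.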
+
+_preceding_text_cache: Dict[Union[str, Callable], Condition] = {}
+_following_text_cache: Dict[Union[str, Callable], Condition] = {}
+
+
+def preceding_text(pattern: Union[str, Callable]):
+ if pattern in _preceding_text_cache:
+ return _preceding_text_cache[pattern]
+
+ if callable(pattern):
+
+ def _preceding_text():
+ app = get_app()
+ before_cursor = app.current_buffer.document.current_line_before_cursor
+ # mypy can't infer if(callable): https://github.com/python/mypy/issues/3603
+ return bool(pattern(before_cursor)) # type: ignore[operator]
+
+ else:
+ m = re.compile(pattern)
+
+ def _preceding_text():
+ app = get_app()
+ before_cursor = app.current_buffer.document.current_line_before_cursor
+ return bool(m.match(before_cursor))
+
+ _preceding_text.__name__ = f"preceding_text({pattern!r})"
+
+ condition = Condition(_preceding_text)
+ _preceding_text_cache[pattern] = condition
+ return condition
+
+
+def following_text(pattern):
+ try:
+ return _following_text_cache[pattern]
+ except KeyError:
+ pass
+ m = re.compile(pattern)
+
+ def _following_text():
+ app = get_app()
+ return bool(m.match(app.current_buffer.document.current_line_after_cursor))
+
+ _following_text.__name__ = f"following_text({pattern!r})"
+
+ condition = Condition(_following_text)
+ _following_text_cache[pattern] = condition
+ return condition
+
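+# Illustration of the two factories above: preceding_text(r".*\($") yields a
+# Condition that is true when the current line before the cursor ends with
+# "(", and following_text(r"^\)") one that is true when the text right after
+# the cursor starts with ")". Both results are cached per pattern.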
+
+@Condition
+def not_inside_unclosed_string():
+ app = get_app()
+ s = app.current_buffer.document.text_before_cursor
+ # remove escaped quotes
+ s = s.replace('\\"', "").replace("\\'", "")
+ # remove triple-quoted string literals
+ s = re.sub(r"(?:\"\"\"[\s\S]*\"\"\"|'''[\s\S]*''')", "", s)
+ # remove single-line (non-triple-quoted) string literals
+ s = re.sub(r"""(?:"[^"]*["\n]|'[^']*['\n])""", "", s)
+ return not ('"' in s or "'" in s)
+
+
+@Condition
+def navigable_suggestions():
+ shell = get_ipython()
+ return isinstance(shell.auto_suggest, auto_suggest.NavigableAutoSuggestFromHistory)
+
+
+@Condition
+def readline_like_completions():
+ shell = get_ipython()
+ return shell.display_completions == "readlinelike"
+
+
+@Condition
+def is_windows_os():
+ return sys.platform == "win32"
+
+
+class PassThrough(Filter):
+ """A filter allowing to implement pass-through behaviour of keybindings.
+
+ Prompt toolkit key processor dispatches only one event per binding match,
+ which means that adding a new shortcut will suppress the old shortcut
+ if the keybindings are the same (unless one is filtered out).
+
+ To stop a shortcut binding from suppressing other shortcuts:
+ - add the `pass_through` filter to the list of filters, and
+ - call `pass_through.reply(event)` in the shortcut handler.
+ """
+
+ def __init__(self):
+ self._is_replying = False
+
+ def reply(self, event: KeyPressEvent):
+ self._is_replying = True
+ try:
+ event.key_processor.reset()
+ event.key_processor.feed_multiple(event.key_sequence)
+ event.key_processor.process_keys()
+ finally:
+ self._is_replying = False
+
+ def __call__(self):
+ return not self._is_replying
+
+
+pass_through = PassThrough()
+
+# this one is callable and re-used multiple times, hence it needs to be
+# defined only once beforehand so that transforming back to human-readable
+# names works well in the documentation.
+default_buffer_focused = has_focus(DEFAULT_BUFFER)
+
+KEYBINDING_FILTERS = {
+ "always": Always(),
+ # never is used for exposing commands which have no default keybindings
+ "never": Never(),
+ "has_line_below": has_line_below,
+ "has_line_above": has_line_above,
+ "is_cursor_at_the_end_of_line": is_cursor_at_the_end_of_line,
+ "has_selection": has_selection,
+ "has_suggestion": has_suggestion,
+ "vi_mode": vi_mode,
+ "vi_insert_mode": vi_insert_mode,
+ "emacs_insert_mode": emacs_insert_mode,
+ # https://github.com/ipython/ipython/pull/12603 argued for inclusion of
+ # emacs key bindings with a configurable `emacs_bindings_in_vi_insert_mode`
+ # toggle; when the toggle is on, the user can access keybindings like `ctrl + e`
+ # in vi insert mode. Because some of the emacs bindings involve `escape`
+ # followed by another key, e.g. `escape` followed by `f`, prompt-toolkit
+ # needs to wait to see if there will be another character typed in before
+ # executing pure `escape` keybinding; in vi insert mode `escape` switches to
+ # command mode, which is a common and performance-critical action for vi users.
+ # To avoid the delay users employ a workaround:
+ # https://github.com/ipython/ipython/issues/13443#issuecomment-1032753703
+ # which involves switching `emacs_bindings_in_vi_insert_mode` off.
+ #
+ # For the workaround to work:
+ # 1) end users need to toggle `emacs_bindings_in_vi_insert_mode` off
+ # 2) all keybindings which would involve `escape` need to respect that
+ # toggle by including either:
+ # - `vi_insert_mode & ebivim` for actions which have emacs keybindings
+ # predefined upstream in prompt-toolkit, or
+ # - `emacs_like_insert_mode` for actions which do not have existing
+ # emacs keybindings predefined upstream (or need overriding of the
+ # upstream bindings to modify behaviour), defined below.
+ "emacs_like_insert_mode": (vi_insert_mode & ebivim) | emacs_insert_mode,
+ "has_completions": has_completions,
+ "insert_mode": vi_insert_mode | emacs_insert_mode,
+ "default_buffer_focused": default_buffer_focused,
+ "search_buffer_focused": has_focus(SEARCH_BUFFER),
+ # `ebivim` stands for emacs bindings in vi insert mode
+ "ebivim": ebivim,
+ "supports_suspend": supports_suspend,
+ "is_windows_os": is_windows_os,
+ "auto_match": auto_match,
+ "focused_insert": (vi_insert_mode | emacs_insert_mode) & default_buffer_focused,
+ "not_inside_unclosed_string": not_inside_unclosed_string,
+ "readline_like_completions": readline_like_completions,
+ "preceded_by_paired_double_quotes": preceding_text(
+ lambda line: all_quotes_paired('"', line)
+ ),
+ "preceded_by_paired_single_quotes": preceding_text(
+ lambda line: all_quotes_paired("'", line)
+ ),
+ "preceded_by_raw_str_prefix": preceding_text(r".*(r|R)[\"'](-*)$"),
+ "preceded_by_two_double_quotes": preceding_text(r'^.*""$'),
+ "preceded_by_two_single_quotes": preceding_text(r"^.*''$"),
+ "followed_by_closing_paren_or_end": following_text(r"[,)}\]]|$"),
+ "preceded_by_opening_round_paren": preceding_text(r".*\($"),
+ "preceded_by_opening_bracket": preceding_text(r".*\[$"),
+ "preceded_by_opening_brace": preceding_text(r".*\{$"),
+ "preceded_by_double_quote": preceding_text('.*"$'),
+ "preceded_by_single_quote": preceding_text(r".*'$"),
+ "followed_by_closing_round_paren": following_text(r"^\)"),
+ "followed_by_closing_bracket": following_text(r"^\]"),
+ "followed_by_closing_brace": following_text(r"^\}"),
+ "followed_by_double_quote": following_text('^"'),
+ "followed_by_single_quote": following_text("^'"),
+ "navigable_suggestions": navigable_suggestions,
+ "cursor_in_leading_ws": cursor_in_leading_ws,
+ "pass_through": pass_through,
+}
+
+
+def eval_node(node: Union[ast.AST, None]):
+ if node is None:
+ return None
+ if isinstance(node, ast.Expression):
+ return eval_node(node.body)
+ if isinstance(node, ast.BinOp):
+ left = eval_node(node.left)
+ right = eval_node(node.right)
+ dunders = _find_dunder(node.op, BINARY_OP_DUNDERS)
+ if dunders:
+ return getattr(left, dunders[0])(right)
+ raise ValueError(f"Unknown binary operation: {node.op}")
+ if isinstance(node, ast.UnaryOp):
+ value = eval_node(node.operand)
+ dunders = _find_dunder(node.op, UNARY_OP_DUNDERS)
+ if dunders:
+ return getattr(value, dunders[0])()
+ raise ValueError(f"Unknown unary operation: {node.op}")
+ if isinstance(node, ast.Name):
+ if node.id in KEYBINDING_FILTERS:
+ return KEYBINDING_FILTERS[node.id]
+ else:
+ sep = "\n - "
+ known_filters = sep.join(sorted(KEYBINDING_FILTERS))
+ raise NameError(
+ f"{node.id} is not a known shortcut filter."
+ f" Known filters are: {sep}{known_filters}."
+ )
+ raise ValueError("Unhandled node", ast.dump(node))
+
+
+def filter_from_string(code: str):
+ expression = ast.parse(code, mode="eval")
+ return eval_node(expression)
+
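+# Illustration: filter_from_string("has_suggestion & ~vi_mode") parses the
+# expression with `ast` and combines the named filters from KEYBINDING_FILTERS
+# via their `&` / `~` dunder methods, returning a prompt_toolkit filter usable
+# in key bindings.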
+
+__all__ = ["KEYBINDING_FILTERS", "filter_from_string"]
diff --git a/contrib/python/ipython/py3/IPython/testing/__init__.py b/contrib/python/ipython/py3/IPython/testing/__init__.py
new file mode 100644
index 0000000000..8fcd65ea41
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/__init__.py
@@ -0,0 +1,20 @@
+"""Testing support (tools to test IPython itself).
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2009-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+import os
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+# We scale all timeouts via this factor, slow machines can increase it
+IPYTHON_TESTING_TIMEOUT_SCALE = float(os.getenv(
+ 'IPYTHON_TESTING_TIMEOUT_SCALE', 1))
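+
+# Illustration: test code is expected to scale its timeouts by this factor,
+# e.g. ``time.sleep(2 * IPYTHON_TESTING_TIMEOUT_SCALE)``, so a slow CI machine
+# can export IPYTHON_TESTING_TIMEOUT_SCALE=10 to relax all timeouts at once.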
diff --git a/contrib/python/ipython/py3/IPython/testing/decorators.py b/contrib/python/ipython/py3/IPython/testing/decorators.py
new file mode 100644
index 0000000000..af42f349d5
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/decorators.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+"""Decorators for labeling test objects.
+
+Decorators that merely return a modified version of the original function
+object are straightforward. Decorators that return a new function object need
+to use nose.tools.make_decorator(original_function)(decorator) in returning the
+decorator, in order to preserve metadata such as function name, setup and
+teardown functions and so on - see nose.tools for more information.
+
+This module provides a set of useful decorators meant to be ready to use in
+your own tests. See the bottom of the file for the ready-made ones, and if you
+find yourself writing a new one that may be of generic use, add it here.
+
+Included decorators:
+
+
+Lightweight testing that remains unittest-compatible.
+
+- An @as_unittest decorator can be used to tag any normal parameter-less
+ function as a unittest TestCase. Then, both nose and normal unittest will
+ recognize it as such. This will make it easier to migrate away from Nose if
+ we ever need/want to while maintaining very lightweight tests.
+
+NOTE: This file contains IPython-specific decorators. Using the machinery in
+IPython.external.decorators, we import either numpy.testing.decorators if numpy is
+available, OR use equivalent code in IPython.external._decorators, which
+we've copied verbatim from numpy.
+
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+from importlib import import_module
+
+from decorator import decorator
+
+# Expose the unittest-driven decorators
+from .ipunittest import ipdoctest, ipdocstring
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+# Simple example of the basic idea
+def as_unittest(func):
+ """Decorator to make a simple function into a normal test via unittest."""
+ class Tester(unittest.TestCase):
+ def test(self):
+ func()
+
+ Tester.__name__ = func.__name__
+
+ return Tester
+
+# Utility functions
+
+
+def skipif(skip_condition, msg=None):
+ """Make function raise SkipTest exception if skip_condition is true
+
+ Parameters
+ ----------
+
+ skip_condition : bool
+ Flag to determine whether to skip the test. Note that the condition
+ must be a plain bool: the implementation asserts on the type, so
+ callables are no longer accepted here.
+ msg : string
+ Message to give on raising a SkipTest exception.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes SkipTest
+ to be raised when the skip_condition was True, and the function
+ to be called normally otherwise.
+ """
+ if msg is None:
+ msg = "Test skipped due to test condition."
+
+ import pytest
+
+ assert isinstance(skip_condition, bool)
+ return pytest.mark.skipif(skip_condition, reason=msg)
+
+
+# A version with the condition set to true, common case just to attach a message
+# to a skip decorator
+def skip(msg=None):
+ """Decorator factory - mark a test function for skipping from test suite.
+
+ Parameters
+ ----------
+ msg : string
+ Optional message to be added.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes SkipTest
+ to be raised, with the optional message added.
+ """
+ if msg and not isinstance(msg, str):
+ raise ValueError('invalid object passed to `@skip` decorator, did you '
+ 'mean `@skip()` with brackets?')
+ return skipif(True, msg)
+
+
+def onlyif(condition, msg):
+ """The reverse from skipif, see skipif for details."""
+
+ return skipif(not condition, msg)
+
+#-----------------------------------------------------------------------------
+# Utility functions for decorators
+def module_not_available(module):
+ """Can module be imported? Returns true if module does NOT import.
+
+ This is used to make a decorator to skip tests that require a module to be
+ available, while delaying the actual import (e.g. 'import numpy') to test
+ execution time.
+ """
+ try:
+ mod = import_module(module)
+ mod_not_avail = False
+ except ImportError:
+ mod_not_avail = True
+
+ return mod_not_avail
+
+
+#-----------------------------------------------------------------------------
+# Decorators for public use
+
+# Decorators to skip certain tests on specific platforms.
+skip_win32 = skipif(sys.platform == 'win32',
+ "This test does not run under Windows")
+skip_linux = skipif(sys.platform.startswith('linux'),
+ "This test does not run under Linux")
+skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
+
+
+# Decorators to skip tests if not on specific platforms.
+skip_if_not_win32 = skipif(sys.platform != 'win32',
+ "This test only runs under Windows")
+skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
+ "This test only runs under Linux")
+
+_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
+ os.environ.get('DISPLAY', '') == '')
+_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
+
+skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
+
+# Other skip decorators
+
+# generic skip without module
+skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
+
+skipif_not_numpy = skip_without('numpy')
+
+skipif_not_matplotlib = skip_without('matplotlib')
+
+# A null 'decorator', useful to make more readable code that needs to pick
+# between different decorators based on OS or other conditions
+null_deco = lambda f: f
+
+# Some tests only run where we can use unicode paths. Note that we can't just
+# check os.path.supports_unicode_filenames, which is always False on Linux.
+try:
+ f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
+except UnicodeEncodeError:
+ unicode_paths = False
+else:
+ unicode_paths = True
+ f.close()
+
+onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
+ "where we can use unicode in filenames."))
+
+
+def onlyif_cmds_exist(*commands):
+ """
+ Decorator to skip test when at least one of `commands` is not found.
+ """
+ assert (
+ os.environ.get("IPTEST_WORKING_DIR", None) is None
+ ), "iptest deprecated since IPython 8.0"
+ for cmd in commands:
+ reason = f"This test runs only if command '{cmd}' is installed"
+ if not shutil.which(cmd):
+ import pytest
+
+ return pytest.mark.skip(reason=reason)
+ return null_deco
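+
+
+# Usage sketch (illustrative only; the test names below are hypothetical): the
+# module- and command-based decorators defined above are used the same way, e.g.
+#
+#     @skip_without('numpy')
+#     def test_array_pretty_printing():
+#         import numpy as np
+#         ...
+#
+#     @onlyif_cmds_exist('git')
+#     def test_git_integration():
+#         ...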
diff --git a/contrib/python/ipython/py3/IPython/testing/globalipapp.py b/contrib/python/ipython/py3/IPython/testing/globalipapp.py
new file mode 100644
index 0000000000..3a699e07d6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/globalipapp.py
@@ -0,0 +1,114 @@
+"""Global IPython app to support test running.
+
+We must start our own ipython object and heavily muck with it so that all the
+modifications IPython makes to system behavior don't send the doctest machinery
+into a fit. This code should be considered a gross hack, but it gets the job
+done.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import builtins as builtin_mod
+import sys
+import types
+
+from pathlib import Path
+
+from . import tools
+
+from IPython.core import page
+from IPython.utils import io
+from IPython.terminal.interactiveshell import TerminalInteractiveShell
+
+
+def get_ipython():
+ # This will get replaced by the real thing once we start IPython below
+ return start_ipython()
+
+
+# A couple of methods to override those in the running IPython, to interact
+# better with doctest (doctest captures on raw stdout, so we need to direct
+# various types of output there, otherwise it will miss them).
+
+def xsys(self, cmd):
+ """Replace the default system call with a capturing one for doctest.
+ """
+ # We use getoutput, but we need to strip it because pexpect captures
+ # the trailing newline differently from commands.getoutput
+ print(self.getoutput(cmd, split=False, depth=1).rstrip(), end='', file=sys.stdout)
+ sys.stdout.flush()
+
+
+def _showtraceback(self, etype, evalue, stb):
+ """Print the traceback purely on stdout for doctest to capture it.
+ """
+ print(self.InteractiveTB.stb2text(stb), file=sys.stdout)
+
+
+def start_ipython():
+ """Start a global IPython shell, which we need for IPython-specific syntax.
+ """
+ global get_ipython
+
+ # This function should only ever run once!
+ if hasattr(start_ipython, 'already_called'):
+ return
+ start_ipython.already_called = True
+
+ # Store certain global objects that IPython modifies
+ _displayhook = sys.displayhook
+ _excepthook = sys.excepthook
+ _main = sys.modules.get('__main__')
+
+ # Create custom argv and namespaces for our IPython to be test-friendly
+ config = tools.default_config()
+ config.TerminalInteractiveShell.simple_prompt = True
+
+ # Create and initialize our test-friendly IPython instance.
+ shell = TerminalInteractiveShell.instance(config=config,
+ )
+
+ # A few more tweaks needed for playing nicely with doctests...
+
+ # remove history file
+ shell.tempfiles.append(Path(config.HistoryManager.hist_file))
+
+ # These traps are normally only active for interactive use; set them
+ # permanently since we'll be mocking interactive sessions.
+ shell.builtin_trap.activate()
+
+ # Modify the IPython system call with one that uses getoutput, so that we
+ # can capture subcommands and print them to Python's stdout, otherwise the
+ # doctest machinery would miss them.
+ shell.system = types.MethodType(xsys, shell)
+
+ shell._showtraceback = types.MethodType(_showtraceback, shell)
+
+ # IPython is ready, now clean up some global state...
+
+ # Deactivate the various python system hooks added by ipython for
+ # interactive convenience so we don't confuse the doctest system
+ sys.modules['__main__'] = _main
+ sys.displayhook = _displayhook
+ sys.excepthook = _excepthook
+
+ # So that ipython magics and aliases can be doctested (they work by making
+ # a call into a global _ip object). Also make the top-level get_ipython
+ # now return this without recursively calling here again.
+ _ip = shell
+ get_ipython = _ip.get_ipython
+ builtin_mod._ip = _ip
+ builtin_mod.ip = _ip
+ builtin_mod.get_ipython = get_ipython
+
+ # Override paging, so we don't require user interaction during the tests.
+ def nopage(strng, start=0, screen_lines=0, pager_cmd=None):
+ if isinstance(strng, dict):
+ strng = strng.get('text/plain', '')
+ print(strng)
+
+ page.orig_page = page.pager_page
+ page.pager_page = nopage
+
+ return _ip
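+
+
+# Usage sketch (illustrative only, not taken from the test suite): the doctest
+# machinery calls start_ipython() once per session; afterwards the shell is
+# reachable via the returned object and via the injected builtins, e.g.
+#
+#     from IPython.testing import globalipapp
+#
+#     ip = globalipapp.start_ipython()
+#     ip.run_cell("x = 1")
+#     assert ip.user_ns["x"] == 1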
diff --git a/contrib/python/ipython/py3/IPython/testing/ipunittest.py b/contrib/python/ipython/py3/IPython/testing/ipunittest.py
new file mode 100644
index 0000000000..5a940a5fe9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/ipunittest.py
@@ -0,0 +1,178 @@
+"""Experimental code for cleaner support of IPython syntax with unittest.
+
+In IPython up until 0.10, we've used very hacked up nose machinery for running
+tests with IPython special syntax, and this has proved to be extremely slow.
+This module provides decorators to try a different approach, stemming from a
+conversation Brian and I (FP) had about this problem Sept/09.
+
+The goal is to be able to easily write simple functions that can be seen by
+unittest as tests, and ultimately for these to support doctests with full
+IPython syntax. Nose already offers this based on naming conventions and our
+hackish plugins, but we are seeking to move away from nose dependencies if
+possible.
+
+This module follows a different approach, based on decorators.
+
+- A decorator called @ipdoctest can mark any function as having a docstring
+ that should be viewed as a doctest, but after syntax conversion.
+
+Authors
+-------
+
+- Fernando Perez <Fernando.Perez@berkeley.edu>
+"""
+
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2009-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import re
+import unittest
+from doctest import DocTestFinder, DocTestRunner, TestResults
+from IPython.terminal.interactiveshell import InteractiveShell
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+def count_failures(runner):
+ """Count number of failures in a doctest runner.
+
+ Code modeled after the summarize() method in doctest.
+ """
+ return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0 ]
+
+
+class IPython2PythonConverter(object):
+ """Convert IPython 'syntax' to valid Python.
+
+ Eventually this code may grow to be the full IPython syntax conversion
+ implementation, but for now it only does prompt conversion."""
+
+ def __init__(self):
+ self.rps1 = re.compile(r'In\ \[\d+\]: ')
+ self.rps2 = re.compile(r'\ \ \ \.\.\.+: ')
+ self.rout = re.compile(r'Out\[\d+\]: \s*?\n?')
+ self.pyps1 = '>>> '
+ self.pyps2 = '... '
+ self.rpyps1 = re.compile (r'(\s*%s)(.*)$' % self.pyps1)
+ self.rpyps2 = re.compile (r'(\s*%s)(.*)$' % self.pyps2)
+
+ def __call__(self, ds):
+ """Convert IPython prompts to python ones in a string."""
+ from . import globalipapp
+
+ pyps1 = '>>> '
+ pyps2 = '... '
+ pyout = ''
+
+ dnew = ds
+ dnew = self.rps1.sub(pyps1, dnew)
+ dnew = self.rps2.sub(pyps2, dnew)
+ dnew = self.rout.sub(pyout, dnew)
+ ip = InteractiveShell.instance()
+
+ # Convert input IPython source into valid Python.
+ out = []
+ newline = out.append
+ for line in dnew.splitlines():
+
+ mps1 = self.rpyps1.match(line)
+ if mps1 is not None:
+ prompt, text = mps1.groups()
+ newline(prompt+ip.prefilter(text, False))
+ continue
+
+ mps2 = self.rpyps2.match(line)
+ if mps2 is not None:
+ prompt, text = mps2.groups()
+ newline(prompt+ip.prefilter(text, True))
+ continue
+
+ newline(line)
+ newline('') # ensure a closing newline, needed by doctest
+ #print "PYSRC:", '\n'.join(out) # dbg
+ return '\n'.join(out)
+
+ #return dnew
+
+
+class Doc2UnitTester(object):
+ """Class whose instances act as a decorator for docstring testing.
+
+ In practice we're only likely to ever need one instance, made below (though
+ no attempt is made at turning it into a singleton; there is no need for
+ that).
+ """
+ def __init__(self, verbose=False):
+ """New decorator.
+
+ Parameters
+ ----------
+
+ verbose : boolean, optional (False)
+ Passed to the doctest finder and runner to control verbosity.
+ """
+ self.verbose = verbose
+ # We can reuse the same finder for all instances
+ self.finder = DocTestFinder(verbose=verbose, recurse=False)
+
+ def __call__(self, func):
+ """Use as a decorator: doctest a function's docstring as a unittest.
+
+ This version runs normal doctests, but the idea is to make it later run
+ ipython syntax instead."""
+
+ # Capture the enclosing instance with a different name, so the new
+ # class below can see it without confusion regarding its own 'self'
+ # that will point to the test instance at runtime
+ d2u = self
+
+ # Rewrite the function's docstring to have python syntax
+ if func.__doc__ is not None:
+ func.__doc__ = ip2py(func.__doc__)
+
+ # Now, create a tester object that is a real unittest instance, so
+ # normal unittest machinery (or Nose, or Trial) can find it.
+ class Tester(unittest.TestCase):
+ def test(self):
+ # Make a new runner per function to be tested
+ runner = DocTestRunner(verbose=d2u.verbose)
+ for the_test in d2u.finder.find(func, func.__name__):
+ runner.run(the_test)
+ failed = count_failures(runner)
+ if failed:
+ # Since we only looked at a single function's docstring,
+ # failed should contain at most one item. More than that
+ # is a case we can't handle and should error out on
+ if len(failed) > 1:
+ err = "Invalid number of test results: %s" % failed
+ raise ValueError(err)
+ # Report a normal failure.
+ self.fail('failed doctests: %s' % str(failed[0]))
+
+ # Rename it so test reports have the original signature.
+ Tester.__name__ = func.__name__
+ return Tester
+
+
+def ipdocstring(func):
+ """Change the function docstring via ip2py.
+ """
+ if func.__doc__ is not None:
+ func.__doc__ = ip2py(func.__doc__)
+ return func
+
+
+# Make an instance of the classes for public use
+ipdoctest = Doc2UnitTester()
+ip2py = IPython2PythonConverter()
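+
+
+# Usage sketch (illustrative only; the function below is hypothetical): ipdoctest
+# wraps a function whose docstring holds IPython-style examples and turns it into
+# a unittest.TestCase, while ip2py is the underlying prompt converter, e.g.
+#
+#     @ipdoctest
+#     def doctest_simple_addition():
+#         """
+#         In [1]: 1 + 1
+#         Out[1]: 2
+#         """
+#
+#     ip2py("In [1]: 1 + 1")   # roughly '>>> 1 + 1\n'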
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/README.txt b/contrib/python/ipython/py3/IPython/testing/plugin/README.txt
new file mode 100644
index 0000000000..a85e5a12a1
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/README.txt
@@ -0,0 +1,34 @@
+=======================================================
+ Nose plugin with IPython and extension module support
+=======================================================
+
+This directory provides the key functionality for test support that IPython
+needs as a nose plugin, which can be installed for use in projects other than
+IPython.
+
+The presence of a Makefile here is mostly for development and debugging
+purposes as it only provides a few shorthand commands. You can manually
+install the plugin by using standard Python procedures (``setup.py install``
+with appropriate arguments).
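+
+For example, an invocation along these lines (the exact prefix is up to you)
+should work::
+
+  python setup.py install --prefix=~/usr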
+
+To install the plugin using the Makefile, edit its first line to reflect where
+you'd like the installation.
+
+Once you've set the prefix, simply build/install the plugin with::
+
+ make
+
+and run the tests with::
+
+ make test
+
+You should see output similar to::
+
+ maqroll[plugin]> make test
+ nosetests -s --with-ipdoctest --doctest-tests dtexample.py
+ ..
+ ----------------------------------------------------------------------
+ Ran 2 tests in 0.016s
+
+ OK
+
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/__init__.py b/contrib/python/ipython/py3/IPython/testing/plugin/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/__init__.py
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/dtexample.py b/contrib/python/ipython/py3/IPython/testing/plugin/dtexample.py
new file mode 100644
index 0000000000..68f7016e34
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/dtexample.py
@@ -0,0 +1,167 @@
+"""Simple example using doctests.
+
+This file just contains doctests both using plain python and IPython prompts.
+All tests should be loaded by nose.
+"""
+
+import os
+
+
+def pyfunc():
+ """Some pure python tests...
+
+ >>> pyfunc()
+ 'pyfunc'
+
+ >>> import os
+
+ >>> 2+3
+ 5
+
+ >>> for i in range(3):
+ ... print(i, end=' ')
+ ... print(i+1, end=' ')
+ ...
+ 0 1 1 2 2 3
+ """
+ return 'pyfunc'
+
+def ipfunc():
+ """Some ipython tests...
+
+ In [1]: import os
+
+ In [3]: 2+3
+ Out[3]: 5
+
+ In [26]: for i in range(3):
+ ....: print(i, end=' ')
+ ....: print(i+1, end=' ')
+ ....:
+ 0 1 1 2 2 3
+
+
+ It's OK to use '_' for the last result, but do NOT try to use IPython's
+ numbered history of _NN outputs, since those won't exist under the
+ doctest environment:
+
+ In [7]: 'hi'
+ Out[7]: 'hi'
+
+ In [8]: print(repr(_))
+ 'hi'
+
+ In [7]: 3+4
+ Out[7]: 7
+
+ In [8]: _+3
+ Out[8]: 10
+
+ In [9]: ipfunc()
+ Out[9]: 'ipfunc'
+ """
+ return "ipfunc"
+
+
+def ipos():
+ """Examples that access the operating system work:
+
+ In [1]: !echo hello
+ hello
+
+ In [2]: !echo hello > /tmp/foo_iptest
+
+ In [3]: !cat /tmp/foo_iptest
+ hello
+
+ In [4]: rm -f /tmp/foo_iptest
+ """
+ pass
+
+
+ipos.__skip_doctest__ = os.name == "nt"
+
+
+def ranfunc():
+ """A function with some random output.
+
+ Normal examples are verified as usual:
+ >>> 1+3
+ 4
+
+ But if you put '# random' in the output, it is ignored:
+ >>> 1+3
+ junk goes here... # random
+
+ >>> 1+2
+ again, anything goes #random
+ if multiline, the random mark is only needed once.
+
+ >>> 1+2
+ You can also put the random marker at the end:
+ # random
+
+ >>> 1+2
+ # random
+ .. or at the beginning.
+
+ More correct input is properly verified:
+ >>> ranfunc()
+ 'ranfunc'
+ """
+ return 'ranfunc'
+
+
+def random_all():
+ """A function where we ignore the output of ALL examples.
+
+ Examples:
+
+ # all-random
+
+ This mark tells the testing machinery that all subsequent examples should
+ be treated as random (ignoring their output). They are still executed,
+ so if they raise an error, it will be detected as such, but their
+ output is completely ignored.
+
+ >>> 1+3
+ junk goes here...
+
+ >>> 1+3
+ klasdfj;
+
+ >>> 1+2
+ again, anything goes
+ blah...
+ """
+ pass
+
+def iprand():
+ """Some ipython tests with random output.
+
+ In [7]: 3+4
+ Out[7]: 7
+
+ In [8]: print('hello')
+ world # random
+
+ In [9]: iprand()
+ Out[9]: 'iprand'
+ """
+ return 'iprand'
+
+def iprand_all():
+ """Some ipython tests with fully random output.
+
+ # all-random
+
+ In [7]: 1
+ Out[7]: 99
+
+ In [8]: print('hello')
+ world
+
+ In [9]: iprand_all()
+ Out[9]: 'junk'
+ """
+ return 'iprand_all'
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/ipdoctest.py b/contrib/python/ipython/py3/IPython/testing/plugin/ipdoctest.py
new file mode 100644
index 0000000000..e7edf9837f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/ipdoctest.py
@@ -0,0 +1,299 @@
+"""Nose Plugin that supports IPython doctests.
+
+Limitations:
+
+- When generating examples for use as doctests, make sure that you have
+ pretty-printing OFF. This can be done either by setting the
+ ``PlainTextFormatter.pprint`` option in your configuration file to False, or
+ by interactively disabling it with %pprint. This is required so that IPython
+ output matches that of normal Python, which is used by doctest for internal
+ execution.
+
+- Do not rely on specific prompt numbers for results (such as using
+ '_34==True', for example). For IPython tests run via an external process the
+ prompt numbers may be different, and IPython tests run as normal python code
+ won't even have these special _NN variables set at all.
+"""
+
+#-----------------------------------------------------------------------------
+# Module imports
+
+# From the standard library
+import doctest
+import logging
+import re
+
+from testpath import modified_env
+
+#-----------------------------------------------------------------------------
+# Module globals and other constants
+#-----------------------------------------------------------------------------
+
+log = logging.getLogger(__name__)
+
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+class DocTestFinder(doctest.DocTestFinder):
+ def _get_test(self, obj, name, module, globs, source_lines):
+ test = super()._get_test(obj, name, module, globs, source_lines)
+
+ if bool(getattr(obj, "__skip_doctest__", False)) and test is not None:
+ for example in test.examples:
+ example.options[doctest.SKIP] = True
+
+ return test
+
+
+class IPDoctestOutputChecker(doctest.OutputChecker):
+ """Second-chance checker with support for random tests.
+
+ If the default comparison doesn't pass, this checker looks in the expected
+ output string for flags that tell us to ignore the output.
+ """
+
+ random_re = re.compile(r'#\s*random\s+')
+
+ def check_output(self, want, got, optionflags):
+ """Check output, accepting special markers embedded in the output.
+
+ If the output didn't pass the default validation but the special string
+ '#random' is included, we accept it."""
+
+ # Let the original tester verify first, in case people have valid tests
+ # that happen to have a comment saying '#random' embedded in them.
+ ret = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if not ret and self.random_re.search(want):
+ #print >> sys.stderr, 'RANDOM OK:',want # dbg
+ return True
+
+ return ret
+
+
+# A simple subclassing of the original with a different class name, so we can
+# distinguish and treat differently IPython examples from pure python ones.
+class IPExample(doctest.Example): pass
+
+
+class IPDocTestParser(doctest.DocTestParser):
+ """
+ A class used to parse strings containing doctest examples.
+
+ Note: This is a version modified to properly recognize IPython input and
+ convert any IPython examples into valid Python ones.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+
+ # Classic Python prompts or default IPython ones
+ _PS1_PY = r'>>>'
+ _PS2_PY = r'\.\.\.'
+
+ _PS1_IP = r'In\ \[\d+\]:'
+ _PS2_IP = r'\ \ \ \.\.\.+:'
+
+ _RE_TPL = r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
+ (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
+ \n? # a newline
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*%s) # Not a line starting with PS1
+ (?![ ]*%s) # Not a line starting with PS2
+ .*$\n? # But any other line
+ )*)
+ '''
+
+ _EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
+ re.MULTILINE | re.VERBOSE)
+
+ _EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
+ re.MULTILINE | re.VERBOSE)
+
+ # Mark a test as being fully random. In this case, we simply append the
+ # random marker ('#random') to each individual example's output. This way
+ # we don't need to modify any other code.
+ _RANDOM_TEST = re.compile(r'#\s*all-random\s+')
+
+ def ip2py(self,source):
+ """Convert input IPython source into valid Python."""
+ block = _ip.input_transformer_manager.transform_cell(source)
+ if len(block.splitlines()) == 1:
+ return _ip.prefilter(block)
+ else:
+ return block
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+
+ #print 'Parse string:\n',string # dbg
+
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+
+ # We make 'all random' tests by adding the '# random' mark to every
+ # block of output in the test.
+ if self._RANDOM_TEST.search(string):
+ random_marker = '\n# random'
+ else:
+ random_marker = ''
+
+ # Whether to convert the input from ipython to python syntax
+ ip2py = False
+ # Find all doctest examples in the string. First, try them as Python
+ # examples, then as IPython ones
+ terms = list(self._EXAMPLE_RE_PY.finditer(string))
+ if terms:
+ # Normal Python example
+ Example = doctest.Example
+ else:
+ # It's an ipython example.
+ terms = list(self._EXAMPLE_RE_IP.finditer(string))
+ Example = IPExample
+ ip2py = True
+
+ for m in terms:
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno,ip2py)
+
+ # Append the random-output marker (it defaults to empty in most
+ # cases, it's only non-empty for 'all-random' tests):
+ want += random_marker
+
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append(Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options))
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def _parse_example(self, m, name, lineno,ip2py=False):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+
+ Optional:
+ `ip2py`: if true, filter the input via IPython to convert the syntax
+ into valid python.
+ """
+
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+
+ # We're using variable-length input prompts
+ ps1 = m.group('ps1')
+ ps2 = m.group('ps2')
+ ps1_len = len(ps1)
+
+ self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
+ if ps2:
+ self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
+
+ source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
+
+ if ip2py:
+ # Convert source input from IPython into valid Python syntax
+ source = self.ip2py(source)
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+
+ # Remove ipython output prompt that might be present in the first line
+ want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
+
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+
+ Note: IPython-modified version which takes the input prompt length as a
+ parameter, so that prompts of variable length can be dealt with.
+ """
+ space_idx = indent+ps1_len
+ min_len = space_idx+1
+ for i, line in enumerate(lines):
+ if len(line) >= min_len and line[space_idx] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:space_idx], line))
+
+
+SKIP = doctest.register_optionflag('SKIP')
+
+
+class IPDocTestRunner(doctest.DocTestRunner,object):
+ """Test runner that synchronizes the IPython namespace with test globals.
+ """
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ # Override terminal size to standardise traceback format
+ with modified_env({'COLUMNS': '80', 'LINES': '24'}):
+ return super(IPDocTestRunner,self).run(test,
+ compileflags,out,clear_globs)
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/pytest_ipdoctest.py b/contrib/python/ipython/py3/IPython/testing/plugin/pytest_ipdoctest.py
new file mode 100644
index 0000000000..fd19ba4966
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/pytest_ipdoctest.py
@@ -0,0 +1,859 @@
+# Based on Pytest doctest.py
+# Original license:
+# The MIT License (MIT)
+#
+# Copyright (c) 2004-2021 Holger Krekel and others
+"""Discover and run ipdoctests in modules and test files."""
+import builtins
+import bdb
+import inspect
+import os
+import platform
+import sys
+import traceback
+import types
+import warnings
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Generator
+from typing import Iterable
+from typing import List
+from typing import Optional
+from typing import Pattern
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+import pytest
+from _pytest import outcomes
+from _pytest._code.code import ExceptionInfo
+from _pytest._code.code import ReprFileLocation
+from _pytest._code.code import TerminalRepr
+from _pytest._io import TerminalWriter
+from _pytest.compat import safe_getattr
+from _pytest.config import Config
+from _pytest.config.argparsing import Parser
+from _pytest.fixtures import FixtureRequest
+from _pytest.nodes import Collector
+from _pytest.outcomes import OutcomeException
+from _pytest.pathlib import fnmatch_ex
+from _pytest.pathlib import import_path
+from _pytest.python_api import approx
+from _pytest.warning_types import PytestWarning
+
+if TYPE_CHECKING:
+ import doctest
+
+DOCTEST_REPORT_CHOICE_NONE = "none"
+DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
+DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
+DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
+DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
+
+DOCTEST_REPORT_CHOICES = (
+ DOCTEST_REPORT_CHOICE_NONE,
+ DOCTEST_REPORT_CHOICE_CDIFF,
+ DOCTEST_REPORT_CHOICE_NDIFF,
+ DOCTEST_REPORT_CHOICE_UDIFF,
+ DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
+)
+
+# Lazy definition of runner class
+RUNNER_CLASS = None
+# Lazy definition of output checker class
+CHECKER_CLASS: Optional[Type["IPDoctestOutputChecker"]] = None
+
+
+def pytest_addoption(parser: Parser) -> None:
+ parser.addini(
+ "ipdoctest_optionflags",
+ "option flags for ipdoctests",
+ type="args",
+ default=["ELLIPSIS"],
+ )
+ parser.addini(
+ "ipdoctest_encoding", "encoding used for ipdoctest files", default="utf-8"
+ )
+ group = parser.getgroup("collect")
+ group.addoption(
+ "--ipdoctest-modules",
+ action="store_true",
+ default=False,
+ help="run ipdoctests in all .py modules",
+ dest="ipdoctestmodules",
+ )
+ group.addoption(
+ "--ipdoctest-report",
+ type=str.lower,
+ default="udiff",
+ help="choose another output format for diffs on ipdoctest failure",
+ choices=DOCTEST_REPORT_CHOICES,
+ dest="ipdoctestreport",
+ )
+ group.addoption(
+ "--ipdoctest-glob",
+ action="append",
+ default=[],
+ metavar="pat",
+ help="ipdoctests file matching pattern, default: test*.txt",
+ dest="ipdoctestglob",
+ )
+ group.addoption(
+ "--ipdoctest-ignore-import-errors",
+ action="store_true",
+ default=False,
+ help="ignore ipdoctest ImportErrors",
+ dest="ipdoctest_ignore_import_errors",
+ )
+ group.addoption(
+ "--ipdoctest-continue-on-failure",
+ action="store_true",
+ default=False,
+ help="for a given ipdoctest, continue to run after the first failure",
+ dest="ipdoctest_continue_on_failure",
+ )
+
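+# Typical invocations (illustrative only; the paths are placeholders): once the
+# plugin is loaded, the options registered above are passed straight to pytest,
+# e.g.
+#
+#     pytest --ipdoctest-modules IPython
+#     pytest --ipdoctest-glob='*.txt' --ipdoctest-continue-on-failure docs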
+
+def pytest_unconfigure() -> None:
+ global RUNNER_CLASS
+
+ RUNNER_CLASS = None
+
+
+def pytest_collect_file(
+ file_path: Path,
+ parent: Collector,
+) -> Optional[Union["IPDoctestModule", "IPDoctestTextfile"]]:
+ config = parent.config
+ if file_path.suffix == ".py":
+ if config.option.ipdoctestmodules and not any(
+ (_is_setup_py(file_path), _is_main_py(file_path))
+ ):
+ mod: IPDoctestModule = IPDoctestModule.from_parent(parent, path=file_path)
+ return mod
+ elif _is_ipdoctest(config, file_path, parent):
+ txt: IPDoctestTextfile = IPDoctestTextfile.from_parent(parent, path=file_path)
+ return txt
+ return None
+
+
+if int(pytest.__version__.split(".")[0]) < 7:
+ _collect_file = pytest_collect_file
+
+ def pytest_collect_file(
+ path,
+ parent: Collector,
+ ) -> Optional[Union["IPDoctestModule", "IPDoctestTextfile"]]:
+ return _collect_file(Path(path), parent)
+
+ _import_path = import_path
+
+ def import_path(path, root):
+ import py.path
+
+ return _import_path(py.path.local(path))
+
+
+def _is_setup_py(path: Path) -> bool:
+ if path.name != "setup.py":
+ return False
+ contents = path.read_bytes()
+ return b"setuptools" in contents or b"distutils" in contents
+
+
+def _is_ipdoctest(config: Config, path: Path, parent: Collector) -> bool:
+ if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path):
+ return True
+ globs = config.getoption("ipdoctestglob") or ["test*.txt"]
+ return any(fnmatch_ex(glob, path) for glob in globs)
+
+
+def _is_main_py(path: Path) -> bool:
+ return path.name == "__main__.py"
+
+
+class ReprFailDoctest(TerminalRepr):
+ def __init__(
+ self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]]
+ ) -> None:
+ self.reprlocation_lines = reprlocation_lines
+
+ def toterminal(self, tw: TerminalWriter) -> None:
+ for reprlocation, lines in self.reprlocation_lines:
+ for line in lines:
+ tw.line(line)
+ reprlocation.toterminal(tw)
+
+
+class MultipleDoctestFailures(Exception):
+ def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None:
+ super().__init__()
+ self.failures = failures
+
+
+def _init_runner_class() -> Type["IPDocTestRunner"]:
+ import doctest
+ from .ipdoctest import IPDocTestRunner
+
+ class PytestDoctestRunner(IPDocTestRunner):
+ """Runner to collect failures.
+
+ Note that the out variable in this case is a list instead of a
+ stdout-like object.
+ """
+
+ def __init__(
+ self,
+ checker: Optional["IPDoctestOutputChecker"] = None,
+ verbose: Optional[bool] = None,
+ optionflags: int = 0,
+ continue_on_failure: bool = True,
+ ) -> None:
+ super().__init__(checker=checker, verbose=verbose, optionflags=optionflags)
+ self.continue_on_failure = continue_on_failure
+
+ def report_failure(
+ self,
+ out,
+ test: "doctest.DocTest",
+ example: "doctest.Example",
+ got: str,
+ ) -> None:
+ failure = doctest.DocTestFailure(test, example, got)
+ if self.continue_on_failure:
+ out.append(failure)
+ else:
+ raise failure
+
+ def report_unexpected_exception(
+ self,
+ out,
+ test: "doctest.DocTest",
+ example: "doctest.Example",
+ exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType],
+ ) -> None:
+ if isinstance(exc_info[1], OutcomeException):
+ raise exc_info[1]
+ if isinstance(exc_info[1], bdb.BdbQuit):
+ outcomes.exit("Quitting debugger")
+ failure = doctest.UnexpectedException(test, example, exc_info)
+ if self.continue_on_failure:
+ out.append(failure)
+ else:
+ raise failure
+
+ return PytestDoctestRunner
+
+
+def _get_runner(
+ checker: Optional["IPDoctestOutputChecker"] = None,
+ verbose: Optional[bool] = None,
+ optionflags: int = 0,
+ continue_on_failure: bool = True,
+) -> "IPDocTestRunner":
+ # We need this in order to do a lazy import on doctest
+ global RUNNER_CLASS
+ if RUNNER_CLASS is None:
+ RUNNER_CLASS = _init_runner_class()
+ # Type ignored because the continue_on_failure argument is only defined on
+ # PytestDoctestRunner, which is lazily defined so can't be used as a type.
+ return RUNNER_CLASS( # type: ignore
+ checker=checker,
+ verbose=verbose,
+ optionflags=optionflags,
+ continue_on_failure=continue_on_failure,
+ )
+
+
+class IPDoctestItem(pytest.Item):
+ def __init__(
+ self,
+ name: str,
+ parent: "Union[IPDoctestTextfile, IPDoctestModule]",
+ runner: Optional["IPDocTestRunner"] = None,
+ dtest: Optional["doctest.DocTest"] = None,
+ ) -> None:
+ super().__init__(name, parent)
+ self.runner = runner
+ self.dtest = dtest
+ self.obj = None
+ self.fixture_request: Optional[FixtureRequest] = None
+
+ @classmethod
+ def from_parent( # type: ignore
+ cls,
+ parent: "Union[IPDoctestTextfile, IPDoctestModule]",
+ *,
+ name: str,
+ runner: "IPDocTestRunner",
+ dtest: "doctest.DocTest",
+ ):
+ # incompatible signature due to imposed limits on subclass
+ """The public named constructor."""
+ return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
+
+ def setup(self) -> None:
+ if self.dtest is not None:
+ self.fixture_request = _setup_fixtures(self)
+ globs = dict(getfixture=self.fixture_request.getfixturevalue)
+ for name, value in self.fixture_request.getfixturevalue(
+ "ipdoctest_namespace"
+ ).items():
+ globs[name] = value
+ self.dtest.globs.update(globs)
+
+ from .ipdoctest import IPExample
+
+ if isinstance(self.dtest.examples[0], IPExample):
+ # for IPython examples *only*, we swap the globals with the ipython
+ # namespace, after updating it with the globals (which doctest
+ # fills with the necessary info from the module being tested).
+ self._user_ns_orig = {}
+ self._user_ns_orig.update(_ip.user_ns)
+ _ip.user_ns.update(self.dtest.globs)
+ # We must remove the _ key in the namespace, so that Python's
+ # doctest code sets it naturally
+ _ip.user_ns.pop("_", None)
+ _ip.user_ns["__builtins__"] = builtins
+ self.dtest.globs = _ip.user_ns
+
+ def teardown(self) -> None:
+ from .ipdoctest import IPExample
+
+ # Undo the test.globs reassignment we made
+ if isinstance(self.dtest.examples[0], IPExample):
+ self.dtest.globs = {}
+ _ip.user_ns.clear()
+ _ip.user_ns.update(self._user_ns_orig)
+ del self._user_ns_orig
+
+ self.dtest.globs.clear()
+
+ def runtest(self) -> None:
+ assert self.dtest is not None
+ assert self.runner is not None
+ _check_all_skipped(self.dtest)
+ self._disable_output_capturing_for_darwin()
+ failures: List["doctest.DocTestFailure"] = []
+
+ # exec(compile(..., "single", ...), ...) puts result in builtins._
+ had_underscore_value = hasattr(builtins, "_")
+ underscore_original_value = getattr(builtins, "_", None)
+
+ # Save our current directory and switch out to the one where the
+ # test was originally created, in case another doctest did a
+ # directory change. We'll restore this in the finally clause.
+ curdir = os.getcwd()
+ os.chdir(self.fspath.dirname)
+ try:
+ # Type ignored because we change the type of `out` from what
+ # ipdoctest expects.
+ self.runner.run(self.dtest, out=failures, clear_globs=False) # type: ignore[arg-type]
+ finally:
+ os.chdir(curdir)
+ if had_underscore_value:
+ setattr(builtins, "_", underscore_original_value)
+ elif hasattr(builtins, "_"):
+ delattr(builtins, "_")
+
+ if failures:
+ raise MultipleDoctestFailures(failures)
+
+ def _disable_output_capturing_for_darwin(self) -> None:
+ """Disable output capturing. Otherwise, stdout is lost to ipdoctest (pytest#985)."""
+ if platform.system() != "Darwin":
+ return
+ capman = self.config.pluginmanager.getplugin("capturemanager")
+ if capman:
+ capman.suspend_global_capture(in_=True)
+ out, err = capman.read_global_capture()
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+
+ # TODO: Type ignored -- breaks Liskov Substitution.
+ def repr_failure( # type: ignore[override]
+ self,
+ excinfo: ExceptionInfo[BaseException],
+ ) -> Union[str, TerminalRepr]:
+ import doctest
+
+ failures: Optional[
+ Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]
+ ] = None
+ if isinstance(
+ excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
+ ):
+ failures = [excinfo.value]
+ elif isinstance(excinfo.value, MultipleDoctestFailures):
+ failures = excinfo.value.failures
+
+ if failures is None:
+ return super().repr_failure(excinfo)
+
+ reprlocation_lines = []
+ for failure in failures:
+ example = failure.example
+ test = failure.test
+ filename = test.filename
+ if test.lineno is None:
+ lineno = None
+ else:
+ lineno = test.lineno + example.lineno + 1
+ message = type(failure).__name__
+ # TODO: ReprFileLocation doesn't expect a None lineno.
+ reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type]
+ checker = _get_checker()
+ report_choice = _get_report_choice(self.config.getoption("ipdoctestreport"))
+ if lineno is not None:
+ assert failure.test.docstring is not None
+ lines = failure.test.docstring.splitlines(False)
+ # add line numbers to the left of the error message
+ assert test.lineno is not None
+ lines = [
+ "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines)
+ ]
+ # trim docstring error lines to 10
+ lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
+ else:
+ lines = [
+ "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
+ ]
+ indent = ">>>"
+ for line in example.source.splitlines():
+ lines.append(f"??? {indent} {line}")
+ indent = "..."
+ if isinstance(failure, doctest.DocTestFailure):
+ lines += checker.output_difference(
+ example, failure.got, report_choice
+ ).split("\n")
+ else:
+ inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
+ lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
+ lines += [
+ x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
+ ]
+ reprlocation_lines.append((reprlocation, lines))
+ return ReprFailDoctest(reprlocation_lines)
+
+ def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
+ assert self.dtest is not None
+ return self.path, self.dtest.lineno, "[ipdoctest] %s" % self.name
+
+ if int(pytest.__version__.split(".")[0]) < 7:
+
+ @property
+ def path(self) -> Path:
+ return Path(self.fspath)
+
+
+def _get_flag_lookup() -> Dict[str, int]:
+ import doctest
+
+ return dict(
+ DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
+ DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
+ NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
+ ELLIPSIS=doctest.ELLIPSIS,
+ IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
+ COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
+ ALLOW_UNICODE=_get_allow_unicode_flag(),
+ ALLOW_BYTES=_get_allow_bytes_flag(),
+ NUMBER=_get_number_flag(),
+ )
+
+
+def get_optionflags(parent):
+ optionflags_str = parent.config.getini("ipdoctest_optionflags")
+ flag_lookup_table = _get_flag_lookup()
+ flag_acc = 0
+ for flag in optionflags_str:
+ flag_acc |= flag_lookup_table[flag]
+ return flag_acc
+
+
+def _get_continue_on_failure(config):
+ continue_on_failure = config.getvalue("ipdoctest_continue_on_failure")
+ if continue_on_failure:
+ # We need to turn this off if we use pdb, since we should stop at
+ # the first failure.
+ if config.getvalue("usepdb"):
+ continue_on_failure = False
+ return continue_on_failure
+
+
+class IPDoctestTextfile(pytest.Module):
+ obj = None
+
+ def collect(self) -> Iterable[IPDoctestItem]:
+ import doctest
+ from .ipdoctest import IPDocTestParser
+
+ # Inspired by doctest.testfile; ideally we would use it directly,
+ # but it doesn't support passing a custom checker.
+ encoding = self.config.getini("ipdoctest_encoding")
+ text = self.path.read_text(encoding)
+ filename = str(self.path)
+ name = self.path.name
+ globs = {"__name__": "__main__"}
+
+ optionflags = get_optionflags(self)
+
+ runner = _get_runner(
+ verbose=False,
+ optionflags=optionflags,
+ checker=_get_checker(),
+ continue_on_failure=_get_continue_on_failure(self.config),
+ )
+
+ parser = IPDocTestParser()
+ test = parser.get_doctest(text, globs, name, filename, 0)
+ if test.examples:
+ yield IPDoctestItem.from_parent(
+ self, name=test.name, runner=runner, dtest=test
+ )
+
+ if int(pytest.__version__.split(".")[0]) < 7:
+
+ @property
+ def path(self) -> Path:
+ return Path(self.fspath)
+
+ @classmethod
+ def from_parent(
+ cls,
+ parent,
+ *,
+ fspath=None,
+ path: Optional[Path] = None,
+ **kw,
+ ):
+ if path is not None:
+ import py.path
+
+ fspath = py.path.local(path)
+ return super().from_parent(parent=parent, fspath=fspath, **kw)
+
+
+def _check_all_skipped(test: "doctest.DocTest") -> None:
+ """Raise pytest.skip() if all examples in the given DocTest have the SKIP
+ option set."""
+ import doctest
+
+ all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
+ if all_skipped:
+ pytest.skip("all docstests skipped by +SKIP option")
+
+
+def _is_mocked(obj: object) -> bool:
+ """Return if an object is possibly a mock object by checking the
+ existence of a highly improbable attribute."""
+ return (
+ safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
+ is not None
+ )
+
+
+@contextmanager
+def _patch_unwrap_mock_aware() -> Generator[None, None, None]:
+ """Context manager which replaces ``inspect.unwrap`` with a version
+ that's aware of mock objects and doesn't recurse into them."""
+ real_unwrap = inspect.unwrap
+
+ def _mock_aware_unwrap(
+ func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None
+ ) -> Any:
+ try:
+ if stop is None or stop is _is_mocked:
+ return real_unwrap(func, stop=_is_mocked)
+ _stop = stop
+ return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func))
+ except Exception as e:
+ warnings.warn(
+ "Got %r when unwrapping %r. This is usually caused "
+ "by a violation of Python's object protocol; see e.g. "
+ "https://github.com/pytest-dev/pytest/issues/5080" % (e, func),
+ PytestWarning,
+ )
+ raise
+
+ inspect.unwrap = _mock_aware_unwrap
+ try:
+ yield
+ finally:
+ inspect.unwrap = real_unwrap
+
+
+class IPDoctestModule(pytest.Module):
+ def collect(self) -> Iterable[IPDoctestItem]:
+ import doctest
+ from .ipdoctest import DocTestFinder, IPDocTestParser
+
+ class MockAwareDocTestFinder(DocTestFinder):
+ """A hackish ipdoctest finder that overrides stdlib internals to fix a stdlib bug.
+
+ https://github.com/pytest-dev/pytest/issues/3456
+ https://bugs.python.org/issue25532
+ """
+
+ def _find_lineno(self, obj, source_lines):
+ """Doctest code does not take into account `@property`, this
+ is a hackish way to fix it. https://bugs.python.org/issue17446
+
+ Wrapped Doctests will need to be unwrapped so the correct
+ line number is returned. This will be reported upstream. #8796
+ """
+ if isinstance(obj, property):
+ obj = getattr(obj, "fget", obj)
+
+ if hasattr(obj, "__wrapped__"):
+ # Get the main obj in case of it being wrapped
+ obj = inspect.unwrap(obj)
+
+ # Type ignored because this is a private function.
+ return super()._find_lineno( # type:ignore[misc]
+ obj,
+ source_lines,
+ )
+
+ def _find(
+ self, tests, obj, name, module, source_lines, globs, seen
+ ) -> None:
+ if _is_mocked(obj):
+ return
+ with _patch_unwrap_mock_aware():
+ # Type ignored because this is a private function.
+ super()._find( # type:ignore[misc]
+ tests, obj, name, module, source_lines, globs, seen
+ )
+
+ if self.path.name == "conftest.py":
+ if int(pytest.__version__.split(".")[0]) < 7:
+ module = self.config.pluginmanager._importconftest(
+ self.path,
+ self.config.getoption("importmode"),
+ )
+ else:
+ module = self.config.pluginmanager._importconftest(
+ self.path,
+ self.config.getoption("importmode"),
+ rootpath=self.config.rootpath,
+ )
+ else:
+ try:
+ module = import_path(self.path, root=self.config.rootpath)
+ except ImportError:
+ if self.config.getvalue("ipdoctest_ignore_import_errors"):
+ pytest.skip("unable to import module %r" % self.path)
+ else:
+ raise
+ # Uses internal doctest module parsing mechanism.
+ finder = MockAwareDocTestFinder(parser=IPDocTestParser())
+ optionflags = get_optionflags(self)
+ runner = _get_runner(
+ verbose=False,
+ optionflags=optionflags,
+ checker=_get_checker(),
+ continue_on_failure=_get_continue_on_failure(self.config),
+ )
+
+ for test in finder.find(module, module.__name__):
+ if test.examples: # skip empty ipdoctests
+ yield IPDoctestItem.from_parent(
+ self, name=test.name, runner=runner, dtest=test
+ )
+
+ if int(pytest.__version__.split(".")[0]) < 7:
+
+ @property
+ def path(self) -> Path:
+ return Path(self.fspath)
+
+ @classmethod
+ def from_parent(
+ cls,
+ parent,
+ *,
+ fspath=None,
+ path: Optional[Path] = None,
+ **kw,
+ ):
+ if path is not None:
+ import py.path
+
+ fspath = py.path.local(path)
+ return super().from_parent(parent=parent, fspath=fspath, **kw)
+
+
+def _setup_fixtures(doctest_item: IPDoctestItem) -> FixtureRequest:
+ """Used by IPDoctestTextfile and IPDoctestItem to setup fixture information."""
+
+ def func() -> None:
+ pass
+
+ doctest_item.funcargs = {} # type: ignore[attr-defined]
+ fm = doctest_item.session._fixturemanager
+ doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined]
+ node=doctest_item, func=func, cls=None, funcargs=False
+ )
+ fixture_request = FixtureRequest(doctest_item, _ispytest=True)
+ fixture_request._fillfixtures()
+ return fixture_request
+
+
+def _init_checker_class() -> Type["IPDoctestOutputChecker"]:
+ import doctest
+ import re
+ from .ipdoctest import IPDoctestOutputChecker
+
+ class LiteralsOutputChecker(IPDoctestOutputChecker):
+ # Based on doctest_nose_plugin.py from the nltk project
+ # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+ # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+ _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+ _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+ _number_re = re.compile(
+ r"""
+ (?P<number>
+ (?P<mantissa>
+ (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+ |
+ (?P<integer2> [+-]?\d+)\.
+ )
+ (?:
+ [Ee]
+ (?P<exponent1> [+-]?\d+)
+ )?
+ |
+ (?P<integer3> [+-]?\d+)
+ (?:
+ [Ee]
+ (?P<exponent2> [+-]?\d+)
+ )
+ )
+ """,
+ re.VERBOSE,
+ )
+
+ def check_output(self, want: str, got: str, optionflags: int) -> bool:
+ if super().check_output(want, got, optionflags):
+ return True
+
+ allow_unicode = optionflags & _get_allow_unicode_flag()
+ allow_bytes = optionflags & _get_allow_bytes_flag()
+ allow_number = optionflags & _get_number_flag()
+
+ if not allow_unicode and not allow_bytes and not allow_number:
+ return False
+
+ def remove_prefixes(regex: Pattern[str], txt: str) -> str:
+ return re.sub(regex, r"\1\2", txt)
+
+ if allow_unicode:
+ want = remove_prefixes(self._unicode_literal_re, want)
+ got = remove_prefixes(self._unicode_literal_re, got)
+
+ if allow_bytes:
+ want = remove_prefixes(self._bytes_literal_re, want)
+ got = remove_prefixes(self._bytes_literal_re, got)
+
+ if allow_number:
+ got = self._remove_unwanted_precision(want, got)
+
+ return super().check_output(want, got, optionflags)
+
+ def _remove_unwanted_precision(self, want: str, got: str) -> str:
+ wants = list(self._number_re.finditer(want))
+ gots = list(self._number_re.finditer(got))
+ if len(wants) != len(gots):
+ return got
+ offset = 0
+ for w, g in zip(wants, gots):
+ fraction: Optional[str] = w.group("fraction")
+ exponent: Optional[str] = w.group("exponent1")
+ if exponent is None:
+ exponent = w.group("exponent2")
+ precision = 0 if fraction is None else len(fraction)
+ if exponent is not None:
+ precision -= int(exponent)
+ if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+ # They're close enough. Replace the text we actually
+ # got with the text we want, so that it will match when we
+ # check the string literally.
+ got = (
+ got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+ )
+ offset += w.end() - w.start() - (g.end() - g.start())
+ return got
+
+ return LiteralsOutputChecker
+
+
+def _get_checker() -> "IPDoctestOutputChecker":
+ """Return a IPDoctestOutputChecker subclass that supports some
+ additional options:
+
+ * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+ prefixes (respectively) in string literals. Useful when the same
+ ipdoctest should run in Python 2 and Python 3.
+
+ * NUMBER to ignore floating-point differences smaller than the
+ precision of the literal number in the ipdoctest.
+
+ An inner class is used to avoid importing "ipdoctest" at the module
+ level.
+ """
+ global CHECKER_CLASS
+ if CHECKER_CLASS is None:
+ CHECKER_CLASS = _init_checker_class()
+ return CHECKER_CLASS()
+
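+# Illustrative examples (not taken from the test suite) of the extra flags this
+# checker understands inside ipdoctests:
+#
+#     >>> import math
+#     >>> math.pi  # doctest: +NUMBER
+#     3.14
+#
+#     >>> b'data'  # doctest: +ALLOW_BYTES
+#     'data'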
+
+def _get_allow_unicode_flag() -> int:
+ """Register and return the ALLOW_UNICODE flag."""
+ import doctest
+
+ return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+def _get_allow_bytes_flag() -> int:
+ """Register and return the ALLOW_BYTES flag."""
+ import doctest
+
+ return doctest.register_optionflag("ALLOW_BYTES")
+
+
+def _get_number_flag() -> int:
+ """Register and return the NUMBER flag."""
+ import doctest
+
+ return doctest.register_optionflag("NUMBER")
+
+
+def _get_report_choice(key: str) -> int:
+ """Return the actual `ipdoctest` module flag value.
+
+ We want to do it as late as possible to avoid importing `ipdoctest` and all
+ its dependencies when parsing options, as it adds overhead and breaks tests.
+ """
+ import doctest
+
+ return {
+ DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
+ DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
+ DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
+ DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
+ DOCTEST_REPORT_CHOICE_NONE: 0,
+ }[key]
+
+
+@pytest.fixture(scope="session")
+def ipdoctest_namespace() -> Dict[str, Any]:
+ """Fixture that returns a :py:class:`dict` that will be injected into the
+ namespace of ipdoctests."""
+ return dict()
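+
+
+# Usage sketch (illustrative only; the fixture name add_np is hypothetical):
+# projects can extend the injected namespace from their own conftest.py, e.g.
+#
+#     import numpy as np
+#     import pytest
+#
+#     @pytest.fixture(autouse=True)
+#     def add_np(ipdoctest_namespace):
+#         ipdoctest_namespace["np"] = np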
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/setup.py b/contrib/python/ipython/py3/IPython/testing/plugin/setup.py
new file mode 100644
index 0000000000..a3281d30c8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/setup.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+"""A Nose plugin to support IPython doctests.
+"""
+
+from setuptools import setup
+
+setup(name='IPython doctest plugin',
+ version='0.1',
+ author='The IPython Team',
+ description = 'Nose plugin to load IPython-extended doctests',
+ license = 'LGPL',
+ py_modules = ['ipdoctest'],
+ entry_points = {
+ 'nose.plugins.0.10': ['ipdoctest = ipdoctest:IPythonDoctest',
+ 'extdoctest = ipdoctest:ExtensionDoctest',
+ ],
+ },
+ )
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/simple.py b/contrib/python/ipython/py3/IPython/testing/plugin/simple.py
new file mode 100644
index 0000000000..35fbfd2fbd
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/simple.py
@@ -0,0 +1,44 @@
+"""Simple example using doctests.
+
+This file just contains doctests both using plain python and IPython prompts.
+All tests should be loaded by Pytest.
+"""
+
+def pyfunc():
+ """Some pure python tests...
+
+ >>> pyfunc()
+ 'pyfunc'
+
+ >>> import os
+
+ >>> 2+3
+ 5
+
+ >>> for i in range(3):
+ ... print(i, end=' ')
+ ... print(i+1, end=' ')
+ ...
+ 0 1 1 2 2 3
+ """
+ return 'pyfunc'
+
+
+def ipyfunc():
+ """Some IPython tests...
+
+ In [1]: ipyfunc()
+ Out[1]: 'ipyfunc'
+
+ In [2]: import os
+
+ In [3]: 2+3
+ Out[3]: 5
+
+ In [4]: for i in range(3):
+ ...: print(i, end=' ')
+ ...: print(i+1, end=' ')
+ ...:
+ Out[4]: 0 1 1 2 2 3
+ """
+ return "ipyfunc"
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/simplevars.py b/contrib/python/ipython/py3/IPython/testing/plugin/simplevars.py
new file mode 100644
index 0000000000..82a5edb028
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/simplevars.py
@@ -0,0 +1,2 @@
+x = 1
+print("x is:", x)
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/test_combo.txt b/contrib/python/ipython/py3/IPython/testing/plugin/test_combo.txt
new file mode 100644
index 0000000000..6c8759f3e7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/test_combo.txt
@@ -0,0 +1,36 @@
+=======================
+ Combo testing example
+=======================
+
+This is a simple example that mixes ipython doctests::
+
+ In [1]: import code
+
+ In [2]: 2**12
+ Out[2]: 4096
+
+with command-line example information that does *not* get executed::
+
+ $ mpirun -n 4 ipengine --controller-port=10000 --controller-ip=host0
+
+and with literal examples of Python source code::
+
+ controller = dict(host='myhost',
+ engine_port=None, # default is 10105
+ control_port=None,
+ )
+
+ # keys are hostnames, values are the number of engines on that host
+ engines = dict(node1=2,
+ node2=2,
+ node3=2,
+ node3=2,
+ )
+
+ # Force failure to detect that this test is being run.
+ 1/0
+
+These source code examples are executed but no output is compared at all. An
+error or failure is reported only if an exception is raised.
+
+NOTE: the execution of pure python blocks is not yet working!
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/test_example.txt b/contrib/python/ipython/py3/IPython/testing/plugin/test_example.txt
new file mode 100644
index 0000000000..f8b681eb4f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/test_example.txt
@@ -0,0 +1,24 @@
+=====================================
+ Tests in example form - pure python
+=====================================
+
+This file contains doctest examples embedded as code blocks, using normal
+Python prompts. See the accompanying file for similar examples using IPython
+prompts (you can't mix both types within one file). The following will be run
+as a test::
+
+ >>> 1+1
+ 2
+ >>> print ("hello")
+ hello
+
+More than one example works::
+
+ >>> s="Hello World"
+
+ >>> s.upper()
+ 'HELLO WORLD'
+
+but you should note that the *entire* test file is considered to be a single
+test. Individual code blocks that fail are printed separately as ``example
+failures``, but the whole file is still counted and reported as one test.
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/test_exampleip.txt b/contrib/python/ipython/py3/IPython/testing/plugin/test_exampleip.txt
new file mode 100644
index 0000000000..96b1eae19f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/test_exampleip.txt
@@ -0,0 +1,30 @@
+=================================
+ Tests in example form - IPython
+=================================
+
+You can write text files with examples that use IPython prompts (as long as you
+use the nose ipython doctest plugin), but you can not mix and match prompt
+styles in a single file. That is, you either use all ``>>>`` prompts or all
+IPython-style prompts. Your test suite *can* have both types, you just need to
+put each type of example in a separate file. Using IPython prompts, you can paste
+directly from your session::
+
+ In [5]: s="Hello World"
+
+ In [6]: s.upper()
+ Out[6]: 'HELLO WORLD'
+
+Another example::
+
+ In [8]: 1+3
+ Out[8]: 4
+
+Just like in IPython docstrings, you can use all IPython syntax and features::
+
+ In [9]: !echo hello
+ hello
+
+ In [10]: a='hi'
+
+ In [11]: !echo $a
+ hi
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/test_ipdoctest.py b/contrib/python/ipython/py3/IPython/testing/plugin/test_ipdoctest.py
new file mode 100644
index 0000000000..2686172bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/test_ipdoctest.py
@@ -0,0 +1,92 @@
+"""Tests for the ipdoctest machinery itself.
+
+Note: in a file named test_X, functions whose only test is their docstring (as
+a doctest) and which have no test functionality of their own, should be called
+'doctest_foo' instead of 'test_foo', otherwise they get double-counted (the
+empty function call is counted as a test, which just inflates tests numbers
+artificially).
+"""
+
+def doctest_simple():
+ """ipdoctest must handle simple inputs
+
+ In [1]: 1
+ Out[1]: 1
+
+ In [2]: print(1)
+ 1
+ """
+
+def doctest_multiline1():
+ """The ipdoctest machinery must handle multiline examples gracefully.
+
+ In [2]: for i in range(4):
+ ...: print(i)
+ ...:
+ 0
+ 1
+ 2
+ 3
+ """
+
+def doctest_multiline2():
+ """Multiline examples that define functions and print output.
+
+ In [7]: def f(x):
+ ...: return x+1
+ ...:
+
+ In [8]: f(1)
+ Out[8]: 2
+
+ In [9]: def g(x):
+ ...: print('x is:',x)
+ ...:
+
+ In [10]: g(1)
+ x is: 1
+
+ In [11]: g('hello')
+ x is: hello
+ """
+
+
+def doctest_multiline3():
+ """Multiline examples with blank lines.
+
+ In [12]: def h(x):
+ ....: if x>1:
+ ....: return x**2
+ ....: # To leave a blank line in the input, you must mark it
+ ....: # with a comment character:
+ ....: #
+ ....: # otherwise the doctest parser gets confused.
+ ....: else:
+ ....: return -1
+ ....:
+
+ In [13]: h(5)
+ Out[13]: 25
+
+ In [14]: h(1)
+ Out[14]: -1
+
+ In [15]: h(0)
+ Out[15]: -1
+ """
+
+
+def doctest_builtin_underscore():
+    """Defining builtins._ should not break anything outside the doctest,
+    while still working as expected inside the doctest.
+
+ In [1]: import builtins
+
+ In [2]: builtins._ = 42
+
+ In [3]: builtins._
+ Out[3]: 42
+
+ In [4]: _
+ Out[4]: 42
+ """
diff --git a/contrib/python/ipython/py3/IPython/testing/plugin/test_refs.py b/contrib/python/ipython/py3/IPython/testing/plugin/test_refs.py
new file mode 100644
index 0000000000..b92448be07
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/plugin/test_refs.py
@@ -0,0 +1,39 @@
+"""Some simple tests for the plugin while running scripts.
+"""
+# Module imports
+# Std lib
+import inspect
+
+# Our own
+
+#-----------------------------------------------------------------------------
+# Testing functions
+
+def test_trivial():
+ """A trivial passing test."""
+ pass
+
+def doctest_run():
+ """Test running a trivial script.
+
+ In [13]: run simplevars.py
+ x is: 1
+ """
+
+def doctest_runvars():
+ """Test that variables defined in scripts get loaded correctly via %run.
+
+ In [13]: run simplevars.py
+ x is: 1
+
+ In [14]: x
+ Out[14]: 1
+ """
+
+def doctest_ivars():
+ """Test that variables defined interactively are picked up.
+ In [5]: zz=1
+
+ In [6]: zz
+ Out[6]: 1
+ """
diff --git a/contrib/python/ipython/py3/IPython/testing/skipdoctest.py b/contrib/python/ipython/py3/IPython/testing/skipdoctest.py
new file mode 100644
index 0000000000..f440ea14b2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/skipdoctest.py
@@ -0,0 +1,19 @@
+"""Decorators marks that a doctest should be skipped.
+
+The IPython.testing.decorators module triggers various extra imports, including
+numpy and sympy if they're present. Since this decorator is used in core parts
+of IPython, it's in a separate module so that running IPython doesn't trigger
+those imports."""
+
+# Copyright (C) IPython Development Team
+# Distributed under the terms of the Modified BSD License.
+
+
+def skip_doctest(f):
+ """Decorator - mark a function or method for skipping its doctest.
+
+ This decorator allows you to mark a function whose docstring you wish to
+ omit from testing, while preserving the docstring for introspection, help,
+ etc."""
+ f.__skip_doctest__ = True
+ return f
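+
+
+# A minimal usage sketch (the decorated function name is hypothetical): the
+# docstring is kept for help() and introspection, but its examples are not
+# collected as doctests.
+#
+#     @skip_doctest
+#     def frobnicate():
+#         """
+#         >>> frobnicate()   # shown as documentation only, never executed
+#         """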
diff --git a/contrib/python/ipython/py3/IPython/testing/tools.py b/contrib/python/ipython/py3/IPython/testing/tools.py
new file mode 100644
index 0000000000..2ff63a6d4a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/testing/tools.py
@@ -0,0 +1,476 @@
+"""Generic testing tools.
+
+Authors
+-------
+- Fernando Perez <Fernando.Perez@berkeley.edu>
+"""
+
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+from pathlib import Path
+import re
+import sys
+import tempfile
+import unittest
+
+from contextlib import contextmanager
+from io import StringIO
+from subprocess import Popen, PIPE
+from unittest.mock import patch
+
+from traitlets.config.loader import Config
+from IPython.utils.process import get_output_error_code
+from IPython.utils.text import list_strings
+from IPython.utils.io import temp_pyfile, Tee
+from IPython.utils import py3compat
+
+from . import decorators as dec
+from . import skipdoctest
+
+
+# The full_path docstring doctest renders differently on win32 (different path
+# separator), so just skip the doctest there. The example remains informative.
+doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
+
+@doctest_deco
+def full_path(startPath,files):
+ """Make full paths for all the listed files, based on startPath.
+
+ Only the base part of startPath is kept, since this routine is typically
+ used with a script's ``__file__`` variable as startPath. The base of startPath
+ is then prepended to all the listed files, forming the output list.
+
+ Parameters
+ ----------
+ startPath : string
+ Initial path to use as the base for the results. This path is split
+ using os.path.split() and only its first component is kept.
+
+ files : string or list
+ One or more files.
+
+ Examples
+ --------
+
+ >>> full_path('/foo/bar.py',['a.txt','b.txt'])
+ ['/foo/a.txt', '/foo/b.txt']
+
+ >>> full_path('/foo',['a.txt','b.txt'])
+ ['/a.txt', '/b.txt']
+
+ If a single file is given, the output is still a list::
+
+ >>> full_path('/foo','a.txt')
+ ['/a.txt']
+ """
+
+ files = list_strings(files)
+ base = os.path.split(startPath)[0]
+ return [ os.path.join(base,f) for f in files ]
+
+
+def parse_test_output(txt):
+ """Parse the output of a test run and return errors, failures.
+
+ Parameters
+ ----------
+ txt : str
+ Text output of a test run, assumed to contain a line of one of the
+ following forms::
+
+ 'FAILED (errors=1)'
+ 'FAILED (failures=1)'
+ 'FAILED (errors=1, failures=1)'
+
+ Returns
+ -------
+ nerr, nfail
+ number of errors and failures.
+ """
+
+ err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
+ if err_m:
+ nerr = int(err_m.group(1))
+ nfail = 0
+ return nerr, nfail
+
+ fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
+ if fail_m:
+ nerr = 0
+ nfail = int(fail_m.group(1))
+ return nerr, nfail
+
+ both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
+ re.MULTILINE)
+ if both_m:
+ nerr = int(both_m.group(1))
+ nfail = int(both_m.group(2))
+ return nerr, nfail
+
+ # If the input didn't match any of these forms, assume no error/failures
+ return 0, 0
+
+
+# So nose doesn't think this is a test
+parse_test_output.__test__ = False
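+
+# A quick sketch of the report format this parses (the text is illustrative):
+#
+#     >>> parse_test_output("Ran 3 tests\nFAILED (errors=1, failures=2)")
+#     (1, 2)
+#     >>> parse_test_output("Ran 3 tests\nOK")
+#     (0, 0)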
+
+
+def default_argv():
+ """Return a valid default argv for creating testing instances of ipython"""
+
+ return ['--quick', # so no config file is loaded
+ # Other defaults to minimize side effects on stdout
+ '--colors=NoColor', '--no-term-title','--no-banner',
+ '--autocall=0']
+
+
+def default_config():
+ """Return a config object with good defaults for testing."""
+ config = Config()
+ config.TerminalInteractiveShell.colors = 'NoColor'
+    config.TerminalInteractiveShell.term_title = False
+ config.TerminalInteractiveShell.autocall = 0
+ f = tempfile.NamedTemporaryFile(suffix=u'test_hist.sqlite', delete=False)
+ config.HistoryManager.hist_file = Path(f.name)
+ f.close()
+ config.HistoryManager.db_cache_size = 10000
+ return config
+
+
+def get_ipython_cmd(as_string=False):
+ """
+ Return appropriate IPython command line name. By default, this will return
+ a list that can be used with subprocess.Popen, for example, but passing
+ `as_string=True` allows for returning the IPython command as a string.
+
+ Parameters
+ ----------
+    as_string : bool
+ Flag to allow to return the command as a string.
+ """
+ ipython_cmd = [sys.executable, "-m", "IPython"]
+
+ if as_string:
+ ipython_cmd = " ".join(ipython_cmd)
+
+ return ipython_cmd
+
+def ipexec(fname, options=None, commands=()):
+ """Utility to call 'ipython filename'.
+
+ Starts IPython with a minimal and safe configuration to make startup as fast
+ as possible.
+
+ Note that this starts IPython in a subprocess!
+
+ Parameters
+ ----------
+ fname : str, Path
+ Name of file to be executed (should have .py or .ipy extension).
+
+ options : optional, list
+ Extra command-line flags to be passed to IPython.
+
+ commands : optional, list
+        Commands to send to the subprocess on stdin.
+
+ Returns
+ -------
+ ``(stdout, stderr)`` of ipython subprocess.
+ """
+ __tracebackhide__ = True
+
+ if options is None:
+ options = []
+
+ cmdargs = default_argv() + options
+
+ test_dir = os.path.dirname(__file__)
+
+ ipython_cmd = get_ipython_cmd()
+ # Absolute path for filename
+ full_fname = os.path.join(test_dir, fname)
+ full_cmd = ipython_cmd + cmdargs + ['--', full_fname]
+ env = os.environ.copy()
+ # FIXME: ignore all warnings in ipexec while we have shims
+ # should we keep suppressing warnings here, even after removing shims?
+ env['PYTHONWARNINGS'] = 'ignore'
+ # env.pop('PYTHONWARNINGS', None) # Avoid extraneous warnings appearing on stderr
+ # Prevent coloring under PyCharm ("\x1b[0m" at the end of the stdout)
+ env.pop("PYCHARM_HOSTED", None)
+ for k, v in env.items():
+ # Debug a bizarre failure we've seen on Windows:
+ # TypeError: environment can only contain strings
+ if not isinstance(v, str):
+ print(k, v)
+ p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
+ out, err = p.communicate(input=py3compat.encode('\n'.join(commands)) or None)
+ out, err = py3compat.decode(out), py3compat.decode(err)
+ # `import readline` causes 'ESC[?1034h' to be output sometimes,
+ # so strip that out before doing comparisons
+ if out:
+ out = re.sub(r'\x1b\[[^h]+h', '', out)
+ return out, err
+
+
+def ipexec_validate(fname, expected_out, expected_err='',
+ options=None, commands=()):
+ """Utility to call 'ipython filename' and validate output/error.
+
+ This function raises an AssertionError if the validation fails.
+
+ Note that this starts IPython in a subprocess!
+
+ Parameters
+ ----------
+ fname : str, Path
+ Name of the file to be executed (should have .py or .ipy extension).
+
+ expected_out : str
+ Expected stdout of the process.
+
+ expected_err : optional, str
+ Expected stderr of the process.
+
+ options : optional, list
+ Extra command-line flags to be passed to IPython.
+
+ Returns
+ -------
+ None
+ """
+ __tracebackhide__ = True
+
+ out, err = ipexec(fname, options, commands)
+ #print 'OUT', out # dbg
+ #print 'ERR', err # dbg
+ # If there are any errors, we must check those before stdout, as they may be
+ # more informative than simply having an empty stdout.
+ if err:
+ if expected_err:
+ assert "\n".join(err.strip().splitlines()) == "\n".join(
+ expected_err.strip().splitlines()
+ )
+ else:
+ raise ValueError('Running file %r produced error: %r' %
+ (fname, err))
+ # If no errors or output on stderr was expected, match stdout
+ assert "\n".join(out.strip().splitlines()) == "\n".join(
+ expected_out.strip().splitlines()
+ )
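+
+# A hypothetical usage sketch (the script name and expected text are
+# illustrative, not files shipped with this module):
+#
+#     ipexec_validate('simple_script.py', expected_out='hello')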
+
+
+class TempFileMixin(unittest.TestCase):
+ """Utility class to create temporary Python/IPython files.
+
+ Meant as a mixin class for test cases."""
+
+ def mktmp(self, src, ext='.py'):
+ """Make a valid python temp file."""
+ fname = temp_pyfile(src, ext)
+ if not hasattr(self, 'tmps'):
+ self.tmps=[]
+ self.tmps.append(fname)
+ self.fname = fname
+
+ def tearDown(self):
+ # If the tmpfile wasn't made because of skipped tests, like in
+ # win32, there's nothing to cleanup.
+ if hasattr(self, 'tmps'):
+ for fname in self.tmps:
+ try:
+ os.unlink(fname)
+ except:
+ # On Windows, even though we close the file, we still can't
+ # delete it. I have no clue why
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.tearDown()
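+
+# A usage sketch as a mixin (the test class below is hypothetical):
+#
+#     class RunScriptTests(TempFileMixin):
+#         def test_script(self):
+#             self.mktmp("print('hi')\n")   # sets self.fname
+#             out, err = ipexec(self.fname)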
+
+
+pair_fail_msg = ("Testing {0}\n\n"
+ "In:\n"
+ " {1!r}\n"
+ "Expected:\n"
+ " {2!r}\n"
+ "Got:\n"
+ " {3!r}\n")
+def check_pairs(func, pairs):
+ """Utility function for the common case of checking a function with a
+ sequence of input/output pairs.
+
+ Parameters
+ ----------
+ func : callable
+ The function to be tested. Should accept a single argument.
+ pairs : iterable
+ A list of (input, expected_output) tuples.
+
+ Returns
+ -------
+ None. Raises an AssertionError if any output does not match the expected
+ value.
+ """
+ __tracebackhide__ = True
+
+ name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>"))
+ for inp, expected in pairs:
+ out = func(inp)
+ assert out == expected, pair_fail_msg.format(name, inp, expected, out)
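+
+# Usage sketch:
+#
+#     check_pairs(str.upper, [("a", "A"), ("ok", "OK")])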
+
+
+MyStringIO = StringIO
+
+_re_type = type(re.compile(r''))
+
+notprinted_msg = """Did not find {0!r} in printed output (on {1}):
+-------
+{2!s}
+-------
+"""
+
+class AssertPrints(object):
+ """Context manager for testing that code prints certain text.
+
+ Examples
+ --------
+ >>> with AssertPrints("abc", suppress=False):
+ ... print("abcd")
+ ... print("def")
+ ...
+ abcd
+ def
+ """
+ def __init__(self, s, channel='stdout', suppress=True):
+ self.s = s
+ if isinstance(self.s, (str, _re_type)):
+ self.s = [self.s]
+ self.channel = channel
+ self.suppress = suppress
+
+ def __enter__(self):
+ self.orig_stream = getattr(sys, self.channel)
+ self.buffer = MyStringIO()
+ self.tee = Tee(self.buffer, channel=self.channel)
+ setattr(sys, self.channel, self.buffer if self.suppress else self.tee)
+
+ def __exit__(self, etype, value, traceback):
+ __tracebackhide__ = True
+
+ try:
+ if value is not None:
+ # If an error was raised, don't check anything else
+ return False
+ self.tee.flush()
+ setattr(sys, self.channel, self.orig_stream)
+ printed = self.buffer.getvalue()
+ for s in self.s:
+ if isinstance(s, _re_type):
+ assert s.search(printed), notprinted_msg.format(s.pattern, self.channel, printed)
+ else:
+ assert s in printed, notprinted_msg.format(s, self.channel, printed)
+ return False
+ finally:
+ self.tee.close()
+
+printed_msg = """Found {0!r} in printed output (on {1}):
+-------
+{2!s}
+-------
+"""
+
+class AssertNotPrints(AssertPrints):
+ """Context manager for checking that certain output *isn't* produced.
+
+ Counterpart of AssertPrints"""
+ def __exit__(self, etype, value, traceback):
+ __tracebackhide__ = True
+
+ try:
+ if value is not None:
+ # If an error was raised, don't check anything else
+ self.tee.close()
+ return False
+ self.tee.flush()
+ setattr(sys, self.channel, self.orig_stream)
+ printed = self.buffer.getvalue()
+ for s in self.s:
+ if isinstance(s, _re_type):
+ assert not s.search(printed),printed_msg.format(
+ s.pattern, self.channel, printed)
+ else:
+ assert s not in printed, printed_msg.format(
+ s, self.channel, printed)
+ return False
+ finally:
+ self.tee.close()
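+
+# Usage mirrors AssertPrints; a minimal sketch:
+#
+#     with AssertNotPrints("secret"):
+#         print("only public output here")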
+
+@contextmanager
+def mute_warn():
+ from IPython.utils import warn
+ save_warn = warn.warn
+ warn.warn = lambda *a, **kw: None
+ try:
+ yield
+ finally:
+ warn.warn = save_warn
+
+@contextmanager
+def make_tempfile(name):
+ """Create an empty, named, temporary file for the duration of the context."""
+ open(name, "w", encoding="utf-8").close()
+ try:
+ yield
+ finally:
+ os.unlink(name)
+
+def fake_input(inputs):
+ """Temporarily replace the input() function to return the given values
+
+ Use as a context manager:
+
+ with fake_input(['result1', 'result2']):
+ ...
+
+ Values are returned in order. If input() is called again after the last value
+ was used, EOFError is raised.
+ """
+ it = iter(inputs)
+ def mock_input(prompt=''):
+ try:
+ return next(it)
+ except StopIteration as e:
+ raise EOFError('No more inputs given') from e
+
+ return patch('builtins.input', mock_input)
+
+def help_output_test(subcommand=''):
+ """test that `ipython [subcommand] -h` works"""
+ cmd = get_ipython_cmd() + [subcommand, '-h']
+ out, err, rc = get_output_error_code(cmd)
+ assert rc == 0, err
+ assert "Traceback" not in err
+ assert "Options" in out
+ assert "--help-all" in out
+ return out, err
+
+
+def help_all_output_test(subcommand=''):
+ """test that `ipython [subcommand] --help-all` works"""
+ cmd = get_ipython_cmd() + [subcommand, '--help-all']
+ out, err, rc = get_output_error_code(cmd)
+ assert rc == 0, err
+ assert "Traceback" not in err
+ assert "Options" in out
+ assert "Class" in out
+ return out, err
+
diff --git a/contrib/python/ipython/py3/IPython/utils/PyColorize.py b/contrib/python/ipython/py3/IPython/utils/PyColorize.py
new file mode 100644
index 0000000000..86bb9af4c1
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/PyColorize.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+"""
+Class and program to colorize python source code for ANSI terminals.
+
+Based on an HTML code highlighter by Jurgen Hermann found at:
+http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
+
+Modifications by Fernando Perez (fperez@colorado.edu).
+
+Information on the original HTML highlighter follows:
+
+MoinMoin - Python Source Parser
+
+Title: Colorize Python source using the built-in tokenizer
+
+Submitter: Jurgen Hermann
+Last Updated: 2001/04/06
+
+Version no: 1.2
+
+Description:
+
+This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
+Python source code to HTML markup, rendering comments, keywords,
+operators, numeric and string literals in different colors.
+
+It shows how to use the built-in keyword, token and tokenize modules to
+scan Python source code and re-emit it with no changes to its original
+formatting (which is the hard part).
+"""
+
+__all__ = ['ANSICodeColors', 'Parser']
+
+_scheme_default = 'Linux'
+
+
+# Imports
+import keyword
+import os
+import sys
+import token
+import tokenize
+
+generate_tokens = tokenize.generate_tokens
+
+from IPython.utils.coloransi import TermColors, InputTermColors,ColorScheme, ColorSchemeTable
+from .colorable import Colorable
+from io import StringIO
+
+#############################################################################
+### Python Source Parser (does Highlighting)
+#############################################################################
+
+_KEYWORD = token.NT_OFFSET + 1
+_TEXT = token.NT_OFFSET + 2
+
+#****************************************************************************
+# Builtin color schemes
+
+Colors = TermColors # just a shorthand
+
+# Build a few color schemes
+NoColor = ColorScheme(
+ 'NoColor',{
+ 'header' : Colors.NoColor,
+ token.NUMBER : Colors.NoColor,
+ token.OP : Colors.NoColor,
+ token.STRING : Colors.NoColor,
+ tokenize.COMMENT : Colors.NoColor,
+ token.NAME : Colors.NoColor,
+ token.ERRORTOKEN : Colors.NoColor,
+
+ _KEYWORD : Colors.NoColor,
+ _TEXT : Colors.NoColor,
+
+ 'in_prompt' : InputTermColors.NoColor, # Input prompt
+ 'in_number' : InputTermColors.NoColor, # Input prompt number
+ 'in_prompt2' : InputTermColors.NoColor, # Continuation prompt
+ 'in_normal' : InputTermColors.NoColor, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.NoColor, # Output prompt
+ 'out_number' : Colors.NoColor, # Output prompt number
+
+ 'normal' : Colors.NoColor # color off (usu. Colors.Normal)
+ } )
+
+LinuxColors = ColorScheme(
+ 'Linux',{
+ 'header' : Colors.LightRed,
+ token.NUMBER : Colors.LightCyan,
+ token.OP : Colors.Yellow,
+ token.STRING : Colors.LightBlue,
+ tokenize.COMMENT : Colors.LightRed,
+ token.NAME : Colors.Normal,
+ token.ERRORTOKEN : Colors.Red,
+
+ _KEYWORD : Colors.LightGreen,
+ _TEXT : Colors.Yellow,
+
+ 'in_prompt' : InputTermColors.Green,
+ 'in_number' : InputTermColors.LightGreen,
+ 'in_prompt2' : InputTermColors.Green,
+ 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.Red,
+ 'out_number' : Colors.LightRed,
+
+ 'normal' : Colors.Normal # color off (usu. Colors.Normal)
+ } )
+
+NeutralColors = ColorScheme(
+ 'Neutral',{
+ 'header' : Colors.Red,
+ token.NUMBER : Colors.Cyan,
+ token.OP : Colors.Blue,
+ token.STRING : Colors.Blue,
+ tokenize.COMMENT : Colors.Red,
+ token.NAME : Colors.Normal,
+ token.ERRORTOKEN : Colors.Red,
+
+ _KEYWORD : Colors.Green,
+ _TEXT : Colors.Blue,
+
+ 'in_prompt' : InputTermColors.Blue,
+ 'in_number' : InputTermColors.LightBlue,
+ 'in_prompt2' : InputTermColors.Blue,
+ 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.Red,
+ 'out_number' : Colors.LightRed,
+
+ 'normal' : Colors.Normal # color off (usu. Colors.Normal)
+ } )
+
+# Hack: the 'neutral' colours are not very visible on a dark background on
+# Windows. Since Windows command prompts have a dark background by default, and
+# relatively few users are likely to alter that, we will use the 'Linux' colours,
+# designed for a dark background, as the default on Windows. Changing it here
+# avoids affecting the prompt colours rendered by prompt_toolkit, where the
+# neutral defaults do work OK.
+
+if os.name == 'nt':
+ NeutralColors = LinuxColors.copy(name='Neutral')
+
+LightBGColors = ColorScheme(
+ 'LightBG',{
+ 'header' : Colors.Red,
+ token.NUMBER : Colors.Cyan,
+ token.OP : Colors.Blue,
+ token.STRING : Colors.Blue,
+ tokenize.COMMENT : Colors.Red,
+ token.NAME : Colors.Normal,
+ token.ERRORTOKEN : Colors.Red,
+
+
+ _KEYWORD : Colors.Green,
+ _TEXT : Colors.Blue,
+
+ 'in_prompt' : InputTermColors.Blue,
+ 'in_number' : InputTermColors.LightBlue,
+ 'in_prompt2' : InputTermColors.Blue,
+ 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.Red,
+ 'out_number' : Colors.LightRed,
+
+ 'normal' : Colors.Normal # color off (usu. Colors.Normal)
+ } )
+
+# Build table of color schemes (needed by the parser)
+ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors, NeutralColors],
+ _scheme_default)
+
+Undefined = object()
+
+class Parser(Colorable):
+ """ Format colored Python source.
+ """
+
+ def __init__(self, color_table=None, out = sys.stdout, parent=None, style=None):
+ """ Create a parser with a specified color table and output channel.
+
+ Call format() to process code.
+ """
+
+ super(Parser, self).__init__(parent=parent)
+
+ self.color_table = color_table if color_table else ANSICodeColors
+ self.out = out
+ self.pos = None
+ self.lines = None
+ self.raw = None
+ if not style:
+ self.style = self.default_style
+ else:
+ self.style = style
+
+
+ def format(self, raw, out=None, scheme=Undefined):
+ import warnings
+ if scheme is not Undefined:
+ warnings.warn('The `scheme` argument of IPython.utils.PyColorize:Parser.format is deprecated since IPython 6.0.'
+ 'It will have no effect. Set the parser `style` directly.',
+ stacklevel=2)
+ return self.format2(raw, out)[0]
+
+ def format2(self, raw, out = None):
+ """ Parse and send the colored source.
+
+        If out is not specified, the default given to the constructor is
+        used.
+
+        out should be a file-like object. Optionally, out can be given as the
+ string 'str' and the parser will automatically return the output in a
+ string."""
+
+ string_output = 0
+ if out == 'str' or self.out == 'str' or \
+ isinstance(self.out, StringIO):
+ # XXX - I don't really like this state handling logic, but at this
+ # point I don't want to make major changes, so adding the
+ # isinstance() check is the simplest I can do to ensure correct
+ # behavior.
+ out_old = self.out
+ self.out = StringIO()
+ string_output = 1
+ elif out is not None:
+ self.out = out
+ else:
+ raise ValueError('`out` or `self.out` should be file-like or the value `"str"`')
+
+ # Fast return of the unmodified input for NoColor scheme
+ if self.style == 'NoColor':
+ error = False
+ self.out.write(raw)
+ if string_output:
+ return raw, error
+ return None, error
+
+ # local shorthands
+ colors = self.color_table[self.style].colors
+ self.colors = colors # put in object so __call__ sees it
+
+ # Remove trailing whitespace and normalize tabs
+ self.raw = raw.expandtabs().rstrip()
+
+ # store line offsets in self.lines
+ self.lines = [0, 0]
+ pos = 0
+ raw_find = self.raw.find
+ lines_append = self.lines.append
+ while True:
+ pos = raw_find('\n', pos) + 1
+ if not pos:
+ break
+ lines_append(pos)
+ lines_append(len(self.raw))
+
+ # parse the source and write it
+ self.pos = 0
+ text = StringIO(self.raw)
+
+ error = False
+ try:
+ for atoken in generate_tokens(text.readline):
+ self(*atoken)
+ except tokenize.TokenError as ex:
+ msg = ex.args[0]
+ line = ex.args[1][0]
+ self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
+ (colors[token.ERRORTOKEN],
+ msg, self.raw[self.lines[line]:],
+ colors.normal)
+ )
+ error = True
+ self.out.write(colors.normal+'\n')
+ if string_output:
+ output = self.out.getvalue()
+ self.out = out_old
+ return (output, error)
+ return (None, error)
+
+
+ def _inner_call_(self, toktype, toktext, start_pos):
+ """like call but write to a temporary buffer"""
+ buff = StringIO()
+ srow, scol = start_pos
+ colors = self.colors
+ owrite = buff.write
+
+ # line separator, so this works across platforms
+ linesep = os.linesep
+
+ # calculate new positions
+ oldpos = self.pos
+ newpos = self.lines[srow] + scol
+ self.pos = newpos + len(toktext)
+
+ # send the original whitespace, if needed
+ if newpos > oldpos:
+ owrite(self.raw[oldpos:newpos])
+
+ # skip indenting tokens
+ if toktype in [token.INDENT, token.DEDENT]:
+ self.pos = newpos
+ buff.seek(0)
+ return buff.read()
+
+ # map token type to a color group
+ if token.LPAR <= toktype <= token.OP:
+ toktype = token.OP
+ elif toktype == token.NAME and keyword.iskeyword(toktext):
+ toktype = _KEYWORD
+ color = colors.get(toktype, colors[_TEXT])
+
+ # Triple quoted strings must be handled carefully so that backtracking
+ # in pagers works correctly. We need color terminators on _each_ line.
+ if linesep in toktext:
+ toktext = toktext.replace(linesep, '%s%s%s' %
+ (colors.normal,linesep,color))
+
+ # send text
+ owrite('%s%s%s' % (color,toktext,colors.normal))
+ buff.seek(0)
+ return buff.read()
+
+
+ def __call__(self, toktype, toktext, start_pos, end_pos, line):
+ """ Token handler, with syntax highlighting."""
+ self.out.write(
+ self._inner_call_(toktype, toktext, start_pos))
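+
+
+# A minimal usage sketch: colorize a snippet and get the result back as a
+# string ('Linux' is one of the built-in schemes defined above).
+#
+#     colored = Parser(style='Linux').format("print('hi')", out='str')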
diff --git a/contrib/python/ipython/py3/IPython/utils/__init__.py b/contrib/python/ipython/py3/IPython/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/__init__.py
diff --git a/contrib/python/ipython/py3/IPython/utils/_process_cli.py b/contrib/python/ipython/py3/IPython/utils/_process_cli.py
new file mode 100644
index 0000000000..86e918a8d0
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/_process_cli.py
@@ -0,0 +1,69 @@
+"""cli-specific implementation of process utilities.
+
+cli - the Common Language Infrastructure, as used by IronPython. The code
+      can run on any operating system; check os.name for os-specific
+      settings.
+
+This file is only meant to be imported by process.py, not by end-users.
+
+This file is largely untested. Making it a full drop-in process interface
+for IronPython will probably require help filling in the details.
+"""
+
+# Import cli libraries:
+import clr
+import System
+
+# Import Python libraries:
+import os
+
+# Import IPython libraries:
+from ._process_common import arg_split
+
+
+def system(cmd):
+ """
+ system(cmd) should work in a cli environment on Mac OSX, Linux,
+ and Windows
+ """
+ psi = System.Diagnostics.ProcessStartInfo(cmd)
+ psi.RedirectStandardOutput = True
+ psi.RedirectStandardError = True
+ psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
+ psi.UseShellExecute = False
+ # Start up process:
+ reg = System.Diagnostics.Process.Start(psi)
+
+def getoutput(cmd):
+ """
+ getoutput(cmd) should work in a cli environment on Mac OSX, Linux,
+ and Windows
+ """
+ psi = System.Diagnostics.ProcessStartInfo(cmd)
+ psi.RedirectStandardOutput = True
+ psi.RedirectStandardError = True
+ psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
+ psi.UseShellExecute = False
+ # Start up process:
+ reg = System.Diagnostics.Process.Start(psi)
+ myOutput = reg.StandardOutput
+ output = myOutput.ReadToEnd()
+ myError = reg.StandardError
+ error = myError.ReadToEnd()
+ return output
+
+def check_pid(pid):
+ """
+ Check if a process with the given PID (pid) exists
+ """
+ try:
+ System.Diagnostics.Process.GetProcessById(pid)
+ # process with given pid is running
+ return True
+ except System.InvalidOperationException:
+ # process wasn't started by this object (but is running)
+ return True
+ except System.ArgumentException:
+ # process with given pid isn't running
+ return False
diff --git a/contrib/python/ipython/py3/IPython/utils/_process_common.py b/contrib/python/ipython/py3/IPython/utils/_process_common.py
new file mode 100644
index 0000000000..2a0b828839
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/_process_common.py
@@ -0,0 +1,210 @@
+"""Common utilities for the various process_* implementations.
+
+This file is only meant to be imported by the platform-specific implementations
+of subprocess utilities, and it contains tools that are common to all of them.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+import subprocess
+import shlex
+import sys
+import os
+
+from IPython.utils import py3compat
+
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+def read_no_interrupt(p):
+ """Read from a pipe ignoring EINTR errors.
+
+ This is necessary because when reading from pipes with GUI event loops
+ running in the background, often interrupts are raised that stop the
+ command from completing."""
+ import errno
+
+ try:
+ return p.read()
+ except IOError as err:
+ if err.errno != errno.EINTR:
+ raise
+
+
+def process_handler(cmd, callback, stderr=subprocess.PIPE):
+ """Open a command in a shell subprocess and execute a callback.
+
+ This function provides common scaffolding for creating subprocess.Popen()
+ calls. It creates a Popen object and then calls the callback with it.
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed by the system, using :class:`subprocess.Popen`.
+ If a string is passed, it will be run in the system shell. If a list is
+ passed, it will be used directly as arguments.
+ callback : callable
+ A one-argument function that will be called with the Popen object.
+ stderr : file descriptor number, optional
+ By default this is set to ``subprocess.PIPE``, but you can also pass the
+ value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
+ the same file descriptor as its stdout. This is useful to read stdout
+ and stderr combined in the order they are generated.
+
+ Returns
+ -------
+ The return value of the provided callback is returned.
+ """
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # On win32, close_fds can't be true when using pipes for stdin/out/err
+ close_fds = sys.platform != 'win32'
+ # Determine if cmd should be run with system shell.
+ shell = isinstance(cmd, str)
+ # On POSIX systems run shell commands with user-preferred shell.
+ executable = None
+ if shell and os.name == 'posix' and 'SHELL' in os.environ:
+ executable = os.environ['SHELL']
+ p = subprocess.Popen(cmd, shell=shell,
+ executable=executable,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=stderr,
+ close_fds=close_fds)
+
+ try:
+ out = callback(p)
+ except KeyboardInterrupt:
+ print('^C')
+ sys.stdout.flush()
+ sys.stderr.flush()
+ out = None
+ finally:
+ # Make really sure that we don't leave processes behind, in case the
+ # call above raises an exception
+ # We start by assuming the subprocess finished (to avoid NameErrors
+ # later depending on the path taken)
+ if p.returncode is None:
+ try:
+ p.terminate()
+ p.poll()
+ except OSError:
+ pass
+ # One last try on our way out
+ if p.returncode is None:
+ try:
+ p.kill()
+ except OSError:
+ pass
+
+ return out
+
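+# A minimal usage sketch (this is essentially what getoutput() below does):
+#
+#     combined = process_handler('echo hello',
+#                                lambda p: p.communicate()[0],
+#                                subprocess.STDOUT)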
+
+def getoutput(cmd):
+ """Run a command and return its stdout/stderr as a string.
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ output : str
+ A string containing the combination of stdout and stderr from the
+ subprocess, in whatever order the subprocess originally wrote to its
+ file descriptors (so the order of the information in this string is the
+ correct order as would be seen if running the command in a terminal).
+ """
+ out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
+ if out is None:
+ return ''
+ return py3compat.decode(out)
+
+
+def getoutputerror(cmd):
+ """Return (standard output, standard error) of executing cmd in a shell.
+
+ Accepts the same arguments as os.system().
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ stdout : str
+ stderr : str
+ """
+ return get_output_error_code(cmd)[:2]
+
+def get_output_error_code(cmd):
+ """Return (standard output, standard error, return code) of executing cmd
+ in a shell.
+
+ Accepts the same arguments as os.system().
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ stdout : str
+ stderr : str
+ returncode: int
+ """
+
+ out_err, p = process_handler(cmd, lambda p: (p.communicate(), p))
+ if out_err is None:
+ return '', '', p.returncode
+ out, err = out_err
+ return py3compat.decode(out), py3compat.decode(err), p.returncode
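+
+# Usage sketch (the command shown is illustrative):
+#
+#     out, err, rc = get_output_error_code('echo hello')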
+
+def arg_split(s, posix=False, strict=True):
+ """Split a command line's arguments in a shell-like manner.
+
+ This is a modified version of the standard library's shlex.split()
+ function, but with a default of posix=False for splitting, so that quotes
+ in inputs are respected.
+
+    If strict=False, then any errors shlex.split would raise will result in the
+ unparsed remainder being the last element of the list, rather than raising.
+ This is because we sometimes use arg_split to parse things other than
+ command-line args.
+ """
+
+ lex = shlex.shlex(s, posix=posix)
+ lex.whitespace_split = True
+ # Extract tokens, ensuring that things like leaving open quotes
+ # does not cause this to raise. This is important, because we
+ # sometimes pass Python source through this (e.g. %timeit f(" ")),
+ # and it shouldn't raise an exception.
+ # It may be a bad idea to parse things that are not command-line args
+ # through this function, but we do, so let's be safe about it.
+ lex.commenters='' #fix for GH-1269
+ tokens = []
+ while True:
+ try:
+ tokens.append(next(lex))
+ except StopIteration:
+ break
+ except ValueError:
+ if strict:
+ raise
+ # couldn't parse, get remaining blob as last token
+ tokens.append(lex.token)
+ break
+
+ return tokens
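+
+# Behaviour sketch: with the default posix=False quotes are preserved in the
+# tokens; with posix=True they are stripped (standard shlex behaviour).
+#
+#     arg_split('a "b c" d')              # -> ['a', '"b c"', 'd']
+#     arg_split('a "b c" d', posix=True)  # -> ['a', 'b c', 'd']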
diff --git a/contrib/python/ipython/py3/IPython/utils/_process_posix.py b/contrib/python/ipython/py3/IPython/utils/_process_posix.py
new file mode 100644
index 0000000000..59b5c23896
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/_process_posix.py
@@ -0,0 +1,216 @@
+"""Posix-specific implementation of process utilities.
+
+This file is only meant to be imported by process.py, not by end-users.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import errno
+import os
+import subprocess as sp
+import sys
+
+import pexpect
+
+# Our own
+from ._process_common import getoutput, arg_split
+from IPython.utils.encoding import DEFAULT_ENCODING
+
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+class ProcessHandler(object):
+ """Execute subprocesses under the control of pexpect.
+ """
+ # Timeout in seconds to wait on each reading of the subprocess' output.
+ # This should not be set too low to avoid cpu overusage from our side,
+ # since we read in a loop whose period is controlled by this timeout.
+ read_timeout = 0.05
+
+ # Timeout to give a process if we receive SIGINT, between sending the
+ # SIGINT to the process and forcefully terminating it.
+ terminate_timeout = 0.2
+
+ # File object where stdout and stderr of the subprocess will be written
+ logfile = None
+
+ # Shell to call for subprocesses to execute
+ _sh = None
+
+ @property
+ def sh(self):
+ if self._sh is None:
+ shell_name = os.environ.get("SHELL", "sh")
+ self._sh = pexpect.which(shell_name)
+ if self._sh is None:
+ raise OSError('"{}" shell not found'.format(shell_name))
+
+ return self._sh
+
+ def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):
+ """Arguments are used for pexpect calls."""
+ self.read_timeout = (ProcessHandler.read_timeout if read_timeout is
+ None else read_timeout)
+ self.terminate_timeout = (ProcessHandler.terminate_timeout if
+ terminate_timeout is None else
+ terminate_timeout)
+ self.logfile = sys.stdout if logfile is None else logfile
+
+ def getoutput(self, cmd):
+ """Run a command and return its stdout/stderr as a string.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ output : str
+ A string containing the combination of stdout and stderr from the
+ subprocess, in whatever order the subprocess originally wrote to its
+ file descriptors (so the order of the information in this string is the
+ correct order as would be seen if running the command in a terminal).
+ """
+ try:
+ return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
+ except KeyboardInterrupt:
+ print('^C', file=sys.stderr, end='')
+
+ def getoutput_pexpect(self, cmd):
+ """Run a command and return its stdout/stderr as a string.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ output : str
+ A string containing the combination of stdout and stderr from the
+ subprocess, in whatever order the subprocess originally wrote to its
+ file descriptors (so the order of the information in this string is the
+ correct order as would be seen if running the command in a terminal).
+ """
+ try:
+ return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
+ except KeyboardInterrupt:
+ print('^C', file=sys.stderr, end='')
+
+ def system(self, cmd):
+ """Execute a command in a subshell.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ int : child's exitstatus
+ """
+ # Get likely encoding for the output.
+ enc = DEFAULT_ENCODING
+
+ # Patterns to match on the output, for pexpect. We read input and
+ # allow either a short timeout or EOF
+ patterns = [pexpect.TIMEOUT, pexpect.EOF]
+ # the index of the EOF pattern in the list.
+ # even though we know it's 1, this call means we don't have to worry if
+ # we change the above list, and forget to change this value:
+ EOF_index = patterns.index(pexpect.EOF)
+ # The size of the output stored so far in the process output buffer.
+ # Since pexpect only appends to this buffer, each time we print we
+ # record how far we've printed, so that next time we only print *new*
+ # content from the buffer.
+ out_size = 0
+ try:
+ # Since we're not really searching the buffer for text patterns, we
+ # can set pexpect's search window to be tiny and it won't matter.
+ # We only search for the 'patterns' timeout or EOF, which aren't in
+ # the text itself.
+ #child = pexpect.spawn(pcmd, searchwindowsize=1)
+ if hasattr(pexpect, 'spawnb'):
+ child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
+ else:
+ child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
+ flush = sys.stdout.flush
+ while True:
+ # res is the index of the pattern that caused the match, so we
+ # know whether we've finished (if we matched EOF) or not
+ res_idx = child.expect_list(patterns, self.read_timeout)
+ print(child.before[out_size:].decode(enc, 'replace'), end='')
+ flush()
+ if res_idx==EOF_index:
+ break
+ # Update the pointer to what we've already printed
+ out_size = len(child.before)
+ except KeyboardInterrupt:
+ # We need to send ^C to the process. The ascii code for '^C' is 3
+ # (the character is known as ETX for 'End of Text', see
+ # curses.ascii.ETX).
+ child.sendline(chr(3))
+ # Read and print any more output the program might produce on its
+ # way out.
+ try:
+ out_size = len(child.before)
+ child.expect_list(patterns, self.terminate_timeout)
+ print(child.before[out_size:].decode(enc, 'replace'), end='')
+ sys.stdout.flush()
+ except KeyboardInterrupt:
+ # Impatient users tend to type it multiple times
+ pass
+ finally:
+ # Ensure the subprocess really is terminated
+ child.terminate(force=True)
+ # add isalive check, to ensure exitstatus is set:
+ child.isalive()
+
+ # We follow the subprocess pattern, returning either the exit status
+ # as a positive number, or the terminating signal as a negative
+ # number.
+        # on Linux, sh returns 128+n for signals terminating child processes
+ # on BSD (OS X), the signal code is set instead
+ if child.exitstatus is None:
+ # on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None
+ if child.signalstatus is None:
+ # this condition may never occur,
+ # but let's be certain we always return an integer.
+ return 0
+ return -child.signalstatus
+ if child.exitstatus > 128:
+ return -(child.exitstatus - 128)
+ return child.exitstatus
+
+
+# Make system() available with a functional interface for outside use. Note
+# that we use getoutput() from the _common utils, which is built on top of
+# popen(). Using pexpect to get subprocess output produces output that is
+# difficult to parse, since programs think they are talking to a tty and
+# produce highly formatted output (ls is a good example).
+system = ProcessHandler().system
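+
+# Usage sketch: runs the command through the user's shell, streams its output
+# to sys.stdout, and returns the exit status (negative when the child was
+# killed by a signal).
+#
+#     status = system('echo hello')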
+
+def check_pid(pid):
+ try:
+ os.kill(pid, 0)
+ except OSError as err:
+ if err.errno == errno.ESRCH:
+ return False
+ elif err.errno == errno.EPERM:
+ # Don't have permission to signal the process - probably means it exists
+ return True
+ raise
+ else:
+ return True
diff --git a/contrib/python/ipython/py3/IPython/utils/_process_win32.py b/contrib/python/ipython/py3/IPython/utils/_process_win32.py
new file mode 100644
index 0000000000..36fb092d7b
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/_process_win32.py
@@ -0,0 +1,184 @@
+"""Windows-specific implementation of process utilities.
+
+This file is only meant to be imported by process.py, not by end-users.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# stdlib
+import os
+import sys
+import ctypes
+import time
+
+from ctypes import c_int, POINTER
+from ctypes.wintypes import LPCWSTR, HLOCAL
+from subprocess import STDOUT, TimeoutExpired
+from threading import Thread
+
+# our own imports
+from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
+from . import py3compat
+from .encoding import DEFAULT_ENCODING
+
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+class AvoidUNCPath(object):
+ """A context manager to protect command execution from UNC paths.
+
+ In the Win32 API, commands can't be invoked with the cwd being a UNC path.
+ This context manager temporarily changes directory to the 'C:' drive on
+ entering, and restores the original working directory on exit.
+
+ The context manager returns the starting working directory *if* it made a
+ change and None otherwise, so that users can apply the necessary adjustment
+ to their system calls in the event of a change.
+
+ Examples
+ --------
+ ::
+ cmd = 'dir'
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ os.system(cmd)
+ """
+ def __enter__(self):
+ self.path = os.getcwd()
+ self.is_unc_path = self.path.startswith(r"\\")
+ if self.is_unc_path:
+ # change to c drive (as cmd.exe cannot handle UNC addresses)
+ os.chdir("C:")
+ return self.path
+ else:
+ # We return None to signal that there was no change in the working
+ # directory
+ return None
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.is_unc_path:
+ os.chdir(self.path)
+
+
+def _system_body(p):
+ """Callback for _system."""
+ enc = DEFAULT_ENCODING
+
+ def stdout_read():
+ for line in read_no_interrupt(p.stdout).splitlines():
+ line = line.decode(enc, 'replace')
+ print(line, file=sys.stdout)
+
+ def stderr_read():
+ for line in read_no_interrupt(p.stderr).splitlines():
+ line = line.decode(enc, 'replace')
+ print(line, file=sys.stderr)
+
+ Thread(target=stdout_read).start()
+ Thread(target=stderr_read).start()
+
+ # Wait to finish for returncode. Unfortunately, Python has a bug where
+ # wait() isn't interruptible (https://bugs.python.org/issue28168) so poll in
+ # a loop instead of just doing `return p.wait()`.
+ while True:
+ result = p.poll()
+ if result is None:
+ time.sleep(0.01)
+ else:
+ return result
+
+
+def system(cmd):
+ """Win32 version of os.system() that works with network shares.
+
+    Output from the command is streamed to this process's stdout and stderr.
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ int : child process' exit code.
+ """
+ # The controller provides interactivity with both
+ # stdin and stdout
+ #import _process_win32_controller
+ #_process_win32_controller.system(cmd)
+
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ return process_handler(cmd, _system_body)
+
+def getoutput(cmd):
+ """Return standard output of executing cmd in a shell.
+
+ Accepts the same arguments as os.system().
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ stdout : str
+ """
+
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
+
+ if out is None:
+ out = b''
+ return py3compat.decode(out)
+
+try:
+ CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+    CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
+    CommandLineToArgvW.restype = POINTER(LPCWSTR)
+    LocalFree = ctypes.windll.kernel32.LocalFree
+    LocalFree.restype = HLOCAL
+    LocalFree.argtypes = [HLOCAL]
+
+ def arg_split(commandline, posix=False, strict=True):
+ """Split a command line's arguments in a shell-like manner.
+
+ This is a special version for windows that use a ctypes call to CommandLineToArgvW
+ to do the argv splitting. The posix parameter is ignored.
+
+ If strict=False, process_common.arg_split(...strict=False) is used instead.
+ """
+ #CommandLineToArgvW returns path to executable if called with empty string.
+ if commandline.strip() == "":
+ return []
+ if not strict:
+ # not really a cl-arg, fallback on _process_common
+ return py_arg_split(commandline, posix=posix, strict=strict)
+ argvn = c_int()
+ result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
+ result_array_type = LPCWSTR * argvn.value
+ result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
+ retval = LocalFree(result_pointer)
+ return result
+except AttributeError:
+ arg_split = py_arg_split
+
+def check_pid(pid):
+ # OpenProcess returns 0 if no such process (of ours) exists
+ # positive int otherwise
+ return bool(ctypes.windll.kernel32.OpenProcess(1,0,pid))
diff --git a/contrib/python/ipython/py3/IPython/utils/_process_win32_controller.py b/contrib/python/ipython/py3/IPython/utils/_process_win32_controller.py
new file mode 100644
index 0000000000..f8c2a057a8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/_process_win32_controller.py
@@ -0,0 +1,573 @@
+"""Windows-specific implementation of process utilities with direct WinAPI.
+
+This file is meant to be used by process.py
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+# stdlib
+import os, sys, threading
+import ctypes, msvcrt
+
+# Win32 API types needed for the API calls
+from ctypes import POINTER
+from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \
+ ULONG, LPCWSTR
+LPDWORD = POINTER(DWORD)
+LPHANDLE = POINTER(HANDLE)
+ULONG_PTR = POINTER(ULONG)
+class SECURITY_ATTRIBUTES(ctypes.Structure):
+ _fields_ = [("nLength", DWORD),
+ ("lpSecurityDescriptor", LPVOID),
+ ("bInheritHandle", BOOL)]
+LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES)
+class STARTUPINFO(ctypes.Structure):
+ _fields_ = [("cb", DWORD),
+ ("lpReserved", LPCWSTR),
+ ("lpDesktop", LPCWSTR),
+ ("lpTitle", LPCWSTR),
+ ("dwX", DWORD),
+ ("dwY", DWORD),
+ ("dwXSize", DWORD),
+ ("dwYSize", DWORD),
+ ("dwXCountChars", DWORD),
+ ("dwYCountChars", DWORD),
+ ("dwFillAttribute", DWORD),
+ ("dwFlags", DWORD),
+ ("wShowWindow", WORD),
+ ("cbReserved2", WORD),
+ ("lpReserved2", LPVOID),
+ ("hStdInput", HANDLE),
+ ("hStdOutput", HANDLE),
+ ("hStdError", HANDLE)]
+LPSTARTUPINFO = POINTER(STARTUPINFO)
+class PROCESS_INFORMATION(ctypes.Structure):
+ _fields_ = [("hProcess", HANDLE),
+ ("hThread", HANDLE),
+ ("dwProcessId", DWORD),
+ ("dwThreadId", DWORD)]
+LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
+
+# Win32 API constants needed
+ERROR_HANDLE_EOF = 38
+ERROR_BROKEN_PIPE = 109
+ERROR_NO_DATA = 232
+HANDLE_FLAG_INHERIT = 0x0001
+STARTF_USESTDHANDLES = 0x0100
+CREATE_SUSPENDED = 0x0004
+CREATE_NEW_CONSOLE = 0x0010
+CREATE_NO_WINDOW = 0x08000000
+STILL_ACTIVE = 259
+WAIT_TIMEOUT = 0x0102
+WAIT_FAILED = 0xFFFFFFFF
+INFINITE = 0xFFFFFFFF
+DUPLICATE_SAME_ACCESS = 0x00000002
+ENABLE_ECHO_INPUT = 0x0004
+ENABLE_LINE_INPUT = 0x0002
+ENABLE_PROCESSED_INPUT = 0x0001
+
+# Win32 API functions needed
+GetLastError = ctypes.windll.kernel32.GetLastError
+GetLastError.argtypes = []
+GetLastError.restype = DWORD
+
+CreateFile = ctypes.windll.kernel32.CreateFileW
+CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE]
+CreateFile.restype = HANDLE
+
+CreatePipe = ctypes.windll.kernel32.CreatePipe
+CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE),
+ LPSECURITY_ATTRIBUTES, DWORD]
+CreatePipe.restype = BOOL
+
+CreateProcess = ctypes.windll.kernel32.CreateProcessW
+CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES,
+ LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO,
+ LPPROCESS_INFORMATION]
+CreateProcess.restype = BOOL
+
+GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess
+GetExitCodeProcess.argtypes = [HANDLE, LPDWORD]
+GetExitCodeProcess.restype = BOOL
+
+GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
+GetCurrentProcess.argtypes = []
+GetCurrentProcess.restype = HANDLE
+
+ResumeThread = ctypes.windll.kernel32.ResumeThread
+ResumeThread.argtypes = [HANDLE]
+ResumeThread.restype = DWORD
+
+ReadFile = ctypes.windll.kernel32.ReadFile
+ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
+ReadFile.restype = BOOL
+
+WriteFile = ctypes.windll.kernel32.WriteFile
+WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
+WriteFile.restype = BOOL
+
+GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
+GetConsoleMode.argtypes = [HANDLE, LPDWORD]
+GetConsoleMode.restype = BOOL
+
+SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
+SetConsoleMode.argtypes = [HANDLE, DWORD]
+SetConsoleMode.restype = BOOL
+
+FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer
+FlushConsoleInputBuffer.argtypes = [HANDLE]
+FlushConsoleInputBuffer.restype = BOOL
+
+WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
+WaitForSingleObject.argtypes = [HANDLE, DWORD]
+WaitForSingleObject.restype = DWORD
+
+DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle
+DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE,
+ DWORD, BOOL, DWORD]
+DuplicateHandle.restype = BOOL
+
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD]
+SetHandleInformation.restype = BOOL
+
+CloseHandle = ctypes.windll.kernel32.CloseHandle
+CloseHandle.argtypes = [HANDLE]
+CloseHandle.restype = BOOL
+
+CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)]
+CommandLineToArgvW.restype = POINTER(LPCWSTR)
+
+LocalFree = ctypes.windll.kernel32.LocalFree
+LocalFree.argtypes = [HLOCAL]
+LocalFree.restype = HLOCAL
+
+class AvoidUNCPath(object):
+ """A context manager to protect command execution from UNC paths.
+
+ In the Win32 API, commands can't be invoked with the cwd being a UNC path.
+ This context manager temporarily changes directory to the 'C:' drive on
+ entering, and restores the original working directory on exit.
+
+ The context manager returns the starting working directory *if* it made a
+ change and None otherwise, so that users can apply the necessary adjustment
+ to their system calls in the event of a change.
+
+ Examples
+ --------
+ ::
+ cmd = 'dir'
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ os.system(cmd)
+ """
+ def __enter__(self):
+ self.path = os.getcwd()
+ self.is_unc_path = self.path.startswith(r"\\")
+ if self.is_unc_path:
+ # change to c drive (as cmd.exe cannot handle UNC addresses)
+ os.chdir("C:")
+ return self.path
+ else:
+ # We return None to signal that there was no change in the working
+ # directory
+ return None
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.is_unc_path:
+ os.chdir(self.path)
+
+
+class Win32ShellCommandController(object):
+ """Runs a shell command in a 'with' context.
+
+ This implementation is Win32-specific.
+
+ Example:
+ # Runs the command interactively with default console stdin/stdout
+ with ShellCommandController('python -i') as scc:
+ scc.run()
+
+ # Runs the command using the provided functions for stdin/stdout
+ def my_stdout_func(s):
+ # print or save the string 's'
+ write_to_stdout(s)
+ def my_stdin_func():
+ # If input is available, return it as a string.
+ if input_available():
+ return get_input()
+ # If no input available, return None after a short delay to
+ # keep from blocking.
+ else:
+ time.sleep(0.01)
+ return None
+
+ with ShellCommandController('python -i') as scc:
+ scc.run(my_stdout_func, my_stdin_func)
+ """
+
+ def __init__(self, cmd, mergeout = True):
+ """Initializes the shell command controller.
+
+ The cmd is the program to execute, and mergeout is
+ whether to blend stdout and stderr into one output
+ in stdout. Merging them together in this fashion more
+ reliably keeps stdout and stderr in the correct order
+ especially for interactive shell usage.
+ """
+ self.cmd = cmd
+ self.mergeout = mergeout
+
+ def __enter__(self):
+ cmd = self.cmd
+ mergeout = self.mergeout
+
+ self.hstdout, self.hstdin, self.hstderr = None, None, None
+ self.piProcInfo = None
+ try:
+ p_hstdout, c_hstdout, p_hstderr, \
+ c_hstderr, p_hstdin, c_hstdin = [None]*6
+
+ # SECURITY_ATTRIBUTES with inherit handle set to True
+ saAttr = SECURITY_ATTRIBUTES()
+ saAttr.nLength = ctypes.sizeof(saAttr)
+ saAttr.bInheritHandle = True
+ saAttr.lpSecurityDescriptor = None
+
+ def create_pipe(uninherit):
+ """Creates a Windows pipe, which consists of two handles.
+
+ The 'uninherit' parameter controls which handle is not
+ inherited by the child process.
+ """
+ handles = HANDLE(), HANDLE()
+ if not CreatePipe(ctypes.byref(handles[0]),
+ ctypes.byref(handles[1]), ctypes.byref(saAttr), 0):
+ raise ctypes.WinError()
+ if not SetHandleInformation(handles[uninherit],
+ HANDLE_FLAG_INHERIT, 0):
+ raise ctypes.WinError()
+ return handles[0].value, handles[1].value
+
+ p_hstdout, c_hstdout = create_pipe(uninherit=0)
+ # 'mergeout' signals that stdout and stderr should be merged.
+ # We do that by using one pipe for both of them.
+ if mergeout:
+ c_hstderr = HANDLE()
+ if not DuplicateHandle(GetCurrentProcess(), c_hstdout,
+ GetCurrentProcess(), ctypes.byref(c_hstderr),
+ 0, True, DUPLICATE_SAME_ACCESS):
+ raise ctypes.WinError()
+ else:
+ p_hstderr, c_hstderr = create_pipe(uninherit=0)
+ c_hstdin, p_hstdin = create_pipe(uninherit=1)
+
+ # Create the process object
+ piProcInfo = PROCESS_INFORMATION()
+ siStartInfo = STARTUPINFO()
+ siStartInfo.cb = ctypes.sizeof(siStartInfo)
+ siStartInfo.hStdInput = c_hstdin
+ siStartInfo.hStdOutput = c_hstdout
+ siStartInfo.hStdError = c_hstderr
+ siStartInfo.dwFlags = STARTF_USESTDHANDLES
+ dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE
+
+ if not CreateProcess(None,
+ u"cmd.exe /c " + cmd,
+ None, None, True, dwCreationFlags,
+ None, None, ctypes.byref(siStartInfo),
+ ctypes.byref(piProcInfo)):
+ raise ctypes.WinError()
+
+ # Close this process's versions of the child handles
+ CloseHandle(c_hstdin)
+ c_hstdin = None
+ CloseHandle(c_hstdout)
+ c_hstdout = None
+ if c_hstderr is not None:
+ CloseHandle(c_hstderr)
+ c_hstderr = None
+
+ # Transfer ownership of the parent handles to the object
+ self.hstdin = p_hstdin
+ p_hstdin = None
+ self.hstdout = p_hstdout
+ p_hstdout = None
+ if not mergeout:
+ self.hstderr = p_hstderr
+ p_hstderr = None
+ self.piProcInfo = piProcInfo
+
+ finally:
+ if p_hstdin:
+ CloseHandle(p_hstdin)
+ if c_hstdin:
+ CloseHandle(c_hstdin)
+ if p_hstdout:
+ CloseHandle(p_hstdout)
+ if c_hstdout:
+ CloseHandle(c_hstdout)
+ if p_hstderr:
+ CloseHandle(p_hstderr)
+ if c_hstderr:
+ CloseHandle(c_hstderr)
+
+ return self
+
+ def _stdin_thread(self, handle, hprocess, func, stdout_func):
+ exitCode = DWORD()
+ bytesWritten = DWORD(0)
+ while True:
+ #print("stdin thread loop start")
+ # Get the input string (may be bytes or unicode)
+ data = func()
+
+ # None signals to poll whether the process has exited
+ if data is None:
+ #print("checking for process completion")
+ if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)):
+ raise ctypes.WinError()
+ if exitCode.value != STILL_ACTIVE:
+ return
+ # TESTING: Does zero-sized writefile help?
+ if not WriteFile(handle, b"", 0,
+ ctypes.byref(bytesWritten), None):
+ raise ctypes.WinError()
+ continue
+ #print("\nGot str %s\n" % repr(data), file=sys.stderr)
+
+ # Encode str input to bytes for WriteFile (UTF-8 assumed)
+ if isinstance(data, str):
+ data = data.encode('utf_8')
+
+ # What we have now must be a bytes string
+ if not isinstance(data, bytes):
+ raise RuntimeError("internal stdin function string error")
+
+ # An empty string signals EOF
+ if len(data) == 0:
+ return
+
+ # In a windows console, sometimes the input is echoed,
+ # but sometimes not. How do we determine when to do this?
+ stdout_func(data)
+ # WriteFile may not accept all the data at once.
+ # Loop until everything is processed
+ while len(data) != 0:
+ #print("Calling writefile")
+ if not WriteFile(handle, data, len(data),
+ ctypes.byref(bytesWritten), None):
+ # This occurs at exit
+ if GetLastError() == ERROR_NO_DATA:
+ return
+ raise ctypes.WinError()
+ #print("Called writefile")
+ data = data[bytesWritten.value:]
+
+ def _stdout_thread(self, handle, func):
+ # Allocate the output buffer
+ data = ctypes.create_string_buffer(4096)
+ while True:
+ bytesRead = DWORD(0)
+ if not ReadFile(handle, data, 4096,
+ ctypes.byref(bytesRead), None):
+ le = GetLastError()
+ if le == ERROR_BROKEN_PIPE:
+ return
+ else:
+ raise ctypes.WinError()
+ # FIXME: Python3
+ s = data.value[0:bytesRead.value]
+ #print("\nv: %s" % repr(s), file=sys.stderr)
+ func(s.decode('utf_8', 'replace'))
+
+ def run(self, stdout_func = None, stdin_func = None, stderr_func = None):
+ """Runs the process, using the provided functions for I/O.
+
+ The function stdin_func should return strings whenever a
+ character or characters become available.
+ The functions stdout_func and stderr_func are called whenever
+ something is printed to stdout or stderr, respectively.
+ These functions are called from different threads (but not
+ concurrently, because of the GIL).
+ """
+ if stdout_func is None and stdin_func is None and stderr_func is None:
+ return self._run_stdio()
+
+ if stderr_func is not None and self.mergeout:
+ raise RuntimeError("Shell command was initiated with "
+ "merged stdin/stdout, but a separate stderr_func "
+ "was provided to the run() method")
+
+ # Create a thread for each input/output handle
+ stdin_thread = None
+ threads = []
+ if stdin_func:
+ stdin_thread = threading.Thread(target=self._stdin_thread,
+ args=(self.hstdin, self.piProcInfo.hProcess,
+ stdin_func, stdout_func))
+ threads.append(threading.Thread(target=self._stdout_thread,
+ args=(self.hstdout, stdout_func)))
+ if not self.mergeout:
+ if stderr_func is None:
+ stderr_func = stdout_func
+ threads.append(threading.Thread(target=self._stdout_thread,
+ args=(self.hstderr, stderr_func)))
+ # Start the I/O threads and the process
+ if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF:
+ raise ctypes.WinError()
+ if stdin_thread is not None:
+ stdin_thread.start()
+ for thread in threads:
+ thread.start()
+ # Wait for the process to complete
+ if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \
+ WAIT_FAILED:
+ raise ctypes.WinError()
+ # Wait for the I/O threads to complete
+ for thread in threads:
+ thread.join()
+
+ # Wait for the stdin thread to complete
+ if stdin_thread is not None:
+ stdin_thread.join()
+
+ def _stdin_raw_nonblock(self):
+ """Use the raw Win32 handle of sys.stdin to do non-blocking reads"""
+ # WARNING: This is experimental, and produces inconsistent results.
+ # It's possible for the handle not to be appropriate for use
+ # with WaitForSingleObject, among other things.
+ handle = msvcrt.get_osfhandle(sys.stdin.fileno())
+ result = WaitForSingleObject(handle, 100)
+ if result == WAIT_FAILED:
+ raise ctypes.WinError()
+ elif result == WAIT_TIMEOUT:
+ print(".", end='')
+ return None
+ else:
+ data = ctypes.create_string_buffer(256)
+ bytesRead = DWORD(0)
+ print('?', end='')
+
+ if not ReadFile(handle, data, 256,
+ ctypes.byref(bytesRead), None):
+ raise ctypes.WinError()
+ # This ensures the non-blocking works with an actual console
+ # Not checking the error, so the processing will still work with
+ # other handle types
+ FlushConsoleInputBuffer(handle)
+
+ data = data.value
+ data = data.replace(b'\r\n', b'\n')
+ data = data.replace(b'\r', b'\n')
+ print(repr(data) + " ", end='')
+ return data
+
+ def _stdin_raw_block(self):
+ """Use a blocking stdin read"""
+ # The big problem with the blocking read is that it doesn't
+ # exit when it's supposed to in all contexts. An extra
+ # key-press may be required to trigger the exit.
+ try:
+ data = sys.stdin.read(1)
+ data = data.replace('\r', '\n')
+ return data
+ except WindowsError as we:
+ if we.winerror == ERROR_NO_DATA:
+ # This error occurs when the pipe is closed
+ return None
+ else:
+ # Otherwise let the error propagate
+ raise we
+
+ def _stdout_raw(self, s):
+ """Writes the string to stdout"""
+ print(s, end='', file=sys.stdout)
+ sys.stdout.flush()
+
+ def _stderr_raw(self, s):
+ """Writes the string to stdout"""
+ print(s, end='', file=sys.stderr)
+ sys.stderr.flush()
+
+ def _run_stdio(self):
+ """Runs the process using the system standard I/O.
+
+ Note: stdin is read with the blocking _stdin_raw_block helper;
+ the non-blocking _stdin_raw_nonblock variant exists but is
+ experimental and not used here.
+ """
+ # Disable Line and Echo mode
+ #lpMode = DWORD()
+ #handle = msvcrt.get_osfhandle(sys.stdin.fileno())
+ #if GetConsoleMode(handle, ctypes.byref(lpMode)):
+ # set_console_mode = True
+ # if not SetConsoleMode(handle, lpMode.value &
+ # ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)):
+ # raise ctypes.WinError()
+
+ if self.mergeout:
+ return self.run(stdout_func = self._stdout_raw,
+ stdin_func = self._stdin_raw_block)
+ else:
+ return self.run(stdout_func = self._stdout_raw,
+ stdin_func = self._stdin_raw_block,
+ stderr_func = self._stderr_raw)
+
+ # Restore the previous console mode
+ #if set_console_mode:
+ # if not SetConsoleMode(handle, lpMode.value):
+ # raise ctypes.WinError()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.hstdin:
+ CloseHandle(self.hstdin)
+ self.hstdin = None
+ if self.hstdout:
+ CloseHandle(self.hstdout)
+ self.hstdout = None
+ if self.hstderr:
+ CloseHandle(self.hstderr)
+ self.hstderr = None
+ if self.piProcInfo is not None:
+ CloseHandle(self.piProcInfo.hProcess)
+ CloseHandle(self.piProcInfo.hThread)
+ self.piProcInfo = None
+
+
+def system(cmd):
+ """Win32 version of os.system() that works with network shares.
+
+ Note that this implementation returns None, as it is meant for use in IPython.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ None : we explicitly do NOT return the subprocess status code, as this
+ utility is meant to be used extensively in IPython, where any return value
+ would trigger :func:`sys.displayhook` calls.
+ """
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ with Win32ShellCommandController(cmd) as scc:
+ scc.run()
+
+
+if __name__ == "__main__":
+ print("Test starting!")
+ #system("cmd")
+ system("python -i")
+ print("Test finished!")
diff --git a/contrib/python/ipython/py3/IPython/utils/_sysinfo.py b/contrib/python/ipython/py3/IPython/utils/_sysinfo.py
new file mode 100644
index 0000000000..49941f7881
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/_sysinfo.py
@@ -0,0 +1,2 @@
+# GENERATED BY setup.py
+commit = "f11276427"
diff --git a/contrib/python/ipython/py3/IPython/utils/capture.py b/contrib/python/ipython/py3/IPython/utils/capture.py
new file mode 100644
index 0000000000..97b6336688
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/capture.py
@@ -0,0 +1,170 @@
+# encoding: utf-8
+"""IO capturing utilities."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+from io import StringIO
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+class RichOutput(object):
+ def __init__(self, data=None, metadata=None, transient=None, update=False):
+ self.data = data or {}
+ self.metadata = metadata or {}
+ self.transient = transient or {}
+ self.update = update
+
+ def display(self):
+ from IPython.display import publish_display_data
+ publish_display_data(data=self.data, metadata=self.metadata,
+ transient=self.transient, update=self.update)
+
+ def _repr_mime_(self, mime):
+ if mime not in self.data:
+ return
+ data = self.data[mime]
+ if mime in self.metadata:
+ return data, self.metadata[mime]
+ else:
+ return data
+
+ def _repr_mimebundle_(self, include=None, exclude=None):
+ return self.data, self.metadata
+
+ def _repr_html_(self):
+ return self._repr_mime_("text/html")
+
+ def _repr_latex_(self):
+ return self._repr_mime_("text/latex")
+
+ def _repr_json_(self):
+ return self._repr_mime_("application/json")
+
+ def _repr_javascript_(self):
+ return self._repr_mime_("application/javascript")
+
+ def _repr_png_(self):
+ return self._repr_mime_("image/png")
+
+ def _repr_jpeg_(self):
+ return self._repr_mime_("image/jpeg")
+
+ def _repr_svg_(self):
+ return self._repr_mime_("image/svg+xml")
+
+
+class CapturedIO(object):
+ """Simple object for containing captured stdout/err and rich display StringIO objects
+
+ Each instance `c` has three attributes:
+
+ - ``c.stdout`` : standard output as a string
+ - ``c.stderr`` : standard error as a string
+ - ``c.outputs``: a list of rich display outputs
+
+ Additionally, there's a ``c.show()`` method which will print all of the
+ above in the same order, and can be invoked simply via ``c()``.
+ """
+
+ def __init__(self, stdout, stderr, outputs=None):
+ self._stdout = stdout
+ self._stderr = stderr
+ if outputs is None:
+ outputs = []
+ self._outputs = outputs
+
+ def __str__(self):
+ return self.stdout
+
+ @property
+ def stdout(self):
+ "Captured standard output"
+ if not self._stdout:
+ return ''
+ return self._stdout.getvalue()
+
+ @property
+ def stderr(self):
+ "Captured standard error"
+ if not self._stderr:
+ return ''
+ return self._stderr.getvalue()
+
+ @property
+ def outputs(self):
+ """A list of the captured rich display outputs, if any.
+
+ If you have a CapturedIO object ``c``, these can be displayed in IPython
+ using::
+
+ from IPython.display import display
+ for o in c.outputs:
+ display(o)
+ """
+ return [ RichOutput(**kargs) for kargs in self._outputs ]
+
+ def show(self):
+ """write my output to sys.stdout/err as appropriate"""
+ sys.stdout.write(self.stdout)
+ sys.stderr.write(self.stderr)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ for kargs in self._outputs:
+ RichOutput(**kargs).display()
+
+ __call__ = show
+
+
+class capture_output(object):
+ """context manager for capturing stdout/err"""
+ stdout = True
+ stderr = True
+ display = True
+
+ def __init__(self, stdout=True, stderr=True, display=True):
+ self.stdout = stdout
+ self.stderr = stderr
+ self.display = display
+ self.shell = None
+
+ def __enter__(self):
+ from IPython.core.getipython import get_ipython
+ from IPython.core.displaypub import CapturingDisplayPublisher
+ from IPython.core.displayhook import CapturingDisplayHook
+
+ self.sys_stdout = sys.stdout
+ self.sys_stderr = sys.stderr
+
+ if self.display:
+ self.shell = get_ipython()
+ if self.shell is None:
+ self.save_display_pub = None
+ self.display = False
+
+ stdout = stderr = outputs = None
+ if self.stdout:
+ stdout = sys.stdout = StringIO()
+ if self.stderr:
+ stderr = sys.stderr = StringIO()
+ if self.display:
+ self.save_display_pub = self.shell.display_pub
+ self.shell.display_pub = CapturingDisplayPublisher()
+ outputs = self.shell.display_pub.outputs
+ self.save_display_hook = sys.displayhook
+ sys.displayhook = CapturingDisplayHook(shell=self.shell,
+ outputs=outputs)
+
+ return CapturedIO(stdout, stderr, outputs)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ sys.stdout = self.sys_stdout
+ sys.stderr = self.sys_stderr
+ if self.display and self.shell:
+ self.shell.display_pub = self.save_display_pub
+ sys.displayhook = self.save_display_hook
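A short usage sketch of the capture_output context manager defined above (the import path IPython.utils.capture is taken from the diff header; the printed text is illustrative):

from IPython.utils.capture import capture_output

with capture_output() as captured:
    print("hello")

# Both streams are exposed as plain strings once the block exits.
assert captured.stdout == "hello\n"
assert captured.stderr == ""

# show() replays everything on the real sys.stdout/sys.stderr; rich display
# outputs are only collected when an IPython shell is active.
captured.show()
print(captured.outputs)   # [] outside IPython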
diff --git a/contrib/python/ipython/py3/IPython/utils/colorable.py b/contrib/python/ipython/py3/IPython/utils/colorable.py
new file mode 100644
index 0000000000..1e3caef62b
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/colorable.py
@@ -0,0 +1,25 @@
+#*****************************************************************************
+# Copyright (C) 2016 The IPython Team <ipython-dev@scipy.org>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+"""
+Color managing related utilities
+"""
+
+import pygments.styles
+
+from traitlets.config import Configurable
+from traitlets import Unicode
+
+
+available_themes = lambda : [s for s in pygments.styles.get_all_styles()]+['NoColor','LightBG','Linux', 'Neutral']
+
+class Colorable(Configurable):
+ """
+ A subclass of Configurable for all the classes that have a `default_style`
+ """
+ default_style=Unicode('LightBG').tag(config=True)
+
diff --git a/contrib/python/ipython/py3/IPython/utils/coloransi.py b/contrib/python/ipython/py3/IPython/utils/coloransi.py
new file mode 100644
index 0000000000..9300b01085
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/coloransi.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+"""Tools for coloring text in ANSI terminals.
+"""
+
+#*****************************************************************************
+# Copyright (C) 2002-2006 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+__all__ = ['TermColors','InputTermColors','ColorScheme','ColorSchemeTable']
+
+import os
+
+from IPython.utils.ipstruct import Struct
+
+color_templates = (
+ # Dark colors
+ ("Black" , "0;30"),
+ ("Red" , "0;31"),
+ ("Green" , "0;32"),
+ ("Brown" , "0;33"),
+ ("Blue" , "0;34"),
+ ("Purple" , "0;35"),
+ ("Cyan" , "0;36"),
+ ("LightGray" , "0;37"),
+ # Light colors
+ ("DarkGray" , "1;30"),
+ ("LightRed" , "1;31"),
+ ("LightGreen" , "1;32"),
+ ("Yellow" , "1;33"),
+ ("LightBlue" , "1;34"),
+ ("LightPurple" , "1;35"),
+ ("LightCyan" , "1;36"),
+ ("White" , "1;37"),
+ # Blinking colors. Probably should not be used in anything serious.
+ ("BlinkBlack" , "5;30"),
+ ("BlinkRed" , "5;31"),
+ ("BlinkGreen" , "5;32"),
+ ("BlinkYellow" , "5;33"),
+ ("BlinkBlue" , "5;34"),
+ ("BlinkPurple" , "5;35"),
+ ("BlinkCyan" , "5;36"),
+ ("BlinkLightGray", "5;37"),
+ )
+
+def make_color_table(in_class):
+ """Build a set of color attributes in a class.
+
+ Helper function for building the :class:`TermColors` and
+ :class:`InputTermColors`.
+ """
+ for name,value in color_templates:
+ setattr(in_class,name,in_class._base % value)
+
+class TermColors:
+ """Color escape sequences.
+
+ This class defines the escape sequences for all the standard (ANSI?)
+ colors in terminals. Also defines a NoColor escape which is just the null
+ string, suitable for defining 'dummy' color schemes in terminals which get
+ confused by color escapes.
+
+ This class should be used as a mixin for building color schemes."""
+
+ NoColor = '' # for color schemes in color-less terminals.
+ Normal = '\033[0m' # Reset normal coloring
+ _base = '\033[%sm' # Template for all other colors
+
+# Build the actual color table as a set of class attributes:
+make_color_table(TermColors)
+
+class InputTermColors:
+ """Color escape sequences for input prompts.
+
+ This class is similar to TermColors, but the escapes are wrapped in \\001
+ and \\002 so that readline can properly know the length of each line and
+ can wrap lines accordingly. Use this class for any colored text which
+ needs to be used in input prompts, such as in calls to raw_input().
+
+ This class defines the escape sequences for all the standard (ANSI?)
+ colors in terminals. Also defines a NoColor escape which is just the null
+ string, suitable for defining 'dummy' color schemes in terminals which get
+ confused by color escapes.
+
+ This class should be used as a mixin for building color schemes."""
+
+ NoColor = '' # for color schemes in color-less terminals.
+
+ if os.name == 'nt' and os.environ.get('TERM','dumb') == 'emacs':
+ # (X)emacs on W32 gets confused with \001 and \002 so we remove them
+ Normal = '\033[0m' # Reset normal coloring
+ _base = '\033[%sm' # Template for all other colors
+ else:
+ Normal = '\001\033[0m\002' # Reset normal coloring
+ _base = '\001\033[%sm\002' # Template for all other colors
+
+# Build the actual color table as a set of class attributes:
+make_color_table(InputTermColors)
+
+class NoColors:
+ """This defines all the same names as the colour classes, but maps them to
+ empty strings, so it can easily be substituted to turn off colours."""
+ NoColor = ''
+ Normal = ''
+
+for name, value in color_templates:
+ setattr(NoColors, name, '')
+
+class ColorScheme:
+ """Generic color scheme class. Just a name and a Struct."""
+ def __init__(self,__scheme_name_,colordict=None,**colormap):
+ self.name = __scheme_name_
+ if colordict is None:
+ self.colors = Struct(**colormap)
+ else:
+ self.colors = Struct(colordict)
+
+ def copy(self,name=None):
+ """Return a full copy of the object, optionally renaming it."""
+ if name is None:
+ name = self.name
+ return ColorScheme(name, self.colors.dict())
+
+class ColorSchemeTable(dict):
+ """General class to handle tables of color schemes.
+
+ It's basically a dict of color schemes with a couple of shorthand
+ attributes and some convenient methods.
+
+ active_scheme_name -> obvious
+ active_colors -> actual color table of the active scheme"""
+
+ def __init__(self, scheme_list=None, default_scheme=''):
+ """Create a table of color schemes.
+
+ The table can be created empty and manually filled or it can be
+ created with a list of valid color schemes AND the specification for
+ the default active scheme.
+ """
+
+ # create object attributes to be set later
+ self.active_scheme_name = ''
+ self.active_colors = None
+
+ if scheme_list:
+ if default_scheme == '':
+ raise ValueError('you must specify the default color scheme')
+ for scheme in scheme_list:
+ self.add_scheme(scheme)
+ self.set_active_scheme(default_scheme)
+
+ def copy(self):
+ """Return full copy of object"""
+ return ColorSchemeTable(self.values(),self.active_scheme_name)
+
+ def add_scheme(self,new_scheme):
+ """Add a new color scheme to the table."""
+ if not isinstance(new_scheme,ColorScheme):
+ raise ValueError('ColorSchemeTable only accepts ColorScheme instances')
+ self[new_scheme.name] = new_scheme
+
+ def set_active_scheme(self,scheme,case_sensitive=0):
+ """Set the currently active scheme.
+
+ Names are by default compared in a case-insensitive way, but this can
+ be changed by setting the parameter case_sensitive to true."""
+
+ scheme_names = list(self.keys())
+ if case_sensitive:
+ valid_schemes = scheme_names
+ scheme_test = scheme
+ else:
+ valid_schemes = [s.lower() for s in scheme_names]
+ scheme_test = scheme.lower()
+ try:
+ scheme_idx = valid_schemes.index(scheme_test)
+ except ValueError as e:
+ raise ValueError('Unrecognized color scheme: ' + scheme + \
+ '\nValid schemes: '+str(scheme_names).replace("'', ",'')) from e
+ else:
+ active = scheme_names[scheme_idx]
+ self.active_scheme_name = active
+ self.active_colors = self[active].colors
+ # Now allow using '' as an index for the current active scheme
+ self[''] = self[active]
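The pieces above compose as follows; a quick sketch (import path IPython.utils.coloransi taken from the diff header; the scheme names and keys are made up):

from IPython.utils.coloransi import ColorScheme, ColorSchemeTable, TermColors

# Two toy schemes sharing the same keys; NoColor maps everything to ''.
linux = ColorScheme('Linux', prompt=TermColors.LightGreen, error=TermColors.LightRed)
plain = ColorScheme('NoColor', prompt=TermColors.NoColor, error=TermColors.NoColor)

table = ColorSchemeTable([linux, plain], default_scheme='Linux')
print(repr(table.active_colors.prompt))   # ANSI escape for LightGreen

table.set_active_scheme('nocolor')        # matching is case-insensitive by default
assert table.active_scheme_name == 'NoColor'
assert table[''] is table['NoColor']      # '' always aliases the active scheme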
diff --git a/contrib/python/ipython/py3/IPython/utils/contexts.py b/contrib/python/ipython/py3/IPython/utils/contexts.py
new file mode 100644
index 0000000000..73c3f2e5b3
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/contexts.py
@@ -0,0 +1,61 @@
+# encoding: utf-8
+"""Miscellaneous context managers.
+"""
+
+import warnings
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+class preserve_keys(object):
+ """Preserve a set of keys in a dictionary.
+
+ Upon entering the context manager the current values of the keys
+ will be saved. Upon exiting, the dictionary will be updated to
+ restore the original value of the preserved keys. Preserved keys
+ which did not exist when entering the context manager will be
+ deleted.
+
+ Examples
+ --------
+
+ >>> d = {'a': 1, 'b': 2, 'c': 3}
+ >>> with preserve_keys(d, 'b', 'c', 'd'):
+ ... del d['a']
+ ... del d['b'] # will be reset to 2
+ ... d['c'] = None # will be reset to 3
+ ... d['d'] = 4 # will be deleted
+ ... d['e'] = 5
+ ... print(sorted(d.items()))
+ ...
+ [('c', None), ('d', 4), ('e', 5)]
+ >>> print(sorted(d.items()))
+ [('b', 2), ('c', 3), ('e', 5)]
+ """
+
+ def __init__(self, dictionary, *keys):
+ self.dictionary = dictionary
+ self.keys = keys
+
+ def __enter__(self):
+ # Actions to perform upon exiting.
+ to_delete = []
+ to_update = {}
+
+ d = self.dictionary
+ for k in self.keys:
+ if k in d:
+ to_update[k] = d[k]
+ else:
+ to_delete.append(k)
+
+ self.to_delete = to_delete
+ self.to_update = to_update
+
+ def __exit__(self, *exc_info):
+ d = self.dictionary
+
+ for k in self.to_delete:
+ d.pop(k, None)
+ d.update(self.to_update)
diff --git a/contrib/python/ipython/py3/IPython/utils/daemonize.py b/contrib/python/ipython/py3/IPython/utils/daemonize.py
new file mode 100644
index 0000000000..44b4a2832e
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/daemonize.py
@@ -0,0 +1,4 @@
+from warnings import warn
+
+warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize since IPython 4.0", DeprecationWarning, stacklevel=2)
+from ipyparallel.apps.daemonize import daemonize
diff --git a/contrib/python/ipython/py3/IPython/utils/data.py b/contrib/python/ipython/py3/IPython/utils/data.py
new file mode 100644
index 0000000000..433c90916c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/data.py
@@ -0,0 +1,30 @@
+# encoding: utf-8
+"""Utilities for working with data structures like lists, dicts and tuples.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+def uniq_stable(elems):
+ """uniq_stable(elems) -> list
+
+ Return a list of all the unique elements of an iterable,
+ maintaining the order in which they first appear.
+
+ Note: All elements in the input must be hashable for this routine
+ to work, as it internally uses a set for efficiency reasons.
+ """
+ seen = set()
+ return [x for x in elems if x not in seen and not seen.add(x)]
+
+
+def chop(seq, size):
+ """Chop a sequence into chunks of the given size."""
+ return [seq[i:i+size] for i in range(0,len(seq),size)]
+
+
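A quick sketch of the two helpers above (import path IPython.utils.data taken from the diff header):

from IPython.utils.data import chop, uniq_stable

# uniq_stable keeps the first occurrence of each element, in input order.
assert uniq_stable([3, 1, 3, 2, 1]) == [3, 1, 2]

# chop slices a sequence into fixed-size chunks; the last one may be shorter.
assert chop(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]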
diff --git a/contrib/python/ipython/py3/IPython/utils/decorators.py b/contrib/python/ipython/py3/IPython/utils/decorators.py
new file mode 100644
index 0000000000..bc7589cd35
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/decorators.py
@@ -0,0 +1,83 @@
+# encoding: utf-8
+"""Decorators that don't go anywhere else.
+
+This module contains misc. decorators that don't really go with another module
+in :mod:`IPython.utils`. Before putting something here please see if it should
+go into another topical module in :mod:`IPython.utils`.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from typing import Sequence
+
+from IPython.utils.docs import GENERATING_DOCUMENTATION
+
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def flag_calls(func):
+ """Wrap a function to detect and flag when it gets called.
+
+ This is a decorator which takes a function and wraps it in a function with
+ a 'called' attribute. wrapper.called is initialized to False.
+
+ The wrapper.called attribute is set to False right before each call to the
+ wrapped function, so if the call fails it remains False. After the call
+ completes, wrapper.called is set to True and the output is returned.
+
+ Testing for truth in wrapper.called allows you to determine if a call to
+ func() was attempted and succeeded."""
+
+ # don't wrap twice
+ if hasattr(func, 'called'):
+ return func
+
+ def wrapper(*args,**kw):
+ wrapper.called = False
+ out = func(*args,**kw)
+ wrapper.called = True
+ return out
+
+ wrapper.called = False
+ wrapper.__doc__ = func.__doc__
+ return wrapper
+
+
+def undoc(func):
+ """Mark a function or class as undocumented.
+
+ This is found by inspecting the AST, so for now it must be used directly
+ as @undoc, not as e.g. @decorators.undoc
+ """
+ return func
+
+
+def sphinx_options(
+ show_inheritance: bool = True,
+ show_inherited_members: bool = False,
+ exclude_inherited_from: Sequence[str] = tuple(),
+):
+ """Set sphinx options"""
+
+ def wrapper(func):
+ if not GENERATING_DOCUMENTATION:
+ return func
+
+ func._sphinx_options = dict(
+ show_inheritance=show_inheritance,
+ show_inherited_members=show_inherited_members,
+ exclude_inherited_from=exclude_inherited_from,
+ )
+ return func
+
+ return wrapper
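A minimal sketch of flag_calls, whose behaviour is described but not demonstrated in its docstring (import path IPython.utils.decorators taken from the diff header):

from IPython.utils.decorators import flag_calls

@flag_calls
def setup():
    return 42

assert setup.called is False   # never invoked yet
assert setup() == 42
assert setup.called is True    # flipped only after a successful call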
diff --git a/contrib/python/ipython/py3/IPython/utils/dir2.py b/contrib/python/ipython/py3/IPython/utils/dir2.py
new file mode 100644
index 0000000000..9f19b2dd84
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/dir2.py
@@ -0,0 +1,84 @@
+# encoding: utf-8
+"""A fancy version of Python's builtin :func:`dir` function.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import inspect
+import types
+
+
+def safe_hasattr(obj, attr):
+ """In recent versions of Python, hasattr() only catches AttributeError.
+ This catches all errors.
+ """
+ try:
+ getattr(obj, attr)
+ return True
+ except:
+ return False
+
+
+def dir2(obj):
+ """dir2(obj) -> list of strings
+
+ Extended version of the Python builtin dir(), which does a few extra
+ checks.
+
+ This version is guaranteed to return only a list of true strings, whereas
+ dir() returns anything that objects inject into themselves, even if they
+ are later not really valid for attribute access (many extension libraries
+ have such bugs).
+ """
+
+ # Start building the attribute list via dir(), and then complete it
+ # with a few extra special-purpose calls.
+
+ try:
+ words = set(dir(obj))
+ except Exception:
+ # TypeError: dir(obj) does not return a list
+ words = set()
+
+ if safe_hasattr(obj, '__class__'):
+ words |= set(dir(obj.__class__))
+
+ # filter out non-string attributes which may be stuffed by dir() calls
+ # and poor coding in third-party modules
+
+ words = [w for w in words if isinstance(w, str)]
+ return sorted(words)
+
+
+def get_real_method(obj, name):
+ """Like getattr, but with a few extra sanity checks:
+
+ - If obj is a class, ignore everything except class methods
+ - Check if obj is a proxy that claims to have all attributes
+ - Catch attribute access failing with any exception
+ - Check that the attribute is a callable object
+
+ Returns the method or None.
+ """
+ try:
+ canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)
+ except Exception:
+ return None
+
+ if canary is not None:
+ # It claimed to have an attribute it should never have
+ return None
+
+ try:
+ m = getattr(obj, name, None)
+ except Exception:
+ return None
+
+ if inspect.isclass(obj) and not isinstance(m, types.MethodType):
+ return None
+
+ if callable(m):
+ return m
+
+ return None
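A small sketch of dir2 and get_real_method, including the canary check that IPython uses to spot proxy objects claiming to have every attribute (import path IPython.utils.dir2 taken from the diff header; the classes are made up):

from IPython.utils.dir2 import dir2, get_real_method

class Proxy:
    # Objects that answer every getattr also "have" the canary attribute,
    # which is how get_real_method refuses to trust them.
    _ipython_canary_method_should_not_exist_ = True
    def render(self):
        return 'ok'

class Plain:
    def render(self):
        return 'ok'

assert 'render' in dir2(Plain())              # dir2 returns real strings only
assert get_real_method(Proxy(), 'render') is None
assert get_real_method(Plain(), 'render')() == 'ok'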
diff --git a/contrib/python/ipython/py3/IPython/utils/docs.py b/contrib/python/ipython/py3/IPython/utils/docs.py
new file mode 100644
index 0000000000..6a97815cdc
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/docs.py
@@ -0,0 +1,3 @@
+import os
+
+GENERATING_DOCUMENTATION = os.environ.get("IN_SPHINX_RUN", None) == "True"
diff --git a/contrib/python/ipython/py3/IPython/utils/encoding.py b/contrib/python/ipython/py3/IPython/utils/encoding.py
new file mode 100644
index 0000000000..651ee0c0b5
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/encoding.py
@@ -0,0 +1,71 @@
+# coding: utf-8
+"""
+Utilities for dealing with text encodings
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2012 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+import sys
+import locale
+import warnings
+
+# to deal with the possibility of sys.std* not being a stream at all
+def get_stream_enc(stream, default=None):
+ """Return the given stream's encoding or a default.
+
+ There are cases where ``sys.std*`` might not actually be a stream, so
+ check for the encoding attribute prior to returning it, and return
+ a default if it doesn't exist or evaluates as False. ``default``
+ is None if not provided.
+ """
+ if not hasattr(stream, 'encoding') or not stream.encoding:
+ return default
+ else:
+ return stream.encoding
+
+# Less conservative replacement for sys.getdefaultencoding, that will try
+# to match the environment.
+# Defined here as central function, so if we find better choices, we
+# won't need to make changes all over IPython.
+def getdefaultencoding(prefer_stream=True):
+ """Return IPython's guess for the default encoding for bytes as text.
+
+ If prefer_stream is True (default), asks for stdin.encoding first,
+ to match the calling Terminal, but that is often None for subprocesses.
+
+ Then fall back on locale.getpreferredencoding(),
+ which should be a sensible platform default (that respects LANG environment),
+ and finally to sys.getdefaultencoding() which is the most conservative option,
+ and usually UTF8 as of Python 3.
+ """
+ enc = None
+ if prefer_stream:
+ enc = get_stream_enc(sys.stdin)
+ if not enc or enc=='ascii':
+ try:
+ # There are reports of getpreferredencoding raising errors
+ # in some cases, which may well be fixed, but let's be conservative here.
+ enc = locale.getpreferredencoding()
+ except Exception:
+ pass
+ enc = enc or sys.getdefaultencoding()
+ # On windows `cp0` can be returned to indicate that there is no code page.
+ # Since cp0 is an invalid encoding return instead cp1252 which is the
+ # Western European default.
+ if enc == 'cp0':
+ warnings.warn(
+ "Invalid code page cp0 detected - using cp1252 instead."
+ "If cp1252 is incorrect please ensure a valid code page "
+ "is defined for the process.", RuntimeWarning)
+ return 'cp1252'
+ return enc
+
+DEFAULT_ENCODING = getdefaultencoding()
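The fallback chain described in getdefaultencoding can be seen directly; a short sketch (import path IPython.utils.encoding taken from the diff header):

import io
import sys

from IPython.utils.encoding import DEFAULT_ENCODING, get_stream_enc, getdefaultencoding

# A StringIO has no .encoding attribute, so the supplied default is returned.
assert get_stream_enc(io.StringIO(), default='utf-8') == 'utf-8'

# For real streams the stream encoding wins; otherwise locale and finally
# sys.getdefaultencoding() are consulted, as the docstring explains.
print(get_stream_enc(sys.stdin, default='ascii'))
print(getdefaultencoding())
assert DEFAULT_ENCODING == getdefaultencoding()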
diff --git a/contrib/python/ipython/py3/IPython/utils/eventful.py b/contrib/python/ipython/py3/IPython/utils/eventful.py
new file mode 100644
index 0000000000..837c6e0344
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/eventful.py
@@ -0,0 +1,5 @@
+from warnings import warn
+
+warn("IPython.utils.eventful has moved to traitlets.eventful", stacklevel=2)
+
+from traitlets.eventful import *
diff --git a/contrib/python/ipython/py3/IPython/utils/frame.py b/contrib/python/ipython/py3/IPython/utils/frame.py
new file mode 100644
index 0000000000..808906bda8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/frame.py
@@ -0,0 +1,92 @@
+# encoding: utf-8
+"""
+Utilities for working with stack frames.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def extract_vars(*names,**kw):
+ """Extract a set of variables by name from another frame.
+
+ Parameters
+ ----------
+ *names : str
+ One or more variable names which will be extracted from the caller's
+ frame.
+ **kw : integer, optional
+ How many frames in the stack to walk when looking for your variables.
+ The default is 0, which will use the frame where the call was made.
+
+ Examples
+ --------
+ ::
+
+ In [2]: def func(x):
+ ...: y = 1
+ ...: print(sorted(extract_vars('x','y').items()))
+ ...:
+
+ In [3]: func('hello')
+ [('x', 'hello'), ('y', 1)]
+ """
+
+ depth = kw.get('depth',0)
+
+ callerNS = sys._getframe(depth+1).f_locals
+ return dict((k,callerNS[k]) for k in names)
+
+
+def extract_vars_above(*names):
+ """Extract a set of variables by name from another frame.
+
+ Similar to extract_vars(), but with a specified depth of 1, so that names
+ are extracted exactly from above the caller.
+
+ This is simply a convenience function so that the very common case (for us)
+ of skipping exactly 1 frame doesn't have to construct a special dict for
+ keyword passing."""
+
+ callerNS = sys._getframe(2).f_locals
+ return dict((k,callerNS[k]) for k in names)
+
+
+def debugx(expr,pre_msg=''):
+ """Print the value of an expression from the caller's frame.
+
+ Takes an expression, evaluates it in the caller's frame and prints both
+ the given expression and the resulting value (as well as a debug mark
+ indicating the name of the calling function). The input must be of a form
+ suitable for eval().
+
+ An optional message can be passed, which will be prepended to the printed
+ expr->value pair."""
+
+ cf = sys._getframe(1)
+ print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
+ eval(expr,cf.f_globals,cf.f_locals)))
+
+
+# deactivate it by uncommenting the following line, which makes it a no-op
+#def debugx(expr,pre_msg=''): pass
+
+def extract_module_locals(depth=0):
+ """Returns (module, locals) of the function `depth` frames away from the caller"""
+ f = sys._getframe(depth + 1)
+ global_ns = f.f_globals
+ module = sys.modules[global_ns['__name__']]
+ return (module, f.f_locals)
diff --git a/contrib/python/ipython/py3/IPython/utils/generics.py b/contrib/python/ipython/py3/IPython/utils/generics.py
new file mode 100644
index 0000000000..3626ca4cc7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/generics.py
@@ -0,0 +1,29 @@
+# encoding: utf-8
+"""Generic functions for extending IPython.
+"""
+
+from IPython.core.error import TryNext
+from functools import singledispatch
+
+
+@singledispatch
+def inspect_object(obj):
+ """Called when you do obj?"""
+ raise TryNext
+
+
+@singledispatch
+def complete_object(obj, prev_completions):
+ """Custom completer dispatching for python objects.
+
+ Parameters
+ ----------
+ obj : object
+ The object to complete.
+ prev_completions : list
+ List of attributes discovered so far.
+
+ Returns
+ -------
+ The list of attributes in obj. If you only wish to add to the
+ attributes already discovered normally, return
+ own_attrs + prev_completions.
+ """
+ raise TryNext
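Because complete_object is a singledispatch generic, third-party code extends it by registering an implementation for its own types; a minimal sketch (import path IPython.utils.generics taken from the diff header; MyContainer is a hypothetical class):

from IPython.utils.generics import complete_object

class MyContainer:
    """Hypothetical object whose tab-completions we want to extend."""
    def __init__(self, fields):
        self._fields = fields

@complete_object.register(MyContainer)
def _complete_my_container(obj, prev_completions):
    # Offer the container's field names on top of the usual attributes.
    return list(obj._fields) + prev_completions

# IPython's completer calls the generic; unregistered types fall through to
# the base implementation, which raises TryNext.
print(complete_object(MyContainer(['alpha', 'beta']), ['strip']))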
diff --git a/contrib/python/ipython/py3/IPython/utils/importstring.py b/contrib/python/ipython/py3/IPython/utils/importstring.py
new file mode 100644
index 0000000000..51bfc7b569
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/importstring.py
@@ -0,0 +1,39 @@
+# encoding: utf-8
+"""
+A simple utility to import something by its string name.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+def import_item(name):
+ """Import and return ``bar`` given the string ``foo.bar``.
+
+ Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
+ executing the code ``from foo import bar``.
+
+ Parameters
+ ----------
+ name : string
+ The fully qualified name of the module/package being imported.
+
+ Returns
+ -------
+ mod : module object
+ The module that was imported.
+ """
+
+ parts = name.rsplit('.', 1)
+ if len(parts) == 2:
+ # called with 'foo.bar....'
+ package, obj = parts
+ module = __import__(package, fromlist=[obj])
+ try:
+ pak = getattr(module, obj)
+ except AttributeError as e:
+ raise ImportError('No module named %s' % obj) from e
+ return pak
+ else:
+ # called with un-dotted string
+ return __import__(parts[0])
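import_item in action; a quick sketch (import path IPython.utils.importstring taken from the diff header):

import os.path

from IPython.utils.importstring import import_item

# Dotted name: equivalent to "from os import path".
assert import_item('os.path') is os.path

# Un-dotted name: equivalent to a plain "import sys".
assert import_item('sys').__name__ == 'sys'

# A missing attribute surfaces as ImportError rather than AttributeError.
try:
    import_item('os.does_not_exist')
except ImportError as e:
    print(e)   # No module named does_not_exist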
diff --git a/contrib/python/ipython/py3/IPython/utils/io.py b/contrib/python/ipython/py3/IPython/utils/io.py
new file mode 100644
index 0000000000..cef4319f92
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/io.py
@@ -0,0 +1,151 @@
+# encoding: utf-8
+"""
+IO related utilities.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+
+import atexit
+import os
+import sys
+import tempfile
+from pathlib import Path
+from warnings import warn
+
+from IPython.utils.decorators import undoc
+from .capture import CapturedIO, capture_output
+
+class Tee(object):
+ """A class to duplicate an output stream to stdout/err.
+
+ This works in a manner very similar to the Unix 'tee' command.
+
+ When the object is closed or deleted, it closes the original file given to
+ it for duplication.
+ """
+ # Inspired by:
+ # http://mail.python.org/pipermail/python-list/2007-May/442737.html
+
+ def __init__(self, file_or_name, mode="w", channel='stdout'):
+ """Construct a new Tee object.
+
+ Parameters
+ ----------
+ file_or_name : filename or open filehandle (writable)
+ File that will be duplicated
+ mode : optional, valid mode for open().
+ If a filename was given, open with this mode.
+ channel : str, one of ['stdout', 'stderr']
+ """
+ if channel not in ['stdout', 'stderr']:
+ raise ValueError('Invalid channel spec %s' % channel)
+
+ if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
+ self.file = file_or_name
+ else:
+ encoding = None if "b" in mode else "utf-8"
+ self.file = open(file_or_name, mode, encoding=encoding)
+ self.channel = channel
+ self.ostream = getattr(sys, channel)
+ setattr(sys, channel, self)
+ self._closed = False
+
+ def close(self):
+ """Close the file and restore the channel."""
+ self.flush()
+ setattr(sys, self.channel, self.ostream)
+ self.file.close()
+ self._closed = True
+
+ def write(self, data):
+ """Write data to both channels."""
+ self.file.write(data)
+ self.ostream.write(data)
+ self.ostream.flush()
+
+ def flush(self):
+ """Flush both channels."""
+ self.file.flush()
+ self.ostream.flush()
+
+ def __del__(self):
+ if not self._closed:
+ self.close()
+
+
+def ask_yes_no(prompt, default=None, interrupt=None):
+ """Asks a question and returns a boolean (y/n) answer.
+
+ If default is given (one of 'y','n'), it is used if the user input is
+ empty. If interrupt is given (one of 'y','n'), it is used if the user
+ presses Ctrl-C. Otherwise the question is repeated until an answer is
+ given.
+
+ An EOF is treated as the default answer. If there is no default, an
+ exception is raised to prevent infinite loops.
+
+ Valid answers are: y/yes/n/no (match is not case sensitive)."""
+
+ answers = {'y':True,'n':False,'yes':True,'no':False}
+ ans = None
+ while ans not in answers.keys():
+ try:
+ ans = input(prompt+' ').lower()
+ if not ans: # response was an empty string
+ ans = default
+ except KeyboardInterrupt:
+ if interrupt:
+ ans = interrupt
+ print("\r")
+ except EOFError:
+ if default in answers.keys():
+ ans = default
+ print()
+ else:
+ raise
+
+ return answers[ans]
+
+
+def temp_pyfile(src, ext='.py'):
+ """Make a temporary python file, return filename and filehandle.
+
+ Parameters
+ ----------
+ src : string or list of strings (no need for ending newlines if list)
+ Source code to be written to the file.
+ ext : optional, string
+ Extension for the generated file.
+
+ Returns
+ -------
+ filename : str
+ Path to the generated file. It is the caller's responsibility to
+ unlink it when no longer needed.
+ """
+ fd, fname = tempfile.mkstemp(ext)
+ os.close(fd) # close the low-level fd; the file is re-opened below
+ with open(Path(fname), "w", encoding="utf-8") as f:
+ f.write(src)
+ f.flush()
+ return fname
+
+
+@undoc
+def raw_print(*args, **kw):
+ """DEPRECATED: Raw print to sys.__stdout__, otherwise identical interface to print()."""
+ warn("IPython.utils.io.raw_print has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
+
+ print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
+ file=sys.__stdout__)
+ sys.__stdout__.flush()
+
+@undoc
+def raw_print_err(*args, **kw):
+ """DEPRECATED: Raw print to sys.__stderr__, otherwise identical interface to print()."""
+ warn("IPython.utils.io.raw_print_err has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
+
+ print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
+ file=sys.__stderr__)
+ sys.__stderr__.flush()
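A short sketch of Tee and temp_pyfile (import path IPython.utils.io taken from the diff header; the log filename is illustrative):

import os

from IPython.utils.io import Tee, temp_pyfile

# Tee mirrors a standard stream into a file until close() restores it.
tee = Tee("session.log", mode="w", channel="stdout")
print("goes to the console *and* session.log")
tee.close()                                  # sys.stdout is restored here
with open("session.log", encoding="utf-8") as f:
    print(f.read(), end="")
os.remove("session.log")

# temp_pyfile writes source to a temporary .py file and returns its path;
# unlinking it afterwards is the caller's job.
fname = temp_pyfile("print('hi from the temp file')")
with open(fname, encoding="utf-8") as f:
    print(f.read(), end="")
os.unlink(fname)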
diff --git a/contrib/python/ipython/py3/IPython/utils/ipstruct.py b/contrib/python/ipython/py3/IPython/utils/ipstruct.py
new file mode 100644
index 0000000000..ed112101a3
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/ipstruct.py
@@ -0,0 +1,379 @@
+# encoding: utf-8
+"""A dict subclass that supports attribute style access.
+
+Authors:
+
+* Fernando Perez (original)
+* Brian Granger (refactoring to a dict subclass)
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+__all__ = ['Struct']
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+class Struct(dict):
+ """A dict subclass with attribute style access.
+
+ This dict subclass has a few extra features:
+
+ * Attribute style access.
+ * Protection of class members (like keys, items) when using attribute
+ style access.
+ * The ability to restrict assignment to only existing keys.
+ * Intelligent merging.
+ * Overloaded operators.
+ """
+ _allownew = True
+ def __init__(self, *args, **kw):
+ """Initialize with a dictionary, another Struct, or data.
+
+ Parameters
+ ----------
+ *args : dict, Struct
+ Initialize with one dict or Struct
+ **kw : dict
+ Initialize with key, value pairs.
+
+ Examples
+ --------
+ >>> s = Struct(a=10,b=30)
+ >>> s.a
+ 10
+ >>> s.b
+ 30
+ >>> s2 = Struct(s,c=30)
+ >>> sorted(s2.keys())
+ ['a', 'b', 'c']
+ """
+ object.__setattr__(self, '_allownew', True)
+ dict.__init__(self, *args, **kw)
+
+ def __setitem__(self, key, value):
+ """Set an item with check for allownew.
+
+ Examples
+ --------
+ >>> s = Struct()
+ >>> s['a'] = 10
+ >>> s.allow_new_attr(False)
+ >>> s['a'] = 10
+ >>> s['a']
+ 10
+ >>> try:
+ ... s['b'] = 20
+ ... except KeyError:
+ ... print('this is not allowed')
+ ...
+ this is not allowed
+ """
+ if not self._allownew and key not in self:
+ raise KeyError(
+ "can't create new attribute %s when allow_new_attr(False)" % key)
+ dict.__setitem__(self, key, value)
+
+ def __setattr__(self, key, value):
+ """Set an attr with protection of class members.
+
+ This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to
+ :exc:`AttributeError`.
+
+ Examples
+ --------
+ >>> s = Struct()
+ >>> s.a = 10
+ >>> s.a
+ 10
+ >>> try:
+ ... s.get = 10
+ ... except AttributeError:
+ ... print("you can't set a class member")
+ ...
+ you can't set a class member
+ """
+ # If key is an str it might be a class member or instance var
+ if isinstance(key, str):
+ # I can't simply call hasattr here because it calls getattr, which
+ # calls self.__getattr__, which returns True for keys in
+ # self._data. But I only want keys in the class and in
+ # self.__dict__
+ if key in self.__dict__ or hasattr(Struct, key):
+ raise AttributeError(
+ 'attr %s is a protected member of class Struct.' % key
+ )
+ try:
+ self.__setitem__(key, value)
+ except KeyError as e:
+ raise AttributeError(e) from e
+
+ def __getattr__(self, key):
+ """Get an attr by calling :meth:`dict.__getitem__`.
+
+ Like :meth:`__setattr__`, this method converts :exc:`KeyError` to
+ :exc:`AttributeError`.
+
+ Examples
+ --------
+ >>> s = Struct(a=10)
+ >>> s.a
+ 10
+ >>> type(s.get)
+ <...method'>
+ >>> try:
+ ... s.b
+ ... except AttributeError:
+ ... print("I don't have that key")
+ ...
+ I don't have that key
+ """
+ try:
+ result = self[key]
+ except KeyError as e:
+ raise AttributeError(key) from e
+ else:
+ return result
+
+ def __iadd__(self, other):
+ """s += s2 is a shorthand for s.merge(s2).
+
+ Examples
+ --------
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,c=40)
+ >>> s += s2
+ >>> sorted(s.keys())
+ ['a', 'b', 'c']
+ """
+ self.merge(other)
+ return self
+
+ def __add__(self,other):
+ """s + s2 -> New Struct made from s.merge(s2).
+
+ Examples
+ --------
+ >>> s1 = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,c=40)
+ >>> s = s1 + s2
+ >>> sorted(s.keys())
+ ['a', 'b', 'c']
+ """
+ sout = self.copy()
+ sout.merge(other)
+ return sout
+
+ def __sub__(self,other):
+ """s1 - s2 -> remove keys in s2 from s1.
+
+ Examples
+ --------
+ >>> s1 = Struct(a=10,b=30)
+ >>> s2 = Struct(a=40)
+ >>> s = s1 - s2
+ >>> s
+ {'b': 30}
+ """
+ sout = self.copy()
+ sout -= other
+ return sout
+
+ def __isub__(self,other):
+ """Inplace remove keys from self that are in other.
+
+ Examples
+ --------
+ >>> s1 = Struct(a=10,b=30)
+ >>> s2 = Struct(a=40)
+ >>> s1 -= s2
+ >>> s1
+ {'b': 30}
+ """
+ for k in other.keys():
+ if k in self:
+ del self[k]
+ return self
+
+ def __dict_invert(self, data):
+ """Helper function for merge.
+
+ Takes a dictionary whose values are lists and returns a dict with
+ the elements of each list as keys and the original keys as values.
+ """
+ outdict = {}
+ for k,lst in data.items():
+ if isinstance(lst, str):
+ lst = lst.split()
+ for entry in lst:
+ outdict[entry] = k
+ return outdict
+
+ def dict(self):
+ return self
+
+ def copy(self):
+ """Return a copy as a Struct.
+
+ Examples
+ --------
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = s.copy()
+ >>> type(s2) is Struct
+ True
+ """
+ return Struct(dict.copy(self))
+
+ def hasattr(self, key):
+ """hasattr function available as a method.
+
+ Implemented like has_key.
+
+ Examples
+ --------
+ >>> s = Struct(a=10)
+ >>> s.hasattr('a')
+ True
+ >>> s.hasattr('b')
+ False
+ >>> s.hasattr('get')
+ False
+ """
+ return key in self
+
+ def allow_new_attr(self, allow = True):
+ """Set whether new attributes can be created in this Struct.
+
+ This can be used to catch typos by verifying that the attribute user
+ tries to change already exists in this Struct.
+ """
+ object.__setattr__(self, '_allownew', allow)
+
+ def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
+ """Merge two Structs with customizable conflict resolution.
+
+ This is similar to :meth:`update`, but much more flexible. First, a
+ dict is made from data+key=value pairs. When merging this dict with
+ the Struct S, the optional dictionary 'conflict' is used to decide
+ what to do.
+
+ If conflict is not given, the default behavior is to preserve any keys
+ with their current value (the opposite of the :meth:`update` method's
+ behavior).
+
+ Parameters
+ ----------
+ __loc_data__ : dict, Struct
+ The data to merge into self
+ __conflict_solve : dict
+ The conflict policy dict. The keys are binary functions used to
+ resolve the conflict and the values are lists of strings naming
+ the keys the conflict resolution function applies to. Instead of
+ a list of strings a space separated string can be used, like
+ 'a b c'.
+ **kw : dict
+ Additional key, value pairs to merge in
+
+ Notes
+ -----
+ The `__conflict_solve` dict is a dictionary of binary functions which will be used to
+ solve key conflicts. Here is an example::
+
+ __conflict_solve = dict(
+ func1=['a','b','c'],
+ func2=['d','e']
+ )
+
+ In this case, the function :func:`func1` will be used to resolve
+ keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
+ keys 'd' and 'e'. This could also be written as::
+
+ __conflict_solve = dict(func1='a b c',func2='d e')
+
+ These functions will be called for each key they apply to with the
+ form::
+
+ func1(self['a'], other['a'])
+
+ The return value is used as the final merged value.
+
+ As a convenience, merge() provides five (the most commonly needed)
+ pre-defined policies: preserve, update, add, add_flip and add_s. The
+ easiest explanation is their implementation::
+
+ preserve = lambda old,new: old
+ update = lambda old,new: new
+ add = lambda old,new: old + new
+ add_flip = lambda old,new: new + old # note change of order!
+ add_s = lambda old,new: old + ' ' + new # only for str!
+
+ You can use those five words (as strings) as keys instead
+ of defining them as functions, and the merge method will substitute
+ the appropriate functions for you.
+
+ For more complicated conflict resolution policies, you still need to
+ construct your own functions.
+
+ Examples
+ --------
+ This shows the default policy:
+
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,c=40)
+ >>> s.merge(s2)
+ >>> sorted(s.items())
+ [('a', 10), ('b', 30), ('c', 40)]
+
+ Now, show how to specify a conflict dict:
+
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,b=40)
+ >>> conflict = {'update':'a','add':'b'}
+ >>> s.merge(s2,conflict)
+ >>> sorted(s.items())
+ [('a', 20), ('b', 70)]
+ """
+
+ data_dict = dict(__loc_data__ or {}, **kw)
+
+ # policies for conflict resolution: two argument functions which return
+ # the value that will go in the new struct
+ preserve = lambda old,new: old
+ update = lambda old,new: new
+ add = lambda old,new: old + new
+ add_flip = lambda old,new: new + old # note change of order!
+ add_s = lambda old,new: old + ' ' + new
+
+ # default policy is to keep current keys when there's a conflict
+ conflict_solve = dict.fromkeys(self, preserve)
+
+ # the conflict_solve dictionary is given by the user 'inverted': we
+ # need a name-function mapping, it comes as a function -> names
+ # dict. Make a local copy (b/c we'll make changes), replace user
+ # strings for the five builtin policies and invert it.
+ if __conflict_solve:
+ inv_conflict_solve_user = __conflict_solve.copy()
+ for name, func in [('preserve',preserve), ('update',update),
+ ('add',add), ('add_flip',add_flip),
+ ('add_s',add_s)]:
+ if name in inv_conflict_solve_user.keys():
+ inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
+ del inv_conflict_solve_user[name]
+ conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
+ for key in data_dict:
+ if key not in self:
+ self[key] = data_dict[key]
+ else:
+ self[key] = conflict_solve[key](self[key],data_dict[key])
+
diff --git a/contrib/python/ipython/py3/IPython/utils/jsonutil.py b/contrib/python/ipython/py3/IPython/utils/jsonutil.py
new file mode 100644
index 0000000000..2672e09e16
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/jsonutil.py
@@ -0,0 +1,5 @@
+from warnings import warn
+
+warn("IPython.utils.jsonutil has moved to jupyter_client.jsonutil", stacklevel=2)
+
+from jupyter_client.jsonutil import *
diff --git a/contrib/python/ipython/py3/IPython/utils/localinterfaces.py b/contrib/python/ipython/py3/IPython/utils/localinterfaces.py
new file mode 100644
index 0000000000..2f911222d8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/localinterfaces.py
@@ -0,0 +1,5 @@
+from warnings import warn
+
+warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces", stacklevel=2)
+
+from jupyter_client.localinterfaces import *
diff --git a/contrib/python/ipython/py3/IPython/utils/log.py b/contrib/python/ipython/py3/IPython/utils/log.py
new file mode 100644
index 0000000000..f9dea91ce9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/log.py
@@ -0,0 +1,5 @@
+from warnings import warn
+
+warn("IPython.utils.log has moved to traitlets.log", stacklevel=2)
+
+from traitlets.log import *
diff --git a/contrib/python/ipython/py3/IPython/utils/module_paths.py b/contrib/python/ipython/py3/IPython/utils/module_paths.py
new file mode 100644
index 0000000000..6f8cb1004a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/module_paths.py
@@ -0,0 +1,70 @@
+"""Utility functions for finding modules
+
+Utility functions for finding modules on sys.path.
+
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2011, the IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib imports
+import importlib
+import sys
+
+# Third-party imports
+
+# Our own imports
+
+
+#-----------------------------------------------------------------------------
+# Globals and constants
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+def find_mod(module_name):
+ """
+    Find module `module_name` on sys.path, and return the path to it.
+
+    - If `module_name` refers to a package directory, return the path to its __init__ file.
+    - If `module_name` is a directory without an __init__ file, return None.
+    - If the module is missing or does not have a `.py` or `.pyw` extension, return None.
+    - Note that we are not interested in running bytecode.
+    - Otherwise, return the full path of the module.
+
+ Parameters
+ ----------
+ module_name : str
+
+ Returns
+ -------
+ module_path : str
+ Path to module `module_name`, its __init__.py, or None,
+ depending on above conditions.
+ """
+    spec = importlib.util.find_spec(module_name)
+    if spec is None:
+        # No module of that name could be found: follow the docstring and
+        # return None instead of failing on `spec.origin` below.
+        return None
+    module_path = spec.origin
+    if module_path is None:
+ if spec.loader in sys.meta_path:
+ return spec.loader
+ return None
+ else:
+ split_path = module_path.split(".")
+ if split_path[-1] in ["py", "pyw"]:
+ return module_path
+ else:
+ return None
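+
+
+# Illustrative usage (editor's sketch, not part of the upstream module):
+#
+#     find_mod("json")         # -> path to the stdlib json/__init__.py
+#     find_mod("no_such_mod")  # -> None (hypothetical, unimportable name)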
diff --git a/contrib/python/ipython/py3/IPython/utils/openpy.py b/contrib/python/ipython/py3/IPython/utils/openpy.py
new file mode 100644
index 0000000000..297a762c7d
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/openpy.py
@@ -0,0 +1,105 @@
+"""
+Tools to open .py files as Unicode, using the encoding specified within the file,
+as per PEP 263.
+
+Much of the code is taken from the tokenize module in Python 3.2.
+"""
+
+import io
+from io import TextIOWrapper, BytesIO
+from pathlib import Path
+import re
+from tokenize import open, detect_encoding
+
+cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
+cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
+
+def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
+ """Converts a bytes string with python source code to unicode.
+
+ Unicode strings are passed through unchanged. Byte strings are checked
+ for the python source file encoding cookie to determine encoding.
+ txt can be either a bytes buffer or a string containing the source
+ code.
+ """
+ if isinstance(txt, str):
+ return txt
+ if isinstance(txt, bytes):
+ buffer = BytesIO(txt)
+ else:
+ buffer = txt
+ try:
+ encoding, _ = detect_encoding(buffer.readline)
+ except SyntaxError:
+ encoding = "ascii"
+ buffer.seek(0)
+ with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text:
+ text.mode = 'r'
+ if skip_encoding_cookie:
+ return u"".join(strip_encoding_cookie(text))
+ else:
+ return text.read()
+
+def strip_encoding_cookie(filelike):
+ """Generator to pull lines from a text-mode file, skipping the encoding
+ cookie if it is found in the first two lines.
+ """
+ it = iter(filelike)
+ try:
+ first = next(it)
+ if not cookie_comment_re.match(first):
+ yield first
+ second = next(it)
+ if not cookie_comment_re.match(second):
+ yield second
+ except StopIteration:
+ return
+
+ for line in it:
+ yield line
+
+def read_py_file(filename, skip_encoding_cookie=True):
+ """Read a Python file, using the encoding declared inside the file.
+
+ Parameters
+ ----------
+ filename : str
+ The path to the file to read.
+ skip_encoding_cookie : bool
+        If True (the default) and the encoding declaration is found in the
+        first two lines, that line will be excluded from the output.
+
+ Returns
+ -------
+ A unicode string containing the contents of the file.
+ """
+ filepath = Path(filename)
+ with open(filepath) as f: # the open function defined in this module.
+ if skip_encoding_cookie:
+ return "".join(strip_encoding_cookie(f))
+ else:
+ return f.read()
+
+def read_py_url(url, errors='replace', skip_encoding_cookie=True):
+ """Read a Python file from a URL, using the encoding declared inside the file.
+
+ Parameters
+ ----------
+ url : str
+ The URL from which to fetch the file.
+ errors : str
+ How to handle decoding errors in the file. Options are the same as for
+ bytes.decode(), but here 'replace' is the default.
+ skip_encoding_cookie : bool
+        If True (the default) and the encoding declaration is found in the
+        first two lines, that line will be excluded from the output.
+
+ Returns
+ -------
+ A unicode string containing the contents of the file.
+ """
+ # Deferred import for faster start
+ from urllib.request import urlopen
+ response = urlopen(url)
+ buffer = io.BytesIO(response.read())
+ return source_to_unicode(buffer, errors, skip_encoding_cookie)
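+
+
+# Illustrative usage (editor's sketch, not part of the upstream module;
+# "some_script.py" is a hypothetical path):
+#
+#     text = read_py_file("some_script.py")
+#     src = source_to_unicode(b"# coding: latin-1\nx = 1\n")
+#
+# Both return str; by default the encoding-cookie line is stripped.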
diff --git a/contrib/python/ipython/py3/IPython/utils/path.py b/contrib/python/ipython/py3/IPython/utils/path.py
new file mode 100644
index 0000000000..ccb70dccd4
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/path.py
@@ -0,0 +1,391 @@
+# encoding: utf-8
+"""
+Utilities for path handling.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import sys
+import errno
+import shutil
+import random
+import glob
+
+from IPython.utils.process import system
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+fs_encoding = sys.getfilesystemencoding()
+
+def _writable_dir(path):
+ """Whether `path` is a directory, to which the user has write access."""
+ return os.path.isdir(path) and os.access(path, os.W_OK)
+
+if sys.platform == 'win32':
+ def _get_long_path_name(path):
+ """Get a long path name (expand ~) on Windows using ctypes.
+
+ Examples
+ --------
+
+ >>> get_long_path_name('c:\\\\docume~1')
+ 'c:\\\\Documents and Settings'
+
+ """
+ try:
+ import ctypes
+ except ImportError as e:
+ raise ImportError('you need to have ctypes installed for this to work') from e
+ _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
+ _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
+ ctypes.c_uint ]
+
+ buf = ctypes.create_unicode_buffer(260)
+ rv = _GetLongPathName(path, buf, 260)
+ if rv == 0 or rv > 260:
+ return path
+ else:
+ return buf.value
+else:
+ def _get_long_path_name(path):
+ """Dummy no-op."""
+ return path
+
+
+
+def get_long_path_name(path):
+ """Expand a path into its long form.
+
+    On Windows this expands 8.3-style short components (the ``~`` forms such
+    as ``docume~1``) into their long names. On other platforms, it is a null operation.
+ """
+ return _get_long_path_name(path)
+
+
+def compress_user(path):
+ """Reverse of :func:`os.path.expanduser`
+ """
+ home = os.path.expanduser('~')
+ if path.startswith(home):
+ path = "~" + path[len(home):]
+ return path
+
+def get_py_filename(name):
+ """Return a valid python filename in the current directory.
+
+ If the given name is not a file, it adds '.py' and searches again.
+ Raises IOError with an informative message if the file isn't found.
+ """
+
+ name = os.path.expanduser(name)
+ if os.path.isfile(name):
+ return name
+ if not name.endswith(".py"):
+ py_name = name + ".py"
+ if os.path.isfile(py_name):
+ return py_name
+ raise IOError("File `%r` not found." % name)
+
+
+def filefind(filename: str, path_dirs=None) -> str:
+ """Find a file by looking through a sequence of paths.
+
+ This iterates through a sequence of paths looking for a file and returns
+ the full, absolute path of the first occurrence of the file. If no set of
+ path dirs is given, the filename is tested as is, after running through
+ :func:`expandvars` and :func:`expanduser`. Thus a simple call::
+
+ filefind('myfile.txt')
+
+ will find the file in the current working dir, but::
+
+ filefind('~/myfile.txt')
+
+    will find the file in the user's home directory. This function does not
+    automatically try any paths, such as the cwd or the user's home directory.
+
+ Parameters
+ ----------
+ filename : str
+ The filename to look for.
+ path_dirs : str, None or sequence of str
+        The sequence of paths to look for the file in. If None, the filename
+        needs to be absolute or be in the cwd. If a string, the string is
+        put into a sequence and then searched. If a sequence, walk through
+        each element and join with ``filename``, calling :func:`expandvars`
+        and :func:`expanduser` before testing for existence.
+
+ Returns
+ -------
+ path : str
+ returns absolute path to file.
+
+ Raises
+ ------
+ IOError
+ """
+
+ # If paths are quoted, abspath gets confused, strip them...
+ filename = filename.strip('"').strip("'")
+ # If the input is an absolute path, just check it exists
+ if os.path.isabs(filename) and os.path.isfile(filename):
+ return filename
+
+ if path_dirs is None:
+ path_dirs = ("",)
+ elif isinstance(path_dirs, str):
+ path_dirs = (path_dirs,)
+
+ for path in path_dirs:
+ if path == '.': path = os.getcwd()
+ testname = expand_path(os.path.join(path, filename))
+ if os.path.isfile(testname):
+ return os.path.abspath(testname)
+
+ raise IOError("File %r does not exist in any of the search paths: %r" %
+ (filename, path_dirs) )
+
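+# Illustrative usage (editor's sketch, not part of the upstream module;
+# the file name and directories are hypothetical):
+#
+#     filefind("ipython_config.py", ["~/.ipython", "."])
+#
+# returns the absolute path of the first match, or raises IOError.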
+
+class HomeDirError(Exception):
+ pass
+
+
+def get_home_dir(require_writable=False) -> str:
+ """Return the 'home' directory, as a unicode string.
+
+ Uses os.path.expanduser('~'), and checks for writability.
+
+ See stdlib docs for how this is determined.
+ For Python <3.8, $HOME is first priority on *ALL* platforms.
+ For Python >=3.8 on Windows, %HOME% is no longer considered.
+
+ Parameters
+ ----------
+ require_writable : bool [default: False]
+ if True:
+ guarantees the return value is a writable directory, otherwise
+ raises HomeDirError
+ if False:
+ The path is resolved, but it is not guaranteed to exist or be writable.
+ """
+
+ homedir = os.path.expanduser('~')
+ # Next line will make things work even when /home/ is a symlink to
+ # /usr/home as it is on FreeBSD, for example
+ homedir = os.path.realpath(homedir)
+
+ if not _writable_dir(homedir) and os.name == 'nt':
+ # expanduser failed, use the registry to get the 'My Documents' folder.
+ try:
+ import winreg as wreg
+ with wreg.OpenKey(
+ wreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+ ) as key:
+ homedir = wreg.QueryValueEx(key,'Personal')[0]
+ except:
+ pass
+
+ if (not require_writable) or _writable_dir(homedir):
+ assert isinstance(homedir, str), "Homedir should be unicode not bytes"
+ return homedir
+ else:
+ raise HomeDirError('%s is not a writable dir, '
+ 'set $HOME environment variable to override' % homedir)
+
+def get_xdg_dir():
+ """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
+
+    This is only for non-OS X posix (Linux, Unix, etc.) systems.
+ """
+
+ env = os.environ
+
+ if os.name == "posix":
+ # Linux, Unix, AIX, etc.
+ # use ~/.config if empty OR not set
+ xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
+ if xdg and _writable_dir(xdg):
+ assert isinstance(xdg, str)
+ return xdg
+
+ return None
+
+
+def get_xdg_cache_dir():
+ """Return the XDG_CACHE_HOME, if it is defined and exists, else None.
+
+ This is only for non-OS X posix (Linux,Unix,etc.) systems.
+ """
+
+ env = os.environ
+
+ if os.name == "posix":
+ # Linux, Unix, AIX, etc.
+ # use ~/.cache if empty OR not set
+ xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
+ if xdg and _writable_dir(xdg):
+ assert isinstance(xdg, str)
+ return xdg
+
+ return None
+
+
+def expand_path(s):
+ """Expand $VARS and ~names in a string, like a shell
+
+ :Examples:
+
+ In [2]: os.environ['FOO']='test'
+
+ In [3]: expand_path('variable FOO is $FOO')
+ Out[3]: 'variable FOO is test'
+ """
+    # This is a pretty subtle hack. When os.path.expandvars is given a UNC
+    # path on Windows (\\server\share$\%username%), it removes the $ to get
+    # \\server\share\%username%, apparently treating the lone $ as an empty
+    # variable. We need the $ to remain there (it indicates a hidden share),
+    # so we protect it around the expansion below.
+ if os.name=='nt':
+ s = s.replace('$\\', 'IPYTHON_TEMP')
+ s = os.path.expandvars(os.path.expanduser(s))
+ if os.name=='nt':
+ s = s.replace('IPYTHON_TEMP', '$\\')
+ return s
+
+
+def unescape_glob(string):
+ """Unescape glob pattern in `string`."""
+ def unescape(s):
+ for pattern in '*[]!?':
+ s = s.replace(r'\{0}'.format(pattern), pattern)
+ return s
+ return '\\'.join(map(unescape, string.split('\\\\')))
+
+
+def shellglob(args):
+ """
+ Do glob expansion for each element in `args` and return a flattened list.
+
+    Unmatched glob patterns will remain as-is in the returned list.
+
+ """
+ expanded = []
+ # Do not unescape backslash in Windows as it is interpreted as
+ # path separator:
+ unescape = unescape_glob if sys.platform != 'win32' else lambda x: x
+ for a in args:
+ expanded.extend(glob.glob(a) or [unescape(a)])
+ return expanded
+
+
+def target_outdated(target,deps):
+ """Determine whether a target is out of date.
+
+ target_outdated(target,deps) -> 1/0
+
+ deps: list of filenames which MUST exist.
+ target: single filename which may or may not exist.
+
+ If target doesn't exist or is older than any file listed in deps, return
+ true, otherwise return false.
+ """
+ try:
+ target_time = os.path.getmtime(target)
+ except os.error:
+ return 1
+ for dep in deps:
+ dep_time = os.path.getmtime(dep)
+ if dep_time > target_time:
+ #print "For target",target,"Dep failed:",dep # dbg
+ #print "times (dep,tar):",dep_time,target_time # dbg
+ return 1
+ return 0
+
+
+def target_update(target,deps,cmd):
+ """Update a target with a given command given a list of dependencies.
+
+ target_update(target,deps,cmd) -> runs cmd if target is outdated.
+
+ This is just a wrapper around target_outdated() which calls the given
+ command if target is outdated."""
+
+ if target_outdated(target,deps):
+ system(cmd)
+
+
+ENOLINK = 1998
+
+def link(src, dst):
+ """Hard links ``src`` to ``dst``, returning 0 or errno.
+
+ Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
+ supported by the operating system.
+ """
+
+ if not hasattr(os, "link"):
+ return ENOLINK
+ link_errno = 0
+ try:
+ os.link(src, dst)
+ except OSError as e:
+ link_errno = e.errno
+ return link_errno
+
+
+def link_or_copy(src, dst):
+ """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.
+
+ Attempts to maintain the semantics of ``shutil.copy``.
+
+ Because ``os.link`` does not overwrite files, a unique temporary file
+ will be used if the target already exists, then that file will be moved
+ into place.
+ """
+
+ if os.path.isdir(dst):
+ dst = os.path.join(dst, os.path.basename(src))
+
+ link_errno = link(src, dst)
+ if link_errno == errno.EEXIST:
+ if os.stat(src).st_ino == os.stat(dst).st_ino:
+ # dst is already a hard link to the correct file, so we don't need
+ # to do anything else. If we try to link and rename the file
+ # anyway, we get duplicate files - see http://bugs.python.org/issue21876
+ return
+
+ new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), )
+ try:
+ link_or_copy(src, new_dst)
+ except:
+ try:
+ os.remove(new_dst)
+ except OSError:
+ pass
+ raise
+ os.rename(new_dst, dst)
+ elif link_errno != 0:
+ # Either link isn't supported, or the filesystem doesn't support
+ # linking, or 'src' and 'dst' are on different filesystems.
+ shutil.copy(src, dst)
+
+def ensure_dir_exists(path, mode=0o755):
+ """ensure that a directory exists
+
+ If it doesn't exist, try to create it and protect against a race condition
+ if another process is doing the same.
+
+ The default permissions are 755, which differ from os.makedirs default of 777.
+ """
+ if not os.path.exists(path):
+ try:
+ os.makedirs(path, mode=mode)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ elif not os.path.isdir(path):
+ raise IOError("%r exists but is not a directory" % path)
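+
+
+# Illustrative usage (editor's sketch, not part of the upstream module;
+# the paths are hypothetical):
+#
+#     ensure_dir_exists("/tmp/ipython_demo")
+#     link_or_copy("notes.txt", "/tmp/ipython_demo")  # hard-link, else copy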
diff --git a/contrib/python/ipython/py3/IPython/utils/process.py b/contrib/python/ipython/py3/IPython/utils/process.py
new file mode 100644
index 0000000000..489b7c13d0
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/process.py
@@ -0,0 +1,69 @@
+# encoding: utf-8
+"""
+Utilities for working with external processes.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import os
+import shutil
+import sys
+
+if sys.platform == 'win32':
+ from ._process_win32 import system, getoutput, arg_split, check_pid
+elif sys.platform == 'cli':
+ from ._process_cli import system, getoutput, arg_split, check_pid
+else:
+ from ._process_posix import system, getoutput, arg_split, check_pid
+
+from ._process_common import getoutputerror, get_output_error_code, process_handler
+
+
+class FindCmdError(Exception):
+ pass
+
+
+def find_cmd(cmd):
+ """Find absolute path to executable cmd in a cross platform manner.
+
+    This function tries to determine the full path to a command line program
+    using :func:`shutil.which`. Most of the time it will use the version that
+    is first on the user's `PATH`.
+
+ Warning, don't use this to find IPython command line programs as there
+ is a risk you will find the wrong one. Instead find those using the
+ following code and looking for the application itself::
+
+ import sys
+ argv = [sys.executable, '-m', 'IPython']
+
+ Parameters
+ ----------
+ cmd : str
+ The command line program to look for.
+ """
+ path = shutil.which(cmd)
+ if path is None:
+ raise FindCmdError('command could not be found: %s' % cmd)
+ return path
+
+
+def abbrev_cwd():
+ """ Return abbreviated version of cwd, e.g. d:mydir """
+ cwd = os.getcwd().replace('\\','/')
+ drivepart = ''
+ tail = cwd
+ if sys.platform == 'win32':
+ if len(cwd) < 4:
+ return cwd
+ drivepart,tail = os.path.splitdrive(cwd)
+
+
+ parts = tail.split('/')
+ if len(parts) > 2:
+ tail = '/'.join(parts[-2:])
+
+    return drivepart + ('/' if cwd == '/' else tail)
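+
+
+# Illustrative usage (editor's sketch, not part of the upstream module):
+#
+#     find_cmd("git")   # absolute path to git, or FindCmdError if not found
+#     abbrev_cwd()      # e.g. "project/src" for a deeply nested cwd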
diff --git a/contrib/python/ipython/py3/IPython/utils/py3compat.py b/contrib/python/ipython/py3/IPython/utils/py3compat.py
new file mode 100644
index 0000000000..34af4c58f4
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/py3compat.py
@@ -0,0 +1,67 @@
+# coding: utf-8
+"""Compatibility tricks for Python 3. Mainly to do with unicode.
+
+This file is deprecated and will be removed in a future version.
+"""
+import platform
+import builtins as builtin_mod
+
+from .encoding import DEFAULT_ENCODING
+
+
+def decode(s, encoding=None):
+ encoding = encoding or DEFAULT_ENCODING
+ return s.decode(encoding, "replace")
+
+
+def encode(u, encoding=None):
+ encoding = encoding or DEFAULT_ENCODING
+ return u.encode(encoding, "replace")
+
+
+def cast_unicode(s, encoding=None):
+ if isinstance(s, bytes):
+ return decode(s, encoding)
+ return s
+
+
+def safe_unicode(e):
+ """unicode(e) with various fallbacks. Used for exceptions, which may not be
+ safe to call unicode() on.
+ """
+ try:
+ return str(e)
+ except UnicodeError:
+ pass
+
+ try:
+ return repr(e)
+ except UnicodeError:
+ pass
+
+ return "Unrecoverably corrupt evalue"
+
+
+# keep reference to builtin_mod because the kernel overrides that value
+# to forward requests to a frontend.
+def input(prompt=""):
+ return builtin_mod.input(prompt)
+
+
+def execfile(fname, glob, loc=None, compiler=None):
+ loc = loc if (loc is not None) else glob
+ with open(fname, "rb") as f:
+ compiler = compiler or compile
+ exec(compiler(f.read(), fname, "exec"), glob, loc)
+
+
+PYPY = platform.python_implementation() == "PyPy"
+
+# Cython still relies on this as of Dec 28, 2019
+# See https://github.com/cython/cython/pull/3291 and
+# https://github.com/ipython/ipython/issues/12068
+def no_code(x, encoding=None):
+ return x
+
+
+unicode_to_str = cast_bytes_py2 = no_code
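+
+
+# Illustrative usage (editor's sketch, not part of the upstream module;
+# assumes DEFAULT_ENCODING can decode the bytes, e.g. UTF-8):
+#
+#     cast_unicode(b"caf\xc3\xa9")  # -> "café" under a UTF-8 default encoding
+#     cast_unicode("already str")   # passed through unchanged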
diff --git a/contrib/python/ipython/py3/IPython/utils/sentinel.py b/contrib/python/ipython/py3/IPython/utils/sentinel.py
new file mode 100644
index 0000000000..dc57a2591c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/sentinel.py
@@ -0,0 +1,17 @@
+"""Sentinel class for constants with useful reprs"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+class Sentinel(object):
+
+ def __init__(self, name, module, docstring=None):
+ self.name = name
+ self.module = module
+ if docstring:
+ self.__doc__ = docstring
+
+
+ def __repr__(self):
+ return str(self.module)+'.'+self.name
+
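+# Illustrative usage (editor's sketch, not part of the upstream module;
+# the name UNSET is hypothetical):
+#
+#     UNSET = Sentinel("UNSET", __name__, "Marker for 'no value given'.")
+#     repr(UNSET)   # -> "<module name>.UNSET"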
diff --git a/contrib/python/ipython/py3/IPython/utils/shimmodule.py b/contrib/python/ipython/py3/IPython/utils/shimmodule.py
new file mode 100644
index 0000000000..8af44caa98
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/shimmodule.py
@@ -0,0 +1,89 @@
+"""A shim module for deprecated imports
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import importlib.abc
+import importlib.util
+import sys
+import types
+from importlib import import_module
+
+from .importstring import import_item
+
+
+class ShimWarning(Warning):
+ """A warning to show when a module has moved, and a shim is in its place."""
+
+
+class ShimImporter(importlib.abc.MetaPathFinder):
+ """Import hook for a shim.
+
+ This ensures that submodule imports return the real target module,
+ not a clone that will confuse `is` and `isinstance` checks.
+ """
+ def __init__(self, src, mirror):
+ self.src = src
+ self.mirror = mirror
+
+ def _mirror_name(self, fullname):
+ """get the name of the mirrored module"""
+
+ return self.mirror + fullname[len(self.src) :]
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname.startswith(self.src + "."):
+ mirror_name = self._mirror_name(fullname)
+ return importlib.util.find_spec(mirror_name)
+
+
+class ShimModule(types.ModuleType):
+
+ def __init__(self, *args, **kwargs):
+ self._mirror = kwargs.pop("mirror")
+ src = kwargs.pop("src", None)
+ if src:
+ kwargs['name'] = src.rsplit('.', 1)[-1]
+ super(ShimModule, self).__init__(*args, **kwargs)
+        # add import hook for descendant modules
+ if src:
+ sys.meta_path.append(
+ ShimImporter(src=src, mirror=self._mirror)
+ )
+
+ @property
+ def __path__(self):
+ return []
+
+ @property
+ def __spec__(self):
+ """Don't produce __spec__ until requested"""
+ return import_module(self._mirror).__spec__
+
+ def __dir__(self):
+ return dir(import_module(self._mirror))
+
+ @property
+ def __all__(self):
+ """Ensure __all__ is always defined"""
+ mod = import_module(self._mirror)
+ try:
+ return mod.__all__
+ except AttributeError:
+ return [name for name in dir(mod) if not name.startswith('_')]
+
+ def __getattr__(self, key):
+ # Use the equivalent of import_item(name), see below
+ name = "%s.%s" % (self._mirror, key)
+ try:
+ return import_item(name)
+ except ImportError as e:
+ raise AttributeError(key) from e
+
+ def __repr__(self):
+ # repr on a module can be called during error handling; make sure
+ # it does not fail, even if the import fails
+ try:
+ return self.__getattr__("__repr__")()
+ except AttributeError:
+ return f"<ShimModule for {self._mirror!r}>"
diff --git a/contrib/python/ipython/py3/IPython/utils/signatures.py b/contrib/python/ipython/py3/IPython/utils/signatures.py
new file mode 100644
index 0000000000..88d72b185e
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/signatures.py
@@ -0,0 +1,12 @@
+"""DEPRECATED: Function signature objects for callables.
+
+Use the standard library version if available, as it is more up to date.
+Fallback on backport otherwise.
+"""
+
+import warnings
+warnings.warn("{} backport for Python 2 is deprecated in IPython 6, which only supports "
+ "Python 3. Import directly from standard library `inspect`".format(__name__),
+ DeprecationWarning, stacklevel=2)
+
+from inspect import BoundArguments, Parameter, Signature, signature
diff --git a/contrib/python/ipython/py3/IPython/utils/strdispatch.py b/contrib/python/ipython/py3/IPython/utils/strdispatch.py
new file mode 100644
index 0000000000..d6bf510535
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/strdispatch.py
@@ -0,0 +1,68 @@
+"""String dispatch class to match regexps and dispatch commands.
+"""
+
+# Stdlib imports
+import re
+
+# Our own modules
+from IPython.core.hooks import CommandChainDispatcher
+
+# Code begins
+class StrDispatch(object):
+ """Dispatch (lookup) a set of strings / regexps for match.
+
+ Example:
+
+ >>> dis = StrDispatch()
+ >>> dis.add_s('hei',34, priority = 4)
+ >>> dis.add_s('hei',123, priority = 2)
+ >>> dis.add_re('h.i', 686)
+ >>> print(list(dis.flat_matches('hei')))
+ [123, 34, 686]
+ """
+
+ def __init__(self):
+ self.strs = {}
+ self.regexs = {}
+
+ def add_s(self, s, obj, priority= 0 ):
+ """ Adds a target 'string' for dispatching """
+
+ chain = self.strs.get(s, CommandChainDispatcher())
+ chain.add(obj,priority)
+ self.strs[s] = chain
+
+ def add_re(self, regex, obj, priority= 0 ):
+ """ Adds a target regexp for dispatching """
+
+ chain = self.regexs.get(regex, CommandChainDispatcher())
+ chain.add(obj,priority)
+ self.regexs[regex] = chain
+
+ def dispatch(self, key):
+ """ Get a seq of Commandchain objects that match key """
+ if key in self.strs:
+ yield self.strs[key]
+
+ for r, obj in self.regexs.items():
+ if re.match(r, key):
+ yield obj
+ else:
+ #print "nomatch",key # dbg
+ pass
+
+ def __repr__(self):
+ return "<Strdispatch %s, %s>" % (self.strs, self.regexs)
+
+ def s_matches(self, key):
+ if key not in self.strs:
+ return
+ for el in self.strs[key]:
+ yield el[1]
+
+ def flat_matches(self, key):
+ """ Yield all 'value' targets, without priority """
+ for val in self.dispatch(key):
+ for el in val:
+ yield el[1] # only value, no priority
+ return
diff --git a/contrib/python/ipython/py3/IPython/utils/sysinfo.py b/contrib/python/ipython/py3/IPython/utils/sysinfo.py
new file mode 100644
index 0000000000..857f0cf2d8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/sysinfo.py
@@ -0,0 +1,142 @@
+# encoding: utf-8
+"""
+Utilities for getting information about IPython and the system it's running in.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import platform
+import pprint
+import sys
+import subprocess
+
+from IPython.core import release
+from IPython.utils import _sysinfo, encoding
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def pkg_commit_hash(pkg_path):
+ """Get short form of commit hash given directory `pkg_path`
+
+ We get the commit hash from (in order of preference):
+
+ * IPython.utils._sysinfo.commit
+ * git output, if we are in a git repository
+
+ If these fail, we return a not-found placeholder tuple
+
+ Parameters
+ ----------
+ pkg_path : str
+ directory containing package
+ only used for getting commit from active repo
+
+ Returns
+ -------
+ hash_from : str
+ Where we got the hash from - description
+ hash_str : str
+ short form of hash
+ """
+ # Try and get commit from written commit text file
+ if _sysinfo.commit:
+ return "installation", _sysinfo.commit
+
+ # maybe we are in a repository
+ proc = subprocess.Popen('git rev-parse --short HEAD'.split(' '),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=pkg_path)
+ repo_commit, _ = proc.communicate()
+ if repo_commit:
+ return 'repository', repo_commit.strip().decode('ascii')
+ return '(none found)', '<not found>'
+
+
+def pkg_info(pkg_path):
+ """Return dict describing the context of this package
+
+ Parameters
+ ----------
+ pkg_path : str
+ path containing __init__.py for package
+
+ Returns
+ -------
+ context : dict
+ with named parameters of interest
+ """
+ src, hsh = pkg_commit_hash(pkg_path)
+ return dict(
+ ipython_version=release.version,
+ ipython_path=pkg_path,
+ commit_source=src,
+ commit_hash=hsh,
+ sys_version=sys.version,
+ sys_executable=sys.executable,
+ sys_platform=sys.platform,
+ platform=platform.platform(),
+ os_name=os.name,
+ default_encoding=encoding.DEFAULT_ENCODING,
+ )
+
+def get_sys_info():
+ """Return useful information about IPython and the system, as a dict."""
+ p = os.path
+ path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
+ return pkg_info(path)
+
+def sys_info():
+ """Return useful information about IPython and the system, as a string.
+
+ Examples
+ --------
+ ::
+
+ In [2]: print(sys_info())
+ {'commit_hash': '144fdae', # random
+ 'commit_source': 'repository',
+ 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
+ 'ipython_version': '0.11.dev',
+ 'os_name': 'posix',
+ 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
+ 'sys_executable': '/usr/bin/python',
+ 'sys_platform': 'linux2',
+ 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
+ """
+ return pprint.pformat(get_sys_info())
+
+
+def num_cpus():
+ """DEPRECATED
+
+ Return the effective number of CPUs in the system as an integer.
+
+ This cross-platform function makes an attempt at finding the total number of
+ available CPUs in the system, as returned by various underlying system and
+ python calls.
+
+ If it can't find a sensible answer, it returns 1 (though an error *may* make
+ it return a large positive number that's actually incorrect).
+ """
+ import warnings
+
+ warnings.warn(
+ "`num_cpus` is deprecated since IPython 8.0. Use `os.cpu_count` instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ return os.cpu_count() or 1
diff --git a/contrib/python/ipython/py3/IPython/utils/syspathcontext.py b/contrib/python/ipython/py3/IPython/utils/syspathcontext.py
new file mode 100644
index 0000000000..7af1ab60af
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/syspathcontext.py
@@ -0,0 +1,71 @@
+# encoding: utf-8
+"""
+Context managers for adding things to sys.path temporarily.
+
+Authors:
+
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import sys
+import warnings
+
+
+class appended_to_syspath(object):
+ """
+ Deprecated since IPython 8.1, no replacements.
+
+ A context for appending a directory to sys.path for a second."""
+
+ def __init__(self, dir):
+ warnings.warn(
+ "`appended_to_syspath` is deprecated since IPython 8.1, and has no replacements",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.dir = dir
+
+ def __enter__(self):
+ if self.dir not in sys.path:
+ sys.path.append(self.dir)
+ self.added = True
+ else:
+ self.added = False
+
+ def __exit__(self, type, value, traceback):
+ if self.added:
+ try:
+ sys.path.remove(self.dir)
+ except ValueError:
+ pass
+ # Returning False causes any exceptions to be re-raised.
+ return False
+
+class prepended_to_syspath(object):
+ """A context for prepending a directory to sys.path for a second."""
+
+ def __init__(self, dir):
+ self.dir = dir
+
+ def __enter__(self):
+ if self.dir not in sys.path:
+ sys.path.insert(0,self.dir)
+ self.added = True
+ else:
+ self.added = False
+
+ def __exit__(self, type, value, traceback):
+ if self.added:
+ try:
+ sys.path.remove(self.dir)
+ except ValueError:
+ pass
+ # Returning False causes any exceptions to be re-raised.
+ return False
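+
+
+# Illustrative usage (editor's sketch, not part of the upstream module;
+# the directory and module names are hypothetical):
+#
+#     with prepended_to_syspath("/tmp/extra_modules"):
+#         import plugin   # resolvable only inside the context
+#     # sys.path is restored on exit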
diff --git a/contrib/python/ipython/py3/IPython/utils/tempdir.py b/contrib/python/ipython/py3/IPython/utils/tempdir.py
new file mode 100644
index 0000000000..a233c73e38
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/tempdir.py
@@ -0,0 +1,59 @@
+""" This module contains classes - NamedFileInTemporaryDirectory, TemporaryWorkingDirectory.
+
+These classes add extra features such as creating a named file in temporary directory and
+creating a context manager for the working directory which is also temporary.
+"""
+
+import os as _os
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+
+class NamedFileInTemporaryDirectory(object):
+ def __init__(self, filename, mode="w+b", bufsize=-1, add_to_syspath=False, **kwds):
+ """
+ Open a file named `filename` in a temporary directory.
+
+ This context manager is preferred over `NamedTemporaryFile` in
+ stdlib `tempfile` when one needs to reopen the file.
+
+ Arguments `mode` and `bufsize` are passed to `open`.
+        The rest of the arguments are passed to `TemporaryDirectory`.
+
+ """
+ self._tmpdir = TemporaryDirectory(**kwds)
+ path = Path(self._tmpdir.name) / filename
+ encoding = None if "b" in mode else "utf-8"
+ self.file = open(path, mode, bufsize, encoding=encoding)
+
+ def cleanup(self):
+ self.file.close()
+ self._tmpdir.cleanup()
+
+ __del__ = cleanup
+
+ def __enter__(self):
+ return self.file
+
+ def __exit__(self, type, value, traceback):
+ self.cleanup()
+
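+# Illustrative usage (editor's sketch, not part of the upstream module;
+# the file name is hypothetical):
+#
+#     with NamedFileInTemporaryDirectory("notes.txt", mode="w") as f:
+#         f.write("hello")   # the file lives in a throwaway directory
+#     # the directory and file are removed on exit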
+
+class TemporaryWorkingDirectory(TemporaryDirectory):
+ """
+ Creates a temporary directory and sets the cwd to that directory.
+ Automatically reverts to previous cwd upon cleanup.
+ Usage example:
+
+ with TemporaryWorkingDirectory() as tmpdir:
+ ...
+ """
+
+ def __enter__(self):
+ self.old_wd = Path.cwd()
+ _os.chdir(self.name)
+ return super(TemporaryWorkingDirectory, self).__enter__()
+
+ def __exit__(self, exc, value, tb):
+ _os.chdir(self.old_wd)
+ return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
diff --git a/contrib/python/ipython/py3/IPython/utils/terminal.py b/contrib/python/ipython/py3/IPython/utils/terminal.py
new file mode 100644
index 0000000000..b09cfe0d22
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/terminal.py
@@ -0,0 +1,125 @@
+# encoding: utf-8
+"""
+Utilities for working with terminals.
+
+Authors:
+
+* Brian E. Granger
+* Fernando Perez
+* Alexander Belchenko (e-mail: bialix AT ukr.net)
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import sys
+import warnings
+from shutil import get_terminal_size as _get_terminal_size
+
+# This variable is part of the expected API of the module:
+ignore_termtitle = True
+
+
+
+if os.name == 'posix':
+ def _term_clear():
+ os.system('clear')
+elif sys.platform == 'win32':
+ def _term_clear():
+ os.system('cls')
+else:
+ def _term_clear():
+ pass
+
+
+
+def toggle_set_term_title(val):
+ """Control whether set_term_title is active or not.
+
+ set_term_title() allows writing to the console titlebar. In embedded
+ widgets this can cause problems, so this call can be used to toggle it on
+ or off as needed.
+
+ The default state of the module is for the function to be disabled.
+
+ Parameters
+ ----------
+ val : bool
+ If True, set_term_title() actually writes to the terminal (using the
+ appropriate platform-specific module). If False, it is a no-op.
+ """
+ global ignore_termtitle
+ ignore_termtitle = not(val)
+
+
+def _set_term_title(*args,**kw):
+ """Dummy no-op."""
+ pass
+
+
+def _restore_term_title():
+ pass
+
+
+_xterm_term_title_saved = False
+
+
+def _set_term_title_xterm(title):
+ """ Change virtual terminal title in xterm-workalikes """
+ global _xterm_term_title_saved
+ # Only save the title the first time we set, otherwise restore will only
+ # go back one title (probably undoing a %cd title change).
+ if not _xterm_term_title_saved:
+ # save the current title to the xterm "stack"
+ sys.stdout.write("\033[22;0t")
+ _xterm_term_title_saved = True
+ sys.stdout.write('\033]0;%s\007' % title)
+
+
+def _restore_term_title_xterm():
+ # Make sure the restore has at least one accompanying set.
+ global _xterm_term_title_saved
+ assert _xterm_term_title_saved
+ sys.stdout.write('\033[23;0t')
+ _xterm_term_title_saved = False
+
+
+if os.name == 'posix':
+ TERM = os.environ.get('TERM','')
+ if TERM.startswith('xterm'):
+ _set_term_title = _set_term_title_xterm
+ _restore_term_title = _restore_term_title_xterm
+elif sys.platform == 'win32':
+ import ctypes
+
+ SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
+ SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
+
+ def _set_term_title(title):
+ """Set terminal title using ctypes to access the Win32 APIs."""
+ SetConsoleTitleW(title)
+
+
+def set_term_title(title):
+ """Set terminal title using the necessary platform-dependent calls."""
+ if ignore_termtitle:
+ return
+ _set_term_title(title)
+
+
+def restore_term_title():
+ """Restore, if possible, terminal title to the original state"""
+ if ignore_termtitle:
+ return
+ _restore_term_title()
+
+
+def freeze_term_title():
+ warnings.warn("This function is deprecated, use toggle_set_term_title()")
+ global ignore_termtitle
+ ignore_termtitle = True
+
+
+def get_terminal_size(defaultx=80, defaulty=25):
+ return _get_terminal_size((defaultx, defaulty))
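+
+
+# Illustrative usage (editor's sketch, not part of the upstream module;
+# the title text is hypothetical):
+#
+#     toggle_set_term_title(True)           # title writes are off by default
+#     set_term_title("IPython: ~/project")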
diff --git a/contrib/python/ipython/py3/IPython/utils/text.py b/contrib/python/ipython/py3/IPython/utils/text.py
new file mode 100644
index 0000000000..74bccddf68
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/text.py
@@ -0,0 +1,752 @@
+# encoding: utf-8
+"""
+Utilities for working with strings and text.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.utils.text
+ :parts: 3
+"""
+
+import os
+import re
+import string
+import sys
+import textwrap
+from string import Formatter
+from pathlib import Path
+
+
+# datetime.strftime date format for ipython
+if sys.platform == 'win32':
+ date_format = "%B %d, %Y"
+else:
+ date_format = "%B %-d, %Y"
+
+class LSString(str):
+ """String derivative with a special access attributes.
+
+ These are normal strings, but with the special attributes:
+
+ .l (or .list) : value as list (split on newlines).
+ .n (or .nlstr): original value (the string itself).
+ .s (or .spstr): value as whitespace-separated string.
+    .p (or .paths): list of existing paths, as pathlib.Path objects.
+
+ Any values which require transformations are computed only once and
+ cached.
+
+ Such strings are very useful to efficiently interact with the shell, which
+ typically only understands whitespace-separated options for commands."""
+
+ def get_list(self):
+ try:
+ return self.__list
+ except AttributeError:
+ self.__list = self.split('\n')
+ return self.__list
+
+ l = list = property(get_list)
+
+ def get_spstr(self):
+ try:
+ return self.__spstr
+ except AttributeError:
+ self.__spstr = self.replace('\n',' ')
+ return self.__spstr
+
+ s = spstr = property(get_spstr)
+
+ def get_nlstr(self):
+ return self
+
+ n = nlstr = property(get_nlstr)
+
+ def get_paths(self):
+ try:
+ return self.__paths
+ except AttributeError:
+ self.__paths = [Path(p) for p in self.split('\n') if os.path.exists(p)]
+ return self.__paths
+
+ p = paths = property(get_paths)
+
+# FIXME: We need to reimplement type specific displayhook and then add this
+# back as a custom printer. This should also be moved outside utils into the
+# core.
+
+# def print_lsstring(arg):
+# """ Prettier (non-repr-like) and more informative printer for LSString """
+# print "LSString (.p, .n, .l, .s available). Value:"
+# print arg
+#
+#
+# print_lsstring = result_display.register(LSString)(print_lsstring)
+
+
+class SList(list):
+ """List derivative with a special access attributes.
+
+ These are normal lists, but with the special attributes:
+
+ * .l (or .list) : value as list (the list itself).
+ * .n (or .nlstr): value as a string, joined on newlines.
+ * .s (or .spstr): value as a string, joined on spaces.
+    * .p (or .paths): list of existing paths, as pathlib.Path objects.
+
+ Any values which require transformations are computed only once and
+ cached."""
+
+ def get_list(self):
+ return self
+
+ l = list = property(get_list)
+
+ def get_spstr(self):
+ try:
+ return self.__spstr
+ except AttributeError:
+ self.__spstr = ' '.join(self)
+ return self.__spstr
+
+ s = spstr = property(get_spstr)
+
+ def get_nlstr(self):
+ try:
+ return self.__nlstr
+ except AttributeError:
+ self.__nlstr = '\n'.join(self)
+ return self.__nlstr
+
+ n = nlstr = property(get_nlstr)
+
+ def get_paths(self):
+ try:
+ return self.__paths
+ except AttributeError:
+ self.__paths = [Path(p) for p in self if os.path.exists(p)]
+ return self.__paths
+
+ p = paths = property(get_paths)
+
+ def grep(self, pattern, prune = False, field = None):
+ """ Return all strings matching 'pattern' (a regex or callable)
+
+ This is case-insensitive. If prune is true, return all items
+ NOT matching the pattern.
+
+ If field is specified, the match must occur in the specified
+ whitespace-separated field.
+
+ Examples::
+
+ a.grep( lambda x: x.startswith('C') )
+ a.grep('Cha.*log', prune=1)
+ a.grep('chm', field=-1)
+ """
+
+ def match_target(s):
+ if field is None:
+ return s
+ parts = s.split()
+ try:
+ tgt = parts[field]
+ return tgt
+ except IndexError:
+ return ""
+
+ if isinstance(pattern, str):
+ pred = lambda x : re.search(pattern, x, re.IGNORECASE)
+ else:
+ pred = pattern
+ if not prune:
+ return SList([el for el in self if pred(match_target(el))])
+ else:
+ return SList([el for el in self if not pred(match_target(el))])
+
+ def fields(self, *fields):
+ """ Collect whitespace-separated fields from string list
+
+ Allows quick awk-like usage of string lists.
+
+ Example data (in var a, created by 'a = !ls -l')::
+
+ -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
+ drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
+
+ * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
+ * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
+ (note the joining by space).
+ * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
+
+ IndexErrors are ignored.
+
+ Without args, fields() just split()'s the strings.
+ """
+ if len(fields) == 0:
+ return [el.split() for el in self]
+
+ res = SList()
+ for el in [f.split() for f in self]:
+ lineparts = []
+
+ for fd in fields:
+ try:
+ lineparts.append(el[fd])
+ except IndexError:
+ pass
+ if lineparts:
+ res.append(" ".join(lineparts))
+
+ return res
+
+ def sort(self,field= None, nums = False):
+ """ sort by specified fields (see fields())
+
+ Example::
+
+ a.sort(1, nums = True)
+
+ Sorts a by second field, in numerical order (so that 21 > 3)
+
+ """
+
+ #decorate, sort, undecorate
+ if field is not None:
+ dsu = [[SList([line]).fields(field), line] for line in self]
+ else:
+ dsu = [[line, line] for line in self]
+ if nums:
+ for i in range(len(dsu)):
+ numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
+ try:
+ n = int(numstr)
+ except ValueError:
+ n = 0
+ dsu[i][0] = n
+
+
+ dsu.sort()
+ return SList([t[1] for t in dsu])
+
+
+# FIXME: We need to reimplement type specific displayhook and then add this
+# back as a custom printer. This should also be moved outside utils into the
+# core.
+
+# def print_slist(arg):
+# """ Prettier (non-repr-like) and more informative printer for SList """
+# print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
+# if hasattr(arg, 'hideonce') and arg.hideonce:
+# arg.hideonce = False
+# return
+#
+# nlprint(arg) # This was a nested list printer, now removed.
+#
+# print_slist = result_display.register(SList)(print_slist)
+
+
+def indent(instr,nspaces=4, ntabs=0, flatten=False):
+ """Indent a string a given number of spaces or tabstops.
+
+ indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
+
+ Parameters
+ ----------
+    instr : str
+ The string to be indented.
+ nspaces : int (default: 4)
+ The number of spaces to be indented.
+ ntabs : int (default: 0)
+ The number of tabs to be indented.
+ flatten : bool (default: False)
+ Whether to scrub existing indentation. If True, all lines will be
+ aligned to the same indentation. If False, existing indentation will
+ be strictly increased.
+
+ Returns
+ -------
+    str : string indented by ntabs and nspaces.
+
+ """
+ if instr is None:
+ return
+ ind = '\t'*ntabs+' '*nspaces
+ if flatten:
+ pat = re.compile(r'^\s*', re.MULTILINE)
+ else:
+ pat = re.compile(r'^', re.MULTILINE)
+ outstr = re.sub(pat, ind, instr)
+ if outstr.endswith(os.linesep+ind):
+ return outstr[:-len(ind)]
+ else:
+ return outstr
+
+
+def list_strings(arg):
+ """Always return a list of strings, given a string or list of strings
+ as input.
+
+ Examples
+ --------
+ ::
+
+ In [7]: list_strings('A single string')
+ Out[7]: ['A single string']
+
+ In [8]: list_strings(['A single string in a list'])
+ Out[8]: ['A single string in a list']
+
+ In [9]: list_strings(['A','list','of','strings'])
+ Out[9]: ['A', 'list', 'of', 'strings']
+ """
+
+ if isinstance(arg, str):
+ return [arg]
+ else:
+ return arg
+
+
+def marquee(txt='',width=78,mark='*'):
+ """Return the input string centered in a 'marquee'.
+
+ Examples
+ --------
+ ::
+
+ In [16]: marquee('A test',40)
+ Out[16]: '**************** A test ****************'
+
+ In [17]: marquee('A test',40,'-')
+ Out[17]: '---------------- A test ----------------'
+
+ In [18]: marquee('A test',40,' ')
+ Out[18]: ' A test '
+
+ """
+ if not txt:
+ return (mark*width)[:width]
+ nmark = (width-len(txt)-2)//len(mark)//2
+ if nmark < 0: nmark =0
+ marks = mark*nmark
+ return '%s %s %s' % (marks,txt,marks)
+
+
+ini_spaces_re = re.compile(r'^(\s+)')
+
+def num_ini_spaces(strng):
+ """Return the number of initial spaces in a string"""
+
+ ini_spaces = ini_spaces_re.match(strng)
+ if ini_spaces:
+ return ini_spaces.end()
+ else:
+ return 0
+
+
+def format_screen(strng):
+ """Format a string for screen printing.
+
+ This removes some latex-type format codes."""
+ # Paragraph continue
+ par_re = re.compile(r'\\$',re.MULTILINE)
+ strng = par_re.sub('',strng)
+ return strng
+
+
+def dedent(text):
+ """Equivalent of textwrap.dedent that ignores unindented first line.
+
+ This means it will still dedent strings like:
+ '''foo
+ is a bar
+ '''
+
+ For use in wrap_paragraphs.
+ """
+
+ if text.startswith('\n'):
+ # text starts with blank line, don't ignore the first line
+ return textwrap.dedent(text)
+
+ # split first line
+ splits = text.split('\n',1)
+ if len(splits) == 1:
+ # only one line
+ return textwrap.dedent(text)
+
+ first, rest = splits
+ # dedent everything but the first line
+ rest = textwrap.dedent(rest)
+ return '\n'.join([first, rest])
+
+
+def wrap_paragraphs(text, ncols=80):
+ """Wrap multiple paragraphs to fit a specified width.
+
+ This is equivalent to textwrap.wrap, but with support for multiple
+ paragraphs, as separated by empty lines.
+
+ Returns
+ -------
+ list of complete paragraphs, wrapped to fill `ncols` columns.
+ """
+ paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
+ text = dedent(text).strip()
+ paragraphs = paragraph_re.split(text)[::2] # every other entry is space
+ out_ps = []
+ indent_re = re.compile(r'\n\s+', re.MULTILINE)
+ for p in paragraphs:
+ # presume indentation that survives dedent is meaningful formatting,
+ # so don't fill unless text is flush.
+ if indent_re.search(p) is None:
+ # wrap paragraph
+ p = textwrap.fill(p, ncols)
+ out_ps.append(p)
+ return out_ps
+
+
+def strip_email_quotes(text):
+ """Strip leading email quotation characters ('>').
+
+ Removes any combination of leading '>' interspersed with whitespace that
+ appears *identically* in all lines of the input text.
+
+ Parameters
+ ----------
+ text : str
+
+ Examples
+ --------
+
+ Simple uses::
+
+ In [2]: strip_email_quotes('> > text')
+ Out[2]: 'text'
+
+ In [3]: strip_email_quotes('> > text\\n> > more')
+ Out[3]: 'text\\nmore'
+
+ Note how only the common prefix that appears in all lines is stripped::
+
+ In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
+ Out[4]: '> text\\n> more\\nmore...'
+
+ So if any line has no quote marks ('>'), then none are stripped from any
+ of them ::
+
+ In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
+ Out[5]: '> > text\\n> > more\\nlast different'
+ """
+ lines = text.splitlines()
+ strip_len = 0
+
+ for characters in zip(*lines):
+ # Check if all characters in this position are the same
+ if len(set(characters)) > 1:
+ break
+ prefix_char = characters[0]
+
+ if prefix_char in string.whitespace or prefix_char == ">":
+ strip_len += 1
+ else:
+ break
+
+ text = "\n".join([ln[strip_len:] for ln in lines])
+ return text
+
+
+def strip_ansi(source):
+ """
+ Remove ansi escape codes from text.
+
+ Parameters
+ ----------
+ source : str
+ Source to remove the ansi from
+ """
+ return re.sub(r'\033\[(\d|;)+?m', '', source)
+
+
+class EvalFormatter(Formatter):
+ """A String Formatter that allows evaluation of simple expressions.
+
+ Note that this version interprets a `:` as specifying a format string (as per
+ standard string formatting), so if slicing is required, you must explicitly
+ create a slice.
+
+ This is to be used in templating cases, such as the parallel batch
+ script templates, where simple arithmetic on arguments is useful.
+
+ Examples
+ --------
+ ::
+
+ In [1]: f = EvalFormatter()
+ In [2]: f.format('{n//4}', n=8)
+ Out[2]: '2'
+
+ In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
+ Out[3]: 'll'
+ """
+ def get_field(self, name, args, kwargs):
+ v = eval(name, kwargs)
+ return v, name
+
+#XXX: As of Python 3.4, the format string parsing no longer splits on a colon
+# inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
+# above, it should be possible to remove FullEvalFormatter.
+
+class FullEvalFormatter(Formatter):
+ """A String Formatter that allows evaluation of simple expressions.
+
+ Any time a format key is not found in the kwargs,
+ it will be tried as an expression in the kwargs namespace.
+
+ Note that this version allows slicing using [1:2], so you cannot specify
+ a format string. Use :class:`EvalFormatter` to permit format strings.
+
+ Examples
+ --------
+ ::
+
+ In [1]: f = FullEvalFormatter()
+ In [2]: f.format('{n//4}', n=8)
+ Out[2]: '2'
+
+ In [3]: f.format('{list(range(5))[2:4]}')
+ Out[3]: '[2, 3]'
+
+ In [4]: f.format('{3*2}')
+ Out[4]: '6'
+ """
+ # copied from Formatter._vformat with minor changes to allow eval
+ # and replace the format_spec code with slicing
+ def vformat(self, format_string:str, args, kwargs)->str:
+ result = []
+ for literal_text, field_name, format_spec, conversion in \
+ self.parse(format_string):
+
+ # output the literal text
+ if literal_text:
+ result.append(literal_text)
+
+ # if there's a field, output it
+ if field_name is not None:
+ # this is some markup, find the object and do
+ # the formatting
+
+ if format_spec:
+ # override format spec, to allow slicing:
+ field_name = ':'.join([field_name, format_spec])
+
+ # eval the contents of the field for the object
+ # to be formatted
+ obj = eval(field_name, kwargs)
+
+ # do any conversion on the resulting object
+ obj = self.convert_field(obj, conversion)
+
+ # format the object and append to the result
+ result.append(self.format_field(obj, ''))
+
+ return ''.join(result)
+
+
+class DollarFormatter(FullEvalFormatter):
+ """Formatter allowing Itpl style $foo replacement, for names and attribute
+ access only. Standard {foo} replacement also works, and allows full
+ evaluation of its arguments.
+
+ Examples
+ --------
+ ::
+
+ In [1]: f = DollarFormatter()
+ In [2]: f.format('{n//4}', n=8)
+ Out[2]: '2'
+
+ In [3]: f.format('23 * 76 is $result', result=23*76)
+ Out[3]: '23 * 76 is 1748'
+
+ In [4]: f.format('$a or {b}', a=1, b=2)
+ Out[4]: '1 or 2'
+ """
+ _dollar_pattern_ignore_single_quote = re.compile(r"(.*?)\$(\$?[\w\.]+)(?=([^']*'[^']*')*[^']*$)")
+ def parse(self, fmt_string):
+ for literal_txt, field_name, format_spec, conversion \
+ in Formatter.parse(self, fmt_string):
+
+ # Find $foo patterns in the literal text.
+ continue_from = 0
+ txt = ""
+ for m in self._dollar_pattern_ignore_single_quote.finditer(literal_txt):
+ new_txt, new_field = m.group(1,2)
+ # $$foo --> $foo
+ if new_field.startswith("$"):
+ txt += new_txt + new_field
+ else:
+ yield (txt + new_txt, new_field, "", None)
+ txt = ""
+ continue_from = m.end()
+
+ # Re-yield the {foo} style pattern
+ yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
+
+ def __repr__(self):
+ return "<DollarFormatter>"
+
+#-----------------------------------------------------------------------------
+# Utils to columnize a list of string
+#-----------------------------------------------------------------------------
+
+def _col_chunks(l, max_rows, row_first=False):
+ """Yield successive max_rows-sized column chunks from l."""
+ if row_first:
+ ncols = (len(l) // max_rows) + (len(l) % max_rows > 0)
+ for i in range(ncols):
+ yield [l[j] for j in range(i, len(l), ncols)]
+ else:
+ for i in range(0, len(l), max_rows):
+ yield l[i:(i + max_rows)]
+
+
+def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80):
+ """Calculate optimal info to columnize a list of string"""
+ for max_rows in range(1, len(rlist) + 1):
+ col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first)))
+ sumlength = sum(col_widths)
+ ncols = len(col_widths)
+ if sumlength + separator_size * (ncols - 1) <= displaywidth:
+ break
+ return {'num_columns': ncols,
+ 'optimal_separator_width': (displaywidth - sumlength) // (ncols - 1) if (ncols - 1) else 0,
+ 'max_rows': max_rows,
+ 'column_widths': col_widths
+ }
+
+
+def _get_or_default(mylist, i, default=None):
+ """return list item number, or default if don't exist"""
+ if i >= len(mylist):
+ return default
+ else :
+ return mylist[i]
+
+
+def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) :
+ """Returns a nested list, and info to columnize items
+
+ Parameters
+ ----------
+ items
+        list of strings to columnize
+ row_first : (default False)
+ Whether to compute columns for a row-first matrix instead of
+ column-first (default).
+ empty : (default None)
+ default value to fill list if needed
+ separator_size : int (default=2)
+        How many characters will be used as separation between columns.
+ displaywidth : int (default=80)
+        The width of the display area the columns must fit into.
+
+ Returns
+ -------
+ strings_matrix
+        nested list of strings; the outermost list contains as many lists as
+        there are rows, and each inner list has as many elements as there are
+        columns. If the total number of elements in `items` does not equal the
+        product of rows*columns, the last elements of some lists are filled
+        with `empty` (None by default).
+ dict_info
+ some info to make columnize easier:
+
+ num_columns
+ number of columns
+ max_rows
+ maximum number of rows (final number may be less)
+ column_widths
+            list of widths of each column
+ optimal_separator_width
+ best separator width between columns
+
+ Examples
+ --------
+ ::
+
+ In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
+ In [2]: list, info = compute_item_matrix(l, displaywidth=12)
+ In [3]: list
+ Out[3]: [['aaa', 'f', 'k'], ['b', 'g', 'l'], ['cc', 'h', None], ['d', 'i', None], ['eeeee', 'j', None]]
+ In [4]: ideal = {'num_columns': 3, 'column_widths': [5, 1, 1], 'optimal_separator_width': 2, 'max_rows': 5}
+ In [5]: all((info[k] == ideal[k] for k in ideal.keys()))
+ Out[5]: True
+ """
+ info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
+ nrow, ncol = info['max_rows'], info['num_columns']
+ if row_first:
+ return ([[_get_or_default(items, r * ncol + c, default=empty) for c in range(ncol)] for r in range(nrow)], info)
+ else:
+ return ([[_get_or_default(items, c * nrow + r, default=empty) for c in range(ncol)] for r in range(nrow)], info)
+
+
+def columnize(items, row_first=False, separator=" ", displaywidth=80, spread=False):
+ """Transform a list of strings into a single string with columns.
+
+ Parameters
+ ----------
+ items : sequence of strings
+ The strings to process.
+ row_first : (default False)
+ Whether to compute columns for a row-first matrix instead of
+ column-first (default).
+ separator : str, optional [default is two spaces]
+ The string that separates columns.
+    displaywidth : int, optional [default is 80]
+        Width of the display in number of characters.
+    spread : bool, optional [default is False]
+        If True, pad the separator so the columns spread across the full
+        display width.
+
+ Returns
+ -------
+ The formatted string.
+ """
+ if not items:
+ return '\n'
+ matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth)
+ if spread:
+ separator = separator.ljust(int(info['optimal_separator_width']))
+ fmatrix = [filter(None, x) for x in matrix]
+ sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])])
+ return '\n'.join(map(sjoin, fmatrix))+'\n'
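+
+# Illustrative usage (editor's sketch, not part of the upstream module):
+#
+#     columnize(["aaa", "b", "cc", "d", "eeeee", "f"], displaywidth=12)
+#
+# returns a single newline-terminated string with the items laid out in
+# columns that fit within 12 characters per row.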
+
+
+def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
+ """
+ Return a string with a natural enumeration of items
+
+ >>> get_text_list(['a', 'b', 'c', 'd'])
+ 'a, b, c and d'
+ >>> get_text_list(['a', 'b', 'c'], ' or ')
+ 'a, b or c'
+ >>> get_text_list(['a', 'b', 'c'], ', ')
+ 'a, b, c'
+ >>> get_text_list(['a', 'b'], ' or ')
+ 'a or b'
+ >>> get_text_list(['a'])
+ 'a'
+ >>> get_text_list([])
+ ''
+ >>> get_text_list(['a', 'b'], wrap_item_with="`")
+ '`a` and `b`'
+ >>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
+ 'a + b + c = d'
+ """
+ if len(list_) == 0:
+ return ''
+ if wrap_item_with:
+ list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
+ item in list_]
+ if len(list_) == 1:
+ return list_[0]
+ return '%s%s%s' % (
+ sep.join(i for i in list_[:-1]),
+ last_sep, list_[-1])
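
For reference, a minimal usage sketch for ``columnize`` above (the sample
strings and widths are illustrative only)::

    from IPython.utils.text import columnize

    names = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta']
    # Column-first layout constrained to a 40-character display.
    print(columnize(names, displaywidth=40), end='')
    # Row-first layout with a wider separator.
    print(columnize(names, row_first=True, separator='  |  ', displaywidth=40), end='')
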
diff --git a/contrib/python/ipython/py3/IPython/utils/timing.py b/contrib/python/ipython/py3/IPython/utils/timing.py
new file mode 100644
index 0000000000..3a181ae728
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/timing.py
@@ -0,0 +1,123 @@
+# encoding: utf-8
+"""
+Utilities for timing code execution.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import time
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+# If possible (Unix), use the resource module rather than time.process_time()
+try:
+ import resource
+except ImportError:
+ resource = None
+
+# Some implementations (like JupyterLite) don't have getrusage
+if resource is not None and hasattr(resource, "getrusage"):
+ def clocku():
+ """clocku() -> floating point number
+
+ Return the *USER* CPU time in seconds since the start of the process.
+ This is done via a call to resource.getrusage, so it avoids the
+ wraparound problems in time.clock()."""
+
+ return resource.getrusage(resource.RUSAGE_SELF)[0]
+
+ def clocks():
+ """clocks() -> floating point number
+
+ Return the *SYSTEM* CPU time in seconds since the start of the process.
+ This is done via a call to resource.getrusage, so it avoids the
+ wraparound problems in time.clock()."""
+
+ return resource.getrusage(resource.RUSAGE_SELF)[1]
+
+ def clock():
+ """clock() -> floating point number
+
+ Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
+ the process. This is done via a call to resource.getrusage, so it
+ avoids the wraparound problems in time.clock()."""
+
+ u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
+ return u+s
+
+ def clock2():
+ """clock2() -> (t_user,t_system)
+
+ Similar to clock(), but return a tuple of user/system times."""
+ return resource.getrusage(resource.RUSAGE_SELF)[:2]
+
+else:
+ # There is no distinction between user and system time on Windows, so we
+ # just use time.process_time() for everything...
+ clocku = clocks = clock = time.process_time
+
+ def clock2():
+ """Under windows, system CPU time can't be measured.
+
+ This just returns process_time() and zero."""
+ return time.process_time(), 0.0
+
+
+def timings_out(reps,func,*args,**kw):
+ """timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)
+
+ Execute a function reps times, return a tuple with the elapsed total
+ CPU time in seconds, the time per call and the function's output.
+
+ Under Unix, the return value is the sum of user+system time consumed by
+ the process, computed via the resource module. This prevents problems
+ related to the wraparound effect which the time.clock() function has.
+
+ Under Windows the return value is in wall clock seconds. See the
+ documentation for the time module for more details."""
+
+ reps = int(reps)
+ assert reps >=1, 'reps must be >= 1'
+ if reps==1:
+ start = clock()
+ out = func(*args,**kw)
+ tot_time = clock()-start
+ else:
+ rng = range(reps-1) # the last time is executed separately to store output
+ start = clock()
+ for dummy in rng: func(*args,**kw)
+ out = func(*args,**kw) # one last time
+ tot_time = clock()-start
+ av_time = tot_time / reps
+ return tot_time,av_time,out
+
+
+def timings(reps,func,*args,**kw):
+ """timings(reps,func,*args,**kw) -> (t_total,t_per_call)
+
+ Execute a function reps times, return a tuple with the elapsed total CPU
+ time in seconds and the time per call. These are just the first two values
+ in timings_out()."""
+
+ return timings_out(reps,func,*args,**kw)[0:2]
+
+
+def timing(func,*args,**kw):
+ """timing(func,*args,**kw) -> t_total
+
+ Execute a function once, return the elapsed total CPU time in
+ seconds. This is just the first value in timings_out()."""
+
+ return timings_out(1,func,*args,**kw)[0]
+
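A brief sketch of how the timing helpers above are typically called (the toy
workload is illustrative only)::

    from IPython.utils.timing import clock2, timing, timings

    def workload():
        return sum(i * i for i in range(100_000))

    total, per_call = timings(10, workload)  # total CPU time and time per call
    single = timing(workload)                # CPU time for a single call
    user, system = clock2()                  # cumulative user/system CPU time so far
    print(total, per_call, single, user, system)
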
diff --git a/contrib/python/ipython/py3/IPython/utils/tokenutil.py b/contrib/python/ipython/py3/IPython/utils/tokenutil.py
new file mode 100644
index 0000000000..697d2b504a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/tokenutil.py
@@ -0,0 +1,127 @@
+"""Token-related utilities"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from collections import namedtuple
+from io import StringIO
+from keyword import iskeyword
+
+import tokenize
+
+
+Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
+
+def generate_tokens(readline):
+ """wrap generate_tokens to catch EOF errors"""
+ try:
+ for token in tokenize.generate_tokens(readline):
+ yield token
+ except tokenize.TokenError:
+ # catch EOF error
+ return
+
+def line_at_cursor(cell, cursor_pos=0):
+ """Return the line in a cell at a given cursor position
+
+ Used for calling line-based APIs that don't support multi-line input, yet.
+
+ Parameters
+ ----------
+ cell : str
+ multiline block of text
+ cursor_pos : integer
+ the cursor position
+
+ Returns
+ -------
+ (line, offset): (string, integer)
+ The line with the current cursor, and the character offset of the start of the line.
+ """
+ offset = 0
+ lines = cell.splitlines(True)
+ for line in lines:
+ next_offset = offset + len(line)
+ if not line.endswith('\n'):
+ # If the last line doesn't have a trailing newline, treat it as if
+ # it does so that the cursor at the end of the line still counts
+ # as being on that line.
+ next_offset += 1
+ if next_offset > cursor_pos:
+ break
+ offset = next_offset
+ else:
+ line = ""
+ return (line, offset)
+
+def token_at_cursor(cell, cursor_pos=0):
+ """Get the token at a given cursor
+
+ Used for introspection.
+
+ Function calls are prioritized, so the token for the callable will be returned
+ if the cursor is anywhere inside the call.
+
+ Parameters
+ ----------
+ cell : unicode
+ A block of Python code
+ cursor_pos : int
+ The location of the cursor in the block where the token should be found
+ """
+ names = []
+ tokens = []
+ call_names = []
+
+ offsets = {1: 0} # lines start at 1
+ for tup in generate_tokens(StringIO(cell).readline):
+
+ tok = Token(*tup)
+
+ # token, text, start, end, line = tup
+ start_line, start_col = tok.start
+ end_line, end_col = tok.end
+ if end_line + 1 not in offsets:
+ # keep track of offsets for each line
+ lines = tok.line.splitlines(True)
+ for lineno, line in enumerate(lines, start_line + 1):
+ if lineno not in offsets:
+ offsets[lineno] = offsets[lineno-1] + len(line)
+
+ offset = offsets[start_line]
+ # allow '|foo' to find 'foo' at the beginning of a line
+ boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
+ if offset + start_col >= boundary:
+ # current token starts after the cursor,
+ # don't consume it
+ break
+
+ if tok.token == tokenize.NAME and not iskeyword(tok.text):
+ if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
+ names[-1] = "%s.%s" % (names[-1], tok.text)
+ else:
+ names.append(tok.text)
+ elif tok.token == tokenize.OP:
+ if tok.text == '=' and names:
+ # don't inspect the lhs of an assignment
+ names.pop(-1)
+ if tok.text == '(' and names:
+ # if we are inside a function call, inspect the function
+ call_names.append(names[-1])
+ elif tok.text == ')' and call_names:
+ call_names.pop(-1)
+
+ tokens.append(tok)
+
+ if offsets[end_line] + end_col > cursor_pos:
+ # we found the cursor, stop reading
+ break
+
+ if call_names:
+ return call_names[-1]
+ elif names:
+ return names[-1]
+ else:
+ return ''
+
+
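A small sketch of the two helpers above on a toy cell; with the cursor on the
call argument, the enclosing callable is what gets inspected::

    from IPython.utils.tokenutil import line_at_cursor, token_at_cursor

    cell = "import numpy as np\nnp.mean(data)\n"
    cursor = cell.index("data")           # place the cursor on the argument
    line, offset = line_at_cursor(cell, cursor)
    print(repr(line), offset)             # 'np.mean(data)\n', 19
    print(token_at_cursor(cell, cursor))  # 'np.mean' (the call is prioritized)
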
diff --git a/contrib/python/ipython/py3/IPython/utils/traitlets.py b/contrib/python/ipython/py3/IPython/utils/traitlets.py
new file mode 100644
index 0000000000..2f979fa727
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/traitlets.py
@@ -0,0 +1,6 @@
+
+from warnings import warn
+
+warn("IPython.utils.traitlets has moved to a top-level traitlets package.", stacklevel=2)
+
+from traitlets import *
diff --git a/contrib/python/ipython/py3/IPython/utils/tz.py b/contrib/python/ipython/py3/IPython/utils/tz.py
new file mode 100644
index 0000000000..dbe1e8e732
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/tz.py
@@ -0,0 +1,48 @@
+# encoding: utf-8
+"""
+Timezone utilities
+
+Just UTC-awareness right now
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from datetime import tzinfo, timedelta, datetime
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+# constant for zero offset
+ZERO = timedelta(0)
+
+class tzUTC(tzinfo):
+ """tzinfo object for UTC (zero offset)"""
+
+ def utcoffset(self, d):
+ return ZERO
+
+ def dst(self, d):
+ return ZERO
+
+
+UTC = tzUTC() # type: ignore[abstract]
+
+
+def utc_aware(unaware):
+ """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
+ def utc_method(*args, **kwargs):
+ dt = unaware(*args, **kwargs)
+ return dt.replace(tzinfo=UTC)
+ return utc_method
+
+utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
+utcnow = utc_aware(datetime.utcnow)
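
A quick sketch of the wrappers above; note that ``datetime.utcnow`` and
``datetime.utcfromtimestamp`` are themselves deprecated on recent Pythons, and
the wrappers merely attach the UTC tzinfo to whatever they return::

    from IPython.utils.tz import UTC, utcfromtimestamp, utcnow

    now = utcnow()                # timezone-aware "now"
    epoch = utcfromtimestamp(0)   # 1970-01-01 00:00:00+00:00
    assert now.tzinfo is UTC and epoch.tzinfo is UTC
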
diff --git a/contrib/python/ipython/py3/IPython/utils/ulinecache.py b/contrib/python/ipython/py3/IPython/utils/ulinecache.py
new file mode 100644
index 0000000000..0b4ede08e6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/ulinecache.py
@@ -0,0 +1,21 @@
+"""
+This module has been deprecated since IPython 6.0.
+
+Wrapper around linecache which decodes files to unicode according to PEP 263.
+"""
+import functools
+import linecache
+from warnings import warn
+
+getline = linecache.getline
+
+# getlines has to be looked up at runtime, because doctests monkeypatch it.
+@functools.wraps(linecache.getlines)
+def getlines(filename, module_globals=None):
+ """
+ Deprecated since IPython 6.0
+ """
+ warn(("`IPython.utils.ulinecache.getlines` is deprecated since"
+ " IPython 6.0 and will be removed in future versions."),
+ DeprecationWarning, stacklevel=2)
+ return linecache.getlines(filename, module_globals=module_globals)
diff --git a/contrib/python/ipython/py3/IPython/utils/version.py b/contrib/python/ipython/py3/IPython/utils/version.py
new file mode 100644
index 0000000000..8c65c78e15
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/version.py
@@ -0,0 +1,43 @@
+# encoding: utf-8
+"""
+Utilities for version comparison
+
+It is a bit ridiculous that we need these.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+from warnings import warn
+
+warn(
+ "The `IPython.utils.version` module has been deprecated since IPython 8.0.",
+ DeprecationWarning,
+)
+
+
+def check_version(v, check):
+ """check version string v >= check
+
+ If dev/prerelease tags result in TypeError for string-number comparison,
+ it is assumed that the dependency is satisfied.
+ Users on dev branches are responsible for keeping their own packages up to date.
+ """
+ warn(
+ "`check_version` function is deprecated as of IPython 8.0"
+ "and will be removed in future versions.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from distutils.version import LooseVersion
+
+ try:
+ return LooseVersion(v) >= LooseVersion(check)
+ except TypeError:
+ return True
+
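Although deprecated, the comparison semantics are easy to demonstrate; this
sketch assumes a Python that still ships ``distutils`` (removed in 3.12)::

    from IPython.utils.version import check_version  # emits a DeprecationWarning

    assert check_version("8.14.0", "8.0")        # ordinary version comparison
    assert not check_version("7.34.0", "8.0")
    assert check_version("8.0.dev0", "8.0.1")    # TypeError inside -> assumed satisfied
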
diff --git a/contrib/python/ipython/py3/IPython/utils/wildcard.py b/contrib/python/ipython/py3/IPython/utils/wildcard.py
new file mode 100644
index 0000000000..cbef8c5175
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/utils/wildcard.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+"""Support for wildcard pattern matching in object inspection.
+
+Authors
+-------
+- Jörgen Stenarson <jorgen.stenarson@bostream.nu>
+- Thomas Kluyver
+"""
+
+#*****************************************************************************
+# Copyright (C) 2005 Jörgen Stenarson <jorgen.stenarson@bostream.nu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import re
+import types
+
+from IPython.utils.dir2 import dir2
+
+def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]):
+ """Return dictionaries mapping lower case typename (e.g. 'tuple') to type
+ objects from the types package, and vice versa."""
+ typenamelist = [tname for tname in dir(types) if tname.endswith("Type")]
+ typestr2type, type2typestr = {}, {}
+
+ for tname in typenamelist:
+ name = tname[:-4].lower() # Cut 'Type' off the end of the name
+ obj = getattr(types, tname)
+ typestr2type[name] = obj
+ if name not in dont_include_in_type2typestr:
+ type2typestr[obj] = name
+ return typestr2type, type2typestr
+
+typestr2type, type2typestr = create_typestr2type_dicts()
+
+def is_type(obj, typestr_or_type):
+ """is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
+ can take strings or actual Python types for the second argument, e.g.
+ 'function' <-> types.FunctionType. 'all' matches all types.
+
+ TODO: Should be extended for choosing more than one type."""
+ if typestr_or_type == "all":
+ return True
+ if type(typestr_or_type) == type:
+ test_type = typestr_or_type
+ else:
+ test_type = typestr2type.get(typestr_or_type, False)
+ if test_type:
+ return isinstance(obj, test_type)
+ return False
+
+def show_hidden(str, show_all=False):
+ """Return true for strings starting with single _ if show_all is true."""
+ return show_all or str.startswith("__") or not str.startswith("_")
+
+def dict_dir(obj):
+ """Produce a dictionary of an object's attributes. Builds on dir2 by
+ checking that a getattr() call actually succeeds."""
+ ns = {}
+ for key in dir2(obj):
+ # This seemingly unnecessary try/except is actually needed
+ # because there is code out there with metaclasses that
+ # create 'write only' attributes, where a getattr() call
+ # will fail even if the attribute appears listed in the
+ # object's dictionary. Properties can actually do the same
+ # thing. In particular, Traits use this pattern
+ try:
+ ns[key] = getattr(obj, key)
+ except AttributeError:
+ pass
+ return ns
+
+def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True,
+ show_all=True):
+ """Filter a namespace dictionary by name pattern and item type."""
+ pattern = name_pattern.replace("*",".*").replace("?",".")
+ if ignore_case:
+ reg = re.compile(pattern+"$", re.I)
+ else:
+ reg = re.compile(pattern+"$")
+
+ # Check each one matches regex; shouldn't be hidden; of correct type.
+ return dict((key,obj) for key, obj in ns.items() if reg.match(key) \
+ and show_hidden(key, show_all) \
+ and is_type(obj, type_pattern) )
+
+def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False):
+ """Return dictionary of all objects in a namespace dictionary that match
+ type_pattern and filter."""
+ pattern_list=filter.split(".")
+ if len(pattern_list) == 1:
+ return filter_ns(namespace, name_pattern=pattern_list[0],
+ type_pattern=type_pattern,
+ ignore_case=ignore_case, show_all=show_all)
+ else:
+ # This is where we can change if all objects should be searched or
+ # only modules. Just change the type_pattern to module to search only
+ # modules
+ filtered = filter_ns(namespace, name_pattern=pattern_list[0],
+ type_pattern="all",
+ ignore_case=ignore_case, show_all=show_all)
+ results = {}
+ for name, obj in filtered.items():
+ ns = list_namespace(dict_dir(obj), type_pattern,
+ ".".join(pattern_list[1:]),
+ ignore_case=ignore_case, show_all=show_all)
+ for inner_name, inner_obj in ns.items():
+ results["%s.%s"%(name,inner_name)] = inner_obj
+ return results
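
A sketch of how these helpers combine; this mirrors what IPython's ``%psearch``
machinery does, and the namespace below is illustrative only::

    import os
    from IPython.utils.wildcard import list_namespace

    ns = {"os": os}
    # Find every plain Python function reachable as os.path.is* (isdir, isfile, ...).
    matches = list_namespace(ns, "function", "os.path.is*", ignore_case=True)
    print(sorted(matches))
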
diff --git a/contrib/python/ipython/py3/LICENSE b/contrib/python/ipython/py3/LICENSE
new file mode 100644
index 0000000000..d4bb8d39df
--- /dev/null
+++ b/contrib/python/ipython/py3/LICENSE
@@ -0,0 +1,33 @@
+BSD 3-Clause License
+
+- Copyright (c) 2008-Present, IPython Development Team
+- Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
+- Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+- Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/ipython/py3/README.rst b/contrib/python/ipython/py3/README.rst
new file mode 100644
index 0000000000..b004792e0e
--- /dev/null
+++ b/contrib/python/ipython/py3/README.rst
@@ -0,0 +1,174 @@
+.. image:: https://codecov.io/github/ipython/ipython/coverage.svg?branch=main
+ :target: https://codecov.io/github/ipython/ipython?branch=main
+
+.. image:: https://img.shields.io/pypi/v/IPython.svg
+ :target: https://pypi.python.org/pypi/ipython
+
+.. image:: https://github.com/ipython/ipython/actions/workflows/test.yml/badge.svg
+ :target: https://github.com/ipython/ipython/actions/workflows/test.yml
+
+.. image:: https://www.codetriage.com/ipython/ipython/badges/users.svg
+ :target: https://www.codetriage.com/ipython/ipython/
+
+.. image:: https://raster.shields.io/badge/Follows-NEP29-brightgreen.png
+ :target: https://numpy.org/neps/nep-0029-deprecation_policy.html
+
+.. image:: https://tidelift.com/badges/package/pypi/ipython?style=flat
+ :target: https://tidelift.com/subscription/pkg/pypi-ipython
+
+
+===========================================
+ IPython: Productive Interactive Computing
+===========================================
+
+Overview
+========
+
+Welcome to IPython. Our full documentation is available on `ipython.readthedocs.io
+<https://ipython.readthedocs.io/en/stable/>`_ and contains information on how to install, use, and
+contribute to the project.
+IPython (Interactive Python) is a command shell for interactive computing in multiple programming languages, originally developed for the Python programming language, that offers introspection, rich media, shell syntax, tab completion, and history.
+
+**IPython versions and Python Support**
+
+Starting with IPython 7.10, IPython follows `NEP 29 <https://numpy.org/neps/nep-0029-deprecation_policy.html>`_
+
+**IPython 7.17+** requires Python version 3.7 and above.
+
+**IPython 7.10+** requires Python version 3.6 and above.
+
+**IPython 7.0** requires Python version 3.5 and above.
+
+**IPython 6.x** requires Python version 3.3 and above.
+
+**IPython 5.x LTS** is the compatible release for Python 2.7.
+If you require Python 2 support, you **must** use IPython 5.x LTS. Please
+update your project configurations and requirements as necessary.
+
+
+The Notebook, Qt console and a number of other pieces are now part of *Jupyter*.
+See the `Jupyter installation docs <https://jupyter.readthedocs.io/en/latest/install.html>`__
+if you want to use these.
+
+Main features of IPython
+========================
+Comprehensive object introspection.
+
+Input history, persistent across sessions.
+
+Caching of output results during a session with automatically generated references.
+
+Extensible tab completion, with support by default for completion of python variables and keywords, filenames and function keywords.
+
+Extensible system of ‘magic’ commands for controlling the environment and performing many tasks related to IPython or the operating system.
+
+A rich configuration system with easy switching between different setups (simpler than changing $PYTHONSTARTUP environment variables every time).
+
+Session logging and reloading.
+
+Extensible syntax processing for special purpose situations.
+
+Access to the system shell with user-extensible alias system.
+
+Easily embeddable in other Python programs and GUIs.
+
+Integrated access to the pdb debugger and the Python profiler.
+
+
+Development and Instant running
+===============================
+
+You can find the latest version of the development documentation on `readthedocs
+<https://ipython.readthedocs.io/en/latest/>`_.
+
+You can run IPython from this directory without even installing it system-wide
+by typing at the terminal::
+
+ $ python -m IPython
+
+Or see the `development installation docs
+<https://ipython.readthedocs.io/en/latest/install/install.html#installing-the-development-version>`_
+for the latest revision on read the docs.
+
+Documentation and installation instructions for older versions of IPython can be
+found on the `IPython website <https://ipython.org/documentation.html>`_
+
+
+
+IPython requires Python version 3 or above
+==========================================
+
+Starting with version 6.0, IPython does not support Python 2.7, 3.0, 3.1, or
+3.2.
+
+For a version compatible with Python 2.7, please install the 5.x LTS Long Term
+Support version.
+
+If you are encountering this error message you are likely trying to install or
+use IPython from source. You need to check out the remote 5.x branch. If you are
+using git the following should work::
+
+ $ git fetch origin
+ $ git checkout 5.x
+
+If you encounter this error message with a regular install of IPython, then you
+likely need to update your package manager; for example, if you are using `pip`,
+check the version of pip with::
+
+ $ pip --version
+
+You will need to update pip to version 9.0.1 or greater. If you are not using
+pip, please inquire with the maintainers of the package for your package
+manager.
+
+For more information see one of our blog posts:
+
+ https://blog.jupyter.org/release-of-ipython-5-0-8ce60b8d2e8e
+
+As well as the following Pull-Request for discussion:
+
+ https://github.com/ipython/ipython/pull/9900
+
+This error also occurs if you are invoking ``setup.py`` directly (which you
+should not) or are using ``easy_install``. If this is the case, use ``pip
+install .`` instead of ``setup.py install``, and ``pip install -e .`` instead
+of ``setup.py develop``. If your project depends on IPython, you may also
+want to make that dependency conditional on the Python
+version::
+
+ install_req = ['ipython']
+ if sys.version_info[0] < 3 and 'bdist_wheel' not in sys.argv:
+ install_req.remove('ipython')
+ install_req.append('ipython<6')
+
+ setup(
+ ...
+ install_requires=install_req
+ )
+
+Alternatives to IPython
+=======================
+
+IPython may not be to your taste; if that's the case, there are similar
+projects that you might want to use:
+
+- The classic Python REPL.
+- `bpython <https://bpython-interpreter.org/>`_
+- `mypython <https://www.asmeurer.com/mypython/>`_
+- `ptpython and ptipython <https://pypi.org/project/ptpython/>`_
+- `Xonsh <https://xon.sh/>`_
+
+Ignoring commits with git blame.ignoreRevsFile
+==============================================
+
+As of git 2.23, it is possible to make formatting changes without breaking
+``git blame``. See the `git documentation
+<https://git-scm.com/docs/git-config#Documentation/git-config.txt-blameignoreRevsFile>`_
+for more details.
+
+To use this feature you must:
+
+- Install git >= 2.23
+- Configure your local git repo by running:
+ - POSIX: ``tools/configure-git-blame-ignore-revs.sh``
+ - Windows: ``tools\configure-git-blame-ignore-revs.bat``
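
The helper scripts essentially set a single git config option; assuming the
revision list lives in ``.git-blame-ignore-revs`` (the conventional file name),
the manual equivalent is::

    $ git config blame.ignoreRevsFile .git-blame-ignore-revs
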
diff --git a/contrib/python/ipython/py3/bin/ya.make b/contrib/python/ipython/py3/bin/ya.make
new file mode 100644
index 0000000000..40cc790c2d
--- /dev/null
+++ b/contrib/python/ipython/py3/bin/ya.make
@@ -0,0 +1,11 @@
+PY3_PROGRAM(ipython)
+
+LICENSE(BSD-3-Clause)
+
+PEERDIR(
+ contrib/python/ipython
+)
+
+PY_MAIN(IPython:start_ipython)
+
+END()
diff --git a/contrib/python/ipython/py3/ya.make b/contrib/python/ipython/py3/ya.make
new file mode 100644
index 0000000000..e9d28face4
--- /dev/null
+++ b/contrib/python/ipython/py3/ya.make
@@ -0,0 +1,237 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(8.14.0)
+
+LICENSE(BSD-3-Clause)
+
+PEERDIR(
+ contrib/python/Pygments
+ contrib/python/backcall
+ contrib/python/decorator
+ contrib/python/jedi
+ contrib/python/matplotlib-inline
+ contrib/python/pickleshare
+ contrib/python/prompt-toolkit
+ contrib/python/stack-data
+ contrib/python/traitlets
+)
+
+IF (OS_WINDOWS)
+ PEERDIR(
+ contrib/python/colorama
+ )
+ELSE()
+ PEERDIR(
+ contrib/python/pexpect
+ )
+ENDIF()
+
+IF (OS_DARWIN)
+ PEERDIR(
+ contrib/python/appnope
+ )
+ENDIF()
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ IPython.*
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ IPython/__init__.py
+ IPython/__main__.py
+ IPython/consoleapp.py
+ IPython/core/__init__.py
+ IPython/core/alias.py
+ IPython/core/application.py
+ IPython/core/async_helpers.py
+ IPython/core/autocall.py
+ IPython/core/builtin_trap.py
+ IPython/core/compilerop.py
+ IPython/core/completer.py
+ IPython/core/completerlib.py
+ IPython/core/crashhandler.py
+ IPython/core/debugger.py
+ IPython/core/display.py
+ IPython/core/display_functions.py
+ IPython/core/display_trap.py
+ IPython/core/displayhook.py
+ IPython/core/displaypub.py
+ IPython/core/error.py
+ IPython/core/events.py
+ IPython/core/excolors.py
+ IPython/core/extensions.py
+ IPython/core/formatters.py
+ IPython/core/getipython.py
+ IPython/core/guarded_eval.py
+ IPython/core/history.py
+ IPython/core/historyapp.py
+ IPython/core/hooks.py
+ IPython/core/inputsplitter.py
+ IPython/core/inputtransformer.py
+ IPython/core/inputtransformer2.py
+ IPython/core/interactiveshell.py
+ IPython/core/latex_symbols.py
+ IPython/core/logger.py
+ IPython/core/macro.py
+ IPython/core/magic.py
+ IPython/core/magic_arguments.py
+ IPython/core/magics/__init__.py
+ IPython/core/magics/auto.py
+ IPython/core/magics/basic.py
+ IPython/core/magics/code.py
+ IPython/core/magics/config.py
+ IPython/core/magics/display.py
+ IPython/core/magics/execution.py
+ IPython/core/magics/extension.py
+ IPython/core/magics/history.py
+ IPython/core/magics/logging.py
+ IPython/core/magics/namespace.py
+ IPython/core/magics/osm.py
+ IPython/core/magics/packaging.py
+ IPython/core/magics/pylab.py
+ IPython/core/magics/script.py
+ IPython/core/oinspect.py
+ IPython/core/page.py
+ IPython/core/payload.py
+ IPython/core/payloadpage.py
+ IPython/core/prefilter.py
+ IPython/core/profileapp.py
+ IPython/core/profiledir.py
+ IPython/core/prompts.py
+ IPython/core/pylabtools.py
+ IPython/core/release.py
+ IPython/core/shellapp.py
+ IPython/core/splitinput.py
+ IPython/core/ultratb.py
+ IPython/core/usage.py
+ IPython/display.py
+ IPython/extensions/__init__.py
+ IPython/extensions/autoreload.py
+ IPython/extensions/storemagic.py
+ IPython/external/__init__.py
+ IPython/external/qt_for_kernel.py
+ IPython/external/qt_loaders.py
+ IPython/lib/__init__.py
+ IPython/lib/backgroundjobs.py
+ IPython/lib/clipboard.py
+ IPython/lib/deepreload.py
+ IPython/lib/demo.py
+ IPython/lib/display.py
+ IPython/lib/editorhooks.py
+ IPython/lib/guisupport.py
+ IPython/lib/latextools.py
+ IPython/lib/lexers.py
+ IPython/lib/pretty.py
+ IPython/paths.py
+ IPython/sphinxext/__init__.py
+ IPython/sphinxext/custom_doctests.py
+ IPython/sphinxext/ipython_console_highlighting.py
+ IPython/sphinxext/ipython_directive.py
+ IPython/terminal/__init__.py
+ IPython/terminal/console.py
+ IPython/terminal/debugger.py
+ IPython/terminal/embed.py
+ IPython/terminal/interactiveshell.py
+ IPython/terminal/ipapp.py
+ IPython/terminal/magics.py
+ IPython/terminal/prompts.py
+ IPython/terminal/pt_inputhooks/__init__.py
+ IPython/terminal/pt_inputhooks/asyncio.py
+ IPython/terminal/pt_inputhooks/glut.py
+ IPython/terminal/pt_inputhooks/gtk.py
+ IPython/terminal/pt_inputhooks/gtk3.py
+ IPython/terminal/pt_inputhooks/gtk4.py
+ IPython/terminal/pt_inputhooks/osx.py
+ IPython/terminal/pt_inputhooks/pyglet.py
+ IPython/terminal/pt_inputhooks/qt.py
+ IPython/terminal/pt_inputhooks/tk.py
+ IPython/terminal/pt_inputhooks/wx.py
+ IPython/terminal/ptutils.py
+ IPython/terminal/shortcuts/__init__.py
+ IPython/terminal/shortcuts/auto_match.py
+ IPython/terminal/shortcuts/auto_suggest.py
+ IPython/terminal/shortcuts/filters.py
+ IPython/testing/__init__.py
+ IPython/testing/decorators.py
+ IPython/testing/globalipapp.py
+ IPython/testing/ipunittest.py
+ IPython/testing/plugin/__init__.py
+ IPython/testing/plugin/dtexample.py
+ IPython/testing/plugin/ipdoctest.py
+ IPython/testing/plugin/pytest_ipdoctest.py
+ IPython/testing/plugin/setup.py
+ IPython/testing/plugin/simple.py
+ IPython/testing/plugin/simplevars.py
+ IPython/testing/plugin/test_ipdoctest.py
+ IPython/testing/plugin/test_refs.py
+ IPython/testing/skipdoctest.py
+ IPython/testing/tools.py
+ IPython/utils/PyColorize.py
+ IPython/utils/__init__.py
+ IPython/utils/_process_cli.py
+ IPython/utils/_process_common.py
+ IPython/utils/_process_posix.py
+ IPython/utils/_process_win32.py
+ IPython/utils/_process_win32_controller.py
+ IPython/utils/_sysinfo.py
+ IPython/utils/capture.py
+ IPython/utils/colorable.py
+ IPython/utils/coloransi.py
+ IPython/utils/contexts.py
+ IPython/utils/daemonize.py
+ IPython/utils/data.py
+ IPython/utils/decorators.py
+ IPython/utils/dir2.py
+ IPython/utils/docs.py
+ IPython/utils/encoding.py
+ IPython/utils/eventful.py
+ IPython/utils/frame.py
+ IPython/utils/generics.py
+ IPython/utils/importstring.py
+ IPython/utils/io.py
+ IPython/utils/ipstruct.py
+ IPython/utils/jsonutil.py
+ IPython/utils/localinterfaces.py
+ IPython/utils/log.py
+ IPython/utils/module_paths.py
+ IPython/utils/openpy.py
+ IPython/utils/path.py
+ IPython/utils/process.py
+ IPython/utils/py3compat.py
+ IPython/utils/sentinel.py
+ IPython/utils/shimmodule.py
+ IPython/utils/signatures.py
+ IPython/utils/strdispatch.py
+ IPython/utils/sysinfo.py
+ IPython/utils/syspathcontext.py
+ IPython/utils/tempdir.py
+ IPython/utils/terminal.py
+ IPython/utils/text.py
+ IPython/utils/timing.py
+ IPython/utils/tokenutil.py
+ IPython/utils/traitlets.py
+ IPython/utils/tz.py
+ IPython/utils/ulinecache.py
+ IPython/utils/version.py
+ IPython/utils/wildcard.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/ipython/py3/
+ .dist-info/METADATA
+ .dist-info/entry_points.txt
+ .dist-info/top_level.txt
+ IPython/core/profile/README_STARTUP
+ IPython/py.typed
+)
+
+END()
+
+RECURSE(
+ bin
+)