author    nkozlovskiy <nmk@ydb.tech>  2023-09-29 12:24:06 +0300
committer nkozlovskiy <nmk@ydb.tech>  2023-09-29 12:41:34 +0300
commit    e0e3e1717e3d33762ce61950504f9637a6e669ed (patch)
tree      bca3ff6939b10ed60c3d5c12439963a1146b9711 /contrib/python/ipython/py3/IPython/core
parent    38f2c5852db84c7b4d83adfcb009eb61541d1ccd (diff)
download  ydb-e0e3e1717e3d33762ce61950504f9637a6e669ed.tar.gz
add ydb deps
Diffstat (limited to 'contrib/python/ipython/py3/IPython/core')
-rw-r--r--  contrib/python/ipython/py3/IPython/core/__init__.py | 0
-rw-r--r--  contrib/python/ipython/py3/IPython/core/alias.py | 258
-rw-r--r--  contrib/python/ipython/py3/IPython/core/application.py | 488
-rw-r--r--  contrib/python/ipython/py3/IPython/core/async_helpers.py | 156
-rw-r--r--  contrib/python/ipython/py3/IPython/core/autocall.py | 70
-rw-r--r--  contrib/python/ipython/py3/IPython/core/builtin_trap.py | 86
-rw-r--r--  contrib/python/ipython/py3/IPython/core/compilerop.py | 214
-rw-r--r--  contrib/python/ipython/py3/IPython/core/completer.py | 3347
-rw-r--r--  contrib/python/ipython/py3/IPython/core/completerlib.py | 418
-rw-r--r--  contrib/python/ipython/py3/IPython/core/crashhandler.py | 236
-rw-r--r--  contrib/python/ipython/py3/IPython/core/debugger.py | 997
-rw-r--r--  contrib/python/ipython/py3/IPython/core/display.py | 1290
-rw-r--r--  contrib/python/ipython/py3/IPython/core/display_functions.py | 391
-rw-r--r--  contrib/python/ipython/py3/IPython/core/display_trap.py | 70
-rw-r--r--  contrib/python/ipython/py3/IPython/core/displayhook.py | 331
-rw-r--r--  contrib/python/ipython/py3/IPython/core/displaypub.py | 138
-rw-r--r--  contrib/python/ipython/py3/IPython/core/error.py | 60
-rw-r--r--  contrib/python/ipython/py3/IPython/core/events.py | 166
-rw-r--r--  contrib/python/ipython/py3/IPython/core/excolors.py | 165
-rw-r--r--  contrib/python/ipython/py3/IPython/core/extensions.py | 151
-rw-r--r--  contrib/python/ipython/py3/IPython/core/formatters.py | 1028
-rw-r--r--  contrib/python/ipython/py3/IPython/core/getipython.py | 24
-rw-r--r--  contrib/python/ipython/py3/IPython/core/guarded_eval.py | 733
-rw-r--r--  contrib/python/ipython/py3/IPython/core/history.py | 968
-rw-r--r--  contrib/python/ipython/py3/IPython/core/historyapp.py | 161
-rw-r--r--  contrib/python/ipython/py3/IPython/core/hooks.py | 173
-rw-r--r--  contrib/python/ipython/py3/IPython/core/inputsplitter.py | 773
-rw-r--r--  contrib/python/ipython/py3/IPython/core/inputtransformer.py | 536
-rw-r--r--  contrib/python/ipython/py3/IPython/core/inputtransformer2.py | 797
-rw-r--r--  contrib/python/ipython/py3/IPython/core/interactiveshell.py | 3910
-rw-r--r--  contrib/python/ipython/py3/IPython/core/latex_symbols.py | 1301
-rw-r--r--  contrib/python/ipython/py3/IPython/core/logger.py | 227
-rw-r--r--  contrib/python/ipython/py3/IPython/core/macro.py | 53
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magic.py | 757
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magic_arguments.py | 310
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/__init__.py | 42
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/auto.py | 144
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/basic.py | 663
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/code.py | 755
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/config.py | 140
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/display.py | 93
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/execution.py | 1522
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/extension.py | 63
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/history.py | 338
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/logging.py | 195
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/namespace.py | 711
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/osm.py | 855
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/packaging.py | 112
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/pylab.py | 169
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/script.py | 371
-rw-r--r--  contrib/python/ipython/py3/IPython/core/oinspect.py | 1171
-rw-r--r--  contrib/python/ipython/py3/IPython/core/page.py | 348
-rw-r--r--  contrib/python/ipython/py3/IPython/core/payload.py | 55
-rw-r--r--  contrib/python/ipython/py3/IPython/core/payloadpage.py | 51
-rw-r--r--  contrib/python/ipython/py3/IPython/core/prefilter.py | 700
-rw-r--r--  contrib/python/ipython/py3/IPython/core/profile/README_STARTUP | 11
-rw-r--r--  contrib/python/ipython/py3/IPython/core/profileapp.py | 312
-rw-r--r--  contrib/python/ipython/py3/IPython/core/profiledir.py | 223
-rw-r--r--  contrib/python/ipython/py3/IPython/core/prompts.py | 21
-rw-r--r--  contrib/python/ipython/py3/IPython/core/pylabtools.py | 425
-rw-r--r--  contrib/python/ipython/py3/IPython/core/release.py | 54
-rw-r--r--  contrib/python/ipython/py3/IPython/core/shellapp.py | 451
-rw-r--r--  contrib/python/ipython/py3/IPython/core/splitinput.py | 138
-rw-r--r--  contrib/python/ipython/py3/IPython/core/ultratb.py | 1518
-rw-r--r--  contrib/python/ipython/py3/IPython/core/usage.py | 341
65 files changed, 32775 insertions, 0 deletions
diff --git a/contrib/python/ipython/py3/IPython/core/__init__.py b/contrib/python/ipython/py3/IPython/core/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/__init__.py
diff --git a/contrib/python/ipython/py3/IPython/core/alias.py b/contrib/python/ipython/py3/IPython/core/alias.py
new file mode 100644
index 0000000000..2ad990231a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/alias.py
@@ -0,0 +1,258 @@
+# encoding: utf-8
+"""
+System command aliases.
+
+Authors:
+
+* Fernando Perez
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+
+from traitlets.config.configurable import Configurable
+from .error import UsageError
+
+from traitlets import List, Instance
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+# This is used as the pattern for calls to split_user_input.
+shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
+
+def default_aliases():
+ """Return list of shell aliases to auto-define.
+ """
+ # Note: the aliases defined here should be safe to use on a kernel
+ # regardless of what frontend it is attached to. Frontends that use a
+ # kernel in-process can define additional aliases that will only work in
+ # their case. For example, things like 'less' or 'clear' that manipulate
+ # the terminal should NOT be declared here, as they will only work if the
+ # kernel is running inside a true terminal, and not over the network.
+
+ if os.name == 'posix':
+ default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
+ ('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
+ ('cat', 'cat'),
+ ]
+ # Useful set of ls aliases. The GNU and BSD options are a little
+ # different, so we make aliases that provide as similar as possible
+ # behavior in ipython, by passing the right flags for each platform
+ if sys.platform.startswith('linux'):
+ ls_aliases = [('ls', 'ls -F --color'),
+ # long ls
+ ('ll', 'ls -F -o --color'),
+ # ls normal files only
+ ('lf', 'ls -F -o --color %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -o --color %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -o --color %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -o --color %l | grep ^-..x'),
+ ]
+ elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
+            # OpenBSD, NetBSD. The ls implementation on these platforms does
+            # not support the -G switch and lacks colorized output.
+ ls_aliases = [('ls', 'ls -F'),
+ # long ls
+ ('ll', 'ls -F -l'),
+ # ls normal files only
+ ('lf', 'ls -F -l %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -l %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -l %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -l %l | grep ^-..x'),
+ ]
+ else:
+ # BSD, OSX, etc.
+ ls_aliases = [('ls', 'ls -F -G'),
+ # long ls
+ ('ll', 'ls -F -l -G'),
+ # ls normal files only
+ ('lf', 'ls -F -l -G %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -l -G %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -G -l %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -l -G %l | grep ^-..x'),
+ ]
+ default_aliases = default_aliases + ls_aliases
+ elif os.name in ['nt', 'dos']:
+ default_aliases = [('ls', 'dir /on'),
+ ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
+ ('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
+ ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
+ ]
+ else:
+ default_aliases = []
+
+ return default_aliases
+
+
+class AliasError(Exception):
+ pass
+
+
+class InvalidAliasError(AliasError):
+ pass
+
+class Alias(object):
+ """Callable object storing the details of one alias.
+
+ Instances are registered as magic functions to allow use of aliases.
+ """
+
+ # Prepare blacklist
+ blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
+
+ def __init__(self, shell, name, cmd):
+ self.shell = shell
+ self.name = name
+ self.cmd = cmd
+ self.__doc__ = "Alias for `!{}`".format(cmd)
+ self.nargs = self.validate()
+
+ def validate(self):
+ """Validate the alias, and return the number of arguments."""
+ if self.name in self.blacklist:
+ raise InvalidAliasError("The name %s can't be aliased "
+ "because it is a keyword or builtin." % self.name)
+ try:
+ caller = self.shell.magics_manager.magics['line'][self.name]
+ except KeyError:
+ pass
+ else:
+ if not isinstance(caller, Alias):
+ raise InvalidAliasError("The name %s can't be aliased "
+ "because it is another magic command." % self.name)
+
+ if not (isinstance(self.cmd, str)):
+ raise InvalidAliasError("An alias command must be a string, "
+ "got: %r" % self.cmd)
+
+ nargs = self.cmd.count('%s') - self.cmd.count('%%s')
+
+ if (nargs > 0) and (self.cmd.find('%l') >= 0):
+ raise InvalidAliasError('The %s and %l specifiers are mutually '
+ 'exclusive in alias definitions.')
+
+ return nargs
+
+ def __repr__(self):
+ return "<alias {} for {!r}>".format(self.name, self.cmd)
+
+ def __call__(self, rest=''):
+ cmd = self.cmd
+ nargs = self.nargs
+ # Expand the %l special to be the user's input line
+ if cmd.find('%l') >= 0:
+ cmd = cmd.replace('%l', rest)
+ rest = ''
+
+ if nargs==0:
+ if cmd.find('%%s') >= 1:
+ cmd = cmd.replace('%%s', '%s')
+ # Simple, argument-less aliases
+ cmd = '%s %s' % (cmd, rest)
+ else:
+ # Handle aliases with positional arguments
+ args = rest.split(None, nargs)
+ if len(args) < nargs:
+ raise UsageError('Alias <%s> requires %s arguments, %s given.' %
+ (self.name, nargs, len(args)))
+ cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
+
+ self.shell.system(cmd)
+
+#-----------------------------------------------------------------------------
+# Main AliasManager class
+#-----------------------------------------------------------------------------
+
+class AliasManager(Configurable):
+
+ default_aliases = List(default_aliases()).tag(config=True)
+ user_aliases = List(default_value=[]).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(AliasManager, self).__init__(shell=shell, **kwargs)
+ # For convenient access
+ self.linemagics = self.shell.magics_manager.magics['line']
+ self.init_aliases()
+
+ def init_aliases(self):
+ # Load default & user aliases
+ for name, cmd in self.default_aliases + self.user_aliases:
+ if cmd.startswith('ls ') and self.shell.colors == 'NoColor':
+ cmd = cmd.replace(' --color', '')
+ self.soft_define_alias(name, cmd)
+
+ @property
+ def aliases(self):
+ return [(n, func.cmd) for (n, func) in self.linemagics.items()
+ if isinstance(func, Alias)]
+
+ def soft_define_alias(self, name, cmd):
+ """Define an alias, but don't raise on an AliasError."""
+ try:
+ self.define_alias(name, cmd)
+ except AliasError as e:
+ error("Invalid alias: %s" % e)
+
+ def define_alias(self, name, cmd):
+ """Define a new alias after validating it.
+
+ This will raise an :exc:`AliasError` if there are validation
+ problems.
+ """
+ caller = Alias(shell=self.shell, name=name, cmd=cmd)
+ self.shell.magics_manager.register_function(caller, magic_kind='line',
+ magic_name=name)
+
+ def get_alias(self, name):
+ """Return an alias, or None if no alias by that name exists."""
+ aname = self.linemagics.get(name, None)
+ return aname if isinstance(aname, Alias) else None
+
+ def is_alias(self, name):
+ """Return whether or not a given name has been defined as an alias"""
+ return self.get_alias(name) is not None
+
+ def undefine_alias(self, name):
+ if self.is_alias(name):
+ del self.linemagics[name]
+ else:
+ raise ValueError('%s is not an alias' % name)
+
+ def clear_aliases(self):
+ for name, cmd in self.aliases:
+ self.undefine_alias(name)
+
+ def retrieve_alias(self, name):
+ """Retrieve the command to which an alias expands."""
+ caller = self.get_alias(name)
+ if caller:
+ return caller.cmd
+ else:
+ raise ValueError('%s is not an alias' % name)
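The `%s`/`%l` handling in `Alias.__call__` above is easiest to see in a small, self-contained example. The sketch below is not part of the patch: `DummyShell` is a hypothetical stand-in that provides only the two attributes `Alias` touches; in a real session the shell is the running `InteractiveShell` and `shell.system` actually executes the command.

    from types import SimpleNamespace

    from IPython.core.alias import Alias

    class DummyShell:
        """Hypothetical stand-in for InteractiveShell (illustration only)."""
        def __init__(self):
            # Alias.validate() checks this mapping for clashing line magics.
            self.magics_manager = SimpleNamespace(magics={"line": {}})

        def system(self, cmd):
            # The real shell would run `cmd` through the system shell.
            print("would run:", cmd)

    shell = DummyShell()

    # Two '%s' slots: the first two words fill them, the rest is appended.
    show = Alias(shell, "show", "cat %s | head -n %s")
    show("notes.txt 5 --extra")
    # would run: cat notes.txt | head -n 5 --extra

    # '%l' swallows the whole remaining input line instead.
    tailall = Alias(shell, "tailall", "tail %l")
    tailall("-n 3 *.log")
    # would run: tail -n 3 *.log  (plus a trailing space from the empty rest)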
diff --git a/contrib/python/ipython/py3/IPython/core/application.py b/contrib/python/ipython/py3/IPython/core/application.py
new file mode 100644
index 0000000000..e0a8174f15
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/application.py
@@ -0,0 +1,488 @@
+# encoding: utf-8
+"""
+An application for IPython.
+
+All top-level applications should use the classes in this module for
+handling configuration and creating configurables.
+
+The job of an :class:`Application` is to create the master configuration
+object and then create the configurable objects, passing the config to them.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import atexit
+from copy import deepcopy
+import logging
+import os
+import shutil
+import sys
+
+from pathlib import Path
+
+from traitlets.config.application import Application, catch_config_error
+from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
+from IPython.core import release, crashhandler
+from IPython.core.profiledir import ProfileDir, ProfileDirError
+from IPython.paths import get_ipython_dir, get_ipython_package_dir
+from IPython.utils.path import ensure_dir_exists
+from traitlets import (
+ List, Unicode, Type, Bool, Set, Instance, Undefined,
+ default, observe,
+)
+
+if os.name == "nt":
+ programdata = os.environ.get("PROGRAMDATA", None)
+ if programdata is not None:
+ SYSTEM_CONFIG_DIRS = [str(Path(programdata) / "ipython")]
+ else: # PROGRAMDATA is not defined by default on XP.
+ SYSTEM_CONFIG_DIRS = []
+else:
+ SYSTEM_CONFIG_DIRS = [
+ "/usr/local/etc/ipython",
+ "/etc/ipython",
+ ]
+
+
+ENV_CONFIG_DIRS = []
+_env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
+if _env_config_dir not in SYSTEM_CONFIG_DIRS:
+ # only add ENV_CONFIG if sys.prefix is not already included
+ ENV_CONFIG_DIRS.append(_env_config_dir)
+
+
+_envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS')
+if _envvar in {None, ''}:
+ IPYTHON_SUPPRESS_CONFIG_ERRORS = None
+else:
+ if _envvar.lower() in {'1','true'}:
+ IPYTHON_SUPPRESS_CONFIG_ERRORS = True
+ elif _envvar.lower() in {'0','false'} :
+ IPYTHON_SUPPRESS_CONFIG_ERRORS = False
+ else:
+ sys.exit("Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar )
+
+# aliases and flags
+
+base_aliases = {}
+if isinstance(Application.aliases, dict):
+ # traitlets 5
+ base_aliases.update(Application.aliases)
+base_aliases.update(
+ {
+ "profile-dir": "ProfileDir.location",
+ "profile": "BaseIPythonApplication.profile",
+ "ipython-dir": "BaseIPythonApplication.ipython_dir",
+ "log-level": "Application.log_level",
+ "config": "BaseIPythonApplication.extra_config_file",
+ }
+)
+
+base_flags = dict()
+if isinstance(Application.flags, dict):
+ # traitlets 5
+ base_flags.update(Application.flags)
+base_flags.update(
+ dict(
+ debug=(
+ {"Application": {"log_level": logging.DEBUG}},
+ "set log level to logging.DEBUG (maximize logging output)",
+ ),
+ quiet=(
+ {"Application": {"log_level": logging.CRITICAL}},
+ "set log level to logging.CRITICAL (minimize logging output)",
+ ),
+ init=(
+ {
+ "BaseIPythonApplication": {
+ "copy_config_files": True,
+ "auto_create": True,
+ }
+ },
+ """Initialize profile with default config files. This is equivalent
+ to running `ipython profile create <profile>` prior to startup.
+ """,
+ ),
+ )
+)
+
+
+class ProfileAwareConfigLoader(PyFileConfigLoader):
+ """A Python file config loader that is aware of IPython profiles."""
+ def load_subconfig(self, fname, path=None, profile=None):
+ if profile is not None:
+ try:
+ profile_dir = ProfileDir.find_profile_dir_by_name(
+ get_ipython_dir(),
+ profile,
+ )
+ except ProfileDirError:
+ return
+ path = profile_dir.location
+ return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
+
+class BaseIPythonApplication(Application):
+ name = "ipython"
+ description = "IPython: an enhanced interactive Python shell."
+ version = Unicode(release.version)
+
+ aliases = base_aliases
+ flags = base_flags
+ classes = List([ProfileDir])
+
+ # enable `load_subconfig('cfg.py', profile='name')`
+ python_config_loader_class = ProfileAwareConfigLoader
+
+ # Track whether the config_file has changed,
+ # because some logic happens only if we aren't using the default.
+ config_file_specified = Set()
+
+ config_file_name = Unicode()
+ @default('config_file_name')
+ def _config_file_name_default(self):
+ return self.name.replace('-','_') + u'_config.py'
+ @observe('config_file_name')
+ def _config_file_name_changed(self, change):
+ if change['new'] != change['old']:
+ self.config_file_specified.add(change['new'])
+
+ # The directory that contains IPython's builtin profiles.
+ builtin_profile_dir = Unicode(
+ os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
+ )
+
+ config_file_paths = List(Unicode())
+ @default('config_file_paths')
+ def _config_file_paths_default(self):
+ return []
+
+ extra_config_file = Unicode(
+ help="""Path to an extra config file to load.
+
+ If specified, load this config file in addition to any other IPython config.
+ """).tag(config=True)
+ @observe('extra_config_file')
+ def _extra_config_file_changed(self, change):
+ old = change['old']
+ new = change['new']
+ try:
+ self.config_files.remove(old)
+ except ValueError:
+ pass
+ self.config_file_specified.add(new)
+ self.config_files.append(new)
+
+ profile = Unicode(u'default',
+ help="""The IPython profile to use."""
+ ).tag(config=True)
+
+ @observe('profile')
+ def _profile_changed(self, change):
+ self.builtin_profile_dir = os.path.join(
+ get_ipython_package_dir(), u'config', u'profile', change['new']
+ )
+
+ add_ipython_dir_to_sys_path = Bool(
+ False,
+ """Should the IPython profile directory be added to sys path ?
+
+ This option was non-existing before IPython 8.0, and ipython_dir was added to
+ sys path to allow import of extensions present there. This was historical
+ baggage from when pip did not exist. This now default to false,
+ but can be set to true for legacy reasons.
+ """,
+ ).tag(config=True)
+
+ ipython_dir = Unicode(
+ help="""
+ The name of the IPython directory. This directory is used for logging
+ configuration (through profiles), history storage, etc. The default
+ is usually $HOME/.ipython. This option can also be specified through
+ the environment variable IPYTHONDIR.
+ """
+ ).tag(config=True)
+ @default('ipython_dir')
+ def _ipython_dir_default(self):
+ d = get_ipython_dir()
+ self._ipython_dir_changed({
+ 'name': 'ipython_dir',
+ 'old': d,
+ 'new': d,
+ })
+ return d
+
+ _in_init_profile_dir = False
+ profile_dir = Instance(ProfileDir, allow_none=True)
+ @default('profile_dir')
+ def _profile_dir_default(self):
+ # avoid recursion
+ if self._in_init_profile_dir:
+ return
+ # profile_dir requested early, force initialization
+ self.init_profile_dir()
+ return self.profile_dir
+
+ overwrite = Bool(False,
+ help="""Whether to overwrite existing config files when copying"""
+ ).tag(config=True)
+ auto_create = Bool(False,
+ help="""Whether to create profile dir if it doesn't exist"""
+ ).tag(config=True)
+
+ config_files = List(Unicode())
+ @default('config_files')
+ def _config_files_default(self):
+ return [self.config_file_name]
+
+ copy_config_files = Bool(False,
+ help="""Whether to install the default config files into the profile dir.
+ If a new profile is being created, and IPython contains config files for that
+ profile, then they will be staged into the new directory. Otherwise,
+ default config files will be automatically generated.
+ """).tag(config=True)
+
+ verbose_crash = Bool(False,
+ help="""Create a massive crash report when IPython encounters what may be an
+ internal error. The default is to append a short message to the
+ usual traceback""").tag(config=True)
+
+ # The class to use as the crash handler.
+ crash_handler_class = Type(crashhandler.CrashHandler)
+
+ @catch_config_error
+ def __init__(self, **kwargs):
+ super(BaseIPythonApplication, self).__init__(**kwargs)
+ # ensure current working directory exists
+ try:
+ os.getcwd()
+ except:
+ # exit if cwd doesn't exist
+ self.log.error("Current working directory doesn't exist.")
+ self.exit(1)
+
+ #-------------------------------------------------------------------------
+ # Various stages of Application creation
+ #-------------------------------------------------------------------------
+
+ def init_crash_handler(self):
+ """Create a crash handler, typically setting sys.excepthook to it."""
+ self.crash_handler = self.crash_handler_class(self)
+ sys.excepthook = self.excepthook
+ def unset_crashhandler():
+ sys.excepthook = sys.__excepthook__
+ atexit.register(unset_crashhandler)
+
+ def excepthook(self, etype, evalue, tb):
+ """this is sys.excepthook after init_crashhandler
+
+ set self.verbose_crash=True to use our full crashhandler, instead of
+ a regular traceback with a short message (crash_handler_lite)
+ """
+
+ if self.verbose_crash:
+ return self.crash_handler(etype, evalue, tb)
+ else:
+ return crashhandler.crash_handler_lite(etype, evalue, tb)
+
+ @observe('ipython_dir')
+ def _ipython_dir_changed(self, change):
+ old = change['old']
+ new = change['new']
+ if old is not Undefined:
+ str_old = os.path.abspath(old)
+ if str_old in sys.path:
+ sys.path.remove(str_old)
+ if self.add_ipython_dir_to_sys_path:
+ str_path = os.path.abspath(new)
+ sys.path.append(str_path)
+ ensure_dir_exists(new)
+ readme = os.path.join(new, "README")
+ readme_src = os.path.join(
+ get_ipython_package_dir(), "config", "profile", "README"
+ )
+ if not os.path.exists(readme) and os.path.exists(readme_src):
+ shutil.copy(readme_src, readme)
+ for d in ("extensions", "nbextensions"):
+ path = os.path.join(new, d)
+ try:
+ ensure_dir_exists(path)
+ except OSError as e:
+ # this will not be EEXIST
+ self.log.error("couldn't create path %s: %s", path, e)
+ self.log.debug("IPYTHONDIR set to: %s", new)
+
+ def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
+ """Load the config file.
+
+ By default, errors in loading config are handled, and a warning
+ printed on screen. For testing, the suppress_errors option is set
+ to False, so errors will make tests fail.
+
+        The default value of `suppress_errors` is `None`, in which case the
+        behavior defaults to that of `traitlets.Application`.
+
+ The default value can be set :
+ - to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive).
+ - to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
+ - to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.
+
+        Any other value is invalid and will make IPython exit with a non-zero return code.
+ """
+
+
+ self.log.debug("Searching path %s for config files", self.config_file_paths)
+ base_config = 'ipython_config.py'
+ self.log.debug("Attempting to load config file: %s" %
+ base_config)
+ try:
+ if suppress_errors is not None:
+ old_value = Application.raise_config_file_errors
+                Application.raise_config_file_errors = not suppress_errors
+ Application.load_config_file(
+ self,
+ base_config,
+ path=self.config_file_paths
+ )
+ except ConfigFileNotFound:
+ # ignore errors loading parent
+ self.log.debug("Config file %s not found", base_config)
+ pass
+ if suppress_errors is not None:
+ Application.raise_config_file_errors = old_value
+
+ for config_file_name in self.config_files:
+ if not config_file_name or config_file_name == base_config:
+ continue
+ self.log.debug("Attempting to load config file: %s" %
+ self.config_file_name)
+ try:
+ Application.load_config_file(
+ self,
+ config_file_name,
+ path=self.config_file_paths
+ )
+ except ConfigFileNotFound:
+ # Only warn if the default config file was NOT being used.
+ if config_file_name in self.config_file_specified:
+ msg = self.log.warning
+ else:
+ msg = self.log.debug
+ msg("Config file not found, skipping: %s", config_file_name)
+ except Exception:
+ # For testing purposes.
+ if not suppress_errors:
+ raise
+ self.log.warning("Error loading config file: %s" %
+ self.config_file_name, exc_info=True)
+
+ def init_profile_dir(self):
+ """initialize the profile dir"""
+ self._in_init_profile_dir = True
+ if self.profile_dir is not None:
+ # already ran
+ return
+ if 'ProfileDir.location' not in self.config:
+ # location not specified, find by profile name
+ try:
+ p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
+ except ProfileDirError:
+ # not found, maybe create it (always create default profile)
+ if self.auto_create or self.profile == 'default':
+ try:
+ p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
+ except ProfileDirError:
+ self.log.fatal("Could not create profile: %r"%self.profile)
+ self.exit(1)
+ else:
+ self.log.info("Created profile dir: %r"%p.location)
+ else:
+ self.log.fatal("Profile %r not found."%self.profile)
+ self.exit(1)
+ else:
+ self.log.debug("Using existing profile dir: %r", p.location)
+ else:
+ location = self.config.ProfileDir.location
+ # location is fully specified
+ try:
+ p = ProfileDir.find_profile_dir(location, self.config)
+ except ProfileDirError:
+ # not found, maybe create it
+ if self.auto_create:
+ try:
+ p = ProfileDir.create_profile_dir(location, self.config)
+ except ProfileDirError:
+ self.log.fatal("Could not create profile directory: %r"%location)
+ self.exit(1)
+ else:
+ self.log.debug("Creating new profile dir: %r"%location)
+ else:
+ self.log.fatal("Profile directory %r not found."%location)
+ self.exit(1)
+ else:
+ self.log.debug("Using existing profile dir: %r", p.location)
+ # if profile_dir is specified explicitly, set profile name
+ dir_name = os.path.basename(p.location)
+ if dir_name.startswith('profile_'):
+ self.profile = dir_name[8:]
+
+ self.profile_dir = p
+ self.config_file_paths.append(p.location)
+ self._in_init_profile_dir = False
+
+ def init_config_files(self):
+ """[optionally] copy default config files into profile dir."""
+ self.config_file_paths.extend(ENV_CONFIG_DIRS)
+ self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
+ # copy config files
+ path = Path(self.builtin_profile_dir)
+ if self.copy_config_files:
+ src = self.profile
+
+ cfg = self.config_file_name
+ if path and (path / cfg).exists():
+ self.log.warning(
+ "Staging %r from %s into %r [overwrite=%s]"
+ % (cfg, src, self.profile_dir.location, self.overwrite)
+ )
+ self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
+ else:
+ self.stage_default_config_file()
+ else:
+ # Still stage *bundled* config files, but not generated ones
+ # This is necessary for `ipython profile=sympy` to load the profile
+ # on the first go
+ files = path.glob("*.py")
+ for fullpath in files:
+ cfg = fullpath.name
+ if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
+ # file was copied
+ self.log.warning("Staging bundled %s from %s into %r"%(
+ cfg, self.profile, self.profile_dir.location)
+ )
+
+
+ def stage_default_config_file(self):
+ """auto generate default config file, and stage it into the profile."""
+ s = self.generate_config_file()
+ config_file = Path(self.profile_dir.location) / self.config_file_name
+ if self.overwrite or not config_file.exists():
+ self.log.warning("Generating default config file: %r", (config_file))
+ config_file.write_text(s, encoding="utf-8")
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ # don't hook up crash handler before parsing command-line
+ self.parse_command_line(argv)
+ self.init_crash_handler()
+ if self.subapp is not None:
+ # stop here if subapp is taking over
+ return
+ # save a copy of CLI config to re-load after config files
+ # so that it has highest priority
+ cl_config = deepcopy(self.config)
+ self.init_profile_dir()
+ self.init_config_files()
+ self.load_config_file()
+ # enforce cl-opts override configfile opts:
+ self.update_config(cl_config)
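A small standalone sketch, not part of the patch, of the tri-state parsing that application.py applies to the IPYTHON_SUPPRESS_CONFIG_ERRORS environment variable near the top of the file: unset or empty defers to traitlets (`None`), '1'/'true' suppresses config-file errors, '0'/'false' raises them, and anything else aborts startup. The function name is illustrative only.

    def parse_suppress_config_errors(value):
        """Mirror the env-var handling at the top of application.py."""
        if value in (None, ""):
            return None                      # defer to traitlets.Application
        lowered = value.lower()
        if lowered in {"1", "true"}:
            return True                      # suppress config-file errors
        if lowered in {"0", "false"}:
            return False                     # raise config-file errors
        raise SystemExit(
            "Unsupported value %r for IPYTHON_SUPPRESS_CONFIG_ERRORS" % value
        )

    assert parse_suppress_config_errors(None) is None
    assert parse_suppress_config_errors("TRUE") is True
    assert parse_suppress_config_errors("0") is False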
diff --git a/contrib/python/ipython/py3/IPython/core/async_helpers.py b/contrib/python/ipython/py3/IPython/core/async_helpers.py
new file mode 100644
index 0000000000..0e7db0bb54
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/async_helpers.py
@@ -0,0 +1,156 @@
+"""
+Async helper functions that are invalid syntax on Python 3.5 and below.
+
+This code is best effort, and may have edge cases not behaving as expected. In
+particular it contains a number of heuristics to detect whether code is
+effectively async and needs to run in an event loop or not.
+
+Some constructs (like top-level `return`, or `yield`) are taken care of
+explicitly to actually raise a SyntaxError and stay as close as possible to
+Python semantics.
+"""
+
+
+import ast
+import asyncio
+import inspect
+from functools import wraps
+
+_asyncio_event_loop = None
+
+
+def get_asyncio_loop():
+ """asyncio has deprecated get_event_loop
+
+ Replicate it here, with our desired semantics:
+
+ - always returns a valid, not-closed loop
+ - not thread-local like asyncio's,
+ because we only want one loop for IPython
+ - if called from inside a coroutine (e.g. in ipykernel),
+ return the running loop
+
+ .. versionadded:: 8.0
+ """
+ try:
+ return asyncio.get_running_loop()
+ except RuntimeError:
+ # not inside a coroutine,
+ # track our own global
+ pass
+
+ # not thread-local like asyncio's,
+ # because we only track one event loop to run for IPython itself,
+ # always in the main thread.
+ global _asyncio_event_loop
+ if _asyncio_event_loop is None or _asyncio_event_loop.is_closed():
+ _asyncio_event_loop = asyncio.new_event_loop()
+ return _asyncio_event_loop
+
+
+class _AsyncIORunner:
+ def __call__(self, coro):
+ """
+ Handler for asyncio autoawait
+ """
+ return get_asyncio_loop().run_until_complete(coro)
+
+ def __str__(self):
+ return "asyncio"
+
+
+_asyncio_runner = _AsyncIORunner()
+
+
+class _AsyncIOProxy:
+ """Proxy-object for an asyncio
+
+ Any coroutine methods will be wrapped in event_loop.run_
+ """
+
+ def __init__(self, obj, event_loop):
+ self._obj = obj
+ self._event_loop = event_loop
+
+ def __repr__(self):
+ return f"<_AsyncIOProxy({self._obj!r})>"
+
+ def __getattr__(self, key):
+ attr = getattr(self._obj, key)
+ if inspect.iscoroutinefunction(attr):
+ # if it's a coroutine method,
+ # return a threadsafe wrapper onto the _current_ asyncio loop
+ @wraps(attr)
+ def _wrapped(*args, **kwargs):
+ concurrent_future = asyncio.run_coroutine_threadsafe(
+ attr(*args, **kwargs), self._event_loop
+ )
+ return asyncio.wrap_future(concurrent_future)
+
+ return _wrapped
+ else:
+ return attr
+
+ def __dir__(self):
+ return dir(self._obj)
+
+
+def _curio_runner(coroutine):
+ """
+ handler for curio autoawait
+ """
+ import curio
+
+ return curio.run(coroutine)
+
+
+def _trio_runner(async_fn):
+ import trio
+
+ async def loc(coro):
+ """
+        We need this dummy no-op async def wrapper to protect against
+        trio's internals. See https://github.com/python-trio/trio/issues/89
+ """
+ return await coro
+
+ return trio.run(loc, async_fn)
+
+
+def _pseudo_sync_runner(coro):
+ """
+    A runner that does not really allow async execution and just advances the coroutine.
+
+    See discussion in https://github.com/python-trio/trio/issues/608.
+
+ Credit to Nathaniel Smith
+ """
+ try:
+ coro.send(None)
+ except StopIteration as exc:
+ return exc.value
+ else:
+ # TODO: do not raise but return an execution result with the right info.
+ raise RuntimeError(
+ "{coro_name!r} needs a real async loop".format(coro_name=coro.__name__)
+ )
+
+
+def _should_be_async(cell: str) -> bool:
+ """Detect if a block of code need to be wrapped in an `async def`
+
+ Attempt to parse the block of code, it it compile we're fine.
+ Otherwise we wrap if and try to compile.
+
+ If it works, assume it should be async. Otherwise Return False.
+
+ Not handled yet: If the block of code has a return statement as the top
+ level, it will be seen as async. This is a know limitation.
+ """
+ try:
+ code = compile(
+ cell, "<>", "exec", flags=getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
+ )
+ return inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
+ except (SyntaxError, MemoryError):
+ return False
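The coroutine-flag check in `_should_be_async` can be reproduced in isolation. The sketch below is not part of the patch and the function name is illustrative: it compiles a cell with top-level `await` allowed and inspects the resulting code object, which is the same signal the helper relies on (Python 3.8+).

    import ast
    import inspect

    def looks_async(cell: str) -> bool:
        """Return True if the cell would need an event loop to run."""
        try:
            code = compile(
                cell, "<cell>", "exec",
                flags=getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0),
            )
        except (SyntaxError, MemoryError):
            return False
        # The compiler marks code containing top-level await as a coroutine.
        return bool(code.co_flags & inspect.CO_COROUTINE)

    print(looks_async("x = 1 + 1"))                               # False
    print(looks_async("import asyncio\nawait asyncio.sleep(0)"))  # True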
diff --git a/contrib/python/ipython/py3/IPython/core/autocall.py b/contrib/python/ipython/py3/IPython/core/autocall.py
new file mode 100644
index 0000000000..54beec3f58
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/autocall.py
@@ -0,0 +1,70 @@
+# encoding: utf-8
+"""
+Autocall capabilities for IPython.core.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+* Thomas Kluyver
+
+Notes
+-----
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+class IPyAutocall(object):
+ """ Instances of this class are always autocalled
+
+ This happens regardless of 'autocall' variable state. Use this to
+ develop macro-like mechanisms.
+ """
+ _ip = None
+ rewrite = True
+ def __init__(self, ip=None):
+ self._ip = ip
+
+ def set_ip(self, ip):
+ """Will be used to set _ip point to current ipython instance b/f call
+
+ Override this method if you don't want this to happen.
+
+ """
+ self._ip = ip
+
+
+class ExitAutocall(IPyAutocall):
+ """An autocallable object which will be added to the user namespace so that
+ exit, exit(), quit or quit() are all valid ways to close the shell."""
+ rewrite = False
+
+ def __call__(self):
+ self._ip.ask_exit()
+
+class ZMQExitAutocall(ExitAutocall):
+ """Exit IPython. Autocallable, so it needn't be explicitly called.
+
+ Parameters
+ ----------
+ keep_kernel : bool
+ If True, leave the kernel alive. Otherwise, tell the kernel to exit too
+ (default).
+ """
+ def __call__(self, keep_kernel=False):
+ self._ip.keepkernel_on_exit = keep_kernel
+ self._ip.ask_exit()
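As a usage sketch, not part of the patch, the class below shows how one might define a custom autocallable: any `IPyAutocall` instance placed in the user namespace is called by IPython even when typed without parentheses. The name `hello` is illustrative only.

    from IPython.core.autocall import IPyAutocall

    class SayHello(IPyAutocall):
        """Typing `hello` at the prompt runs this without parentheses."""

        def __call__(self):
            # `_ip` is set by IPython to the active InteractiveShell.
            print("hello from", type(self._ip).__name__)

    # Inside an IPython session one might wire it up like this:
    #   ip = get_ipython()
    #   ip.user_ns["hello"] = SayHello(ip)
    #   hello          # autocalled, prints the greeting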
diff --git a/contrib/python/ipython/py3/IPython/core/builtin_trap.py b/contrib/python/ipython/py3/IPython/core/builtin_trap.py
new file mode 100644
index 0000000000..a8ea4abcd9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/builtin_trap.py
@@ -0,0 +1,86 @@
+"""
+A context manager for managing things injected into :mod:`builtins`.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import builtins as builtin_mod
+
+from traitlets.config.configurable import Configurable
+
+from traitlets import Instance
+
+
+class __BuiltinUndefined(object): pass
+BuiltinUndefined = __BuiltinUndefined()
+
+class __HideBuiltin(object): pass
+HideBuiltin = __HideBuiltin()
+
+
+class BuiltinTrap(Configurable):
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+
+ def __init__(self, shell=None):
+ super(BuiltinTrap, self).__init__(shell=shell, config=None)
+ self._orig_builtins = {}
+ # We define this to track if a single BuiltinTrap is nested.
+ # Only turn off the trap when the outermost call to __exit__ is made.
+ self._nested_level = 0
+ self.shell = shell
+ # builtins we always add - if set to HideBuiltin, they will just
+ # be removed instead of being replaced by something else
+ self.auto_builtins = {'exit': HideBuiltin,
+ 'quit': HideBuiltin,
+ 'get_ipython': self.shell.get_ipython,
+ }
+
+ def __enter__(self):
+ if self._nested_level == 0:
+ self.activate()
+ self._nested_level += 1
+ # I return self, so callers can use add_builtin in a with clause.
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self._nested_level == 1:
+ self.deactivate()
+ self._nested_level -= 1
+ # Returning False will cause exceptions to propagate
+ return False
+
+ def add_builtin(self, key, value):
+ """Add a builtin and save the original."""
+ bdict = builtin_mod.__dict__
+ orig = bdict.get(key, BuiltinUndefined)
+ if value is HideBuiltin:
+ if orig is not BuiltinUndefined: #same as 'key in bdict'
+ self._orig_builtins[key] = orig
+ del bdict[key]
+ else:
+ self._orig_builtins[key] = orig
+ bdict[key] = value
+
+ def remove_builtin(self, key, orig):
+ """Remove an added builtin and re-set the original."""
+ if orig is BuiltinUndefined:
+ del builtin_mod.__dict__[key]
+ else:
+ builtin_mod.__dict__[key] = orig
+
+ def activate(self):
+ """Store ipython references in the __builtin__ namespace."""
+
+ add_builtin = self.add_builtin
+ for name, func in self.auto_builtins.items():
+ add_builtin(name, func)
+
+ def deactivate(self):
+ """Remove any builtins which might have been added by add_builtins, or
+ restore overwritten ones to their previous values."""
+ remove_builtin = self.remove_builtin
+ for key, val in self._orig_builtins.items():
+ remove_builtin(key, val)
+ self._orig_builtins.clear()
+ self._builtins_added = False
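The add/remove dance above boils down to saving whatever was in `builtins` before overwriting or hiding it, then restoring the originals. A standalone sketch of that pattern follows; it is not part of the patch and deliberately does not use `BuiltinTrap` itself, since the real class expects an `InteractiveShell` instance.

    import builtins

    _MISSING = object()   # plays the role of BuiltinUndefined

    def swap_builtin(name, value, saved):
        """Install `value` as builtins.<name>, remembering the original."""
        saved[name] = getattr(builtins, name, _MISSING)
        setattr(builtins, name, value)

    def restore_builtins(saved):
        """Undo swap_builtin for every saved name."""
        for name, original in saved.items():
            if original is _MISSING:
                delattr(builtins, name)
            else:
                setattr(builtins, name, original)
        saved.clear()

    saved = {}
    swap_builtin("get_ipython", lambda: "fake shell", saved)
    print(builtins.get_ipython())                  # -> fake shell
    restore_builtins(saved)
    print(hasattr(builtins, "get_ipython"))        # -> False outside IPython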
diff --git a/contrib/python/ipython/py3/IPython/core/compilerop.py b/contrib/python/ipython/py3/IPython/core/compilerop.py
new file mode 100644
index 0000000000..7799a4fc99
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/compilerop.py
@@ -0,0 +1,214 @@
+"""Compiler tools with improved interactive support.
+
+Provides compilation machinery similar to codeop, but with caching support so
+we can provide interactive tracebacks.
+
+Authors
+-------
+* Robert Kern
+* Fernando Perez
+* Thomas Kluyver
+"""
+
+# Note: though it might be more natural to name this module 'compiler', that
+# name is in the stdlib and name collisions with the stdlib tend to produce
+# weird problems (often with third-party tools).
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib imports
+import __future__
+from ast import PyCF_ONLY_AST
+import codeop
+import functools
+import hashlib
+import linecache
+import operator
+import time
+from contextlib import contextmanager
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+# Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
+# this is used as a bitmask to extract future-related code flags.
+PyCF_MASK = functools.reduce(operator.or_,
+ (getattr(__future__, fname).compiler_flag
+ for fname in __future__.all_feature_names))
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+def code_name(code, number=0):
+ """ Compute a (probably) unique name for code for caching.
+
+ This now expects code to be unicode.
+ """
+ hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
+ # Include the number and 12 characters of the hash in the name. It's
+ # pretty much impossible that in a single session we'll have collisions
+ # even with truncated hashes, and the full one makes tracebacks too long
+ return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+class CachingCompiler(codeop.Compile):
+ """A compiler that caches code compiled from interactive statements.
+ """
+
+ def __init__(self):
+ codeop.Compile.__init__(self)
+
+ # Caching a dictionary { filename: execution_count } for nicely
+ # rendered tracebacks. The filename corresponds to the filename
+ # argument used for the builtins.compile function.
+ self._filename_map = {}
+
+ def ast_parse(self, source, filename='<unknown>', symbol='exec'):
+ """Parse code to an AST with the current compiler flags active.
+
+ Arguments are exactly the same as ast.parse (in the standard library),
+ and are passed to the built-in compile function."""
+ return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
+
+ def reset_compiler_flags(self):
+ """Reset compiler flags to default state."""
+ # This value is copied from codeop.Compile.__init__, so if that ever
+ # changes, it will need to be updated.
+ self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
+
+ @property
+ def compiler_flags(self):
+ """Flags currently active in the compilation process.
+ """
+ return self.flags
+
+ def get_code_name(self, raw_code, transformed_code, number):
+ """Compute filename given the code, and the cell number.
+
+ Parameters
+ ----------
+ raw_code : str
+ The raw cell code.
+ transformed_code : str
+ The executable Python source code to cache and compile.
+ number : int
+ A number which forms part of the code's name. Used for the execution
+ counter.
+
+ Returns
+ -------
+ The computed filename.
+ """
+ return code_name(transformed_code, number)
+
+ def format_code_name(self, name):
+ """Return a user-friendly label and name for a code block.
+
+ Parameters
+ ----------
+ name : str
+ The name for the code block returned from get_code_name
+
+ Returns
+ -------
+ A (label, name) pair that can be used in tracebacks, or None if the default formatting should be used.
+ """
+ if name in self._filename_map:
+ return "Cell", "In[%s]" % self._filename_map[name]
+
+ def cache(self, transformed_code, number=0, raw_code=None):
+ """Make a name for a block of code, and cache the code.
+
+ Parameters
+ ----------
+ transformed_code : str
+ The executable Python source code to cache and compile.
+ number : int
+ A number which forms part of the code's name. Used for the execution
+ counter.
+ raw_code : str
+ The raw code before transformation, if None, set to `transformed_code`.
+
+ Returns
+ -------
+ The name of the cached code (as a string). Pass this as the filename
+ argument to compilation, so that tracebacks are correctly hooked up.
+ """
+ if raw_code is None:
+ raw_code = transformed_code
+
+ name = self.get_code_name(raw_code, transformed_code, number)
+
+ # Save the execution count
+ self._filename_map[name] = number
+
+ # Since Python 2.5, setting mtime to `None` means the lines will
+ # never be removed by `linecache.checkcache`. This means all the
+ # monkeypatching has *never* been necessary, since this code was
+ # only added in 2010, at which point IPython had already stopped
+ # supporting Python 2.4.
+ #
+ # Note that `linecache.clearcache` and `linecache.updatecache` may
+ # still remove our code from the cache, but those show explicit
+ # intent, and we should not try to interfere. Normally the former
+ # is never called except when out of memory, and the latter is only
+ # called for lines *not* in the cache.
+ entry = (
+ len(transformed_code),
+ None,
+ [line + "\n" for line in transformed_code.splitlines()],
+ name,
+ )
+ linecache.cache[name] = entry
+ return name
+
+ @contextmanager
+ def extra_flags(self, flags):
+ ## bits that we'll set to 1
+ turn_on_bits = ~self.flags & flags
+
+
+ self.flags = self.flags | flags
+ try:
+ yield
+ finally:
+ # turn off only the bits we turned on so that something like
+ # __future__ that set flags stays.
+ self.flags &= ~turn_on_bits
+
+
+def check_linecache_ipython(*args):
+ """Deprecated since IPython 8.6. Call linecache.checkcache() directly.
+
+ It was already not necessary to call this function directly. If no
+ CachingCompiler had been created, this function would fail badly. If
+ an instance had been created, this function would've been monkeypatched
+ into place.
+
+ As of IPython 8.6, the monkeypatching has gone away entirely. But there
+ were still internal callers of this function, so maybe external callers
+ also existed?
+ """
+ import warnings
+
+ warnings.warn(
+ "Deprecated Since IPython 8.6, Just call linecache.checkcache() directly.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ linecache.checkcache()
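The point of the `linecache` entry written by `cache()` is that tracebacks can later show the source of an interactively entered cell. A brief round-trip sketch, not part of the patch:

    import linecache

    from IPython.core.compilerop import CachingCompiler

    compiler = CachingCompiler()
    source = "def f():\n    return 1 / 0\n"

    # Register the cell under a synthetic '<ipython-input-1-...>' filename.
    fname = compiler.cache(source, number=1)

    # Compile against that filename (codeop.Compile.__call__) and execute.
    code = compiler(source, fname, "exec")
    ns = {}
    exec(code, ns)

    # linecache now knows the "file", so tracebacks can display the line.
    print(linecache.getline(fname, 2).strip())     # -> return 1 / 0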
diff --git a/contrib/python/ipython/py3/IPython/core/completer.py b/contrib/python/ipython/py3/IPython/core/completer.py
new file mode 100644
index 0000000000..cc5f6c4270
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/completer.py
@@ -0,0 +1,3347 @@
+"""Completion for IPython.
+
+This module started as a fork of the rlcompleter module in the Python standard
+library. The original enhancements made to rlcompleter have been sent
+upstream and were accepted as of Python 2.3.
+
+This module now supports a wide variety of completion mechanisms, both for
+normal classic Python code and for IPython-specific syntax like magics.
+
+Latex and Unicode completion
+============================
+
+IPython and compatible frontends not only can complete your code, but can help
+you to input a wide range of characters. In particular we allow you to insert
+a unicode character using the tab completion mechanism.
+
+Forward latex/unicode completion
+--------------------------------
+
+Forward completion allows you to easily type a unicode character using its latex
+name or unicode long description. To do so, type a backslash followed by the
+relevant name and press tab:
+
+
+Using latex completion:
+
+.. code::
+
+ \\alpha<tab>
+ α
+
+or using unicode completion:
+
+
+.. code::
+
+ \\GREEK SMALL LETTER ALPHA<tab>
+ α
+
+
+Only valid Python identifiers will complete. Combining characters (like arrows or
+dots) are also available; unlike latex, they need to be put after their
+counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
+
+Some browsers are known to display combining characters incorrectly.
+
+Backward latex completion
+-------------------------
+
+It is sometimes challenging to know how to type a character. If you are using
+IPython or any compatible frontend, you can prepend a backslash to the character
+and press :kbd:`Tab` to expand it to its latex form.
+
+.. code::
+
+ \\α<tab>
+ \\alpha
+
+
+Both forward and backward completions can be deactivated by setting the
+:std:configtrait:`Completer.backslash_combining_completions` option to
+``False``.
+
+
+Experimental
+============
+
+Starting with IPython 6.0, this module can make use of the Jedi library to
+generate completions both using static analysis of the code, and dynamically
+inspecting multiple namespaces. Jedi is an autocompletion and static analysis
+library for Python. The APIs attached to this new mechanism are unstable and will
+raise unless used in an :any:`provisionalcompleter` context manager.
+
+You will find that the following are experimental:
+
+ - :any:`provisionalcompleter`
+ - :any:`IPCompleter.completions`
+ - :any:`Completion`
+ - :any:`rectify_completions`
+
+.. note::
+
+ better name for :any:`rectify_completions` ?
+
+We welcome any feedback on these new APIs, and we also encourage you to try this
+module in debug mode (start IPython with ``--Completer.debug=True``) in order
+to have extra logging information if :any:`jedi` is crashing, or if the current
+IPython completer's pending deprecations are returning results not yet handled
+by :any:`jedi`.
+
+Using Jedi for tab completion allows snippets like the following to work without
+having to execute any code:
+
+ >>> myvar = ['hello', 42]
+ ... myvar[1].bi<tab>
+
+Tab completion will be able to infer that ``myvar[1]`` is a real number almost
+without executing any code, unlike the deprecated :any:`IPCompleter.greedy`
+option.
+
+Be sure to update :any:`jedi` to the latest stable version or to try the
+current development version to get better completions.
+
+Matchers
+========
+
+All completion routines are implemented using the unified *Matchers* API.
+The matchers API is provisional and subject to change without notice.
+
+The built-in matchers include:
+
+- :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
+- :any:`IPCompleter.magic_matcher`: completions for magics,
+- :any:`IPCompleter.unicode_name_matcher`,
+ :any:`IPCompleter.fwd_unicode_matcher`
+ and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
+- :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
+- :any:`IPCompleter.file_matcher`: paths to files and directories,
+- :any:`IPCompleter.python_func_kw_matcher` - function keywords,
+- :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
+- ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
+- :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
+ implementation in :any:`InteractiveShell` which uses IPython hooks system
+ (`complete_command`) with string dispatch (including regular expressions).
+  Unlike other matchers, ``custom_completer_matcher`` will not suppress
+ Jedi results to match behaviour in earlier IPython versions.
+
+Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list.
+
+Matcher API
+-----------
+
+Simplifying some details, the ``Matcher`` interface can be described as
+
+.. code-block::
+
+ MatcherAPIv1 = Callable[[str], list[str]]
+ MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
+
+ Matcher = MatcherAPIv1 | MatcherAPIv2
+
+The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
+and remains supported as a simplest way for generating completions. This is also
+currently the only API supported by the IPython hooks system `complete_command`.
+
+To distinguish between matcher versions, the ``matcher_api_version`` attribute is used.
+More precisely, the API allows omitting ``matcher_api_version`` for v1 Matchers,
+and requires a literal ``2`` for v2 Matchers.
+
+Once the API stabilises future versions may relax the requirement for specifying
+``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
+please do not rely on the presence of ``matcher_api_version`` for any purposes.
+
+Suppression of competing matchers
+---------------------------------
+
+By default results from all matchers are combined, in the order determined by
+their priority. Matchers can request to suppress results from subsequent
+matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
+
+When multiple matchers simultaneously request suppression, the results from
+the matcher with the higher priority will be returned.
+
+Sometimes it is desirable to suppress most but not all other matchers;
+this can be achieved by adding a set of identifiers of matchers which
+should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
+
+The suppression behaviour is user-configurable via
+:std:configtrait:`IPCompleter.suppress_competing_matchers`.
+"""
+
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+#
+# Some of this code originated from rlcompleter in the Python standard library
+# Copyright (C) 2001 Python Software Foundation, www.python.org
+
+from __future__ import annotations
+import builtins as builtin_mod
+import enum
+import glob
+import inspect
+import itertools
+import keyword
+import os
+import re
+import string
+import sys
+import tokenize
+import time
+import unicodedata
+import uuid
+import warnings
+from ast import literal_eval
+from collections import defaultdict
+from contextlib import contextmanager
+from dataclasses import dataclass
+from functools import cached_property, partial
+from types import SimpleNamespace
+from typing import (
+ Iterable,
+ Iterator,
+ List,
+ Tuple,
+ Union,
+ Any,
+ Sequence,
+ Dict,
+ Optional,
+ TYPE_CHECKING,
+ Set,
+ Sized,
+ TypeVar,
+ Literal,
+)
+
+from IPython.core.guarded_eval import guarded_eval, EvaluationContext
+from IPython.core.error import TryNext
+from IPython.core.inputtransformer2 import ESC_MAGIC
+from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
+from IPython.core.oinspect import InspectColors
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import generics
+from IPython.utils.decorators import sphinx_options
+from IPython.utils.dir2 import dir2, get_real_method
+from IPython.utils.docs import GENERATING_DOCUMENTATION
+from IPython.utils.path import ensure_dir_exists
+from IPython.utils.process import arg_split
+from traitlets import (
+ Bool,
+ Enum,
+ Int,
+ List as ListTrait,
+ Unicode,
+ Dict as DictTrait,
+ Union as UnionTrait,
+ observe,
+)
+from traitlets.config.configurable import Configurable
+
+import __main__
+
+# skip module docstests
+__skip_doctest__ = True
+
+
+try:
+ import jedi
+ jedi.settings.case_insensitive_completion = False
+ import jedi.api.helpers
+ import jedi.api.classes
+ JEDI_INSTALLED = True
+except ImportError:
+ JEDI_INSTALLED = False
+
+
+if TYPE_CHECKING or GENERATING_DOCUMENTATION and sys.version_info >= (3, 11):
+ from typing import cast
+ from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
+else:
+ from typing import Generic
+
+ def cast(type_, obj):
+ """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
+ return obj
+
+ # do not require on runtime
+ NotRequired = Tuple # requires Python >=3.11
+ TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
+ Protocol = object # requires Python >=3.8
+ TypeAlias = Any # requires Python >=3.10
+ TypeGuard = Generic # requires Python >=3.10
+if GENERATING_DOCUMENTATION:
+ from typing import TypedDict
+
+# -----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# Ranges where we have most of the valid unicode names. We could be finer
+# grained, but is it worth it for performance? While unicode has characters in
+# the range 0, 0x110000, we seem to have names for only about 10% of those
+# (131808 as I write this). With the ranges below we cover them all, with a
+# density of ~67%; the biggest next gap we could consider only adds about 1%
+# density, and there are 600 gaps that would need hard coding.
+_UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
+
+# Public API
+__all__ = ["Completer", "IPCompleter"]
+
+if sys.platform == 'win32':
+ PROTECTABLES = ' '
+else:
+ PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
+
+# Protect against returning an enormous number of completions which the frontend
+# may have trouble processing.
+MATCHES_LIMIT = 500
+
+# Completion type reported when no type can be inferred.
+_UNKNOWN_TYPE = "<unknown>"
+
+# sentinel value to signal lack of a match
+not_found = object()
+
+class ProvisionalCompleterWarning(FutureWarning):
+ """
+    Exception raised by an experimental feature in this module.
+
+ Wrap code in :any:`provisionalcompleter` context manager if you
+ are certain you want to use an unstable feature.
+ """
+ pass
+
+warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
+
+
+@skip_doctest
+@contextmanager
+def provisionalcompleter(action='ignore'):
+ """
+    This context manager has to be used in any place where unstable completer
+    behaviors or APIs may be called.
+
+ >>> with provisionalcompleter():
+ ... completer.do_experimental_things() # works
+
+ >>> completer.do_experimental_things() # raises.
+
+ .. note::
+
+ Unstable
+
+    By using this context manager you agree that the API in use may change
+    without warning, and that you won't complain if it does so.
+
+ You also understand that, if the API is not to your liking, you should report
+ a bug to explain your use case upstream.
+
+ We'll be happy to get your feedback, feature requests, and improvements on
+ any of the unstable APIs!
+ """
+ with warnings.catch_warnings():
+ warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
+ yield
+
+
+def has_open_quotes(s):
+ """Return whether a string has open quotes.
+
+    This simply checks whether the number of quote characters of either type
+    in the string is odd.
+
+ Returns
+ -------
+ If there is an open quote, the quote character is returned. Else, return
+ False.
+ """
+ # We check " first, then ', so complex cases with nested quotes will get
+ # the " to take precedence.
+ if s.count('"') % 2:
+ return '"'
+ elif s.count("'") % 2:
+ return "'"
+ else:
+ return False
+
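+# A rough usage sketch of the helper above (inputs are made up):
+#
+#     >>> has_open_quotes('print("hello')
+#     '"'
+#     >>> has_open_quotes("a = 'done'")
+#     False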
+
+def protect_filename(s, protectables=PROTECTABLES):
+ """Escape a string to protect certain characters."""
+ if set(s) & set(protectables):
+ if sys.platform == "win32":
+ return '"' + s + '"'
+ else:
+ return "".join(("\\" + c if c in protectables else c) for c in s)
+ else:
+ return s
+
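+# Illustrative sketch (POSIX behaviour assumed; on win32 the name is wrapped in quotes instead):
+#
+#     >>> protect_filename("my file.txt")
+#     'my\\ file.txt'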
+
+def expand_user(path:str) -> Tuple[str, bool, str]:
+ """Expand ``~``-style usernames in strings.
+
+ This is similar to :func:`os.path.expanduser`, but it computes and returns
+ extra information that will be useful if the input was being used in
+ computing completions, and you wish to return the completions with the
+ original '~' instead of its expanded value.
+
+ Parameters
+ ----------
+ path : str
+ String to be expanded. If no ~ is present, the output is the same as the
+ input.
+
+ Returns
+ -------
+ newpath : str
+ Result of ~ expansion in the input path.
+ tilde_expand : bool
+ Whether any expansion was performed or not.
+ tilde_val : str
+ The value that ~ was replaced with.
+ """
+ # Default values
+ tilde_expand = False
+ tilde_val = ''
+ newpath = path
+
+ if path.startswith('~'):
+ tilde_expand = True
+ rest = len(path)-1
+ newpath = os.path.expanduser(path)
+ if rest:
+ tilde_val = newpath[:-rest]
+ else:
+ tilde_val = newpath
+
+ return newpath, tilde_expand, tilde_val
+
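+# Hypothetical example; the expanded value depends on the local home directory:
+#
+#     >>> expand_user('~/data')
+#     ('/home/alice/data', True, '/home/alice')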
+
+def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
+ """Does the opposite of expand_user, with its outputs.
+ """
+ if tilde_expand:
+ return path.replace(tilde_val, '~')
+ else:
+ return path
+
+
+def completions_sorting_key(word):
+ """key for sorting completions
+
+ This does several things:
+
+ - Demote any completions starting with underscores to the end
+    - Insert any %magic and %%cellmagic completions into alphabetical order
+      by their name
+ """
+ prio1, prio2 = 0, 0
+
+ if word.startswith('__'):
+ prio1 = 2
+ elif word.startswith('_'):
+ prio1 = 1
+
+ if word.endswith('='):
+ prio1 = -1
+
+ if word.startswith('%%'):
+ # If there's another % in there, this is something else, so leave it alone
+ if not "%" in word[2:]:
+ word = word[2:]
+ prio2 = 2
+ elif word.startswith('%'):
+ if not "%" in word[1:]:
+ word = word[1:]
+ prio2 = 1
+
+ return prio1, word, prio2
+
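+# Sketch of the resulting order (made-up candidates):
+#
+#     >>> sorted(['_private', 'zeta', '%%time', 'alpha'], key=completions_sorting_key)
+#     ['alpha', '%%time', 'zeta', '_private']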
+
+class _FakeJediCompletion:
+ """
+ This is a workaround to communicate to the UI that Jedi has crashed and to
+    report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
+
+ Added in IPython 6.0 so should likely be removed for 7.0
+
+ """
+
+ def __init__(self, name):
+
+ self.name = name
+ self.complete = name
+ self.type = 'crashed'
+ self.name_with_symbols = name
+ self.signature = ""
+ self._origin = "fake"
+ self.text = "crashed"
+
+ def __repr__(self):
+ return '<Fake completion object jedi has crashed>'
+
+
+_JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
+
+
+class Completion:
+ """
+ Completion object used and returned by IPython completers.
+
+ .. warning::
+
+ Unstable
+
+        This class is unstable, its API may change without warning.
+        It will also raise unless used in the proper context manager.
+
+    This acts as a middle-ground :any:`Completion` object between the
+    :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
+    object. While Jedi needs a lot of information about the evaluator and how
+    the code should be run/inspected, Prompt Toolkit (and other frontends)
+    mostly needs user-facing information.
+
+    - Which range should be replaced by what.
+    - Some metadata (like the completion type), or meta-information to be
+      displayed to the user.
+
+    For debugging purposes we can also store the origin of the completion (``jedi``,
+ ``IPython.python_matches``, ``IPython.magics_matches``...).
+ """
+
+ __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
+
+ def __init__(
+ self,
+ start: int,
+ end: int,
+ text: str,
+ *,
+ type: Optional[str] = None,
+ _origin="",
+ signature="",
+ ) -> None:
+ warnings.warn(
+ "``Completion`` is a provisional API (as of IPython 6.0). "
+ "It may change without warnings. "
+ "Use in corresponding context manager.",
+ category=ProvisionalCompleterWarning,
+ stacklevel=2,
+ )
+
+ self.start = start
+ self.end = end
+ self.text = text
+ self.type = type
+ self.signature = signature
+ self._origin = _origin
+
+ def __repr__(self):
+ return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
+ (self.start, self.end, self.text, self.type or '?', self.signature or '?')
+
+ def __eq__(self, other) -> bool:
+ """
+        Equality and hash do not hash the type (as some completers may not be
+        able to infer it), but are used to (partially) de-duplicate
+        completions.
+
+        Completely de-duplicating completions is a bit trickier than just
+        comparing them, as it depends on the surrounding text, which
+        Completion objects are not aware of.
+ """
+ return self.start == other.start and \
+ self.end == other.end and \
+ self.text == other.text
+
+ def __hash__(self):
+ return hash((self.start, self.end, self.text))
+
+
+class SimpleCompletion:
+ """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
+
+ .. warning::
+
+ Provisional
+
+ This class is used to describe the currently supported attributes of
+ simple completion items, and any additional implementation details
+ should not be relied on. Additional attributes may be included in
+        future versions, and the meaning of ``text`` may be disambiguated from
+        its current dual meaning of "text to insert" and "text to be used as a label".
+ """
+
+ __slots__ = ["text", "type"]
+
+ def __init__(self, text: str, *, type: Optional[str] = None):
+ self.text = text
+ self.type = type
+
+ def __repr__(self):
+ return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
+
+
+class _MatcherResultBase(TypedDict):
+ """Definition of dictionary to be returned by new-style Matcher (API v2)."""
+
+    #: Suffix of the provided ``CompletionContext.token``; if not given, defaults to the full token.
+ matched_fragment: NotRequired[str]
+
+ #: Whether to suppress results from all other matchers (True), some
+ #: matchers (set of identifiers) or none (False); default is False.
+ suppress: NotRequired[Union[bool, Set[str]]]
+
+ #: Identifiers of matchers which should NOT be suppressed when this matcher
+ #: requests to suppress all other matchers; defaults to an empty set.
+ do_not_suppress: NotRequired[Set[str]]
+
+    #: Are completions already ordered and should be left as-is? Default is False.
+ ordered: NotRequired[bool]
+
+
+@sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
+class SimpleMatcherResult(_MatcherResultBase, TypedDict):
+ """Result of new-style completion matcher."""
+
+ # note: TypedDict is added again to the inheritance chain
+ # in order to get __orig_bases__ for documentation
+
+ #: List of candidate completions
+ completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
+
+
+class _JediMatcherResult(_MatcherResultBase):
+ """Matching result returned by Jedi (will be processed differently)"""
+
+ #: list of candidate completions
+ completions: Iterator[_JediCompletionLike]
+
+
+AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
+AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
+
+
+@dataclass
+class CompletionContext:
+ """Completion context provided as an argument to matchers in the Matcher API v2."""
+
+ # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
+ # which was not explicitly visible as an argument of the matcher, making any refactor
+ # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
+ # from the completer, and make substituting them in sub-classes easier.
+
+ #: Relevant fragment of code directly preceding the cursor.
+ #: The extraction of token is implemented via splitter heuristic
+ #: (following readline behaviour for legacy reasons), which is user configurable
+ #: (by switching the greedy mode).
+ token: str
+
+ #: The full available content of the editor or buffer
+ full_text: str
+
+ #: Cursor position in the line (the same for ``full_text`` and ``text``).
+ cursor_position: int
+
+ #: Cursor line in ``full_text``.
+ cursor_line: int
+
+ #: The maximum number of completions that will be used downstream.
+ #: Matchers can use this information to abort early.
+    #: The built-in Jedi matcher is currently exempt from this limit.
+ # If not given, return all possible completions.
+ limit: Optional[int]
+
+ @cached_property
+ def text_until_cursor(self) -> str:
+ return self.line_with_cursor[: self.cursor_position]
+
+ @cached_property
+ def line_with_cursor(self) -> str:
+ return self.full_text.split("\n")[self.cursor_line]
+
+
+#: Matcher results for API v2.
+MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
+
+
+class _MatcherAPIv1Base(Protocol):
+ def __call__(self, text: str) -> List[str]:
+ """Call signature."""
+ ...
+
+ #: Used to construct the default matcher identifier
+ __qualname__: str
+
+
+class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
+ #: API version
+ matcher_api_version: Optional[Literal[1]]
+
+ def __call__(self, text: str) -> List[str]:
+ """Call signature."""
+ ...
+
+
+#: Protocol describing Matcher API v1.
+MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
+
+
+class MatcherAPIv2(Protocol):
+ """Protocol describing Matcher API v2."""
+
+ #: API version
+ matcher_api_version: Literal[2] = 2
+
+ def __call__(self, context: CompletionContext) -> MatcherResult:
+ """Call signature."""
+ ...
+
+ #: Used to construct the default matcher identifier
+ __qualname__: str
+
+
+Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
+
+
+def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
+ api_version = _get_matcher_api_version(matcher)
+ return api_version == 1
+
+
+def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
+ api_version = _get_matcher_api_version(matcher)
+ return api_version == 2
+
+
+def _is_sizable(value: Any) -> TypeGuard[Sized]:
+ """Determines whether objects is sizable"""
+ return hasattr(value, "__len__")
+
+
+def _is_iterator(value: Any) -> TypeGuard[Iterator]:
+ """Determines whether objects is sizable"""
+ return hasattr(value, "__next__")
+
+
+def has_any_completions(result: MatcherResult) -> bool:
+ """Check if any result includes any completions."""
+ completions = result["completions"]
+ if _is_sizable(completions):
+ return len(completions) != 0
+ if _is_iterator(completions):
+ try:
+ old_iterator = completions
+ first = next(old_iterator)
+ result["completions"] = cast(
+ Iterator[SimpleCompletion],
+ itertools.chain([first], old_iterator),
+ )
+ return True
+ except StopIteration:
+ return False
+ raise ValueError(
+ "Completions returned by matcher need to be an Iterator or a Sizable"
+ )
+
+
+def completion_matcher(
+ *,
+ priority: Optional[float] = None,
+ identifier: Optional[str] = None,
+ api_version: int = 1,
+):
+ """Adds attributes describing the matcher.
+
+ Parameters
+ ----------
+ priority : Optional[float]
+ The priority of the matcher, determines the order of execution of matchers.
+ Higher priority means that the matcher will be executed first. Defaults to 0.
+ identifier : Optional[str]
+        Identifier of the matcher, allowing users to modify its behaviour via
+        traitlets; also used for debugging (it will be passed as ``origin``
+        with the completions).
+
+        Defaults to the matcher function's ``__qualname__`` (for example,
+        ``IPCompleter.file_matcher`` for the built-in matcher defined
+        as a ``file_matcher`` method of the ``IPCompleter`` class).
+    api_version : int, optional
+ version of the Matcher API used by this matcher.
+ Currently supported values are 1 and 2.
+ Defaults to 1.
+ """
+
+ def wrapper(func: Matcher):
+ func.matcher_priority = priority or 0 # type: ignore
+ func.matcher_identifier = identifier or func.__qualname__ # type: ignore
+ func.matcher_api_version = api_version # type: ignore
+ if TYPE_CHECKING:
+ if api_version == 1:
+ func = cast(MatcherAPIv1, func)
+ elif api_version == 2:
+ func = cast(MatcherAPIv2, func)
+ return func
+
+ return wrapper
+
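+# Minimal sketch of registering a custom API v2 matcher with the decorator above
+# (``my_matcher`` is a hypothetical name, not part of IPython); such callables are
+# typically appended to ``Completer.custom_matchers``:
+#
+#     @completion_matcher(api_version=2, identifier="my_matcher")
+#     def my_matcher(context: CompletionContext) -> SimpleMatcherResult:
+#         completions = [SimpleCompletion(text="example", type="demo")]
+#         return {"completions": completions, "suppress": False}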
+
+def _get_matcher_priority(matcher: Matcher):
+ return getattr(matcher, "matcher_priority", 0)
+
+
+def _get_matcher_id(matcher: Matcher):
+ return getattr(matcher, "matcher_identifier", matcher.__qualname__)
+
+
+def _get_matcher_api_version(matcher):
+ return getattr(matcher, "matcher_api_version", 1)
+
+
+context_matcher = partial(completion_matcher, api_version=2)
+
+
+_IC = Iterable[Completion]
+
+
+def _deduplicate_completions(text: str, completions: _IC)-> _IC:
+ """
+ Deduplicate a set of completions.
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+
+ Parameters
+ ----------
+ text : str
+ text that should be completed.
+ completions : Iterator[Completion]
+ iterator over the completions to deduplicate
+
+ Yields
+ ------
+ `Completions` objects
+        Completions coming from multiple sources may be different but end up having
+        the same effect when applied to ``text``. If this is the case, this function
+        considers them equal and only emits the first one encountered.
+        It is not folded into `completions()` yet, for debugging purposes and to
+        detect when the IPython completer returns things that Jedi does not, but it
+        should be at some point.
+ """
+ completions = list(completions)
+ if not completions:
+ return
+
+ new_start = min(c.start for c in completions)
+ new_end = max(c.end for c in completions)
+
+ seen = set()
+ for c in completions:
+ new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
+ if new_text not in seen:
+ yield c
+ seen.add(new_text)
+
+
+def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
+ """
+ Rectify a set of completions to all have the same ``start`` and ``end``
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+        It will also raise unless used in the proper context manager.
+
+ Parameters
+ ----------
+ text : str
+ text that should be completed.
+ completions : Iterator[Completion]
+ iterator over the completions to rectify
+ _debug : bool
+ Log failed completion
+
+ Notes
+ -----
+    The :any:`jedi.api.classes.Completion` objects returned by Jedi may not have the
+    same start and end, though the Jupyter Protocol requires them to behave as if
+    they do. This will readjust the completions to have the same ``start`` and
+    ``end`` by padding both extremities with surrounding text.
+
+    During stabilisation this should support a ``_debug`` option to log which
+    completions are returned by the IPython completer but not found in Jedi, in
+    order to make upstream bug reports.
+ """
+ warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
+ "It may change without warnings. "
+ "Use in corresponding context manager.",
+ category=ProvisionalCompleterWarning, stacklevel=2)
+
+ completions = list(completions)
+ if not completions:
+ return
+ starts = (c.start for c in completions)
+ ends = (c.end for c in completions)
+
+ new_start = min(starts)
+ new_end = max(ends)
+
+ seen_jedi = set()
+ seen_python_matches = set()
+ for c in completions:
+ new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
+ if c._origin == 'jedi':
+ seen_jedi.add(new_text)
+ elif c._origin == 'IPCompleter.python_matches':
+ seen_python_matches.add(new_text)
+ yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
+ diff = seen_python_matches.difference(seen_jedi)
+ if diff and _debug:
+ print('IPython.python matches have extras:', diff)
+
+
+if sys.platform == 'win32':
+ DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
+else:
+ DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
+
+GREEDY_DELIMS = ' =\r\n'
+
+
+class CompletionSplitter(object):
+ """An object to split an input line in a manner similar to readline.
+
+ By having our own implementation, we can expose readline-like completion in
+ a uniform manner to all frontends. This object only needs to be given the
+ line of text to be split and the cursor position on said line, and it
+ returns the 'word' to be completed on at the cursor after splitting the
+ entire line.
+
+ What characters are used as splitting delimiters can be controlled by
+ setting the ``delims`` attribute (this is a property that internally
+ automatically builds the necessary regular expression)"""
+
+ # Private interface
+
+ # A string of delimiter characters. The default value makes sense for
+ # IPython's most typical usage patterns.
+ _delims = DELIMS
+
+ # The expression (a normal string) to be compiled into a regular expression
+ # for actual splitting. We store it as an attribute mostly for ease of
+ # debugging, since this type of code can be so tricky to debug.
+ _delim_expr = None
+
+ # The regular expression that does the actual splitting
+ _delim_re = None
+
+ def __init__(self, delims=None):
+ delims = CompletionSplitter._delims if delims is None else delims
+ self.delims = delims
+
+ @property
+ def delims(self):
+ """Return the string of delimiter characters."""
+ return self._delims
+
+ @delims.setter
+ def delims(self, delims):
+ """Set the delimiters for line splitting."""
+ expr = '[' + ''.join('\\'+ c for c in delims) + ']'
+ self._delim_re = re.compile(expr)
+ self._delims = delims
+ self._delim_expr = expr
+
+ def split_line(self, line, cursor_pos=None):
+ """Split a line of text with a cursor at the given position.
+ """
+ l = line if cursor_pos is None else line[:cursor_pos]
+ return self._delim_re.split(l)[-1]
+
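+# Behaviour sketch (made-up input):
+#
+#     >>> CompletionSplitter().split_line('print(foo.ba')
+#     'foo.ba'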
+
+
+class Completer(Configurable):
+
+ greedy = Bool(
+ False,
+ help="""Activate greedy completion.
+
+ .. deprecated:: 8.8
+ Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
+
+ When enabled in IPython 8.8 or newer, changes configuration as follows:
+
+ - ``Completer.evaluation = 'unsafe'``
+ - ``Completer.auto_close_dict_keys = True``
+ """,
+ ).tag(config=True)
+
+ evaluation = Enum(
+ ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
+ default_value="limited",
+ help="""Policy for code evaluation under completion.
+
+    Successive options allow enabling more eager evaluation for better
+ completion suggestions, including for nested dictionaries, nested lists,
+ or even results of function calls.
+ Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
+ code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
+
+ Allowed values are:
+
+ - ``forbidden``: no evaluation of code is permitted,
+ - ``minimal``: evaluation of literals and access to built-in namespace;
+        no item/attribute evaluation, no access to locals/globals,
+ no evaluation of any operations or comparisons.
+ - ``limited``: access to all namespaces, evaluation of hard-coded methods
+ (for example: :any:`dict.keys`, :any:`object.__getattr__`,
+ :any:`object.__getitem__`) on allow-listed objects (for example:
+ :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
+ - ``unsafe``: evaluation of all methods and function calls but not of
+ syntax with side-effects like `del x`,
+ - ``dangerous``: completely arbitrary evaluation.
+ """,
+ ).tag(config=True)
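+
+    # Illustrative configuration sketch: this policy can be set from a config file
+    # with ``c.IPCompleter.evaluation = "minimal"`` or interactively with
+    # ``%config IPCompleter.evaluation = "minimal"``.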
+
+ use_jedi = Bool(default_value=JEDI_INSTALLED,
+ help="Experimental: Use Jedi to generate autocompletions. "
+ "Default to True if jedi is installed.").tag(config=True)
+
+ jedi_compute_type_timeout = Int(default_value=400,
+ help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
+ Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
+ performance by preventing jedi to build its cache.
+ """).tag(config=True)
+
+ debug = Bool(default_value=False,
+ help='Enable debug for the Completer. Mostly print extra '
+ 'information for experimental jedi integration.')\
+ .tag(config=True)
+
+ backslash_combining_completions = Bool(True,
+ help="Enable unicode completions, e.g. \\alpha<tab> . "
+ "Includes completion of latex commands, unicode names, and expanding "
+ "unicode characters back to latex commands.").tag(config=True)
+
+ auto_close_dict_keys = Bool(
+ False,
+ help="""
+ Enable auto-closing dictionary keys.
+
+        When enabled, string keys will be suffixed with a final quote
+ (matching the opening quote), tuple keys will also receive a
+ separating comma if needed, and keys which are final will
+ receive a closing bracket (``]``).
+ """,
+ ).tag(config=True)
+
+ def __init__(self, namespace=None, global_namespace=None, **kwargs):
+ """Create a new completer for the command line.
+
+ Completer(namespace=ns, global_namespace=ns2) -> completer instance.
+
+ If unspecified, the default namespace where completions are performed
+ is __main__ (technically, __main__.__dict__). Namespaces should be
+ given as dictionaries.
+
+ An optional second namespace can be given. This allows the completer
+ to handle cases where both the local and global scopes need to be
+ distinguished.
+ """
+
+ # Don't bind to namespace quite yet, but flag whether the user wants a
+ # specific namespace or to use __main__.__dict__. This will allow us
+ # to bind to __main__.__dict__ at completion time, not now.
+ if namespace is None:
+ self.use_main_ns = True
+ else:
+ self.use_main_ns = False
+ self.namespace = namespace
+
+ # The global namespace, if given, can be bound directly
+ if global_namespace is None:
+ self.global_namespace = {}
+ else:
+ self.global_namespace = global_namespace
+
+ self.custom_matchers = []
+
+ super(Completer, self).__init__(**kwargs)
+
+ def complete(self, text, state):
+ """Return the next possible completion for 'text'.
+
+ This is called successively with state == 0, 1, 2, ... until it
+ returns None. The completion should begin with 'text'.
+
+ """
+ if self.use_main_ns:
+ self.namespace = __main__.__dict__
+
+ if state == 0:
+ if "." in text:
+ self.matches = self.attr_matches(text)
+ else:
+ self.matches = self.global_matches(text)
+ try:
+ return self.matches[state]
+ except IndexError:
+ return None
+
+ def global_matches(self, text):
+ """Compute matches when text is a simple name.
+
+ Return a list of all keywords, built-in functions and names currently
+ defined in self.namespace or self.global_namespace that match.
+
+ """
+ matches = []
+ match_append = matches.append
+ n = len(text)
+ for lst in [
+ keyword.kwlist,
+ builtin_mod.__dict__.keys(),
+ list(self.namespace.keys()),
+ list(self.global_namespace.keys()),
+ ]:
+ for word in lst:
+ if word[:n] == text and word != "__builtins__":
+ match_append(word)
+
+ snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
+ for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
+ shortened = {
+ "_".join([sub[0] for sub in word.split("_")]): word
+ for word in lst
+ if snake_case_re.match(word)
+ }
+ for word in shortened.keys():
+ if word[:n] == text and word != "__builtins__":
+ match_append(shortened[word])
+ return matches
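+
+    # Abbreviation sketch for the snake-case matching above (assumed namespace):
+    #
+    #     >>> Completer(namespace={'foo_bar': 1}).global_matches('f_b')
+    #     ['foo_bar']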
+
+ def attr_matches(self, text):
+ """Compute matches when text contains a dot.
+
+ Assuming the text is of the form NAME.NAME....[NAME], and is
+ evaluatable in self.namespace or self.global_namespace, it will be
+ evaluated and its attributes (as revealed by dir()) are used as
+ possible completions. (For class instances, class members are
+ also considered.)
+
+ WARNING: this can still invoke arbitrary C code, if an object
+ with a __getattr__ hook is evaluated.
+
+ """
+ m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
+ if not m2:
+ return []
+ expr, attr = m2.group(1, 2)
+
+ obj = self._evaluate_expr(expr)
+
+ if obj is not_found:
+ return []
+
+ if self.limit_to__all__ and hasattr(obj, '__all__'):
+ words = get__all__entries(obj)
+ else:
+ words = dir2(obj)
+
+ try:
+ words = generics.complete_object(obj, words)
+ except TryNext:
+ pass
+ except AssertionError:
+ raise
+ except Exception:
+ # Silence errors from completion function
+ pass
+ # Build match list to return
+ n = len(attr)
+
+ # Note: ideally we would just return words here and the prefix
+ # reconciliator would know that we intend to append to rather than
+ # replace the input text; this requires refactoring to return range
+ # which ought to be replaced (as does jedi).
+ tokens = _parse_tokens(expr)
+ rev_tokens = reversed(tokens)
+ skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
+ name_turn = True
+
+ parts = []
+ for token in rev_tokens:
+ if token.type in skip_over:
+ continue
+ if token.type == tokenize.NAME and name_turn:
+ parts.append(token.string)
+ name_turn = False
+ elif token.type == tokenize.OP and token.string == "." and not name_turn:
+ parts.append(token.string)
+ name_turn = True
+ else:
+ # short-circuit if not empty nor name token
+ break
+
+ prefix_after_space = "".join(reversed(parts))
+
+ return ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr]
+
+ def _evaluate_expr(self, expr):
+ obj = not_found
+ done = False
+ while not done and expr:
+ try:
+ obj = guarded_eval(
+ expr,
+ EvaluationContext(
+ globals=self.global_namespace,
+ locals=self.namespace,
+ evaluation=self.evaluation,
+ ),
+ )
+ done = True
+ except Exception as e:
+ if self.debug:
+ print("Evaluation exception", e)
+ # trim the expression to remove any invalid prefix
+ # e.g. user starts `(d[`, so we get `expr = '(d'`,
+ # where parenthesis is not closed.
+ # TODO: make this faster by reusing parts of the computation?
+ expr = expr[1:]
+ return obj
+
+def get__all__entries(obj):
+ """returns the strings in the __all__ attribute"""
+ try:
+ words = getattr(obj, '__all__')
+ except:
+ return []
+
+ return [w for w in words if isinstance(w, str)]
+
+
+class _DictKeyState(enum.Flag):
+ """Represent state of the key match in context of other possible matches.
+
+ - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
+    - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there are no tuple members to add beyond `'b'`.
+    - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as more members can follow `'a'`.
+    - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM | IN_TUPLE}` since `'a'` is both a complete key and the start of a longer tuple key.
+ """
+
+ BASELINE = 0
+ END_OF_ITEM = enum.auto()
+ END_OF_TUPLE = enum.auto()
+ IN_TUPLE = enum.auto()
+
+
+def _parse_tokens(c):
+ """Parse tokens even if there is an error."""
+ tokens = []
+ token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
+ while True:
+ try:
+ tokens.append(next(token_generator))
+ except tokenize.TokenError:
+ return tokens
+ except StopIteration:
+ return tokens
+
+
+def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
+ """Match any valid Python numeric literal in a prefix of dictionary keys.
+
+ References:
+ - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
+ - https://docs.python.org/3/library/tokenize.html
+ """
+ if prefix[-1].isspace():
+ # if user typed a space we do not have anything to complete
+ # even if there was a valid number token before
+ return None
+ tokens = _parse_tokens(prefix)
+ rev_tokens = reversed(tokens)
+ skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
+ number = None
+ for token in rev_tokens:
+ if token.type in skip_over:
+ continue
+ if number is None:
+ if token.type == tokenize.NUMBER:
+ number = token.string
+ continue
+ else:
+ # we did not match a number
+ return None
+ if token.type == tokenize.OP:
+ if token.string == ",":
+ break
+ if token.string in {"+", "-"}:
+ number = token.string + number
+ else:
+ return None
+ return number
+
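+# Behaviour sketch (made-up prefixes):
+#
+#     >>> _match_number_in_dict_key_prefix("-12")
+#     '-12'
+#     >>> _match_number_in_dict_key_prefix("abc") is None
+#     True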
+
+_INT_FORMATS = {
+ "0b": bin,
+ "0o": oct,
+ "0x": hex,
+}
+
+
+def match_dict_keys(
+ keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
+ prefix: str,
+ delims: str,
+ extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
+) -> Tuple[str, int, Dict[str, _DictKeyState]]:
+ """Used by dict_key_matches, matching the prefix to a list of keys
+
+ Parameters
+ ----------
+ keys
+ list of keys in dictionary currently being completed.
+ prefix
+ Part of the text already typed by the user. E.g. `mydict[b'fo`
+ delims
+ String of delimiters to consider when finding the current key.
+ extra_prefix : optional
+ Part of the text already typed in multi-key index cases. E.g. for
+ `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
+
+ Returns
+ -------
+ A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
+    ``quote`` being the quote that needs to be used to close the current string,
+    ``token_start`` the position where the replacement should start occurring, and
+    ``matched`` a dictionary mapping each replacement/completion string to its
+    key state (a ``_DictKeyState`` value).
+ """
+ prefix_tuple = extra_prefix if extra_prefix else ()
+
+ prefix_tuple_size = sum(
+ [
+ # for pandas, do not count slices as taking space
+ not isinstance(k, slice)
+ for k in prefix_tuple
+ ]
+ )
+ text_serializable_types = (str, bytes, int, float, slice)
+
+ def filter_prefix_tuple(key):
+ # Reject too short keys
+ if len(key) <= prefix_tuple_size:
+ return False
+ # Reject keys which cannot be serialised to text
+ for k in key:
+ if not isinstance(k, text_serializable_types):
+ return False
+ # Reject keys that do not match the prefix
+ for k, pt in zip(key, prefix_tuple):
+ if k != pt and not isinstance(pt, slice):
+ return False
+ # All checks passed!
+ return True
+
+ filtered_key_is_final: Dict[
+ Union[str, bytes, int, float], _DictKeyState
+ ] = defaultdict(lambda: _DictKeyState.BASELINE)
+
+ for k in keys:
+ # If at least one of the matches is not final, mark as undetermined.
+ # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
+ # `111` appears final on first match but is not final on the second.
+
+ if isinstance(k, tuple):
+ if filter_prefix_tuple(k):
+ key_fragment = k[prefix_tuple_size]
+ filtered_key_is_final[key_fragment] |= (
+ _DictKeyState.END_OF_TUPLE
+ if len(k) == prefix_tuple_size + 1
+ else _DictKeyState.IN_TUPLE
+ )
+ elif prefix_tuple_size > 0:
+ # we are completing a tuple but this key is not a tuple,
+ # so we should ignore it
+ pass
+ else:
+ if isinstance(k, text_serializable_types):
+ filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
+
+ filtered_keys = filtered_key_is_final.keys()
+
+ if not prefix:
+ return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
+
+ quote_match = re.search("(?:\"|')", prefix)
+ is_user_prefix_numeric = False
+
+ if quote_match:
+ quote = quote_match.group()
+ valid_prefix = prefix + quote
+ try:
+ prefix_str = literal_eval(valid_prefix)
+ except Exception:
+ return "", 0, {}
+ else:
+ # If it does not look like a string, let's assume
+ # we are dealing with a number or variable.
+ number_match = _match_number_in_dict_key_prefix(prefix)
+
+ # We do not want the key matcher to suggest variable names so we yield:
+ if number_match is None:
+            # The alternative would be to assume that the user forgot the quote
+ # and if the substring matches, suggest adding it at the start.
+ return "", 0, {}
+
+ prefix_str = number_match
+ is_user_prefix_numeric = True
+ quote = ""
+
+ pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
+ token_match = re.search(pattern, prefix, re.UNICODE)
+ assert token_match is not None # silence mypy
+ token_start = token_match.start()
+ token_prefix = token_match.group()
+
+ matched: Dict[str, _DictKeyState] = {}
+
+ str_key: Union[str, bytes]
+
+ for key in filtered_keys:
+ if isinstance(key, (int, float)):
+ # User typed a number but this key is not a number.
+ if not is_user_prefix_numeric:
+ continue
+ str_key = str(key)
+ if isinstance(key, int):
+ int_base = prefix_str[:2].lower()
+ # if user typed integer using binary/oct/hex notation:
+ if int_base in _INT_FORMATS:
+ int_format = _INT_FORMATS[int_base]
+ str_key = int_format(key)
+ else:
+ # User typed a string but this key is a number.
+ if is_user_prefix_numeric:
+ continue
+ str_key = key
+ try:
+ if not str_key.startswith(prefix_str):
+ continue
+        except (AttributeError, TypeError, UnicodeError):
+ # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
+ continue
+
+ # reformat remainder of key to begin with prefix
+ rem = str_key[len(prefix_str) :]
+ # force repr wrapped in '
+ rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
+ rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
+ if quote == '"':
+ # The entered prefix is quoted with ",
+ # but the match is quoted with '.
+ # A contained " hence needs escaping for comparison:
+ rem_repr = rem_repr.replace('"', '\\"')
+
+ # then reinsert prefix from start of token
+ match = "%s%s" % (token_prefix, rem_repr)
+
+ matched[match] = filtered_key_is_final[key]
+ return quote, token_start, matched
+
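+# Hedged sketch of the matching behaviour (keys and prefix are made up; the
+# values of ``matched`` are ``_DictKeyState`` flags, elided here):
+#
+#     >>> quote, start, matched = match_dict_keys(["foo", "bar"], "'f", DELIMS)
+#     >>> quote, start, sorted(matched)
+#     ("'", 1, ['foo'])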
+
+def cursor_to_position(text:str, line:int, column:int)->int:
+ """
+ Convert the (line,column) position of the cursor in text to an offset in a
+ string.
+
+ Parameters
+ ----------
+ text : str
+ The text in which to calculate the cursor offset
+ line : int
+ Line of the cursor; 0-indexed
+ column : int
+        Column of the cursor; 0-indexed
+
+ Returns
+ -------
+ Position of the cursor in ``text``, 0-indexed.
+
+ See Also
+ --------
+ position_to_cursor : reciprocal of this function
+
+ """
+ lines = text.split('\n')
+ assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
+
+ return sum(len(l) + 1 for l in lines[:line]) + column
+
+def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
+ """
+    Convert the position of the cursor in text (0-indexed) to a line
+    number (0-indexed) and a column number (0-indexed) pair.
+
+ Position should be a valid position in ``text``.
+
+ Parameters
+ ----------
+ text : str
+ The text in which to calculate the cursor offset
+ offset : int
+ Position of the cursor in ``text``, 0-indexed.
+
+ Returns
+ -------
+ (line, column) : (int, int)
+ Line of the cursor; 0-indexed, column of the cursor 0-indexed
+
+ See Also
+ --------
+ cursor_to_position : reciprocal of this function
+
+ """
+
+ assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
+
+ before = text[:offset]
+    blines = before.split('\n')  # note: str.splitlines would trim a trailing \n
+ line = before.count('\n')
+ col = len(blines[-1])
+ return line, col
+
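+# Round-trip sketch (made-up buffer):
+#
+#     >>> cursor_to_position("ab\ncd", 1, 1)
+#     4
+#     >>> position_to_cursor("ab\ncd", 4)
+#     (1, 1)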
+
+def _safe_isinstance(obj, module, class_name, *attrs):
+ """Checks if obj is an instance of module.class_name if loaded
+ """
+ if module in sys.modules:
+ m = sys.modules[module]
+ for attr in [class_name, *attrs]:
+ m = getattr(m, attr)
+ return isinstance(obj, m)
+
+
+@context_matcher()
+def back_unicode_name_matcher(context: CompletionContext):
+ """Match Unicode characters back to Unicode name
+
+    Same as :any:`back_unicode_name_matches`, but adapted to the new Matcher API.
+ """
+ fragment, matches = back_unicode_name_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="unicode", fragment=fragment, suppress_if_matches=True
+ )
+
+
+def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
+ """Match Unicode characters back to Unicode name
+
+ This does ``☃`` -> ``\\snowman``
+
+    Note that snowman is not a valid python3 combining character but will be expanded,
+    though the completion machinery will not recombine it back into the snowman character.
+
+    Nor will this back-complete standard escape sequences like \\n, \\b ...
+
+ .. deprecated:: 8.6
+ You can use :meth:`back_unicode_name_matcher` instead.
+
+    Returns
+    -------
+
+    A tuple with two elements:
+
+    - The Unicode character that was matched (preceded with a backslash), or
+      an empty string,
+    - a sequence (of length 1) with the name of the matched Unicode character,
+      preceded by a backslash, or empty if there is no match.
+ """
+ if len(text)<2:
+ return '', ()
+ maybe_slash = text[-2]
+ if maybe_slash != '\\':
+ return '', ()
+
+ char = text[-1]
+ # no expand on quote for completion in strings.
+ # nor backcomplete standard ascii keys
+ if char in string.ascii_letters or char in ('"',"'"):
+ return '', ()
+    try:
+ unic = unicodedata.name(char)
+ return '\\'+char,('\\'+unic,)
+ except KeyError:
+ pass
+ return '', ()
+
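+# Behaviour sketch (the snowman character is just an example):
+#
+#     >>> back_unicode_name_matches("\\☃")
+#     ('\\☃', ('\\SNOWMAN',))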
+
+@context_matcher()
+def back_latex_name_matcher(context: CompletionContext):
+ """Match latex characters back to unicode name
+
+    Same as :any:`back_latex_name_matches`, but adapted to the new Matcher API.
+ """
+ fragment, matches = back_latex_name_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="latex", fragment=fragment, suppress_if_matches=True
+ )
+
+
+def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
+ """Match latex characters back to unicode name
+
+ This does ``\\ℵ`` -> ``\\aleph``
+
+ .. deprecated:: 8.6
+ You can use :meth:`back_latex_name_matcher` instead.
+ """
+ if len(text)<2:
+ return '', ()
+ maybe_slash = text[-2]
+ if maybe_slash != '\\':
+ return '', ()
+
+
+ char = text[-1]
+ # no expand on quote for completion in strings.
+ # nor backcomplete standard ascii keys
+ if char in string.ascii_letters or char in ('"',"'"):
+ return '', ()
+    try:
+ latex = reverse_latex_symbol[char]
+        # include the leading '\\' in the fragment so the backslash is replaced as well
+ return '\\'+char,[latex]
+ except KeyError:
+ pass
+ return '', ()
+
+
+def _formatparamchildren(parameter) -> str:
+ """
+ Get parameter name and value from Jedi Private API
+
+ Jedi does not expose a simple way to get `param=value` from its API.
+
+ Parameters
+ ----------
+ parameter
+ Jedi's function `Param`
+
+ Returns
+ -------
+ A string like 'a', 'b=1', '*args', '**kwargs'
+
+ """
+ description = parameter.description
+ if not description.startswith('param '):
+        raise ValueError(
+            'Jedi function parameter description has changed format. '
+            'Expected "param ...", found %r.' % description
+        )
+ return description[6:]
+
+def _make_signature(completion)-> str:
+ """
+ Make the signature from a jedi completion
+
+ Parameters
+ ----------
+ completion : jedi.Completion
+        the jedi completion object to build the signature from
+
+ Returns
+ -------
+    a string consisting of the function signature, with the parentheses but
+    without the function name. Example:
+ `(a, *args, b=1, **kwargs)`
+
+ """
+
+    # it looks like this might work on jedi 0.17
+    if hasattr(completion, 'get_signatures'):
+        signatures = completion.get_signatures()
+        if not signatures:
+            return '(?)'
+
+        c0 = signatures[0]
+        return '(' + c0.to_string().split('(', maxsplit=1)[1]
+
+    # fallback for older jedi versions that still expose ``params``
+    return '(%s)' % ', '.join(
+        f for f in (_formatparamchildren(p) for p in completion.params) if f
+    )
+
+
+_CompleteResult = Dict[str, MatcherResult]
+
+
+DICT_MATCHER_REGEX = re.compile(
+ r"""(?x)
+( # match dict-referring - or any get item object - expression
+ .+
+)
+\[ # open bracket
+\s* # and optional whitespace
+# Capture any number of serializable objects (e.g. "a", "b", 'c')
+# and slices
+((?:(?:
+ (?: # closed string
+ [uUbB]? # string prefix (r not handled)
+ (?:
+ '(?:[^']|(?<!\\)\\')*'
+ |
+ "(?:[^"]|(?<!\\)\\")*"
+ )
+ )
+ |
+ # capture integers and slices
+ (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
+ |
+ # integer in bin/hex/oct notation
+ 0[bBxXoO]_?(?:\w|\d)+
+ )
+ \s*,\s*
+)*)
+((?:
+ (?: # unclosed string
+ [uUbB]? # string prefix (r not handled)
+ (?:
+ '(?:[^']|(?<!\\)\\')*
+ |
+ "(?:[^"]|(?<!\\)\\")*
+ )
+ )
+ |
+ # unfinished integer
+ (?:[-+]?\d+)
+ |
+ # integer in bin/hex/oct notation
+ 0[bBxXoO]_?(?:\w|\d)+
+ )
+)?
+$
+"""
+)
+
+
+def _convert_matcher_v1_result_to_v2(
+ matches: Sequence[str],
+ type: str,
+ fragment: Optional[str] = None,
+ suppress_if_matches: bool = False,
+) -> SimpleMatcherResult:
+ """Utility to help with transition"""
+ result = {
+ "completions": [SimpleCompletion(text=match, type=type) for match in matches],
+ "suppress": (True if matches else False) if suppress_if_matches else False,
+ }
+ if fragment is not None:
+ result["matched_fragment"] = fragment
+ return cast(SimpleMatcherResult, result)
+
+
+class IPCompleter(Completer):
+ """Extension of the completer class with IPython-specific features"""
+
+ @observe('greedy')
+ def _greedy_changed(self, change):
+ """update the splitter and readline delims when greedy is changed"""
+ if change["new"]:
+ self.evaluation = "unsafe"
+ self.auto_close_dict_keys = True
+ self.splitter.delims = GREEDY_DELIMS
+ else:
+ self.evaluation = "limited"
+ self.auto_close_dict_keys = False
+ self.splitter.delims = DELIMS
+
+ dict_keys_only = Bool(
+ False,
+ help="""
+ Whether to show dict key matches only.
+
+ (disables all matchers except for `IPCompleter.dict_key_matcher`).
+ """,
+ )
+
+ suppress_competing_matchers = UnionTrait(
+ [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
+ default_value=None,
+ help="""
+ Whether to suppress completions from other *Matchers*.
+
+ When set to ``None`` (default) the matchers will attempt to auto-detect
+ whether suppression of other matchers is desirable. For example, at
+ the beginning of a line followed by `%` we expect a magic completion
+ to be the only applicable option, and after ``my_dict['`` we usually
+ expect a completion with an existing dictionary key.
+
+ If you want to disable this heuristic and see completions from all matchers,
+ set ``IPCompleter.suppress_competing_matchers = False``.
+ To disable the heuristic for specific matchers provide a dictionary mapping:
+ ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
+
+ Set ``IPCompleter.suppress_competing_matchers = True`` to limit
+ completions to the set of matchers with the highest priority;
+ this is equivalent to ``IPCompleter.merge_completions`` and
+ can be beneficial for performance, but will sometimes omit relevant
+ candidates from matchers further down the priority list.
+ """,
+ ).tag(config=True)
+
+ merge_completions = Bool(
+ True,
+ help="""Whether to merge completion results into a single list
+
+ If False, only the completion results from the first non-empty
+ completer will be returned.
+
+ As of version 8.6.0, setting the value to ``False`` is an alias for:
+        ``IPCompleter.suppress_competing_matchers = True``.
+ """,
+ ).tag(config=True)
+
+ disable_matchers = ListTrait(
+ Unicode(),
+ help="""List of matchers to disable.
+
+ The list should contain matcher identifiers (see :any:`completion_matcher`).
+ """,
+ ).tag(config=True)
+
+ omit__names = Enum(
+ (0, 1, 2),
+ default_value=2,
+ help="""Instruct the completer to omit private method names
+
+ Specifically, when completing on ``object.<tab>``.
+
+ When 2 [default]: all names that start with '_' will be excluded.
+
+ When 1: all 'magic' names (``__foo__``) will be excluded.
+
+ When 0: nothing will be excluded.
+ """
+ ).tag(config=True)
+ limit_to__all__ = Bool(False,
+ help="""
+ DEPRECATED as of version 5.0.
+
+ Instruct the completer to use __all__ for the completion
+
+ Specifically, when completing on ``object.<tab>``.
+
+ When True: only those names in obj.__all__ will be included.
+
+ When False [default]: the __all__ attribute is ignored
+ """,
+ ).tag(config=True)
+
+ profile_completions = Bool(
+ default_value=False,
+ help="If True, emit profiling data for completion subsystem using cProfile."
+ ).tag(config=True)
+
+ profiler_output_dir = Unicode(
+ default_value=".completion_profiles",
+ help="Template for path at which to output profile data for completions."
+ ).tag(config=True)
+
+ @observe('limit_to__all__')
+ def _limit_to_all_changed(self, change):
+        warnings.warn(
+            '`IPython.core.IPCompleter.limit_to__all__` configuration '
+            'value has been deprecated since IPython 5.0, will be made to have '
+            'no effect and then removed in a future version of IPython.',
+            UserWarning)
+
+ def __init__(
+ self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
+ ):
+ """IPCompleter() -> completer
+
+ Return a completer object.
+
+ Parameters
+ ----------
+ shell
+ a pointer to the ipython shell itself. This is needed
+ because this completer knows about magic functions, and those can
+ only be accessed via the ipython instance.
+ namespace : dict, optional
+ an optional dict where completions are performed.
+ global_namespace : dict, optional
+ secondary optional dict for completions, to
+ handle cases (such as IPython embedded inside functions) where
+ both Python scopes are visible.
+ config : Config
+            traitlets config object
+ **kwargs
+ passed to super class unmodified.
+ """
+
+ self.magic_escape = ESC_MAGIC
+ self.splitter = CompletionSplitter()
+
+ # _greedy_changed() depends on splitter and readline being defined:
+ super().__init__(
+ namespace=namespace,
+ global_namespace=global_namespace,
+ config=config,
+ **kwargs,
+ )
+
+ # List where completion matches will be stored
+ self.matches = []
+ self.shell = shell
+ # Regexp to split filenames with spaces in them
+ self.space_name_re = re.compile(r'([^\\] )')
+ # Hold a local ref. to glob.glob for speed
+ self.glob = glob.glob
+
+ # Determine if we are running on 'dumb' terminals, like (X)Emacs
+ # buffers, to avoid completion problems.
+ term = os.environ.get('TERM','xterm')
+ self.dumb_terminal = term in ['dumb','emacs']
+
+ # Special handling of backslashes needed in win32 platforms
+ if sys.platform == "win32":
+ self.clean_glob = self._clean_glob_win32
+ else:
+ self.clean_glob = self._clean_glob
+
+ #regexp to parse docstring for function signature
+ self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
+ self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
+ #use this if positional argument name is also needed
+ #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
+
+ self.magic_arg_matchers = [
+ self.magic_config_matcher,
+ self.magic_color_matcher,
+ ]
+
+ # This is set externally by InteractiveShell
+ self.custom_completers = None
+
+ # This is a list of names of unicode characters that can be completed
+ # into their corresponding unicode value. The list is large, so we
+ # lazily initialize it on first use. Consuming code should access this
+ # attribute through the `@unicode_names` property.
+ self._unicode_names = None
+
+ self._backslash_combining_matchers = [
+ self.latex_name_matcher,
+ self.unicode_name_matcher,
+ back_latex_name_matcher,
+ back_unicode_name_matcher,
+ self.fwd_unicode_matcher,
+ ]
+
+ if not self.backslash_combining_completions:
+ for matcher in self._backslash_combining_matchers:
+ self.disable_matchers.append(_get_matcher_id(matcher))
+
+ if not self.merge_completions:
+ self.suppress_competing_matchers = True
+
+ @property
+ def matchers(self) -> List[Matcher]:
+ """All active matcher routines for completion"""
+ if self.dict_keys_only:
+ return [self.dict_key_matcher]
+
+ if self.use_jedi:
+ return [
+ *self.custom_matchers,
+ *self._backslash_combining_matchers,
+ *self.magic_arg_matchers,
+ self.custom_completer_matcher,
+ self.magic_matcher,
+ self._jedi_matcher,
+ self.dict_key_matcher,
+ self.file_matcher,
+ ]
+ else:
+ return [
+ *self.custom_matchers,
+ *self._backslash_combining_matchers,
+ *self.magic_arg_matchers,
+ self.custom_completer_matcher,
+ self.dict_key_matcher,
+ # TODO: convert python_matches to v2 API
+ self.magic_matcher,
+ self.python_matches,
+ self.file_matcher,
+ self.python_func_kw_matcher,
+ ]
+
+ def all_completions(self, text:str) -> List[str]:
+ """
+ Wrapper around the completion methods for the benefit of emacs.
+ """
+ prefix = text.rpartition('.')[0]
+ with provisionalcompleter():
+ return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
+ for c in self.completions(text, len(text))]
+
+ return self.complete(text)[1]
+
+ def _clean_glob(self, text:str):
+ return self.glob("%s*" % text)
+
+ def _clean_glob_win32(self, text:str):
+ return [f.replace("\\","/")
+ for f in self.glob("%s*" % text)]
+
+ @context_matcher()
+ def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Same as :any:`file_matches`, but adopted to new Matcher API."""
+ matches = self.file_matches(context.token)
+ # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
+ # starts with `/home/`, `C:\`, etc)
+ return _convert_matcher_v1_result_to_v2(matches, type="path")
+
+ def file_matches(self, text: str) -> List[str]:
+ """Match filenames, expanding ~USER type strings.
+
+ Most of the seemingly convoluted logic in this completer is an
+ attempt to handle filenames with spaces in them. And yet it's not
+ quite perfect, because Python's readline doesn't expose all of the
+ GNU readline details needed for this to be done correctly.
+
+ For a filename with a space in it, the printed completions will be
+ only the parts after what's already been typed (instead of the
+ full completions, as is normally done). I don't think with the
+ current (as of Python 2.3) Python readline it's possible to do
+ better.
+
+ .. deprecated:: 8.6
+ You can use :meth:`file_matcher` instead.
+ """
+
+ # chars that require escaping with backslash - i.e. chars
+ # that readline treats incorrectly as delimiters, but we
+ # don't want to treat as delimiters in filename matching
+ # when escaped with backslash
+ if text.startswith('!'):
+ text = text[1:]
+ text_prefix = u'!'
+ else:
+ text_prefix = u''
+
+ text_until_cursor = self.text_until_cursor
+ # track strings with open quotes
+ open_quotes = has_open_quotes(text_until_cursor)
+
+ if '(' in text_until_cursor or '[' in text_until_cursor:
+ lsplit = text
+ else:
+ try:
+ # arg_split ~ shlex.split, but with unicode bugs fixed by us
+ lsplit = arg_split(text_until_cursor)[-1]
+ except ValueError:
+ # typically an unmatched ", or backslash without escaped char.
+ if open_quotes:
+ lsplit = text_until_cursor.split(open_quotes)[-1]
+ else:
+ return []
+ except IndexError:
+ # tab pressed on empty line
+ lsplit = ""
+
+ if not open_quotes and lsplit != protect_filename(lsplit):
+ # if protectables are found, do matching on the whole escaped name
+ has_protectables = True
+ text0,text = text,lsplit
+ else:
+ has_protectables = False
+ text = os.path.expanduser(text)
+
+ if text == "":
+ return [text_prefix + protect_filename(f) for f in self.glob("*")]
+
+ # Compute the matches from the filesystem
+ if sys.platform == 'win32':
+ m0 = self.clean_glob(text)
+ else:
+ m0 = self.clean_glob(text.replace('\\', ''))
+
+ if has_protectables:
+ # If we had protectables, we need to revert our changes to the
+ # beginning of filename so that we don't double-write the part
+ # of the filename we have so far
+ len_lsplit = len(lsplit)
+ matches = [text_prefix + text0 +
+ protect_filename(f[len_lsplit:]) for f in m0]
+ else:
+ if open_quotes:
+ # if we have a string with an open quote, we don't need to
+ # protect the names beyond the quote (and we _shouldn't_, as
+ # it would cause bugs when the filesystem call is made).
+ matches = m0 if sys.platform == "win32" else\
+ [protect_filename(f, open_quotes) for f in m0]
+ else:
+ matches = [text_prefix +
+ protect_filename(f) for f in m0]
+
+ # Mark directories in input list by appending '/' to their names.
+ return [x+'/' if os.path.isdir(x) else x for x in matches]
+
+ @context_matcher()
+ def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match magics."""
+ text = context.token
+ matches = self.magic_matches(text)
+ result = _convert_matcher_v1_result_to_v2(matches, type="magic")
+ is_magic_prefix = len(text) > 0 and text[0] == "%"
+ result["suppress"] = is_magic_prefix and bool(result["completions"])
+ return result
+
+ def magic_matches(self, text: str):
+ """Match magics.
+
+ .. deprecated:: 8.6
+ You can use :meth:`magic_matcher` instead.
+ """
+ # Get all shell magics now rather than statically, so magics loaded at
+ # runtime show up too.
+ lsm = self.shell.magics_manager.lsmagic()
+ line_magics = lsm['line']
+ cell_magics = lsm['cell']
+ pre = self.magic_escape
+ pre2 = pre+pre
+
+ explicit_magic = text.startswith(pre)
+
+ # Completion logic:
+ # - user gives %%: only do cell magics
+ # - user gives %: do both line and cell magics
+ # - no prefix: do both
+ # In other words, line magics are skipped if the user gives %% explicitly
+ #
+ # We also exclude magics that match any currently visible names:
+ # https://github.com/ipython/ipython/issues/4877, unless the user has
+ # typed a %:
+ # https://github.com/ipython/ipython/issues/10754
+ bare_text = text.lstrip(pre)
+ global_matches = self.global_matches(bare_text)
+ if not explicit_magic:
+ def matches(magic):
+ """
+ Filter magics, in particular remove magics that match
+ a name present in global namespace.
+ """
+ return ( magic.startswith(bare_text) and
+ magic not in global_matches )
+ else:
+ def matches(magic):
+ return magic.startswith(bare_text)
+
+ comp = [ pre2+m for m in cell_magics if matches(m)]
+ if not text.startswith(pre2):
+ comp += [ pre+m for m in line_magics if matches(m)]
+
+ return comp
+
+ @context_matcher()
+ def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match class names and attributes for %config magic."""
+ # NOTE: uses `line_buffer` equivalent for compatibility
+ matches = self.magic_config_matches(context.line_with_cursor)
+ return _convert_matcher_v1_result_to_v2(matches, type="param")
+
+ def magic_config_matches(self, text: str) -> List[str]:
+ """Match class names and attributes for %config magic.
+
+ .. deprecated:: 8.6
+ You can use :meth:`magic_config_matcher` instead.
+ """
+ texts = text.strip().split()
+
+ if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
+ # get all configuration classes
+ classes = sorted(set([ c for c in self.shell.configurables
+ if c.__class__.class_traits(config=True)
+ ]), key=lambda x: x.__class__.__name__)
+ classnames = [ c.__class__.__name__ for c in classes ]
+
+ # return all classnames if config or %config is given
+ if len(texts) == 1:
+ return classnames
+
+ # match classname
+ classname_texts = texts[1].split('.')
+ classname = classname_texts[0]
+ classname_matches = [ c for c in classnames
+ if c.startswith(classname) ]
+
+ # return matched classes or the matched class with attributes
+ if texts[1].find('.') < 0:
+ return classname_matches
+ elif len(classname_matches) == 1 and \
+ classname_matches[0] == classname:
+ cls = classes[classnames.index(classname)].__class__
+ help = cls.class_get_help()
+ # strip leading '--' from cl-args:
+ help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
+ return [ attr.split('=')[0]
+ for attr in help.strip().splitlines()
+ if attr.startswith(texts[1]) ]
+ return []
+
+ @context_matcher()
+ def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match color schemes for %colors magic."""
+ # NOTE: uses `line_buffer` equivalent for compatibility
+ matches = self.magic_color_matches(context.line_with_cursor)
+ return _convert_matcher_v1_result_to_v2(matches, type="param")
+
+ def magic_color_matches(self, text: str) -> List[str]:
+ """Match color schemes for %colors magic.
+
+ .. deprecated:: 8.6
+ You can use :meth:`magic_color_matcher` instead.
+ """
+ texts = text.split()
+ if text.endswith(' '):
+ # .split() strips off the trailing whitespace. Add '' back
+ # so that: '%colors ' -> ['%colors', '']
+ texts.append('')
+
+ if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
+ prefix = texts[1]
+ return [ color for color in InspectColors.keys()
+ if color.startswith(prefix) ]
+ return []
+
+ @context_matcher(identifier="IPCompleter.jedi_matcher")
+ def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
+ matches = self._jedi_matches(
+ cursor_column=context.cursor_position,
+ cursor_line=context.cursor_line,
+ text=context.full_text,
+ )
+ return {
+ "completions": matches,
+ # static analysis should not suppress other matchers
+ "suppress": False,
+ }
+
+ def _jedi_matches(
+ self, cursor_column: int, cursor_line: int, text: str
+ ) -> Iterator[_JediCompletionLike]:
+ """
+        Return a list of :any:`jedi.api.Completion` objects from a ``text`` and
+        cursor position.
+
+ Parameters
+ ----------
+ cursor_column : int
+ column position of the cursor in ``text``, 0-indexed.
+ cursor_line : int
+ line position of the cursor in ``text``, 0-indexed
+ text : str
+ text to complete
+
+ Notes
+ -----
+        If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
+        object containing a string with the Jedi debug information attached.
+
+ .. deprecated:: 8.6
+ You can use :meth:`_jedi_matcher` instead.
+ """
+ namespaces = [self.namespace]
+ if self.global_namespace is not None:
+ namespaces.append(self.global_namespace)
+
+ completion_filter = lambda x:x
+ offset = cursor_to_position(text, cursor_line, cursor_column)
+ # filter output if we are completing for object members
+ if offset:
+ pre = text[offset-1]
+ if pre == '.':
+ if self.omit__names == 2:
+ completion_filter = lambda c:not c.name.startswith('_')
+ elif self.omit__names == 1:
+ completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
+ elif self.omit__names == 0:
+ completion_filter = lambda x:x
+ else:
+ raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
+
+ interpreter = jedi.Interpreter(text[:offset], namespaces, column=cursor_column, line=cursor_line + 1)
+ try_jedi = True
+
+ try:
+ # find the first token in the current tree -- if it is a ' or " then we are in a string
+ completing_string = False
+ try:
+ first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
+ except StopIteration:
+ pass
+ else:
+ # note the value may be ', ", or it may also be ''' or """, or
+ # in some cases, """what/you/typed..., but all of these are
+ # strings.
+ completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
+
+ # if we are in a string jedi is likely not the right candidate for
+ # now. Skip it.
+ try_jedi = not completing_string
+        except Exception as e:
+            # many things can go wrong; we are using a private API, just don't crash.
+            if self.debug:
+                print("Error detecting if completing a non-finished string:", e, '|')
+
+ if not try_jedi:
+ return iter([])
+ try:
+ return filter(completion_filter, interpreter.completions())
+ except Exception as e:
+ if self.debug:
+ return iter(
+ [
+ _FakeJediCompletion(
+                            'Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
+ % (e)
+ )
+ ]
+ )
+ else:
+ return iter([])
+
+ @completion_matcher(api_version=1)
+ def python_matches(self, text: str) -> Iterable[str]:
+ """Match attributes or global python names"""
+ if "." in text:
+ try:
+ matches = self.attr_matches(text)
+ if text.endswith('.') and self.omit__names:
+ if self.omit__names == 1:
+ # true if txt is _not_ a __ name, false otherwise:
+ no__name = (lambda txt:
+ re.match(r'.*\.__.*?__',txt) is None)
+ else:
+ # true if txt is _not_ a _ name, false otherwise:
+ no__name = (lambda txt:
+ re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
+ matches = filter(no__name, matches)
+ except NameError:
+ # catches <undefined attributes>.<tab>
+ matches = []
+ else:
+ matches = self.global_matches(text)
+ return matches
+
+ def _default_arguments_from_docstring(self, doc):
+ """Parse the first line of docstring for call signature.
+
+ Docstring should be of the form 'min(iterable[, key=func])\n'.
+ It can also parse cython docstring of the form
+ 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
+ """
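+        # Illustrative expectation (a sketch, based on the regexes referenced
+        # in the comments below): for doc = 'min(iterable[, key=func])\n' the
+        # keyword-style parameters are extracted, e.g. ['key'].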
+ if doc is None:
+ return []
+
+        # care only about the first line
+ line = doc.lstrip().splitlines()[0]
+
+ #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
+ #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
+ sig = self.docstring_sig_re.search(line)
+ if sig is None:
+ return []
+ # iterable[, key=func]' -> ['iterable[' ,' key=func]']
+ sig = sig.groups()[0].split(',')
+ ret = []
+ for s in sig:
+ #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
+ ret += self.docstring_kwd_re.findall(s)
+ return ret
+
+ def _default_arguments(self, obj):
+ """Return the list of default arguments of obj if it is callable,
+ or empty list otherwise."""
+ call_obj = obj
+ ret = []
+ if inspect.isbuiltin(obj):
+ pass
+ elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
+ if inspect.isclass(obj):
+                # for cython embedsignature=True the constructor docstring
+                # belongs to the object itself, not __init__
+ ret += self._default_arguments_from_docstring(
+ getattr(obj, '__doc__', ''))
+ # for classes, check for __init__,__new__
+ call_obj = (getattr(obj, '__init__', None) or
+ getattr(obj, '__new__', None))
+ # for all others, check if they are __call__able
+ elif hasattr(obj, '__call__'):
+ call_obj = obj.__call__
+ ret += self._default_arguments_from_docstring(
+ getattr(call_obj, '__doc__', ''))
+
+ _keeps = (inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_OR_KEYWORD)
+
+ try:
+ sig = inspect.signature(obj)
+ ret.extend(k for k, v in sig.parameters.items() if
+ v.kind in _keeps)
+ except ValueError:
+ pass
+
+ return list(set(ret))
+
+ @context_matcher()
+ def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match named parameters (kwargs) of the last open function."""
+ matches = self.python_func_kw_matches(context.token)
+ return _convert_matcher_v1_result_to_v2(matches, type="param")
+
+ def python_func_kw_matches(self, text):
+ """Match named parameters (kwargs) of the last open function.
+
+ .. deprecated:: 8.6
+ You can use :meth:`python_func_kw_matcher` instead.
+ """
+
+ if "." in text: # a parameter cannot be dotted
+ return []
+ try: regexp = self.__funcParamsRegex
+ except AttributeError:
+ regexp = self.__funcParamsRegex = re.compile(r'''
+ '.*?(?<!\\)' | # single quoted strings or
+ ".*?(?<!\\)" | # double quoted strings or
+ \w+ | # identifier
+ \S # other characters
+ ''', re.VERBOSE | re.DOTALL)
+ # 1. find the nearest identifier that comes before an unclosed
+ # parenthesis before the cursor
+ # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
+ tokens = regexp.findall(self.text_until_cursor)
+ iterTokens = reversed(tokens); openPar = 0
+
+ for token in iterTokens:
+ if token == ')':
+ openPar -= 1
+ elif token == '(':
+ openPar += 1
+ if openPar > 0:
+ # found the last unclosed parenthesis
+ break
+ else:
+ return []
+ # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
+ ids = []
+ isId = re.compile(r'\w+$').match
+
+ while True:
+ try:
+ ids.append(next(iterTokens))
+ if not isId(ids[-1]):
+ ids.pop(); break
+ if not next(iterTokens) == '.':
+ break
+ except StopIteration:
+ break
+
+        # Find all named arguments already assigned to, so as to avoid
+        # suggesting them again
+ usedNamedArgs = set()
+ par_level = -1
+ for token, next_token in zip(tokens, tokens[1:]):
+ if token == '(':
+ par_level += 1
+ elif token == ')':
+ par_level -= 1
+
+ if par_level != 0:
+ continue
+
+ if next_token != '=':
+ continue
+
+ usedNamedArgs.add(token)
+
+ argMatches = []
+ try:
+ callableObj = '.'.join(ids[::-1])
+ namedArgs = self._default_arguments(eval(callableObj,
+ self.namespace))
+
+ # Remove used named arguments from the list, no need to show twice
+ for namedArg in set(namedArgs) - usedNamedArgs:
+ if namedArg.startswith(text):
+ argMatches.append("%s=" %namedArg)
+ except:
+ pass
+
+ return argMatches
+
+ @staticmethod
+ def _get_keys(obj: Any) -> List[Any]:
+ # Objects can define their own completions by defining an
+        # _ipython_key_completions_() method.
+ method = get_real_method(obj, '_ipython_key_completions_')
+ if method is not None:
+ return method()
+
+ # Special case some common in-memory dict-like types
+ if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
+ try:
+ return list(obj.keys())
+ except Exception:
+ return []
+ elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
+ try:
+ return list(obj.obj.keys())
+ except Exception:
+ return []
+ elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
+ _safe_isinstance(obj, 'numpy', 'void'):
+ return obj.dtype.names or []
+ return []
+
+ @context_matcher()
+ def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
+ """Match string keys in a dictionary, after e.g. ``foo[``."""
+ matches = self.dict_key_matches(context.token)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="dict key", suppress_if_matches=True
+ )
+
+ def dict_key_matches(self, text: str) -> List[str]:
+ """Match string keys in a dictionary, after e.g. ``foo[``.
+
+ .. deprecated:: 8.6
+ You can use :meth:`dict_key_matcher` instead.
+ """
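+        # Illustrative expectation (a sketch): with ``d = {'abc': 1}`` in the
+        # user namespace, completing ``d['a`` is expected to offer ``abc``,
+        # with a closing quote/bracket appended when ``auto_close_dict_keys``
+        # is enabled.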
+
+ # Short-circuit on closed dictionary (regular expression would
+ # not match anyway, but would take quite a while).
+ if self.text_until_cursor.strip().endswith("]"):
+ return []
+
+ match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
+
+ if match is None:
+ return []
+
+ expr, prior_tuple_keys, key_prefix = match.groups()
+
+ obj = self._evaluate_expr(expr)
+
+ if obj is not_found:
+ return []
+
+ keys = self._get_keys(obj)
+ if not keys:
+ return keys
+
+ tuple_prefix = guarded_eval(
+ prior_tuple_keys,
+ EvaluationContext(
+ globals=self.global_namespace,
+ locals=self.namespace,
+ evaluation=self.evaluation,
+ in_subscript=True,
+ ),
+ )
+
+ closing_quote, token_offset, matches = match_dict_keys(
+ keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
+ )
+ if not matches:
+ return []
+
+ # get the cursor position of
+ # - the text being completed
+ # - the start of the key text
+ # - the start of the completion
+ text_start = len(self.text_until_cursor) - len(text)
+ if key_prefix:
+ key_start = match.start(3)
+ completion_start = key_start + token_offset
+ else:
+ key_start = completion_start = match.end()
+
+ # grab the leading prefix, to make sure all completions start with `text`
+ if text_start > key_start:
+ leading = ''
+ else:
+ leading = text[text_start:completion_start]
+
+ # append closing quote and bracket as appropriate
+ # this is *not* appropriate if the opening quote or bracket is outside
+ # the text given to this method, e.g. `d["""a\nt
+ can_close_quote = False
+ can_close_bracket = False
+
+ continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
+
+ if continuation.startswith(closing_quote):
+ # do not close if already closed, e.g. `d['a<tab>'`
+ continuation = continuation[len(closing_quote) :]
+ else:
+ can_close_quote = True
+
+ continuation = continuation.strip()
+
+ # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
+ # handling it is out of scope, so let's avoid appending suffixes.
+ has_known_tuple_handling = isinstance(obj, dict)
+
+ can_close_bracket = (
+ not continuation.startswith("]") and self.auto_close_dict_keys
+ )
+ can_close_tuple_item = (
+ not continuation.startswith(",")
+ and has_known_tuple_handling
+ and self.auto_close_dict_keys
+ )
+ can_close_quote = can_close_quote and self.auto_close_dict_keys
+
+        # fast path if a closing quote should be appended but no suffix is allowed
+ if not can_close_quote and not can_close_bracket and closing_quote:
+ return [leading + k for k in matches]
+
+ results = []
+
+ end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
+
+ for k, state_flag in matches.items():
+ result = leading + k
+ if can_close_quote and closing_quote:
+ result += closing_quote
+
+ if state_flag == end_of_tuple_or_item:
+ # We do not know which suffix to add,
+ # e.g. both tuple item and string
+ # match this item.
+ pass
+
+ if state_flag in end_of_tuple_or_item and can_close_bracket:
+ result += "]"
+ if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
+ result += ", "
+ results.append(result)
+ return results
+
+ @context_matcher()
+ def unicode_name_matcher(self, context: CompletionContext):
+        """Same as :any:`unicode_name_matches`, but adapted to the new Matcher API."""
+ fragment, matches = self.unicode_name_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="unicode", fragment=fragment, suppress_if_matches=True
+ )
+
+ @staticmethod
+ def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
+        """Match Latex-like syntax for unicode characters based
+        on the name of the character.
+
+ This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
+
+        Works only on valid Python 3 identifiers, or on combining characters that
+ will combine to form a valid identifier.
+ """
+ slashpos = text.rfind('\\')
+ if slashpos > -1:
+ s = text[slashpos+1:]
+ try :
+ unic = unicodedata.lookup(s)
+ # allow combining chars
+ if ('a'+unic).isidentifier():
+ return '\\'+s,[unic]
+ except KeyError:
+ pass
+ return '', []
+
+ @context_matcher()
+ def latex_name_matcher(self, context: CompletionContext):
+ """Match Latex syntax for unicode characters.
+
+ This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
+ """
+ fragment, matches = self.latex_matches(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="latex", fragment=fragment, suppress_if_matches=True
+ )
+
+ def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
+ """Match Latex syntax for unicode characters.
+
+ This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
+
+ .. deprecated:: 8.6
+ You can use :meth:`latex_name_matcher` instead.
+ """
+ slashpos = text.rfind('\\')
+ if slashpos > -1:
+ s = text[slashpos:]
+ if s in latex_symbols:
+ # Try to complete a full latex symbol to unicode
+ # \\alpha -> α
+ return s, [latex_symbols[s]]
+ else:
+ # If a user has partially typed a latex symbol, give them
+ # a full list of options \al -> [\aleph, \alpha]
+ matches = [k for k in latex_symbols if k.startswith(s)]
+ if matches:
+ return s, matches
+ return '', ()
+
+ @context_matcher()
+ def custom_completer_matcher(self, context):
+ """Dispatch custom completer.
+
+ If a match is found, suppresses all other matchers except for Jedi.
+ """
+ matches = self.dispatch_custom_completer(context.token) or []
+ result = _convert_matcher_v1_result_to_v2(
+ matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
+ )
+ result["ordered"] = True
+ result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
+ return result
+
+ def dispatch_custom_completer(self, text):
+ """
+ .. deprecated:: 8.6
+ You can use :meth:`custom_completer_matcher` instead.
+ """
+ if not self.custom_completers:
+ return
+
+ line = self.line_buffer
+ if not line.strip():
+ return None
+
+ # Create a little structure to pass all the relevant information about
+ # the current completion to any custom completer.
+ event = SimpleNamespace()
+ event.line = line
+ event.symbol = text
+ cmd = line.split(None,1)[0]
+ event.command = cmd
+ event.text_until_cursor = self.text_until_cursor
+
+ # for foo etc, try also to find completer for %foo
+ if not cmd.startswith(self.magic_escape):
+ try_magic = self.custom_completers.s_matches(
+ self.magic_escape + cmd)
+ else:
+ try_magic = []
+
+ for c in itertools.chain(self.custom_completers.s_matches(cmd),
+ try_magic,
+ self.custom_completers.flat_matches(self.text_until_cursor)):
+ try:
+ res = c(event)
+ if res:
+ # first, try case sensitive match
+ withcase = [r for r in res if r.startswith(text)]
+ if withcase:
+ return withcase
+ # if none, then case insensitive ones are ok too
+ text_low = text.lower()
+ return [r for r in res if r.lower().startswith(text_low)]
+ except TryNext:
+ pass
+ except KeyboardInterrupt:
+ """
+                If a custom completer takes too long,
+                let the keyboard interrupt abort it and return nothing.
+ """
+ break
+
+ return None
+
+ def completions(self, text: str, offset: int)->Iterator[Completion]:
+ """
+ Returns an iterator over the possible completions
+
+ .. warning::
+
+ Unstable
+
+ This function is unstable, API may change without warning.
+            It will also raise unless used in the proper context manager.
+
+ Parameters
+ ----------
+ text : str
+ Full text of the current input, multi line string.
+ offset : int
+ Integer representing the position of the cursor in ``text``. Offset
+ is 0-based indexed.
+
+ Yields
+ ------
+ Completion
+
+ Notes
+ -----
+        The cursor in a text can be seen either as being "in between"
+        characters or "on" a character, depending on the interface visible to
+        the user. For consistency, the cursor being "in between" characters X
+        and Y is equivalent to the cursor being "on" character Y, that is to say
+        the character the cursor is on is considered as being after the cursor.
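+        For instance, in the text ``"ab"`` with offset 1, the cursor is
+        considered to be "on" the character ``"b"``.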
+
+        Combining characters may span more than one position in the
+ text.
+
+ .. note::
+
+            If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
+ fake Completion token to distinguish completion returned by Jedi
+ and usual IPython completion.
+
+ .. note::
+
+ Completions are not completely deduplicated yet. If identical
+ completions are coming from different sources this function does not
+ ensure that each completion object will only be present once.
+ """
+ warnings.warn("_complete is a provisional API (as of IPython 6.0). "
+ "It may change without warnings. "
+ "Use in corresponding context manager.",
+ category=ProvisionalCompleterWarning, stacklevel=2)
+
+ seen = set()
+ profiler:Optional[cProfile.Profile]
+ try:
+ if self.profile_completions:
+ import cProfile
+ profiler = cProfile.Profile()
+ profiler.enable()
+ else:
+ profiler = None
+
+ for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
+ if c and (c in seen):
+ continue
+ yield c
+ seen.add(c)
+ except KeyboardInterrupt:
+            """If completions take too long and the user sends a keyboard interrupt,
+            do not crash and return ASAP."""
+ pass
+ finally:
+ if profiler is not None:
+ profiler.disable()
+ ensure_dir_exists(self.profiler_output_dir)
+ output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
+ print("Writing profiler output to", output_path)
+ profiler.dump_stats(output_path)
+
+ def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
+ """
+        Core completion module. Same signature as :any:`completions`, with the
+        extra ``_timeout`` parameter (in seconds).
+
+        Computing a Jedi completion's ``.type`` can be quite expensive (it is a
+        lazy property) and can require some warm-up, more than just
+        computing the ``name`` of a completion. The warm-up can be:
+
+        - Long, the first time a module is encountered after
+          install/update: the parse/inference tree is actually built.
+
+        - Shorter, the first time the module is encountered in a session: the
+          tree is loaded from disk.
+
+        We don't want to block completions for tens of seconds, so we give the
+        completer a "budget" of ``_timeout`` seconds per invocation to compute
+        completion types; the completions whose type has not yet been computed
+        will be marked as "unknown" and will have a chance to be computed on
+        the next round as things get cached.
+
+        Keep in mind that Jedi is not the only thing processing the completions,
+        so keep the timeout short-ish: if we take more than 0.3 seconds we still
+        have lots of processing to do.
+
+ """
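+        # Illustrative budget arithmetic (a sketch; 400 ms is assumed to be the
+        # default ``jedi_compute_type_timeout``): the caller passes
+        # _timeout = 400 / 1000 = 0.4 s, so ``.type`` is computed for as many
+        # Jedi completions as fit before ``deadline``; the rest are emitted
+        # with ``_UNKNOWN_TYPE`` in the second loop below.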
+ deadline = time.monotonic() + _timeout
+
+ before = full_text[:offset]
+ cursor_line, cursor_column = position_to_cursor(full_text, offset)
+
+ jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
+
+ def is_non_jedi_result(
+ result: MatcherResult, identifier: str
+ ) -> TypeGuard[SimpleMatcherResult]:
+ return identifier != jedi_matcher_id
+
+ results = self._complete(
+ full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
+ )
+
+ non_jedi_results: Dict[str, SimpleMatcherResult] = {
+ identifier: result
+ for identifier, result in results.items()
+ if is_non_jedi_result(result, identifier)
+ }
+
+ jedi_matches = (
+ cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
+ if jedi_matcher_id in results
+ else ()
+ )
+
+ iter_jm = iter(jedi_matches)
+ if _timeout:
+ for jm in iter_jm:
+ try:
+ type_ = jm.type
+ except Exception:
+ if self.debug:
+ print("Error in Jedi getting type of ", jm)
+ type_ = None
+ delta = len(jm.name_with_symbols) - len(jm.complete)
+ if type_ == 'function':
+ signature = _make_signature(jm)
+ else:
+ signature = ''
+ yield Completion(start=offset - delta,
+ end=offset,
+ text=jm.name_with_symbols,
+ type=type_,
+ signature=signature,
+ _origin='jedi')
+
+ if time.monotonic() > deadline:
+ break
+
+ for jm in iter_jm:
+ delta = len(jm.name_with_symbols) - len(jm.complete)
+ yield Completion(
+ start=offset - delta,
+ end=offset,
+ text=jm.name_with_symbols,
+ type=_UNKNOWN_TYPE, # don't compute type for speed
+ _origin="jedi",
+ signature="",
+ )
+
+ # TODO:
+ # Suppress this, right now just for debug.
+ if jedi_matches and non_jedi_results and self.debug:
+ some_start_offset = before.rfind(
+ next(iter(non_jedi_results.values()))["matched_fragment"]
+ )
+ yield Completion(
+ start=some_start_offset,
+ end=offset,
+ text="--jedi/ipython--",
+ _origin="debug",
+ type="none",
+ signature="",
+ )
+
+ ordered: List[Completion] = []
+ sortable: List[Completion] = []
+
+ for origin, result in non_jedi_results.items():
+ matched_text = result["matched_fragment"]
+ start_offset = before.rfind(matched_text)
+ is_ordered = result.get("ordered", False)
+ container = ordered if is_ordered else sortable
+
+            # I'm unsure if this is always true, so let's assert and see if it
+            # crashes
+ assert before.endswith(matched_text)
+
+ for simple_completion in result["completions"]:
+ completion = Completion(
+ start=start_offset,
+ end=offset,
+ text=simple_completion.text,
+ _origin=origin,
+ signature="",
+ type=simple_completion.type or _UNKNOWN_TYPE,
+ )
+ container.append(completion)
+
+ yield from list(self._deduplicate(ordered + self._sort(sortable)))[
+ :MATCHES_LIMIT
+ ]
+
+ def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
+ """Find completions for the given text and line context.
+
+ Note that both the text and the line_buffer are optional, but at least
+ one of them must be given.
+
+ Parameters
+ ----------
+ text : string, optional
+ Text to perform the completion on. If not given, the line buffer
+ is split using the instance's CompletionSplitter object.
+ line_buffer : string, optional
+ If not given, the completer attempts to obtain the current line
+ buffer via readline. This keyword allows clients which are
+ requesting for text completions in non-readline contexts to inform
+ the completer of the entire text.
+ cursor_pos : int, optional
+ Index of the cursor in the full line buffer. Should be provided by
+ remote frontends where kernel has no access to frontend state.
+
+ Returns
+ -------
+ Tuple of two items:
+ text : str
+ Text that was actually used in the completion.
+ matches : list
+ A list of completion matches.
+
+ Notes
+ -----
+ This API is likely to be deprecated and replaced by
+ :any:`IPCompleter.completions` in the future.
+
+ """
+ warnings.warn('`Completer.complete` is pending deprecation since '
+ 'IPython 6.0 and will be replaced by `Completer.completions`.',
+ PendingDeprecationWarning)
+        # potential todo: fold the 3rd throw-away argument of _complete
+        # into the first two.
+ # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
+ # TODO: should we deprecate now, or does it stay?
+
+ results = self._complete(
+ line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
+ )
+
+ jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
+
+ return self._arrange_and_extract(
+ results,
+ # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
+ skip_matchers={jedi_matcher_id},
+ # this API does not support different start/end positions (fragments of token).
+ abort_if_offset_changes=True,
+ )
+
+ def _arrange_and_extract(
+ self,
+ results: Dict[str, MatcherResult],
+ skip_matchers: Set[str],
+ abort_if_offset_changes: bool,
+ ):
+ sortable: List[AnyMatcherCompletion] = []
+ ordered: List[AnyMatcherCompletion] = []
+ most_recent_fragment = None
+ for identifier, result in results.items():
+ if identifier in skip_matchers:
+ continue
+ if not result["completions"]:
+ continue
+ if not most_recent_fragment:
+ most_recent_fragment = result["matched_fragment"]
+ if (
+ abort_if_offset_changes
+ and result["matched_fragment"] != most_recent_fragment
+ ):
+ break
+ if result.get("ordered", False):
+ ordered.extend(result["completions"])
+ else:
+ sortable.extend(result["completions"])
+
+ if not most_recent_fragment:
+ most_recent_fragment = "" # to satisfy typechecker (and just in case)
+
+ return most_recent_fragment, [
+ m.text for m in self._deduplicate(ordered + self._sort(sortable))
+ ]
+
+ def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
+ full_text=None) -> _CompleteResult:
+ """
+        Like complete, but can also return raw Jedi completions as well as the
+        origin of the completion text. This could (and should) be made much
+        cleaner, but that will be simpler once we drop the old (and stateful)
+        :any:`complete` API.
+
+        With the current provisional API, cursor_pos acts (depending on the
+        caller) either as the offset in the ``text`` or ``line_buffer``, or as
+        the ``column`` when passing multiline strings; this could/should be
+        renamed, but that would add extra noise.
+
+ Parameters
+ ----------
+ cursor_line
+ Index of the line the cursor is on. 0 indexed.
+ cursor_pos
+ Position of the cursor in the current line/line_buffer/text. 0
+ indexed.
+ line_buffer : optional, str
+            The current line the cursor is in; this exists mostly for the
+            legacy reason that readline could only give us the single current
+            line. Prefer `full_text`.
+ text : str
+            The current "token" the cursor is in, also mostly for historical
+            reasons, as the completer would trigger only after the current line
+            was parsed.
+ full_text : str
+ Full text of the current cell.
+
+ Returns
+ -------
+ An ordered dictionary where keys are identifiers of completion
+ matchers and values are ``MatcherResult``s.
+ """
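+        # Illustrative call (a sketch): _complete(cursor_line=0, cursor_pos=5,
+        # full_text="impor", text="impor") returns a dict keyed by matcher
+        # identifiers, each value being a ``MatcherResult`` whose
+        # "completions" list holds the individual matches.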
+
+ # if the cursor position isn't given, the only sane assumption we can
+ # make is that it's at the end of the line (the common case)
+ if cursor_pos is None:
+ cursor_pos = len(line_buffer) if text is None else len(text)
+
+ if self.use_main_ns:
+ self.namespace = __main__.__dict__
+
+ # if text is either None or an empty string, rely on the line buffer
+ if (not line_buffer) and full_text:
+ line_buffer = full_text.split('\n')[cursor_line]
+ if not text: # issue #11508: check line_buffer before calling split_line
+ text = (
+ self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
+ )
+
+ # If no line buffer is given, assume the input text is all there was
+ if line_buffer is None:
+ line_buffer = text
+
+ # deprecated - do not use `line_buffer` in new code.
+ self.line_buffer = line_buffer
+ self.text_until_cursor = self.line_buffer[:cursor_pos]
+
+ if not full_text:
+ full_text = line_buffer
+
+ context = CompletionContext(
+ full_text=full_text,
+ cursor_position=cursor_pos,
+ cursor_line=cursor_line,
+ token=text,
+ limit=MATCHES_LIMIT,
+ )
+
+ # Start with a clean slate of completions
+ results: Dict[str, MatcherResult] = {}
+
+ jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
+
+ suppressed_matchers: Set[str] = set()
+
+ matchers = {
+ _get_matcher_id(matcher): matcher
+ for matcher in sorted(
+ self.matchers, key=_get_matcher_priority, reverse=True
+ )
+ }
+
+ for matcher_id, matcher in matchers.items():
+ matcher_id = _get_matcher_id(matcher)
+
+ if matcher_id in self.disable_matchers:
+ continue
+
+ if matcher_id in results:
+ warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
+
+ if matcher_id in suppressed_matchers:
+ continue
+
+ result: MatcherResult
+ try:
+ if _is_matcher_v1(matcher):
+ result = _convert_matcher_v1_result_to_v2(
+ matcher(text), type=_UNKNOWN_TYPE
+ )
+ elif _is_matcher_v2(matcher):
+ result = matcher(context)
+ else:
+ api_version = _get_matcher_api_version(matcher)
+ raise ValueError(f"Unsupported API version {api_version}")
+ except:
+ # Show the ugly traceback if the matcher causes an
+ # exception, but do NOT crash the kernel!
+ sys.excepthook(*sys.exc_info())
+ continue
+
+            # set the default value for matched fragment if the matcher did not provide one.
+ result["matched_fragment"] = result.get("matched_fragment", context.token)
+
+ if not suppressed_matchers:
+ suppression_recommended: Union[bool, Set[str]] = result.get(
+ "suppress", False
+ )
+
+ suppression_config = (
+ self.suppress_competing_matchers.get(matcher_id, None)
+ if isinstance(self.suppress_competing_matchers, dict)
+ else self.suppress_competing_matchers
+ )
+ should_suppress = (
+ (suppression_config is True)
+ or (suppression_recommended and (suppression_config is not False))
+ ) and has_any_completions(result)
+
+ if should_suppress:
+ suppression_exceptions: Set[str] = result.get(
+ "do_not_suppress", set()
+ )
+ if isinstance(suppression_recommended, Iterable):
+ to_suppress = set(suppression_recommended)
+ else:
+ to_suppress = set(matchers)
+ suppressed_matchers = to_suppress - suppression_exceptions
+
+ new_results = {}
+ for previous_matcher_id, previous_result in results.items():
+ if previous_matcher_id not in suppressed_matchers:
+ new_results[previous_matcher_id] = previous_result
+ results = new_results
+
+ results[matcher_id] = result
+
+ _, matches = self._arrange_and_extract(
+ results,
+            # TODO: Jedi completions are not included in the legacy stateful API;
+            # was this deliberate or an omission? If it was an omission, we can
+            # remove the filtering step; otherwise remove this comment.
+ skip_matchers={jedi_matcher_id},
+ abort_if_offset_changes=False,
+ )
+
+ # populate legacy stateful API
+ self.matches = matches
+
+ return results
+
+ @staticmethod
+ def _deduplicate(
+ matches: Sequence[AnyCompletion],
+ ) -> Iterable[AnyCompletion]:
+ filtered_matches: Dict[str, AnyCompletion] = {}
+ for match in matches:
+ text = match.text
+ if (
+ text not in filtered_matches
+ or filtered_matches[text].type == _UNKNOWN_TYPE
+ ):
+ filtered_matches[text] = match
+
+ return filtered_matches.values()
+
+ @staticmethod
+ def _sort(matches: Sequence[AnyCompletion]):
+ return sorted(matches, key=lambda x: completions_sorting_key(x.text))
+
+ @context_matcher()
+ def fwd_unicode_matcher(self, context: CompletionContext):
+        """Same as :any:`fwd_unicode_match`, but adapted to the new Matcher API."""
+ # TODO: use `context.limit` to terminate early once we matched the maximum
+ # number that will be used downstream; can be added as an optional to
+ # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
+ fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
+ return _convert_matcher_v1_result_to_v2(
+ matches, type="unicode", fragment=fragment, suppress_if_matches=True
+ )
+
+ def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
+ """
+ Forward match a string starting with a backslash with a list of
+ potential Unicode completions.
+
+ Will compute list of Unicode character names on first call and cache it.
+
+ .. deprecated:: 8.6
+ You can use :meth:`fwd_unicode_matcher` instead.
+
+ Returns
+ -------
+        A tuple with:
+            - matched text (empty if no matches)
+            - list of potential completions (an empty tuple if no matches)
+ """
+        # TODO: self.unicode_names is a list of ~100k elements that we traverse
+        # on each call. We could do a faster match using a Trie.
+
+        # Using pygtrie the following seems to work:
+
+ # s = PrefixSet()
+
+ # for c in range(0,0x10FFFF + 1):
+ # try:
+ # s.add(unicodedata.name(chr(c)))
+ # except ValueError:
+ # pass
+ # [''.join(k) for k in s.iter(prefix)]
+
+        # But this would need to be timed, and it adds an extra dependency.
+
+ slashpos = text.rfind('\\')
+ # if text starts with slash
+ if slashpos > -1:
+ # PERF: It's important that we don't access self._unicode_names
+ # until we're inside this if-block. _unicode_names is lazily
+ # initialized, and it takes a user-noticeable amount of time to
+ # initialize it, so we don't want to initialize it unless we're
+ # actually going to use it.
+ s = text[slashpos + 1 :]
+ sup = s.upper()
+ candidates = [x for x in self.unicode_names if x.startswith(sup)]
+ if candidates:
+ return s, candidates
+ candidates = [x for x in self.unicode_names if sup in x]
+ if candidates:
+ return s, candidates
+ splitsup = sup.split(" ")
+ candidates = [
+ x for x in self.unicode_names if all(u in x for u in splitsup)
+ ]
+ if candidates:
+ return s, candidates
+
+ return "", ()
+
+ # if text does not start with slash
+ else:
+ return '', ()
+
+ @property
+ def unicode_names(self) -> List[str]:
+ """List of names of unicode code points that can be completed.
+
+ The list is lazily initialized on first access.
+ """
+ if self._unicode_names is None:
+            # compute the name list once over _UNICODE_RANGES and cache it
+            self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
+
+ return self._unicode_names
+
+def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
+ names = []
+ for start,stop in ranges:
+ for c in range(start, stop) :
+ try:
+ names.append(unicodedata.name(chr(c)))
+ except ValueError:
+ pass
+ return names
diff --git a/contrib/python/ipython/py3/IPython/core/completerlib.py b/contrib/python/ipython/py3/IPython/core/completerlib.py
new file mode 100644
index 0000000000..65efa42254
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/completerlib.py
@@ -0,0 +1,418 @@
+# encoding: utf-8
+"""Implementations for various useful completers.
+
+These are all loaded by default by IPython.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib imports
+import glob
+import inspect
+import itertools
+import os
+import re
+import sys
+from importlib import import_module
+from importlib.machinery import all_suffixes
+
+
+# Third-party imports
+from time import time
+from zipimport import zipimporter
+
+# Our own imports
+from .completer import expand_user, compress_user
+from .error import TryNext
+from ..utils._process_common import arg_split
+
+# FIXME: this should be pulled in with the right call via the component system
+from IPython import get_ipython
+
+from typing import List
+
+from __res import importer
+
+#-----------------------------------------------------------------------------
+# Globals and constants
+#-----------------------------------------------------------------------------
+_suffixes = all_suffixes()
+
+# Time in seconds after which the rootmodules will be stored permanently in the
+# ipython ip.db database (kept in the user's .ipython dir).
+TIMEOUT_STORAGE = 2
+
+# Time in seconds after which we give up
+TIMEOUT_GIVEUP = 20
+
+# Regular expression for the python import statement
+import_re = re.compile(r'(?P<name>[^\W\d]\w*?)'
+ r'(?P<package>[/\\]__init__)?'
+ r'(?P<suffix>%s)$' %
+ r'|'.join(re.escape(s) for s in _suffixes))
+
+# RE for the ipython %run command (python + ipython scripts)
+magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+arcadia_rootmodules_cache = None
+arcadia_modules_cache = None
+
+
+def arcadia_init_cache():
+ global arcadia_rootmodules_cache, arcadia_modules_cache
+ arcadia_rootmodules_cache = set()
+ arcadia_modules_cache = {}
+
+ all_modules = itertools.chain(
+ sys.builtin_module_names,
+ importer.memory
+ )
+
+ for name in all_modules:
+ path = name.split('.')
+ arcadia_rootmodules_cache.add(path[0])
+
+ prefix = path[0]
+ for element in path[1:]:
+ if element == '__init__':
+ continue
+
+ arcadia_modules_cache.setdefault(prefix, set()).add(element)
+ prefix += '.' + element
+
+ arcadia_rootmodules_cache = sorted(arcadia_rootmodules_cache)
+ arcadia_modules_cache = {k: sorted(v) for k, v in arcadia_modules_cache.items()}
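+    # Illustrative example (not executed): a module name 'xml.dom.minidom'
+    # contributes 'xml' to arcadia_rootmodules_cache, and adds 'dom' under
+    # 'xml' and 'minidom' under 'xml.dom' in arcadia_modules_cache.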
+
+
+def arcadia_module_list(mod):
+ if arcadia_modules_cache is None:
+ arcadia_init_cache()
+
+ return arcadia_modules_cache.get(mod, ())
+
+
+def arcadia_get_root_modules():
+ if arcadia_rootmodules_cache is None:
+ arcadia_init_cache()
+
+ return arcadia_rootmodules_cache
+
+
+def module_list(path):
+ """
+ Return the list containing the names of the modules available in the given
+ folder.
+ """
+ # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
+ if path == '':
+ path = '.'
+
+ # A few local constants to be used in loops below
+ pjoin = os.path.join
+
+ if os.path.isdir(path):
+ # Build a list of all files in the directory and all files
+ # in its subdirectories. For performance reasons, do not
+ # recurse more than one level into subdirectories.
+ files = []
+ for root, dirs, nondirs in os.walk(path, followlinks=True):
+ subdir = root[len(path)+1:]
+ if subdir:
+ files.extend(pjoin(subdir, f) for f in nondirs)
+ dirs[:] = [] # Do not recurse into additional subdirectories.
+ else:
+ files.extend(nondirs)
+
+ else:
+ try:
+ files = list(zipimporter(path)._files.keys())
+ except:
+ files = []
+
+ # Build a list of modules which match the import_re regex.
+ modules = []
+ for f in files:
+ m = import_re.match(f)
+ if m:
+ modules.append(m.group('name'))
+ return list(set(modules))
+
+
+def get_root_modules():
+ """
+ Returns a list containing the names of all the modules available in the
+ folders of the pythonpath.
+
+ ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
+ """
+ ip = get_ipython()
+ if ip is None:
+ # No global shell instance to store cached list of modules.
+ # Don't try to scan for modules every time.
+ return list(sys.builtin_module_names)
+
+ rootmodules_cache = ip.db.get('rootmodules_cache', {})
+ rootmodules = list(sys.builtin_module_names)
+ start_time = time()
+ store = False
+ for path in sys.path:
+ try:
+ modules = rootmodules_cache[path]
+ except KeyError:
+ modules = module_list(path)
+ try:
+ modules.remove('__init__')
+ except ValueError:
+ pass
+ if path not in ('', '.'): # cwd modules should not be cached
+ rootmodules_cache[path] = modules
+ if time() - start_time > TIMEOUT_STORAGE and not store:
+ store = True
+ print("\nCaching the list of root modules, please wait!")
+ print("(This will only be done once - type '%rehashx' to "
+ "reset cache!)\n")
+ sys.stdout.flush()
+ if time() - start_time > TIMEOUT_GIVEUP:
+ print("This is taking too long, we give up.\n")
+ return []
+ rootmodules.extend(modules)
+ if store:
+ ip.db['rootmodules_cache'] = rootmodules_cache
+ rootmodules = list(set(rootmodules))
+ return rootmodules
+
+
+def is_importable(module, attr, only_modules):
+ if only_modules:
+ return inspect.ismodule(getattr(module, attr))
+ else:
+ return not(attr[:2] == '__' and attr[-2:] == '__')
+
+def is_possible_submodule(module, attr):
+ try:
+ obj = getattr(module, attr)
+ except AttributeError:
+        # Is possibly an unimported submodule
+ return True
+ except TypeError:
+ # https://github.com/ipython/ipython/issues/9678
+ return False
+ return inspect.ismodule(obj)
+
+
+def try_import(mod: str, only_modules=False) -> List[str]:
+ """
+ Try to import given module and return list of potential completions.
+ """
+ mod = mod.rstrip('.')
+ try:
+ m = import_module(mod)
+ except:
+ return []
+
+ filename = getattr(m, '__file__', '')
+ m_is_init = '__init__' in (filename or '') or filename == mod
+
+ completions = []
+ if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
+ completions.extend( [attr for attr in dir(m) if
+ is_importable(m, attr, only_modules)])
+
+ m_all = getattr(m, "__all__", [])
+ if only_modules:
+ completions.extend(attr for attr in m_all if is_possible_submodule(m, attr))
+ else:
+ completions.extend(m_all)
+
+ if m_is_init:
+ completions.extend(arcadia_module_list(mod))
+ completions_set = {c for c in completions if isinstance(c, str)}
+ completions_set.discard('__init__')
+ return sorted(completions_set)
+
+
+#-----------------------------------------------------------------------------
+# Completion-related functions.
+#-----------------------------------------------------------------------------
+
+def quick_completer(cmd, completions):
+ r""" Easily create a trivial completer for a command.
+
+    Takes either a list of completions, or all completions as a single string
+    (which will be split on whitespace).
+
+ Example::
+
+ [d:\ipython]|1> import ipy_completers
+ [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
+ [d:\ipython]|3> foo b<TAB>
+ bar baz
+ [d:\ipython]|3> foo ba
+ """
+
+ if isinstance(completions, str):
+ completions = completions.split()
+
+ def do_complete(self, event):
+ return completions
+
+ get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
+
+def module_completion(line):
+ """
+ Returns a list containing the completion possibilities for an import line.
+
+    The line looks like this:
+ 'import xml.d'
+ 'from xml.dom import'
+ """
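+    # Illustrative expectations (a sketch; the exact lists depend on which
+    # modules are importable):
+    #   module_completion('from xml ')        -> ['import ']
+    #   module_completion('import xml.d')     -> dotted candidates such as 'xml.dom'
+    #   module_completion('from xml import ') -> names importable from 'xml'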
+
+ words = line.split(' ')
+ nwords = len(words)
+
+ # from whatever <tab> -> 'import '
+ if nwords == 3 and words[0] == 'from':
+ return ['import ']
+
+ # 'from xy<tab>' or 'import xy<tab>'
+ if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
+ if nwords == 1:
+ return arcadia_get_root_modules()
+ mod = words[1].split('.')
+ if len(mod) < 2:
+ return arcadia_get_root_modules()
+ completion_list = try_import('.'.join(mod[:-1]), True)
+ return ['.'.join(mod[:-1] + [el]) for el in completion_list]
+
+ # 'from xyz import abc<tab>'
+ if nwords >= 3 and words[0] == 'from':
+ mod = words[1]
+ return try_import(mod)
+
+#-----------------------------------------------------------------------------
+# Completers
+#-----------------------------------------------------------------------------
+# These all have the func(self, event) signature to be used as custom
+# completers
+
+def module_completer(self,event):
+ """Give completions after user has typed 'import ...' or 'from ...'"""
+
+ # This works in all versions of python. While 2.5 has
+ # pkgutil.walk_packages(), that particular routine is fairly dangerous,
+ # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
+ # of possibly problematic side effects.
+    # This searches the folders in sys.path for available modules.
+
+ return module_completion(event.line)
+
+# FIXME: there's a lot of logic common to the run, cd and builtin file
+# completers, that is currently reimplemented in each.
+
+def magic_run_completer(self, event):
+ """Complete files that end in .py or .ipy or .ipynb for the %run command.
+ """
+ comps = arg_split(event.line, strict=False)
+ # relpath should be the current token that we need to complete.
+ if (len(comps) > 1) and (not event.line.endswith(' ')):
+ relpath = comps[-1].strip("'\"")
+ else:
+ relpath = ''
+
+ #print("\nev=", event) # dbg
+ #print("rp=", relpath) # dbg
+ #print('comps=', comps) # dbg
+
+ lglob = glob.glob
+ isdir = os.path.isdir
+ relpath, tilde_expand, tilde_val = expand_user(relpath)
+
+ # Find if the user has already typed the first filename, after which we
+ # should complete on all files, since after the first one other files may
+ # be arguments to the input script.
+
+ if any(magic_run_re.match(c) for c in comps):
+ matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
+ for f in lglob(relpath+'*')]
+ else:
+ dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
+ pys = [f.replace('\\','/')
+ for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
+ lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
+
+ matches = dirs + pys
+
+ #print('run comp:', dirs+pys) # dbg
+ return [compress_user(p, tilde_expand, tilde_val) for p in matches]
+
+
+def cd_completer(self, event):
+ """Completer function for cd, which only returns directories."""
+ ip = get_ipython()
+ relpath = event.symbol
+
+ #print(event) # dbg
+ if event.line.endswith('-b') or ' -b ' in event.line:
+ # return only bookmark completions
+ bkms = self.db.get('bookmarks', None)
+ if bkms:
+ return bkms.keys()
+ else:
+ return []
+
+ if event.symbol == '-':
+ width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
+ # jump in directory history by number
+ fmt = '-%0' + width_dh +'d [%s]'
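+        # e.g. with 12 entries in _dh, width_dh == '2' and the format becomes
+        # '-%02d [%s]' (illustrative arithmetic only)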
+ ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
+ if len(ents) > 1:
+ return ents
+ return []
+
+ if event.symbol.startswith('--'):
+ return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
+
+ # Expand ~ in path and normalize directory separators.
+ relpath, tilde_expand, tilde_val = expand_user(relpath)
+ relpath = relpath.replace('\\','/')
+
+ found = []
+ for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
+ if os.path.isdir(f)]:
+ if ' ' in d:
+ # we don't want to deal with any of that, complex code
+ # for this is elsewhere
+ raise TryNext
+
+ found.append(d)
+
+ if not found:
+ if os.path.isdir(relpath):
+ return [compress_user(relpath, tilde_expand, tilde_val)]
+
+ # if no completions so far, try bookmarks
+ bks = self.db.get('bookmarks',{})
+ bkmatches = [s for s in bks if s.startswith(event.symbol)]
+ if bkmatches:
+ return bkmatches
+
+ raise TryNext
+
+ return [compress_user(p, tilde_expand, tilde_val) for p in found]
+
+def reset_completer(self, event):
+ "A completer for %reset magic"
+ return '-f -s in out array dhist'.split()
diff --git a/contrib/python/ipython/py3/IPython/core/crashhandler.py b/contrib/python/ipython/py3/IPython/core/crashhandler.py
new file mode 100644
index 0000000000..f60a75bbc5
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/crashhandler.py
@@ -0,0 +1,236 @@
+# encoding: utf-8
+"""sys.excepthook for IPython itself, leaves a detailed report on disk.
+
+Authors:
+
+* Fernando Perez
+* Brian E. Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+import traceback
+from pprint import pformat
+from pathlib import Path
+
+from IPython.core import ultratb
+from IPython.core.release import author_email
+from IPython.utils.sysinfo import sys_info
+from IPython.utils.py3compat import input
+
+from IPython.core.release import __version__ as version
+
+from typing import Optional
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+# Template for the user message.
+_default_message_template = """\
+Oops, {app_name} crashed. We do our best to make it stable, but...
+
+A crash report was automatically generated with the following information:
+ - A verbatim copy of the crash traceback.
+ - A copy of your input history during this session.
+ - Data on your current {app_name} configuration.
+
+It was left in the file named:
+\t'{crash_report_fname}'
+If you can email this file to the developers, the information in it will help
+them in understanding and correcting the problem.
+
+You can mail it to: {contact_name} at {contact_email}
+with the subject '{app_name} Crash Report'.
+
+If you want to do it now, the following command will work (under Unix):
+mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
+
+In your email, please also include information about:
+- The operating system under which the crash happened: Linux, macOS, Windows,
+ other, and which exact version (for example: Ubuntu 16.04.3, macOS 10.13.2,
+ Windows 10 Pro), and whether it is 32-bit or 64-bit;
+- How {app_name} was installed: using pip or conda, from GitHub, as part of
+ a Docker container, or other, providing more detail if possible;
+- How to reproduce the crash: what exact sequence of instructions can one
+ input to get the same crash? Ideally, find a minimal yet complete sequence
+ of instructions that yields the crash.
+
+To ensure accurate tracking of this issue, please file a report about it at:
+{bug_tracker}
+"""
+
+_lite_message_template = """
+If you suspect this is an IPython {version} bug, please report it at:
+ https://github.com/ipython/ipython/issues
+or send an email to the mailing list at {email}
+
+You can print a more detailed traceback right now with "%tb", or use "%debug"
+to interactively debug it.
+
+Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
+ {config}Application.verbose_crash=True
+"""
+
+
+class CrashHandler(object):
+ """Customizable crash handlers for IPython applications.
+
+ Instances of this class provide a :meth:`__call__` method which can be
+ used as a ``sys.excepthook``. The :meth:`__call__` signature is::
+
+ def __call__(self, etype, evalue, etb)
+ """
+
+ message_template = _default_message_template
+ section_sep = '\n\n'+'*'*75+'\n\n'
+
+ def __init__(
+ self,
+ app,
+ contact_name: Optional[str] = None,
+ contact_email: Optional[str] = None,
+ bug_tracker: Optional[str] = None,
+ show_crash_traceback: bool = True,
+ call_pdb: bool = False,
+ ):
+ """Create a new crash handler
+
+ Parameters
+ ----------
+ app : Application
+ A running :class:`Application` instance, which will be queried at
+ crash time for internal information.
+ contact_name : str
+ A string with the name of the person to contact.
+ contact_email : str
+ A string with the email address of the contact.
+ bug_tracker : str
+ A string with the URL for your project's bug tracker.
+ show_crash_traceback : bool
+ If false, don't print the crash traceback on stderr, only generate
+ the on-disk report
+ call_pdb
+ Whether to call pdb on crash
+
+ Attributes
+ ----------
+ These instances contain some non-argument attributes which allow for
+ further customization of the crash handler's behavior. Please see the
+ source for further details.
+
+ """
+ self.crash_report_fname = "Crash_report_%s.txt" % app.name
+ self.app = app
+ self.call_pdb = call_pdb
+ #self.call_pdb = True # dbg
+ self.show_crash_traceback = show_crash_traceback
+ self.info = dict(app_name = app.name,
+ contact_name = contact_name,
+ contact_email = contact_email,
+ bug_tracker = bug_tracker,
+ crash_report_fname = self.crash_report_fname)
+
+
+ def __call__(self, etype, evalue, etb):
+        """Handle an exception; the call signature is compatible with sys.excepthook."""
+
+ # do not allow the crash handler to be called twice without reinstalling it
+ # this prevents unlikely errors in the crash handling from entering an
+ # infinite loop.
+ sys.excepthook = sys.__excepthook__
+
+ # Report tracebacks shouldn't use color in general (safer for users)
+ color_scheme = 'NoColor'
+
+ # Use this ONLY for developer debugging (keep commented out for release)
+ #color_scheme = 'Linux' # dbg
+ try:
+ rptdir = self.app.ipython_dir
+ except:
+ rptdir = Path.cwd()
+ if rptdir is None or not Path.is_dir(rptdir):
+ rptdir = Path.cwd()
+ report_name = rptdir / self.crash_report_fname
+ # write the report filename into the instance dict so it can get
+ # properly expanded out in the user message template
+ self.crash_report_fname = report_name
+ self.info['crash_report_fname'] = report_name
+ TBhandler = ultratb.VerboseTB(
+ color_scheme=color_scheme,
+ long_header=1,
+ call_pdb=self.call_pdb,
+ )
+ if self.call_pdb:
+ TBhandler(etype,evalue,etb)
+ return
+ else:
+ traceback = TBhandler.text(etype,evalue,etb,context=31)
+
+ # print traceback to screen
+ if self.show_crash_traceback:
+ print(traceback, file=sys.stderr)
+
+ # and generate a complete report on disk
+ try:
+ report = open(report_name, "w", encoding="utf-8")
+ except:
+ print('Could not create crash report on disk.', file=sys.stderr)
+ return
+
+ with report:
+ # Inform user on stderr of what happened
+ print('\n'+'*'*70+'\n', file=sys.stderr)
+ print(self.message_template.format(**self.info), file=sys.stderr)
+
+ # Construct report on disk
+ report.write(self.make_report(traceback))
+
+ input("Hit <Enter> to quit (your terminal may close):")
+
+ def make_report(self,traceback):
+ """Return a string containing a crash report."""
+
+ sec_sep = self.section_sep
+
+ report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
+ rpt_add = report.append
+ rpt_add(sys_info())
+
+ try:
+ config = pformat(self.app.config)
+ rpt_add(sec_sep)
+            rpt_add('Application name: %s\n\n' % self.app.name)
+ rpt_add('Current user configuration structure:\n\n')
+ rpt_add(config)
+ except:
+ pass
+ rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
+
+ return ''.join(report)
+
+
+def crash_handler_lite(etype, evalue, tb):
+ """a light excepthook, adding a small message to the usual traceback"""
+ traceback.print_exception(etype, evalue, tb)
+
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ # we are in a Shell environment, give %magic example
+ config = "%config "
+ else:
+ # we are not in a shell, show generic config
+ config = "c."
+ print(_lite_message_template.format(email=author_email, config=config, version=version), file=sys.stderr)
+
diff --git a/contrib/python/ipython/py3/IPython/core/debugger.py b/contrib/python/ipython/py3/IPython/core/debugger.py
new file mode 100644
index 0000000000..c8082e34e7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/debugger.py
@@ -0,0 +1,997 @@
+# -*- coding: utf-8 -*-
+"""
+Pdb debugger class.
+
+
+This is an extension to PDB which adds a number of new features.
+Note that there is also the `IPython.terminal.debugger` class which provides UI
+improvements.
+
+We also strongly recommend using this via the `ipdb` package, which provides
+extra configuration options.
+
+Among other things, this subclass of PDB:
+ - supports many IPython magics like pdef/psource
+ - hides frames in tracebacks based on `__tracebackhide__`
+ - allows skipping frames based on `__debuggerskip__`
+
+Frame skipping and hiding are configurable via the `skip_predicates`
+command.
+
+By default, frames from read-only files will be hidden, and frames containing
+``__tracebackhide__=True`` will be hidden.
+
+Frames containing ``__debuggerskip__`` will be stepped over, and frames whose
+parent frame's ``__debuggerskip__`` value is ``True`` will be skipped.
+
+ >>> def helpers_helper():
+ ... pass
+ ...
+ ... def helper_1():
+ ... print("don't step in me")
+    ...     helpers_helper() # will be stepped over unless a breakpoint is set.
+ ...
+ ...
+ ... def helper_2():
+ ... print("in me neither")
+ ...
+
+One can define a decorator that wraps a function between the two helpers:
+
+ >>> def pdb_skipped_decorator(function):
+ ...
+ ...
+ ... def wrapped_fn(*args, **kwargs):
+ ... __debuggerskip__ = True
+ ... helper_1()
+ ... __debuggerskip__ = False
+ ... result = function(*args, **kwargs)
+ ... __debuggerskip__ = True
+ ... helper_2()
+ ... # setting __debuggerskip__ to False again is not necessary
+ ... return result
+ ...
+ ... return wrapped_fn
+
+With the decorator applied, stepping into the decorated function will by
+default go directly into ``bar()``, skipping the helpers:
+
+    >>> @pdb_skipped_decorator
+ ... def bar(x, y):
+ ... return x * y
+
+
+You can toggle the behavior with
+
+ ipdb> skip_predicates debuggerskip false
+
+or configure it in your ``.pdbrc``
+
+
+
+License
+-------
+
+Modified from the standard pdb.Pdb class to avoid including readline, so that
+the command line completion of other programs which include this isn't
+damaged.
+
+In the future, this class will be expanded with improvements over the standard
+pdb.
+
+The original code in this file is mainly lifted out of cmd.py in Python 2.2,
+with minor changes. Licensing should therefore be under the standard Python
+terms. For details on the PSF (Python Software Foundation) standard license,
+see:
+
+https://docs.python.org/2/license.html
+
+
+All the changes since then are under the same license as IPython.
+
+"""
+
+#*****************************************************************************
+#
+# This file is licensed under the PSF license.
+#
+# Copyright (C) 2001 Python Software Foundation, www.python.org
+# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
+#
+#
+#*****************************************************************************
+
+import inspect
+import linecache
+import sys
+import re
+import os
+
+from IPython import get_ipython
+from IPython.utils import PyColorize
+from IPython.utils import coloransi, py3compat
+from IPython.core.excolors import exception_colors
+
+# skip module doctests
+__skip_doctest__ = True
+
+prompt = 'ipdb> '
+
+# We have to check this directly from sys.argv, config struct not yet available
+from pdb import Pdb as OldPdb
+
+# Allow the set_trace code to operate outside of an ipython instance, even if
+# it does so with some limitations. The rest of this support is implemented in
+# the Tracer constructor.
+
+DEBUGGERSKIP = "__debuggerskip__"
+
+
+def make_arrow(pad):
+ """generate the leading arrow in front of traceback or debugger"""
+ if pad >= 2:
+ return '-'*(pad-2) + '> '
+ elif pad == 1:
+ return '>'
+ return ''
+
+
+def BdbQuit_excepthook(et, ev, tb, excepthook=None):
+ """Exception hook which handles `BdbQuit` exceptions.
+
+ All other exceptions are processed using the `excepthook`
+ parameter.
+ """
+ raise ValueError(
+ "`BdbQuit_excepthook` is deprecated since version 5.1",
+ )
+
+
+def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
+ raise ValueError(
+ "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
+    )
+
+
+RGX_EXTRA_INDENT = re.compile(r'(?<=\n)\s+')
+
+
+def strip_indentation(multiline_string):
+ return RGX_EXTRA_INDENT.sub('', multiline_string)
+
+
+def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
+ """Make new_fn have old_fn's doc string. This is particularly useful
+ for the ``do_...`` commands that hook into the help system.
+    Adapted from a comp.lang.python posting
+ by Duncan Booth."""
+ def wrapper(*args, **kw):
+ return new_fn(*args, **kw)
+ if old_fn.__doc__:
+ wrapper.__doc__ = strip_indentation(old_fn.__doc__) + additional_text
+ return wrapper
+
+
+class Pdb(OldPdb):
+ """Modified Pdb class, does not load readline.
+
+    For a standalone version that uses prompt_toolkit, see
+ `IPython.terminal.debugger.TerminalPdb` and
+ `IPython.terminal.debugger.set_trace()`
+
+
+ This debugger can hide and skip frames that are tagged according to some predicates.
+    See the `skip_predicates` command.
+
+ """
+
+ default_predicates = {
+ "tbhide": True,
+ "readonly": False,
+ "ipython_internal": True,
+ "debuggerskip": True,
+ }
+
+ def __init__(self, completekey=None, stdin=None, stdout=None, context=5, **kwargs):
+ """Create a new IPython debugger.
+
+ Parameters
+ ----------
+ completekey : default None
+ Passed to pdb.Pdb.
+ stdin : default None
+ Passed to pdb.Pdb.
+ stdout : default None
+ Passed to pdb.Pdb.
+ context : int
+ Number of lines of source code context to show when
+ displaying stacktrace information.
+ **kwargs
+ Passed to pdb.Pdb.
+
+ Notes
+ -----
+ The possibilities are python version dependent, see the python
+ docs for more info.
+ """
+
+ # Parent constructor:
+ try:
+ self.context = int(context)
+ if self.context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError) as e:
+ raise ValueError("Context must be a positive integer") from e
+
+ # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`.
+ OldPdb.__init__(self, completekey, stdin, stdout, **kwargs)
+
+ # IPython changes...
+ self.shell = get_ipython()
+
+ if self.shell is None:
+ save_main = sys.modules['__main__']
+ # No IPython instance running, we must create one
+ from IPython.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ self.shell = TerminalInteractiveShell.instance()
+ # needed by any code which calls __import__("__main__") after
+ # the debugger was entered. See also #9941.
+ sys.modules["__main__"] = save_main
+
+
+ color_scheme = self.shell.colors
+
+ self.aliases = {}
+
+ # Create color table: we copy the default one from the traceback
+ # module and add a few attributes needed for debugging
+ self.color_scheme_table = exception_colors()
+
+ # shorthands
+ C = coloransi.TermColors
+ cst = self.color_scheme_table
+
+ cst['NoColor'].colors.prompt = C.NoColor
+ cst['NoColor'].colors.breakpoint_enabled = C.NoColor
+ cst['NoColor'].colors.breakpoint_disabled = C.NoColor
+
+ cst['Linux'].colors.prompt = C.Green
+ cst['Linux'].colors.breakpoint_enabled = C.LightRed
+ cst['Linux'].colors.breakpoint_disabled = C.Red
+
+ cst['LightBG'].colors.prompt = C.Blue
+ cst['LightBG'].colors.breakpoint_enabled = C.LightRed
+ cst['LightBG'].colors.breakpoint_disabled = C.Red
+
+ cst['Neutral'].colors.prompt = C.Blue
+ cst['Neutral'].colors.breakpoint_enabled = C.LightRed
+ cst['Neutral'].colors.breakpoint_disabled = C.Red
+
+ # Add a python parser so we can syntax highlight source while
+ # debugging.
+ self.parser = PyColorize.Parser(style=color_scheme)
+ self.set_colors(color_scheme)
+
+ # Set the prompt - the default prompt is '(Pdb)'
+ self.prompt = prompt
+ self.skip_hidden = True
+ self.report_skipped = True
+
+ # list of predicates we use to skip frames
+ self._predicates = self.default_predicates
+
+ def set_colors(self, scheme):
+ """Shorthand access to the color table scheme selector method."""
+ self.color_scheme_table.set_active_scheme(scheme)
+ self.parser.style = scheme
+
+ def set_trace(self, frame=None):
+ if frame is None:
+ frame = sys._getframe().f_back
+ self.initial_frame = frame
+ return super().set_trace(frame)
+
+ def _hidden_predicate(self, frame):
+ """
+        Given a frame, return whether it should be hidden or not by IPython.
+ """
+
+ if self._predicates["readonly"]:
+ fname = frame.f_code.co_filename
+            # we need to check for file existence; interactively defined
+            # functions would otherwise appear as read-only.
+ if os.path.isfile(fname) and not os.access(fname, os.W_OK):
+ return True
+
+ if self._predicates["tbhide"]:
+ if frame in (self.curframe, getattr(self, "initial_frame", None)):
+ return False
+ frame_locals = self._get_frame_locals(frame)
+ if "__tracebackhide__" not in frame_locals:
+ return False
+ return frame_locals["__tracebackhide__"]
+ return False
+
+ def hidden_frames(self, stack):
+ """
+        Given a stack, return a list indicating, for each frame, whether it should be skipped.
+
+ This is used in up/down and where to skip frames.
+ """
+ # The f_locals dictionary is updated from the actual frame
+ # locals whenever the .f_locals accessor is called, so we
+ # avoid calling it here to preserve self.curframe_locals.
+ # Furthermore, there is no good reason to hide the current frame.
+ ip_hide = [self._hidden_predicate(s[0]) for s in stack]
+ ip_start = [i for i, s in enumerate(ip_hide) if s == "__ipython_bottom__"]
+ if ip_start and self._predicates["ipython_internal"]:
+ ip_hide = [h if i > ip_start[0] else True for (i, h) in enumerate(ip_hide)]
+ return ip_hide
+
+ def interaction(self, frame, traceback):
+ try:
+ OldPdb.interaction(self, frame, traceback)
+ except KeyboardInterrupt:
+ self.stdout.write("\n" + self.shell.get_exception_only())
+
+ def precmd(self, line):
+ """Perform useful escapes on the command before it is executed."""
+
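+        # e.g. ``obj?`` becomes ``pinfo obj`` and ``obj??`` becomes ``pinfo2 obj``
+        # before the line is handed to the standard pdb precmd below.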
+ if line.endswith("??"):
+ line = "pinfo2 " + line[:-2]
+ elif line.endswith("?"):
+ line = "pinfo " + line[:-1]
+
+ line = super().precmd(line)
+
+ return line
+
+ def new_do_frame(self, arg):
+ OldPdb.do_frame(self, arg)
+
+ def new_do_quit(self, arg):
+
+ if hasattr(self, 'old_all_completions'):
+ self.shell.Completer.all_completions = self.old_all_completions
+
+ return OldPdb.do_quit(self, arg)
+
+ do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
+
+ def new_do_restart(self, arg):
+ """Restart command. In the context of ipython this is exactly the same
+ thing as 'quit'."""
+ self.msg("Restart doesn't make sense here. Using 'quit' instead.")
+ return self.do_quit(arg)
+
+ def print_stack_trace(self, context=None):
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ if context is None:
+ context = self.context
+ try:
+ context = int(context)
+ if context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError) as e:
+ raise ValueError("Context must be a positive integer") from e
+ try:
+ skipped = 0
+ for hidden, frame_lineno in zip(self.hidden_frames(self.stack), self.stack):
+ if hidden and self.skip_hidden:
+ skipped += 1
+ continue
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+ skipped = 0
+ self.print_stack_entry(frame_lineno, context=context)
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+ except KeyboardInterrupt:
+ pass
+
+ def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
+ context=None):
+ if context is None:
+ context = self.context
+ try:
+ context = int(context)
+ if context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError) as e:
+ raise ValueError("Context must be a positive integer") from e
+ print(self.format_stack_entry(frame_lineno, '', context), file=self.stdout)
+
+ # vds: >>
+ frame, lineno = frame_lineno
+ filename = frame.f_code.co_filename
+ self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
+ # vds: <<
+
+ def _get_frame_locals(self, frame):
+ """ "
+ Accessing f_local of current frame reset the namespace, so we want to avoid
+ that or the following can happen
+
+ ipdb> foo
+ "old"
+ ipdb> foo = "new"
+ ipdb> foo
+ "new"
+ ipdb> where
+ ipdb> foo
+ "old"
+
+        So if ``frame`` is ``self.curframe`` we instead return ``self.curframe_locals``.
+
+ """
+ if frame is self.curframe:
+ return self.curframe_locals
+ else:
+ return frame.f_locals
+
+ def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
+ if context is None:
+ context = self.context
+ try:
+ context = int(context)
+ if context <= 0:
+ print("Context must be a positive integer", file=self.stdout)
+ except (TypeError, ValueError):
+ print("Context must be a positive integer", file=self.stdout)
+
+ import reprlib
+
+ ret = []
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
+ tpl_call = "%s%%s%s%%s%s" % (Colors.vName, Colors.valEm, ColorsNormal)
+ tpl_line = "%%s%s%%s %s%%s" % (Colors.lineno, ColorsNormal)
+ tpl_line_em = "%%s%s%%s %s%%s%s" % (Colors.linenoEm, Colors.line, ColorsNormal)
+
+ frame, lineno = frame_lineno
+
+ return_value = ''
+ loc_frame = self._get_frame_locals(frame)
+ if "__return__" in loc_frame:
+ rv = loc_frame["__return__"]
+ # return_value += '->'
+ return_value += reprlib.repr(rv) + "\n"
+ ret.append(return_value)
+
+ #s = filename + '(' + `lineno` + ')'
+ filename = self.canonic(frame.f_code.co_filename)
+ link = tpl_link % py3compat.cast_unicode(filename)
+
+ if frame.f_code.co_name:
+ func = frame.f_code.co_name
+ else:
+ func = "<lambda>"
+
+ call = ""
+ if func != "?":
+ if "__args__" in loc_frame:
+ args = reprlib.repr(loc_frame["__args__"])
+ else:
+ args = '()'
+ call = tpl_call % (func, args)
+
+ # The level info should be generated in the same format pdb uses, to
+ # avoid breaking the pdbtrack functionality of python-mode in *emacs.
+ if frame is self.curframe:
+ ret.append('> ')
+ else:
+ ret.append(" ")
+ ret.append("%s(%s)%s\n" % (link, lineno, call))
+
+ start = lineno - 1 - context//2
+ lines = linecache.getlines(filename)
+ start = min(start, len(lines) - context)
+ start = max(start, 0)
+ lines = lines[start : start + context]
+
+ for i, line in enumerate(lines):
+ show_arrow = start + 1 + i == lineno
+ linetpl = (frame is self.curframe or show_arrow) and tpl_line_em or tpl_line
+ ret.append(
+ self.__format_line(
+ linetpl, filename, start + 1 + i, line, arrow=show_arrow
+ )
+ )
+ return "".join(ret)
+
+ def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
+ bp_mark = ""
+ bp_mark_color = ""
+
+ new_line, err = self.parser.format2(line, 'str')
+ if not err:
+ line = new_line
+
+ bp = None
+ if lineno in self.get_file_breaks(filename):
+ bps = self.get_breaks(filename, lineno)
+ bp = bps[-1]
+
+ if bp:
+ Colors = self.color_scheme_table.active_colors
+ bp_mark = str(bp.number)
+ bp_mark_color = Colors.breakpoint_enabled
+ if not bp.enabled:
+ bp_mark_color = Colors.breakpoint_disabled
+
+ numbers_width = 7
+ if arrow:
+ # This is the line with the error
+ pad = numbers_width - len(str(lineno)) - len(bp_mark)
+ num = '%s%s' % (make_arrow(pad), str(lineno))
+ else:
+ num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
+
+ return tpl_line % (bp_mark_color + bp_mark, num, line)
+
+ def print_list_lines(self, filename, first, last):
+ """The printing (as opposed to the parsing part of a 'list'
+ command."""
+ try:
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
+ tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
+ src = []
+ if filename == "<string>" and hasattr(self, "_exec_filename"):
+ filename = self._exec_filename
+
+ for lineno in range(first, last+1):
+ line = linecache.getline(filename, lineno)
+ if not line:
+ break
+
+ if lineno == self.curframe.f_lineno:
+ line = self.__format_line(
+ tpl_line_em, filename, lineno, line, arrow=True
+ )
+ else:
+ line = self.__format_line(
+ tpl_line, filename, lineno, line, arrow=False
+ )
+
+ src.append(line)
+ self.lineno = lineno
+
+ print(''.join(src), file=self.stdout)
+
+ except KeyboardInterrupt:
+ pass
+
+ def do_skip_predicates(self, args):
+ """
+        Turn on/off individual predicates as to whether a frame should be hidden/skipped.
+
+ The global option to skip (or not) hidden frames is set with skip_hidden
+
+ To change the value of a predicate
+
+ skip_predicates key [true|false]
+
+ Call without arguments to see the current values.
+
+ To permanently change the value of an option add the corresponding
+ command to your ``~/.pdbrc`` file. If you are programmatically using the
+ Pdb instance you can also change the ``default_predicates`` class
+ attribute.
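+
+        For instance, a subclass could override the defaults (an illustrative
+        sketch, not part of the IPython API)::
+
+            class MyPdb(Pdb):
+                default_predicates = {**Pdb.default_predicates, "readonly": True}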
+ """
+ if not args.strip():
+ print("current predicates:")
+ for p, v in self._predicates.items():
+ print(" ", p, ":", v)
+ return
+ type_value = args.strip().split(" ")
+ if len(type_value) != 2:
+ print(
+ f"Usage: skip_predicates <type> <value>, with <type> one of {set(self._predicates.keys())}"
+ )
+ return
+
+ type_, value = type_value
+ if type_ not in self._predicates:
+ print(f"{type_!r} not in {set(self._predicates.keys())}")
+ return
+ if value.lower() not in ("true", "yes", "1", "no", "false", "0"):
+ print(
+ f"{value!r} is invalid - use one of ('true', 'yes', '1', 'no', 'false', '0')"
+ )
+ return
+
+ self._predicates[type_] = value.lower() in ("true", "yes", "1")
+ if not any(self._predicates.values()):
+ print(
+ "Warning, all predicates set to False, skip_hidden may not have any effects."
+ )
+
+ def do_skip_hidden(self, arg):
+ """
+ Change whether or not we should skip frames with the
+ __tracebackhide__ attribute.
+ """
+ if not arg.strip():
+ print(
+ f"skip_hidden = {self.skip_hidden}, use 'yes','no', 'true', or 'false' to change."
+ )
+ elif arg.strip().lower() in ("true", "yes"):
+ self.skip_hidden = True
+ elif arg.strip().lower() in ("false", "no"):
+ self.skip_hidden = False
+ if not any(self._predicates.values()):
+ print(
+ "Warning, all predicates set to False, skip_hidden may not have any effects."
+ )
+
+ def do_list(self, arg):
+ """Print lines of code from the current stack frame
+ """
+ self.lastcmd = 'list'
+ last = None
+ if arg:
+ try:
+ x = eval(arg, {}, {})
+                if isinstance(x, tuple):
+ first, last = x
+ first = int(first)
+ last = int(last)
+ if last < first:
+ # Assume it's a count
+ last = first + last
+ else:
+ first = max(1, int(x) - 5)
+ except:
+ print('*** Error in argument:', repr(arg), file=self.stdout)
+ return
+ elif self.lineno is None:
+ first = max(1, self.curframe.f_lineno - 5)
+ else:
+ first = self.lineno + 1
+ if last is None:
+ last = first + 10
+ self.print_list_lines(self.curframe.f_code.co_filename, first, last)
+
+ # vds: >>
+ lineno = first
+ filename = self.curframe.f_code.co_filename
+ self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
+ # vds: <<
+
+ do_l = do_list
+
+ def getsourcelines(self, obj):
+ lines, lineno = inspect.findsource(obj)
+ if inspect.isframe(obj) and obj.f_globals is self._get_frame_locals(obj):
+ # must be a module frame: do not try to cut a block out of it
+ return lines, 1
+ elif inspect.ismodule(obj):
+ return lines, 1
+ return inspect.getblock(lines[lineno:]), lineno+1
+
+ def do_longlist(self, arg):
+ """Print lines of code from the current stack frame.
+
+ Shows more lines than 'list' does.
+ """
+ self.lastcmd = 'longlist'
+ try:
+ lines, lineno = self.getsourcelines(self.curframe)
+ except OSError as err:
+ self.error(err)
+ return
+ last = lineno + len(lines)
+ self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
+ do_ll = do_longlist
+
+ def do_debug(self, arg):
+ """debug code
+ Enter a recursive debugger that steps through the code
+ argument (which is an arbitrary expression or statement to be
+ executed in the current environment).
+ """
+ trace_function = sys.gettrace()
+ sys.settrace(None)
+ globals = self.curframe.f_globals
+ locals = self.curframe_locals
+ p = self.__class__(completekey=self.completekey,
+ stdin=self.stdin, stdout=self.stdout)
+ p.use_rawinput = self.use_rawinput
+ p.prompt = "(%s) " % self.prompt.strip()
+ self.message("ENTERING RECURSIVE DEBUGGER")
+ sys.call_tracing(p.run, (arg, globals, locals))
+ self.message("LEAVING RECURSIVE DEBUGGER")
+ sys.settrace(trace_function)
+ self.lastcmd = p.lastcmd
+
+ def do_pdef(self, arg):
+ """Print the call signature for any callable object.
+
+ The debugger interface to %pdef"""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pdef")(arg, namespaces=namespaces)
+
+ def do_pdoc(self, arg):
+ """Print the docstring for an object.
+
+ The debugger interface to %pdoc."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pdoc")(arg, namespaces=namespaces)
+
+ def do_pfile(self, arg):
+ """Print (or run through pager) the file where an object is defined.
+
+ The debugger interface to %pfile.
+ """
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pfile")(arg, namespaces=namespaces)
+
+ def do_pinfo(self, arg):
+ """Provide detailed information about an object.
+
+ The debugger interface to %pinfo, i.e., obj?."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pinfo")(arg, namespaces=namespaces)
+
+ def do_pinfo2(self, arg):
+ """Provide extra detailed information about an object.
+
+ The debugger interface to %pinfo2, i.e., obj??."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("pinfo2")(arg, namespaces=namespaces)
+
+ def do_psource(self, arg):
+ """Print (or run through pager) the source code for an object."""
+ namespaces = [
+ ("Locals", self.curframe_locals),
+ ("Globals", self.curframe.f_globals),
+ ]
+ self.shell.find_line_magic("psource")(arg, namespaces=namespaces)
+
+ def do_where(self, arg):
+ """w(here)
+ Print a stack trace, with the most recent frame at the bottom.
+ An arrow indicates the "current frame", which determines the
+ context of most commands. 'bt' is an alias for this command.
+
+        Takes an optional number argument, the number of context lines to
+        print."""
+ if arg:
+ try:
+ context = int(arg)
+ except ValueError as err:
+ self.error(err)
+ return
+ self.print_stack_trace(context)
+ else:
+ self.print_stack_trace()
+
+ do_w = do_where
+
+ def break_anywhere(self, frame):
+ """
+        _stop_in_decorator_internals is overly restrictive, as we may still want
+        to trace function calls, so we also need to update break_anywhere so
+        that, even if we don't `stop_here` because of debugger skip, we may still
+        stop at any point inside the function.
+
+ """
+
+ sup = super().break_anywhere(frame)
+ if sup:
+ return sup
+ if self._predicates["debuggerskip"]:
+ if DEBUGGERSKIP in frame.f_code.co_varnames:
+ return True
+ if frame.f_back and self._get_frame_locals(frame.f_back).get(DEBUGGERSKIP):
+ return True
+ return False
+
+ def _is_in_decorator_internal_and_should_skip(self, frame):
+ """
+        Utility to tell us whether we are in a decorator internal and should skip.
+
+ """
+
+ # if we are disabled don't skip
+ if not self._predicates["debuggerskip"]:
+ return False
+
+ # if frame is tagged, skip by default.
+ if DEBUGGERSKIP in frame.f_code.co_varnames:
+ return True
+
+        # if one of the parent frames has the value set to True, skip as well.
+
+ cframe = frame
+ while getattr(cframe, "f_back", None):
+ cframe = cframe.f_back
+ if self._get_frame_locals(cframe).get(DEBUGGERSKIP):
+ return True
+
+ return False
+
+ def stop_here(self, frame):
+ if self._is_in_decorator_internal_and_should_skip(frame) is True:
+ return False
+
+ hidden = False
+ if self.skip_hidden:
+ hidden = self._hidden_predicate(frame)
+ if hidden:
+ if self.report_skipped:
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ print(
+ f"{Colors.excName} [... skipped 1 hidden frame]{ColorsNormal}\n"
+ )
+ return super().stop_here(frame)
+
+ def do_up(self, arg):
+ """u(p) [count]
+ Move the current frame count (default one) levels up in the
+ stack trace (to an older frame).
+
+ Will skip hidden frames.
+ """
+ # modified version of upstream that skips
+ # frames with __tracebackhide__
+ if self.curindex == 0:
+ self.error("Oldest frame")
+ return
+ try:
+ count = int(arg or 1)
+ except ValueError:
+ self.error("Invalid frame count (%s)" % arg)
+ return
+ skipped = 0
+ if count < 0:
+ _newframe = 0
+ else:
+ counter = 0
+ hidden_frames = self.hidden_frames(self.stack)
+ for i in range(self.curindex - 1, -1, -1):
+ if hidden_frames[i] and self.skip_hidden:
+ skipped += 1
+ continue
+ counter += 1
+ if counter >= count:
+ break
+ else:
+ # if no break occurred.
+ self.error(
+ "all frames above hidden, use `skip_hidden False` to get get into those."
+ )
+ return
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ _newframe = i
+ self._select_frame(_newframe)
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+
+ def do_down(self, arg):
+ """d(own) [count]
+ Move the current frame count (default one) levels down in the
+ stack trace (to a newer frame).
+
+ Will skip hidden frames.
+ """
+ if self.curindex + 1 == len(self.stack):
+ self.error("Newest frame")
+ return
+ try:
+ count = int(arg or 1)
+ except ValueError:
+ self.error("Invalid frame count (%s)" % arg)
+ return
+ if count < 0:
+ _newframe = len(self.stack) - 1
+ else:
+ counter = 0
+ skipped = 0
+ hidden_frames = self.hidden_frames(self.stack)
+ for i in range(self.curindex + 1, len(self.stack)):
+ if hidden_frames[i] and self.skip_hidden:
+ skipped += 1
+ continue
+ counter += 1
+ if counter >= count:
+ break
+ else:
+ self.error(
+ "all frames below hidden, use `skip_hidden False` to get get into those."
+ )
+ return
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ if skipped:
+ print(
+ f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
+ )
+ _newframe = i
+
+ self._select_frame(_newframe)
+
+ do_d = do_down
+ do_u = do_up
+
+ def do_context(self, context):
+ """context number_of_lines
+ Set the number of lines of source code to show when displaying
+ stacktrace information.
+ """
+ try:
+ new_context = int(context)
+ if new_context <= 0:
+ raise ValueError()
+ self.context = new_context
+ except ValueError:
+ self.error("The 'context' command requires a positive integer argument.")
+
+
+class InterruptiblePdb(Pdb):
+ """Version of debugger where KeyboardInterrupt exits the debugger altogether."""
+
+ def cmdloop(self, intro=None):
+ """Wrap cmdloop() such that KeyboardInterrupt stops the debugger."""
+ try:
+ return OldPdb.cmdloop(self, intro=intro)
+ except KeyboardInterrupt:
+ self.stop_here = lambda frame: False
+ self.do_quit("")
+ sys.settrace(None)
+ self.quitting = False
+ raise
+
+ def _cmdloop(self):
+ while True:
+ try:
+ # keyboard interrupts allow for an easy way to cancel
+ # the current command, so allow them during interactive input
+ self.allow_kbdint = True
+ self.cmdloop()
+ self.allow_kbdint = False
+ break
+ except KeyboardInterrupt:
+ self.message('--KeyboardInterrupt--')
+ raise
+
+
+def set_trace(frame=None):
+ """
+ Start debugging from `frame`.
+
+ If frame is not specified, debugging starts from caller's frame.
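+
+    A typical call site (illustrative)::
+
+        from IPython.core.debugger import set_trace
+
+        def buggy_function():
+            set_trace()  # execution pauses here in the IPython debugger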
+ """
+ Pdb().set_trace(frame or sys._getframe().f_back)
diff --git a/contrib/python/ipython/py3/IPython/core/display.py b/contrib/python/ipython/py3/IPython/core/display.py
new file mode 100644
index 0000000000..ffa6e185c4
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/display.py
@@ -0,0 +1,1290 @@
+# -*- coding: utf-8 -*-
+"""Top-level display functions for displaying object in different formats."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+from binascii import b2a_base64, hexlify
+import html
+import json
+import mimetypes
+import os
+import struct
+import warnings
+from copy import deepcopy
+from os.path import splitext
+from pathlib import Path, PurePath
+
+from IPython.utils.py3compat import cast_unicode
+from IPython.testing.skipdoctest import skip_doctest
+from . import display_functions
+
+
+__all__ = ['display_pretty', 'display_html', 'display_markdown',
+ 'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
+ 'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
+ 'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'ProgressBar', 'JSON',
+ 'GeoJSON', 'Javascript', 'Image', 'set_matplotlib_formats',
+ 'set_matplotlib_close',
+ 'Video']
+
+_deprecated_names = ["display", "clear_output", "publish_display_data", "update_display", "DisplayHandle"]
+
+__all__ = __all__ + _deprecated_names
+
+
+# ----- warn to import from IPython.display -----
+
+from warnings import warn
+
+
+def __getattr__(name):
+ if name in _deprecated_names:
+ warn(f"Importing {name} from IPython.core.display is deprecated since IPython 7.14, please import from IPython display", DeprecationWarning, stacklevel=2)
+ return getattr(display_functions, name)
+
+ if name in globals().keys():
+ return globals()[name]
+ else:
+ raise AttributeError(f"module {__name__} has no attribute {name}")
+
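+# For example, ``from IPython.core.display import display`` still resolves (via
+# ``__getattr__`` above) to ``display_functions.display``, but emits a
+# DeprecationWarning pointing users at ``IPython.display``.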
+
+#-----------------------------------------------------------------------------
+# utility functions
+#-----------------------------------------------------------------------------
+
+def _safe_exists(path):
+ """Check path, but don't let exceptions raise"""
+ try:
+ return os.path.exists(path)
+ except Exception:
+ return False
+
+
+def _display_mimetype(mimetype, objs, raw=False, metadata=None):
+ """internal implementation of all display_foo methods
+
+ Parameters
+ ----------
+ mimetype : str
+ The mimetype to be published (e.g. 'image/png')
+ *objs : object
+ The Python objects to display, or if raw=True raw text data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ if metadata:
+ metadata = {mimetype: metadata}
+ if raw:
+ # turn list of pngdata into list of { 'image/png': pngdata }
+ objs = [ {mimetype: obj} for obj in objs ]
+ display_functions.display(*objs, raw=raw, metadata=metadata, include=[mimetype])
+
+#-----------------------------------------------------------------------------
+# Main functions
+#-----------------------------------------------------------------------------
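+# Each ``display_*`` helper below is a thin wrapper around ``_display_mimetype``;
+# for example, ``display_png(obj)`` publishes ``obj`` under the 'image/png'
+# mimetype.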
+
+
+def display_pretty(*objs, **kwargs):
+ """Display the pretty (default) representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw text data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/plain', objs, **kwargs)
+
+
+def display_html(*objs, **kwargs):
+ """Display the HTML representation of an object.
+
+    Note: If raw=False and the object does not have an HTML
+ representation, no HTML will be shown.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw HTML data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/html', objs, **kwargs)
+
+
+def display_markdown(*objs, **kwargs):
+ """Displays the Markdown representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw markdown data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+
+ _display_mimetype('text/markdown', objs, **kwargs)
+
+
+def display_svg(*objs, **kwargs):
+ """Display the SVG representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw svg data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/svg+xml', objs, **kwargs)
+
+
+def display_png(*objs, **kwargs):
+ """Display the PNG representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw png data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/png', objs, **kwargs)
+
+
+def display_jpeg(*objs, **kwargs):
+ """Display the JPEG representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw JPEG data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/jpeg', objs, **kwargs)
+
+
+def display_latex(*objs, **kwargs):
+ """Display the LaTeX representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw latex data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/latex', objs, **kwargs)
+
+
+def display_json(*objs, **kwargs):
+ """Display the JSON representation of an object.
+
+ Note that not many frontends support displaying JSON.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw json data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/json', objs, **kwargs)
+
+
+def display_javascript(*objs, **kwargs):
+ """Display the Javascript representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw javascript data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/javascript', objs, **kwargs)
+
+
+def display_pdf(*objs, **kwargs):
+ """Display the PDF representation of an object.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display, or if raw=True raw javascript data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/pdf', objs, **kwargs)
+
+
+#-----------------------------------------------------------------------------
+# Smart classes
+#-----------------------------------------------------------------------------
+
+
+class DisplayObject(object):
+ """An object that wraps data to be displayed."""
+
+ _read_flags = 'r'
+ _show_mem_addr = False
+ metadata = None
+
+ def __init__(self, data=None, url=None, filename=None, metadata=None):
+ """Create a display object given raw data.
+
+ When this object is returned by an expression or passed to the
+ display function, it will result in the data being displayed
+ in the frontend. The MIME type of the data should match the
+ subclasses used, so the Png subclass should be used for 'image/png'
+ data. If the data is a URL, the data will first be downloaded
+        and then displayed.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw data or a URL or file to load the data from
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ metadata : dict
+            Dict of metadata associated with the object when displayed.
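+
+        For example (an illustrative sketch; ``DisplayObject`` is normally used
+        through a subclass such as ``HTML``)::
+
+            HTML("<b>bold</b>")                        # raw data
+            HTML(url="https://example.com/page.html")  # fetched by reload()
+            HTML(filename="page.html")                 # read from a local file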
+ """
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if data is not None and isinstance(data, str):
+ if data.startswith('http') and url is None:
+ url = data
+ filename = None
+ data = None
+ elif _safe_exists(data) and filename is None:
+ url = None
+ filename = data
+ data = None
+
+ self.url = url
+ self.filename = filename
+ # because of @data.setter methods in
+ # subclasses ensure url and filename are set
+ # before assigning to self.data
+ self.data = data
+
+ if metadata is not None:
+ self.metadata = metadata
+ elif self.metadata is None:
+ self.metadata = {}
+
+ self.reload()
+ self._check_data()
+
+ def __repr__(self):
+ if not self._show_mem_addr:
+ cls = self.__class__
+ r = "<%s.%s object>" % (cls.__module__, cls.__name__)
+ else:
+ r = super(DisplayObject, self).__repr__()
+ return r
+
+ def _check_data(self):
+ """Override in subclasses if there's something to check."""
+ pass
+
+ def _data_and_metadata(self):
+ """shortcut for returning metadata with shape information, if defined"""
+ if self.metadata:
+ return self.data, deepcopy(self.metadata)
+ else:
+ return self.data
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ if self.filename is not None:
+ encoding = None if "b" in self._read_flags else "utf-8"
+ with open(self.filename, self._read_flags, encoding=encoding) as f:
+ self.data = f.read()
+ elif self.url is not None:
+ # Deferred import
+ from urllib.request import urlopen
+ response = urlopen(self.url)
+ data = response.read()
+ # extract encoding from header, if there is one:
+ encoding = None
+ if 'content-type' in response.headers:
+ for sub in response.headers['content-type'].split(';'):
+ sub = sub.strip()
+ if sub.startswith('charset'):
+ encoding = sub.split('=')[-1].strip()
+ break
+ if 'content-encoding' in response.headers:
+ # TODO: do deflate?
+ if 'gzip' in response.headers['content-encoding']:
+ import gzip
+ from io import BytesIO
+
+ # assume utf-8 if encoding is not specified
+ with gzip.open(
+ BytesIO(data), "rt", encoding=encoding or "utf-8"
+ ) as fp:
+ encoding = None
+ data = fp.read()
+
+ # decode data, if an encoding was specified
+ # We only touch self.data once since
+ # subclasses such as SVG have @data.setter methods
+ # that transform self.data into ... well svg.
+ if encoding:
+ self.data = data.decode(encoding, 'replace')
+ else:
+ self.data = data
+
+
+class TextDisplayObject(DisplayObject):
+ """Create a text display object given raw data.
+
+ Parameters
+ ----------
+ data : str or unicode
+ The raw data or a URL or file to load the data from.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ metadata : dict
+        Dict of metadata associated with the object when displayed.
+ """
+ def _check_data(self):
+ if self.data is not None and not isinstance(self.data, str):
+ raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
+
+class Pretty(TextDisplayObject):
+
+ def _repr_pretty_(self, pp, cycle):
+ return pp.text(self.data)
+
+
+class HTML(TextDisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, metadata=None):
+ def warn():
+ if not data:
+ return False
+
+ #
+ # Avoid calling lower() on the entire data, because it could be a
+ # long string and we're only interested in its beginning and end.
+ #
+ prefix = data[:10].lower()
+ suffix = data[-10:].lower()
+ return prefix.startswith("<iframe ") and suffix.endswith("</iframe>")
+
+ if warn():
+ warnings.warn("Consider using IPython.display.IFrame instead")
+ super(HTML, self).__init__(data=data, url=url, filename=filename, metadata=metadata)
+
+ def _repr_html_(self):
+ return self._data_and_metadata()
+
+ def __html__(self):
+ """
+ This method exists to inform other HTML-using modules (e.g. Markupsafe,
+ htmltag, etc) that this object is HTML and does not need things like
+ special characters (<>&) escaped.
+ """
+ return self._repr_html_()
+
+
+class Markdown(TextDisplayObject):
+
+ def _repr_markdown_(self):
+ return self._data_and_metadata()
+
+
+class Math(TextDisplayObject):
+
+ def _repr_latex_(self):
+ s = r"$\displaystyle %s$" % self.data.strip('$')
+ if self.metadata:
+ return s, deepcopy(self.metadata)
+ else:
+ return s
+
+
+class Latex(TextDisplayObject):
+
+ def _repr_latex_(self):
+ return self._data_and_metadata()
+
+
+class SVG(DisplayObject):
+ """Embed an SVG into the display.
+
+    Note: if you just want to view an SVG image via a URL, use :class:`Image` with
+    a ``url=URL`` keyword argument.
+ """
+
+ _read_flags = 'rb'
+ # wrap data in a property, which extracts the <svg> tag, discarding
+ # document headers
+ _data = None
+
+ @property
+ def data(self):
+ return self._data
+
+ @data.setter
+ def data(self, svg):
+ if svg is None:
+ self._data = None
+ return
+ # parse into dom object
+ from xml.dom import minidom
+ x = minidom.parseString(svg)
+ # get svg tag (should be 1)
+ found_svg = x.getElementsByTagName('svg')
+ if found_svg:
+ svg = found_svg[0].toxml()
+ else:
+ # fallback on the input, trust the user
+ # but this is probably an error.
+ pass
+ svg = cast_unicode(svg)
+ self._data = svg
+
+ def _repr_svg_(self):
+ return self._data_and_metadata()
+
+class ProgressBar(DisplayObject):
+ """Progressbar supports displaying a progressbar like element
+ """
+ def __init__(self, total):
+ """Creates a new progressbar
+
+ Parameters
+ ----------
+ total : int
+ maximum size of the progressbar
+ """
+ self.total = total
+ self._progress = 0
+ self.html_width = '60ex'
+ self.text_width = 60
+ self._display_id = hexlify(os.urandom(8)).decode('ascii')
+
+ def __repr__(self):
+ fraction = self.progress / self.total
+ filled = '=' * int(fraction * self.text_width)
+ rest = ' ' * (self.text_width - len(filled))
+ return '[{}{}] {}/{}'.format(
+ filled, rest,
+ self.progress, self.total,
+ )
+
+ def _repr_html_(self):
+ return "<progress style='width:{}' max='{}' value='{}'></progress>".format(
+ self.html_width, self.total, self.progress)
+
+ def display(self):
+ display_functions.display(self, display_id=self._display_id)
+
+ def update(self):
+ display_functions.display(self, display_id=self._display_id, update=True)
+
+ @property
+ def progress(self):
+ return self._progress
+
+ @progress.setter
+ def progress(self, value):
+ self._progress = value
+ self.update()
+
+ def __iter__(self):
+ self.display()
+ self._progress = -1 # First iteration is 0
+ return self
+
+ def __next__(self):
+ """Returns current value and increments display by one."""
+ self.progress += 1
+ if self.progress < self.total:
+ return self.progress
+ else:
+ raise StopIteration()
+
+class JSON(DisplayObject):
+ """JSON expects a JSON-able dict or list
+
+ not an already-serialized JSON string.
+
+ Scalar types (None, number, string) are not allowed, only dict or list containers.
+ """
+ # wrap data in a property, which warns about passing already-serialized JSON
+ _data = None
+ def __init__(self, data=None, url=None, filename=None, expanded=False, metadata=None, root='root', **kwargs):
+ """Create a JSON display object given raw data.
+
+ Parameters
+ ----------
+ data : dict or list
+ JSON data to display. Not an already-serialized JSON string.
+ Scalar types (None, number, string) are not allowed, only dict
+ or list containers.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ expanded : boolean
+ Metadata to control whether a JSON display component is expanded.
+ metadata : dict
+ Specify extra metadata to attach to the json display object.
+ root : str
+ The name of the root element of the JSON tree
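+
+        For example (illustrative)::
+
+            JSON({"a": [1, 2, 3]}, expanded=True, root="data")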
+ """
+ self.metadata = {
+ 'expanded': expanded,
+ 'root': root,
+ }
+ if metadata:
+ self.metadata.update(metadata)
+ if kwargs:
+ self.metadata.update(kwargs)
+ super(JSON, self).__init__(data=data, url=url, filename=filename)
+
+ def _check_data(self):
+ if self.data is not None and not isinstance(self.data, (dict, list)):
+ raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
+
+ @property
+ def data(self):
+ return self._data
+
+ @data.setter
+ def data(self, data):
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if isinstance(data, str):
+ if self.filename is None and self.url is None:
+ warnings.warn("JSON expects JSONable dict or list, not JSON strings")
+ data = json.loads(data)
+ self._data = data
+
+ def _data_and_metadata(self):
+ return self.data, self.metadata
+
+ def _repr_json_(self):
+ return self._data_and_metadata()
+
+
+_css_t = """var link = document.createElement("link");
+ link.rel = "stylesheet";
+ link.type = "text/css";
+ link.href = "%s";
+ document.head.appendChild(link);
+"""
+
+_lib_t1 = """new Promise(function(resolve, reject) {
+ var script = document.createElement("script");
+ script.onload = resolve;
+ script.onerror = reject;
+ script.src = "%s";
+ document.head.appendChild(script);
+}).then(() => {
+"""
+
+_lib_t2 = """
+});"""
+
+class GeoJSON(JSON):
+ """GeoJSON expects JSON-able dict
+
+ not an already-serialized JSON string.
+
+ Scalar types (None, number, string) are not allowed, only dict containers.
+ """
+
+ def __init__(self, *args, **kwargs):
+ """Create a GeoJSON display object given raw data.
+
+ Parameters
+ ----------
+ data : dict or list
+            GeoJSON data. Not an already-serialized JSON string.
+ Scalar types (None, number, string) are not allowed, only dict
+ or list containers.
+ url_template : string
+ Leaflet TileLayer URL template: http://leafletjs.com/reference.html#url-template
+ layer_options : dict
+ Leaflet TileLayer options: http://leafletjs.com/reference.html#tilelayer-options
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ metadata : dict
+ Specify extra metadata to attach to the json display object.
+
+ Examples
+ --------
+ The following will display an interactive map of Mars with a point of
+        interest on frontends that support GeoJSON display.
+
+ >>> from IPython.display import GeoJSON
+
+ >>> GeoJSON(data={
+ ... "type": "Feature",
+ ... "geometry": {
+ ... "type": "Point",
+ ... "coordinates": [-81.327, 296.038]
+ ... }
+ ... },
+ ... url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png",
+ ... layer_options={
+ ... "basemap_id": "celestia_mars-shaded-16k_global",
+ ... "attribution" : "Celestia/praesepe",
+ ... "minZoom" : 0,
+ ... "maxZoom" : 18,
+ ... })
+ <IPython.core.display.GeoJSON object>
+
+ In the terminal IPython, you will only see the text representation of
+ the GeoJSON object.
+
+ """
+
+ super(GeoJSON, self).__init__(*args, **kwargs)
+
+
+ def _ipython_display_(self):
+ bundle = {
+ 'application/geo+json': self.data,
+ 'text/plain': '<IPython.display.GeoJSON object>'
+ }
+ metadata = {
+ 'application/geo+json': self.metadata
+ }
+ display_functions.display(bundle, metadata=metadata, raw=True)
+
+class Javascript(TextDisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
+ """Create a Javascript display object given raw data.
+
+ When this object is returned by an expression or passed to the
+ display function, it will result in the data being displayed
+ in the frontend. If the data is a URL, the data will first be
+ downloaded and then displayed.
+
+ In the Notebook, the containing element will be available as `element`,
+ and jQuery will be available. Content appended to `element` will be
+ visible in the output area.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The Javascript source code or a URL to download it from.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ lib : list or str
+ A sequence of Javascript library URLs to load asynchronously before
+ running the source code. The full URLs of the libraries should
+ be given. A single Javascript library URL can also be given as a
+ string.
+ css : list or str
+ A sequence of css files to load before running the source code.
+ The full URLs of the css files should be given. A single css URL
+ can also be given as a string.
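+
+        For example (illustrative; the URLs are placeholders)::
+
+            Javascript("element.append('loaded');",
+                       lib="https://example.com/lib.js",
+                       css="https://example.com/style.css")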
+ """
+ if isinstance(lib, str):
+ lib = [lib]
+ elif lib is None:
+ lib = []
+ if isinstance(css, str):
+ css = [css]
+ elif css is None:
+ css = []
+ if not isinstance(lib, (list,tuple)):
+ raise TypeError('expected sequence, got: %r' % lib)
+ if not isinstance(css, (list,tuple)):
+ raise TypeError('expected sequence, got: %r' % css)
+ self.lib = lib
+ self.css = css
+ super(Javascript, self).__init__(data=data, url=url, filename=filename)
+
+ def _repr_javascript_(self):
+ r = ''
+ for c in self.css:
+ r += _css_t % c
+ for l in self.lib:
+ r += _lib_t1 % l
+ r += self.data
+ r += _lib_t2*len(self.lib)
+ return r
+
+# constants for identifying png/jpeg data
+_PNG = b'\x89PNG\r\n\x1a\n'
+_JPEG = b'\xff\xd8'
+
+def _pngxy(data):
+ """read the (width, height) from a PNG header"""
+ ihdr = data.index(b'IHDR')
+ # next 8 bytes are width/height
+ return struct.unpack('>ii', data[ihdr+4:ihdr+12])
+
+def _jpegxy(data):
+ """read the (width, height) from a JPEG header"""
+ # adapted from http://www.64lines.com/jpeg-width-height
+
+ idx = 4
+ while True:
+ block_size = struct.unpack('>H', data[idx:idx+2])[0]
+ idx = idx + block_size
+ if data[idx:idx+2] == b'\xFF\xC0':
+ # found Start of Frame
+ iSOF = idx
+ break
+ else:
+ # read another block
+ idx += 2
+
+ h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
+ return w, h
+
+def _gifxy(data):
+ """read the (width, height) from a GIF header"""
+ return struct.unpack('<HH', data[6:10])
+
+
+class Image(DisplayObject):
+
+ _read_flags = 'rb'
+ _FMT_JPEG = u'jpeg'
+ _FMT_PNG = u'png'
+ _FMT_GIF = u'gif'
+ _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG, _FMT_GIF]
+ _MIMETYPES = {
+ _FMT_PNG: 'image/png',
+ _FMT_JPEG: 'image/jpeg',
+ _FMT_GIF: 'image/gif',
+ }
+
+ def __init__(
+ self,
+ data=None,
+ url=None,
+ filename=None,
+ format=None,
+ embed=None,
+ width=None,
+ height=None,
+ retina=False,
+ unconfined=False,
+ metadata=None,
+ alt=None,
+ ):
+ """Create a PNG/JPEG/GIF image object given raw data.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in the image being displayed
+ in the frontend.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw image data or a URL or filename to load the data from.
+ This always results in embedded image data.
+
+ url : unicode
+ A URL to download the data from. If you specify `url=`,
+ the image data will not be embedded unless you also specify `embed=True`.
+
+ filename : unicode
+ Path to a local file to load the data from.
+ Images from a file are always embedded.
+
+ format : unicode
+            The format of the image data (png/jpeg/jpg/gif). If a filename or URL is given,
+            the format will be inferred from the filename extension.
+
+ embed : bool
+ Should the image data be embedded using a data URI (True) or be
+ loaded using an <img> tag. Set this to True if you want the image
+ to be viewable later with no internet connection in the notebook.
+
+ Default is `True`, unless the keyword argument `url` is set, then
+ default value is `False`.
+
+ Note that QtConsole is not able to display images if `embed` is set to `False`
+
+ width : int
+ Width in pixels to which to constrain the image in html
+
+ height : int
+ Height in pixels to which to constrain the image in html
+
+ retina : bool
+ Automatically set the width and height to half of the measured
+ width and height.
+ This only works for embedded images because it reads the width/height
+ from image data.
+ For non-embedded images, you can just set the desired display width
+ and height directly.
+
+ unconfined : bool
+ Set unconfined=True to disable max-width confinement of the image.
+
+ metadata : dict
+ Specify extra metadata to attach to the image.
+
+ alt : unicode
+ Alternative text for the image, for use by screen readers.
+
+ Examples
+ --------
+        Embedded image data works in the qtconsole and the notebook.
+        When passed positionally, the first argument can be any of raw image data,
+        a URL, or a filename from which to load image data.
+        The result always embeds the image data for inline images.
+
+ >>> Image('https://www.google.fr/images/srpr/logo3w.png') # doctest: +SKIP
+ <IPython.core.display.Image object>
+
+ >>> Image('/path/to/image.jpg')
+ <IPython.core.display.Image object>
+
+ >>> Image(b'RAW_PNG_DATA...')
+ <IPython.core.display.Image object>
+
+ Specifying Image(url=...) does not embed the image data,
+        it only generates an ``<img>`` tag with a link to the source.
+ This will not work in the qtconsole or offline.
+
+ >>> Image(url='https://www.google.fr/images/srpr/logo3w.png')
+ <IPython.core.display.Image object>
+
+ """
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if filename is not None:
+ ext = self._find_ext(filename)
+ elif url is not None:
+ ext = self._find_ext(url)
+ elif data is None:
+ raise ValueError("No image data found. Expecting filename, url, or data.")
+ elif isinstance(data, str) and (
+ data.startswith('http') or _safe_exists(data)
+ ):
+ ext = self._find_ext(data)
+ else:
+ ext = None
+
+ if format is None:
+ if ext is not None:
+ if ext == u'jpg' or ext == u'jpeg':
+ format = self._FMT_JPEG
+ elif ext == u'png':
+ format = self._FMT_PNG
+ elif ext == u'gif':
+ format = self._FMT_GIF
+ else:
+ format = ext.lower()
+ elif isinstance(data, bytes):
+ # infer image type from image data header,
+ # only if format has not been specified.
+ if data[:2] == _JPEG:
+ format = self._FMT_JPEG
+
+ # failed to detect format, default png
+ if format is None:
+ format = self._FMT_PNG
+
+ if format.lower() == 'jpg':
+ # jpg->jpeg
+ format = self._FMT_JPEG
+
+ self.format = format.lower()
+ self.embed = embed if embed is not None else (url is None)
+
+ if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
+ raise ValueError("Cannot embed the '%s' image format" % (self.format))
+ if self.embed:
+ self._mimetype = self._MIMETYPES.get(self.format)
+
+ self.width = width
+ self.height = height
+ self.retina = retina
+ self.unconfined = unconfined
+ self.alt = alt
+ super(Image, self).__init__(data=data, url=url, filename=filename,
+ metadata=metadata)
+
+ if self.width is None and self.metadata.get('width', {}):
+ self.width = metadata['width']
+
+ if self.height is None and self.metadata.get('height', {}):
+ self.height = metadata['height']
+
+ if self.alt is None and self.metadata.get("alt", {}):
+ self.alt = metadata["alt"]
+
+ if retina:
+ self._retina_shape()
+
+
+ def _retina_shape(self):
+ """load pixel-doubled width and height from image data"""
+ if not self.embed:
+ return
+ if self.format == self._FMT_PNG:
+ w, h = _pngxy(self.data)
+ elif self.format == self._FMT_JPEG:
+ w, h = _jpegxy(self.data)
+ elif self.format == self._FMT_GIF:
+ w, h = _gifxy(self.data)
+ else:
+            # cannot read dimensions for other formats, so skip retina scaling
+ return
+ self.width = w // 2
+ self.height = h // 2
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ if self.embed:
+ super(Image,self).reload()
+ if self.retina:
+ self._retina_shape()
+
+ def _repr_html_(self):
+ if not self.embed:
+ width = height = klass = alt = ""
+ if self.width:
+ width = ' width="%d"' % self.width
+ if self.height:
+ height = ' height="%d"' % self.height
+ if self.unconfined:
+ klass = ' class="unconfined"'
+ if self.alt:
+ alt = ' alt="%s"' % html.escape(self.alt)
+ return '<img src="{url}"{width}{height}{klass}{alt}/>'.format(
+ url=self.url,
+ width=width,
+ height=height,
+ klass=klass,
+ alt=alt,
+ )
+
+ def _repr_mimebundle_(self, include=None, exclude=None):
+ """Return the image as a mimebundle
+
+ Any new mimetype support should be implemented here.
+ """
+ if self.embed:
+ mimetype = self._mimetype
+ data, metadata = self._data_and_metadata(always_both=True)
+ if metadata:
+ metadata = {mimetype: metadata}
+ return {mimetype: data}, metadata
+ else:
+ return {'text/html': self._repr_html_()}
+
+ def _data_and_metadata(self, always_both=False):
+ """shortcut for returning metadata with shape information, if defined"""
+ try:
+ b64_data = b2a_base64(self.data, newline=False).decode("ascii")
+ except TypeError as e:
+ raise FileNotFoundError(
+ "No such file or directory: '%s'" % (self.data)) from e
+ md = {}
+ if self.metadata:
+ md.update(self.metadata)
+ if self.width:
+ md['width'] = self.width
+ if self.height:
+ md['height'] = self.height
+ if self.unconfined:
+ md['unconfined'] = self.unconfined
+ if self.alt:
+ md["alt"] = self.alt
+ if md or always_both:
+ return b64_data, md
+ else:
+ return b64_data
+
+ def _repr_png_(self):
+ if self.embed and self.format == self._FMT_PNG:
+ return self._data_and_metadata()
+
+ def _repr_jpeg_(self):
+ if self.embed and self.format == self._FMT_JPEG:
+ return self._data_and_metadata()
+
+ def _find_ext(self, s):
+ base, ext = splitext(s)
+
+ if not ext:
+ return base
+
+ # `splitext` includes leading period, so we skip it
+ return ext[1:].lower()
+
+
+class Video(DisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, embed=False,
+ mimetype=None, width=None, height=None, html_attributes="controls"):
+ """Create a video object given raw data or an URL.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in the video being displayed
+ in the frontend.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw video data or a URL or filename to load the data from.
+ Raw data will require passing ``embed=True``.
+
+ url : unicode
+ A URL for the video. If you specify ``url=``,
+            the video data will not be embedded.
+
+ filename : unicode
+ Path to a local file containing the video.
+ Will be interpreted as a local URL unless ``embed=True``.
+
+ embed : bool
+ Should the video be embedded using a data URI (True) or be
+ loaded using a <video> tag (False).
+
+ Since videos are large, embedding them should be avoided, if possible.
+ You must confirm embedding as your intention by passing ``embed=True``.
+
+ Local files can be displayed with URLs without embedding the content, via::
+
+ Video('./video.mp4')
+
+ mimetype : unicode
+ Specify the mimetype for embedded videos.
+ Default will be guessed from file extension, if available.
+
+ width : int
+ Width in pixels to which to constrain the video in HTML.
+ If not supplied, defaults to the width of the video.
+
+ height : int
+            Height in pixels to which to constrain the video in HTML.
+ If not supplied, defaults to the height of the video.
+
+ html_attributes : str
+ Attributes for the HTML ``<video>`` block.
+ Default: ``"controls"`` to get video controls.
+ Other examples: ``"controls muted"`` for muted video with controls,
+ ``"loop autoplay"`` for looping autoplaying video without controls.
+
+ Examples
+ --------
+ ::
+
+ Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
+ Video('path/to/video.mp4')
+ Video('path/to/video.mp4', embed=True)
+ Video('path/to/video.mp4', embed=True, html_attributes="controls muted autoplay")
+ Video(b'raw-videodata', embed=True)
+ """
+ if isinstance(data, (Path, PurePath)):
+ data = str(data)
+
+ if url is None and isinstance(data, str) and data.startswith(('http:', 'https:')):
+ url = data
+ data = None
+ elif data is not None and os.path.exists(data):
+ filename = data
+ data = None
+
+ if data and not embed:
+ msg = ''.join([
+ "To embed videos, you must pass embed=True ",
+ "(this may make your notebook files huge)\n",
+ "Consider passing Video(url='...')",
+ ])
+ raise ValueError(msg)
+
+ self.mimetype = mimetype
+ self.embed = embed
+ self.width = width
+ self.height = height
+ self.html_attributes = html_attributes
+ super(Video, self).__init__(data=data, url=url, filename=filename)
+
+ def _repr_html_(self):
+ width = height = ''
+ if self.width:
+ width = ' width="%d"' % self.width
+ if self.height:
+ height = ' height="%d"' % self.height
+
+ # External URLs and potentially local files are not embedded into the
+ # notebook output.
+ if not self.embed:
+ url = self.url if self.url is not None else self.filename
+ output = """<video src="{0}" {1} {2} {3}>
+ Your browser does not support the <code>video</code> element.
+ </video>""".format(url, self.html_attributes, width, height)
+ return output
+
+ # Embedded videos are base64-encoded.
+ mimetype = self.mimetype
+ if self.filename is not None:
+ if not mimetype:
+ mimetype, _ = mimetypes.guess_type(self.filename)
+
+ with open(self.filename, 'rb') as f:
+ video = f.read()
+ else:
+ video = self.data
+ if isinstance(video, str):
+ # unicode input is already b64-encoded
+ b64_video = video
+ else:
+ b64_video = b2a_base64(video, newline=False).decode("ascii").rstrip()
+
+ output = """<video {0} {1} {2}>
+ <source src="data:{3};base64,{4}" type="{3}">
+ Your browser does not support the video tag.
+ </video>""".format(self.html_attributes, width, height, mimetype, b64_video)
+ return output
+
+ def reload(self):
+ # TODO
+ pass
+
+
+@skip_doctest
+def set_matplotlib_formats(*formats, **kwargs):
+ """
+ .. deprecated:: 7.23
+
+ use `matplotlib_inline.backend_inline.set_matplotlib_formats()`
+
+ Select figure formats for the inline backend. Optionally pass quality for JPEG.
+
+ For example, this enables PNG and JPEG output with a JPEG quality of 90%::
+
+ In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
+
+ To set this in your config files use the following::
+
+ c.InlineBackend.figure_formats = {'png', 'jpeg'}
+ c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
+
+ Parameters
+ ----------
+ *formats : strs
+ One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
+ **kwargs
+ Keyword args will be relayed to ``figure.canvas.print_figure``.
+ """
+ warnings.warn(
+ "`set_matplotlib_formats` is deprecated since IPython 7.23, directly "
+ "use `matplotlib_inline.backend_inline.set_matplotlib_formats()`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from matplotlib_inline.backend_inline import (
+ set_matplotlib_formats as set_matplotlib_formats_orig,
+ )
+
+ set_matplotlib_formats_orig(*formats, **kwargs)
+
+@skip_doctest
+def set_matplotlib_close(close=True):
+ """
+ .. deprecated:: 7.23
+
+ use `matplotlib_inline.backend_inline.set_matplotlib_close()`
+
+ Set whether the inline backend closes all figures automatically or not.
+
+ By default, the inline backend used in the IPython Notebook will close all
+ matplotlib figures automatically after each cell is run. This means that
+ plots in different cells won't interfere. Sometimes, you may want to make
+ a plot in one cell and then refine it in later cells. This can be accomplished
+ by::
+
+ In [1]: set_matplotlib_close(False)
+
+ To set this in your config files use the following::
+
+ c.InlineBackend.close_figures = False
+
+ Parameters
+ ----------
+ close : bool
+ Should all matplotlib figures be automatically closed after each cell is
+ run?
+ """
+ warnings.warn(
+ "`set_matplotlib_close` is deprecated since IPython 7.23, directly "
+ "use `matplotlib_inline.backend_inline.set_matplotlib_close()`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from matplotlib_inline.backend_inline import (
+ set_matplotlib_close as set_matplotlib_close_orig,
+ )
+
+ set_matplotlib_close_orig(close)
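+
+
+# A minimal sketch (never called) of the replacement calls that the
+# deprecation warnings above point to, assuming an active IPython session
+# with matplotlib_inline available. The helper name is hypothetical.
+def _example_matplotlib_inline_replacements():
+    from matplotlib_inline.backend_inline import (
+        set_matplotlib_close as set_close,
+        set_matplotlib_formats as set_formats,
+    )
+
+    # Equivalent to the deprecated set_matplotlib_formats('png', 'retina'):
+    set_formats("png", "retina")
+    # Equivalent to the deprecated set_matplotlib_close(False):
+    set_close(False)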
diff --git a/contrib/python/ipython/py3/IPython/core/display_functions.py b/contrib/python/ipython/py3/IPython/core/display_functions.py
new file mode 100644
index 0000000000..567cf3fa60
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/display_functions.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+"""Top-level display functions for displaying object in different formats."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+from binascii import b2a_hex
+import os
+import sys
+import warnings
+
+__all__ = ['display', 'clear_output', 'publish_display_data', 'update_display', 'DisplayHandle']
+
+#-----------------------------------------------------------------------------
+# utility functions
+#-----------------------------------------------------------------------------
+
+
+def _merge(d1, d2):
+ """Like update, but merges sub-dicts instead of clobbering at the top level.
+
+ Updates d1 in-place
+ """
+
+ if not isinstance(d2, dict) or not isinstance(d1, dict):
+ return d2
+ for key, value in d2.items():
+ d1[key] = _merge(d1.get(key), value)
+ return d1
+
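+
+# A small worked sketch (never called) of how _merge combines per-mimetype
+# metadata dicts; the helper name and the values are illustrative only.
+def _example_merge():
+    existing = {"image/png": {"width": 100}, "text/plain": {}}
+    incoming = {"image/png": {"height": 50}}
+    merged = _merge(existing, incoming)
+    # Sub-dicts are merged key by key rather than replaced wholesale, so:
+    # merged == {"image/png": {"width": 100, "height": 50}, "text/plain": {}}
+    # whereas dict.update would have dropped the existing "width" entry.
+    return merged
+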
+
+#-----------------------------------------------------------------------------
+# Main functions
+#-----------------------------------------------------------------------------
+
+class _Sentinel:
+ def __repr__(self):
+ return "<deprecated>"
+
+
+_sentinel = _Sentinel()
+
+# use * to indicate transient is keyword-only
+def publish_display_data(
+ data, metadata=None, source=_sentinel, *, transient=None, **kwargs
+):
+ """Publish data and metadata to all frontends.
+
+ See the ``display_data`` message in the messaging documentation for
+ more details about this message type.
+
+ Keys of data and metadata can be any mime-type.
+
+ Parameters
+ ----------
+ data : dict
+ A dictionary having keys that are valid MIME types (like
+ 'text/plain' or 'image/svg+xml') and values that are the data for
+ that MIME type. The data itself must be a JSON'able data
+ structure. Minimally all data should have the 'text/plain' data,
+ which can be displayed by all frontends. If more than the plain
+ text is given, it is up to the frontend to decide which
+ representation to use.
+ metadata : dict
+ A dictionary for metadata related to the data. This can contain
+ arbitrary key, value pairs that frontends can use to interpret
+ the data. mime-type keys matching those in data can be used
+ to specify metadata about particular representations.
+ source : str, deprecated
+ Unused.
+ transient : dict, keyword-only
+ A dictionary of transient data, such as display_id.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+
+ if source is not _sentinel:
+ warnings.warn(
+            "The `source` parameter emits a deprecation warning since"
+            " IPython 8.0; it has had no effect for a long time and will"
+            " be removed in future versions.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ display_pub = InteractiveShell.instance().display_pub
+
+ # only pass transient if supplied,
+ # to avoid errors with older ipykernel.
+ # TODO: We could check for ipykernel version and provide a detailed upgrade message.
+ if transient:
+ kwargs['transient'] = transient
+
+ display_pub.publish(
+ data=data,
+ metadata=metadata,
+ **kwargs
+ )
+
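+
+# A minimal usage sketch (never called) for publish_display_data, assuming it
+# runs inside an active IPython/Jupyter session; the helper name and the
+# payload values are illustrative only.
+def _example_publish():
+    # Keys are MIME types; every bundle should at least carry 'text/plain'
+    # so that plain-text frontends have something to show.
+    publish_display_data(
+        data={
+            "text/plain": "fallback text for plain frontends",
+            "text/html": "<b>rich HTML for notebook frontends</b>",
+        },
+    )
+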
+
+def _new_id():
+ """Generate a new random text id with urandom"""
+ return b2a_hex(os.urandom(16)).decode('ascii')
+
+
+def display(
+ *objs,
+ include=None,
+ exclude=None,
+ metadata=None,
+ transient=None,
+ display_id=None,
+ raw=False,
+ clear=False,
+ **kwargs
+):
+ """Display a Python object in all frontends.
+
+ By default all representations will be computed and sent to the frontends.
+ Frontends can decide which representation is used and how.
+
+ In terminal IPython this will be similar to using :func:`print`, for use in richer
+ frontends see Jupyter notebook examples with rich display logic.
+
+ Parameters
+ ----------
+ *objs : object
+ The Python objects to display.
+ raw : bool, optional
+ Are the objects to be displayed already mimetype-keyed dicts of raw display data,
+ or Python objects that need to be formatted before display? [default: False]
+ include : list, tuple or set, optional
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set *only* the format types included
+ in this list will be computed.
+ exclude : list, tuple or set, optional
+ A list of format type strings (MIME types) to exclude in the format
+ data dict. If this is set all format types will be computed,
+ except for those included in this argument.
+ metadata : dict, optional
+ A dictionary of metadata to associate with the output.
+ mime-type keys in this dictionary will be associated with the individual
+ representation formats, if they exist.
+ transient : dict, optional
+ A dictionary of transient data to associate with the output.
+ Data in this dict should not be persisted to files (e.g. notebooks).
+    display_id : str, bool, optional
+        Set an id for the display.
+        This id can be used for updating this display area later via update_display.
+        If given as `True`, generate a new `display_id`.
+ clear : bool, optional
+ Should the output area be cleared before displaying anything? If True,
+ this will wait for additional output before clearing. [default: False]
+ **kwargs : additional keyword-args, optional
+ Additional keyword-arguments are passed through to the display publisher.
+
+ Returns
+ -------
+ handle: DisplayHandle
+ Returns a handle on updatable displays for use with :func:`update_display`,
+ if `display_id` is given. Returns :any:`None` if no `display_id` is given
+ (default).
+
+ Examples
+ --------
+ >>> class Json(object):
+ ... def __init__(self, json):
+ ... self.json = json
+ ... def _repr_pretty_(self, pp, cycle):
+ ... import json
+ ... pp.text(json.dumps(self.json, indent=2))
+ ... def __repr__(self):
+ ... return str(self.json)
+ ...
+
+ >>> d = Json({1:2, 3: {4:5}})
+
+ >>> print(d)
+ {1: 2, 3: {4: 5}}
+
+ >>> display(d)
+ {
+ "1": 2,
+ "3": {
+ "4": 5
+ }
+ }
+
+ >>> def int_formatter(integer, pp, cycle):
+ ... pp.text('I'*integer)
+
+ >>> plain = get_ipython().display_formatter.formatters['text/plain']
+ >>> plain.for_type(int, int_formatter)
+ <function _repr_pprint at 0x...>
+ >>> display(7-5)
+ II
+
+ >>> del plain.type_printers[int]
+ >>> display(7-5)
+ 2
+
+ See Also
+ --------
+ :func:`update_display`
+
+ Notes
+ -----
+ In Python, objects can declare their textual representation using the
+ `__repr__` method. IPython expands on this idea and allows objects to declare
+ other, rich representations including:
+
+ - HTML
+ - JSON
+ - PNG
+ - JPEG
+ - SVG
+ - LaTeX
+
+ A single object can declare some or all of these representations; all are
+ handled by IPython's display system.
+
+ The main idea of the first approach is that you have to implement special
+ display methods when you define your class, one for each representation you
+ want to use. Here is a list of the names of the special methods and the
+ values they must return:
+
+ - `_repr_html_`: return raw HTML as a string, or a tuple (see below).
+ - `_repr_json_`: return a JSONable dict, or a tuple (see below).
+ - `_repr_jpeg_`: return raw JPEG data, or a tuple (see below).
+ - `_repr_png_`: return raw PNG data, or a tuple (see below).
+ - `_repr_svg_`: return raw SVG data as a string, or a tuple (see below).
+ - `_repr_latex_`: return LaTeX commands in a string surrounded by "$",
+ or a tuple (see below).
+ - `_repr_mimebundle_`: return a full mimebundle containing the mapping
+ from all mimetypes to data.
+ Use this for any mime-type not listed above.
+
+    The above functions may also return the object's metadata alongside the
+ data. If the metadata is available, the functions will return a tuple
+ containing the data and metadata, in that order. If there is no metadata
+ available, then the functions will return the data only.
+
+ When you are directly writing your own classes, you can adapt them for
+ display in IPython by following the above approach. But in practice, you
+ often need to work with existing classes that you can't easily modify.
+
+ You can refer to the documentation on integrating with the display system in
+ order to register custom formatters for already existing types
+ (:ref:`integrating_rich_display`).
+
+ .. versionadded:: 5.4 display available without import
+ .. versionadded:: 6.1 display available without import
+
+ Since IPython 5.4 and 6.1 :func:`display` is automatically made available to
+ the user without import. If you are using display in a document that might
+ be used in a pure python context or with older version of IPython, use the
+ following import at the top of your file::
+
+ from IPython.display import display
+
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+
+ if not InteractiveShell.initialized():
+ # Directly print objects.
+ print(*objs)
+ return
+
+ if transient is None:
+ transient = {}
+ if metadata is None:
+ metadata={}
+ if display_id:
+ if display_id is True:
+ display_id = _new_id()
+ transient['display_id'] = display_id
+ if kwargs.get('update') and 'display_id' not in transient:
+ raise TypeError('display_id required for update_display')
+ if transient:
+ kwargs['transient'] = transient
+
+ if not objs and display_id:
+ # if given no objects, but still a request for a display_id,
+ # we assume the user wants to insert an empty output that
+ # can be updated later
+ objs = [{}]
+ raw = True
+
+ if not raw:
+ format = InteractiveShell.instance().display_formatter.format
+
+ if clear:
+ clear_output(wait=True)
+
+ for obj in objs:
+ if raw:
+ publish_display_data(data=obj, metadata=metadata, **kwargs)
+ else:
+ format_dict, md_dict = format(obj, include=include, exclude=exclude)
+ if not format_dict:
+ # nothing to display (e.g. _ipython_display_ took over)
+ continue
+ if metadata:
+ # kwarg-specified metadata gets precedence
+ _merge(md_dict, metadata)
+ publish_display_data(data=format_dict, metadata=md_dict, **kwargs)
+ if display_id:
+ return DisplayHandle(display_id)
+
+
+# use * for keyword-only display_id arg
+def update_display(obj, *, display_id, **kwargs):
+ """Update an existing display by id
+
+ Parameters
+ ----------
+ obj
+ The object with which to update the display
+ display_id : keyword-only
+ The id of the display to update
+
+ See Also
+ --------
+ :func:`display`
+ """
+ kwargs['update'] = True
+ display(obj, display_id=display_id, **kwargs)
+
+
+class DisplayHandle(object):
+ """A handle on an updatable display
+
+ Call `.update(obj)` to display a new object.
+
+    Call `.display(obj)` to add a new instance of this display,
+ and update existing instances.
+
+ See Also
+ --------
+
+ :func:`display`, :func:`update_display`
+
+ """
+
+ def __init__(self, display_id=None):
+ if display_id is None:
+ display_id = _new_id()
+ self.display_id = display_id
+
+ def __repr__(self):
+ return "<%s display_id=%s>" % (self.__class__.__name__, self.display_id)
+
+ def display(self, obj, **kwargs):
+ """Make a new display with my id, updating existing instances.
+
+ Parameters
+ ----------
+ obj
+ object to display
+ **kwargs
+ additional keyword arguments passed to display
+ """
+ display(obj, display_id=self.display_id, **kwargs)
+
+ def update(self, obj, **kwargs):
+ """Update existing displays with my id
+
+ Parameters
+ ----------
+ obj
+ object to display
+ **kwargs
+ additional keyword arguments passed to update_display
+ """
+ update_display(obj, display_id=self.display_id, **kwargs)
+
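+
+# A usage sketch (never called) for updatable displays, assuming an active
+# IPython session such as a notebook cell; the helper name is hypothetical.
+def _example_display_handle():
+    # Asking display() for a display_id returns a DisplayHandle...
+    handle = display("initial value", display_id=True)
+    # ...which can later overwrite that same output area in place.
+    handle.update("updated value")
+
+    # Equivalently, a handle can be created up front and driven explicitly.
+    explicit = DisplayHandle()
+    explicit.display("shown now")
+    explicit.update("replaced later")
+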
+
+def clear_output(wait=False):
+ """Clear the output of the current cell receiving output.
+
+ Parameters
+ ----------
+ wait : bool [default: false]
+ Wait to clear the output until new output is available to replace it."""
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ InteractiveShell.instance().display_pub.clear_output(wait)
+ else:
+ print('\033[2K\r', end='')
+ sys.stdout.flush()
+ print('\033[2K\r', end='')
+ sys.stderr.flush()
diff --git a/contrib/python/ipython/py3/IPython/core/display_trap.py b/contrib/python/ipython/py3/IPython/core/display_trap.py
new file mode 100644
index 0000000000..9931dfe2df
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/display_trap.py
@@ -0,0 +1,70 @@
+# encoding: utf-8
+"""
+A context manager for handling sys.displayhook.
+
+Authors:
+
+* Robert Kern
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+from traitlets.config.configurable import Configurable
+from traitlets import Any
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+class DisplayTrap(Configurable):
+ """Object to manage sys.displayhook.
+
+ This came from IPython.core.kernel.display_hook, but is simplified
+ (no callbacks or formatters) until more of the core is refactored.
+ """
+
+ hook = Any()
+
+ def __init__(self, hook=None):
+ super(DisplayTrap, self).__init__(hook=hook, config=None)
+ self.old_hook = None
+        # We define this to track if a single DisplayTrap is nested.
+ # Only turn off the trap when the outermost call to __exit__ is made.
+ self._nested_level = 0
+
+ def __enter__(self):
+ if self._nested_level == 0:
+ self.set()
+ self._nested_level += 1
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self._nested_level == 1:
+ self.unset()
+ self._nested_level -= 1
+ # Returning False will cause exceptions to propagate
+ return False
+
+ def set(self):
+ """Set the hook."""
+ if sys.displayhook is not self.hook:
+ self.old_hook = sys.displayhook
+ sys.displayhook = self.hook
+
+ def unset(self):
+ """Unset the hook."""
+ sys.displayhook = self.old_hook
+
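+
+# A minimal usage sketch: install a custom displayhook for the duration of a
+# block and rely on DisplayTrap to restore the previous hook on exit; the
+# nesting counter makes re-entrant `with` blocks safe. Guarded so it only
+# runs when this module is executed directly; the hook is illustrative.
+if __name__ == "__main__":
+
+    def _tagging_hook(value):
+        # Stand-in displayhook: tag interactive results instead of using the
+        # default sys.displayhook behaviour.
+        if value is not None:
+            print("result:", value)
+
+    trap = DisplayTrap(hook=_tagging_hook)
+    with trap:
+        # Inside the block, interactive evaluation routes through our hook.
+        assert sys.displayhook is _tagging_hook
+    # On exit the original displayhook is restored.
+    assert sys.displayhook is not _tagging_hook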
diff --git a/contrib/python/ipython/py3/IPython/core/displayhook.py b/contrib/python/ipython/py3/IPython/core/displayhook.py
new file mode 100644
index 0000000000..aba4f904d8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/displayhook.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+"""Displayhook for IPython.
+
+This defines a callable class that IPython uses for `sys.displayhook`.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import builtins as builtin_mod
+import sys
+import io as _io
+import tokenize
+
+from traitlets.config.configurable import Configurable
+from traitlets import Instance, Float
+from warnings import warn
+
+# TODO: Move the various attributes (cache_size, [others now moved]). Some
+# of these are also attributes of InteractiveShell. They should be on ONE object
+# only and the other objects should ask that one object for their values.
+
+class DisplayHook(Configurable):
+ """The custom IPython displayhook to replace sys.displayhook.
+
+ This class does many things, but the basic idea is that it is a callable
+ that gets called anytime user code returns a value.
+ """
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
+ allow_none=True)
+ cull_fraction = Float(0.2)
+
+ def __init__(self, shell=None, cache_size=1000, **kwargs):
+ super(DisplayHook, self).__init__(shell=shell, **kwargs)
+ cache_size_min = 3
+ if cache_size <= 0:
+ self.do_full_cache = 0
+ cache_size = 0
+ elif cache_size < cache_size_min:
+ self.do_full_cache = 0
+ cache_size = 0
+ warn('caching was disabled (min value for cache size is %s).' %
+ cache_size_min,stacklevel=3)
+ else:
+ self.do_full_cache = 1
+
+ self.cache_size = cache_size
+
+ # we need a reference to the user-level namespace
+ self.shell = shell
+
+ self._,self.__,self.___ = '','',''
+
+ # these are deliberately global:
+ to_user_ns = {'_':self._,'__':self.__,'___':self.___}
+ self.shell.user_ns.update(to_user_ns)
+
+ @property
+ def prompt_count(self):
+ return self.shell.execution_count
+
+ #-------------------------------------------------------------------------
+ # Methods used in __call__. Override these methods to modify the behavior
+ # of the displayhook.
+ #-------------------------------------------------------------------------
+
+ def check_for_underscore(self):
+ """Check if the user has set the '_' variable by hand."""
+ # If something injected a '_' variable in __builtin__, delete
+ # ipython's automatic one so we don't clobber that. gettext() in
+ # particular uses _, so we need to stay away from it.
+ if '_' in builtin_mod.__dict__:
+ try:
+ user_value = self.shell.user_ns['_']
+ if user_value is not self._:
+ return
+ del self.shell.user_ns['_']
+ except KeyError:
+ pass
+
+ def quiet(self):
+ """Should we silence the display hook because of ';'?"""
+ # do not print output if input ends in ';'
+
+ try:
+ cell = self.shell.history_manager.input_hist_parsed[-1]
+ except IndexError:
+ # some uses of ipshellembed may fail here
+ return False
+
+ return self.semicolon_at_end_of_expression(cell)
+
+ @staticmethod
+ def semicolon_at_end_of_expression(expression):
+        """Parse a Python expression and detect whether its last token is ';'"""
+
+ sio = _io.StringIO(expression)
+ tokens = list(tokenize.generate_tokens(sio.readline))
+
+ for token in reversed(tokens):
+ if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
+ continue
+ if (token[0] == tokenize.OP) and (token[1] == ';'):
+ return True
+ else:
+ return False
+
+ def start_displayhook(self):
+ """Start the displayhook, initializing resources."""
+ pass
+
+ def write_output_prompt(self):
+ """Write the output prompt.
+
+ The default implementation simply writes the prompt to
+ ``sys.stdout``.
+ """
+ # Use write, not print which adds an extra space.
+ sys.stdout.write(self.shell.separate_out)
+ outprompt = 'Out[{}]: '.format(self.shell.execution_count)
+ if self.do_full_cache:
+ sys.stdout.write(outprompt)
+
+ def compute_format_data(self, result):
+ """Compute format data of the object to be displayed.
+
+ The format data is a generalization of the :func:`repr` of an object.
+ In the default implementation the format data is a :class:`dict` of
+ key value pair where the keys are valid MIME types and the values
+ are JSON'able data structure containing the raw data for that MIME
+        type. It is up to frontends to pick a MIME type to use and
+ display that data in an appropriate manner.
+
+ This method only computes the format data for the object and should
+ NOT actually print or write that to a stream.
+
+ Parameters
+ ----------
+ result : object
+ The Python object passed to the display hook, whose format will be
+ computed.
+
+ Returns
+ -------
+ (format_dict, md_dict) : dict
+            format_dict is a :class:`dict` whose keys are valid MIME types and whose values are
+            JSON'able raw data for that MIME type. It is recommended that
+            the returned dict always include the "text/plain"
+ MIME type representation of the object.
+            md_dict is a :class:`dict` keyed by the same MIME types,
+            containing the metadata associated with each output.
+
+ """
+ return self.shell.display_formatter.format(result)
+
+ # This can be set to True by the write_output_prompt method in a subclass
+ prompt_end_newline = False
+
+ def write_format_data(self, format_dict, md_dict=None) -> None:
+ """Write the format data dict to the frontend.
+
+ This default version of this method simply writes the plain text
+ representation of the object to ``sys.stdout``. Subclasses should
+ override this method to send the entire `format_dict` to the
+ frontends.
+
+ Parameters
+ ----------
+ format_dict : dict
+ The format dict for the object passed to `sys.displayhook`.
+ md_dict : dict (optional)
+ The metadata dict to be associated with the display data.
+ """
+ if 'text/plain' not in format_dict:
+ # nothing to do
+ return
+ # We want to print because we want to always make sure we have a
+ # newline, even if all the prompt separators are ''. This is the
+ # standard IPython behavior.
+ result_repr = format_dict['text/plain']
+ if '\n' in result_repr:
+ # So that multi-line strings line up with the left column of
+ # the screen, instead of having the output prompt mess up
+ # their first line.
+ # We use the prompt template instead of the expanded prompt
+ # because the expansion may add ANSI escapes that will interfere
+ # with our ability to determine whether or not we should add
+ # a newline.
+ if not self.prompt_end_newline:
+ # But avoid extraneous empty lines.
+ result_repr = '\n' + result_repr
+
+ try:
+ print(result_repr)
+ except UnicodeEncodeError:
+ # If a character is not supported by the terminal encoding replace
+ # it with its \u or \x representation
+ print(result_repr.encode(sys.stdout.encoding,'backslashreplace').decode(sys.stdout.encoding))
+
+ def update_user_ns(self, result):
+ """Update user_ns with various things like _, __, _1, etc."""
+
+ # Avoid recursive reference when displaying _oh/Out
+ if self.cache_size and result is not self.shell.user_ns['_oh']:
+ if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
+ self.cull_cache()
+
+ # Don't overwrite '_' and friends if '_' is in __builtin__
+ # (otherwise we cause buggy behavior for things like gettext). and
+ # do not overwrite _, __ or ___ if one of these has been assigned
+ # by the user.
+ update_unders = True
+ for unders in ['_'*i for i in range(1,4)]:
+ if not unders in self.shell.user_ns:
+ continue
+ if getattr(self, unders) is not self.shell.user_ns.get(unders):
+ update_unders = False
+
+ self.___ = self.__
+ self.__ = self._
+ self._ = result
+
+ if ('_' not in builtin_mod.__dict__) and (update_unders):
+ self.shell.push({'_':self._,
+ '__':self.__,
+ '___':self.___}, interactive=False)
+
+ # hackish access to top-level namespace to create _1,_2... dynamically
+ to_main = {}
+ if self.do_full_cache:
+ new_result = '_%s' % self.prompt_count
+ to_main[new_result] = result
+ self.shell.push(to_main, interactive=False)
+ self.shell.user_ns['_oh'][self.prompt_count] = result
+
+ def fill_exec_result(self, result):
+ if self.exec_result is not None:
+ self.exec_result.result = result
+
+ def log_output(self, format_dict):
+ """Log the output."""
+ if 'text/plain' not in format_dict:
+ # nothing to do
+ return
+ if self.shell.logger.log_output:
+ self.shell.logger.log_write(format_dict['text/plain'], 'output')
+ self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
+ format_dict['text/plain']
+
+ def finish_displayhook(self):
+ """Finish up all displayhook activities."""
+ sys.stdout.write(self.shell.separate_out2)
+ sys.stdout.flush()
+
+ def __call__(self, result=None):
+ """Printing with history cache management.
+
+ This is invoked every time the interpreter needs to print, and is
+ activated by setting the variable sys.displayhook to it.
+ """
+ self.check_for_underscore()
+ if result is not None and not self.quiet():
+ self.start_displayhook()
+ self.write_output_prompt()
+ format_dict, md_dict = self.compute_format_data(result)
+ self.update_user_ns(result)
+ self.fill_exec_result(result)
+ if format_dict:
+ self.write_format_data(format_dict, md_dict)
+ self.log_output(format_dict)
+ self.finish_displayhook()
+
+ def cull_cache(self):
+ """Output cache is full, cull the oldest entries"""
+ oh = self.shell.user_ns.get('_oh', {})
+ sz = len(oh)
+ cull_count = max(int(sz * self.cull_fraction), 2)
+ warn('Output cache limit (currently {sz} entries) hit.\n'
+ 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
+
+ for i, n in enumerate(sorted(oh)):
+ if i >= cull_count:
+ break
+ self.shell.user_ns.pop('_%i' % n, None)
+ oh.pop(n, None)
+
+
+ def flush(self):
+ if not self.do_full_cache:
+ raise ValueError("You shouldn't have reached the cache flush "
+ "if full caching is not enabled!")
+ # delete auto-generated vars from global namespace
+
+ for n in range(1,self.prompt_count + 1):
+ key = '_'+repr(n)
+ try:
+ del self.shell.user_ns[key]
+ except: pass
+ # In some embedded circumstances, the user_ns doesn't have the
+ # '_oh' key set up.
+ oh = self.shell.user_ns.get('_oh', None)
+ if oh is not None:
+ oh.clear()
+
+ # Release our own references to objects:
+ self._, self.__, self.___ = '', '', ''
+
+ if '_' not in builtin_mod.__dict__:
+ self.shell.user_ns.update({'_':self._,'__':self.__,'___':self.___})
+ import gc
+ # TODO: Is this really needed?
+ # IronPython blocks here forever
+ if sys.platform != "cli":
+ gc.collect()
+
+
+class CapturingDisplayHook(object):
+ def __init__(self, shell, outputs=None):
+ self.shell = shell
+ if outputs is None:
+ outputs = []
+ self.outputs = outputs
+
+ def __call__(self, result=None):
+ if result is None:
+ return
+ format_dict, md_dict = self.shell.display_formatter.format(result)
+ self.outputs.append({ 'data': format_dict, 'metadata': md_dict })
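+
+
+# A minimal sketch of capturing formatted results instead of printing them,
+# assuming a running InteractiveShell (as in a terminal IPython or a kernel).
+# Guarded so importing this module stays side-effect free; values shown in
+# comments are illustrative.
+if __name__ == "__main__":
+    from IPython.core.interactiveshell import InteractiveShell
+
+    shell = InteractiveShell.instance()
+    captured = []
+    hook = CapturingDisplayHook(shell, outputs=captured)
+    # Calling the hook formats the object with the shell's display formatter
+    # and records {'data': ..., 'metadata': ...} instead of writing to stdout.
+    hook(42)
+    # captured[0]['data'] now holds at least a 'text/plain' entry, e.g. '42'.
+
+    # The semicolon check behind DisplayHook.quiet() is a plain static method
+    # and can be exercised directly:
+    assert DisplayHook.semicolon_at_end_of_expression("1 + 1;") is True
+    assert DisplayHook.semicolon_at_end_of_expression("1 + 1") is False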
diff --git a/contrib/python/ipython/py3/IPython/core/displaypub.py b/contrib/python/ipython/py3/IPython/core/displaypub.py
new file mode 100644
index 0000000000..74028ec79e
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/displaypub.py
@@ -0,0 +1,138 @@
+"""An interface for publishing rich data to frontends.
+
+There are two components of the display system:
+
+* Display formatters, which take a Python object and compute the
+ representation of the object in various formats (text, HTML, SVG, etc.).
+* The display publisher that is used to send the representation data to the
+ various frontends.
+
+This module defines the logic for display publishing. The display publisher uses
+the ``display_data`` message type that is defined in the IPython messaging
+spec.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+
+from traitlets.config.configurable import Configurable
+from traitlets import List
+
+# This used to be defined here - it is imported for backwards compatibility
+from .display_functions import publish_display_data
+
+#-----------------------------------------------------------------------------
+# Main payload class
+#-----------------------------------------------------------------------------
+
+
+class DisplayPublisher(Configurable):
+ """A traited class that publishes display data to frontends.
+
+ Instances of this class are created by the main IPython object and should
+ be accessed there.
+ """
+
+ def __init__(self, shell=None, *args, **kwargs):
+ self.shell = shell
+ super().__init__(*args, **kwargs)
+
+ def _validate_data(self, data, metadata=None):
+ """Validate the display data.
+
+ Parameters
+ ----------
+ data : dict
+            The format data dictionary.
+ metadata : dict
+ Any metadata for the data.
+ """
+
+ if not isinstance(data, dict):
+ raise TypeError('data must be a dict, got: %r' % data)
+ if metadata is not None:
+ if not isinstance(metadata, dict):
+                raise TypeError('metadata must be a dict, got: %r' % metadata)
+
+ # use * to indicate transient, update are keyword-only
+ def publish(self, data, metadata=None, source=None, *, transient=None, update=False, **kwargs) -> None:
+ """Publish data and metadata to all frontends.
+
+ See the ``display_data`` message in the messaging documentation for
+ more details about this message type.
+
+ The following MIME types are currently implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ data : dict
+ A dictionary having keys that are valid MIME types (like
+ 'text/plain' or 'image/svg+xml') and values that are the data for
+ that MIME type. The data itself must be a JSON'able data
+ structure. Minimally all data should have the 'text/plain' data,
+ which can be displayed by all frontends. If more than the plain
+ text is given, it is up to the frontend to decide which
+ representation to use.
+ metadata : dict
+ A dictionary for metadata related to the data. This can contain
+ arbitrary key, value pairs that frontends can use to interpret
+ the data. Metadata specific to each mime-type can be specified
+ in the metadata dict with the same mime-type keys as
+ the data itself.
+ source : str, deprecated
+ Unused.
+ transient : dict, keyword-only
+ A dictionary for transient data.
+ Data in this dictionary should not be persisted as part of saving this output.
+ Examples include 'display_id'.
+ update : bool, keyword-only, default: False
+ If True, only update existing outputs with the same display_id,
+ rather than creating a new output.
+ """
+
+ handlers = {}
+ if self.shell is not None:
+ handlers = getattr(self.shell, 'mime_renderers', {})
+
+ for mime, handler in handlers.items():
+ if mime in data:
+ handler(data[mime], metadata.get(mime, None))
+ return
+
+ if 'text/plain' in data:
+ print(data['text/plain'])
+
+ def clear_output(self, wait=False):
+ """Clear the output of the cell receiving output."""
+ print('\033[2K\r', end='')
+ sys.stdout.flush()
+ print('\033[2K\r', end='')
+ sys.stderr.flush()
+
+
+class CapturingDisplayPublisher(DisplayPublisher):
+    """A DisplayPublisher that stores published output instead of displaying it."""
+ outputs = List()
+
+ def publish(self, data, metadata=None, source=None, *, transient=None, update=False):
+ self.outputs.append({'data':data, 'metadata':metadata,
+ 'transient':transient, 'update':update})
+
+ def clear_output(self, wait=False):
+ super(CapturingDisplayPublisher, self).clear_output(wait)
+
+ # empty the list, *do not* reassign a new list
+ self.outputs.clear()
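+
+
+# A minimal sketch of capturing published output rather than printing it
+# (roughly what IPython's output-capturing utilities build on). The values
+# are illustrative; guarded so importing the module has no side effects.
+if __name__ == "__main__":
+    pub = CapturingDisplayPublisher()
+    pub.publish(data={"text/plain": "hello"}, metadata={})
+    # Instead of going to stdout, the bundle is recorded for later inspection:
+    assert pub.outputs[0]["data"] == {"text/plain": "hello"}
+    assert pub.outputs[0]["update"] is False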
diff --git a/contrib/python/ipython/py3/IPython/core/error.py b/contrib/python/ipython/py3/IPython/core/error.py
new file mode 100644
index 0000000000..684cbc8da6
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/error.py
@@ -0,0 +1,60 @@
+# encoding: utf-8
+"""
+Global exception classes for IPython.core.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+* Min Ragan-Kelley
+
+Notes
+-----
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Exception classes
+#-----------------------------------------------------------------------------
+
+class IPythonCoreError(Exception):
+ pass
+
+
+class TryNext(IPythonCoreError):
+ """Try next hook exception.
+
+ Raise this in your hook function to indicate that the next hook handler
+ should be used to handle the operation.
+ """
+
+class UsageError(IPythonCoreError):
+ """Error in magic function arguments, etc.
+
+ Something that probably won't warrant a full traceback, but should
+ nevertheless interrupt a macro / batch file.
+ """
+
+class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
+ """raw_input was requested in a context where it is not supported
+
+ For use in IPython kernels, where only some frontends may support
+ stdin requests.
+ """
+
+class InputRejected(Exception):
+ """Input rejected by ast transformer.
+
+ Raise this in your NodeTransformer to indicate that InteractiveShell should
+ not execute the supplied input.
+ """
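+
+
+# Minimal usage sketches (never called) for the two most commonly raised
+# exceptions above, assuming they run inside IPython: a hook that defers to
+# the next handler, and a magic body that rejects bad arguments. The function
+# names and the `%example` magic name are hypothetical.
+def _example_editor_hook(self, filename, linenum=None):
+    """A hook that only handles Python files and defers everything else."""
+    if not filename.endswith(".py"):
+        # Let IPython fall through to the next registered hook for this slot.
+        raise TryNext()
+    print("would open", filename, "at line", linenum)
+
+
+def _example_magic_body(line):
+    """A line-magic body that flags bad arguments without a full traceback."""
+    if not line.strip():
+        raise UsageError("an argument is required, e.g. `%example something`")
+    return line.strip()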
diff --git a/contrib/python/ipython/py3/IPython/core/events.py b/contrib/python/ipython/py3/IPython/core/events.py
new file mode 100644
index 0000000000..3a66e75e5a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/events.py
@@ -0,0 +1,166 @@
+"""Infrastructure for registering and firing callbacks on application events.
+
+Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
+be called at specific times, or a collection of alternative methods to try,
+callbacks are designed to be used by extension authors. A number of callbacks
+can be registered for the same event without needing to be aware of one another.
+
+The functions defined in this module are no-ops indicating the names of available
+events and the arguments which will be passed to them.
+
+.. note::
+
+ This API is experimental in IPython 2.0, and may be revised in future versions.
+"""
+
+from backcall import callback_prototype
+
+
+class EventManager(object):
+ """Manage a collection of events and a sequence of callbacks for each.
+
+ This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
+ instances as an ``events`` attribute.
+
+ .. note::
+
+ This API is experimental in IPython 2.0, and may be revised in future versions.
+ """
+
+ def __init__(self, shell, available_events, print_on_error=True):
+        """Initialise the :class:`EventManager`.
+
+ Parameters
+ ----------
+ shell
+ The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
+ available_events
+ An iterable of names for callback events.
+        print_on_error : bool
+            A boolean flag to set whether the EventManager will print a
+            warning when an event callback raises an error.
+ """
+ self.shell = shell
+ self.callbacks = {n:[] for n in available_events}
+ self.print_on_error = print_on_error
+
+ def register(self, event, function):
+ """Register a new event callback.
+
+ Parameters
+ ----------
+ event : str
+ The event for which to register this callback.
+ function : callable
+ A function to be called on the given event. It should take the same
+ parameters as the appropriate callback prototype.
+
+ Raises
+ ------
+ TypeError
+ If ``function`` is not callable.
+ KeyError
+ If ``event`` is not one of the known events.
+ """
+ if not callable(function):
+ raise TypeError('Need a callable, got %r' % function)
+ callback_proto = available_events.get(event)
+ if function not in self.callbacks[event]:
+ self.callbacks[event].append(callback_proto.adapt(function))
+
+ def unregister(self, event, function):
+ """Remove a callback from the given event."""
+ if function in self.callbacks[event]:
+ return self.callbacks[event].remove(function)
+
+ # Remove callback in case ``function`` was adapted by `backcall`.
+ for callback in self.callbacks[event]:
+ try:
+ if callback.__wrapped__ is function:
+ return self.callbacks[event].remove(callback)
+ except AttributeError:
+ pass
+
+ raise ValueError('Function {!r} is not registered as a {} callback'.format(function, event))
+
+ def trigger(self, event, *args, **kwargs):
+ """Call callbacks for ``event``.
+
+ Any additional arguments are passed to all callbacks registered for this
+ event. Exceptions raised by callbacks are caught, and a message printed.
+ """
+ for func in self.callbacks[event][:]:
+ try:
+ func(*args, **kwargs)
+ except (Exception, KeyboardInterrupt):
+ if self.print_on_error:
+ print("Error in callback {} (for {}):".format(func, event))
+ self.shell.showtraceback()
+
+# event_name -> prototype mapping
+available_events = {}
+
+def _define_event(callback_function):
+ callback_proto = callback_prototype(callback_function)
+ available_events[callback_function.__name__] = callback_proto
+ return callback_proto
+
+# ------------------------------------------------------------------------------
+# Callback prototypes
+#
+# No-op functions which describe the names of available events and the
+# signatures of callbacks for those events.
+# ------------------------------------------------------------------------------
+
+@_define_event
+def pre_execute():
+ """Fires before code is executed in response to user/frontend action.
+
+ This includes comm and widget messages and silent execution, as well as user
+ code cells.
+ """
+ pass
+
+@_define_event
+def pre_run_cell(info):
+ """Fires before user-entered code runs.
+
+ Parameters
+ ----------
+ info : :class:`~IPython.core.interactiveshell.ExecutionInfo`
+ An object containing information used for the code execution.
+ """
+ pass
+
+@_define_event
+def post_execute():
+ """Fires after code is executed in response to user/frontend action.
+
+ This includes comm and widget messages and silent execution, as well as user
+ code cells.
+ """
+ pass
+
+@_define_event
+def post_run_cell(result):
+ """Fires after user-entered code runs.
+
+ Parameters
+ ----------
+ result : :class:`~IPython.core.interactiveshell.ExecutionResult`
+ The object which will be returned as the execution result.
+ """
+ pass
+
+@_define_event
+def shell_initialized(ip):
+ """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
+
+ This is before extensions and startup scripts are loaded, so it can only be
+ set by subclassing.
+
+ Parameters
+ ----------
+ ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
+ The newly initialised shell.
+ """
+ pass
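+
+
+# A minimal sketch (never called) of registering callbacks for the events
+# defined above, assuming an active InteractiveShell; the helper name and the
+# printed message are illustrative.
+def _example_register_timing_callbacks():
+    import time
+
+    from IPython import get_ipython
+
+    ip = get_ipython()
+    started = {}
+
+    def pre_run_cell(info):
+        # `info` is an ExecutionInfo object (see the pre_run_cell prototype above).
+        started["t0"] = time.monotonic()
+
+    def post_run_cell(result):
+        # `result` is an ExecutionResult object (see post_run_cell above).
+        elapsed = time.monotonic() - started["t0"]
+        print("cell took %.3fs, success=%s" % (elapsed, result.success))
+
+    ip.events.register("pre_run_cell", pre_run_cell)
+    ip.events.register("post_run_cell", post_run_cell)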
diff --git a/contrib/python/ipython/py3/IPython/core/excolors.py b/contrib/python/ipython/py3/IPython/core/excolors.py
new file mode 100644
index 0000000000..85eef81f0e
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/excolors.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+"""
+Color schemes for exception handling code in IPython.
+"""
+
+import os
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+from IPython.utils.coloransi import ColorSchemeTable, TermColors, ColorScheme
+
+def exception_colors():
+ """Return a color table with fields for exception reporting.
+
+    The table is an instance of ColorSchemeTable with schemes added for
+    'Neutral', 'Linux', 'LightBG' and 'NoColor', and with the fields for
+    exception handling filled in.
+
+ Examples:
+
+ >>> ec = exception_colors()
+ >>> ec.active_scheme_name
+ ''
+ >>> print(ec.active_colors)
+ None
+
+ Now we activate a color scheme:
+ >>> ec.set_active_scheme('NoColor')
+ >>> ec.active_scheme_name
+ 'NoColor'
+ >>> sorted(ec.active_colors.keys())
+ ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
+ 'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
+ 'val', 'valEm']
+ """
+
+ ex_colors = ColorSchemeTable()
+
+ # Populate it with color schemes
+ C = TermColors # shorthand and local lookup
+ ex_colors.add_scheme(ColorScheme(
+ 'NoColor',
+ # The color to be used for the top line
+ topline = C.NoColor,
+
+ # The colors to be used in the traceback
+ filename = C.NoColor,
+ lineno = C.NoColor,
+ name = C.NoColor,
+ vName = C.NoColor,
+ val = C.NoColor,
+ em = C.NoColor,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.NoColor,
+ filenameEm = C.NoColor,
+ linenoEm = C.NoColor,
+ nameEm = C.NoColor,
+ valEm = C.NoColor,
+
+ # Colors for printing the exception
+ excName = C.NoColor,
+ line = C.NoColor,
+ caret = C.NoColor,
+ Normal = C.NoColor
+ ))
+
+ # make some schemes as instances so we can copy them for modification easily
+ ex_colors.add_scheme(ColorScheme(
+ 'Linux',
+ # The color to be used for the top line
+ topline = C.LightRed,
+
+ # The colors to be used in the traceback
+ filename = C.Green,
+ lineno = C.Green,
+ name = C.Purple,
+ vName = C.Cyan,
+ val = C.Green,
+ em = C.LightCyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.LightCyan,
+ filenameEm = C.LightGreen,
+ linenoEm = C.LightGreen,
+ nameEm = C.LightPurple,
+ valEm = C.LightBlue,
+
+ # Colors for printing the exception
+ excName = C.LightRed,
+ line = C.Yellow,
+ caret = C.White,
+ Normal = C.Normal
+ ))
+
+ # For light backgrounds, swap dark/light colors
+ ex_colors.add_scheme(ColorScheme(
+ 'LightBG',
+ # The color to be used for the top line
+ topline = C.Red,
+
+ # The colors to be used in the traceback
+ filename = C.LightGreen,
+ lineno = C.LightGreen,
+ name = C.LightPurple,
+ vName = C.Cyan,
+ val = C.LightGreen,
+ em = C.Cyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.Cyan,
+ filenameEm = C.Green,
+ linenoEm = C.Green,
+ nameEm = C.Purple,
+ valEm = C.Blue,
+
+ # Colors for printing the exception
+ excName = C.Red,
+ #line = C.Brown, # brown often is displayed as yellow
+ line = C.Red,
+ caret = C.Normal,
+ Normal = C.Normal,
+ ))
+
+ ex_colors.add_scheme(ColorScheme(
+ 'Neutral',
+ # The color to be used for the top line
+ topline = C.Red,
+
+ # The colors to be used in the traceback
+ filename = C.LightGreen,
+ lineno = C.LightGreen,
+ name = C.LightPurple,
+ vName = C.Cyan,
+ val = C.LightGreen,
+ em = C.Cyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.Cyan,
+ filenameEm = C.Green,
+ linenoEm = C.Green,
+ nameEm = C.Purple,
+ valEm = C.Blue,
+
+ # Colors for printing the exception
+ excName = C.Red,
+ #line = C.Brown, # brown often is displayed as yellow
+ line = C.Red,
+ caret = C.Normal,
+ Normal = C.Normal,
+ ))
+
+ # Hack: the 'neutral' colours are not very visible on a dark background on
+ # Windows. Since Windows command prompts have a dark background by default, and
+ # relatively few users are likely to alter that, we will use the 'Linux' colours,
+ # designed for a dark background, as the default on Windows.
+ if os.name == "nt":
+ ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
+
+ return ex_colors
diff --git a/contrib/python/ipython/py3/IPython/core/extensions.py b/contrib/python/ipython/py3/IPython/core/extensions.py
new file mode 100644
index 0000000000..21fba40eaf
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/extensions.py
@@ -0,0 +1,151 @@
+# encoding: utf-8
+"""A class for managing IPython extensions."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import os.path
+import sys
+from importlib import import_module, reload
+
+from traitlets.config.configurable import Configurable
+from IPython.utils.path import ensure_dir_exists, compress_user
+from IPython.utils.decorators import undoc
+from traitlets import Instance
+
+
+#-----------------------------------------------------------------------------
+# Main class
+#-----------------------------------------------------------------------------
+
+BUILTINS_EXTS = {"storemagic": False, "autoreload": False}
+
+
+class ExtensionManager(Configurable):
+ """A class to manage IPython extensions.
+
+ An IPython extension is an importable Python module that has
+ a function with the signature::
+
+ def load_ipython_extension(ipython):
+ # Do things with ipython
+
+ This function is called after your extension is imported and the
+ currently active :class:`InteractiveShell` instance is passed as
+ the only argument. You can do anything you want with IPython at
+ that point, including defining new magic and aliases, adding new
+ components, etc.
+
+ You can also optionally define an :func:`unload_ipython_extension(ipython)`
+ function, which will be called if the user unloads or reloads the extension.
+ The extension manager will only call :func:`load_ipython_extension` again
+ if the extension is reloaded.
+
+ You can put your extension modules anywhere you want, as long as
+ they can be imported by Python's standard import mechanism. However,
+ to make it easy to write extensions, you can also put your extensions
+ in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
+ is added to ``sys.path`` automatically.
+ """
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(ExtensionManager, self).__init__(shell=shell, **kwargs)
+ self.shell.observe(
+ self._on_ipython_dir_changed, names=('ipython_dir',)
+ )
+ self.loaded = set()
+
+ @property
+ def ipython_extension_dir(self):
+ return os.path.join(self.shell.ipython_dir, u'extensions')
+
+ def _on_ipython_dir_changed(self, change):
+ ensure_dir_exists(self.ipython_extension_dir)
+
+ def load_extension(self, module_str: str):
+ """Load an IPython extension by its module name.
+
+ Returns the string "already loaded" if the extension is already loaded,
+ "no load function" if the module doesn't have a load_ipython_extension
+ function, or None if it succeeded.
+ """
+ try:
+ return self._load_extension(module_str)
+ except ModuleNotFoundError:
+ if module_str in BUILTINS_EXTS:
+ BUILTINS_EXTS[module_str] = True
+ return self._load_extension("IPython.extensions." + module_str)
+ raise
+
+ def _load_extension(self, module_str: str):
+ if module_str in self.loaded:
+ return "already loaded"
+
+ from IPython.utils.syspathcontext import prepended_to_syspath
+
+ with self.shell.builtin_trap:
+ if module_str not in sys.modules:
+ mod = import_module(module_str)
+ mod = sys.modules[module_str]
+ if self._call_load_ipython_extension(mod):
+ self.loaded.add(module_str)
+ else:
+ return "no load function"
+
+ def unload_extension(self, module_str: str):
+ """Unload an IPython extension by its module name.
+
+ This function looks up the extension's name in ``sys.modules`` and
+ simply calls ``mod.unload_ipython_extension(self)``.
+
+ Returns the string "no unload function" if the extension doesn't define
+ a function to unload itself, "not loaded" if the extension isn't loaded,
+ otherwise None.
+ """
+ if BUILTINS_EXTS.get(module_str, False) is True:
+ module_str = "IPython.extensions." + module_str
+ if module_str not in self.loaded:
+ return "not loaded"
+
+ if module_str in sys.modules:
+ mod = sys.modules[module_str]
+ if self._call_unload_ipython_extension(mod):
+ self.loaded.discard(module_str)
+ else:
+ return "no unload function"
+
+ def reload_extension(self, module_str: str):
+ """Reload an IPython extension by calling reload.
+
+ If the module has not been loaded before,
+ :meth:`InteractiveShell.load_extension` is called. Otherwise
+ :func:`reload` is called and then the :func:`load_ipython_extension`
+        function of the module, if it exists, is called.
+ """
+ from IPython.utils.syspathcontext import prepended_to_syspath
+
+ if BUILTINS_EXTS.get(module_str, False) is True:
+ module_str = "IPython.extensions." + module_str
+
+ if (module_str in self.loaded) and (module_str in sys.modules):
+ self.unload_extension(module_str)
+ mod = sys.modules[module_str]
+ with prepended_to_syspath(self.ipython_extension_dir):
+ reload(mod)
+ if self._call_load_ipython_extension(mod):
+ self.loaded.add(module_str)
+ else:
+ self.load_extension(module_str)
+
+ def _call_load_ipython_extension(self, mod):
+ if hasattr(mod, 'load_ipython_extension'):
+ mod.load_ipython_extension(self.shell)
+ return True
+
+ def _call_unload_ipython_extension(self, mod):
+ if hasattr(mod, 'unload_ipython_extension'):
+ mod.unload_ipython_extension(self.shell)
+ return True
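+
+
+# A sketch of the smallest extension module the manager above can load: save
+# something shaped like this as e.g. ``myextension.py`` on sys.path and load
+# it with ``%load_ext myextension``. The module name and the print calls are
+# illustrative; only the two entry-point names below are part of the contract.
+_EXAMPLE_EXTENSION_SOURCE = '''\
+def load_ipython_extension(ipython):
+    # Called when the extension is loaded; `ipython` is the active
+    # InteractiveShell, so magics, aliases, formatters, etc. go here.
+    print("myextension loaded into", type(ipython).__name__)
+
+
+def unload_ipython_extension(ipython):
+    # Optional: undo whatever load_ipython_extension set up.
+    print("myextension unloaded")
+'''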
diff --git a/contrib/python/ipython/py3/IPython/core/formatters.py b/contrib/python/ipython/py3/IPython/core/formatters.py
new file mode 100644
index 0000000000..15cf703c2a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/formatters.py
@@ -0,0 +1,1028 @@
+# -*- coding: utf-8 -*-
+"""Display formatters.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.formatters
+ :parts: 3
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import abc
+import sys
+import traceback
+import warnings
+from io import StringIO
+
+from decorator import decorator
+
+from traitlets.config.configurable import Configurable
+from .getipython import get_ipython
+from ..utils.sentinel import Sentinel
+from ..utils.dir2 import get_real_method
+from ..lib import pretty
+from traitlets import (
+ Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
+ ForwardDeclaredInstance,
+ default, observe,
+)
+
+from typing import Any
+
+
+class DisplayFormatter(Configurable):
+
+ active_types = List(Unicode(),
+ help="""List of currently active mime-types to display.
+ You can use this to set a white-list for formats to display.
+
+ Most users will not need to change this value.
+ """).tag(config=True)
+
+ @default('active_types')
+ def _active_types_default(self):
+ return self.format_types
+
+ @observe('active_types')
+ def _active_types_changed(self, change):
+ for key, formatter in self.formatters.items():
+ if key in change['new']:
+ formatter.enabled = True
+ else:
+ formatter.enabled = False
+
+ ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
+ @default('ipython_display_formatter')
+ def _default_formatter(self):
+ return IPythonDisplayFormatter(parent=self)
+
+ mimebundle_formatter = ForwardDeclaredInstance('FormatterABC')
+ @default('mimebundle_formatter')
+ def _default_mime_formatter(self):
+ return MimeBundleFormatter(parent=self)
+
+    # A dict of formatters whose keys are format types (MIME types) and whose
+ # values are subclasses of BaseFormatter.
+ formatters = Dict()
+ @default('formatters')
+ def _formatters_default(self):
+ """Activate the default formatters."""
+ formatter_classes = [
+ PlainTextFormatter,
+ HTMLFormatter,
+ MarkdownFormatter,
+ SVGFormatter,
+ PNGFormatter,
+ PDFFormatter,
+ JPEGFormatter,
+ LatexFormatter,
+ JSONFormatter,
+ JavascriptFormatter
+ ]
+ d = {}
+ for cls in formatter_classes:
+ f = cls(parent=self)
+ d[f.format_type] = f
+ return d
+
+ def format(self, obj, include=None, exclude=None):
+ """Return a format data dict for an object.
+
+ By default all format types will be computed.
+
+ The following MIME types are usually implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * application/pdf
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ obj : object
+ The Python object whose format data will be computed.
+ include : list, tuple or set; optional
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set *only* the format types included
+ in this list will be computed.
+ exclude : list, tuple or set; optional
+ A list of format type string (MIME types) to exclude in the format
+ data dict. If this is set all format types will be computed,
+ except for those included in this argument.
+ Mimetypes present in exclude will take precedence over the ones in include
+
+ Returns
+ -------
+ (format_dict, metadata_dict) : tuple of two dicts
+            format_dict is a dictionary of key/value pairs, one for each format that was
+            generated for the object. The keys are the format types, which
+            will usually be MIME type strings, and the values are JSON'able
+            data structures containing the raw data for the representation in
+            that format.
+
+ metadata_dict is a dictionary of metadata about each mime-type output.
+ Its keys will be a strict subset of the keys in format_dict.
+
+ Notes
+ -----
+        If an object implements `_repr_mimebundle_` as well as various
+ `_repr_*_`, the data returned by `_repr_mimebundle_` will take
+ precedence and the corresponding `_repr_*_` for this mimetype will
+ not be called.
+
+ """
+ format_dict = {}
+ md_dict = {}
+
+ if self.ipython_display_formatter(obj):
+ # object handled itself, don't proceed
+ return {}, {}
+
+ format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
+
+ if format_dict or md_dict:
+ if include:
+ format_dict = {k:v for k,v in format_dict.items() if k in include}
+ md_dict = {k:v for k,v in md_dict.items() if k in include}
+ if exclude:
+ format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
+ md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
+
+ for format_type, formatter in self.formatters.items():
+ if format_type in format_dict:
+ # already got it from mimebundle, maybe don't render again.
+ # exception: manually registered per-mime renderer
+ # check priority:
+ # 1. user-registered per-mime formatter
+ # 2. mime-bundle (user-registered or repr method)
+ # 3. default per-mime formatter (e.g. repr method)
+ try:
+ formatter.lookup(obj)
+ except KeyError:
+ # no special formatter, use mime-bundle-provided value
+ continue
+ if include and format_type not in include:
+ continue
+ if exclude and format_type in exclude:
+ continue
+
+ md = None
+ try:
+ data = formatter(obj)
+ except:
+ # FIXME: log the exception
+ raise
+
+ # formatters can return raw data or (data, metadata)
+ if isinstance(data, tuple) and len(data) == 2:
+ data, md = data
+
+ if data is not None:
+ format_dict[format_type] = data
+ if md is not None:
+ md_dict[format_type] = md
+ return format_dict, md_dict
+
+ @property
+ def format_types(self):
+ """Return the format types (MIME types) of the active formatters."""
+ return list(self.formatters.keys())
+
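+# Illustrative usage sketch (not part of upstream IPython). ``shell`` below is
+# assumed to be the running InteractiveShell; the output shown is what the
+# default PlainTextFormatter would produce.
+#
+#     shell = get_ipython()
+#     data, metadata = shell.display_formatter.format(
+#         {"a": 1},                 # any Python object
+#         include={"text/plain"},   # optional allow-list of MIME types
+#     )
+#     # data     -> {'text/plain': "{'a': 1}"}
+#     # metadata -> {}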
+
+#-----------------------------------------------------------------------------
+# Formatters for specific format types (text, html, svg, etc.)
+#-----------------------------------------------------------------------------
+
+
+def _safe_repr(obj):
+ """Try to return a repr of an object
+
+ always returns a string, at least.
+ """
+ try:
+ return repr(obj)
+ except Exception as e:
+ return "un-repr-able object (%r)" % e
+
+
+class FormatterWarning(UserWarning):
+ """Warning class for errors in formatters"""
+
+@decorator
+def catch_format_error(method, self, *args, **kwargs):
+ """show traceback on failed format call"""
+ try:
+ r = method(self, *args, **kwargs)
+ except NotImplementedError:
+ # don't warn on NotImplementedErrors
+ return self._check_return(None, args[0])
+ except Exception:
+ exc_info = sys.exc_info()
+ ip = get_ipython()
+ if ip is not None:
+ ip.showtraceback(exc_info)
+ else:
+ traceback.print_exception(*exc_info)
+ return self._check_return(None, args[0])
+ return self._check_return(r, args[0])
+
+
+class FormatterABC(metaclass=abc.ABCMeta):
+ """ Abstract base class for Formatters.
+
+ A formatter is a callable class that is responsible for computing the
+ raw format data for a particular format type (MIME type). For example,
+ an HTML formatter would have a format type of `text/html` and would return
+ the HTML representation of the object when called.
+ """
+
+ # The format type of the data returned, usually a MIME type.
+ format_type = 'text/plain'
+
+ # Is the formatter enabled...
+ enabled = True
+
+ @abc.abstractmethod
+ def __call__(self, obj):
+ """Return a JSON'able representation of the object.
+
+ If the object cannot be formatted by this formatter,
+ warn and return None.
+ """
+ return repr(obj)
+
+
+def _mod_name_key(typ):
+ """Return a (__module__, __name__) tuple for a type.
+
+ Used as key in Formatter.deferred_printers.
+ """
+ module = getattr(typ, '__module__', None)
+ name = getattr(typ, '__name__', None)
+ return (module, name)
+
+
+def _get_type(obj):
+ """Return the type of an instance (old and new-style)"""
+ return getattr(obj, '__class__', None) or type(obj)
+
+
+_raise_key_error = Sentinel('_raise_key_error', __name__,
+"""
+Special value to raise a KeyError
+
+Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
+""")
+
+
+class BaseFormatter(Configurable):
+ """A base formatter class that is configurable.
+
+ This formatter should usually be used as the base class of all formatters.
+ It is a traited :class:`Configurable` class and includes an extensible
+ API for users to determine how their objects are formatted. The following
+ logic is used to find a function to format a given object.
+
+ 1. The object is introspected to see if it has a method with the name
+ :attr:`print_method`. If it does, that object is passed to that method
+ for formatting.
+ 2. If no print method is found, three internal dictionaries are consulted
+ to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
+ and :attr:`deferred_printers`.
+
+ Users should use these dictionaries to register functions that will be
+ used to compute the format data for their objects (if those objects don't
+ have the special print methods). The easiest way of using these
+ dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
+ methods.
+
+ If no function/callable is found to compute the format data, ``None`` is
+ returned and this format type is not used.
+ """
+
+ format_type = Unicode("text/plain")
+ _return_type: Any = str
+
+ enabled = Bool(True).tag(config=True)
+
+ print_method = ObjectName('__repr__')
+
+ # The singleton printers.
+ # Maps the IDs of the builtin singleton objects to the format functions.
+ singleton_printers = Dict().tag(config=True)
+
+ # The type-specific printers.
+ # Map type objects to the format functions.
+ type_printers = Dict().tag(config=True)
+
+ # The deferred-import type-specific printers.
+ # Map (modulename, classname) pairs to the format functions.
+ deferred_printers = Dict().tag(config=True)
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the format for an object."""
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ return printer(obj)
+ # Finally look for special method names
+ method = get_real_method(obj, self.print_method)
+ if method is not None:
+ return method()
+ return None
+ else:
+ return None
+
+ def __contains__(self, typ):
+ """map in to lookup_by_type"""
+ try:
+ self.lookup_by_type(typ)
+ except KeyError:
+ return False
+ else:
+ return True
+
+ def _check_return(self, r, obj):
+ """Check that a return value is appropriate
+
+ Return the value if so, None otherwise, warning if invalid.
+ """
+ if r is None or isinstance(r, self._return_type) or \
+ (isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
+ return r
+ else:
+ warnings.warn(
+ "%s formatter returned invalid type %s (expected %s) for object: %s" % \
+ (self.format_type, type(r), self._return_type, _safe_repr(obj)),
+ FormatterWarning
+ )
+
+ def lookup(self, obj):
+ """Look up the formatter for a given instance.
+
+ Parameters
+ ----------
+ obj : object instance
+
+ Returns
+ -------
+ f : callable
+ The registered formatting callable for the type.
+
+ Raises
+ ------
+ KeyError if the type has not been registered.
+ """
+ # look for singleton first
+ obj_id = id(obj)
+ if obj_id in self.singleton_printers:
+ return self.singleton_printers[obj_id]
+ # then lookup by type
+ return self.lookup_by_type(_get_type(obj))
+
+ def lookup_by_type(self, typ):
+ """Look up the registered formatter for a type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+
+ Returns
+ -------
+ f : callable
+ The registered formatting callable for the type.
+
+ Raises
+ ------
+ KeyError if the type has not been registered.
+ """
+ if isinstance(typ, str):
+ typ_key = tuple(typ.rsplit('.',1))
+ if typ_key not in self.deferred_printers:
+ # We may have it cached in the type map. We will have to
+ # iterate over all of the types to check.
+ for cls in self.type_printers:
+ if _mod_name_key(cls) == typ_key:
+ return self.type_printers[cls]
+ else:
+ return self.deferred_printers[typ_key]
+ else:
+ for cls in pretty._get_mro(typ):
+ if cls in self.type_printers or self._in_deferred_types(cls):
+ return self.type_printers[cls]
+
+ # If we have reached here, the lookup failed.
+ raise KeyError("No registered printer for {0!r}".format(typ))
+
+ def for_type(self, typ, func=None):
+ """Add a format function for a given type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+ The class of the object that will be formatted using `func`.
+
+ func : callable
+ A callable for computing the format data.
+ `func` will be called with the object to be formatted,
+ and will return the raw data in this formatter's format.
+ Subclasses may use a different call signature for the
+ `func` argument.
+
+ If `func` is None or not specified, there will be no change,
+ only returning the current value.
+
+ Returns
+ -------
+ oldfunc : callable
+ The currently registered callable.
+ If you are registering a new formatter,
+ this will be the previous value (to enable restoring later).
+ """
+ # if string given, interpret as 'pkg.module.class_name'
+ if isinstance(typ, str):
+ type_module, type_name = typ.rsplit('.', 1)
+ return self.for_type_by_name(type_module, type_name, func)
+
+ try:
+ oldfunc = self.lookup_by_type(typ)
+ except KeyError:
+ oldfunc = None
+
+ if func is not None:
+ self.type_printers[typ] = func
+
+ return oldfunc
+
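+ # Illustrative sketch (not part of upstream IPython): registering a per-type
+ # printer with ``for_type``. ``Money`` is a hypothetical user class.
+ #
+ #     html = get_ipython().display_formatter.formatters['text/html']
+ #     old = html.for_type(Money, lambda m: f"<b>{m.amount} {m.currency}</b>")
+ #     # ``old`` is the previously registered callable (or None) and can be
+ #     # passed back to ``for_type`` later to restore it.
+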
+ def for_type_by_name(self, type_module, type_name, func=None):
+ """Add a format function for a type specified by the full dotted
+ module and name of the type, rather than the type of the object.
+
+ Parameters
+ ----------
+ type_module : str
+ The full dotted name of the module the type is defined in, like
+ ``numpy``.
+
+ type_name : str
+ The name of the type (the class name), like ``dtype``
+
+ func : callable
+ A callable for computing the format data.
+ `func` will be called with the object to be formatted,
+ and will return the raw data in this formatter's format.
+ Subclasses may use a different call signature for the
+ `func` argument.
+
+ If `func` is None or unspecified, there will be no change,
+ only returning the current value.
+
+ Returns
+ -------
+ oldfunc : callable
+ The currently registered callable.
+ If you are registering a new formatter,
+ this will be the previous value (to enable restoring later).
+ """
+ key = (type_module, type_name)
+
+ try:
+ oldfunc = self.lookup_by_type("%s.%s" % key)
+ except KeyError:
+ oldfunc = None
+
+ if func is not None:
+ self.deferred_printers[key] = func
+ return oldfunc
+
+ def pop(self, typ, default=_raise_key_error):
+ """Pop a formatter for the given type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+ default : object
+ value to be returned if no formatter is registered for typ.
+
+ Returns
+ -------
+ obj : object
+ The last registered object for the type.
+
+ Raises
+ ------
+ KeyError if the type is not registered and default is not specified.
+ """
+
+ if isinstance(typ, str):
+ typ_key = tuple(typ.rsplit('.',1))
+ if typ_key not in self.deferred_printers:
+ # We may have it cached in the type map. We will have to
+ # iterate over all of the types to check.
+ for cls in self.type_printers:
+ if _mod_name_key(cls) == typ_key:
+ old = self.type_printers.pop(cls)
+ break
+ else:
+ old = default
+ else:
+ old = self.deferred_printers.pop(typ_key)
+ else:
+ if typ in self.type_printers:
+ old = self.type_printers.pop(typ)
+ else:
+ old = self.deferred_printers.pop(_mod_name_key(typ), default)
+ if old is _raise_key_error:
+ raise KeyError("No registered value for {0!r}".format(typ))
+ return old
+
+ def _in_deferred_types(self, cls):
+ """
+ Check if the given class is specified in the deferred type registry.
+
+ Successful matches will be moved to the regular type registry for future use.
+ """
+ mod = getattr(cls, '__module__', None)
+ name = getattr(cls, '__name__', None)
+ key = (mod, name)
+ if key in self.deferred_printers:
+ # Move the printer over to the regular registry.
+ printer = self.deferred_printers.pop(key)
+ self.type_printers[cls] = printer
+ return True
+ return False
+
+
+class PlainTextFormatter(BaseFormatter):
+ """The default pretty-printer.
+
+ This uses :mod:`IPython.lib.pretty` to compute the format data of
+ the object. If the object cannot be pretty printed, :func:`repr` is used.
+ See the documentation of :mod:`IPython.lib.pretty` for details on
+ how to write pretty printers. Here is a simple example::
+
+ def dtype_pprinter(obj, p, cycle):
+ if cycle:
+ return p.text('dtype(...)')
+ if hasattr(obj, 'fields'):
+ if obj.fields is None:
+ p.text(repr(obj))
+ else:
+ p.begin_group(7, 'dtype([')
+ for i, field in enumerate(obj.descr):
+ if i > 0:
+ p.text(',')
+ p.breakable()
+ p.pretty(field)
+ p.end_group(7, '])')
+ """
+
+ # The format type of data returned.
+ format_type = Unicode('text/plain')
+
+ # This subclass ignores this attribute as it always needs to return
+ # something.
+ enabled = Bool(True).tag(config=False)
+
+ max_seq_length = Integer(pretty.MAX_SEQ_LENGTH,
+ help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
+
+ Set to 0 to disable truncation.
+ """
+ ).tag(config=True)
+
+ # Look for a _repr_pretty_ methods to use for pretty printing.
+ print_method = ObjectName('_repr_pretty_')
+
+ # Whether to pretty-print or not.
+ pprint = Bool(True).tag(config=True)
+
+ # Whether to be verbose or not.
+ verbose = Bool(False).tag(config=True)
+
+ # The maximum width.
+ max_width = Integer(79).tag(config=True)
+
+ # The newline character.
+ newline = Unicode('\n').tag(config=True)
+
+ # format-string for pprinting floats
+ float_format = Unicode('%r')
+ # setter for float precision, either int or direct format-string
+ float_precision = CUnicode('').tag(config=True)
+
+ @observe('float_precision')
+ def _float_precision_changed(self, change):
+ """float_precision changed, set float_format accordingly.
+
+ float_precision can be set by int or str.
+ This will set float_format, after interpreting input.
+ If numpy has been imported, numpy print precision will also be set.
+
+ integer `n` sets format to '%.nf', otherwise, format set directly.
+
+ An empty string returns to defaults (repr for float, 8 for numpy).
+
+ This parameter can be set via the '%precision' magic.
+ """
+ new = change['new']
+ if '%' in new:
+ # got explicit format string
+ fmt = new
+ try:
+ fmt%3.14159
+ except Exception as e:
+ raise ValueError("Precision must be int or format string, not %r"%new) from e
+ elif new:
+ # otherwise, should be an int
+ try:
+ i = int(new)
+ assert i >= 0
+ except ValueError as e:
+ raise ValueError("Precision must be int or format string, not %r"%new) from e
+ except AssertionError as e:
+ raise ValueError("int precision must be non-negative, not %r"%i) from e
+
+ fmt = '%%.%if'%i
+ if 'numpy' in sys.modules:
+ # set numpy precision if it has been imported
+ import numpy
+ numpy.set_printoptions(precision=i)
+ else:
+ # default back to repr
+ fmt = '%r'
+ if 'numpy' in sys.modules:
+ import numpy
+ # numpy default is 8
+ numpy.set_printoptions(precision=8)
+ self.float_format = fmt
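+
+ # Illustrative sketch (not part of upstream IPython): the observer above is
+ # what backs the ``%precision`` magic, e.g.:
+ #
+ #     ptf = get_ipython().display_formatter.formatters['text/plain']
+ #     ptf.float_precision = 3       # floats now render via '%.3f'
+ #     ptf.float_precision = '%.2e'  # or pass an explicit format string
+ #     ptf.float_precision = ''      # back to plain repr()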
+
+ # Use the default pretty printers from IPython.lib.pretty.
+ @default('singleton_printers')
+ def _singleton_printers_default(self):
+ return pretty._singleton_pprinters.copy()
+
+ @default('type_printers')
+ def _type_printers_default(self):
+ d = pretty._type_pprinters.copy()
+ d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
+ # if NumPy is used, set precision for its float64 type
+ if "numpy" in sys.modules:
+ import numpy
+
+ d[numpy.float64] = lambda obj, p, cycle: p.text(self.float_format % obj)
+ return d
+
+ @default('deferred_printers')
+ def _deferred_printers_default(self):
+ return pretty._deferred_type_pprinters.copy()
+
+ #### FormatterABC interface ####
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the pretty representation of the object."""
+ if not self.pprint:
+ return repr(obj)
+ else:
+ stream = StringIO()
+ printer = pretty.RepresentationPrinter(stream, self.verbose,
+ self.max_width, self.newline,
+ max_seq_length=self.max_seq_length,
+ singleton_pprinters=self.singleton_printers,
+ type_pprinters=self.type_printers,
+ deferred_pprinters=self.deferred_printers)
+ printer.pretty(obj)
+ printer.flush()
+ return stream.getvalue()
+
+
+class HTMLFormatter(BaseFormatter):
+ """An HTML formatter.
+
+ To define the callables that compute the HTML representation of your
+ objects, define a :meth:`_repr_html_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a valid HTML snippet that
+ could be injected into an existing DOM. It should *not* include the
+ ``<html>`` or ``<body>`` tags.
+ """
+ format_type = Unicode('text/html')
+
+ print_method = ObjectName('_repr_html_')
+
+
+class MarkdownFormatter(BaseFormatter):
+ """A Markdown formatter.
+
+ To define the callables that compute the Markdown representation of your
+ objects, define a :meth:`_repr_markdown_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be valid Markdown.
+ """
+ format_type = Unicode('text/markdown')
+
+ print_method = ObjectName('_repr_markdown_')
+
+class SVGFormatter(BaseFormatter):
+ """An SVG formatter.
+
+ To define the callables that compute the SVG representation of your
+ objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be valid SVG enclosed in
+ ``<svg>`` tags, that could be injected into an existing DOM. It should
+ *not* include the ``<html>`` or ``<body>`` tags.
+ """
+ format_type = Unicode('image/svg+xml')
+
+ print_method = ObjectName('_repr_svg_')
+
+
+class PNGFormatter(BaseFormatter):
+ """A PNG formatter.
+
+ To define the callables that compute the PNG representation of your
+ objects, define a :meth:`_repr_png_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw PNG data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('image/png')
+
+ print_method = ObjectName('_repr_png_')
+
+ _return_type = (bytes, str)
+
+
+class JPEGFormatter(BaseFormatter):
+ """A JPEG formatter.
+
+ To define the callables that compute the JPEG representation of your
+ objects, define a :meth:`_repr_jpeg_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw JPEG data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('image/jpeg')
+
+ print_method = ObjectName('_repr_jpeg_')
+
+ _return_type = (bytes, str)
+
+
+class LatexFormatter(BaseFormatter):
+ """A LaTeX formatter.
+
+ To define the callables that compute the LaTeX representation of your
+ objects, define a :meth:`_repr_latex_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a valid LaTeX equation,
+ enclosed in either ``$``, ``$$`` or another LaTeX equation
+ environment.
+ """
+ format_type = Unicode('text/latex')
+
+ print_method = ObjectName('_repr_latex_')
+
+
+class JSONFormatter(BaseFormatter):
+ """A JSON string formatter.
+
+ To define the callables that compute the JSONable representation of
+ your objects, define a :meth:`_repr_json_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a JSONable list or dict.
+ JSON scalars (None, number, string) are not allowed, only dict or list containers.
+ """
+ format_type = Unicode('application/json')
+ _return_type = (list, dict)
+
+ print_method = ObjectName('_repr_json_')
+
+ def _check_return(self, r, obj):
+ """Check that a return value is appropriate
+
+ Return the value if so, None otherwise, warning if invalid.
+ """
+ if r is None:
+ return
+ md = None
+ if isinstance(r, tuple):
+ # unpack data, metadata tuple for type checking on first element
+ r, md = r
+
+ assert not isinstance(
+ r, str
+ ), "JSON-as-string has been deprecated since IPython < 3"
+
+ if md is not None:
+ # put the tuple back together
+ r = (r, md)
+ return super(JSONFormatter, self)._check_return(r, obj)
+
+
+class JavascriptFormatter(BaseFormatter):
+ """A Javascript formatter.
+
+ To define the callables that compute the Javascript representation of
+ your objects, define a :meth:`_repr_javascript_` method or use the
+ :meth:`for_type` or :meth:`for_type_by_name` methods to register functions
+ that handle this.
+
+ The return value of this formatter should be valid Javascript code and
+ should *not* be enclosed in ``<script>`` tags.
+ """
+ format_type = Unicode('application/javascript')
+
+ print_method = ObjectName('_repr_javascript_')
+
+
+class PDFFormatter(BaseFormatter):
+ """A PDF formatter.
+
+ To define the callables that compute the PDF representation of your
+ objects, define a :meth:`_repr_pdf_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw PDF data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('application/pdf')
+
+ print_method = ObjectName('_repr_pdf_')
+
+ _return_type = (bytes, str)
+
+class IPythonDisplayFormatter(BaseFormatter):
+ """An escape-hatch Formatter for objects that know how to display themselves.
+
+ To define the callables that compute the representation of your
+ objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this. Unlike mime-type displays, this method should not return anything,
+ instead calling any appropriate display methods itself.
+
+ This display formatter has highest priority.
+ If it fires, no other display formatter will be called.
+
+ Prior to IPython 6.1, `_ipython_display_` was the only way to display custom mime-types
+ without registering a new Formatter.
+
+ IPython 6.1 introduces `_repr_mimebundle_` for displaying custom mime-types,
+ so `_ipython_display_` should only be used for objects that require unusual
+ display patterns, such as multiple display calls.
+ """
+ print_method = ObjectName('_ipython_display_')
+ _return_type = (type(None), bool)
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the format for an object."""
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ printer(obj)
+ return True
+ # Finally look for special method names
+ method = get_real_method(obj, self.print_method)
+ if method is not None:
+ method()
+ return True
+
+
+class MimeBundleFormatter(BaseFormatter):
+ """A Formatter for arbitrary mime-types.
+
+ Unlike other `_repr_<mimetype>_` methods,
+ `_repr_mimebundle_` should return mime-bundle data,
+ either the mime-keyed `data` dictionary or the tuple `(data, metadata)`.
+ Any mime-type is valid.
+
+ To define the callables that compute the mime-bundle representation of your
+ objects, define a :meth:`_repr_mimebundle_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ .. versionadded:: 6.1
+ """
+ print_method = ObjectName('_repr_mimebundle_')
+ _return_type = dict
+
+ def _check_return(self, r, obj):
+ r = super(MimeBundleFormatter, self)._check_return(r, obj)
+ # always return (data, metadata):
+ if r is None:
+ return {}, {}
+ if not isinstance(r, tuple):
+ return r, {}
+ return r
+
+ @catch_format_error
+ def __call__(self, obj, include=None, exclude=None):
+ """Compute the format for an object.
+
+ Identical to the parent's method, except that extra parameters are
+ passed through to the print method.
+
+ Unlike other ``_repr_*_`` methods, ``_repr_mimebundle_`` should accept
+ extra keyword arguments, in particular `include` and `exclude`.
+ """
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ return printer(obj)
+ # Finally look for special method names
+ method = get_real_method(obj, self.print_method)
+
+ if method is not None:
+ return method(include=include, exclude=exclude)
+ return None
+ else:
+ return None
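+
+ # Illustrative sketch (not part of upstream IPython): a minimal object that
+ # this formatter would pick up. ``Report`` is a hypothetical user class.
+ #
+ #     class Report:
+ #         def _repr_mimebundle_(self, include=None, exclude=None):
+ #             data = {
+ #                 "text/plain": "Report()",
+ #                 "text/html": "<h1>Report</h1>",
+ #             }
+ #             return data, {}   # (data, metadata)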
+
+
+FormatterABC.register(BaseFormatter)
+FormatterABC.register(PlainTextFormatter)
+FormatterABC.register(HTMLFormatter)
+FormatterABC.register(MarkdownFormatter)
+FormatterABC.register(SVGFormatter)
+FormatterABC.register(PNGFormatter)
+FormatterABC.register(PDFFormatter)
+FormatterABC.register(JPEGFormatter)
+FormatterABC.register(LatexFormatter)
+FormatterABC.register(JSONFormatter)
+FormatterABC.register(JavascriptFormatter)
+FormatterABC.register(IPythonDisplayFormatter)
+FormatterABC.register(MimeBundleFormatter)
+
+
+def format_display_data(obj, include=None, exclude=None):
+ """Return a format data dict for an object.
+
+ By default all format types will be computed.
+
+ Parameters
+ ----------
+ obj : object
+ The Python object whose format data will be computed.
+ include : list or tuple, optional
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set, *only* the format types included
+ in this list will be computed.
+ exclude : list or tuple, optional
+ A list of format type strings (MIME types) to exclude from the format
+ data dict. If this is set, all format types will be computed,
+ except for those included in this argument.
+
+ Returns
+ -------
+ format_dict : dict
+ A dictionary of key/value pairs, one for each format that was
+ generated for the object. The keys are the format types, which
+ will usually be MIME type strings, and the values are JSON'able
+ data structures containing the raw data for the representation in
+ that format.
+ """
+ from .interactiveshell import InteractiveShell
+
+ return InteractiveShell.instance().display_formatter.format(
+ obj,
+ include,
+ exclude
+ )
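+
+# Illustrative usage sketch (not part of upstream IPython); requires a running
+# (or instantiable) InteractiveShell:
+#
+#     fmt, meta = format_display_data([1, 2, 3], include=["text/plain"])
+#     # fmt  == {'text/plain': '[1, 2, 3]'}
+#     # meta == {}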
diff --git a/contrib/python/ipython/py3/IPython/core/getipython.py b/contrib/python/ipython/py3/IPython/core/getipython.py
new file mode 100644
index 0000000000..5e9b13cf3c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/getipython.py
@@ -0,0 +1,24 @@
+# encoding: utf-8
+"""Simple function to call to get the current InteractiveShell instance
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+def get_ipython():
+ """Get the global InteractiveShell instance.
+
+ Returns None if no InteractiveShell instance is registered.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ return InteractiveShell.instance()
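+
+# Illustrative usage sketch (not part of upstream IPython): library code can
+# call this to adapt its behaviour depending on whether IPython is running.
+#
+#     ip = get_ipython()
+#     if ip is not None:
+#         ip.run_line_magic("matplotlib", "inline")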
diff --git a/contrib/python/ipython/py3/IPython/core/guarded_eval.py b/contrib/python/ipython/py3/IPython/core/guarded_eval.py
new file mode 100644
index 0000000000..d576a2a769
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/guarded_eval.py
@@ -0,0 +1,733 @@
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Set,
+ Sequence,
+ Tuple,
+ NamedTuple,
+ Type,
+ Literal,
+ Union,
+ TYPE_CHECKING,
+)
+import ast
+import builtins
+import collections
+import operator
+import sys
+from functools import cached_property
+from dataclasses import dataclass, field
+from types import MethodDescriptorType, ModuleType
+
+from IPython.utils.docs import GENERATING_DOCUMENTATION
+from IPython.utils.decorators import undoc
+
+
+if TYPE_CHECKING or GENERATING_DOCUMENTATION:
+ from typing_extensions import Protocol
+else:
+ # do not require on runtime
+ Protocol = object # requires Python >=3.8
+
+
+@undoc
+class HasGetItem(Protocol):
+ def __getitem__(self, key) -> None:
+ ...
+
+
+@undoc
+class InstancesHaveGetItem(Protocol):
+ def __call__(self, *args, **kwargs) -> HasGetItem:
+ ...
+
+
+@undoc
+class HasGetAttr(Protocol):
+ def __getattr__(self, key) -> None:
+ ...
+
+
+@undoc
+class DoesNotHaveGetAttr(Protocol):
+ pass
+
+
+# By default `__getattr__` is not explicitly implemented on most objects
+MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
+
+
+def _unbind_method(func: Callable) -> Union[Callable, None]:
+ """Get unbound method for given bound method.
+
+ Returns None if the unbound method cannot be obtained, or if the method is already unbound.
+ """
+ owner = getattr(func, "__self__", None)
+ owner_class = type(owner)
+ name = getattr(func, "__name__", None)
+ instance_dict_overrides = getattr(owner, "__dict__", None)
+ if (
+ owner is not None
+ and name
+ and (
+ not instance_dict_overrides
+ or (instance_dict_overrides and name not in instance_dict_overrides)
+ )
+ ):
+ return getattr(owner_class, name)
+ return None
+
+
+@undoc
+@dataclass
+class EvaluationPolicy:
+ """Definition of evaluation policy."""
+
+ allow_locals_access: bool = False
+ allow_globals_access: bool = False
+ allow_item_access: bool = False
+ allow_attr_access: bool = False
+ allow_builtins_access: bool = False
+ allow_all_operations: bool = False
+ allow_any_calls: bool = False
+ allowed_calls: Set[Callable] = field(default_factory=set)
+
+ def can_get_item(self, value, item):
+ return self.allow_item_access
+
+ def can_get_attr(self, value, attr):
+ return self.allow_attr_access
+
+ def can_operate(self, dunders: Tuple[str, ...], a, b=None):
+ if self.allow_all_operations:
+ return True
+
+ def can_call(self, func):
+ if self.allow_any_calls:
+ return True
+
+ if func in self.allowed_calls:
+ return True
+
+ owner_method = _unbind_method(func)
+
+ if owner_method and owner_method in self.allowed_calls:
+ return True
+
+
+def _get_external(module_name: str, access_path: Sequence[str]):
+ """Get value from external module given a dotted access path.
+
+ Raises:
+ * `KeyError` if the module was removed or not found, and
+ * `AttributeError` if the access path does not match an exported object
+ """
+ member_type = sys.modules[module_name]
+ for attr in access_path:
+ member_type = getattr(member_type, attr)
+ return member_type
+
+
+def _has_original_dunder_external(
+ value,
+ module_name: str,
+ access_path: Sequence[str],
+ method_name: str,
+):
+ if module_name not in sys.modules:
+ # LBYL (look before you leap) as it is faster
+ return False
+ try:
+ member_type = _get_external(module_name, access_path)
+ value_type = type(value)
+ if type(value) == member_type:
+ return True
+ if method_name == "__getattribute__":
+ # we have to short-circuit here due to an unresolved issue in
+ # `isinstance` implementation: https://bugs.python.org/issue32683
+ return False
+ if isinstance(value, member_type):
+ method = getattr(value_type, method_name, None)
+ member_method = getattr(member_type, method_name, None)
+ if member_method == method:
+ return True
+ except (AttributeError, KeyError):
+ return False
+
+
+def _has_original_dunder(
+ value, allowed_types, allowed_methods, allowed_external, method_name
+):
+ # note: Python ignores `__getattr__`/`__getitem__` on instances,
+ # we only need to check at class level
+ value_type = type(value)
+
+ # strict type check passes → no need to check method
+ if value_type in allowed_types:
+ return True
+
+ method = getattr(value_type, method_name, None)
+
+ if method is None:
+ return None
+
+ if method in allowed_methods:
+ return True
+
+ for module_name, *access_path in allowed_external:
+ if _has_original_dunder_external(value, module_name, access_path, method_name):
+ return True
+
+ return False
+
+
+@undoc
+@dataclass
+class SelectivePolicy(EvaluationPolicy):
+ allowed_getitem: Set[InstancesHaveGetItem] = field(default_factory=set)
+ allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)
+
+ allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
+ allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)
+
+ allowed_operations: Set = field(default_factory=set)
+ allowed_operations_external: Set[Tuple[str, ...]] = field(default_factory=set)
+
+ _operation_methods_cache: Dict[str, Set[Callable]] = field(
+ default_factory=dict, init=False
+ )
+
+ def can_get_attr(self, value, attr):
+ has_original_attribute = _has_original_dunder(
+ value,
+ allowed_types=self.allowed_getattr,
+ allowed_methods=self._getattribute_methods,
+ allowed_external=self.allowed_getattr_external,
+ method_name="__getattribute__",
+ )
+ has_original_attr = _has_original_dunder(
+ value,
+ allowed_types=self.allowed_getattr,
+ allowed_methods=self._getattr_methods,
+ allowed_external=self.allowed_getattr_external,
+ method_name="__getattr__",
+ )
+
+ accept = False
+
+ # Many objects do not have `__getattr__`; this is fine.
+ if has_original_attr is None and has_original_attribute:
+ accept = True
+ else:
+ # Accept objects without modifications to `__getattr__` and `__getattribute__`
+ accept = has_original_attr and has_original_attribute
+
+ if accept:
+ # We still need to check for overridden properties.
+
+ value_class = type(value)
+ if not hasattr(value_class, attr):
+ return True
+
+ class_attr_val = getattr(value_class, attr)
+ is_property = isinstance(class_attr_val, property)
+
+ if not is_property:
+ return True
+
+ # Properties in allowed types are ok (although we do not include any
+ # properties in our default allow list currently).
+ if type(value) in self.allowed_getattr:
+ return True # pragma: no cover
+
+ # Properties in subclasses of allowed types may be ok if not changed
+ for module_name, *access_path in self.allowed_getattr_external:
+ try:
+ external_class = _get_external(module_name, access_path)
+ external_class_attr_val = getattr(external_class, attr)
+ except (KeyError, AttributeError):
+ return False # pragma: no cover
+ return class_attr_val == external_class_attr_val
+
+ return False
+
+ def can_get_item(self, value, item):
+ """Allow accessing `__getiitem__` of allow-listed instances unless it was not modified."""
+ return _has_original_dunder(
+ value,
+ allowed_types=self.allowed_getitem,
+ allowed_methods=self._getitem_methods,
+ allowed_external=self.allowed_getitem_external,
+ method_name="__getitem__",
+ )
+
+ def can_operate(self, dunders: Tuple[str, ...], a, b=None):
+ objects = [a]
+ if b is not None:
+ objects.append(b)
+ return all(
+ [
+ _has_original_dunder(
+ obj,
+ allowed_types=self.allowed_operations,
+ allowed_methods=self._operator_dunder_methods(dunder),
+ allowed_external=self.allowed_operations_external,
+ method_name=dunder,
+ )
+ for dunder in dunders
+ for obj in objects
+ ]
+ )
+
+ def _operator_dunder_methods(self, dunder: str) -> Set[Callable]:
+ if dunder not in self._operation_methods_cache:
+ self._operation_methods_cache[dunder] = self._safe_get_methods(
+ self.allowed_operations, dunder
+ )
+ return self._operation_methods_cache[dunder]
+
+ @cached_property
+ def _getitem_methods(self) -> Set[Callable]:
+ return self._safe_get_methods(self.allowed_getitem, "__getitem__")
+
+ @cached_property
+ def _getattr_methods(self) -> Set[Callable]:
+ return self._safe_get_methods(self.allowed_getattr, "__getattr__")
+
+ @cached_property
+ def _getattribute_methods(self) -> Set[Callable]:
+ return self._safe_get_methods(self.allowed_getattr, "__getattribute__")
+
+ def _safe_get_methods(self, classes, name) -> Set[Callable]:
+ return {
+ method
+ for class_ in classes
+ for method in [getattr(class_, name, None)]
+ if method
+ }
+
+
+class _DummyNamedTuple(NamedTuple):
+ """Used internally to retrieve methods of named tuple instance."""
+
+
+class EvaluationContext(NamedTuple):
+ #: Local namespace
+ locals: dict
+ #: Global namespace
+ globals: dict
+ #: Evaluation policy identifier
+ evaluation: Literal[
+ "forbidden", "minimal", "limited", "unsafe", "dangerous"
+ ] = "forbidden"
+ #: Whether the evaluation of code takes place inside of a subscript.
+ #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``.
+ in_subscript: bool = False
+
+
+class _IdentitySubscript:
+ """Returns the key itself when item is requested via subscript."""
+
+ def __getitem__(self, key):
+ return key
+
+
+IDENTITY_SUBSCRIPT = _IdentitySubscript()
+SUBSCRIPT_MARKER = "__SUBSCRIPT_SENTINEL__"
+
+
+class GuardRejection(Exception):
+ """Exception raised when guard rejects evaluation attempt."""
+
+ pass
+
+
+def guarded_eval(code: str, context: EvaluationContext):
+ """Evaluate provided code in the evaluation context.
+
+ If the evaluation policy given by the context is set to ``forbidden``,
+ no evaluation will be performed; if it is set to ``dangerous``,
+ the standard :func:`eval` will be used; finally, for any other policy,
+ :func:`eval_node` will be called on the parsed AST.
+ """
+ locals_ = context.locals
+
+ if context.evaluation == "forbidden":
+ raise GuardRejection("Forbidden mode")
+
+ # note: not using `ast.literal_eval` as it does not implement
+ # getitem at all, for example it fails on simple `[0][1]`
+
+ if context.in_subscript:
+ # syntactic sugar for ellipsis (:) is only available in subscripts
+ # so we need to trick the ast parser into thinking that we have
+ # a subscript, but we need to be able to later recognise that we did
+ # it so we can ignore the actual __getitem__ operation
+ if not code:
+ return tuple()
+ locals_ = locals_.copy()
+ locals_[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT
+ code = SUBSCRIPT_MARKER + "[" + code + "]"
+ context = EvaluationContext(**{**context._asdict(), **{"locals": locals_}})
+
+ if context.evaluation == "dangerous":
+ return eval(code, context.globals, context.locals)
+
+ expression = ast.parse(code, mode="eval")
+
+ return eval_node(expression, context)
+
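+# Illustrative usage sketch (not part of upstream IPython): evaluating a
+# completion-time expression under the ``limited`` policy. ``data`` is a
+# hypothetical user variable.
+#
+#     context = EvaluationContext(
+#         locals={"data": {"x": [1, 2, 3]}}, globals={}, evaluation="limited"
+#     )
+#     guarded_eval("data['x'][0] + 1", context)   # -> 2
+#     guarded_eval("data.pop('x')", context)      # raises GuardRejection
+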
+
+BINARY_OP_DUNDERS: Dict[Type[ast.operator], Tuple[str]] = {
+ ast.Add: ("__add__",),
+ ast.Sub: ("__sub__",),
+ ast.Mult: ("__mul__",),
+ ast.Div: ("__truediv__",),
+ ast.FloorDiv: ("__floordiv__",),
+ ast.Mod: ("__mod__",),
+ ast.Pow: ("__pow__",),
+ ast.LShift: ("__lshift__",),
+ ast.RShift: ("__rshift__",),
+ ast.BitOr: ("__or__",),
+ ast.BitXor: ("__xor__",),
+ ast.BitAnd: ("__and__",),
+ ast.MatMult: ("__matmul__",),
+}
+
+COMP_OP_DUNDERS: Dict[Type[ast.cmpop], Tuple[str, ...]] = {
+ ast.Eq: ("__eq__",),
+ ast.NotEq: ("__ne__", "__eq__"),
+ ast.Lt: ("__lt__", "__gt__"),
+ ast.LtE: ("__le__", "__ge__"),
+ ast.Gt: ("__gt__", "__lt__"),
+ ast.GtE: ("__ge__", "__le__"),
+ ast.In: ("__contains__",),
+ # Note: ast.Is, ast.IsNot, ast.NotIn are handled specially
+}
+
+UNARY_OP_DUNDERS: Dict[Type[ast.unaryop], Tuple[str, ...]] = {
+ ast.USub: ("__neg__",),
+ ast.UAdd: ("__pos__",),
+ # we have to check both __inv__ and __invert__!
+ ast.Invert: ("__invert__", "__inv__"),
+ ast.Not: ("__not__",),
+}
+
+
+def _find_dunder(node_op, dunders) -> Union[Tuple[str, ...], None]:
+ dunder = None
+ for op, candidate_dunder in dunders.items():
+ if isinstance(node_op, op):
+ dunder = candidate_dunder
+ return dunder
+
+
+def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
+ """Evaluate AST node in provided context.
+
+ Applies evaluation restrictions defined in the context. Currently does not support evaluation of functions with keyword arguments.
+
+ Does not evaluate actions that always have side effects:
+
+ - class definitions (``class sth: ...``)
+ - function definitions (``def sth(): ...``)
+ - variable assignments (``x = 1``)
+ - augmented assignments (``x += 1``)
+ - deletions (``del x``)
+
+ Does not evaluate operations which do not return values:
+
+ - assertions (``assert x``)
+ - pass (``pass``)
+ - imports (``import x``)
+ - control flow:
+
+ - conditionals (``if x:``) except for ternary IfExp (``a if x else b``)
+ - loops (``for`` and ``while``)
+ - exception handling
+
+ The purpose of this function is to guard against unwanted side-effects;
+ it does not give guarantees on protection from malicious code execution.
+ """
+ policy = EVALUATION_POLICIES[context.evaluation]
+ if node is None:
+ return None
+ if isinstance(node, ast.Expression):
+ return eval_node(node.body, context)
+ if isinstance(node, ast.BinOp):
+ left = eval_node(node.left, context)
+ right = eval_node(node.right, context)
+ dunders = _find_dunder(node.op, BINARY_OP_DUNDERS)
+ if dunders:
+ if policy.can_operate(dunders, left, right):
+ return getattr(left, dunders[0])(right)
+ else:
+ raise GuardRejection(
+ f"Operation (`{dunders}`) for",
+ type(left),
+ f"not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.Compare):
+ left = eval_node(node.left, context)
+ all_true = True
+ negate = False
+ for op, right in zip(node.ops, node.comparators):
+ right = eval_node(right, context)
+ dunder = None
+ dunders = _find_dunder(op, COMP_OP_DUNDERS)
+ if not dunders:
+ if isinstance(op, ast.NotIn):
+ dunders = COMP_OP_DUNDERS[ast.In]
+ negate = True
+ if isinstance(op, ast.Is):
+ dunder = "is_"
+ if isinstance(op, ast.IsNot):
+ dunder = "is_"
+ negate = True
+ if not dunder and dunders:
+ dunder = dunders[0]
+ if dunder:
+ a, b = (right, left) if dunder == "__contains__" else (left, right)
+ if dunder == "is_" or dunders and policy.can_operate(dunders, a, b):
+ result = getattr(operator, dunder)(a, b)
+ if negate:
+ result = not result
+ if not result:
+ all_true = False
+ left = right
+ else:
+ raise GuardRejection(
+ f"Comparison (`{dunder}`) for",
+ type(left),
+ f"not allowed in {context.evaluation} mode",
+ )
+ else:
+ raise ValueError(
+ f"Comparison `{dunder}` not supported"
+ ) # pragma: no cover
+ return all_true
+ if isinstance(node, ast.Constant):
+ return node.value
+ if isinstance(node, ast.Tuple):
+ return tuple(eval_node(e, context) for e in node.elts)
+ if isinstance(node, ast.List):
+ return [eval_node(e, context) for e in node.elts]
+ if isinstance(node, ast.Set):
+ return {eval_node(e, context) for e in node.elts}
+ if isinstance(node, ast.Dict):
+ return dict(
+ zip(
+ [eval_node(k, context) for k in node.keys],
+ [eval_node(v, context) for v in node.values],
+ )
+ )
+ if isinstance(node, ast.Slice):
+ return slice(
+ eval_node(node.lower, context),
+ eval_node(node.upper, context),
+ eval_node(node.step, context),
+ )
+ if isinstance(node, ast.UnaryOp):
+ value = eval_node(node.operand, context)
+ dunders = _find_dunder(node.op, UNARY_OP_DUNDERS)
+ if dunders:
+ if policy.can_operate(dunders, value):
+ return getattr(value, dunders[0])()
+ else:
+ raise GuardRejection(
+ f"Operation (`{dunders}`) for",
+ type(value),
+ f"not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.Subscript):
+ value = eval_node(node.value, context)
+ slice_ = eval_node(node.slice, context)
+ if policy.can_get_item(value, slice_):
+ return value[slice_]
+ raise GuardRejection(
+ "Subscript access (`__getitem__`) for",
+ type(value), # not joined to avoid calling `repr`
+ f" not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.Name):
+ if policy.allow_locals_access and node.id in context.locals:
+ return context.locals[node.id]
+ if policy.allow_globals_access and node.id in context.globals:
+ return context.globals[node.id]
+ if policy.allow_builtins_access and hasattr(builtins, node.id):
+ # note: do not use __builtins__, it is implementation detail of cPython
+ return getattr(builtins, node.id)
+ if not policy.allow_globals_access and not policy.allow_locals_access:
+ raise GuardRejection(
+ f"Namespace access not allowed in {context.evaluation} mode"
+ )
+ else:
+ raise NameError(f"{node.id} not found in locals, globals, nor builtins")
+ if isinstance(node, ast.Attribute):
+ value = eval_node(node.value, context)
+ if policy.can_get_attr(value, node.attr):
+ return getattr(value, node.attr)
+ raise GuardRejection(
+ "Attribute access (`__getattr__`) for",
+ type(value), # not joined to avoid calling `repr`
+ f"not allowed in {context.evaluation} mode",
+ )
+ if isinstance(node, ast.IfExp):
+ test = eval_node(node.test, context)
+ if test:
+ return eval_node(node.body, context)
+ else:
+ return eval_node(node.orelse, context)
+ if isinstance(node, ast.Call):
+ func = eval_node(node.func, context)
+ if policy.can_call(func) and not node.keywords:
+ args = [eval_node(arg, context) for arg in node.args]
+ return func(*args)
+ raise GuardRejection(
+ "Call for",
+ func, # not joined to avoid calling `repr`
+ f"not allowed in {context.evaluation} mode",
+ )
+ raise ValueError("Unhandled node", ast.dump(node))
+
+
+SUPPORTED_EXTERNAL_GETITEM = {
+ ("pandas", "core", "indexing", "_iLocIndexer"),
+ ("pandas", "core", "indexing", "_LocIndexer"),
+ ("pandas", "DataFrame"),
+ ("pandas", "Series"),
+ ("numpy", "ndarray"),
+ ("numpy", "void"),
+}
+
+
+BUILTIN_GETITEM: Set[InstancesHaveGetItem] = {
+ dict,
+ str, # type: ignore[arg-type]
+ bytes, # type: ignore[arg-type]
+ list,
+ tuple,
+ collections.defaultdict,
+ collections.deque,
+ collections.OrderedDict,
+ collections.ChainMap,
+ collections.UserDict,
+ collections.UserList,
+ collections.UserString, # type: ignore[arg-type]
+ _DummyNamedTuple,
+ _IdentitySubscript,
+}
+
+
+def _list_methods(cls, source=None):
+ """For use on immutable objects or with methods returning a copy"""
+ return [getattr(cls, k) for k in (source if source else dir(cls))]
+
+
+dict_non_mutating_methods = ("copy", "keys", "values", "items")
+list_non_mutating_methods = ("copy", "index", "count")
+set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))
+
+
+dict_keys: Type[collections.abc.KeysView] = type({}.keys())
+
+NUMERICS = {int, float, complex}
+
+ALLOWED_CALLS = {
+ bytes,
+ *_list_methods(bytes),
+ dict,
+ *_list_methods(dict, dict_non_mutating_methods),
+ dict_keys.isdisjoint,
+ list,
+ *_list_methods(list, list_non_mutating_methods),
+ set,
+ *_list_methods(set, set_non_mutating_methods),
+ frozenset,
+ *_list_methods(frozenset),
+ range,
+ str,
+ *_list_methods(str),
+ tuple,
+ *_list_methods(tuple),
+ *NUMERICS,
+ *[method for numeric_cls in NUMERICS for method in _list_methods(numeric_cls)],
+ collections.deque,
+ *_list_methods(collections.deque, list_non_mutating_methods),
+ collections.defaultdict,
+ *_list_methods(collections.defaultdict, dict_non_mutating_methods),
+ collections.OrderedDict,
+ *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
+ collections.UserDict,
+ *_list_methods(collections.UserDict, dict_non_mutating_methods),
+ collections.UserList,
+ *_list_methods(collections.UserList, list_non_mutating_methods),
+ collections.UserString,
+ *_list_methods(collections.UserString, dir(str)),
+ collections.Counter,
+ *_list_methods(collections.Counter, dict_non_mutating_methods),
+ collections.Counter.elements,
+ collections.Counter.most_common,
+}
+
+BUILTIN_GETATTR: Set[MayHaveGetattr] = {
+ *BUILTIN_GETITEM,
+ set,
+ frozenset,
+ object,
+ type, # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
+ *NUMERICS,
+ dict_keys,
+ MethodDescriptorType,
+ ModuleType,
+}
+
+
+BUILTIN_OPERATIONS = {*BUILTIN_GETATTR}
+
+EVALUATION_POLICIES = {
+ "minimal": EvaluationPolicy(
+ allow_builtins_access=True,
+ allow_locals_access=False,
+ allow_globals_access=False,
+ allow_item_access=False,
+ allow_attr_access=False,
+ allowed_calls=set(),
+ allow_any_calls=False,
+ allow_all_operations=False,
+ ),
+ "limited": SelectivePolicy(
+ allowed_getitem=BUILTIN_GETITEM,
+ allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
+ allowed_getattr=BUILTIN_GETATTR,
+ allowed_getattr_external={
+ # pandas Series/Frame implements custom `__getattr__`
+ ("pandas", "DataFrame"),
+ ("pandas", "Series"),
+ },
+ allowed_operations=BUILTIN_OPERATIONS,
+ allow_builtins_access=True,
+ allow_locals_access=True,
+ allow_globals_access=True,
+ allowed_calls=ALLOWED_CALLS,
+ ),
+ "unsafe": EvaluationPolicy(
+ allow_builtins_access=True,
+ allow_locals_access=True,
+ allow_globals_access=True,
+ allow_attr_access=True,
+ allow_item_access=True,
+ allow_any_calls=True,
+ allow_all_operations=True,
+ ),
+}
+
+
+__all__ = [
+ "guarded_eval",
+ "eval_node",
+ "GuardRejection",
+ "EvaluationContext",
+ "_unbind_method",
+]
diff --git a/contrib/python/ipython/py3/IPython/core/history.py b/contrib/python/ipython/py3/IPython/core/history.py
new file mode 100644
index 0000000000..fd5a8680bf
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/history.py
@@ -0,0 +1,968 @@
+""" History related magics and functionality """
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import atexit
+import datetime
+from pathlib import Path
+import re
+import sqlite3
+import threading
+
+from traitlets.config.configurable import LoggingConfigurable
+from decorator import decorator
+from IPython.utils.decorators import undoc
+from IPython.paths import locate_profile
+from traitlets import (
+ Any,
+ Bool,
+ Dict,
+ Instance,
+ Integer,
+ List,
+ Unicode,
+ Union,
+ TraitError,
+ default,
+ observe,
+)
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+@undoc
+class DummyDB(object):
+ """Dummy DB that will act as a black hole for history.
+
+ Only used in the absence of sqlite"""
+ def execute(*args, **kwargs):
+ return []
+
+ def commit(self, *args, **kwargs):
+ pass
+
+ def __enter__(self, *args, **kwargs):
+ pass
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+
+@decorator
+def only_when_enabled(f, self, *a, **kw):
+ """Decorator: return an empty list in the absence of sqlite."""
+ if not self.enabled:
+ return []
+ else:
+ return f(self, *a, **kw)
+
+
+# use 16kB as threshold for whether a corrupt history db should be saved
+# that should be at least 100 entries or so
+_SAVE_DB_SIZE = 16384
+
+@decorator
+def catch_corrupt_db(f, self, *a, **kw):
+ """A decorator which wraps HistoryAccessor method calls to catch errors from
+ a corrupt SQLite database, move the old database out of the way, and create
+ a new one.
+
+ We avoid clobbering larger databases because this may be triggered due to filesystem issues,
+ not just a corrupt file.
+ """
+ try:
+ return f(self, *a, **kw)
+ except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
+ self._corrupt_db_counter += 1
+ self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
+ if self.hist_file != ':memory:':
+ if self._corrupt_db_counter > self._corrupt_db_limit:
+ self.hist_file = ':memory:'
+ self.log.error("Failed to load history too many times, history will not be saved.")
+ elif self.hist_file.is_file():
+ # move the file out of the way
+ base = str(self.hist_file.parent / self.hist_file.stem)
+ ext = self.hist_file.suffix
+ size = self.hist_file.stat().st_size
+ if size >= _SAVE_DB_SIZE:
+ # if there's significant content, avoid clobbering
+ now = datetime.datetime.now().isoformat().replace(':', '.')
+ newpath = base + '-corrupt-' + now + ext
+ # don't clobber previous corrupt backups
+ for i in range(100):
+ if not Path(newpath).exists():
+ break
+ else:
+ newpath = base + '-corrupt-' + now + (u'-%i' % i) + ext
+ else:
+ # not much content, possibly empty; don't worry about clobbering
+ # maybe we should just delete it?
+ newpath = base + '-corrupt' + ext
+ self.hist_file.rename(newpath)
+ self.log.error("History file was moved to %s and a new file created.", newpath)
+ self.init_db()
+ return []
+ else:
+ # Failed with :memory:, something serious is wrong
+ raise
+
+
+class HistoryAccessorBase(LoggingConfigurable):
+ """An abstract class for History Accessors """
+
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ raise NotImplementedError
+
+ def search(self, pattern="*", raw=True, search_raw=True,
+ output=False, n=None, unique=False):
+ raise NotImplementedError
+
+ def get_range(self, session, start=1, stop=None, raw=True,output=False):
+ raise NotImplementedError
+
+ def get_range_by_str(self, rangestr, raw=True, output=False):
+ raise NotImplementedError
+
+
+class HistoryAccessor(HistoryAccessorBase):
+ """Access the history database without adding to it.
+
+ This is intended for use by standalone history tools. IPython shells use
+ HistoryManager, below, which is a subclass of this."""
+
+ # counter for init_db retries, so we don't keep trying over and over
+ _corrupt_db_counter = 0
+ # after two failures, fallback on :memory:
+ _corrupt_db_limit = 2
+
+ # String holding the path to the history file
+ hist_file = Union(
+ [Instance(Path), Unicode()],
+ help="""Path to file to use for SQLite history database.
+
+ By default, IPython will put the history database in the IPython
+ profile directory. If you would rather share one history among
+ profiles, you can set this value in each, so that they are consistent.
+
+ Due to an issue with fcntl, SQLite is known to misbehave on some NFS
+ mounts. If you see IPython hanging, try setting this to something on a
+ local disk, e.g::
+
+ ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
+
+ you can also use the specific value `:memory:` (including the colons
+ at both ends but not the backticks), to avoid creating a history file.
+
+ """,
+ ).tag(config=True)
+
+ enabled = Bool(True,
+ help="""enable the SQLite history
+
+ set enabled=False to disable the SQLite history,
+ in which case there will be no stored history, no SQLite connection,
+ and no background saving thread. This may be necessary in some
+ threaded environments where IPython is embedded.
+ """,
+ ).tag(config=True)
+
+ connection_options = Dict(
+ help="""Options for configuring the SQLite connection
+
+ These options are passed as keyword args to sqlite3.connect
+ when establishing database connections.
+ """
+ ).tag(config=True)
+
+ # The SQLite database
+ db = Any()
+ @observe('db')
+ def _db_changed(self, change):
+ """validate the db, since it can be an Instance of two different types"""
+ new = change['new']
+ connection_types = (DummyDB, sqlite3.Connection)
+ if not isinstance(new, connection_types):
+ msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
+ (self.__class__.__name__, new)
+ raise TraitError(msg)
+
+ def __init__(self, profile="default", hist_file="", **traits):
+ """Create a new history accessor.
+
+ Parameters
+ ----------
+ profile : str
+ The name of the profile from which to open history.
+ hist_file : str
+ Path to an SQLite history database stored by IPython. If specified,
+ hist_file overrides profile.
+ config : :class:`~traitlets.config.loader.Config`
+ Config object. hist_file can also be set through this.
+ """
+ super(HistoryAccessor, self).__init__(**traits)
+ # defer setting hist_file from kwarg until after init,
+ # otherwise the default kwarg value would clobber any value
+ # set by config
+ if hist_file:
+ self.hist_file = hist_file
+
+ try:
+ self.hist_file
+ except TraitError:
+ # No one has set the hist_file, yet.
+ self.hist_file = self._get_hist_file_name(profile)
+
+ self.init_db()
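+
+ # Illustrative usage sketch (not part of upstream IPython): reading history
+ # from a standalone tool, outside of a running shell.
+ #
+ #     hist = HistoryAccessor(profile="default")
+ #     for session, line, source in hist.get_tail(5, include_latest=True):
+ #         print(session, line, source)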
+
+ def _get_hist_file_name(self, profile='default'):
+ """Find the history file for the given profile name.
+
+ This is overridden by the HistoryManager subclass, to use the shell's
+ active profile.
+
+ Parameters
+ ----------
+ profile : str
+ The name of a profile which has a history file.
+ """
+ return Path(locate_profile(profile)) / "history.sqlite"
+
+ @catch_corrupt_db
+ def init_db(self):
+ """Connect to the database, and create tables if necessary."""
+ if not self.enabled:
+ self.db = DummyDB()
+ return
+
+ # use detect_types so that timestamps return datetime objects
+ kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
+ kwargs.update(self.connection_options)
+ self.db = sqlite3.connect(str(self.hist_file), **kwargs)
+ with self.db:
+ self.db.execute(
+ """CREATE TABLE IF NOT EXISTS sessions (session integer
+ primary key autoincrement, start timestamp,
+ end timestamp, num_cmds integer, remark text)"""
+ )
+ self.db.execute(
+ """CREATE TABLE IF NOT EXISTS history
+ (session integer, line integer, source text, source_raw text,
+ PRIMARY KEY (session, line))"""
+ )
+ # Output history is optional, but ensure the table's there so it can be
+ # enabled later.
+ self.db.execute(
+ """CREATE TABLE IF NOT EXISTS output_history
+ (session integer, line integer, output text,
+ PRIMARY KEY (session, line))"""
+ )
+ # success! reset corrupt db count
+ self._corrupt_db_counter = 0
+
+ def writeout_cache(self):
+ """Overridden by HistoryManager to dump the cache before certain
+ database lookups."""
+ pass
+
+ ## -------------------------------
+ ## Methods for retrieving history:
+ ## -------------------------------
+ def _run_sql(self, sql, params, raw=True, output=False, latest=False):
+ """Prepares and runs an SQL query for the history database.
+
+ Parameters
+ ----------
+ sql : str
+ Any filtering expressions to go after SELECT ... FROM ...
+ params : tuple
+ Parameters passed to the SQL query (to replace "?")
+ raw, output : bool
+ See :meth:`get_range`
+ latest : bool
+ Select rows with max (session, line)
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ toget = 'source_raw' if raw else 'source'
+ sqlfrom = "history"
+ if output:
+ sqlfrom = "history LEFT JOIN output_history USING (session, line)"
+ toget = "history.%s, output_history.output" % toget
+ if latest:
+ toget += ", MAX(session * 128 * 1024 + line)"
+ this_querry = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql
+ cur = self.db.execute(this_querry, params)
+ if latest:
+ cur = (row[:-1] for row in cur)
+ if output: # Regroup into 3-tuples
+ return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
+ return cur
+
+ @only_when_enabled
+ @catch_corrupt_db
+ def get_session_info(self, session):
+ """Get info about a session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve.
+
+ Returns
+ -------
+ session_id : int
+ Session ID number
+ start : datetime
+ Timestamp for the start of the session.
+ end : datetime
+ Timestamp for the end of the session, or None if IPython crashed.
+ num_cmds : int
+ Number of commands run, or None if IPython crashed.
+ remark : unicode
+ A manually set description.
+ """
+ query = "SELECT * from sessions where session == ?"
+ return self.db.execute(query, (session,)).fetchone()
+
+ @catch_corrupt_db
+ def get_last_session_id(self):
+ """Get the last session ID currently in the database.
+
+ Within IPython, this should be the same as the value stored in
+ :attr:`HistoryManager.session_number`.
+ """
+ for record in self.get_tail(n=1, include_latest=True):
+ return record[0]
+
+ @catch_corrupt_db
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ """Get the last n lines from the history database.
+
+ Parameters
+ ----------
+ n : int
+ The number of lines to get
+ raw, output : bool
+ See :meth:`get_range`
+ include_latest : bool
+ If False (default), n+1 lines are fetched, and the latest one
+ is discarded. This is intended for use when the method is called
+ from a user command, so that the command itself is not returned.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ self.writeout_cache()
+ if not include_latest:
+ n += 1
+ cur = self._run_sql(
+ "ORDER BY session DESC, line DESC LIMIT ?", (n,), raw=raw, output=output
+ )
+ if not include_latest:
+ return reversed(list(cur)[1:])
+ return reversed(list(cur))
+
+ @catch_corrupt_db
+ def search(self, pattern="*", raw=True, search_raw=True,
+ output=False, n=None, unique=False):
+ """Search the database using unix glob-style matching (wildcards
+ * and ?).
+
+ Parameters
+ ----------
+ pattern : str
+ The wildcarded pattern to match when searching
+ search_raw : bool
+ If True, search the raw input, otherwise, the parsed input
+ raw, output : bool
+ See :meth:`get_range`
+ n : None or int
+ If an integer is given, it defines the limit of
+ returned entries.
+ unique : bool
+ If True, return only unique entries.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ tosearch = "source_raw" if search_raw else "source"
+ if output:
+ tosearch = "history." + tosearch
+ self.writeout_cache()
+ sqlform = "WHERE %s GLOB ?" % tosearch
+ params = (pattern,)
+ if unique:
+ sqlform += ' GROUP BY {0}'.format(tosearch)
+ if n is not None:
+ sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
+ params += (n,)
+ elif unique:
+ sqlform += " ORDER BY session, line"
+ cur = self._run_sql(sqlform, params, raw=raw, output=output, latest=unique)
+ if n is not None:
+ return reversed(list(cur))
+ return cur
+
+ @catch_corrupt_db
+ def get_range(self, session, start=1, stop=None, raw=True, output=False):
+ """Retrieve input by session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve.
+ start : int
+ First line to retrieve.
+ stop : int
+ End of line range (excluded from output itself). If None, retrieve
+ to the end of the session.
+ raw : bool
+ If True, return untranslated input
+ output : bool
+ If True, attempt to include output. This will be 'real' Python
+ objects for the current session, or text reprs from previous
+ sessions if db_log_output was enabled at the time. Where no output
+ is found, None is used.
+
+ Returns
+ -------
+ entries
+ An iterator over the desired lines. Each line is a 3-tuple, either
+ (session, line, input) if output is False, or
+ (session, line, (input, output)) if output is True.
+ """
+ if stop:
+ lineclause = "line >= ? AND line < ?"
+ params = (session, start, stop)
+ else:
+ lineclause = "line>=?"
+ params = (session, start)
+
+ return self._run_sql("WHERE session==? AND %s" % lineclause,
+ params, raw=raw, output=output)
+
+ def get_range_by_str(self, rangestr, raw=True, output=False):
+ """Get lines of history from a string of ranges, as used by magic
+ commands %hist, %save, %macro, etc.
+
+ Parameters
+ ----------
+ rangestr : str
+ A string specifying ranges, e.g. "5 ~2/1-4". If an empty string is
+ given, this returns everything from the current session's history.
+
+ See the documentation of :func:`%history` for the full details.
+
+ raw, output : bool
+ As :meth:`get_range`
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ for sess, s, e in extract_hist_ranges(rangestr):
+ for line in self.get_range(sess, s, e, raw=raw, output=output):
+ yield line
+
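+# Example (illustrative sketch): HistoryAccessor can be instantiated on its
+# own to query an existing history database, e.g. from a standalone tool.
+# This assumes a "default" IPython profile whose history.sqlite already exists.
+#
+#     from IPython.core.history import HistoryAccessor
+#     hist = HistoryAccessor()                    # opens the default profile's history.sqlite
+#     for session, line, source in hist.get_tail(5, include_latest=True):
+#         print(session, line, source)
+#     last_session = hist.get_last_session_id()
+#     hits = list(hist.search("*import*", n=10))  # glob-style search, 10 most recent matches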
+
+class HistoryManager(HistoryAccessor):
+ """A class to organize all history-related functionality in one place.
+ """
+ # Public interface
+
+ # An instance of the IPython shell we are attached to
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ # Lists to hold processed and raw history. These start with a blank entry
+ # so that we can index them starting from 1
+ input_hist_parsed = List([""])
+ input_hist_raw = List([""])
+ # A list of directories visited during session
+ dir_hist = List()
+ @default('dir_hist')
+ def _dir_hist_default(self):
+ try:
+ return [Path.cwd()]
+ except OSError:
+ return []
+
+ # A dict of output history, keyed with ints from the shell's
+ # execution count.
+ output_hist = Dict()
+ # The text/plain repr of outputs.
+ output_hist_reprs = Dict()
+
+ # The number of the current session in the history database
+ session_number = Integer()
+
+ db_log_output = Bool(False,
+ help="Should the history database include output? (default: no)"
+ ).tag(config=True)
+ db_cache_size = Integer(0,
+ help="Write to database every x commands (higher values save disk access & power).\n"
+ "Values of 1 or less effectively disable caching."
+ ).tag(config=True)
+ # The input and output caches
+ db_input_cache = List()
+ db_output_cache = List()
+
+ # History saving in separate thread
+ save_thread = Instance('IPython.core.history.HistorySavingThread',
+ allow_none=True)
+ save_flag = Instance(threading.Event, allow_none=True)
+
+ # Private interface
+ # Variables used to store the three last inputs from the user. On each new
+ # history update, we populate the user's namespace with these, shifted as
+ # necessary.
+ _i00 = Unicode(u'')
+ _i = Unicode(u'')
+ _ii = Unicode(u'')
+ _iii = Unicode(u'')
+
+ # A regex matching all forms of the exit command, so that we don't store
+ # them in the history (it's annoying to rewind the first entry and land on
+ # an exit call).
+ _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
+
+ def __init__(self, shell=None, config=None, **traits):
+ """Create a new history manager associated with a shell instance.
+ """
+ super(HistoryManager, self).__init__(shell=shell, config=config,
+ **traits)
+ self.save_flag = threading.Event()
+ self.db_input_cache_lock = threading.Lock()
+ self.db_output_cache_lock = threading.Lock()
+
+ try:
+ self.new_session()
+ except sqlite3.OperationalError:
+ self.log.error("Failed to create history session in %s. History will not be saved.",
+ self.hist_file, exc_info=True)
+ self.hist_file = ':memory:'
+
+ if self.enabled and self.hist_file != ':memory:':
+ self.save_thread = HistorySavingThread(self)
+ self.save_thread.start()
+
+ def _get_hist_file_name(self, profile=None):
+ """Get default history file name based on the Shell's profile.
+
+ The profile parameter is ignored, but must exist for compatibility with
+ the parent class."""
+ profile_dir = self.shell.profile_dir.location
+ return Path(profile_dir) / "history.sqlite"
+
+ @only_when_enabled
+ def new_session(self, conn=None):
+ """Get a new session number."""
+ if conn is None:
+ conn = self.db
+
+ with conn:
+ cur = conn.execute(
+ """INSERT INTO sessions VALUES (NULL, ?, NULL,
+ NULL, '') """,
+ (datetime.datetime.now(),),
+ )
+ self.session_number = cur.lastrowid
+
+ def end_session(self):
+ """Close the database session, filling in the end time and line count."""
+ self.writeout_cache()
+ with self.db:
+ self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
+ session==?""", (datetime.datetime.now(),
+ len(self.input_hist_parsed)-1, self.session_number))
+ self.session_number = 0
+
+ def name_session(self, name):
+ """Give the current session a name in the history database."""
+ with self.db:
+ self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
+ (name, self.session_number))
+
+ def reset(self, new_session=True):
+ """Clear the session history, releasing all object references, and
+ optionally open a new session."""
+ self.output_hist.clear()
+ # The directory history can't be completely empty
+ self.dir_hist[:] = [Path.cwd()]
+
+ if new_session:
+ if self.session_number:
+ self.end_session()
+ self.input_hist_parsed[:] = [""]
+ self.input_hist_raw[:] = [""]
+ self.new_session()
+
+ # ------------------------------
+ # Methods for retrieving history
+ # ------------------------------
+ def get_session_info(self, session=0):
+ """Get info about a session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve. The current session is 0, and negative
+ numbers count back from current session, so -1 is the previous session.
+
+ Returns
+ -------
+ session_id : int
+ Session ID number
+ start : datetime
+ Timestamp for the start of the session.
+ end : datetime
+ Timestamp for the end of the session, or None if IPython crashed.
+ num_cmds : int
+ Number of commands run, or None if IPython crashed.
+ remark : unicode
+ A manually set description.
+ """
+ if session <= 0:
+ session += self.session_number
+
+ return super(HistoryManager, self).get_session_info(session=session)
+
+ @catch_corrupt_db
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ """Get the last n lines from the history database.
+
+ Most recent entry last.
+
+ Entries are reordered so that, where possible, the most recent ones
+ come from the current session.
+
+ Parameters
+ ----------
+ n : int
+ The number of lines to get
+ raw, output : bool
+ See :meth:`get_range`
+ include_latest : bool
+ If False (default), n+1 lines are fetched, and the latest one
+ is discarded. This is intended for use when the method is called
+ from a user command, so that the command itself is not returned.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ self.writeout_cache()
+ if not include_latest:
+ n += 1
+ # cursor/line/entry
+ this_cur = list(
+ self._run_sql(
+ "WHERE session == ? ORDER BY line DESC LIMIT ? ",
+ (self.session_number, n),
+ raw=raw,
+ output=output,
+ )
+ )
+ other_cur = list(
+ self._run_sql(
+ "WHERE session != ? ORDER BY session DESC, line DESC LIMIT ?",
+ (self.session_number, n),
+ raw=raw,
+ output=output,
+ )
+ )
+
+ everything = this_cur + other_cur
+
+ everything = everything[:n]
+
+ if not include_latest:
+ return list(everything)[:0:-1]
+ return list(everything)[::-1]
+
+ def _get_range_session(self, start=1, stop=None, raw=True, output=False):
+ """Get input and output history from the current session. Called by
+ get_range, and takes similar parameters."""
+ input_hist = self.input_hist_raw if raw else self.input_hist_parsed
+
+ n = len(input_hist)
+ if start < 0:
+ start += n
+ if not stop or (stop > n):
+ stop = n
+ elif stop < 0:
+ stop += n
+
+ for i in range(start, stop):
+ if output:
+ line = (input_hist[i], self.output_hist_reprs.get(i))
+ else:
+ line = input_hist[i]
+ yield (0, i, line)
+
+ def get_range(self, session=0, start=1, stop=None, raw=True, output=False):
+ """Retrieve input by session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve. The current session is 0, and negative
+ numbers count back from current session, so -1 is previous session.
+ start : int
+ First line to retrieve.
+ stop : int
+ End of line range (excluded from output itself). If None, retrieve
+ to the end of the session.
+ raw : bool
+ If True, return untranslated input
+ output : bool
+ If True, attempt to include output. This will be 'real' Python
+ objects for the current session, or text reprs from previous
+ sessions if db_log_output was enabled at the time. Where no output
+ is found, None is used.
+
+ Returns
+ -------
+ entries
+ An iterator over the desired lines. Each line is a 3-tuple, either
+ (session, line, input) if output is False, or
+ (session, line, (input, output)) if output is True.
+ """
+ if session <= 0:
+ session += self.session_number
+ if session==self.session_number: # Current session
+ return self._get_range_session(start, stop, raw, output)
+ return super(HistoryManager, self).get_range(session, start, stop, raw,
+ output)
+
+ ## ----------------------------
+ ## Methods for storing history:
+ ## ----------------------------
+ def store_inputs(self, line_num, source, source_raw=None):
+ """Store source and raw input in history and create input cache
+ variables ``_i*``.
+
+ Parameters
+ ----------
+ line_num : int
+ The prompt number of this input.
+ source : str
+ Python input.
+ source_raw : str, optional
+ If given, this is the raw input without any IPython transformations
+ applied to it. If not given, ``source`` is used.
+ """
+ if source_raw is None:
+ source_raw = source
+ source = source.rstrip('\n')
+ source_raw = source_raw.rstrip('\n')
+
+ # do not store exit/quit commands
+ if self._exit_re.match(source_raw.strip()):
+ return
+
+ self.input_hist_parsed.append(source)
+ self.input_hist_raw.append(source_raw)
+
+ with self.db_input_cache_lock:
+ self.db_input_cache.append((line_num, source, source_raw))
+ # Trigger to flush cache and write to DB.
+ if len(self.db_input_cache) >= self.db_cache_size:
+ self.save_flag.set()
+
+ # update the auto _i variables
+ self._iii = self._ii
+ self._ii = self._i
+ self._i = self._i00
+ self._i00 = source_raw
+
+ # hackish access to user namespace to create _i1,_i2... dynamically
+ new_i = '_i%s' % line_num
+ to_main = {'_i': self._i,
+ '_ii': self._ii,
+ '_iii': self._iii,
+ new_i : self._i00 }
+
+ if self.shell is not None:
+ self.shell.push(to_main, interactive=False)
+
+ def store_output(self, line_num):
+ """If database output logging is enabled, this saves all the
+ outputs from the indicated prompt number to the database. It's
+ called by run_cell after code has been executed.
+
+ Parameters
+ ----------
+ line_num : int
+ The line number from which to save outputs
+ """
+ if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
+ return
+ output = self.output_hist_reprs[line_num]
+
+ with self.db_output_cache_lock:
+ self.db_output_cache.append((line_num, output))
+ if self.db_cache_size <= 1:
+ self.save_flag.set()
+
+ def _writeout_input_cache(self, conn):
+ with conn:
+ for line in self.db_input_cache:
+ conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
+ (self.session_number,)+line)
+
+ def _writeout_output_cache(self, conn):
+ with conn:
+ for line in self.db_output_cache:
+ conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
+ (self.session_number,)+line)
+
+ @only_when_enabled
+ def writeout_cache(self, conn=None):
+ """Write any entries in the cache to the database."""
+ if conn is None:
+ conn = self.db
+
+ with self.db_input_cache_lock:
+ try:
+ self._writeout_input_cache(conn)
+ except sqlite3.IntegrityError:
+ self.new_session(conn)
+ print("ERROR! Session/line number was not unique in",
+ "database. History logging moved to new session",
+ self.session_number)
+ try:
+ # Try writing to the new session. If this fails, don't
+ # recurse
+ self._writeout_input_cache(conn)
+ except sqlite3.IntegrityError:
+ pass
+ finally:
+ self.db_input_cache = []
+
+ with self.db_output_cache_lock:
+ try:
+ self._writeout_output_cache(conn)
+ except sqlite3.IntegrityError:
+ print("!! Session/line number for output was not unique",
+ "in database. Output will not be stored.")
+ finally:
+ self.db_output_cache = []
+
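+# Example (illustrative sketch): inside a running IPython shell the active
+# HistoryManager is available as `get_ipython().history_manager`; session 0
+# (the default) refers to the current session.
+#
+#     hm = get_ipython().history_manager
+#     for session, line, (inp, out) in hm.get_range(0, output=True):
+#         print(line, inp, out)        # out is None where no output was captured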
+
+class HistorySavingThread(threading.Thread):
+ """This thread takes care of writing history to the database, so that
+ the UI isn't held up while that happens.
+
+ It waits for the HistoryManager's save_flag to be set, then writes out
+ the history cache. The main thread is responsible for setting the flag when
+ the cache size reaches a defined threshold."""
+ daemon = True
+ stop_now = False
+ enabled = True
+ def __init__(self, history_manager):
+ super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
+ self.history_manager = history_manager
+ self.enabled = history_manager.enabled
+ atexit.register(self.stop)
+
+ @only_when_enabled
+ def run(self):
+ # We need a separate db connection per thread:
+ try:
+ self.db = sqlite3.connect(
+ str(self.history_manager.hist_file),
+ **self.history_manager.connection_options,
+ )
+ while True:
+ self.history_manager.save_flag.wait()
+ if self.stop_now:
+ self.db.close()
+ return
+ self.history_manager.save_flag.clear()
+ self.history_manager.writeout_cache(self.db)
+ except Exception as e:
+ print(("The history saving thread hit an unexpected error (%s)."
+ "History will not be written to the database.") % repr(e))
+
+ def stop(self):
+ """This can be called from the main thread to safely stop this thread.
+
+ Note that it does not attempt to write out remaining history before
+ exiting. That should be done by calling the HistoryManager's
+ end_session method."""
+ self.stop_now = True
+ self.history_manager.save_flag.set()
+ self.join()
+
+
+# To match, e.g. ~5/8-~2/3
+range_re = re.compile(r"""
+((?P<startsess>~?\d+)/)?
+(?P<start>\d+)?
+((?P<sep>[\-:])
+ ((?P<endsess>~?\d+)/)?
+ (?P<end>\d+))?
+$""", re.VERBOSE)
+
+
+def extract_hist_ranges(ranges_str):
+ """Turn a string of history ranges into 3-tuples of (session, start, stop).
+
+ An empty string results in `[(0, 1, None)]`, i.e. "everything from the
+ current session".
+
+ Examples
+ --------
+ >>> list(extract_hist_ranges("~8/5-~7/4 2"))
+ [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
+ """
+ if ranges_str == "":
+ yield (0, 1, None) # Everything from current session
+ return
+
+ for range_str in ranges_str.split():
+ rmatch = range_re.match(range_str)
+ if not rmatch:
+ continue
+ start = rmatch.group("start")
+ if start:
+ start = int(start)
+ end = rmatch.group("end")
+ # If no end specified, get (a, a + 1)
+ end = int(end) if end else start + 1
+ else: # start not specified
+ if not rmatch.group('startsess'): # no startsess
+ continue
+ start = 1
+ end = None # provide the entire session hist
+
+ if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
+ end += 1
+ startsess = rmatch.group("startsess") or "0"
+ endsess = rmatch.group("endsess") or startsess
+ startsess = int(startsess.replace("~","-"))
+ endsess = int(endsess.replace("~","-"))
+ assert endsess >= startsess, "end session must not be earlier than start session"
+
+ if endsess == startsess:
+ yield (startsess, start, end)
+ continue
+ # Multiple sessions in one range:
+ yield (startsess, start, None)
+ for sess in range(startsess+1, endsess):
+ yield (sess, 1, None)
+ yield (endsess, 1, end)
+
+
+def _format_lineno(session, line):
+ """Helper function to format line numbers properly."""
+ if session == 0:
+ return str(line)
+ return "%s#%s" % (session, line)
diff --git a/contrib/python/ipython/py3/IPython/core/historyapp.py b/contrib/python/ipython/py3/IPython/core/historyapp.py
new file mode 100644
index 0000000000..01a55343f8
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/historyapp.py
@@ -0,0 +1,161 @@
+# encoding: utf-8
+"""
+An application for managing IPython history.
+
+To be invoked as the `ipython history` subcommand.
+"""
+
+import sqlite3
+from pathlib import Path
+
+from traitlets.config.application import Application
+from .application import BaseIPythonApplication
+from traitlets import Bool, Int, Dict
+from ..utils.io import ask_yes_no
+
+trim_hist_help = """Trim the IPython history database to the last 1000 entries.
+
+This actually copies the last 1000 entries to a new database, and then replaces
+the old file with the new. Use the `--keep=` argument to specify a number
+other than 1000.
+"""
+
+clear_hist_help = """Clear the IPython history database, deleting all entries.
+
+Because this is a destructive operation, IPython will prompt the user if they
+really want to do this. Passing a `-f` flag will force clearing without a
+prompt.
+
+This is a handy alias for `ipython history trim --keep=0`.
+"""
+
+
+class HistoryTrim(BaseIPythonApplication):
+ description = trim_hist_help
+
+ backup = Bool(False,
+ help="Keep the old history file as history.sqlite.<N>"
+ ).tag(config=True)
+
+ keep = Int(1000,
+ help="Number of recent lines to keep in the database."
+ ).tag(config=True)
+
+ flags = Dict(dict(
+ backup = ({'HistoryTrim' : {'backup' : True}},
+ backup.help
+ )
+ ))
+
+ aliases=Dict(dict(
+ keep = 'HistoryTrim.keep'
+ ))
+
+ def start(self):
+ profile_dir = Path(self.profile_dir.location)
+ hist_file = profile_dir / "history.sqlite"
+ con = sqlite3.connect(hist_file)
+
+ # Grab the recent history from the current database.
+ inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
+ 'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
+ if len(inputs) <= self.keep:
+ print("There are already at most %d entries in the history database." % self.keep)
+ print("Not doing anything. Use --keep= argument to keep fewer entries")
+ return
+
+ print("Trimming history to the most recent %d entries." % self.keep)
+
+ inputs.pop() # Remove the extra element we got to check the length.
+ inputs.reverse()
+ if inputs:
+ first_session = inputs[0][0]
+ outputs = list(con.execute('SELECT session, line, output FROM '
+ 'output_history WHERE session >= ?', (first_session,)))
+ sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
+ 'sessions WHERE session >= ?', (first_session,)))
+ con.close()
+
+ # Create the new history database.
+ new_hist_file = profile_dir / "history.sqlite.new"
+ i = 0
+ while new_hist_file.exists():
+ # Make sure we don't interfere with an existing file.
+ i += 1
+ new_hist_file = profile_dir / ("history.sqlite.new" + str(i))
+ new_db = sqlite3.connect(new_hist_file)
+ new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
+ primary key autoincrement, start timestamp,
+ end timestamp, num_cmds integer, remark text)""")
+ new_db.execute("""CREATE TABLE IF NOT EXISTS history
+ (session integer, line integer, source text, source_raw text,
+ PRIMARY KEY (session, line))""")
+ new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
+ (session integer, line integer, output text,
+ PRIMARY KEY (session, line))""")
+ new_db.commit()
+
+
+ if inputs:
+ with new_db:
+ # Add the recent history into the new database.
+ new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
+ new_db.executemany('insert into history values (?,?,?,?)', inputs)
+ new_db.executemany('insert into output_history values (?,?,?)', outputs)
+ new_db.close()
+
+ if self.backup:
+ i = 1
+ backup_hist_file = profile_dir / ("history.sqlite.old.%d" % i)
+ while backup_hist_file.exists():
+ i += 1
+ backup_hist_file = profile_dir / ("history.sqlite.old.%d" % i)
+ hist_file.rename(backup_hist_file)
+ print("Backed up longer history file to", backup_hist_file)
+ else:
+ hist_file.unlink()
+
+ new_hist_file.rename(hist_file)
+
+class HistoryClear(HistoryTrim):
+ description = clear_hist_help
+ keep = Int(0,
+ help="Number of recent lines to keep in the database.")
+
+ force = Bool(False,
+ help="Don't prompt user for confirmation"
+ ).tag(config=True)
+
+ flags = Dict(dict(
+ force = ({'HistoryClear' : {'force' : True}},
+ force.help),
+ f = ({'HistoryTrim' : {'force' : True}},
+ force.help
+ )
+ ))
+ aliases = Dict()
+
+ def start(self):
+ if self.force or ask_yes_no("Really delete all ipython history? ",
+ default="no", interrupt="no"):
+ HistoryTrim.start(self)
+
+class HistoryApp(Application):
+ name = u'ipython-history'
+ description = "Manage the IPython history database."
+
+ subcommands = Dict(dict(
+ trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
+ clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
+ ))
+
+ def start(self):
+ if self.subapp is None:
+ print("No subcommand specified. Must specify one of: %s" % \
+ (self.subcommands.keys()))
+ print()
+ self.print_description()
+ self.print_subcommands()
+ self.exit(1)
+ else:
+ return self.subapp.start()
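+
+# Example (illustrative): typical invocations through the `ipython history`
+# subcommand, using the flags and aliases defined above.
+#
+#     ipython history trim --keep=500            # keep only the 500 most recent entries
+#     ipython history trim --keep=500 --backup   # keep the old file as history.sqlite.old.N
+#     ipython history clear -f                   # delete all entries without prompting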
diff --git a/contrib/python/ipython/py3/IPython/core/hooks.py b/contrib/python/ipython/py3/IPython/core/hooks.py
new file mode 100644
index 0000000000..f73c565763
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/hooks.py
@@ -0,0 +1,173 @@
+"""Hooks for IPython.
+
+In Python, it is possible to overwrite any method of any object if you really
+want to. But IPython exposes a few 'hooks', methods which are *designed* to
+be overwritten by users for customization purposes. This module defines the
+default versions of all such hooks, which get used by IPython if not
+overridden by the user.
+
+Hooks are simple functions, but they should be declared with ``self`` as their
+first argument, because when activated they are registered into IPython as
+instance methods. The self argument will be the running IPython instance
+itself, so hooks have full access to the entire IPython object.
+
+If you wish to define a new hook and activate it, you can make an :doc:`extension
+</config/extensions/index>` or a :ref:`startup script <startup_files>`. For
+example, you could use a startup file like this::
+
+ import os
+
+ def calljed(self,filename, linenum):
+ "My editor hook calls the jed editor directly."
+ print "Calling my own editor, jed ..."
+ if os.system('jed +%d %s' % (linenum,filename)) != 0:
+ raise TryNext()
+
+ def load_ipython_extension(ip):
+ ip.set_hook('editor', calljed)
+
+"""
+
+#*****************************************************************************
+# Copyright (C) 2005 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import os
+import subprocess
+import sys
+
+from .error import TryNext
+
+# List here all the default hooks. For now it's just the editor functions
+# but over time we'll move here all the public API for user-accessible things.
+
+__all__ = [
+ "editor",
+ "synchronize_with_editor",
+ "show_in_pager",
+ "pre_prompt_hook",
+ "clipboard_get",
+]
+
+deprecated = {'pre_run_code_hook': "a callback for the 'pre_execute' or 'pre_run_cell' event",
+ 'late_startup_hook': "a callback for the 'shell_initialized' event",
+ 'shutdown_hook': "the atexit module",
+ }
+
+def editor(self, filename, linenum=None, wait=True):
+ """Open the default editor at the given filename and linenumber.
+
+ This is IPython's default editor hook, you can use it as an example to
+ write your own modified one. To set your own editor function as the
+ new editor hook, call ip.set_hook('editor',yourfunc)."""
+
+ # IPython configures a default editor at startup by reading $EDITOR from
+ # the environment, and falling back on vi (unix) or notepad (win32).
+ editor = self.editor
+
+ # marker for at which line to open the file (for existing objects)
+ if linenum is None or editor=='notepad':
+ linemark = ''
+ else:
+ linemark = '+%d' % int(linenum)
+
+ # Enclose in quotes if necessary and legal
+ if ' ' in editor and os.path.isfile(editor) and editor[0] != '"':
+ editor = '"%s"' % editor
+
+ # Call the actual editor
+ proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename),
+ shell=True)
+ if wait and proc.wait() != 0:
+ raise TryNext()
+
+
+def synchronize_with_editor(self, filename, linenum, column):
+ pass
+
+
+class CommandChainDispatcher:
+ """ Dispatch calls to a chain of commands until some func can handle it
+
+ Usage: instantiate, call "add" to add commands (with an optional
+ priority), then call the instance like a normal function.
+
+ """
+ def __init__(self,commands=None):
+ if commands is None:
+ self.chain = []
+ else:
+ self.chain = commands
+
+
+ def __call__(self,*args, **kw):
+ """ Command chain is called just like normal func.
+
+ This will call all funcs in chain with the same args as were given to
+ this function, and return the result of first func that didn't raise
+ TryNext"""
+ last_exc = TryNext()
+ for prio,cmd in self.chain:
+ #print "prio",prio,"cmd",cmd #dbg
+ try:
+ return cmd(*args, **kw)
+ except TryNext as exc:
+ last_exc = exc
+ # if no function will accept it, raise TryNext up to the caller
+ raise last_exc
+
+ def __str__(self):
+ return str(self.chain)
+
+ def add(self, func, priority=0):
+ """ Add a func to the cmd chain with given priority """
+ self.chain.append((priority, func))
+ self.chain.sort(key=lambda x: x[0])
+
+ def __iter__(self):
+ """ Return all objects in chain.
+
+ Handy if the objects are not callable.
+ """
+ return iter(self.chain)
+
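+# Example (illustrative sketch): chaining two candidate handlers; the first
+# function that does not raise TryNext provides the result. Both handlers
+# below are hypothetical.
+#
+#     def primary(text):
+#         raise TryNext()                 # "can't handle it, try the next one"
+#
+#     def fallback(text):
+#         return text.upper()
+#
+#     dispatcher = CommandChainDispatcher()
+#     dispatcher.add(primary, priority=10)
+#     dispatcher.add(fallback, priority=50)   # larger priority values run later
+#     dispatcher("hello")                 # -> 'HELLO', via fallback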
+
+def show_in_pager(self, data, start, screen_lines):
+ """ Run a string through pager """
+ # raising TryNext here will use the default paging functionality
+ raise TryNext
+
+
+def pre_prompt_hook(self):
+ """ Run before displaying the next prompt
+
+ Use this e.g. to display output from asynchronous operations (in order
+ to not mess up text entry)
+ """
+
+ return None
+
+
+def clipboard_get(self):
+ """ Get text from the clipboard.
+ """
+ from ..lib.clipboard import (
+ osx_clipboard_get,
+ tkinter_clipboard_get,
+ win32_clipboard_get,
+ wayland_clipboard_get,
+ )
+ if sys.platform == 'win32':
+ chain = [win32_clipboard_get, tkinter_clipboard_get]
+ elif sys.platform == 'darwin':
+ chain = [osx_clipboard_get, tkinter_clipboard_get]
+ else:
+ chain = [wayland_clipboard_get, tkinter_clipboard_get]
+ dispatcher = CommandChainDispatcher()
+ for func in chain:
+ dispatcher.add(func)
+ text = dispatcher()
+ return text
diff --git a/contrib/python/ipython/py3/IPython/core/inputsplitter.py b/contrib/python/ipython/py3/IPython/core/inputsplitter.py
new file mode 100644
index 0000000000..10707d3d6b
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/inputsplitter.py
@@ -0,0 +1,773 @@
+"""DEPRECATED: Input handling and transformation machinery.
+
+This module was deprecated in IPython 7.0, in favour of inputtransformer2.
+
+The first class in this module, :class:`InputSplitter`, is designed to tell when
+input from a line-oriented frontend is complete and should be executed, and when
+the user should be prompted for another line of code instead. The name 'input
+splitter' is largely for historical reasons.
+
+A companion, :class:`IPythonInputSplitter`, provides the same functionality but
+with full support for the extended IPython syntax (magics, system calls, etc).
+The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
+:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
+and stores the results.
+
+For more details, see the class docstrings below.
+"""
+
+from warnings import warn
+
+warn('IPython.core.inputsplitter is deprecated since IPython 7 in favor of `IPython.core.inputtransformer2`',
+ DeprecationWarning)
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import ast
+import codeop
+import io
+import re
+import sys
+import tokenize
+import warnings
+
+from typing import List
+
+from IPython.core.inputtransformer import (leading_indent,
+ classic_prompt,
+ ipy_prompt,
+ cellmagic,
+ assemble_logical_lines,
+ help_end,
+ escaped_commands,
+ assign_from_magic,
+ assign_from_system,
+ assemble_python_lines,
+ )
+
+# These are available in this module for backwards compatibility.
+from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
+ ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
+ ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+# FIXME: These are general-purpose utilities that could later be moved to a
+# more general location. Kept here for now because we're being very strict about test
+# coverage with this code, and this lets us ensure that we keep 100% coverage
+# while developing.
+
+# compiled regexps for autoindent management
+dedent_re = re.compile('|'.join([
+ r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
+ r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
+ r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
+ r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
+ r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
+ r'^\s+break\s*$', # break (optionally followed by trailing spaces)
+ r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
+]))
+ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
+
+# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
+# before pure comments
+comment_line_re = re.compile(r'^\s*\#')
+
+
+def num_ini_spaces(s):
+ """Return the number of initial spaces in a string.
+
+ Note that tabs are counted as a single space. For now, we do *not* support
+ mixing of tabs and spaces in the user's input.
+
+ Parameters
+ ----------
+ s : string
+
+ Returns
+ -------
+ n : int
+ """
+
+ ini_spaces = ini_spaces_re.match(s)
+ if ini_spaces:
+ return ini_spaces.end()
+ else:
+ return 0
+
+# Fake token types for partial_tokenize:
+INCOMPLETE_STRING = tokenize.N_TOKENS
+IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1
+
+# The 2 classes below have the same API as TokenInfo, but don't try to look up
+# a token type name that they won't find.
+class IncompleteString:
+ type = exact_type = INCOMPLETE_STRING
+ def __init__(self, s, start, end, line):
+ self.s = s
+ self.start = start
+ self.end = end
+ self.line = line
+
+class InMultilineStatement:
+ type = exact_type = IN_MULTILINE_STATEMENT
+ def __init__(self, pos, line):
+ self.s = ''
+ self.start = self.end = pos
+ self.line = line
+
+def partial_tokens(s):
+ """Iterate over tokens from a possibly-incomplete string of code.
+
+ This adds two special token types: INCOMPLETE_STRING and
+ IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and
+ represent the two main ways for code to be incomplete.
+ """
+ readline = io.StringIO(s).readline
+ token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
+ try:
+ for token in tokenize.generate_tokens(readline):
+ yield token
+ except tokenize.TokenError as e:
+ # catch EOF error
+ lines = s.splitlines(keepends=True)
+ end = len(lines), len(lines[-1])
+ if 'multi-line string' in e.args[0]:
+ l, c = start = token.end
+ s = lines[l-1][c:] + ''.join(lines[l:])
+ yield IncompleteString(s, start, end, lines[-1])
+ elif 'multi-line statement' in e.args[0]:
+ yield InMultilineStatement(end, lines[-1])
+ else:
+ raise
+
+def find_next_indent(code):
+ """Find the number of spaces for the next line of indentation"""
+ tokens = list(partial_tokens(code))
+ if tokens[-1].type == tokenize.ENDMARKER:
+ tokens.pop()
+ if not tokens:
+ return 0
+ while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
+ tokens.pop()
+
+ if tokens[-1].type == INCOMPLETE_STRING:
+ # Inside a multiline string
+ return 0
+
+ # Find the indents used before
+ prev_indents = [0]
+ def _add_indent(n):
+ if n != prev_indents[-1]:
+ prev_indents.append(n)
+
+ tokiter = iter(tokens)
+ for tok in tokiter:
+ if tok.type in {tokenize.INDENT, tokenize.DEDENT}:
+ _add_indent(tok.end[1])
+ elif (tok.type == tokenize.NL):
+ try:
+ _add_indent(next(tokiter).start[1])
+ except StopIteration:
+ break
+
+ last_indent = prev_indents.pop()
+
+ # If we've just opened a multiline statement (e.g. 'a = ['), indent more
+ if tokens[-1].type == IN_MULTILINE_STATEMENT:
+ if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}:
+ return last_indent + 4
+ return last_indent
+
+ if tokens[-1].exact_type == tokenize.COLON:
+ # Line ends with colon - indent
+ return last_indent + 4
+
+ if last_indent:
+ # Examine the last line for dedent cues - statements like return or
+ # raise which normally end a block of code.
+ last_line_starts = 0
+ for i, tok in enumerate(tokens):
+ if tok.type == tokenize.NEWLINE:
+ last_line_starts = i + 1
+
+ last_line_tokens = tokens[last_line_starts:]
+ names = [t.string for t in last_line_tokens if t.type == tokenize.NAME]
+ if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}:
+ # Find the most recent indentation less than the current level
+ for indent in reversed(prev_indents):
+ if indent < last_indent:
+ return indent
+
+ return last_indent
+
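+# Example (illustrative): expected results of find_next_indent() for a couple
+# of partial inputs, following the colon / dedent rules implemented above.
+#
+#     find_next_indent("x = 1")    # -> 0 (nothing suggests indentation)
+#     find_next_indent("if x:")    # -> 4 (line ends with a colon)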
+
+def last_blank(src):
+ """Determine if the input source ends in a blank.
+
+ A blank is either a newline or a line consisting of whitespace.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline string.
+ """
+ if not src: return False
+ ll = src.splitlines()[-1]
+ return (ll == '') or ll.isspace()
+
+
+last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
+last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
+
+def last_two_blanks(src):
+ """Determine if the input source ends in two blanks.
+
+ A blank is either a newline or a line consisting of whitespace.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline string.
+ """
+ if not src: return False
+ # The logic here is tricky: I couldn't get a regexp to work and pass all
+ # the tests, so I took a different approach: split the source by lines,
+ # grab the last two and prepend '###\n' as a stand-in for whatever was in
+ # the body before the last two lines. Then, with that structure, it's
+ # possible to analyze with two regexps. Not the most elegant solution, but
+ # it works. If anyone tries to change this logic, make sure to validate
+ # the whole test suite first!
+ new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
+ return (bool(last_two_blanks_re.match(new_src)) or
+ bool(last_two_blanks_re2.match(new_src)) )
+
+
+def remove_comments(src):
+ """Remove all comments from input source.
+
+ Note: comments are NOT recognized inside of strings!
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline input string.
+
+ Returns
+ -------
+ String with all Python comments removed.
+ """
+
+ return re.sub('#.*', '', src)
+
+
+def get_input_encoding():
+ """Return the default standard input encoding.
+
+ If sys.stdin has no encoding, 'ascii' is returned."""
+ # There are strange environments for which sys.stdin.encoding is None. We
+ # ensure that a valid encoding is returned.
+ encoding = getattr(sys.stdin, 'encoding', None)
+ if encoding is None:
+ encoding = 'ascii'
+ return encoding
+
+#-----------------------------------------------------------------------------
+# Classes and functions for normal Python syntax handling
+#-----------------------------------------------------------------------------
+
+class InputSplitter(object):
+ r"""An object that can accumulate lines of Python source before execution.
+
+ This object is designed to be fed python source line-by-line, using
+ :meth:`push`. It will return on each push whether the currently pushed
+ code could be executed already. In addition, it provides a method called
+ :meth:`push_accepts_more` that can be used to query whether more input
+ can be pushed into a single interactive block.
+
+ This is a simple example of how an interactive terminal-based client can use
+ this tool::
+
+ isp = InputSplitter()
+ while isp.push_accepts_more():
+ indent = ' '*isp.indent_spaces
+ prompt = '>>> ' + indent
+ line = indent + input(prompt)
+ isp.push(line)
+ print('Input source was:\n', isp.source_reset())
+ """
+ # A cache for storing the current indentation
+ # The first value stores the most recently processed source input
+ # The second value is the number of spaces for the current indentation
+ # If self.source matches the first value, the second value is a valid
+ # current indentation. Otherwise, the cache is invalid and the indentation
+ # must be recalculated.
+ _indent_spaces_cache = None, None
+ # String, indicating the default input encoding. It is computed by default
+ # at initialization time via get_input_encoding(), but it can be reset by a
+ # client with specific knowledge of the encoding.
+ encoding = ''
+ # String where the current full source input is stored, properly encoded.
+ # Reading this attribute is the normal way of querying the currently pushed
+ # source code, that has been properly encoded.
+ source = ''
+ # Code object corresponding to the current source. It is automatically
+ # synced to the source, so it can be queried at any time to obtain the code
+ # object; it will be None if the source doesn't compile to valid Python.
+ code = None
+
+ # Private attributes
+
+ # List with lines of input accumulated so far
+ _buffer: List[str]
+ # Command compiler
+ _compile: codeop.CommandCompiler
+ # Boolean indicating whether the current block is complete
+ _is_complete = None
+ # Boolean indicating whether the current block has an unrecoverable syntax error
+ _is_invalid = False
+
+ def __init__(self) -> None:
+ """Create a new InputSplitter instance."""
+ self._buffer = []
+ self._compile = codeop.CommandCompiler()
+ self.encoding = get_input_encoding()
+
+ def reset(self):
+ """Reset the input buffer and associated state."""
+ self._buffer[:] = []
+ self.source = ''
+ self.code = None
+ self._is_complete = False
+ self._is_invalid = False
+
+ def source_reset(self):
+ """Return the input source and perform a full reset.
+ """
+ out = self.source
+ self.reset()
+ return out
+
+ def check_complete(self, source):
+ """Return whether a block of code is ready to execute, or should be continued
+
+ This is a non-stateful API, and will reset the state of this InputSplitter.
+
+ Parameters
+ ----------
+ source : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+ One of 'complete', 'incomplete', or 'invalid' if source is not a
+ prefix of valid code.
+ indent_spaces : int or None
+ The number of spaces by which to indent the next line of code. If
+ status is not 'incomplete', this is None.
+ """
+ self.reset()
+ try:
+ self.push(source)
+ except SyntaxError:
+ # Transformers in IPythonInputSplitter can raise SyntaxError,
+ # which push() will not catch.
+ return 'invalid', None
+ else:
+ if self._is_invalid:
+ return 'invalid', None
+ elif self.push_accepts_more():
+ return 'incomplete', self.get_indent_spaces()
+ else:
+ return 'complete', None
+ finally:
+ self.reset()
+
+ def push(self, lines:str) -> bool:
+ """Push one or more lines of input.
+
+ This stores the given lines and returns a status code indicating
+ whether the code forms a complete Python block or not.
+
+ Any exceptions generated in compilation are swallowed, but if an
+ exception was produced, the method returns True.
+
+ Parameters
+ ----------
+ lines : string
+ One or more lines of Python input.
+
+ Returns
+ -------
+ is_complete : boolean
+ True if the current input source (the result of the current input
+ plus prior inputs) forms a complete Python execution block. Note that
+ this value is also stored as a private attribute (``_is_complete``), so it
+ can be queried at any time.
+ """
+ assert isinstance(lines, str)
+ self._store(lines)
+ source = self.source
+
+ # Before calling _compile(), reset the code object to None so that if an
+ # exception is raised in compilation, we don't mislead by having
+ # inconsistent code/source attributes.
+ self.code, self._is_complete = None, None
+ self._is_invalid = False
+
+ # Honor termination lines properly
+ if source.endswith('\\\n'):
+ return False
+
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', SyntaxWarning)
+ self.code = self._compile(source, symbol="exec")
+ # Invalid syntax can produce any of a number of different errors from
+ # inside the compiler, so we have to catch them all. Syntax errors
+ # immediately produce a 'ready' block, so the invalid Python can be
+ # sent to the kernel for evaluation with possible ipython
+ # special-syntax conversion.
+ except (SyntaxError, OverflowError, ValueError, TypeError,
+ MemoryError, SyntaxWarning):
+ self._is_complete = True
+ self._is_invalid = True
+ else:
+ # Compilation didn't produce any exceptions (though it may not have
+ # given a complete code object)
+ self._is_complete = self.code is not None
+
+ return self._is_complete
+
+ def push_accepts_more(self):
+ """Return whether a block of interactive input can accept more input.
+
+ This method is meant to be used by line-oriented frontends, who need to
+ guess whether a block is complete or not based solely on prior and
+ current input lines. The InputSplitter considers it has a complete
+ interactive block and will not accept more input when either:
+
+ * A SyntaxError is raised
+
+ * The code is complete and consists of a single line or a single
+ non-compound statement
+
+ * The code is complete and has a blank line at the end
+
+ If the current input produces a syntax error, this method immediately
+ returns False but does *not* raise the syntax error exception, as
+ typically clients will want to send invalid syntax to an execution
+ backend which might convert the invalid syntax into valid Python via
+ one of the dynamic IPython mechanisms.
+ """
+
+ # With incomplete input, unconditionally accept more
+ # A syntax error also sets _is_complete to True - see push()
+ if not self._is_complete:
+ #print("Not complete") # debug
+ return True
+
+ # The user can make any (complete) input execute by leaving a blank line
+ last_line = self.source.splitlines()[-1]
+ if (not last_line) or last_line.isspace():
+ #print("Blank line") # debug
+ return False
+
+ # If there's just a single line or AST node, and we're flush left, as is
+ # the case after a simple statement such as 'a=1', we want to execute it
+ # straight away.
+ if self.get_indent_spaces() == 0:
+ if len(self.source.splitlines()) <= 1:
+ return False
+
+ try:
+ code_ast = ast.parse("".join(self._buffer))
+ except Exception:
+ #print("Can't parse AST") # debug
+ return False
+ else:
+ if len(code_ast.body) == 1 and \
+ not hasattr(code_ast.body[0], 'body'):
+ #print("Simple statement") # debug
+ return False
+
+ # General fallback - accept more code
+ return True
+
+ def get_indent_spaces(self):
+ sourcefor, n = self._indent_spaces_cache
+ if sourcefor == self.source:
+ return n
+
+ # self.source always has a trailing newline
+ n = find_next_indent(self.source[:-1])
+ self._indent_spaces_cache = (self.source, n)
+ return n
+
+ # Backwards compatibility. I think all code that used .indent_spaces was
+ # inside IPython, but we can leave this here until IPython 7 in case any
+ # other modules are using it. -TK, November 2017
+ indent_spaces = property(get_indent_spaces)
+
+ def _store(self, lines, buffer=None, store='source'):
+ """Store one or more lines of input.
+
+ If input lines are not newline-terminated, a newline is automatically
+ appended."""
+
+ if buffer is None:
+ buffer = self._buffer
+
+ if lines.endswith('\n'):
+ buffer.append(lines)
+ else:
+ buffer.append(lines+'\n')
+ setattr(self, store, self._set_source(buffer))
+
+ def _set_source(self, buffer):
+ return u''.join(buffer)
+
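+# Example (illustrative): the stateless check_complete() API, which is what
+# line-oriented frontends typically call.
+#
+#     isp = InputSplitter()
+#     isp.check_complete("a = 1")                 # -> ('complete', None)
+#     isp.check_complete("for i in range(3):")    # -> ('incomplete', 4)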
+
+class IPythonInputSplitter(InputSplitter):
+ """An input splitter that recognizes all of IPython's special syntax."""
+
+ # String with raw, untransformed input.
+ source_raw = ''
+
+ # Flag to track when a transformer has stored input that it hasn't given
+ # back yet.
+ transformer_accumulating = False
+
+ # Flag to track when assemble_python_lines has stored input that it hasn't
+ # given back yet.
+ within_python_line = False
+
+ # Private attributes
+
+ # List with lines of raw input accumulated so far.
+ _buffer_raw = None
+
+ def __init__(self, line_input_checker=True, physical_line_transforms=None,
+ logical_line_transforms=None, python_line_transforms=None):
+ super(IPythonInputSplitter, self).__init__()
+ self._buffer_raw = []
+ self._validate = True
+
+ if physical_line_transforms is not None:
+ self.physical_line_transforms = physical_line_transforms
+ else:
+ self.physical_line_transforms = [
+ leading_indent(),
+ classic_prompt(),
+ ipy_prompt(),
+ cellmagic(end_on_blank_line=line_input_checker),
+ ]
+
+ self.assemble_logical_lines = assemble_logical_lines()
+ if logical_line_transforms is not None:
+ self.logical_line_transforms = logical_line_transforms
+ else:
+ self.logical_line_transforms = [
+ help_end(),
+ escaped_commands(),
+ assign_from_magic(),
+ assign_from_system(),
+ ]
+
+ self.assemble_python_lines = assemble_python_lines()
+ if python_line_transforms is not None:
+ self.python_line_transforms = python_line_transforms
+ else:
+ # We don't use any of these at present
+ self.python_line_transforms = []
+
+ @property
+ def transforms(self):
+ "Quick access to all transformers."
+ return self.physical_line_transforms + \
+ [self.assemble_logical_lines] + self.logical_line_transforms + \
+ [self.assemble_python_lines] + self.python_line_transforms
+
+ @property
+ def transforms_in_use(self):
+ """Transformers, excluding logical line transformers if we're in a
+ Python line."""
+ t = self.physical_line_transforms[:]
+ if not self.within_python_line:
+ t += [self.assemble_logical_lines] + self.logical_line_transforms
+ return t + [self.assemble_python_lines] + self.python_line_transforms
+
+ def reset(self):
+ """Reset the input buffer and associated state."""
+ super(IPythonInputSplitter, self).reset()
+ self._buffer_raw[:] = []
+ self.source_raw = ''
+ self.transformer_accumulating = False
+ self.within_python_line = False
+
+ for t in self.transforms:
+ try:
+ t.reset()
+ except SyntaxError:
+ # Nothing that calls reset() expects to handle transformer
+ # errors
+ pass
+
+ def flush_transformers(self):
+ def _flush(transform, outs):
+ """yield transformed lines
+
+ always strings, never None
+
+ transform: the current transform
+ outs: an iterable of previously transformed inputs.
+ Each may be multiline, which will be passed
+ one line at a time to transform.
+ """
+ for out in outs:
+ for line in out.splitlines():
+ # push one line at a time
+ tmp = transform.push(line)
+ if tmp is not None:
+ yield tmp
+
+ # reset the transform
+ tmp = transform.reset()
+ if tmp is not None:
+ yield tmp
+
+ out = []
+ for t in self.transforms_in_use:
+ out = _flush(t, out)
+
+ out = list(out)
+ if out:
+ self._store('\n'.join(out))
+
+ def raw_reset(self):
+ """Return raw input only and perform a full reset.
+ """
+ out = self.source_raw
+ self.reset()
+ return out
+
+ def source_reset(self):
+ try:
+ self.flush_transformers()
+ return self.source
+ finally:
+ self.reset()
+
+ def push_accepts_more(self):
+ if self.transformer_accumulating:
+ return True
+ else:
+ return super(IPythonInputSplitter, self).push_accepts_more()
+
+ def transform_cell(self, cell):
+ """Process and translate a cell of input.
+ """
+ self.reset()
+ try:
+ self.push(cell)
+ self.flush_transformers()
+ return self.source
+ finally:
+ self.reset()
+
+ def push(self, lines:str) -> bool:
+ """Push one or more lines of IPython input.
+
+ This stores the given lines and returns a status code indicating
+ whether the code forms a complete Python block or not, after processing
+ all input lines for special IPython syntax.
+
+ Any exceptions generated in compilation are swallowed, but if an
+ exception was produced, the method returns True.
+
+ Parameters
+ ----------
+ lines : string
+ One or more lines of Python input.
+
+ Returns
+ -------
+ is_complete : boolean
+ True if the current input source (the result of the current input
+ plus prior inputs) forms a complete Python execution block. Note that
+ this value is also stored as a private attribute (_is_complete), so it
+ can be queried at any time.
+ """
+ assert isinstance(lines, str)
+ # We must ensure all input is pure unicode
+ # ''.splitlines() --> [], but we need to push the empty line to transformers
+ lines_list = lines.splitlines()
+ if not lines_list:
+ lines_list = ['']
+
+ # Store raw source before applying any transformations to it. Note
+ # that this must be done *after* the reset() call that would otherwise
+ # flush the buffer.
+ self._store(lines, self._buffer_raw, 'source_raw')
+
+ transformed_lines_list = []
+ for line in lines_list:
+ transformed = self._transform_line(line)
+ if transformed is not None:
+ transformed_lines_list.append(transformed)
+
+ if transformed_lines_list:
+ transformed_lines = '\n'.join(transformed_lines_list)
+ return super(IPythonInputSplitter, self).push(transformed_lines)
+ else:
+ # Got nothing back from transformers - they must be waiting for
+ # more input.
+ return False
+
+ def _transform_line(self, line):
+ """Push a line of input code through the various transformers.
+
+ Returns any output from the transformers, or None if a transformer
+ is accumulating lines.
+
+ Sets self.transformer_accumulating as a side effect.
+ """
+ def _accumulating(dbg):
+ #print(dbg)
+ self.transformer_accumulating = True
+ return None
+
+ for transformer in self.physical_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ if not self.within_python_line:
+ line = self.assemble_logical_lines.push(line)
+ if line is None:
+ return _accumulating('acc logical line')
+
+ for transformer in self.logical_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ line = self.assemble_python_lines.push(line)
+ if line is None:
+ self.within_python_line = True
+ return _accumulating('acc python line')
+ else:
+ self.within_python_line = False
+
+ for transformer in self.python_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ #print("transformers clear") #debug
+ self.transformer_accumulating = False
+ return line
+
diff --git a/contrib/python/ipython/py3/IPython/core/inputtransformer.py b/contrib/python/ipython/py3/IPython/core/inputtransformer.py
new file mode 100644
index 0000000000..77f69f388f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/inputtransformer.py
@@ -0,0 +1,536 @@
+"""DEPRECATED: Input transformer classes to support IPython special syntax.
+
+This module was deprecated in IPython 7.0, in favour of inputtransformer2.
+
+This includes the machinery to recognise and transform ``%magic`` commands,
+``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
+"""
+import abc
+import functools
+import re
+import tokenize
+from tokenize import generate_tokens, untokenize, TokenError
+from io import StringIO
+
+from IPython.core.splitinput import LineInfo
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# The escape sequences that define the syntax transformations IPython will
+# apply to user input. These can NOT be just changed here: many regular
+# expressions and other parts of the code may use their hardcoded values, and
+# for all intents and purposes they constitute the 'IPython syntax', so they
+# should be considered fixed.
+
+ESC_SHELL = '!' # Send line to underlying system shell
+ESC_SH_CAP = '!!' # Send line to system shell and capture output
+ESC_HELP = '?' # Find information about object
+ESC_HELP2 = '??' # Find extra-detailed information about object
+ESC_MAGIC = '%' # Call magic function
+ESC_MAGIC2 = '%%' # Call cell-magic function
+ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
+ESC_QUOTE2 = ';' # Quote all args as a single string, call
+ESC_PAREN = '/' # Call first argument with rest of line as arguments
+
+ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
+ ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
+ ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
+
+
+class InputTransformer(metaclass=abc.ABCMeta):
+ """Abstract base class for line-based input transformers."""
+
+ @abc.abstractmethod
+ def push(self, line):
+ """Send a line of input to the transformer, returning the transformed
+ input or None if the transformer is waiting for more input.
+
+ Must be overridden by subclasses.
+
+ Implementations may raise ``SyntaxError`` if the input is invalid. No
+ other exceptions may be raised.
+ """
+ pass
+
+ @abc.abstractmethod
+ def reset(self):
+ """Return, transformed any lines that the transformer has accumulated,
+ and reset its internal state.
+
+ Must be overridden by subclasses.
+ """
+ pass
+
+ @classmethod
+ def wrap(cls, func):
+ """Can be used by subclasses as a decorator, to return a factory that
+ will allow instantiation with the decorated object.
+ """
+ @functools.wraps(func)
+ def transformer_factory(**kwargs):
+ return cls(func, **kwargs)
+
+ return transformer_factory
+
+class StatelessInputTransformer(InputTransformer):
+ """Wrapper for a stateless input transformer implemented as a function."""
+ def __init__(self, func):
+ self.func = func
+
+ def __repr__(self):
+ return "StatelessInputTransformer(func={0!r})".format(self.func)
+
+ def push(self, line):
+ """Send a line of input to the transformer, returning the
+ transformed input."""
+ return self.func(line)
+
+ def reset(self):
+ """No-op - exists for compatibility."""
+ pass
+
+class CoroutineInputTransformer(InputTransformer):
+ """Wrapper for an input transformer implemented as a coroutine."""
+ def __init__(self, coro, **kwargs):
+ # Prime it
+ self.coro = coro(**kwargs)
+ next(self.coro)
+
+ def __repr__(self):
+ return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
+
+ def push(self, line):
+ """Send a line of input to the transformer, returning the
+ transformed input or None if the transformer is waiting for more
+ input.
+ """
+ return self.coro.send(line)
+
+ def reset(self):
+ """Return, transformed any lines that the transformer has
+ accumulated, and reset its internal state.
+ """
+ return self.coro.send(None)
+
+class TokenInputTransformer(InputTransformer):
+ """Wrapper for a token-based input transformer.
+
+ func should accept a list of tokens (5-tuples, see tokenize docs), and
+ return an iterable which can be passed to tokenize.untokenize().
+ """
+ def __init__(self, func):
+ self.func = func
+ self.buf = []
+ self.reset_tokenizer()
+
+ def reset_tokenizer(self):
+ it = iter(self.buf)
+ self.tokenizer = generate_tokens(it.__next__)
+
+ def push(self, line):
+ self.buf.append(line + '\n')
+ if all(l.isspace() for l in self.buf):
+ return self.reset()
+
+ tokens = []
+ stop_at_NL = False
+ try:
+ for intok in self.tokenizer:
+ tokens.append(intok)
+ t = intok[0]
+ if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
+ # Stop before we try to pull a line we don't have yet
+ break
+ elif t == tokenize.ERRORTOKEN:
+ stop_at_NL = True
+ except TokenError:
+ # Multi-line statement - stop and try again with the next line
+ self.reset_tokenizer()
+ return None
+
+ return self.output(tokens)
+
+ def output(self, tokens):
+ self.buf.clear()
+ self.reset_tokenizer()
+ return untokenize(self.func(tokens)).rstrip('\n')
+
+ def reset(self):
+ l = ''.join(self.buf)
+ self.buf.clear()
+ self.reset_tokenizer()
+ if l:
+ return l.rstrip('\n')
+
+class assemble_python_lines(TokenInputTransformer):
+ def __init__(self):
+ super(assemble_python_lines, self).__init__(None)
+
+ def output(self, tokens):
+ return self.reset()
+
+@CoroutineInputTransformer.wrap
+def assemble_logical_lines():
+ r"""Join lines following explicit line continuations (\)"""
+ line = ''
+ while True:
+ line = (yield line)
+ if not line or line.isspace():
+ continue
+
+ parts = []
+ while line is not None:
+ if line.endswith('\\') and (not has_comment(line)):
+ parts.append(line[:-1])
+ line = (yield None) # Get another line
+ else:
+ parts.append(line)
+ break
+
+ # Output
+ line = ''.join(parts)
+
+# Utilities
+def _make_help_call(target, esc, lspace):
+ """Prepares a pinfo(2)/psearch call from a target name and the escape
+ (i.e. ? or ??)"""
+ method = 'pinfo2' if esc == '??' \
+ else 'psearch' if '*' in target \
+ else 'pinfo'
+ arg = " ".join([method, target])
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_magic_name, _, t_magic_arg_s = arg.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return "%sget_ipython().run_line_magic(%r, %r)" % (
+ lspace,
+ t_magic_name,
+ t_magic_arg_s,
+ )
+
+
+# These define the transformations for the different escape characters.
+def _tr_system(line_info):
+ "Translate lines escaped with: !"
+ cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
+ return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
+
+def _tr_system2(line_info):
+ "Translate lines escaped with: !!"
+ cmd = line_info.line.lstrip()[2:]
+ return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
+
+def _tr_help(line_info):
+ "Translate lines escaped with: ?/??"
+ # A naked help line should just fire the intro help screen
+ if not line_info.line[1:]:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
+
+def _tr_magic(line_info):
+ "Translate lines escaped with: %"
+ tpl = '%sget_ipython().run_line_magic(%r, %r)'
+ if line_info.line.startswith(ESC_MAGIC2):
+ return line_info.line
+ cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)
+
+def _tr_quote(line_info):
+ "Translate lines escaped with: ,"
+ return '%s%s("%s")' % (line_info.pre, line_info.ifun,
+ '", "'.join(line_info.the_rest.split()) )
+
+def _tr_quote2(line_info):
+ "Translate lines escaped with: ;"
+ return '%s%s("%s")' % (line_info.pre, line_info.ifun,
+ line_info.the_rest)
+
+def _tr_paren(line_info):
+ "Translate lines escaped with: /"
+ return '%s%s(%s)' % (line_info.pre, line_info.ifun,
+ ", ".join(line_info.the_rest.split()))
+
+tr = { ESC_SHELL : _tr_system,
+ ESC_SH_CAP : _tr_system2,
+ ESC_HELP : _tr_help,
+ ESC_HELP2 : _tr_help,
+ ESC_MAGIC : _tr_magic,
+ ESC_QUOTE : _tr_quote,
+ ESC_QUOTE2 : _tr_quote2,
+ ESC_PAREN : _tr_paren }
+
+@StatelessInputTransformer.wrap
+def escaped_commands(line):
+ """Transform escaped commands - %magic, !system, ?help + various autocalls.
+ """
+ if not line or line.isspace():
+ return line
+ lineinf = LineInfo(line)
+ if lineinf.esc not in tr:
+ return line
+
+ return tr[lineinf.esc](lineinf)
+
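+# Example (an illustrative sketch of this deprecated API): the wrapped
+# function acts as a factory, and ``push()`` only rewrites source text, so
+# nothing is actually executed here:
+#
+# >>> escaped_commands().push('%time x = 1')
+# "get_ipython().run_line_magic('time', 'x = 1')"
+# >>> escaped_commands().push('!ls -l')
+# "get_ipython().system('ls -l')"
+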
+_initial_space_re = re.compile(r'\s*')
+
+_help_end_re = re.compile(r"""(%{0,2}
+ (?!\d)[\w*]+ # Variable name
+ (\.(?!\d)[\w*]+)* # .etc.etc
+ )
+ (\?\??)$ # ? or ??
+ """,
+ re.VERBOSE)
+
+# Extra pseudotokens for multiline strings and data structures
+_MULTILINE_STRING = object()
+_MULTILINE_STRUCTURE = object()
+
+def _line_tokens(line):
+ """Helper for has_comment and ends_in_comment_or_string."""
+ readline = StringIO(line).readline
+ toktypes = set()
+ try:
+ for t in generate_tokens(readline):
+ toktypes.add(t[0])
+ except TokenError as e:
+ # There are only two cases where a TokenError is raised.
+ if 'multi-line string' in e.args[0]:
+ toktypes.add(_MULTILINE_STRING)
+ else:
+ toktypes.add(_MULTILINE_STRUCTURE)
+ return toktypes
+
+def has_comment(src):
+ """Indicate whether an input line has (i.e. ends in, or is) a comment.
+
+ This uses tokenize, so it can distinguish comments from # inside strings.
+
+ Parameters
+ ----------
+ src : string
+ A single line input string.
+
+ Returns
+ -------
+ comment : bool
+ True if source has a comment.
+ """
+ return (tokenize.COMMENT in _line_tokens(src))
+
+def ends_in_comment_or_string(src):
+ """Indicates whether or not an input line ends in a comment or within
+ a multiline string.
+
+ Parameters
+ ----------
+ src : string
+ A single line input string.
+
+ Returns
+ -------
+ comment : bool
+ True if source ends in a comment or multiline string.
+ """
+ toktypes = _line_tokens(src)
+ return (tokenize.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
+
+
+@StatelessInputTransformer.wrap
+def help_end(line):
+ """Translate lines with ?/?? at the end"""
+ m = _help_end_re.search(line)
+ if m is None or ends_in_comment_or_string(line):
+ return line
+ target = m.group(1)
+ esc = m.group(3)
+ lspace = _initial_space_re.match(line).group(0)
+
+ return _make_help_call(target, esc, lspace)
+
+
+@CoroutineInputTransformer.wrap
+def cellmagic(end_on_blank_line=False):
+ """Captures & transforms cell magics.
+
+ After a cell magic is started, this stores up any lines it gets until it is
+ reset (sent None).
+ """
+ tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
+ cellmagic_help_re = re.compile(r'%%\w+\?')
+ line = ''
+ while True:
+ line = (yield line)
+ # consume leading empty lines
+ while not line:
+ line = (yield line)
+
+ if not line.startswith(ESC_MAGIC2):
+ # This isn't a cell magic, idle waiting for reset then start over
+ while line is not None:
+ line = (yield line)
+ continue
+
+ if cellmagic_help_re.match(line):
+ # This case will be handled by help_end
+ continue
+
+ first = line
+ body = []
+ line = (yield None)
+ while (line is not None) and \
+ ((line.strip() != '') or not end_on_blank_line):
+ body.append(line)
+ line = (yield None)
+
+ # Output
+ magic_name, _, first = first.partition(' ')
+ magic_name = magic_name.lstrip(ESC_MAGIC2)
+ line = tpl % (magic_name, first, u'\n'.join(body))
+
+
+def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
+ """Remove matching input prompts from a block of input.
+
+ Parameters
+ ----------
+ prompt_re : regular expression
+ A regular expression matching any input prompt (including continuation)
+ initial_re : regular expression, optional
+ A regular expression matching only the initial prompt, but not continuation.
+ If no initial expression is given, prompt_re will be used everywhere.
+ Used mainly for plain Python prompts, where the continuation prompt
+ ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
+
+ Notes
+ -----
+    If `initial_re` and `prompt_re` differ,
+ only `initial_re` will be tested against the first line.
+ If any prompt is found on the first two lines,
+ prompts will be stripped from the rest of the block.
+ """
+ if initial_re is None:
+ initial_re = prompt_re
+ line = ''
+ while True:
+ line = (yield line)
+
+ # First line of cell
+ if line is None:
+ continue
+ out, n1 = initial_re.subn('', line, count=1)
+ if turnoff_re and not n1:
+ if turnoff_re.match(line):
+ # We're in e.g. a cell magic; disable this transformer for
+ # the rest of the cell.
+ while line is not None:
+ line = (yield line)
+ continue
+
+ line = (yield out)
+
+ if line is None:
+ continue
+ # check for any prompt on the second line of the cell,
+ # because people often copy from just after the first prompt,
+ # so we might not see it in the first line.
+ out, n2 = prompt_re.subn('', line, count=1)
+ line = (yield out)
+
+ if n1 or n2:
+ # Found a prompt in the first two lines - check for it in
+ # the rest of the cell as well.
+ while line is not None:
+ line = (yield prompt_re.sub('', line, count=1))
+
+ else:
+ # Prompts not in input - wait for reset
+ while line is not None:
+ line = (yield line)
+
+@CoroutineInputTransformer.wrap
+def classic_prompt():
+ """Strip the >>>/... prompts of the Python interactive shell."""
+ # FIXME: non-capturing version (?:...) usable?
+ prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
+ initial_re = re.compile(r'^>>>( |$)')
+ # Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
+ turnoff_re = re.compile(r'^[%!]')
+ return _strip_prompts(prompt_re, initial_re, turnoff_re)
+
+@CoroutineInputTransformer.wrap
+def ipy_prompt():
+ """Strip IPython's In [1]:/...: prompts."""
+ # FIXME: non-capturing version (?:...) usable?
+ prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
+ # Disable prompt stripping inside cell magics
+ turnoff_re = re.compile(r'^%%')
+ return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
+
+
+@CoroutineInputTransformer.wrap
+def leading_indent():
+ """Remove leading indentation.
+
+    If the first line starts with spaces or tabs, the same whitespace will be
+ removed from each following line until it is reset.
+ """
+ space_re = re.compile(r'^[ \t]+')
+ line = ''
+ while True:
+ line = (yield line)
+
+ if line is None:
+ continue
+
+ m = space_re.match(line)
+ if m:
+ space = m.group(0)
+ while line is not None:
+ if line.startswith(space):
+ line = line[len(space):]
+ line = (yield line)
+ else:
+ # No leading spaces - wait for reset
+ while line is not None:
+ line = (yield line)
+
+
+_assign_pat = \
+r'''(?P<lhs>(\s*)
+ ([\w\.]+) # Initial identifier
+ (\s*,\s*
+ \*?[\w\.]+)* # Further identifiers for unpacking
+ \s*?,? # Trailing comma
+ )
+ \s*=\s*
+'''
+
+assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
+assign_system_template = '%s = get_ipython().getoutput(%r)'
+@StatelessInputTransformer.wrap
+def assign_from_system(line):
+ """Transform assignment from system commands (e.g. files = !ls)"""
+ m = assign_system_re.match(line)
+ if m is None:
+ return line
+
+ return assign_system_template % m.group('lhs', 'cmd')
+
+assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
+assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
+@StatelessInputTransformer.wrap
+def assign_from_magic(line):
+ """Transform assignment from magic commands (e.g. a = %who_ls)"""
+ m = assign_magic_re.match(line)
+ if m is None:
+ return line
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ m_lhs, m_cmd = m.group('lhs', 'cmd')
+ t_magic_name, _, t_magic_arg_s = m_cmd.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return assign_magic_template % (m_lhs, t_magic_name, t_magic_arg_s)
diff --git a/contrib/python/ipython/py3/IPython/core/inputtransformer2.py b/contrib/python/ipython/py3/IPython/core/inputtransformer2.py
new file mode 100644
index 0000000000..37f0e7699c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/inputtransformer2.py
@@ -0,0 +1,797 @@
+"""Input transformer machinery to support IPython special syntax.
+
+This includes the machinery to recognise and transform ``%magic`` commands,
+``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
+
+Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
+deprecated in 7.0.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import ast
+from codeop import CommandCompiler, Compile
+import re
+import tokenize
+from typing import List, Tuple, Optional, Any
+import warnings
+
+_indent_re = re.compile(r'^[ \t]+')
+
+def leading_empty_lines(lines):
+ """Remove leading empty lines
+
+ If the leading lines are empty or contain only whitespace, they will be
+ removed.
+ """
+ if not lines:
+ return lines
+ for i, line in enumerate(lines):
+ if line and not line.isspace():
+ return lines[i:]
+ return lines
+
+def leading_indent(lines):
+ """Remove leading indentation.
+
+    If the first line starts with spaces or tabs, the same whitespace will be
+ removed from each following line in the cell.
+ """
+ if not lines:
+ return lines
+ m = _indent_re.match(lines[0])
+ if not m:
+ return lines
+ space = m.group(0)
+ n = len(space)
+ return [l[n:] if l.startswith(space) else l
+ for l in lines]
+
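+# Example (illustrative sketch; the helper is a pure function on a list of
+# physical lines):
+#
+# >>> leading_indent(['    if x:\n', '        y = 1\n'])
+# ['if x:\n', '    y = 1\n']
+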
+class PromptStripper:
+ """Remove matching input prompts from a block of input.
+
+ Parameters
+ ----------
+ prompt_re : regular expression
+ A regular expression matching any input prompt (including continuation,
+ e.g. ``...``)
+ initial_re : regular expression, optional
+ A regular expression matching only the initial prompt, but not continuation.
+ If no initial expression is given, prompt_re will be used everywhere.
+ Used mainly for plain Python prompts (``>>>``), where the continuation prompt
+ ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
+
+ Notes
+ -----
+
+ If initial_re and prompt_re differ,
+ only initial_re will be tested against the first line.
+ If any prompt is found on the first two lines,
+ prompts will be stripped from the rest of the block.
+ """
+ def __init__(self, prompt_re, initial_re=None):
+ self.prompt_re = prompt_re
+ self.initial_re = initial_re or prompt_re
+
+ def _strip(self, lines):
+ return [self.prompt_re.sub('', l, count=1) for l in lines]
+
+ def __call__(self, lines):
+ if not lines:
+ return lines
+ if self.initial_re.match(lines[0]) or \
+ (len(lines) > 1 and self.prompt_re.match(lines[1])):
+ return self._strip(lines)
+ return lines
+
+classic_prompt = PromptStripper(
+ prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
+ initial_re=re.compile(r'^>>>( |$)')
+)
+
+ipython_prompt = PromptStripper(
+ re.compile(
+ r"""
+ ^( # Match from the beginning of a line, either:
+
+ # 1. First-line prompt:
+ ((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
+ In\ # The 'In' of the prompt, with a space
+ \[\d+\]: # Command index, as displayed in the prompt
+ \ # With a mandatory trailing space
+
+ | # ... or ...
+
+ # 2. The three dots of the multiline prompt
+ \s* # All leading whitespace characters
+ \.{3,}: # The three (or more) dots
+ \ ? # With an optional trailing space
+
+ )
+ """,
+ re.VERBOSE,
+ )
+)
+
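+# Example (illustrative sketch): both strippers operate on lists of physical
+# lines, stripping prompts only when one is detected on the first two lines:
+#
+# >>> classic_prompt(['>>> x = 1\n', '... x += 1\n', 'x\n'])
+# ['x = 1\n', 'x += 1\n', 'x\n']
+# >>> ipython_prompt(['In [1]: y = 2\n', '   ...: y *= 3\n'])
+# ['y = 2\n', 'y *= 3\n']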
+
+def cell_magic(lines):
+ if not lines or not lines[0].startswith('%%'):
+ return lines
+ if re.match(r'%%\w+\?', lines[0]):
+ # This case will be handled by help_end
+ return lines
+ magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
+ body = ''.join(lines[1:])
+ return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
+ % (magic_name, first_line, body)]
+
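+# Example (illustrative sketch): the whole cell body is handed to
+# ``run_cell_magic`` as a single string; only source text is produced:
+#
+# >>> cell_magic(['%%bash\n', 'echo hello\n'])
+# ["get_ipython().run_cell_magic('bash', '', 'echo hello\\n')\n"]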
+
+def _find_assign_op(token_line) -> Optional[int]:
+ """Get the index of the first assignment in the line ('=' not inside brackets)
+
+    Note: We don't try to support multiple special assignments (a = b = %foo).
+ """
+ paren_level = 0
+ for i, ti in enumerate(token_line):
+ s = ti.string
+ if s == '=' and paren_level == 0:
+ return i
+ if s in {'(','[','{'}:
+ paren_level += 1
+ elif s in {')', ']', '}'}:
+ if paren_level > 0:
+ paren_level -= 1
+ return None
+
+def find_end_of_continued_line(lines, start_line: int):
+ """Find the last line of a line explicitly extended using backslashes.
+
+ Uses 0-indexed line numbers.
+ """
+ end_line = start_line
+ while lines[end_line].endswith('\\\n'):
+ end_line += 1
+ if end_line >= len(lines):
+ break
+ return end_line
+
+def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
+ r"""Assemble a single line from multiple continued line pieces
+
+ Continued lines are lines ending in ``\``, and the line following the last
+ ``\`` in the block.
+
+ For example, this code continues over multiple lines::
+
+ if (assign_ix is not None) \
+ and (len(line) >= assign_ix + 2) \
+ and (line[assign_ix+1].string == '%') \
+ and (line[assign_ix+2].type == tokenize.NAME):
+
+ This statement contains four continued line pieces.
+ Assembling these pieces into a single line would give::
+
+ if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
+
+ This uses 0-indexed line numbers. *start* is (lineno, colno).
+
+ Used to allow ``%magic`` and ``!system`` commands to be continued over
+ multiple lines.
+ """
+ parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
+ return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
+ + [parts[-1].rstrip()]) # Strip newline from last line
+
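+# Example (illustrative sketch): a backslash-continued right-hand side is
+# reassembled into a single piece of source text, here starting at column 4:
+#
+# >>> lines = ["a = %ls\\\n", "-l\n"]
+# >>> end = find_end_of_continued_line(lines, 0)
+# >>> end
+# 1
+# >>> assemble_continued_line(lines, (0, 4), end)
+# '%ls -l'
+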
+class TokenTransformBase:
+ """Base class for transformations which examine tokens.
+
+ Special syntax should not be transformed when it occurs inside strings or
+ comments. This is hard to reliably avoid with regexes. The solution is to
+ tokenise the code as Python, and recognise the special syntax in the tokens.
+
+ IPython's special syntax is not valid Python syntax, so tokenising may go
+ wrong after the special syntax starts. These classes therefore find and
+ transform *one* instance of special syntax at a time into regular Python
+ syntax. After each transformation, tokens are regenerated to find the next
+ piece of special syntax.
+
+ Subclasses need to implement one class method (find)
+ and one regular method (transform).
+
+ The priority attribute can select which transformation to apply if multiple
+ transformers match in the same place. Lower numbers have higher priority.
+ This allows "%magic?" to be turned into a help call rather than a magic call.
+ """
+ # Lower numbers -> higher priority (for matches in the same location)
+ priority = 10
+
+ def sortby(self):
+ return self.start_line, self.start_col, self.priority
+
+ def __init__(self, start):
+ self.start_line = start[0] - 1 # Shift from 1-index to 0-index
+ self.start_col = start[1]
+
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find one instance of special syntax in the provided tokens.
+
+ Tokens are grouped into logical lines for convenience,
+ so it is easy to e.g. look at the first token of each line.
+ *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
+
+ This should return an instance of its class, pointing to the start
+ position it has found, or None if it found no match.
+ """
+ raise NotImplementedError
+
+ def transform(self, lines: List[str]):
+ """Transform one instance of special syntax found by ``find()``
+
+ Takes a list of strings representing physical lines,
+ returns a similar list of transformed lines.
+ """
+ raise NotImplementedError
+
+class MagicAssign(TokenTransformBase):
+ """Transformer for assignments from magics (a = %foo)"""
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first magic assignment (a = %foo) in the cell.
+ """
+ for line in tokens_by_line:
+ assign_ix = _find_assign_op(line)
+ if (assign_ix is not None) \
+ and (len(line) >= assign_ix + 2) \
+ and (line[assign_ix+1].string == '%') \
+ and (line[assign_ix+2].type == tokenize.NAME):
+ return cls(line[assign_ix+1].start)
+
+ def transform(self, lines: List[str]):
+ """Transform a magic assignment found by the ``find()`` classmethod.
+ """
+ start_line, start_col = self.start_line, self.start_col
+ lhs = lines[start_line][:start_col]
+ end_line = find_end_of_continued_line(lines, start_line)
+ rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
+ assert rhs.startswith('%'), rhs
+ magic_name, _, args = rhs[1:].partition(' ')
+
+ lines_before = lines[:start_line]
+ call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
+ new_line = lhs + call + '\n'
+ lines_after = lines[end_line+1:]
+
+ return lines_before + [new_line] + lines_after
+
+
+class SystemAssign(TokenTransformBase):
+ """Transformer for assignments from system commands (a = !foo)"""
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first system assignment (a = !foo) in the cell.
+ """
+ for line in tokens_by_line:
+ assign_ix = _find_assign_op(line)
+ if (assign_ix is not None) \
+ and not line[assign_ix].line.strip().startswith('=') \
+ and (len(line) >= assign_ix + 2) \
+ and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
+ ix = assign_ix + 1
+
+ while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
+ if line[ix].string == '!':
+ return cls(line[ix].start)
+ elif not line[ix].string.isspace():
+ break
+ ix += 1
+
+ def transform(self, lines: List[str]):
+ """Transform a system assignment found by the ``find()`` classmethod.
+ """
+ start_line, start_col = self.start_line, self.start_col
+
+ lhs = lines[start_line][:start_col]
+ end_line = find_end_of_continued_line(lines, start_line)
+ rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
+ assert rhs.startswith('!'), rhs
+ cmd = rhs[1:]
+
+ lines_before = lines[:start_line]
+ call = "get_ipython().getoutput({!r})".format(cmd)
+ new_line = lhs + call + '\n'
+ lines_after = lines[end_line + 1:]
+
+ return lines_before + [new_line] + lines_after
+
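+# Example (illustrative sketch): applied to the token groups produced by
+# ``make_tokens_by_line`` (defined further down in this module), only the
+# source text is rewritten; MagicAssign emits ``run_line_magic`` calls in the
+# same way:
+#
+# >>> lines = ["dirs = !ls -d */\n"]
+# >>> tf = SystemAssign.find(make_tokens_by_line(lines))
+# >>> tf.transform(lines)
+# ["dirs = get_ipython().getoutput('ls -d */')\n"]
+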
+# The escape sequences that define the syntax transformations IPython will
+# apply to user input. These can NOT be just changed here: many regular
+# expressions and other parts of the code may use their hardcoded values, and
+# for all intents and purposes they constitute the 'IPython syntax', so they
+# should be considered fixed.
+
+ESC_SHELL = '!' # Send line to underlying system shell
+ESC_SH_CAP = '!!' # Send line to system shell and capture output
+ESC_HELP = '?' # Find information about object
+ESC_HELP2 = '??' # Find extra-detailed information about object
+ESC_MAGIC = '%' # Call magic function
+ESC_MAGIC2 = '%%' # Call cell-magic function
+ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
+ESC_QUOTE2 = ';' # Quote all args as a single string, call
+ESC_PAREN = '/' # Call first argument with rest of line as arguments
+
+ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
+ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
+
+def _make_help_call(target, esc):
+ """Prepares a pinfo(2)/psearch call from a target name and the escape
+ (i.e. ? or ??)"""
+ method = 'pinfo2' if esc == '??' \
+ else 'psearch' if '*' in target \
+ else 'pinfo'
+ arg = " ".join([method, target])
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_magic_name, _, t_magic_arg_s = arg.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ return "get_ipython().run_line_magic(%r, %r)" % (t_magic_name, t_magic_arg_s)
+
+
+def _tr_help(content):
+ """Translate lines escaped with: ?
+
+ A naked help line should fire the intro help screen (shell.show_usage())
+ """
+ if not content:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(content, '?')
+
+def _tr_help2(content):
+ """Translate lines escaped with: ??
+
+ A naked help line should fire the intro help screen (shell.show_usage())
+ """
+ if not content:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(content, '??')
+
+def _tr_magic(content):
+ "Translate lines escaped with a percent sign: %"
+ name, _, args = content.partition(' ')
+ return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
+
+def _tr_quote(content):
+ "Translate lines escaped with a comma: ,"
+ name, _, args = content.partition(' ')
+ return '%s("%s")' % (name, '", "'.join(args.split()) )
+
+def _tr_quote2(content):
+ "Translate lines escaped with a semicolon: ;"
+ name, _, args = content.partition(' ')
+ return '%s("%s")' % (name, args)
+
+def _tr_paren(content):
+ "Translate lines escaped with a slash: /"
+ name, _, args = content.partition(' ')
+ return '%s(%s)' % (name, ", ".join(args.split()))
+
+tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
+ ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
+ ESC_HELP : _tr_help,
+ ESC_HELP2 : _tr_help2,
+ ESC_MAGIC : _tr_magic,
+ ESC_QUOTE : _tr_quote,
+ ESC_QUOTE2 : _tr_quote2,
+ ESC_PAREN : _tr_paren }
+
+class EscapedCommand(TokenTransformBase):
+ """Transformer for escaped commands like %foo, !foo, or /foo"""
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first escaped command (%foo, !foo, etc.) in the cell.
+ """
+ for line in tokens_by_line:
+ if not line:
+ continue
+ ix = 0
+ ll = len(line)
+ while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
+ ix += 1
+ if ix >= ll:
+ continue
+ if line[ix].string in ESCAPE_SINGLES:
+ return cls(line[ix].start)
+
+ def transform(self, lines):
+ """Transform an escaped line found by the ``find()`` classmethod.
+ """
+ start_line, start_col = self.start_line, self.start_col
+
+ indent = lines[start_line][:start_col]
+ end_line = find_end_of_continued_line(lines, start_line)
+ line = assemble_continued_line(lines, (start_line, start_col), end_line)
+
+ if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
+ escape, content = line[:2], line[2:]
+ else:
+ escape, content = line[:1], line[1:]
+
+ if escape in tr:
+ call = tr[escape](content)
+ else:
+ call = ''
+
+ lines_before = lines[:start_line]
+ new_line = indent + call + '\n'
+ lines_after = lines[end_line + 1:]
+
+ return lines_before + [new_line] + lines_after
+
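+# Example (illustrative sketch): find() locates the escape character and
+# transform() rewrites that single line, leaving the rest of the cell alone:
+#
+# >>> lines = ["!echo hi\n"]
+# >>> tf = EscapedCommand.find(make_tokens_by_line(lines))
+# >>> tf.transform(lines)
+# ["get_ipython().system('echo hi')\n"]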
+
+_help_end_re = re.compile(
+ r"""(%{0,2}
+ (?!\d)[\w*]+ # Variable name
+ (\.(?!\d)[\w*]+|\[-?[0-9]+\])* # .etc.etc or [0], we only support literal integers.
+ )
+ (\?\??)$ # ? or ??
+ """,
+ re.VERBOSE,
+)
+
+
+class HelpEnd(TokenTransformBase):
+ """Transformer for help syntax: obj? and obj??"""
+ # This needs to be higher priority (lower number) than EscapedCommand so
+ # that inspecting magics (%foo?) works.
+ priority = 5
+
+ def __init__(self, start, q_locn):
+ super().__init__(start)
+ self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
+ self.q_col = q_locn[1]
+
+ @classmethod
+ def find(cls, tokens_by_line):
+ """Find the first help command (foo?) in the cell.
+ """
+ for line in tokens_by_line:
+ # Last token is NEWLINE; look at last but one
+ if len(line) > 2 and line[-2].string == '?':
+ # Find the first token that's not INDENT/DEDENT
+ ix = 0
+ while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
+ ix += 1
+ return cls(line[ix].start, line[-2].start)
+
+ def transform(self, lines):
+ """Transform a help command found by the ``find()`` classmethod.
+ """
+
+ piece = "".join(lines[self.start_line : self.q_line + 1])
+ indent, content = piece[: self.start_col], piece[self.start_col :]
+ lines_before = lines[: self.start_line]
+ lines_after = lines[self.q_line + 1 :]
+
+ m = _help_end_re.search(content)
+ if not m:
+ raise SyntaxError(content)
+ assert m is not None, content
+ target = m.group(1)
+ esc = m.group(3)
+
+
+ call = _make_help_call(target, esc)
+ new_line = indent + call + '\n'
+
+ return lines_before + [new_line] + lines_after
+
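+# Example (illustrative sketch): the trailing ``?`` becomes a ``pinfo`` call;
+# because HelpEnd has a lower priority number than EscapedCommand, ``%magic?``
+# is also routed here rather than being run as a magic:
+#
+# >>> lines = ["math.sqrt?\n"]
+# >>> tf = HelpEnd.find(make_tokens_by_line(lines))
+# >>> tf.transform(lines)
+# ["get_ipython().run_line_magic('pinfo', 'math.sqrt')\n"]
+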
+def make_tokens_by_line(lines:List[str]):
+ """Tokenize a series of lines and group tokens by line.
+
+ The tokens for a multiline Python string or expression are grouped as one
+    line. All lines except the last should keep their line ending ('\\n',
+    '\\r\\n') for this to work properly. For example, use
+    `.splitlines(keepends=True)` when passing a block of text to this function.
+
+ """
+ # NL tokens are used inside multiline expressions, but also after blank
+ # lines or comments. This is intentional - see https://bugs.python.org/issue17061
+ # We want to group the former case together but split the latter, so we
+ # track parentheses level, similar to the internals of tokenize.
+
+ # reexported from token on 3.7+
+ NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
+ tokens_by_line: List[List[Any]] = [[]]
+ if len(lines) > 1 and not lines[0].endswith(("\n", "\r", "\r\n", "\x0b", "\x0c")):
+ warnings.warn(
+ "`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified",
+ stacklevel=2,
+ )
+ parenlev = 0
+ try:
+ for token in tokenize.generate_tokens(iter(lines).__next__):
+ tokens_by_line[-1].append(token)
+ if (token.type == NEWLINE) \
+ or ((token.type == NL) and (parenlev <= 0)):
+ tokens_by_line.append([])
+ elif token.string in {'(', '[', '{'}:
+ parenlev += 1
+ elif token.string in {')', ']', '}'}:
+ if parenlev > 0:
+ parenlev -= 1
+ except tokenize.TokenError:
+ # Input ended in a multiline string or expression. That's OK for us.
+ pass
+
+
+ if not tokens_by_line[-1]:
+ tokens_by_line.pop()
+
+
+ return tokens_by_line
+
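+# Example (illustrative sketch): a parenthesised expression spanning two
+# physical lines ends up in a single token group:
+#
+# >>> tbl = make_tokens_by_line(["a = (1 +\n", "     2)\n"])
+# >>> {tok.start[0] for tok in tbl[0]}
+# {1, 2}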
+
+def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
+ """Check if the depth of brackets in the list of tokens drops below 0"""
+ parenlev = 0
+ for token in tokens:
+ if token.string in {"(", "[", "{"}:
+ parenlev += 1
+ elif token.string in {")", "]", "}"}:
+ parenlev -= 1
+ if parenlev < 0:
+ return True
+ return False
+
+
+def show_linewise_tokens(s: str):
+ """For investigation and debugging"""
+ warnings.warn(
+ "show_linewise_tokens is deprecated since IPython 8.6",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if not s.endswith("\n"):
+ s += "\n"
+ lines = s.splitlines(keepends=True)
+ for line in make_tokens_by_line(lines):
+ print("Line -------")
+ for tokinfo in line:
+ print(" ", tokinfo)
+
+# Arbitrary limit to prevent getting stuck in infinite loops
+TRANSFORM_LOOP_LIMIT = 500
+
+class TransformerManager:
+ """Applies various transformations to a cell or code block.
+
+ The key methods for external use are ``transform_cell()``
+ and ``check_complete()``.
+ """
+ def __init__(self):
+ self.cleanup_transforms = [
+ leading_empty_lines,
+ leading_indent,
+ classic_prompt,
+ ipython_prompt,
+ ]
+ self.line_transforms = [
+ cell_magic,
+ ]
+ self.token_transformers = [
+ MagicAssign,
+ SystemAssign,
+ EscapedCommand,
+ HelpEnd,
+ ]
+
+ def do_one_token_transform(self, lines):
+ """Find and run the transform earliest in the code.
+
+ Returns (changed, lines).
+
+ This method is called repeatedly until changed is False, indicating
+ that all available transformations are complete.
+
+ The tokens following IPython special syntax might not be valid, so
+ the transformed code is retokenised every time to identify the next
+ piece of special syntax. Hopefully long code cells are mostly valid
+ Python, not using lots of IPython special syntax, so this shouldn't be
+ a performance issue.
+ """
+ tokens_by_line = make_tokens_by_line(lines)
+ candidates = []
+ for transformer_cls in self.token_transformers:
+ transformer = transformer_cls.find(tokens_by_line)
+ if transformer:
+ candidates.append(transformer)
+
+ if not candidates:
+ # Nothing to transform
+ return False, lines
+ ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
+ for transformer in ordered_transformers:
+ try:
+ return True, transformer.transform(lines)
+ except SyntaxError:
+ pass
+ return False, lines
+
+ def do_token_transforms(self, lines):
+ for _ in range(TRANSFORM_LOOP_LIMIT):
+ changed, lines = self.do_one_token_transform(lines)
+ if not changed:
+ return lines
+
+ raise RuntimeError("Input transformation still changing after "
+ "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
+
+ def transform_cell(self, cell: str) -> str:
+ """Transforms a cell of input code"""
+ if not cell.endswith('\n'):
+ cell += '\n' # Ensure the cell has a trailing newline
+ lines = cell.splitlines(keepends=True)
+ for transform in self.cleanup_transforms + self.line_transforms:
+ lines = transform(lines)
+
+ lines = self.do_token_transforms(lines)
+ return ''.join(lines)
+
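+    # Example (illustrative sketch): prompts are stripped first, then the
+    # token transformers rewrite the special syntax; only source text is
+    # produced, nothing is executed:
+    #
+    # >>> TransformerManager().transform_cell(">>> files = !ls\n")
+    # "files = get_ipython().getoutput('ls')\n"
+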
+ def check_complete(self, cell: str):
+ """Return whether a block of code is ready to execute, or should be continued
+
+ Parameters
+ ----------
+ cell : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+            One of 'complete', 'incomplete', or 'invalid'. 'invalid' means
+            the source is not a prefix of valid code.
+ indent_spaces : int or None
+ The number of spaces by which to indent the next line of code. If
+ status is not 'incomplete', this is None.
+ """
+ # Remember if the lines ends in a new line.
+ ends_with_newline = False
+ for character in reversed(cell):
+ if character == '\n':
+ ends_with_newline = True
+ break
+ elif character.strip():
+ break
+ else:
+ continue
+
+ if not ends_with_newline:
+            # Append a newline for consistent tokenization
+ # See https://bugs.python.org/issue33899
+ cell += '\n'
+
+ lines = cell.splitlines(keepends=True)
+
+ if not lines:
+ return 'complete', None
+
+ if lines[-1].endswith('\\'):
+ # Explicit backslash continuation
+ return 'incomplete', find_last_indent(lines)
+
+ try:
+ for transform in self.cleanup_transforms:
+ if not getattr(transform, 'has_side_effects', False):
+ lines = transform(lines)
+ except SyntaxError:
+ return 'invalid', None
+
+ if lines[0].startswith('%%'):
+ # Special case for cell magics - completion marked by blank line
+ if lines[-1].strip():
+ return 'incomplete', find_last_indent(lines)
+ else:
+ return 'complete', None
+
+ try:
+ for transform in self.line_transforms:
+ if not getattr(transform, 'has_side_effects', False):
+ lines = transform(lines)
+ lines = self.do_token_transforms(lines)
+ except SyntaxError:
+ return 'invalid', None
+
+ tokens_by_line = make_tokens_by_line(lines)
+
+ # Bail if we got one line and there are more closing parentheses than
+ # the opening ones
+ if (
+ len(lines) == 1
+ and tokens_by_line
+ and has_sunken_brackets(tokens_by_line[0])
+ ):
+ return "invalid", None
+
+ if not tokens_by_line:
+ return 'incomplete', find_last_indent(lines)
+
+ if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
+ # We're in a multiline string or expression
+ return 'incomplete', find_last_indent(lines)
+
+ newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
+
+ # Pop the last line which only contains DEDENTs and ENDMARKER
+ last_token_line = None
+ if {t.type for t in tokens_by_line[-1]} in [
+ {tokenize.DEDENT, tokenize.ENDMARKER},
+ {tokenize.ENDMARKER}
+ ] and len(tokens_by_line) > 1:
+ last_token_line = tokens_by_line.pop()
+
+ while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
+ tokens_by_line[-1].pop()
+
+ if not tokens_by_line[-1]:
+ return 'incomplete', find_last_indent(lines)
+
+ if tokens_by_line[-1][-1].string == ':':
+ # The last line starts a block (e.g. 'if foo:')
+ ix = 0
+ while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
+ ix += 1
+
+ indent = tokens_by_line[-1][ix].start[1]
+ return 'incomplete', indent + 4
+
+ if tokens_by_line[-1][0].line.endswith('\\'):
+ return 'incomplete', None
+
+ # At this point, our checks think the code is complete (or invalid).
+ # We'll use codeop.compile_command to check this with the real parser
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', SyntaxWarning)
+ res = compile_command(''.join(lines), symbol='exec')
+ except (SyntaxError, OverflowError, ValueError, TypeError,
+ MemoryError, SyntaxWarning):
+ return 'invalid', None
+ else:
+ if res is None:
+ return 'incomplete', find_last_indent(lines)
+
+ if last_token_line and last_token_line[0].type == tokenize.DEDENT:
+ if ends_with_newline:
+ return 'complete', None
+ return 'incomplete', find_last_indent(lines)
+
+ # If there's a blank line at the end, assume we're ready to execute
+ if not lines[-1].strip():
+ return 'complete', None
+
+ return 'complete', None
+
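+# Example (illustrative sketch of check_complete()): the status string is
+# paired with the suggested indent for the next line:
+#
+# >>> mgr = TransformerManager()
+# >>> mgr.check_complete("a = 1\n")
+# ('complete', None)
+# >>> mgr.check_complete("def f(x):\n")
+# ('incomplete', 4)
+# >>> mgr.check_complete("a = (1,")
+# ('incomplete', 0)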
+
+def find_last_indent(lines):
+ m = _indent_re.match(lines[-1])
+ if not m:
+ return 0
+ return len(m.group(0).replace('\t', ' '*4))
+
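+# Example (illustrative sketch): tabs count as four spaces here.
+#
+# >>> find_last_indent(["if x:\n", "    y = 1\n"])
+# 4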
+
+class MaybeAsyncCompile(Compile):
+ def __init__(self, extra_flags=0):
+ super().__init__()
+ self.flags |= extra_flags
+
+
+class MaybeAsyncCommandCompiler(CommandCompiler):
+ def __init__(self, extra_flags=0):
+ self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
+
+
+_extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
+
+compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)
diff --git a/contrib/python/ipython/py3/IPython/core/interactiveshell.py b/contrib/python/ipython/py3/IPython/core/interactiveshell.py
new file mode 100644
index 0000000000..7392de7c02
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/interactiveshell.py
@@ -0,0 +1,3910 @@
+# -*- coding: utf-8 -*-
+"""Main IPython class."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+import abc
+import ast
+import atexit
+import bdb
+import builtins as builtin_mod
+import functools
+import inspect
+import os
+import re
+import runpy
+import subprocess
+import sys
+import tempfile
+import traceback
+import types
+import warnings
+from ast import stmt
+from io import open as io_open
+from logging import error
+from pathlib import Path
+from typing import Callable
+from typing import List as ListType, Dict as DictType, Any as AnyType
+from typing import Optional, Sequence, Tuple
+from warnings import warn
+
+from pickleshare import PickleShareDB
+from tempfile import TemporaryDirectory
+from traitlets import (
+ Any,
+ Bool,
+ CaselessStrEnum,
+ Dict,
+ Enum,
+ Instance,
+ Integer,
+ List,
+ Type,
+ Unicode,
+ default,
+ observe,
+ validate,
+)
+from traitlets.config.configurable import SingletonConfigurable
+from traitlets.utils.importstring import import_item
+
+import IPython.core.hooks
+from IPython.core import magic, oinspect, page, prefilter, ultratb
+from IPython.core.alias import Alias, AliasManager
+from IPython.core.autocall import ExitAutocall
+from IPython.core.builtin_trap import BuiltinTrap
+from IPython.core.compilerop import CachingCompiler
+from IPython.core.debugger import InterruptiblePdb
+from IPython.core.display_trap import DisplayTrap
+from IPython.core.displayhook import DisplayHook
+from IPython.core.displaypub import DisplayPublisher
+from IPython.core.error import InputRejected, UsageError
+from IPython.core.events import EventManager, available_events
+from IPython.core.extensions import ExtensionManager
+from IPython.core.formatters import DisplayFormatter
+from IPython.core.history import HistoryManager
+from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
+from IPython.core.logger import Logger
+from IPython.core.macro import Macro
+from IPython.core.payload import PayloadManager
+from IPython.core.prefilter import PrefilterManager
+from IPython.core.profiledir import ProfileDir
+from IPython.core.usage import default_banner
+from IPython.display import display
+from IPython.paths import get_ipython_dir
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import PyColorize, io, openpy, py3compat
+from IPython.utils.decorators import undoc
+from IPython.utils.io import ask_yes_no
+from IPython.utils.ipstruct import Struct
+from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
+from IPython.utils.process import getoutput, system
+from IPython.utils.strdispatch import StrDispatch
+from IPython.utils.syspathcontext import prepended_to_syspath
+from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
+from IPython.core.oinspect import OInfo
+
+
+sphinxify: Optional[Callable]
+
+try:
+ import docrepr.sphinxify as sphx
+
+ def sphinxify(oinfo):
+ wrapped_docstring = sphx.wrap_main_docstring(oinfo)
+
+ def sphinxify_docstring(docstring):
+ with TemporaryDirectory() as dirname:
+ return {
+ "text/html": sphx.sphinxify(wrapped_docstring, dirname),
+ "text/plain": docstring,
+ }
+
+ return sphinxify_docstring
+except ImportError:
+ sphinxify = None
+
+
+class ProvisionalWarning(DeprecationWarning):
+ """
+ Warning class for unstable features
+ """
+ pass
+
+from ast import Module
+
+_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
+_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
+
+#-----------------------------------------------------------------------------
+# Await Helpers
+#-----------------------------------------------------------------------------
+
+# we still need to run things using the asyncio eventloop, but there is no
+# async integration
+from .async_helpers import (
+ _asyncio_runner,
+ _curio_runner,
+ _pseudo_sync_runner,
+ _should_be_async,
+ _trio_runner,
+)
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# compiled regexps for autoindent management
+dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+
+def is_integer_string(s: str):
+ """
+ Variant of "str.isnumeric()" that allow negative values and other ints.
+ """
+ try:
+ int(s)
+ return True
+ except ValueError:
+ return False
+ raise ValueError("Unexpected error")
+
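+# Example (illustrative sketch):
+#
+# >>> is_integer_string("-42")
+# True
+# >>> is_integer_string("4.2")
+# False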
+
+@undoc
+def softspace(file, newvalue):
+ """Copied from code.py, to remove the dependency"""
+
+ oldvalue = 0
+ try:
+ oldvalue = file.softspace
+ except AttributeError:
+ pass
+ try:
+ file.softspace = newvalue
+ except (AttributeError, TypeError):
+ # "attribute-less object" or "read-only attributes"
+ pass
+ return oldvalue
+
+@undoc
+def no_op(*a, **kw):
+ pass
+
+
+class SpaceInInput(Exception): pass
+
+
+class SeparateUnicode(Unicode):
+ r"""A Unicode subclass to validate separate_in, separate_out, etc.
+
+ This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
+ """
+
+ def validate(self, obj, value):
+ if value == '0': value = ''
+ value = value.replace('\\n','\n')
+ return super(SeparateUnicode, self).validate(obj, value)
+
+
+@undoc
+class DummyMod(object):
+ """A dummy module used for IPython's interactive module when
+ a namespace must be assigned to the module's __dict__."""
+ __spec__ = None
+
+
+class ExecutionInfo(object):
+ """The arguments used for a call to :meth:`InteractiveShell.run_cell`
+
+ Stores information about what is going to happen.
+ """
+ raw_cell = None
+ store_history = False
+ silent = False
+ shell_futures = True
+ cell_id = None
+
+ def __init__(self, raw_cell, store_history, silent, shell_futures, cell_id):
+ self.raw_cell = raw_cell
+ self.store_history = store_history
+ self.silent = silent
+ self.shell_futures = shell_futures
+ self.cell_id = cell_id
+
+ def __repr__(self):
+ name = self.__class__.__qualname__
+ raw_cell = (
+ (self.raw_cell[:50] + "..") if len(self.raw_cell) > 50 else self.raw_cell
+ )
+ return (
+ '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s cell_id=%s>'
+ % (
+ name,
+ id(self),
+ raw_cell,
+ self.store_history,
+ self.silent,
+ self.shell_futures,
+ self.cell_id,
+ )
+ )
+
+
+class ExecutionResult(object):
+ """The result of a call to :meth:`InteractiveShell.run_cell`
+
+ Stores information about what took place.
+ """
+ execution_count = None
+ error_before_exec = None
+ error_in_exec: Optional[BaseException] = None
+ info = None
+ result = None
+
+ def __init__(self, info):
+ self.info = info
+
+ @property
+ def success(self):
+ return (self.error_before_exec is None) and (self.error_in_exec is None)
+
+ def raise_error(self):
+ """Reraises error if `success` is `False`, otherwise does nothing"""
+ if self.error_before_exec is not None:
+ raise self.error_before_exec
+ if self.error_in_exec is not None:
+ raise self.error_in_exec
+
+ def __repr__(self):
+ name = self.__class__.__qualname__
+ return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
+ (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
+
+@functools.wraps(io_open)
+def _modified_open(file, *args, **kwargs):
+ if file in {0, 1, 2}:
+ raise ValueError(
+ f"IPython won't let you open fd={file} by default "
+ "as it is likely to crash IPython. If you know what you are doing, "
+ "you can use builtins' open."
+ )
+
+ return io_open(file, *args, **kwargs)
+
+class InteractiveShell(SingletonConfigurable):
+ """An enhanced, interactive shell for Python."""
+
+ _instance = None
+
+ ast_transformers = List([], help=
+ """
+ A list of ast.NodeTransformer subclass instances, which will be applied
+ to user input before code is run.
+ """
+ ).tag(config=True)
+
+ autocall = Enum((0,1,2), default_value=0, help=
+ """
+ Make IPython automatically call any callable object even if you didn't
+ type explicit parentheses. For example, 'str 43' becomes 'str(43)'
+ automatically. The value can be '0' to disable the feature, '1' for
+ 'smart' autocall, where it is not applied if there are no more
+ arguments on the line, and '2' for 'full' autocall, where all callable
+ objects are automatically called (even if no arguments are present).
+ """
+ ).tag(config=True)
+
+ autoindent = Bool(True, help=
+ """
+ Autoindent IPython code entered interactively.
+ """
+ ).tag(config=True)
+
+ autoawait = Bool(True, help=
+ """
+ Automatically run await statement in the top level repl.
+ """
+ ).tag(config=True)
+
+ loop_runner_map ={
+ 'asyncio':(_asyncio_runner, True),
+ 'curio':(_curio_runner, True),
+ 'trio':(_trio_runner, True),
+ 'sync': (_pseudo_sync_runner, False)
+ }
+
+ loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
+ allow_none=True,
+ help="""Select the loop runner that will be used to execute top-level asynchronous code"""
+ ).tag(config=True)
+
+ @default('loop_runner')
+ def _default_loop_runner(self):
+ return import_item("IPython.core.interactiveshell._asyncio_runner")
+
+ @validate('loop_runner')
+ def _import_runner(self, proposal):
+ if isinstance(proposal.value, str):
+ if proposal.value in self.loop_runner_map:
+ runner, autoawait = self.loop_runner_map[proposal.value]
+ self.autoawait = autoawait
+ return runner
+ runner = import_item(proposal.value)
+ if not callable(runner):
+ raise ValueError('loop_runner must be callable')
+ return runner
+ if not callable(proposal.value):
+ raise ValueError('loop_runner must be callable')
+ return proposal.value
+
+ automagic = Bool(True, help=
+ """
+ Enable magic commands to be called without the leading %.
+ """
+ ).tag(config=True)
+
+ banner1 = Unicode(default_banner,
+ help="""The part of the banner to be printed before the profile"""
+ ).tag(config=True)
+ banner2 = Unicode('',
+ help="""The part of the banner to be printed after the profile"""
+ ).tag(config=True)
+
+ cache_size = Integer(1000, help=
+ """
+ Set the size of the output cache. The default is 1000, you can
+ change it permanently in your config file. Setting it to 0 completely
+ disables the caching system, and the minimum value accepted is 3 (if
+ you provide a value less than 3, it is reset to 0 and a warning is
+        issued). This limit exists because otherwise you'll spend more
+        time re-flushing a cache that is too small than actually working.
+ """
+ ).tag(config=True)
+ color_info = Bool(True, help=
+ """
+ Use colors for displaying information about objects. Because this
+ information is passed through a pager (like 'less'), and some pagers
+ get confused with color codes, this capability can be turned off.
+ """
+ ).tag(config=True)
+ colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
+ default_value='Neutral',
+ help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
+ ).tag(config=True)
+ debug = Bool(False).tag(config=True)
+ disable_failing_post_execute = Bool(False,
+ help="Don't call post-execute functions that have failed in the past."
+ ).tag(config=True)
+ display_formatter = Instance(DisplayFormatter, allow_none=True)
+ displayhook_class = Type(DisplayHook)
+ display_pub_class = Type(DisplayPublisher)
+ compiler_class = Type(CachingCompiler)
+ inspector_class = Type(
+ oinspect.Inspector, help="Class to use to instantiate the shell inspector"
+ ).tag(config=True)
+
+ sphinxify_docstring = Bool(False, help=
+ """
+ Enables rich html representation of docstrings. (This requires the
+ docrepr module).
+ """).tag(config=True)
+
+ @observe("sphinxify_docstring")
+ def _sphinxify_docstring_changed(self, change):
+ if change['new']:
+ warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
+
+ enable_html_pager = Bool(False, help=
+ """
+ (Provisional API) enables html representation in mime bundles sent
+ to pagers.
+ """).tag(config=True)
+
+ @observe("enable_html_pager")
+ def _enable_html_pager_changed(self, change):
+ if change['new']:
+ warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
+
+ data_pub_class = None
+
+ exit_now = Bool(False)
+ exiter = Instance(ExitAutocall)
+ @default('exiter')
+ def _exiter_default(self):
+ return ExitAutocall(self)
+ # Monotonically increasing execution counter
+ execution_count = Integer(1)
+ filename = Unicode("<ipython console>")
+ ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
+
+ # Used to transform cells before running them, and check whether code is complete
+ input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
+ ())
+
+ @property
+ def input_transformers_cleanup(self):
+ return self.input_transformer_manager.cleanup_transforms
+
+ input_transformers_post = List([],
+ help="A list of string input transformers, to be applied after IPython's "
+ "own input transformations."
+ )
+
+ @property
+ def input_splitter(self):
+ """Make this available for backward compatibility (pre-7.0 release) with existing code.
+
+        For example, ipykernel currently uses
+ `shell.input_splitter.check_complete`
+ """
+ from warnings import warn
+ warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
+ DeprecationWarning, stacklevel=2
+ )
+ return self.input_transformer_manager
+
+ logstart = Bool(False, help=
+ """
+ Start logging to the default log file in overwrite mode.
+ Use `logappend` to specify a log file to **append** logs to.
+ """
+ ).tag(config=True)
+ logfile = Unicode('', help=
+ """
+ The name of the logfile to use.
+ """
+ ).tag(config=True)
+ logappend = Unicode('', help=
+ """
+ Start logging to the given file in append mode.
+        Use `logfile` to specify a log file to **overwrite** logs in.
+ """
+ ).tag(config=True)
+ object_info_string_level = Enum((0,1,2), default_value=0,
+ ).tag(config=True)
+ pdb = Bool(False, help=
+ """
+ Automatically call the pdb debugger after every exception.
+ """
+ ).tag(config=True)
+ display_page = Bool(False,
+ help="""If True, anything that would be passed to the pager
+ will be displayed as regular output instead."""
+ ).tag(config=True)
+
+
+ show_rewritten_input = Bool(True,
+ help="Show rewritten input, e.g. for autocall."
+ ).tag(config=True)
+
+ quiet = Bool(False).tag(config=True)
+
+ history_length = Integer(10000,
+ help='Total length of command history'
+ ).tag(config=True)
+
+ history_load_length = Integer(1000, help=
+ """
+ The number of saved history entries to be loaded
+ into the history buffer at startup.
+ """
+ ).tag(config=True)
+
+ ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
+ default_value='last_expr',
+ help="""
+        'all', 'last', 'last_expr', 'none' or 'last_expr_or_assign', specifying
+ which nodes should be run interactively (displaying output from expressions).
+ """
+ ).tag(config=True)
+
+ warn_venv = Bool(
+ True,
+ help="Warn if running in a virtual environment with no IPython installed (so IPython from the global environment is used).",
+ ).tag(config=True)
+
+ # TODO: this part of prompt management should be moved to the frontends.
+ # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
+ separate_in = SeparateUnicode('\n').tag(config=True)
+ separate_out = SeparateUnicode('').tag(config=True)
+ separate_out2 = SeparateUnicode('').tag(config=True)
+ wildcards_case_sensitive = Bool(True).tag(config=True)
+ xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
+ default_value='Context',
+ help="Switch modes for the IPython exception handlers."
+ ).tag(config=True)
+
+ # Subcomponents of InteractiveShell
+ alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
+ display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
+ extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
+ payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
+ history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
+ magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
+
+ profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
+ @property
+ def profile(self):
+ if self.profile_dir is not None:
+ name = os.path.basename(self.profile_dir.location)
+ return name.replace('profile_','')
+
+
+ # Private interface
+ _post_execute = Dict()
+
+ # Tracks any GUI loop loaded for pylab
+ pylab_gui_select = None
+
+    last_execution_succeeded = Bool(True, help='Did the last executed command succeed')
+
+ last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
+
+ def __init__(self, ipython_dir=None, profile_dir=None,
+ user_module=None, user_ns=None,
+ custom_exceptions=((), None), **kwargs):
+ # This is where traits with a config_key argument are updated
+ # from the values on config.
+ super(InteractiveShell, self).__init__(**kwargs)
+ if 'PromptManager' in self.config:
+ warn('As of IPython 5.0 `PromptManager` config will have no effect'
+ ' and has been replaced by TerminalInteractiveShell.prompts_class')
+ self.configurables = [self]
+
+ # These are relatively independent and stateless
+ self.init_ipython_dir(ipython_dir)
+ self.init_profile_dir(profile_dir)
+ self.init_instance_attrs()
+ self.init_environment()
+
+ # Check if we're in a virtualenv, and set up sys.path.
+ self.init_virtualenv()
+
+ # Create namespaces (user_ns, user_global_ns, etc.)
+ self.init_create_namespaces(user_module, user_ns)
+ # This has to be done after init_create_namespaces because it uses
+ # something in self.user_ns, but before init_sys_modules, which
+ # is the first thing to modify sys.
+ # TODO: When we override sys.stdout and sys.stderr before this class
+ # is created, we are saving the overridden ones here. Not sure if this
+ # is what we want to do.
+ self.save_sys_module_state()
+ self.init_sys_modules()
+
+ # While we're trying to have each part of the code directly access what
+ # it needs without keeping redundant references to objects, we have too
+ # much legacy code that expects ip.db to exist.
+ self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
+
+ self.init_history()
+ self.init_encoding()
+ self.init_prefilter()
+
+ self.init_syntax_highlighting()
+ self.init_hooks()
+ self.init_events()
+ self.init_pushd_popd_magic()
+ self.init_user_ns()
+ self.init_logger()
+ self.init_builtins()
+
+ # The following was in post_config_initialization
+ self.init_inspector()
+ self.raw_input_original = input
+ self.init_completer()
+ # TODO: init_io() needs to happen before init_traceback handlers
+ # because the traceback handlers hardcode the stdout/stderr streams.
+ # This logic is in debugger.Pdb and should eventually be changed.
+ self.init_io()
+ self.init_traceback_handlers(custom_exceptions)
+ self.init_prompts()
+ self.init_display_formatter()
+ self.init_display_pub()
+ self.init_data_pub()
+ self.init_displayhook()
+ self.init_magics()
+ self.init_alias()
+ self.init_logstart()
+ self.init_pdb()
+ self.init_extension_manager()
+ self.init_payload()
+ self.events.trigger('shell_initialized', self)
+ atexit.register(self.atexit_operations)
+
+ # The trio runner is used for running Trio in the foreground thread. It
+ # is different from `_trio_runner(async_fn)` in `async_helpers.py`
+ # which calls `trio.run()` for every cell. This runner runs all cells
+ # inside a single Trio event loop. If used, it is set from
+ # `ipykernel.kernelapp`.
+ self.trio_runner = None
+
+ def get_ipython(self):
+ """Return the currently running IPython instance."""
+ return self
+
+ #-------------------------------------------------------------------------
+ # Trait changed handlers
+ #-------------------------------------------------------------------------
+ @observe('ipython_dir')
+ def _ipython_dir_changed(self, change):
+ ensure_dir_exists(change['new'])
+
+ def set_autoindent(self,value=None):
+ """Set the autoindent flag.
+
+ If called with no arguments, it acts as a toggle."""
+ if value is None:
+ self.autoindent = not self.autoindent
+ else:
+ self.autoindent = value
+
+ def set_trio_runner(self, tr):
+ self.trio_runner = tr
+
+ #-------------------------------------------------------------------------
+ # init_* methods called by __init__
+ #-------------------------------------------------------------------------
+
+ def init_ipython_dir(self, ipython_dir):
+ if ipython_dir is not None:
+ self.ipython_dir = ipython_dir
+ return
+
+ self.ipython_dir = get_ipython_dir()
+
+ def init_profile_dir(self, profile_dir):
+ if profile_dir is not None:
+ self.profile_dir = profile_dir
+ return
+ self.profile_dir = ProfileDir.create_profile_dir_by_name(
+ self.ipython_dir, "default"
+ )
+
+ def init_instance_attrs(self):
+ self.more = False
+
+ # command compiler
+ self.compile = self.compiler_class()
+
+ # Make an empty namespace, which extension writers can rely on both
+ # existing and NEVER being used by ipython itself. This gives them a
+ # convenient location for storing additional information and state
+ # their extensions may require, without fear of collisions with other
+ # ipython names that may develop later.
+ self.meta = Struct()
+
+ # Temporary files used for various purposes. Deleted at exit.
+ # The files here are stored with Path from Pathlib
+ self.tempfiles = []
+ self.tempdirs = []
+
+ # keep track of where we started running (mainly for crash post-mortem)
+ # This is not being used anywhere currently.
+ self.starting_dir = os.getcwd()
+
+ # Indentation management
+ self.indent_current_nsp = 0
+
+ # Dict to track post-execution functions that have been registered
+ self._post_execute = {}
+
+ def init_environment(self):
+ """Any changes we need to make to the user's environment."""
+ pass
+
+ def init_encoding(self):
+ # Get system encoding at startup time. Certain terminals (like Emacs
+ # under Win32) have it set to None, and we need to have a known valid
+ # encoding to use in the raw_input() method.
+ try:
+ self.stdin_encoding = sys.stdin.encoding or 'ascii'
+ except AttributeError:
+ self.stdin_encoding = 'ascii'
+
+
+ @observe('colors')
+ def init_syntax_highlighting(self, changes=None):
+ # Python source parser/formatter for syntax highlighting
+ pyformat = PyColorize.Parser(style=self.colors, parent=self).format
+ self.pycolorize = lambda src: pyformat(src,'str')
+
+ def refresh_style(self):
+ # No-op here, used in subclass
+ pass
+
+ def init_pushd_popd_magic(self):
+ # for pushd/popd management
+ self.home_dir = get_home_dir()
+
+ self.dir_stack = []
+
+ def init_logger(self):
+ self.logger = Logger(self.home_dir, logfname='ipython_log.py',
+ logmode='rotate')
+
+ def init_logstart(self):
+ """Initialize logging in case it was requested at the command line.
+ """
+ if self.logappend:
+ self.magic('logstart %s append' % self.logappend)
+ elif self.logfile:
+ self.magic('logstart %s' % self.logfile)
+ elif self.logstart:
+ self.magic('logstart')
+
+
+ def init_builtins(self):
+ # A single, static flag that we set to True. Its presence indicates
+ # that an IPython shell has been created, and we make no attempts at
+ # removing on exit or representing the existence of more than one
+ # IPython at a time.
+ builtin_mod.__dict__['__IPYTHON__'] = True
+ builtin_mod.__dict__['display'] = display
+
+ self.builtin_trap = BuiltinTrap(shell=self)
+
+ @observe('colors')
+ def init_inspector(self, changes=None):
+ # Object inspector
+ self.inspector = self.inspector_class(
+ oinspect.InspectColors,
+ PyColorize.ANSICodeColors,
+ self.colors,
+ self.object_info_string_level,
+ )
+
+ def init_io(self):
+ # implemented in subclasses, TerminalInteractiveShell does call
+ # colorama.init().
+ pass
+
+ def init_prompts(self):
+ # Set system prompts, so that scripts can decide if they are running
+ # interactively.
+ sys.ps1 = 'In : '
+ sys.ps2 = '...: '
+ sys.ps3 = 'Out: '
+
+ def init_display_formatter(self):
+ self.display_formatter = DisplayFormatter(parent=self)
+ self.configurables.append(self.display_formatter)
+
+ def init_display_pub(self):
+ self.display_pub = self.display_pub_class(parent=self, shell=self)
+ self.configurables.append(self.display_pub)
+
+ def init_data_pub(self):
+ if not self.data_pub_class:
+ self.data_pub = None
+ return
+ self.data_pub = self.data_pub_class(parent=self)
+ self.configurables.append(self.data_pub)
+
+ def init_displayhook(self):
+ # Initialize displayhook, set in/out prompts and printing system
+ self.displayhook = self.displayhook_class(
+ parent=self,
+ shell=self,
+ cache_size=self.cache_size,
+ )
+ self.configurables.append(self.displayhook)
+ # This is a context manager that installs/removes the displayhook at
+ # the appropriate time.
+ self.display_trap = DisplayTrap(hook=self.displayhook)
+
+ @staticmethod
+ def get_path_links(p: Path):
+ """Gets path links including all symlinks
+
+ Examples
+ --------
+ In [1]: from IPython.core.interactiveshell import InteractiveShell
+
+ In [2]: import sys, pathlib
+
+ In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
+
+ In [4]: len(paths) == len(set(paths))
+ Out[4]: True
+
+ In [5]: bool(paths)
+ Out[5]: True
+ """
+ paths = [p]
+ while p.is_symlink():
+ new_path = Path(os.readlink(p))
+ if not new_path.is_absolute():
+ new_path = p.parent / new_path
+ p = new_path
+ paths.append(p)
+ return paths
+
+ def init_virtualenv(self):
+ """Add the current virtualenv to sys.path so the user can import modules from it.
+ This isn't perfect: it doesn't use the Python interpreter with which the
+ virtualenv was built, and it ignores the --no-site-packages option. A
+ warning will appear suggesting the user installs IPython in the
+ virtualenv, but for many cases, it probably works well enough.
+
+ Adapted from code snippets online.
+
+ http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
+ """
+ if 'VIRTUAL_ENV' not in os.environ:
+ # Not in a virtualenv
+ return
+ elif os.environ["VIRTUAL_ENV"] == "":
+ warn("Virtual env path set to '', please check if this is intended.")
+ return
+
+ p = Path(sys.executable)
+ p_venv = Path(os.environ["VIRTUAL_ENV"])
+
+ # fallback venv detection:
+ # stdlib venv may symlink sys.executable, so we can't use realpath.
+ # but others can symlink *to* the venv Python, so we can't just use sys.executable.
+ # So we just check every item in the symlink tree (generally <= 3)
+ paths = self.get_path_links(p)
+
+ # In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
+ if p_venv.parts[1] == "cygdrive":
+ drive_name = p_venv.parts[2]
+ p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
+
+ if any(p_venv == p.parents[1] for p in paths):
+ # Our exe is inside or has access to the virtualenv, don't need to do anything.
+ return
+
+ if sys.platform == "win32":
+ virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
+ else:
+ virtual_env_path = Path(
+ os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
+ )
+ p_ver = sys.version_info[:2]
+
+ # Predict version from py[thon]-x.x in the $VIRTUAL_ENV
+ re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
+ if re_m:
+ predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
+ if predicted_path.exists():
+ p_ver = re_m.groups()
+
+ virtual_env = str(virtual_env_path).format(*p_ver)
+ if self.warn_venv:
+ warn(
+ "Attempting to work in a virtualenv. If you encounter problems, "
+ "please install IPython inside the virtualenv."
+ )
+ import site
+ sys.path.insert(0, virtual_env)
+ site.addsitedir(virtual_env)
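+
+ # Illustrative note (added commentary, not upstream): with, say,
+ # VIRTUAL_ENV=/home/user/venvs/py3.11-demo and an interpreter running from
+ # outside that venv, the regex above yields ('3', '11'); if the predicted
+ # /home/user/venvs/py3.11-demo/lib/python3.11/site-packages directory exists,
+ # it is prepended to sys.path and registered with site.addsitedir().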
+
+ #-------------------------------------------------------------------------
+ # Things related to injections into the sys module
+ #-------------------------------------------------------------------------
+
+ def save_sys_module_state(self):
+ """Save the state of hooks in the sys module.
+
+ This has to be called after self.user_module is created.
+ """
+ self._orig_sys_module_state = {'stdin': sys.stdin,
+ 'stdout': sys.stdout,
+ 'stderr': sys.stderr,
+ 'excepthook': sys.excepthook}
+ self._orig_sys_modules_main_name = self.user_module.__name__
+ self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
+
+ def restore_sys_module_state(self):
+ """Restore the state of the sys module."""
+ try:
+ for k, v in self._orig_sys_module_state.items():
+ setattr(sys, k, v)
+ except AttributeError:
+ pass
+ # Reset what was done in self.init_sys_modules
+ if self._orig_sys_modules_main_mod is not None:
+ sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
+
+ #-------------------------------------------------------------------------
+ # Things related to the banner
+ #-------------------------------------------------------------------------
+
+ @property
+ def banner(self):
+ banner = self.banner1
+ if self.profile and self.profile != 'default':
+ banner += '\nIPython profile: %s\n' % self.profile
+ if self.banner2:
+ banner += '\n' + self.banner2
+ return banner
+
+ def show_banner(self, banner=None):
+ if banner is None:
+ banner = self.banner
+ sys.stdout.write(banner)
+
+ #-------------------------------------------------------------------------
+ # Things related to hooks
+ #-------------------------------------------------------------------------
+
+ def init_hooks(self):
+ # hooks holds pointers used for user-side customizations
+ self.hooks = Struct()
+
+ self.strdispatchers = {}
+
+ # Set all default hooks, defined in the IPython.hooks module.
+ hooks = IPython.core.hooks
+ for hook_name in hooks.__all__:
+ # default hooks have priority 100, i.e. low; user hooks should have
+ # 0-100 priority
+ self.set_hook(hook_name, getattr(hooks, hook_name), 100)
+
+ if self.display_page:
+ self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
+
+ def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
+ """set_hook(name,hook) -> sets an internal IPython hook.
+
+ IPython exposes some of its internal API as user-modifiable hooks. By
+ adding your function to one of these hooks, you can modify IPython's
+ behavior to call at runtime your own routines."""
+
+ # At some point in the future, this should validate the hook before it
+ # accepts it. Probably at least check that the hook takes the number
+ # of args it's supposed to.
+
+ f = types.MethodType(hook,self)
+
+ # check if the hook is for strdispatcher first
+ if str_key is not None:
+ sdp = self.strdispatchers.get(name, StrDispatch())
+ sdp.add_s(str_key, f, priority )
+ self.strdispatchers[name] = sdp
+ return
+ if re_key is not None:
+ sdp = self.strdispatchers.get(name, StrDispatch())
+ sdp.add_re(re.compile(re_key), f, priority )
+ self.strdispatchers[name] = sdp
+ return
+
+ dp = getattr(self.hooks, name, None)
+ if name not in IPython.core.hooks.__all__:
+ print("Warning! Hook '%s' is not one of %s" % \
+ (name, IPython.core.hooks.__all__ ))
+
+ if name in IPython.core.hooks.deprecated:
+ alternative = IPython.core.hooks.deprecated[name]
+ raise ValueError(
+ "Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
+ name, alternative
+ )
+ )
+
+ if not dp:
+ dp = IPython.core.hooks.CommandChainDispatcher()
+
+ try:
+ dp.add(f,priority)
+ except AttributeError:
+ # it was not commandchain, plain old func - replace
+ dp = f
+
+ setattr(self.hooks,name, dp)
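+
+ # Illustrative usage sketch (added commentary, not upstream code): a classic
+ # use is overriding the 'editor' hook from user configuration code;
+ # ``call_my_editor`` is a hypothetical function::
+ #
+ #     def call_my_editor(self, filename, linenum=None, wait=True):
+ #         import subprocess
+ #         subprocess.call(['myeditor', '+%d' % (linenum or 0), filename])
+ #
+ #     get_ipython().set_hook('editor', call_my_editor)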
+
+ #-------------------------------------------------------------------------
+ # Things related to events
+ #-------------------------------------------------------------------------
+
+ def init_events(self):
+ self.events = EventManager(self, available_events)
+
+ self.events.register("pre_execute", self._clear_warning_registry)
+
+ def register_post_execute(self, func):
+ """DEPRECATED: Use ip.events.register('post_run_cell', func)
+
+ Register a function for calling after code execution.
+ """
+ raise ValueError(
+ "ip.register_post_execute is deprecated since IPython 1.0, use "
+ "ip.events.register('post_run_cell', func) instead."
+ )
+
+ def _clear_warning_registry(self):
+ # clear the warning registry, so that different code blocks with
+ # overlapping line number ranges don't cause spurious suppression of
+ # warnings (see gh-6611 for details)
+ if "__warningregistry__" in self.user_global_ns:
+ del self.user_global_ns["__warningregistry__"]
+
+ #-------------------------------------------------------------------------
+ # Things related to the "main" module
+ #-------------------------------------------------------------------------
+
+ def new_main_mod(self, filename, modname):
+ """Return a new 'main' module object for user code execution.
+
+ ``filename`` should be the path of the script which will be run in the
+ module. Requests with the same filename will get the same module, with
+ its namespace cleared.
+
+ ``modname`` should be the module name - normally either '__main__' or
+ the basename of the file without the extension.
+
+ When scripts are executed via %run, we must keep a reference to their
+ __main__ module around so that Python doesn't
+ clear it, rendering references to module globals useless.
+
+ This method keeps said reference in a private dict, keyed by the
+ absolute path of the script. This way, for multiple executions of the
+ same script we only keep one copy of the namespace (the last one),
+ thus preventing memory leaks from old references while allowing the
+ objects from the last execution to be accessible.
+ """
+ filename = os.path.abspath(filename)
+ try:
+ main_mod = self._main_mod_cache[filename]
+ except KeyError:
+ main_mod = self._main_mod_cache[filename] = types.ModuleType(
+ modname,
+ doc="Module created for script run in IPython")
+ else:
+ main_mod.__dict__.clear()
+ main_mod.__name__ = modname
+
+ main_mod.__file__ = filename
+ # It seems pydoc (and perhaps others) needs any module instance to
+ # implement a __nonzero__ method
+ main_mod.__nonzero__ = lambda : True
+
+ return main_mod
+
+ def clear_main_mod_cache(self):
+ """Clear the cache of main modules.
+
+ Mainly for use by utilities like %reset.
+
+ Examples
+ --------
+ In [15]: import IPython
+
+ In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
+
+ In [17]: len(_ip._main_mod_cache) > 0
+ Out[17]: True
+
+ In [18]: _ip.clear_main_mod_cache()
+
+ In [19]: len(_ip._main_mod_cache) == 0
+ Out[19]: True
+ """
+ self._main_mod_cache.clear()
+
+ #-------------------------------------------------------------------------
+ # Things related to debugging
+ #-------------------------------------------------------------------------
+
+ def init_pdb(self):
+ # Set calling of pdb on exceptions
+ # self.call_pdb is a property
+ self.call_pdb = self.pdb
+
+ def _get_call_pdb(self):
+ return self._call_pdb
+
+ def _set_call_pdb(self,val):
+
+ if val not in (0,1,False,True):
+ raise ValueError('new call_pdb value must be boolean')
+
+ # store value in instance
+ self._call_pdb = val
+
+ # notify the actual exception handlers
+ self.InteractiveTB.call_pdb = val
+
+ call_pdb = property(_get_call_pdb,_set_call_pdb,None,
+ 'Control auto-activation of pdb at exceptions')
+
+ def debugger(self,force=False):
+ """Call the pdb debugger.
+
+ Keywords:
+
+ - force(False): by default, this routine checks the instance call_pdb
+ flag and does not actually invoke the debugger if the flag is false.
+ The 'force' option forces the debugger to activate even if the flag
+ is false.
+ """
+
+ if not (force or self.call_pdb):
+ return
+
+ if not hasattr(sys,'last_traceback'):
+ error('No traceback has been produced, nothing to debug.')
+ return
+
+ self.InteractiveTB.debugger(force=True)
+
+ #-------------------------------------------------------------------------
+ # Things related to IPython's various namespaces
+ #-------------------------------------------------------------------------
+ default_user_namespaces = True
+
+ def init_create_namespaces(self, user_module=None, user_ns=None):
+ # Create the namespace where the user will operate. user_ns is
+ # normally the only one used, and it is passed to the exec calls as
+ # the locals argument. But we do carry a user_global_ns namespace
+ # given as the exec 'globals' argument. This is useful in embedding
+ # situations where the ipython shell opens in a context where the
+ # distinction between locals and globals is meaningful. For
+ # non-embedded contexts, it is just the same object as the user_ns dict.
+
+ # FIXME. For some strange reason, __builtins__ is showing up at user
+ # level as a dict instead of a module. This is a manual fix, but I
+ # should really track down where the problem is coming from. Alex
+ # Schmolck reported this problem first.
+
+ # A useful post by Alex Martelli on this topic:
+ # Re: inconsistent value from __builtins__
+ # Von: Alex Martelli <aleaxit@yahoo.com>
+ # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
+ # Gruppen: comp.lang.python
+
+ # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
+ # > >>> print type(builtin_check.get_global_binding('__builtins__'))
+ # > <type 'dict'>
+ # > >>> print type(__builtins__)
+ # > <type 'module'>
+ # > Is this difference in return value intentional?
+
+ # Well, it's documented that '__builtins__' can be either a dictionary
+ # or a module, and it's been that way for a long time. Whether it's
+ # intentional (or sensible), I don't know. In any case, the idea is
+ # that if you need to access the built-in namespace directly, you
+ # should start with "import __builtin__" (note, no 's') which will
+ # definitely give you a module. Yeah, it's somewhat confusing:-(.
+
+ # These routines return a properly built module and dict as needed by
+ # the rest of the code, and can also be used by extension writers to
+ # generate properly initialized namespaces.
+ if (user_ns is not None) or (user_module is not None):
+ self.default_user_namespaces = False
+ self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
+
+ # A record of hidden variables we have added to the user namespace, so
+ # we can list later only variables defined in actual interactive use.
+ self.user_ns_hidden = {}
+
+ # Now that FakeModule produces a real module, we've run into a nasty
+ # problem: after script execution (via %run), the module where the user
+ # code ran is deleted. Now that this object is a true module (needed
+ # so doctest and other tools work correctly), the Python module
+ # teardown mechanism runs over it, and sets to None every variable
+ # present in that module. Top-level references to objects from the
+ # script survive, because the user_ns is updated with them. However,
+ # calling functions defined in the script that use other things from
+ # the script will fail, because the function's closure had references
+ # to the original objects, which are now all None. So we must protect
+ # these modules from deletion by keeping a cache.
+ #
+ # To avoid keeping stale modules around (we only need the one from the
+ # last run), we use a dict keyed with the full path to the script, so
+ # only the last version of the module is held in the cache. Note,
+ # however, that we must cache the module *namespace contents* (their
+ # __dict__). Because if we try to cache the actual modules, old ones
+ # (uncached) could be destroyed while still holding references (such as
+ # those held by GUI objects that tend to be long-lived).
+ #
+ # The %reset command will flush this cache. See the cache_main_mod()
+ # and clear_main_mod_cache() methods for details on use.
+
+ # This is the cache used for 'main' namespaces
+ self._main_mod_cache = {}
+
+ # A table holding all the namespaces IPython deals with, so that
+ # introspection facilities can search easily.
+ self.ns_table = {'user_global':self.user_module.__dict__,
+ 'user_local':self.user_ns,
+ 'builtin':builtin_mod.__dict__
+ }
+
+ @property
+ def user_global_ns(self):
+ return self.user_module.__dict__
+
+ def prepare_user_module(self, user_module=None, user_ns=None):
+ """Prepare the module and namespace in which user code will be run.
+
+ When IPython is started normally, both parameters are None: a new module
+ is created automatically, and its __dict__ used as the namespace.
+
+ If only user_module is provided, its __dict__ is used as the namespace.
+ If only user_ns is provided, a dummy module is created, and user_ns
+ becomes the global namespace. If both are provided (as they may be
+ when embedding), user_ns is the local namespace, and user_module
+ provides the global namespace.
+
+ Parameters
+ ----------
+ user_module : module, optional
+ The current user module in which IPython is being run. If None,
+ a clean module will be created.
+ user_ns : dict, optional
+ A namespace in which to run interactive commands.
+
+ Returns
+ -------
+ A tuple of user_module and user_ns, each properly initialised.
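+
+ Example (illustrative sketch, not part of the upstream docstring)::
+
+     mod, ns = get_ipython().prepare_user_module()
+     assert ns is mod.__dict__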
+ """
+ if user_module is None and user_ns is not None:
+ user_ns.setdefault("__name__", "__main__")
+ user_module = DummyMod()
+ user_module.__dict__ = user_ns
+
+ if user_module is None:
+ user_module = types.ModuleType("__main__",
+ doc="Automatically created module for IPython interactive environment")
+
+ # We must ensure that __builtin__ (without the final 's') is always
+ # available and pointing to the __builtin__ *module*. For more details:
+ # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
+ user_module.__dict__.setdefault('__builtin__', builtin_mod)
+ user_module.__dict__.setdefault('__builtins__', builtin_mod)
+
+ if user_ns is None:
+ user_ns = user_module.__dict__
+
+ return user_module, user_ns
+
+ def init_sys_modules(self):
+ # We need to insert into sys.modules something that looks like a
+ # module but which accesses the IPython namespace, for shelve and
+ # pickle to work interactively. Normally they rely on getting
+ # everything out of __main__, but for embedding purposes each IPython
+ # instance has its own private namespace, so we can't go shoving
+ # everything into __main__.
+
+ # note, however, that we should only do this for non-embedded
+ # ipythons, which really mimic the __main__.__dict__ with their own
+ # namespace. Embedded instances, on the other hand, should not do
+ # this because they need to manage the user local/global namespaces
+ # only, but they live within a 'normal' __main__ (meaning, they
+ # shouldn't overtake the execution environment of the script they're
+ # embedded in).
+
+ # This is overridden in the InteractiveShellEmbed subclass to a no-op.
+ main_name = self.user_module.__name__
+ sys.modules[main_name] = self.user_module
+
+ def init_user_ns(self):
+ """Initialize all user-visible namespaces to their minimum defaults.
+
+ Certain history lists are also initialized here, as they effectively
+ act as user namespaces.
+
+ Notes
+ -----
+ All data structures here are only filled in, they are NOT reset by this
+ method. If they were not empty before, data will simply be added to
+ them.
+ """
+ # This function works in two parts: first we put a few things in
+ # user_ns, and we sync those contents into user_ns_hidden so that these
+ # initial variables aren't shown by %who. After the sync, we add the
+ # rest of what we *do* want the user to see with %who even on a new
+ # session (probably nothing, so they really only see their own stuff)
+
+ # The user dict must *always* have a __builtin__ reference to the
+ # Python standard __builtin__ namespace, which must be imported.
+ # This is so that certain operations in prompt evaluation can be
+ # reliably executed with builtins. Note that we can NOT use
+ # __builtins__ (note the 's'), because that can either be a dict or a
+ # module, and can even mutate at runtime, depending on the context
+ # (Python makes no guarantees on it). In contrast, __builtin__ is
+ # always a module object, though it must be explicitly imported.
+
+ # For more details:
+ # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
+ ns = {}
+
+ # make global variables for user access to the histories
+ ns['_ih'] = self.history_manager.input_hist_parsed
+ ns['_oh'] = self.history_manager.output_hist
+ ns['_dh'] = self.history_manager.dir_hist
+
+ # user aliases to input and output histories. These shouldn't show up
+ # in %who, as they can have very large reprs.
+ ns['In'] = self.history_manager.input_hist_parsed
+ ns['Out'] = self.history_manager.output_hist
+
+ # Store myself as the public api!!!
+ ns['get_ipython'] = self.get_ipython
+
+ ns['exit'] = self.exiter
+ ns['quit'] = self.exiter
+ ns["open"] = _modified_open
+
+ # Sync what we've added so far to user_ns_hidden so these aren't seen
+ # by %who
+ self.user_ns_hidden.update(ns)
+
+ # Anything put into ns now would show up in %who. Think twice before
+ # putting anything here, as we really want %who to show the user their
+ # stuff, not our variables.
+
+ # Finally, update the real user's namespace
+ self.user_ns.update(ns)
+
+ @property
+ def all_ns_refs(self):
+ """Get a list of references to all the namespace dictionaries in which
+ IPython might store a user-created object.
+
+ Note that this does not include the displayhook, which also caches
+ objects from the output."""
+ return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
+ [m.__dict__ for m in self._main_mod_cache.values()]
+
+ def reset(self, new_session=True, aggressive=False):
+ """Clear all internal namespaces, and attempt to release references to
+ user objects.
+
+ If new_session is True, a new history session will be opened.
+ """
+ # Clear histories
+ self.history_manager.reset(new_session)
+ # Reset counter used to index all histories
+ if new_session:
+ self.execution_count = 1
+
+ # Reset last execution result
+ self.last_execution_succeeded = True
+ self.last_execution_result = None
+
+ # Flush cached output items
+ if self.displayhook.do_full_cache:
+ self.displayhook.flush()
+
+ # The main execution namespaces must be cleared very carefully,
+ # skipping the deletion of the builtin-related keys, because doing so
+ # would cause errors in many object's __del__ methods.
+ if self.user_ns is not self.user_global_ns:
+ self.user_ns.clear()
+ ns = self.user_global_ns
+ drop_keys = set(ns.keys())
+ drop_keys.discard('__builtin__')
+ drop_keys.discard('__builtins__')
+ drop_keys.discard('__name__')
+ for k in drop_keys:
+ del ns[k]
+
+ self.user_ns_hidden.clear()
+
+ # Restore the user namespaces to minimal usability
+ self.init_user_ns()
+ if aggressive and not hasattr(self, "_sys_modules_keys"):
+ print("Cannot restore sys.module, no snapshot")
+ elif aggressive:
+ print("culling sys module...")
+ current_keys = set(sys.modules.keys())
+ for k in current_keys - self._sys_modules_keys:
+ if k.startswith("multiprocessing"):
+ continue
+ del sys.modules[k]
+
+ # Restore the default and user aliases
+ self.alias_manager.clear_aliases()
+ self.alias_manager.init_aliases()
+
+ # Now define aliases that only make sense on the terminal, because they
+ # need direct access to the console in a way that we can't emulate in
+ # GUI or web frontend
+ if os.name == 'posix':
+ for cmd in ('clear', 'more', 'less', 'man'):
+ if cmd not in self.magics_manager.magics['line']:
+ self.alias_manager.soft_define_alias(cmd, cmd)
+
+ # Flush the private list of module references kept for script
+ # execution protection
+ self.clear_main_mod_cache()
+
+ def del_var(self, varname, by_name=False):
+ """Delete a variable from the various namespaces, so that, as
+ far as possible, we're not keeping any hidden references to it.
+
+ Parameters
+ ----------
+ varname : str
+ The name of the variable to delete.
+ by_name : bool
+ If True, delete variables with the given name in each
+ namespace. If False (default), find the variable in the user
+ namespace, and delete references to it.
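+
+ Example (illustrative sketch)::
+
+     ip = get_ipython()
+     ip.push({'tmp': [1, 2, 3]})
+     ip.del_var('tmp')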
+ """
+ if varname in ('__builtin__', '__builtins__'):
+ raise ValueError("Refusing to delete %s" % varname)
+
+ ns_refs = self.all_ns_refs
+
+ if by_name: # Delete by name
+ for ns in ns_refs:
+ try:
+ del ns[varname]
+ except KeyError:
+ pass
+ else: # Delete by object
+ try:
+ obj = self.user_ns[varname]
+ except KeyError as e:
+ raise NameError("name '%s' is not defined" % varname) from e
+ # Also check in output history
+ ns_refs.append(self.history_manager.output_hist)
+ for ns in ns_refs:
+ to_delete = [n for n, o in ns.items() if o is obj]
+ for name in to_delete:
+ del ns[name]
+
+ # Ensure it is removed from the last execution result
+ if self.last_execution_result.result is obj:
+ self.last_execution_result = None
+
+ # displayhook keeps extra references, but not in a dictionary
+ for name in ('_', '__', '___'):
+ if getattr(self.displayhook, name) is obj:
+ setattr(self.displayhook, name, None)
+
+ def reset_selective(self, regex=None):
+ """Clear selective variables from internal namespaces based on a
+ specified regular expression.
+
+ Parameters
+ ----------
+ regex : string or compiled pattern, optional
+ A regular expression pattern that will be used in searching
+ variable names in the user's namespaces.
+ """
+ if regex is not None:
+ try:
+ m = re.compile(regex)
+ except TypeError as e:
+ raise TypeError('regex must be a string or compiled pattern') from e
+ # Search for keys in each namespace that match the given regex
+ # If a match is found, delete the key/value pair.
+ for ns in self.all_ns_refs:
+ for var in list(ns):  # copy the keys, since ns is mutated in the loop
+ if m.search(var):
+ del ns[var]
+
+ def push(self, variables, interactive=True):
+ """Inject a group of variables into the IPython user namespace.
+
+ Parameters
+ ----------
+ variables : dict, str or list/tuple of str
+ The variables to inject into the user's namespace. If a dict, a
+ simple update is done. If a str, the string is assumed to have
+ variable names separated by spaces. A list/tuple of str can also
+ be used to give the variable names. If just the variable names are
+ given (list/tuple/str), then the variable values are looked up in the
+ caller's frame.
+ interactive : bool
+ If True (default), the variables will be listed with the ``who``
+ magic.
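+
+ Example (illustrative sketch)::
+
+     a, b = 10, 20
+     get_ipython().push('a b')       # names are looked up in this frame
+     get_ipython().push({'c': 30})   # or inject values directly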
+ """
+ vdict = None
+
+ # We need a dict of name/value pairs to do namespace updates.
+ if isinstance(variables, dict):
+ vdict = variables
+ elif isinstance(variables, (str, list, tuple)):
+ if isinstance(variables, str):
+ vlist = variables.split()
+ else:
+ vlist = variables
+ vdict = {}
+ cf = sys._getframe(1)
+ for name in vlist:
+ try:
+ vdict[name] = eval(name, cf.f_globals, cf.f_locals)
+ except:
+ print('Could not get variable %s from %s' %
+ (name,cf.f_code.co_name))
+ else:
+ raise ValueError('variables must be a dict/str/list/tuple')
+
+ # Propagate variables to user namespace
+ self.user_ns.update(vdict)
+
+ # And configure interactive visibility
+ user_ns_hidden = self.user_ns_hidden
+ if interactive:
+ for name in vdict:
+ user_ns_hidden.pop(name, None)
+ else:
+ user_ns_hidden.update(vdict)
+
+ def drop_by_id(self, variables):
+ """Remove a dict of variables from the user namespace, if they are the
+ same as the values in the dictionary.
+
+ This is intended for use by extensions: variables that they've added can
+ be taken back out if they are unloaded, without removing any that the
+ user has overwritten.
+
+ Parameters
+ ----------
+ variables : dict
+ A dictionary mapping object names (as strings) to the objects.
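+
+ Example (illustrative sketch)::
+
+     marker = object()
+     ip = get_ipython()
+     ip.push({'_ext_marker': marker})
+     ip.drop_by_id({'_ext_marker': marker})  # removed only if still the same object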
+ """
+ for name, obj in variables.items():
+ if name in self.user_ns and self.user_ns[name] is obj:
+ del self.user_ns[name]
+ self.user_ns_hidden.pop(name, None)
+
+ #-------------------------------------------------------------------------
+ # Things related to object introspection
+ #-------------------------------------------------------------------------
+ @staticmethod
+ def _find_parts(oname: str) -> Tuple[bool, ListType[str]]:
+ """
+ Given an object name, return a list of parts of this object name.
+
+ Basically, split on dots when using attribute access, and extract the
+ value when using square brackets.
+
+ For example, foo.bar[3].baz[x] -> foo, bar, 3, baz, x
+
+ Returns
+ -------
+ parts_ok: bool
+     whether we were able to parse the parts properly.
+ parts: list of str
+     the extracted parts.
+ """
+ raw_parts = oname.split(".")
+ parts = []
+ parts_ok = True
+ for p in raw_parts:
+ if p.endswith("]"):
+ var, *indices = p.split("[")
+ if not var.isidentifier():
+ parts_ok = False
+ break
+ parts.append(var)
+ for ind in indices:
+ if ind[-1] != "]" and not is_integer_string(ind[:-1]):
+ parts_ok = False
+ break
+ parts.append(ind[:-1])
+ continue
+
+ if not p.isidentifier():
+ parts_ok = False
+ parts.append(p)
+
+ return parts_ok, parts
+
+ def _ofind(
+ self, oname: str, namespaces: Optional[Sequence[Tuple[str, AnyType]]] = None
+ ) -> OInfo:
+ """Find an object in the available namespaces.
+
+
+ Returns
+ -------
+ OInfo with fields:
+ - ismagic
+ - isalias
+ - found
+ - obj
+ - namespace
+ - parent
+
+ Has special code to detect magic functions.
+ """
+ oname = oname.strip()
+ parts_ok, parts = self._find_parts(oname)
+
+ if (
+ not oname.startswith(ESC_MAGIC)
+ and not oname.startswith(ESC_MAGIC2)
+ and not parts_ok
+ ):
+ return OInfo(
+ ismagic=False,
+ isalias=False,
+ found=False,
+ obj=None,
+ namespace=None,
+ parent=None,
+ )
+
+ if namespaces is None:
+ # Namespaces to search in:
+ # Put them in a list. The order is important so that we
+ # find things in the same order that Python finds them.
+ namespaces = [ ('Interactive', self.user_ns),
+ ('Interactive (global)', self.user_global_ns),
+ ('Python builtin', builtin_mod.__dict__),
+ ]
+
+ ismagic = False
+ isalias = False
+ found = False
+ ospace = None
+ parent = None
+ obj = None
+
+
+ # Look for the given name by splitting it in parts. If the head is
+ # found, then we look for all the remaining parts as members, and only
+ # declare success if we can find them all.
+ oname_parts = parts
+ oname_head, oname_rest = oname_parts[0],oname_parts[1:]
+ for nsname,ns in namespaces:
+ try:
+ obj = ns[oname_head]
+ except KeyError:
+ continue
+ else:
+ for idx, part in enumerate(oname_rest):
+ try:
+ parent = obj
+ # The last part is looked up in a special way to avoid
+ # descriptor invocation as it may raise or have side
+ # effects.
+ if idx == len(oname_rest) - 1:
+ obj = self._getattr_property(obj, part)
+ else:
+ if is_integer_string(part):
+ obj = obj[int(part)]
+ else:
+ obj = getattr(obj, part)
+ except:
+ # Blanket except b/c some badly implemented objects
+ # allow __getattr__ to raise exceptions other than
+ # AttributeError, which then crashes IPython.
+ break
+ else:
+ # If we finish the for loop (no break), we got all members
+ found = True
+ ospace = nsname
+ break # namespace loop
+
+ # Try to see if it's magic
+ if not found:
+ obj = None
+ if oname.startswith(ESC_MAGIC2):
+ oname = oname.lstrip(ESC_MAGIC2)
+ obj = self.find_cell_magic(oname)
+ elif oname.startswith(ESC_MAGIC):
+ oname = oname.lstrip(ESC_MAGIC)
+ obj = self.find_line_magic(oname)
+ else:
+ # search without prefix, so run? will find %run?
+ obj = self.find_line_magic(oname)
+ if obj is None:
+ obj = self.find_cell_magic(oname)
+ if obj is not None:
+ found = True
+ ospace = 'IPython internal'
+ ismagic = True
+ isalias = isinstance(obj, Alias)
+
+ # Last try: special-case some literals like '', [], {}, etc:
+ if not found and oname_head in ["''",'""','[]','{}','()']:
+ obj = eval(oname_head)
+ found = True
+ ospace = 'Interactive'
+
+ return OInfo(
+ obj=obj,
+ found=found,
+ parent=parent,
+ ismagic=ismagic,
+ isalias=isalias,
+ namespace=ospace,
+ )
+
+ @staticmethod
+ def _getattr_property(obj, attrname):
+ """Property-aware getattr to use in object finding.
+
+ If attrname represents a property, return it unevaluated (in case it has
+ side effects or raises an error).
+
+ """
+ if not isinstance(obj, type):
+ try:
+ # `getattr(type(obj), attrname)` is not guaranteed to return
+ # `obj`, but does so for property:
+ #
+ # property.__get__(self, None, cls) -> self
+ #
+ # The universal alternative is to traverse the mro manually
+ # searching for attrname in class dicts.
+ if is_integer_string(attrname):
+ return obj[int(attrname)]
+ else:
+ attr = getattr(type(obj), attrname)
+ except AttributeError:
+ pass
+ else:
+ # This relies on the fact that data descriptors (with both
+ # __get__ & __set__ magic methods) take precedence over
+ # instance-level attributes:
+ #
+ # class A(object):
+ # @property
+ # def foobar(self): return 123
+ # a = A()
+ # a.__dict__['foobar'] = 345
+ # a.foobar # == 123
+ #
+ # So, a property may be returned right away.
+ if isinstance(attr, property):
+ return attr
+
+ # Nothing helped, fall back.
+ return getattr(obj, attrname)
+
+ def _object_find(self, oname, namespaces=None) -> OInfo:
+ """Find an object and return a struct with info about it."""
+ return self._ofind(oname, namespaces)
+
+ def _inspect(self, meth, oname, namespaces=None, **kw):
+ """Generic interface to the inspector system.
+
+ This function is meant to be called by pdef, pdoc & friends.
+ """
+ info: OInfo = self._object_find(oname, namespaces)
+ if self.sphinxify_docstring:
+ if sphinxify is None:
+ raise ImportError("Module ``docrepr`` required but missing")
+ docformat = sphinxify(self.object_inspect(oname))
+ else:
+ docformat = None
+ if info.found or hasattr(info.parent, oinspect.HOOK_NAME):
+ pmethod = getattr(self.inspector, meth)
+ # TODO: only apply format_screen to the plain/text repr of the mime
+ # bundle.
+ formatter = format_screen if info.ismagic else docformat
+ if meth == 'pdoc':
+ pmethod(info.obj, oname, formatter)
+ elif meth == 'pinfo':
+ pmethod(
+ info.obj,
+ oname,
+ formatter,
+ info,
+ enable_html_pager=self.enable_html_pager,
+ **kw,
+ )
+ else:
+ pmethod(info.obj, oname)
+ else:
+ print('Object `%s` not found.' % oname)
+ return 'not found' # so callers can take other action
+
+ def object_inspect(self, oname, detail_level=0):
+ """Get object info about oname"""
+ with self.builtin_trap:
+ info = self._object_find(oname)
+ if info.found:
+ return self.inspector.info(info.obj, oname, info=info,
+ detail_level=detail_level
+ )
+ else:
+ return oinspect.object_info(name=oname, found=False)
+
+ def object_inspect_text(self, oname, detail_level=0):
+ """Get object info as formatted text"""
+ return self.object_inspect_mime(oname, detail_level)['text/plain']
+
+ def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
+ """Get object info as a mimebundle of formatted representations.
+
+ A mimebundle is a dictionary, keyed by mime-type.
+ It must always have the key `'text/plain'`.
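+
+ Example (illustrative sketch)::
+
+     bundle = get_ipython().object_inspect_mime('list')
+     assert 'text/plain' in bundle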
+ """
+ with self.builtin_trap:
+ info = self._object_find(oname)
+ if info.found:
+ docformat = (
+ sphinxify(self.object_inspect(oname))
+ if self.sphinxify_docstring
+ else None
+ )
+ return self.inspector._get_info(
+ info.obj,
+ oname,
+ info=info,
+ detail_level=detail_level,
+ formatter=docformat,
+ omit_sections=omit_sections,
+ )
+ else:
+ raise KeyError(oname)
+
+ #-------------------------------------------------------------------------
+ # Things related to history management
+ #-------------------------------------------------------------------------
+
+ def init_history(self):
+ """Sets up the command history, and starts regular autosaves."""
+ self.history_manager = HistoryManager(shell=self, parent=self)
+ self.configurables.append(self.history_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to exception handling and tracebacks (not debugging)
+ #-------------------------------------------------------------------------
+
+ debugger_cls = InterruptiblePdb
+
+ def init_traceback_handlers(self, custom_exceptions):
+ # Syntax error handler.
+ self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
+
+ # The interactive one is initialized with an offset, meaning we always
+ # want to remove the topmost item in the traceback, which is our own
+ # internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
+ self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
+ color_scheme='NoColor',
+ tb_offset = 1,
+ debugger_cls=self.debugger_cls, parent=self)
+
+ # The instance will store a pointer to the system-wide exception hook,
+ # so that runtime code (such as magics) can access it. This is because
+ # during the read-eval loop, it may get temporarily overwritten.
+ self.sys_excepthook = sys.excepthook
+
+ # and add any custom exception handlers the user may have specified
+ self.set_custom_exc(*custom_exceptions)
+
+ # Set the exception mode
+ self.InteractiveTB.set_mode(mode=self.xmode)
+
+ def set_custom_exc(self, exc_tuple, handler):
+ """set_custom_exc(exc_tuple, handler)
+
+ Set a custom exception handler, which will be called if any of the
+ exceptions in exc_tuple occur in the mainloop (specifically, in the
+ run_code() method).
+
+ Parameters
+ ----------
+ exc_tuple : tuple of exception classes
+ A *tuple* of exception classes, for which to call the defined
+ handler. It is very important that you use a tuple, and NOT A
+ LIST here, because of the way Python's except statement works. If
+ you only want to trap a single exception, use a singleton tuple::
+
+ exc_tuple == (MyCustomException,)
+
+ handler : callable
+ handler must have the following signature::
+
+ def my_handler(self, etype, value, tb, tb_offset=None):
+ ...
+ return structured_traceback
+
+ Your handler must return a structured traceback (a list of strings),
+ or None.
+
+ This will be made into an instance method (via types.MethodType)
+ of IPython itself, and it will be called if any of the exceptions
+ listed in the exc_tuple are caught. If the handler is None, an
+ internal basic one is used, which just prints basic info.
+
+ To protect IPython from crashes, if your handler ever raises an
+ exception or returns an invalid result, it will be immediately
+ disabled.
+
+ Notes
+ -----
+ WARNING: by putting in your own exception handler into IPython's main
+ execution loop, you run a very good chance of nasty crashes. This
+ facility should only be used if you really know what you are doing.
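+
+ Example (illustrative sketch; ``MyCustomException`` and ``my_handler`` are
+ hypothetical names)::
+
+     def my_handler(self, etype, value, tb, tb_offset=None):
+         print('Handled:', etype.__name__)
+         return self.InteractiveTB.structured_traceback(
+             etype, value, tb, tb_offset=tb_offset)
+
+     get_ipython().set_custom_exc((MyCustomException,), my_handler)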
+ """
+
+ if not isinstance(exc_tuple, tuple):
+ raise TypeError("The custom exceptions must be given as a tuple.")
+
+ def dummy_handler(self, etype, value, tb, tb_offset=None):
+ print('*** Simple custom exception handler ***')
+ print('Exception type :', etype)
+ print('Exception value:', value)
+ print('Traceback :', tb)
+
+ def validate_stb(stb):
+ """validate structured traceback return type
+
+ return type of CustomTB *should* be a list of strings, but allow
+ single strings or None, which are harmless.
+
+ This function will *always* return a list of strings,
+ and will raise a TypeError if stb is inappropriate.
+ """
+ msg = "CustomTB must return list of strings, not %r" % stb
+ if stb is None:
+ return []
+ elif isinstance(stb, str):
+ return [stb]
+ elif not isinstance(stb, list):
+ raise TypeError(msg)
+ # it's a list
+ for line in stb:
+ # check every element
+ if not isinstance(line, str):
+ raise TypeError(msg)
+ return stb
+
+ if handler is None:
+ wrapped = dummy_handler
+ else:
+ def wrapped(self,etype,value,tb,tb_offset=None):
+ """wrap CustomTB handler, to protect IPython from user code
+
+ This makes it harder (but not impossible) for custom exception
+ handlers to crash IPython.
+ """
+ try:
+ stb = handler(self,etype,value,tb,tb_offset=tb_offset)
+ return validate_stb(stb)
+ except:
+ # clear custom handler immediately
+ self.set_custom_exc((), None)
+ print("Custom TB Handler failed, unregistering", file=sys.stderr)
+ # show the exception in handler first
+ stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
+ print(self.InteractiveTB.stb2text(stb))
+ print("The original exception:")
+ stb = self.InteractiveTB.structured_traceback(
+ (etype,value,tb), tb_offset=tb_offset
+ )
+ return stb
+
+ self.CustomTB = types.MethodType(wrapped,self)
+ self.custom_exceptions = exc_tuple
+
+ def excepthook(self, etype, value, tb):
+ """One more defense for GUI apps that call sys.excepthook.
+
+ GUI frameworks like wxPython trap exceptions and call
+ sys.excepthook themselves. I guess this is a feature that
+ enables them to keep running after exceptions that would
+ otherwise kill their mainloop. This is a bother for IPython
+ which expects to catch all of the program exceptions with a try:
+ except: statement.
+
+ Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
+ any app directly invokes sys.excepthook, it will look to the user like
+ IPython crashed. In order to work around this, we can disable the
+ CrashHandler and replace it with this excepthook instead, which prints a
+ regular traceback using our InteractiveTB. In this fashion, apps which
+ call sys.excepthook will generate a regular-looking exception from
+ IPython, and the CrashHandler will only be triggered by real IPython
+ crashes.
+
+ This hook should be used sparingly, only in places which are not likely
+ to be true IPython errors.
+ """
+ self.showtraceback((etype, value, tb), tb_offset=0)
+
+ def _get_exc_info(self, exc_tuple=None):
+ """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
+
+ Ensures sys.last_type,value,traceback hold the exc_info we found,
+ from whichever source.
+
+ raises ValueError if none of these contain any information
+ """
+ if exc_tuple is None:
+ etype, value, tb = sys.exc_info()
+ else:
+ etype, value, tb = exc_tuple
+
+ if etype is None:
+ if hasattr(sys, 'last_type'):
+ etype, value, tb = sys.last_type, sys.last_value, \
+ sys.last_traceback
+
+ if etype is None:
+ raise ValueError("No exception to find")
+
+ # Now store the exception info in sys.last_type etc.
+ # WARNING: these variables are somewhat deprecated and not
+ # necessarily safe to use in a threaded environment, but tools
+ # like pdb depend on their existence, so let's set them. If we
+ # find problems in the field, we'll need to revisit their use.
+ sys.last_type = etype
+ sys.last_value = value
+ sys.last_traceback = tb
+
+ return etype, value, tb
+
+ def show_usage_error(self, exc):
+ """Show a short message for UsageErrors
+
+ These are special exceptions that shouldn't show a traceback.
+ """
+ print("UsageError: %s" % exc, file=sys.stderr)
+
+ def get_exception_only(self, exc_tuple=None):
+ """
+ Return as a string (ending with a newline) the exception that
+ just occurred, without any traceback.
+ """
+ etype, value, tb = self._get_exc_info(exc_tuple)
+ msg = traceback.format_exception_only(etype, value)
+ return ''.join(msg)
+
+ def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
+ exception_only=False, running_compiled_code=False):
+ """Display the exception that just occurred.
+
+ If nothing is known about the exception, this is the method which
+ should be used throughout the code for presenting user tracebacks,
+ rather than directly invoking the InteractiveTB object.
+
+ A specific showsyntaxerror() also exists, but this method can take
+ care of calling it if needed, so unless you are explicitly catching a
+ SyntaxError exception, don't try to analyze the stack manually and
+ simply call this method."""
+
+ try:
+ try:
+ etype, value, tb = self._get_exc_info(exc_tuple)
+ except ValueError:
+ print('No traceback available to show.', file=sys.stderr)
+ return
+
+ if issubclass(etype, SyntaxError):
+ # Though this won't be called by syntax errors in the input
+ # line, there may be SyntaxError cases with imported code.
+ self.showsyntaxerror(filename, running_compiled_code)
+ elif etype is UsageError:
+ self.show_usage_error(value)
+ else:
+ if exception_only:
+ stb = ['An exception has occurred, use %tb to see '
+ 'the full traceback.\n']
+ stb.extend(self.InteractiveTB.get_exception_only(etype,
+ value))
+ else:
+ try:
+ # Exception classes can customise their traceback - we
+ # use this in IPython.parallel for exceptions occurring
+ # in the engines. This should return a list of strings.
+ if hasattr(value, "_render_traceback_"):
+ stb = value._render_traceback_()
+ else:
+ stb = self.InteractiveTB.structured_traceback(
+ etype, value, tb, tb_offset=tb_offset
+ )
+
+ except Exception:
+ print(
+ "Unexpected exception formatting exception. Falling back to standard exception"
+ )
+ traceback.print_exc()
+ return None
+
+ self._showtraceback(etype, value, stb)
+ if self.call_pdb:
+ # drop into debugger
+ self.debugger(force=True)
+ return
+
+ # Actually show the traceback
+ self._showtraceback(etype, value, stb)
+
+ except KeyboardInterrupt:
+ print('\n' + self.get_exception_only(), file=sys.stderr)
+
+ def _showtraceback(self, etype, evalue, stb: str):
+ """Actually show a traceback.
+
+ Subclasses may override this method to put the traceback on a different
+ place, like a side channel.
+ """
+ val = self.InteractiveTB.stb2text(stb)
+ try:
+ print(val)
+ except UnicodeEncodeError:
+ print(val.encode("utf-8", "backslashreplace").decode())
+
+ def showsyntaxerror(self, filename=None, running_compiled_code=False):
+ """Display the syntax error that just occurred.
+
+ This doesn't display a stack trace because there isn't one.
+
+ If a filename is given, it is stuffed in the exception instead
+ of what was there before (because Python's parser always uses
+ "<string>" when reading from a string).
+
+ If the syntax error occurred when running compiled code (i.e.
+ running_compiled_code=True), a longer stack trace will be displayed.
+ """
+ etype, value, last_traceback = self._get_exc_info()
+
+ if filename and issubclass(etype, SyntaxError):
+ try:
+ value.filename = filename
+ except:
+ # Not the format we expect; leave it alone
+ pass
+
+ # If the error occurred when executing compiled code, we should provide full stacktrace.
+ elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
+ stb = self.SyntaxTB.structured_traceback(etype, value, elist)
+ self._showtraceback(etype, value, stb)
+
+ # This is overridden in TerminalInteractiveShell to show a message about
+ # the %paste magic.
+ def showindentationerror(self):
+ """Called by _run_cell when there's an IndentationError in code entered
+ at the prompt.
+
+ This is overridden in TerminalInteractiveShell to show a message about
+ the %paste magic."""
+ self.showsyntaxerror()
+
+ @skip_doctest
+ def set_next_input(self, s, replace=False):
+ """ Sets the 'default' input string for the next command line.
+
+ Example::
+
+ In [1]: _ip.set_next_input("Hello Word")
+ In [2]: Hello Word_ # cursor is here
+ """
+ self.rl_next_input = s
+
+ def _indent_current_str(self):
+ """return the current level of indentation as a string"""
+ return self.input_splitter.get_indent_spaces() * ' '
+
+ #-------------------------------------------------------------------------
+ # Things related to text completion
+ #-------------------------------------------------------------------------
+
+ def init_completer(self):
+ """Initialize the completion machinery.
+
+ This creates completion machinery that can be used by client code,
+ either interactively in-process (typically triggered by the readline
+ library), programmatically (such as in test suites) or out-of-process
+ (typically over the network by remote frontends).
+ """
+ from IPython.core.completer import IPCompleter
+ from IPython.core.completerlib import (
+ cd_completer,
+ magic_run_completer,
+ module_completer,
+ reset_completer,
+ )
+
+ self.Completer = IPCompleter(shell=self,
+ namespace=self.user_ns,
+ global_namespace=self.user_global_ns,
+ parent=self,
+ )
+ self.configurables.append(self.Completer)
+
+ # Add custom completers to the basic ones built into IPCompleter
+ sdisp = self.strdispatchers.get('complete_command', StrDispatch())
+ self.strdispatchers['complete_command'] = sdisp
+ self.Completer.custom_completers = sdisp
+
+ self.set_hook('complete_command', module_completer, str_key = 'import')
+ self.set_hook('complete_command', module_completer, str_key = 'from')
+ self.set_hook('complete_command', module_completer, str_key = '%aimport')
+ self.set_hook('complete_command', magic_run_completer, str_key = '%run')
+ self.set_hook('complete_command', cd_completer, str_key = '%cd')
+ self.set_hook('complete_command', reset_completer, str_key = '%reset')
+
+ @skip_doctest
+ def complete(self, text, line=None, cursor_pos=None):
+ """Return the completed text and a list of completions.
+
+ Parameters
+ ----------
+ text : string
+ A string of text to be completed on. It can be given as empty and
+ instead a line/position pair are given. In this case, the
+ completer itself will split the line like readline does.
+ line : string, optional
+ The complete line that text is part of.
+ cursor_pos : int, optional
+ The position of the cursor on the input line.
+
+ Returns
+ -------
+ text : string
+ The actual text that was completed.
+ matches : list
+ A sorted list with all possible completions.
+
+ Notes
+ -----
+ The optional arguments allow the completion to take more context into
+ account, and are part of the low-level completion API.
+
+ This is a wrapper around the completion mechanism, similar to what
+ readline does at the command line when the TAB key is hit. By
+ exposing it as a method, it can be used by other non-readline
+ environments (such as GUIs) for text completion.
+
+ Examples
+ --------
+ In [1]: x = 'hello'
+
+ In [2]: _ip.complete('x.l')
+ Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
+ """
+
+ # Inject names into __builtin__ so we can complete on the added names.
+ with self.builtin_trap:
+ return self.Completer.complete(text, line, cursor_pos)
+
+ def set_custom_completer(self, completer, pos=0) -> None:
+ """Adds a new custom completer function.
+
+ The position argument (defaults to 0) is the index in the completers
+ list where you want the completer to be inserted.
+
+ `completer` should have the following signature::
+
+ def completion(self: Completer, text: str) -> List[str]:
+ raise NotImplementedError
+
+ It will be bound to the current Completer instance, receive some text,
+ and return a list of completions to suggest to the user.
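+
+ Example (illustrative sketch; ``my_matches`` is a hypothetical helper)::
+
+     def my_matches(self, text):
+         return ['my_command', 'my_other_command']
+
+     get_ipython().set_custom_completer(my_matches)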
+ """
+
+ newcomp = types.MethodType(completer, self.Completer)
+ self.Completer.custom_matchers.insert(pos,newcomp)
+
+ def set_completer_frame(self, frame=None):
+ """Set the frame of the completer."""
+ if frame:
+ self.Completer.namespace = frame.f_locals
+ self.Completer.global_namespace = frame.f_globals
+ else:
+ self.Completer.namespace = self.user_ns
+ self.Completer.global_namespace = self.user_global_ns
+
+ #-------------------------------------------------------------------------
+ # Things related to magics
+ #-------------------------------------------------------------------------
+
+ def init_magics(self):
+ from IPython.core import magics as m
+ self.magics_manager = magic.MagicsManager(shell=self,
+ parent=self,
+ user_magics=m.UserMagics(self))
+ self.configurables.append(self.magics_manager)
+
+ # Expose as public API from the magics manager
+ self.register_magics = self.magics_manager.register
+
+ self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
+ m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
+ m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
+ m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
+ m.PylabMagics, m.ScriptMagics,
+ )
+ self.register_magics(m.AsyncMagics)
+
+ # Register Magic Aliases
+ mman = self.magics_manager
+ # FIXME: magic aliases should be defined by the Magics classes
+ # or in MagicsManager, not here
+ mman.register_alias('ed', 'edit')
+ mman.register_alias('hist', 'history')
+ mman.register_alias('rep', 'recall')
+ mman.register_alias('SVG', 'svg', 'cell')
+ mman.register_alias('HTML', 'html', 'cell')
+ mman.register_alias('file', 'writefile', 'cell')
+
+ # FIXME: Move the color initialization to the DisplayHook, which
+ # should be split into a prompt manager and displayhook. We probably
+ # even need a centralized color management object.
+ self.run_line_magic('colors', self.colors)
+
+ # Defined here so that it's included in the documentation
+ @functools.wraps(magic.MagicsManager.register_function)
+ def register_magic_function(self, func, magic_kind='line', magic_name=None):
+ self.magics_manager.register_function(
+ func, magic_kind=magic_kind, magic_name=magic_name
+ )
+
+ def _find_with_lazy_load(self, /, type_, magic_name: str):
+ """
+ Try to find a magic potentially lazy-loading it.
+
+ Parameters
+ ----------
+ type_ : "line"|"cell"
+ The type of magic we are trying to find/lazy load.
+ magic_name : str
+ The name of the magic we are trying to find/lazy load.
+
+ Note that lazy-loading an extension may have side effects.
+ """
+ finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
+ fn = finder(magic_name)
+ if fn is not None:
+ return fn
+ lazy = self.magics_manager.lazy_magics.get(magic_name)
+ if lazy is None:
+ return None
+
+ self.run_line_magic("load_ext", lazy)
+ res = finder(magic_name)
+ return res
+
+ def run_line_magic(self, magic_name: str, line, _stack_depth=1):
+ """Execute the given line magic.
+
+ Parameters
+ ----------
+ magic_name : str
+ Name of the desired magic function, without '%' prefix.
+ line : str
+ The rest of the input line as a single string.
+ _stack_depth : int
+ If run_line_magic() is called from magic() then _stack_depth=2.
+ This is added to ensure backward compatibility for use of 'get_ipython().magic()'
+ """
+ fn = self._find_with_lazy_load("line", magic_name)
+ if fn is None:
+ lazy = self.magics_manager.lazy_magics.get(magic_name)
+ if lazy:
+ self.run_line_magic("load_ext", lazy)
+ fn = self.find_line_magic(magic_name)
+ if fn is None:
+ cm = self.find_cell_magic(magic_name)
+ etpl = "Line magic function `%%%s` not found%s."
+ extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
+ 'did you mean that instead?)' % magic_name )
+ raise UsageError(etpl % (magic_name, extra))
+ else:
+ # Note: this is the distance in the stack to the user's frame.
+ # This will need to be updated if the internal calling logic gets
+ # refactored, or else we'll be expanding the wrong variables.
+
+ # Determine stack_depth depending on where run_line_magic() has been called
+ stack_depth = _stack_depth
+ if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
+ # magic has opted out of var_expand
+ magic_arg_s = line
+ else:
+ magic_arg_s = self.var_expand(line, stack_depth)
+ # Put magic args in a list so we can call with f(*a) syntax
+ args = [magic_arg_s]
+ kwargs = {}
+ # Grab local namespace if we need it:
+ if getattr(fn, "needs_local_scope", False):
+ kwargs['local_ns'] = self.get_local_scope(stack_depth)
+ with self.builtin_trap:
+ result = fn(*args, **kwargs)
+
+ # The code below prevents the output from being displayed
+ # when using magics with decorator @output_can_be_silenced
+ # when the last Python token in the expression is a ';'.
+ if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
+ if DisplayHook.semicolon_at_end_of_expression(magic_arg_s):
+ return None
+
+ return result
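+
+ # Illustrative sketch (not part of upstream IPython): invoking a line magic
+ # programmatically, assuming `ip` is the running InteractiveShell:
+ #
+ #     ip.run_line_magic("timeit", "sum(range(100))")
+ #
+ # The second argument is everything that would follow the magic name on the
+ # input line, passed as a single string (use "" for no arguments).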
+
+ def get_local_scope(self, stack_depth):
+ """Get local scope at given stack depth.
+
+ Parameters
+ ----------
+ stack_depth : int
+ Depth relative to calling frame
+ """
+ return sys._getframe(stack_depth + 1).f_locals
+
+ def run_cell_magic(self, magic_name, line, cell):
+ """Execute the given cell magic.
+
+ Parameters
+ ----------
+ magic_name : str
+ Name of the desired magic function, without '%' prefix.
+ line : str
+ The rest of the first input line as a single string.
+ cell : str
+ The body of the cell as a (possibly multiline) string.
+ """
+ fn = self._find_with_lazy_load("cell", magic_name)
+ if fn is None:
+ lm = self.find_line_magic(magic_name)
+ etpl = "Cell magic `%%{0}` not found{1}."
+ extra = '' if lm is None else (' (But line magic `%{0}` exists, '
+ 'did you mean that instead?)'.format(magic_name))
+ raise UsageError(etpl.format(magic_name, extra))
+ elif cell == '':
+ message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
+ if self.find_line_magic(magic_name) is not None:
+ message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
+ raise UsageError(message)
+ else:
+ # Note: this is the distance in the stack to the user's frame.
+ # This will need to be updated if the internal calling logic gets
+ # refactored, or else we'll be expanding the wrong variables.
+ stack_depth = 2
+ if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
+ # magic has opted out of var_expand
+ magic_arg_s = line
+ else:
+ magic_arg_s = self.var_expand(line, stack_depth)
+ kwargs = {}
+ if getattr(fn, "needs_local_scope", False):
+ kwargs['local_ns'] = self.user_ns
+
+ with self.builtin_trap:
+ args = (magic_arg_s, cell)
+ result = fn(*args, **kwargs)
+
+ # The code below prevents the output from being displayed
+ # when using magics with decorator @output_can_be_silenced
+ # when the last Python token in the expression is a ';'.
+ if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
+ if DisplayHook.semicolon_at_end_of_expression(cell):
+ return None
+
+ return result
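+
+ # Illustrative sketch (not part of upstream IPython): invoking a cell magic
+ # programmatically, assuming `ip` is the running InteractiveShell:
+ #
+ #     ip.run_cell_magic("writefile", "hello.txt", "hello from IPython\n")
+ #
+ # `line` carries whatever follows the magic name on the first line and
+ # `cell` carries the cell body, which must be non-empty.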
+
+ def find_line_magic(self, magic_name):
+ """Find and return a line magic by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics['line'].get(magic_name)
+
+ def find_cell_magic(self, magic_name):
+ """Find and return a cell magic by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics['cell'].get(magic_name)
+
+ def find_magic(self, magic_name, magic_kind='line'):
+ """Find and return a magic of the given type by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics[magic_kind].get(magic_name)
+
+ def magic(self, arg_s):
+ """
+ DEPRECATED
+
+ Deprecated since IPython 0.13 (warning added in
+ 8.1), use run_line_magic(magic_name, parameter_s).
+
+ Call a magic function by name.
+
+ Input: a string containing the name of the magic function to call and
+ any additional arguments to be passed to the magic.
+
+ magic('name -opt foo bar') is equivalent to typing at the ipython
+ prompt:
+
+ In[1]: %name -opt foo bar
+
+ To call a magic without arguments, simply use magic('name').
+
+ This provides a proper Python function to call IPython's magics in any
+ valid Python code you can type at the interpreter, including loops and
+ compound statements.
+ """
+ warnings.warn(
+ "`magic(...)` is deprecated since IPython 0.13 (warning added in "
+ "8.1), use run_line_magic(magic_name, parameter_s).",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ # TODO: should we issue a loud deprecation warning here?
+ magic_name, _, magic_arg_s = arg_s.partition(' ')
+ magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
+ return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
+
+ #-------------------------------------------------------------------------
+ # Things related to macros
+ #-------------------------------------------------------------------------
+
+ def define_macro(self, name, themacro):
+ """Define a new macro
+
+ Parameters
+ ----------
+ name : str
+ The name of the macro.
+ themacro : str or Macro
+ The action to do upon invoking the macro. If a string, a new
+ Macro object is created by passing the string to it.
+ """
+
+ from IPython.core import macro
+
+ if isinstance(themacro, str):
+ themacro = macro.Macro(themacro)
+ if not isinstance(themacro, macro.Macro):
+ raise ValueError('A macro must be a string or a Macro instance.')
+ self.user_ns[name] = themacro
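+
+ # Illustrative sketch (not part of upstream IPython): defining a macro
+ # programmatically, assuming `ip` is the running InteractiveShell:
+ #
+ #     ip.define_macro("greet", "print('hello')\nprint('world')\n")
+ #
+ # Typing `greet` at the prompt afterwards replays the stored code.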
+
+ #-------------------------------------------------------------------------
+ # Things related to the running of system commands
+ #-------------------------------------------------------------------------
+
+ def system_piped(self, cmd):
+ """Call the given cmd in a subprocess, piping stdout/err
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute (cannot end in '&', as background processes are
+ not supported). Should not be a command that expects input
+ other than simple text.
+ """
+ if cmd.rstrip().endswith('&'):
+ # this is *far* from a rigorous test
+ # We do not support backgrounding processes because we either use
+ # pexpect or pipes to read from. Users can always just call
+ # os.system() or use ip.system=ip.system_raw
+ # if they really want a background process.
+ raise OSError("Background processes not supported.")
+
+ # we explicitly do NOT return the subprocess status code, because
+ # a non-None value would trigger :func:`sys.displayhook` calls.
+ # Instead, we store the exit_code in user_ns.
+ self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
+
+ def system_raw(self, cmd):
+ """Call the given cmd in a subprocess using os.system on Windows or
+ subprocess.call using the system shell on other platforms.
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute.
+ """
+ cmd = self.var_expand(cmd, depth=1)
+ # warn if there is an IPython magic alternative.
+ main_cmd = cmd.split()[0]
+ has_magic_alternatives = ("pip", "conda", "cd")
+
+ if main_cmd in has_magic_alternatives:
+ warnings.warn(
+ (
+ "You executed the system command !{0} which may not work "
+ "as expected. Try the IPython magic %{0} instead."
+ ).format(main_cmd)
+ )
+
+ # protect os.system from UNC paths on Windows, which it can't handle:
+ if sys.platform == 'win32':
+ from IPython.utils._process_win32 import AvoidUNCPath
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ try:
+ ec = os.system(cmd)
+ except KeyboardInterrupt:
+ print('\n' + self.get_exception_only(), file=sys.stderr)
+ ec = -2
+ else:
+ # For posix the result of the subprocess.call() below is an exit
+ # code, which by convention is zero for success, positive for
+ # program failure. Exit codes above 128 are reserved for signals,
+ # and the formula for converting a signal to an exit code is usually
+ # signal_number+128. To more easily differentiate between exit
+ # codes and signals, ipython uses negative numbers. For instance
+ # since control-c is signal 2 but exit code 130, ipython's
+ # _exit_code variable will read -2. Note that some shells like
+ # csh and fish don't follow sh/bash conventions for exit codes.
+ executable = os.environ.get('SHELL', None)
+ try:
+ # Use env shell instead of default /bin/sh
+ ec = subprocess.call(cmd, shell=True, executable=executable)
+ except KeyboardInterrupt:
+ # intercept control-C; a long traceback is not useful here
+ print('\n' + self.get_exception_only(), file=sys.stderr)
+ ec = 130
+ if ec > 128:
+ ec = -(ec - 128)
+
+ # We explicitly do NOT return the subprocess status code, because
+ # a non-None value would trigger :func:`sys.displayhook` calls.
+ # Instead, we store the exit_code in user_ns. Note the semantics
+ # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
+ # but raising SystemExit(_exit_code) will give status 254!
+ self.user_ns['_exit_code'] = ec
+
+ # use piped system by default, because it is better behaved
+ system = system_piped
+
+ def getoutput(self, cmd, split=True, depth=0):
+ """Get output (possibly including stderr) from a subprocess.
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute (cannot end in '&', as background processes are
+ not supported).
+ split : bool, optional
+ If True, split the output into an IPython SList. Otherwise, an
+ IPython LSString is returned. These are objects similar to normal
+ lists and strings, with a few convenience attributes for easier
+ manipulation of line-based output. You can use '?' on them for
+ details.
+ depth : int, optional
+ How many frames above the caller are the local variables which should
+ be expanded in the command string? The default (0) assumes that the
+ expansion variables are in the stack frame calling this function.
+ """
+ if cmd.rstrip().endswith('&'):
+ # this is *far* from a rigorous test
+ raise OSError("Background processes not supported.")
+ out = getoutput(self.var_expand(cmd, depth=depth+1))
+ if split:
+ out = SList(out.splitlines())
+ else:
+ out = LSString(out)
+ return out
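+
+ # Illustrative sketch (not part of upstream IPython): capturing command
+ # output programmatically, assuming `ip` is the running InteractiveShell:
+ #
+ #     files = ip.getoutput("ls")              # SList, one entry per line
+ #     text = ip.getoutput("ls", split=False)  # LSString, a single string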
+
+ #-------------------------------------------------------------------------
+ # Things related to aliases
+ #-------------------------------------------------------------------------
+
+ def init_alias(self):
+ self.alias_manager = AliasManager(shell=self, parent=self)
+ self.configurables.append(self.alias_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to extensions
+ #-------------------------------------------------------------------------
+
+ def init_extension_manager(self):
+ self.extension_manager = ExtensionManager(shell=self, parent=self)
+ self.configurables.append(self.extension_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to payloads
+ #-------------------------------------------------------------------------
+
+ def init_payload(self):
+ self.payload_manager = PayloadManager(parent=self)
+ self.configurables.append(self.payload_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to the prefilter
+ #-------------------------------------------------------------------------
+
+ def init_prefilter(self):
+ self.prefilter_manager = PrefilterManager(shell=self, parent=self)
+ self.configurables.append(self.prefilter_manager)
+ # Ultimately this will be refactored in the new interpreter code, but
+ # for now, we should expose the main prefilter method (there's legacy
+ # code out there that may rely on this).
+ self.prefilter = self.prefilter_manager.prefilter_lines
+
+ def auto_rewrite_input(self, cmd):
+ """Print to the screen the rewritten form of the user's command.
+
+ This shows visual feedback by rewriting input lines that cause
+ automatic calling to kick in, like::
+
+ /f x
+
+ into::
+
+ ------> f(x)
+
+ after the user's input prompt. This helps the user understand that the
+ input line was transformed automatically by IPython.
+ """
+ if not self.show_rewritten_input:
+ return
+
+ # This is overridden in TerminalInteractiveShell to use fancy prompts
+ print("------> " + cmd)
+
+ #-------------------------------------------------------------------------
+ # Things related to extracting values/expressions from kernel and user_ns
+ #-------------------------------------------------------------------------
+
+ def _user_obj_error(self):
+ """return simple exception dict
+
+ for use in user_expressions
+ """
+
+ etype, evalue, tb = self._get_exc_info()
+ stb = self.InteractiveTB.get_exception_only(etype, evalue)
+
+ exc_info = {
+ "status": "error",
+ "traceback": stb,
+ "ename": etype.__name__,
+ "evalue": py3compat.safe_unicode(evalue),
+ }
+
+ return exc_info
+
+ def _format_user_obj(self, obj):
+ """format a user object to display dict
+
+ for use in user_expressions
+ """
+
+ data, md = self.display_formatter.format(obj)
+ value = {
+ 'status' : 'ok',
+ 'data' : data,
+ 'metadata' : md,
+ }
+ return value
+
+ def user_expressions(self, expressions):
+ """Evaluate a dict of expressions in the user's namespace.
+
+ Parameters
+ ----------
+ expressions : dict
+ A dict with string keys and string values. The expression values
+ should be valid Python expressions, each of which will be evaluated
+ in the user namespace.
+
+ Returns
+ -------
+ A dict, keyed like the input expressions dict, with the rich mime-typed
+ display_data of each value.
+ """
+ out = {}
+ user_ns = self.user_ns
+ global_ns = self.user_global_ns
+
+ for key, expr in expressions.items():
+ try:
+ value = self._format_user_obj(eval(expr, global_ns, user_ns))
+ except:
+ value = self._user_obj_error()
+ out[key] = value
+ return out
+
+ #-------------------------------------------------------------------------
+ # Things related to the running of code
+ #-------------------------------------------------------------------------
+
+ def ex(self, cmd):
+ """Execute a normal python statement in user namespace."""
+ with self.builtin_trap:
+ exec(cmd, self.user_global_ns, self.user_ns)
+
+ def ev(self, expr):
+ """Evaluate python expression expr in user namespace.
+
+ Returns the result of evaluation
+ """
+ with self.builtin_trap:
+ return eval(expr, self.user_global_ns, self.user_ns)
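+
+ # Illustrative sketch (not part of upstream IPython): `ex` executes a
+ # statement and `ev` evaluates an expression, both in the user namespace,
+ # assuming `ip` is the running InteractiveShell:
+ #
+ #     ip.ex("x = 21")
+ #     ip.ev("x * 2")   # -> 42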
+
+ def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
+ """A safe version of the builtin execfile().
+
+ This version will never throw an exception, but instead print
+ helpful error messages to the screen. This only works on pure
+ Python files with the .py extension.
+
+ Parameters
+ ----------
+ fname : string
+ The name of the file to be executed.
+ *where : tuple
+ One or two namespaces, passed to execfile() as (globals,locals).
+ If only one is given, it is passed as both.
+ exit_ignore : bool (False)
+ If True, then silence SystemExit for non-zero status (it is always
+ silenced for zero status, as it is so common).
+ raise_exceptions : bool (False)
+ If True raise exceptions everywhere. Meant for testing.
+ shell_futures : bool (False)
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+
+ """
+ fname = Path(fname).expanduser().resolve()
+
+ # Make sure we can open the file
+ try:
+ with fname.open("rb"):
+ pass
+ except:
+ warn('Could not open file <%s> for safe execution.' % fname)
+ return
+
+ # Find things also in current directory. This is needed to mimic the
+ # behavior of running a script from the system command line, where
+ # Python inserts the script's directory into sys.path
+ dname = str(fname.parent)
+
+ with prepended_to_syspath(dname), self.builtin_trap:
+ try:
+ glob, loc = (where + (None, ))[:2]
+ py3compat.execfile(
+ fname, glob, loc,
+ self.compile if shell_futures else None)
+ except SystemExit as status:
+ # If the call was made with 0 or None exit status (sys.exit(0)
+ # or sys.exit() ), don't bother showing a traceback, as both of
+ # these are considered normal by the OS:
+ # > python -c'import sys;sys.exit(0)'; echo $?
+ # 0
+ # > python -c'import sys;sys.exit()'; echo $?
+ # 0
+ # For other exit status, we show the exception unless
+ # explicitly silenced, but only in short form.
+ if status.code:
+ if raise_exceptions:
+ raise
+ if not exit_ignore:
+ self.showtraceback(exception_only=True)
+ except:
+ if raise_exceptions:
+ raise
+ # tb offset is 2 because we wrap execfile
+ self.showtraceback(tb_offset=2)
+
+ def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
+ """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
+
+ Parameters
+ ----------
+ fname : str
+ The name of the file to execute. The filename must have a
+ .ipy or .ipynb extension.
+ shell_futures : bool (False)
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+ raise_exceptions : bool (False)
+ If True raise exceptions everywhere. Meant for testing.
+ """
+ fname = Path(fname).expanduser().resolve()
+
+ # Make sure we can open the file
+ try:
+ with fname.open("rb"):
+ pass
+ except:
+ warn('Could not open file <%s> for safe execution.' % fname)
+ return
+
+ # Find things also in current directory. This is needed to mimic the
+ # behavior of running a script from the system command line, where
+ # Python inserts the script's directory into sys.path
+ dname = str(fname.parent)
+
+ def get_cells():
+ """generator for sequence of code blocks to run"""
+ if fname.suffix == ".ipynb":
+ from nbformat import read
+ nb = read(fname, as_version=4)
+ if not nb.cells:
+ return
+ for cell in nb.cells:
+ if cell.cell_type == 'code':
+ yield cell.source
+ else:
+ yield fname.read_text(encoding="utf-8")
+
+ with prepended_to_syspath(dname):
+ try:
+ for cell in get_cells():
+ result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
+ if raise_exceptions:
+ result.raise_error()
+ elif not result.success:
+ break
+ except:
+ if raise_exceptions:
+ raise
+ self.showtraceback()
+ warn('Unknown failure executing file: <%s>' % fname)
+
+ def safe_run_module(self, mod_name, where):
+ """A safe version of runpy.run_module().
+
+ This version will never throw an exception, but instead print
+ helpful error messages to the screen.
+
+ `SystemExit` exceptions with status code 0 or None are ignored.
+
+ Parameters
+ ----------
+ mod_name : string
+ The name of the module to be executed.
+ where : dict
+ The globals namespace.
+ """
+ try:
+ try:
+ where.update(
+ runpy.run_module(str(mod_name), run_name="__main__",
+ alter_sys=True)
+ )
+ except SystemExit as status:
+ if status.code:
+ raise
+ except:
+ self.showtraceback()
+ warn('Unknown failure executing module: <%s>' % mod_name)
+
+ def run_cell(
+ self,
+ raw_cell,
+ store_history=False,
+ silent=False,
+ shell_futures=True,
+ cell_id=None,
+ ):
+ """Run a complete IPython cell.
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code (including IPython code such as %magic functions) to run.
+ store_history : bool
+ If True, the raw and translated cell will be stored in IPython's
+ history. For user code calling back into IPython's machinery, this
+ should be set to False.
+ silent : bool
+ If True, avoid side-effects, such as implicit displayhooks and
+ logging. silent=True forces store_history=False.
+ shell_futures : bool
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+
+ Returns
+ -------
+ result : :class:`ExecutionResult`
+ """
+ result = None
+ try:
+ result = self._run_cell(
+ raw_cell, store_history, silent, shell_futures, cell_id
+ )
+ finally:
+ self.events.trigger('post_execute')
+ if not silent:
+ self.events.trigger('post_run_cell', result)
+ return result
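+
+ # Illustrative sketch (not part of upstream IPython): running a cell
+ # programmatically and inspecting the ExecutionResult, assuming `ip` is
+ # the running InteractiveShell:
+ #
+ #     res = ip.run_cell("1 + 1")
+ #     res.success   # -> True
+ #     res.result    # -> 2
+ #
+ # Keep store_history=False (the default here) when calling back into
+ # IPython from user code.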
+
+ def _run_cell(
+ self,
+ raw_cell: str,
+ store_history: bool,
+ silent: bool,
+ shell_futures: bool,
+ cell_id: str,
+ ) -> ExecutionResult:
+ """Internal method to run a complete IPython cell."""
+
+ # we need to avoid calling self.transform_cell multiple times on the same thing
+ # so we need to store some results:
+ preprocessing_exc_tuple = None
+ try:
+ transformed_cell = self.transform_cell(raw_cell)
+ except Exception:
+ transformed_cell = raw_cell
+ preprocessing_exc_tuple = sys.exc_info()
+
+ assert transformed_cell is not None
+ coro = self.run_cell_async(
+ raw_cell,
+ store_history=store_history,
+ silent=silent,
+ shell_futures=shell_futures,
+ transformed_cell=transformed_cell,
+ preprocessing_exc_tuple=preprocessing_exc_tuple,
+ cell_id=cell_id,
+ )
+
+ # run_cell_async is async, but may not actually need an eventloop.
+ # When this is the case, we want to run it using the pseudo_sync_runner
+ # so that code can invoke event loops (for example via the %run and
+ # `%paste` magics).
+ if self.trio_runner:
+ runner = self.trio_runner
+ elif self.should_run_async(
+ raw_cell,
+ transformed_cell=transformed_cell,
+ preprocessing_exc_tuple=preprocessing_exc_tuple,
+ ):
+ runner = self.loop_runner
+ else:
+ runner = _pseudo_sync_runner
+
+ try:
+ result = runner(coro)
+ except BaseException as e:
+ info = ExecutionInfo(
+ raw_cell, store_history, silent, shell_futures, cell_id
+ )
+ result = ExecutionResult(info)
+ result.error_in_exec = e
+ self.showtraceback(running_compiled_code=True)
+ finally:
+ return result
+
+ def should_run_async(
+ self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
+ ) -> bool:
+ """Return whether a cell should be run asynchronously via a coroutine runner
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code to be executed
+
+ Returns
+ -------
+ result: bool
+ Whether the code needs to be run with a coroutine runner or not.
+
+ .. versionadded:: 7.0
+ """
+ if not self.autoawait:
+ return False
+ if preprocessing_exc_tuple is not None:
+ return False
+ assert preprocessing_exc_tuple is None
+ if transformed_cell is None:
+ warnings.warn(
+ "`should_run_async` will not call `transform_cell`"
+ " automatically in the future. Please pass the result to"
+ " the `transformed_cell` argument and any exception that happens"
+ " during the transform in `preprocessing_exc_tuple` in"
+ " IPython 7.17 and above.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ try:
+ cell = self.transform_cell(raw_cell)
+ except Exception:
+ # any exception during transform will be raised
+ # prior to execution
+ return False
+ else:
+ cell = transformed_cell
+ return _should_be_async(cell)
+
+ async def run_cell_async(
+ self,
+ raw_cell: str,
+ store_history=False,
+ silent=False,
+ shell_futures=True,
+ *,
+ transformed_cell: Optional[str] = None,
+ preprocessing_exc_tuple: Optional[AnyType] = None,
+ cell_id=None,
+ ) -> ExecutionResult:
+ """Run a complete IPython cell asynchronously.
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code (including IPython code such as %magic functions) to run.
+ store_history : bool
+ If True, the raw and translated cell will be stored in IPython's
+ history. For user code calling back into IPython's machinery, this
+ should be set to False.
+ silent : bool
+ If True, avoid side-effects, such as implicit displayhooks and
+ logging. silent=True forces store_history=False.
+ shell_futures : bool
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+ transformed_cell: str
+ The cell after it has been passed through the input transformers.
+ preprocessing_exc_tuple:
+ The exception info tuple, as returned by sys.exc_info(), if the transformation failed.
+
+ Returns
+ -------
+ result : :class:`ExecutionResult`
+
+ .. versionadded:: 7.0
+ """
+ info = ExecutionInfo(raw_cell, store_history, silent, shell_futures, cell_id)
+ result = ExecutionResult(info)
+
+ if (not raw_cell) or raw_cell.isspace():
+ self.last_execution_succeeded = True
+ self.last_execution_result = result
+ return result
+
+ if silent:
+ store_history = False
+
+ if store_history:
+ result.execution_count = self.execution_count
+
+ def error_before_exec(value):
+ if store_history:
+ self.execution_count += 1
+ result.error_before_exec = value
+ self.last_execution_succeeded = False
+ self.last_execution_result = result
+ return result
+
+ self.events.trigger('pre_execute')
+ if not silent:
+ self.events.trigger('pre_run_cell', info)
+
+ if transformed_cell is None:
+ warnings.warn(
+ "`run_cell_async` will not call `transform_cell`"
+ " automatically in the future. Please pass the result to"
+ " the `transformed_cell` argument and any exception that happens"
+ " during the transform in `preprocessing_exc_tuple` in"
+ " IPython 7.17 and above.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ # If any of our input transformation (input_transformer_manager or
+ # prefilter_manager) raises an exception, we store it in this variable
+ # so that we can display the error after logging the input and storing
+ # it in the history.
+ try:
+ cell = self.transform_cell(raw_cell)
+ except Exception:
+ preprocessing_exc_tuple = sys.exc_info()
+ cell = raw_cell # cell has to exist so it can be stored/logged
+ else:
+ preprocessing_exc_tuple = None
+ else:
+ if preprocessing_exc_tuple is None:
+ cell = transformed_cell
+ else:
+ cell = raw_cell
+
+ # Do NOT store paste/cpaste magic history
+ if "get_ipython().run_line_magic(" in cell and "paste" in cell:
+ store_history = False
+
+ # Store raw and processed history
+ if store_history:
+ self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
+ if not silent:
+ self.logger.log(cell, raw_cell)
+
+ # Display the exception if input processing failed.
+ if preprocessing_exc_tuple is not None:
+ self.showtraceback(preprocessing_exc_tuple)
+ if store_history:
+ self.execution_count += 1
+ return error_before_exec(preprocessing_exc_tuple[1])
+
+ # Our own compiler remembers the __future__ environment. If we want to
+ # run code with a separate __future__ environment, use the default
+ # compiler
+ compiler = self.compile if shell_futures else self.compiler_class()
+
+ _run_async = False
+
+ with self.builtin_trap:
+ cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
+
+ with self.display_trap:
+ # Compile to bytecode
+ try:
+ code_ast = compiler.ast_parse(cell, filename=cell_name)
+ except self.custom_exceptions as e:
+ etype, value, tb = sys.exc_info()
+ self.CustomTB(etype, value, tb)
+ return error_before_exec(e)
+ except IndentationError as e:
+ self.showindentationerror()
+ return error_before_exec(e)
+ except (OverflowError, SyntaxError, ValueError, TypeError,
+ MemoryError) as e:
+ self.showsyntaxerror()
+ return error_before_exec(e)
+
+ # Apply AST transformations
+ try:
+ code_ast = self.transform_ast(code_ast)
+ except InputRejected as e:
+ self.showtraceback()
+ return error_before_exec(e)
+
+ # Give the displayhook a reference to our ExecutionResult so it
+ # can fill in the output value.
+ self.displayhook.exec_result = result
+
+ # Execute the user code
+ interactivity = "none" if silent else self.ast_node_interactivity
+
+
+ has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
+ interactivity=interactivity, compiler=compiler, result=result)
+
+ self.last_execution_succeeded = not has_raised
+ self.last_execution_result = result
+
+ # Reset this so later displayed values do not modify the
+ # ExecutionResult
+ self.displayhook.exec_result = None
+
+ if store_history:
+ # Write output to the database. Does nothing unless
+ # history output logging is enabled.
+ self.history_manager.store_output(self.execution_count)
+ # Each cell is a *single* input, regardless of how many lines it has
+ self.execution_count += 1
+
+ return result
+
+ def transform_cell(self, raw_cell):
+ """Transform an input cell before parsing it.
+
+ Static transformations, implemented in IPython.core.inputtransformer2,
+ deal with things like ``%magic`` and ``!system`` commands.
+ These run on all input.
+ Dynamic transformations, for things like unescaped magics and the exit
+ autocall, depend on the state of the interpreter.
+ These only apply to single line inputs.
+
+ These string-based transformations are followed by AST transformations;
+ see :meth:`transform_ast`.
+ """
+ # Static input transformations
+ cell = self.input_transformer_manager.transform_cell(raw_cell)
+
+ if len(cell.splitlines()) == 1:
+ # Dynamic transformations - only applied for single line commands
+ with self.builtin_trap:
+ # use prefilter_lines to handle trailing newlines
+ # restore trailing newline for ast.parse
+ cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
+
+ lines = cell.splitlines(keepends=True)
+ for transform in self.input_transformers_post:
+ lines = transform(lines)
+ cell = ''.join(lines)
+
+ return cell
+
+ def transform_ast(self, node):
+ """Apply the AST transformations from self.ast_transformers
+
+ Parameters
+ ----------
+ node : ast.Node
+ The root node to be transformed. Typically called with the ast.Module
+ produced by parsing user input.
+
+ Returns
+ -------
+ An ast.Node corresponding to the node it was called with. Note that it
+ may also modify the passed object, so don't rely on references to the
+ original AST.
+ """
+ for transformer in self.ast_transformers:
+ try:
+ node = transformer.visit(node)
+ except InputRejected:
+ # User-supplied AST transformers can reject an input by raising
+ # an InputRejected. Short-circuit in this case so that we
+ # don't unregister the transform.
+ raise
+ except Exception:
+ warn("AST transformer %r threw an error. It will be unregistered." % transformer)
+ self.ast_transformers.remove(transformer)
+
+ if self.ast_transformers:
+ ast.fix_missing_locations(node)
+ return node
+
+ async def run_ast_nodes(
+ self,
+ nodelist: ListType[stmt],
+ cell_name: str,
+ interactivity="last_expr",
+ compiler=compile,
+ result=None,
+ ):
+ """Run a sequence of AST nodes. The execution mode depends on the
+ interactivity parameter.
+
+ Parameters
+ ----------
+ nodelist : list
+ A sequence of AST nodes to run.
+ cell_name : str
+ Will be passed to the compiler as the filename of the cell. Typically
+ the value returned by ip.compile.cache(cell).
+ interactivity : str
+ 'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
+ specifying which nodes should be run interactively (displaying output
+ from expressions). 'last_expr' will run the last node interactively
+ only if it is an expression (i.e. expressions in loops or other blocks
+ are not displayed). 'last_expr_or_assign' will run the last expression
+ or the last assignment. Other values for this parameter will raise a
+ ValueError.
+
+ compiler : callable
+ A function with the same interface as the built-in compile(), to turn
+ the AST nodes into code objects. Default is the built-in compile().
+ result : ExecutionResult, optional
+ An object to store exceptions that occur during execution.
+
+ Returns
+ -------
+ True if an exception occurred while running code, False if it finished
+ running.
+ """
+ if not nodelist:
+ return
+
+
+ if interactivity == 'last_expr_or_assign':
+ if isinstance(nodelist[-1], _assign_nodes):
+ asg = nodelist[-1]
+ if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
+ target = asg.targets[0]
+ elif isinstance(asg, _single_targets_nodes):
+ target = asg.target
+ else:
+ target = None
+ if isinstance(target, ast.Name):
+ nnode = ast.Expr(ast.Name(target.id, ast.Load()))
+ ast.fix_missing_locations(nnode)
+ nodelist.append(nnode)
+ interactivity = 'last_expr'
+
+ _async = False
+ if interactivity == 'last_expr':
+ if isinstance(nodelist[-1], ast.Expr):
+ interactivity = "last"
+ else:
+ interactivity = "none"
+
+ if interactivity == 'none':
+ to_run_exec, to_run_interactive = nodelist, []
+ elif interactivity == 'last':
+ to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
+ elif interactivity == 'all':
+ to_run_exec, to_run_interactive = [], nodelist
+ else:
+ raise ValueError("Interactivity was %r" % interactivity)
+
+ try:
+
+ def compare(code):
+ is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
+ return is_async
+
+ # refactor that to just change the mod constructor.
+ to_run = []
+ for node in to_run_exec:
+ to_run.append((node, "exec"))
+
+ for node in to_run_interactive:
+ to_run.append((node, "single"))
+
+ for node, mode in to_run:
+ if mode == "exec":
+ mod = Module([node], [])
+ elif mode == "single":
+ mod = ast.Interactive([node]) # type: ignore
+ with compiler.extra_flags(
+ getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
+ if self.autoawait
+ else 0x0
+ ):
+ code = compiler(mod, cell_name, mode)
+ asy = compare(code)
+ if await self.run_code(code, result, async_=asy):
+ return True
+
+ # Flush softspace
+ if softspace(sys.stdout, 0):
+ print()
+
+ except:
+ # It's possible to have exceptions raised here, typically by
+ # compilation of odd code (such as a naked 'return' outside a
+ # function) that did parse but isn't valid. Typically the exception
+ # is a SyntaxError, but it's safest just to catch anything and show
+ # the user a traceback.
+
+ # We do only one try/except outside the loop to minimize the impact
+ # on runtime, and also because if any node in the node list is
+ # broken, we should stop execution completely.
+ if result:
+ result.error_before_exec = sys.exc_info()[1]
+ self.showtraceback()
+ return True
+
+ return False
+
+ async def run_code(self, code_obj, result=None, *, async_=False):
+ """Execute a code object.
+
+ When an exception occurs, self.showtraceback() is called to display a
+ traceback.
+
+ Parameters
+ ----------
+ code_obj : code object
+ A compiled code object, to be executed
+ result : ExecutionResult, optional
+ An object to store exceptions that occur during execution.
+ async_ : Bool (Experimental)
+ Attempt to run top-level asynchronous code in a default loop.
+
+ Returns
+ -------
+ False : successful execution.
+ True : an error occurred.
+ """
+ # special value to say that anything above is IPython and should be
+ # hidden.
+ __tracebackhide__ = "__ipython_bottom__"
+ # Set our own excepthook in case the user code tries to call it
+ # directly, so that the IPython crash handler doesn't get triggered
+ old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
+
+ # we save the original sys.excepthook in the instance, in case config
+ # code (such as magics) needs access to it.
+ self.sys_excepthook = old_excepthook
+ outflag = True # happens in more places, so it's easier as default
+ try:
+ try:
+ if async_:
+ await eval(code_obj, self.user_global_ns, self.user_ns)
+ else:
+ exec(code_obj, self.user_global_ns, self.user_ns)
+ finally:
+ # Reset our crash handler in place
+ sys.excepthook = old_excepthook
+ except SystemExit as e:
+ if result is not None:
+ result.error_in_exec = e
+ self.showtraceback(exception_only=True)
+ warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
+ except bdb.BdbQuit:
+ etype, value, tb = sys.exc_info()
+ if result is not None:
+ result.error_in_exec = value
+ # the BdbQuit stops here
+ except self.custom_exceptions:
+ etype, value, tb = sys.exc_info()
+ if result is not None:
+ result.error_in_exec = value
+ self.CustomTB(etype, value, tb)
+ except:
+ if result is not None:
+ result.error_in_exec = sys.exc_info()[1]
+ self.showtraceback(running_compiled_code=True)
+ else:
+ outflag = False
+ return outflag
+
+ # For backwards compatibility
+ runcode = run_code
+
+ def check_complete(self, code: str) -> Tuple[str, str]:
+ """Return whether a block of code is ready to execute, or should be continued
+
+ Parameters
+ ----------
+ code : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+ One of 'complete', 'incomplete', or 'invalid' if source is not a
+ prefix of valid code.
+ indent : str
+ When status is 'incomplete', this is some whitespace to insert on
+ the next line of the prompt.
+ """
+ status, nspaces = self.input_transformer_manager.check_complete(code)
+ return status, ' ' * (nspaces or 0)
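+
+ # Illustrative sketch (not part of upstream IPython): checking whether
+ # input needs a continuation line, assuming `ip` is the running shell:
+ #
+ #     ip.check_complete("x = 1")                # -> ('complete', '')
+ #     ip.check_complete("for i in range(3):")   # -> ('incomplete', '    ')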
+
+ #-------------------------------------------------------------------------
+ # Things related to GUI support and pylab
+ #-------------------------------------------------------------------------
+
+ active_eventloop = None
+
+ def enable_gui(self, gui=None):
+ raise NotImplementedError('Implement enable_gui in a subclass')
+
+ def enable_matplotlib(self, gui=None):
+ """Enable interactive matplotlib and inline figure support.
+
+ This takes the following steps:
+
+ 1. select the appropriate eventloop and matplotlib backend
+ 2. set up matplotlib for interactive use with that backend
+ 3. configure formatters for inline figure display
+ 4. enable the selected gui eventloop
+
+ Parameters
+ ----------
+ gui : optional, string
+ If given, dictates the choice of matplotlib GUI backend to use
+ (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
+ 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
+ matplotlib (as dictated by the matplotlib build-time options plus the
+ user's matplotlibrc configuration file). Note that not all backends
+ make sense in all contexts, for example a terminal ipython can't
+ display figures inline.
+ """
+ from matplotlib_inline.backend_inline import configure_inline_support
+
+ from IPython.core import pylabtools as pt
+ gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
+
+ if gui != 'inline':
+ # If we have our first gui selection, store it
+ if self.pylab_gui_select is None:
+ self.pylab_gui_select = gui
+ # Otherwise if they are different
+ elif gui != self.pylab_gui_select:
+ print('Warning: Cannot change to a different GUI toolkit: %s.'
+ ' Using %s instead.' % (gui, self.pylab_gui_select))
+ gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
+
+ pt.activate_matplotlib(backend)
+ configure_inline_support(self, backend)
+
+ # Now we must activate the gui pylab wants to use, and fix %run to take
+ # plot updates into account
+ self.enable_gui(gui)
+ self.magics_manager.registry['ExecutionMagics'].default_runner = \
+ pt.mpl_runner(self.safe_execfile)
+
+ return gui, backend
+
+ def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
+ """Activate pylab support at runtime.
+
+ This turns on support for matplotlib, preloads into the interactive
+ namespace all of numpy and pylab, and configures IPython to correctly
+ interact with the GUI event loop. The GUI backend to be used can be
+ optionally selected with the optional ``gui`` argument.
+
+ This method only adds namespace preloading on top of InteractiveShell.enable_matplotlib.
+
+ Parameters
+ ----------
+ gui : optional, string
+ If given, dictates the choice of matplotlib GUI backend to use
+ (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
+ 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
+ matplotlib (as dictated by the matplotlib build-time options plus the
+ user's matplotlibrc configuration file). Note that not all backends
+ make sense in all contexts, for example a terminal ipython can't
+ display figures inline.
+ import_all : optional, bool, default: True
+ Whether to do `from numpy import *` and `from pylab import *`
+ in addition to module imports.
+ welcome_message : deprecated
+ This argument is ignored, no welcome message will be displayed.
+ """
+ from IPython.core.pylabtools import import_pylab
+
+ gui, backend = self.enable_matplotlib(gui)
+
+ # We want to prevent the loading of pylab to pollute the user's
+ # namespace as shown by the %who* magics, so we execute the activation
+ # code in an empty namespace, and we update *both* user_ns and
+ # user_ns_hidden with this information.
+ ns = {}
+ import_pylab(ns, import_all)
+ # warn about clobbered names
+ ignored = {"__builtins__"}
+ both = set(ns).intersection(self.user_ns).difference(ignored)
+ clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
+ self.user_ns.update(ns)
+ self.user_ns_hidden.update(ns)
+ return gui, backend, clobbered
+
+ #-------------------------------------------------------------------------
+ # Utilities
+ #-------------------------------------------------------------------------
+
+ def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
+ """Expand python variables in a string.
+
+ The depth argument indicates how many frames above the caller should
+ be walked to look for the local namespace in which to expand variables.
+
+ The global namespace for expansion is always the user's interactive
+ namespace.
+ """
+ ns = self.user_ns.copy()
+ try:
+ frame = sys._getframe(depth+1)
+ except ValueError:
+ # This is thrown if there aren't that many frames on the stack,
+ # e.g. if a script called run_line_magic() directly.
+ pass
+ else:
+ ns.update(frame.f_locals)
+
+ try:
+ # We have to use .vformat() here, because 'self' is a valid and common
+ # name, and expanding **ns for .format() would make it collide with
+ # the 'self' argument of the method.
+ cmd = formatter.vformat(cmd, args=[], kwargs=ns)
+ except Exception:
+ # if formatter couldn't format, just let it go untransformed
+ pass
+ return cmd
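+
+ # Illustrative sketch (not part of upstream IPython): variable expansion as
+ # used by magics and shell escapes, assuming `ip` is the running shell:
+ #
+ #     ip.user_ns["pattern"] = "*.py"
+ #     ip.var_expand("ls {pattern}")   # -> 'ls *.py'
+ #
+ # Both the {name} and $name forms are handled by DollarFormatter.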
+
+ def mktempfile(self, data=None, prefix='ipython_edit_'):
+ """Make a new tempfile and return its filename.
+
+ This makes a call to tempfile.mkstemp (inside a directory created with
+ tempfile.mkdtemp), and registers the created file internally so IPython
+ cleans it up at exit time.
+
+ Optional inputs:
+
+ - data(None): if data is given, it gets written out to the temp file
+ immediately, and the file is closed again."""
+
+ dir_path = Path(tempfile.mkdtemp(prefix=prefix))
+ self.tempdirs.append(dir_path)
+
+ handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
+ os.close(handle) # On Windows, there can only be one open handle on a file
+
+ file_path = Path(filename)
+ self.tempfiles.append(file_path)
+
+ if data:
+ file_path.write_text(data, encoding="utf-8")
+ return filename
+
+ def ask_yes_no(self, prompt, default=None, interrupt=None):
+ if self.quiet:
+ return True
+ return ask_yes_no(prompt,default,interrupt)
+
+ def show_usage(self):
+ """Show a usage message"""
+ page.page(IPython.core.usage.interactive_usage)
+
+ def extract_input_lines(self, range_str, raw=False):
+ """Return as a string a set of input history slices.
+
+ Parameters
+ ----------
+ range_str : str
+ The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
+ since this function is for use by magic functions which get their
+ arguments as strings. The number before the / is the session
+ number: ~n goes n back from the current session.
+
+ If empty string is given, returns history of current session
+ without the last input.
+
+ raw : bool, optional
+ By default, the processed input is used. If this is true, the raw
+ input history is used instead.
+
+ Notes
+ -----
+ Slices can be described with two notations:
+
+ * ``N:M`` -> standard python form, means including items N...(M-1).
+ * ``N-M`` -> include items N..M (closed endpoint).
+ """
+ lines = self.history_manager.get_range_by_str(range_str, raw=raw)
+ text = "\n".join(x for _, _, x in lines)
+
+ # Skip the last line, as it's probably the magic that called this
+ if not range_str:
+ if "\n" not in text:
+ text = ""
+ else:
+ text = text[: text.rfind("\n")]
+
+ return text
+
+ def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
+ """Get a code string from history, file, url, or a string or macro.
+
+ This is mainly used by magic functions.
+
+ Parameters
+ ----------
+ target : str
+ A string specifying code to retrieve. This will be tried respectively
+ as: ranges of input history (see %history for syntax), url,
+ corresponding .py file, filename, or an expression evaluating to a
+ string or Macro in the user namespace.
+
+ If empty string is given, returns complete history of current
+ session, without the last line.
+
+ raw : bool
+ If true (default), retrieve raw history. Has no effect on the other
+ retrieval mechanisms.
+
+ py_only : bool (default False)
+ Only try to fetch python code, do not try alternative methods to decode file
+ if unicode fails.
+
+ Returns
+ -------
+ A string of code.
+ ValueError is raised if nothing is found, and TypeError if it evaluates
+ to an object of another type. In each case, .args[0] is a printable
+ message.
+ """
+ code = self.extract_input_lines(target, raw=raw) # Grab history
+ if code:
+ return code
+ try:
+ if target.startswith(('http://', 'https://')):
+ return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
+ except UnicodeDecodeError as e:
+ if not py_only :
+ # Deferred import
+ from urllib.request import urlopen
+ response = urlopen(target)
+ return response.read().decode('latin1')
+ raise ValueError("'%s' seems to be unreadable." % target) from e
+
+ potential_target = [target]
+ try :
+ potential_target.insert(0,get_py_filename(target))
+ except IOError:
+ pass
+
+ for tgt in potential_target :
+ if os.path.isfile(tgt): # Read file
+ try :
+ return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
+ except UnicodeDecodeError as e:
+ if not py_only :
+ with io_open(tgt,'r', encoding='latin1') as f :
+ return f.read()
+ raise ValueError("'%s' seems to be unreadable." % target) from e
+ elif os.path.isdir(os.path.expanduser(tgt)):
+ raise ValueError("'%s' is a directory, not a regular file." % target)
+
+ if search_ns:
+ # Inspect namespace to load object source
+ object_info = self.object_inspect(target, detail_level=1)
+ if object_info['found'] and object_info['source']:
+ return object_info['source']
+
+ try: # User namespace
+ codeobj = eval(target, self.user_ns)
+ except Exception as e:
+ raise ValueError(("'%s' was not found in history, as a file, url, "
+ "nor in the user namespace.") % target) from e
+
+ if isinstance(codeobj, str):
+ return codeobj
+ elif isinstance(codeobj, Macro):
+ return codeobj.value
+
+ raise TypeError("%s is neither a string nor a macro." % target,
+ codeobj)
+
+ def _atexit_once(self):
+ """
+ At-exit operations that need to be called at most once per instance.
+ A second call to this function on the same instance will do nothing.
+ """
+
+ if not getattr(self, "_atexit_once_called", False):
+ self._atexit_once_called = True
+ # Clear all user namespaces to release all references cleanly.
+ self.reset(new_session=False)
+ # Close the history session (this stores the end time and line count)
+ # this must be *before* the tempfile cleanup, in case of temporary
+ # history db
+ self.history_manager.end_session()
+ self.history_manager = None
+
+ #-------------------------------------------------------------------------
+ # Things related to IPython exiting
+ #-------------------------------------------------------------------------
+ def atexit_operations(self):
+ """This will be executed at the time of exit.
+
+ Cleanup operations and saving of persistent data that is done
+ unconditionally by IPython should be performed here.
+
+ For things that may depend on startup flags or platform specifics (such
+ as having readline or not), register a separate atexit function in the
+ code that has the appropriate information, rather than trying to
+ clutter this method with platform-specific logic.
+ """
+ self._atexit_once()
+
+ # Cleanup all tempfiles and folders left around
+ for tfile in list(self.tempfiles):  # iterate over a copy; the list is mutated below
+ try:
+ tfile.unlink()
+ self.tempfiles.remove(tfile)
+ except FileNotFoundError:
+ pass
+ del self.tempfiles
+ for tdir in list(self.tempdirs):  # iterate over a copy; the list is mutated below
+ try:
+ tdir.rmdir()
+ self.tempdirs.remove(tdir)
+ except FileNotFoundError:
+ pass
+ del self.tempdirs
+
+ # Restore user's cursor
+ if hasattr(self, "editing_mode") and self.editing_mode == "vi":
+ sys.stdout.write("\x1b[0 q")
+ sys.stdout.flush()
+
+ def cleanup(self):
+ self.restore_sys_module_state()
+
+
+ # Overridden in terminal subclass to change prompts
+ def switch_doctest_mode(self, mode):
+ pass
+
+
+class InteractiveShellABC(metaclass=abc.ABCMeta):
+ """An abstract base class for InteractiveShell."""
+
+InteractiveShellABC.register(InteractiveShell)
diff --git a/contrib/python/ipython/py3/IPython/core/latex_symbols.py b/contrib/python/ipython/py3/IPython/core/latex_symbols.py
new file mode 100644
index 0000000000..164d917beb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/latex_symbols.py
@@ -0,0 +1,1301 @@
+# encoding: utf-8
+
+# DO NOT EDIT THIS FILE BY HAND.
+
+# To update this file, run the script /tools/gen_latex_symbols.py using Python 3
+
+# This file is autogenerated from the file:
+# https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
+# This original list is filtered to remove any unicode characters that are not valid
+# Python identifiers.
+
+latex_symbols = {
+
+ "\\euler" : "ℯ",
+ "\\^a" : "ᵃ",
+ "\\^b" : "ᵇ",
+ "\\^c" : "ᶜ",
+ "\\^d" : "ᵈ",
+ "\\^e" : "ᵉ",
+ "\\^f" : "ᶠ",
+ "\\^g" : "ᵍ",
+ "\\^h" : "ʰ",
+ "\\^i" : "ⁱ",
+ "\\^j" : "ʲ",
+ "\\^k" : "ᵏ",
+ "\\^l" : "ˡ",
+ "\\^m" : "ᵐ",
+ "\\^n" : "ⁿ",
+ "\\^o" : "ᵒ",
+ "\\^p" : "ᵖ",
+ "\\^r" : "ʳ",
+ "\\^s" : "ˢ",
+ "\\^t" : "ᵗ",
+ "\\^u" : "ᵘ",
+ "\\^v" : "ᵛ",
+ "\\^w" : "ʷ",
+ "\\^x" : "ˣ",
+ "\\^y" : "ʸ",
+ "\\^z" : "ᶻ",
+ "\\^A" : "ᴬ",
+ "\\^B" : "ᴮ",
+ "\\^D" : "ᴰ",
+ "\\^E" : "ᴱ",
+ "\\^G" : "ᴳ",
+ "\\^H" : "ᴴ",
+ "\\^I" : "ᴵ",
+ "\\^J" : "ᴶ",
+ "\\^K" : "ᴷ",
+ "\\^L" : "ᴸ",
+ "\\^M" : "ᴹ",
+ "\\^N" : "ᴺ",
+ "\\^O" : "ᴼ",
+ "\\^P" : "ᴾ",
+ "\\^R" : "ᴿ",
+ "\\^T" : "ᵀ",
+ "\\^U" : "ᵁ",
+ "\\^V" : "ⱽ",
+ "\\^W" : "ᵂ",
+ "\\^alpha" : "ᵅ",
+ "\\^beta" : "ᵝ",
+ "\\^gamma" : "ᵞ",
+ "\\^delta" : "ᵟ",
+ "\\^epsilon" : "ᵋ",
+ "\\^theta" : "ᶿ",
+ "\\^iota" : "ᶥ",
+ "\\^phi" : "ᵠ",
+ "\\^chi" : "ᵡ",
+ "\\^Phi" : "ᶲ",
+ "\\_a" : "ₐ",
+ "\\_e" : "ₑ",
+ "\\_h" : "ₕ",
+ "\\_i" : "ᵢ",
+ "\\_j" : "ⱼ",
+ "\\_k" : "ₖ",
+ "\\_l" : "ₗ",
+ "\\_m" : "ₘ",
+ "\\_n" : "ₙ",
+ "\\_o" : "ₒ",
+ "\\_p" : "ₚ",
+ "\\_r" : "ᵣ",
+ "\\_s" : "ₛ",
+ "\\_t" : "ₜ",
+ "\\_u" : "ᵤ",
+ "\\_v" : "ᵥ",
+ "\\_x" : "ₓ",
+ "\\_schwa" : "ₔ",
+ "\\_beta" : "ᵦ",
+ "\\_gamma" : "ᵧ",
+ "\\_rho" : "ᵨ",
+ "\\_phi" : "ᵩ",
+ "\\_chi" : "ᵪ",
+ "\\hbar" : "ħ",
+ "\\sout" : "̶",
+ "\\ordfeminine" : "ª",
+ "\\cdotp" : "·",
+ "\\ordmasculine" : "º",
+ "\\AA" : "Å",
+ "\\AE" : "Æ",
+ "\\DH" : "Ð",
+ "\\O" : "Ø",
+ "\\TH" : "Þ",
+ "\\ss" : "ß",
+ "\\aa" : "å",
+ "\\ae" : "æ",
+ "\\eth" : "ð",
+ "\\dh" : "ð",
+ "\\o" : "ø",
+ "\\th" : "þ",
+ "\\DJ" : "Đ",
+ "\\dj" : "đ",
+ "\\imath" : "ı",
+ "\\jmath" : "ȷ",
+ "\\L" : "Ł",
+ "\\l" : "ł",
+ "\\NG" : "Ŋ",
+ "\\ng" : "ŋ",
+ "\\OE" : "Œ",
+ "\\oe" : "œ",
+ "\\hvlig" : "ƕ",
+ "\\nrleg" : "ƞ",
+ "\\doublepipe" : "ǂ",
+ "\\trna" : "ɐ",
+ "\\trnsa" : "ɒ",
+ "\\openo" : "ɔ",
+ "\\rtld" : "ɖ",
+ "\\schwa" : "ə",
+ "\\varepsilon" : "ε",
+ "\\pgamma" : "ɣ",
+ "\\pbgam" : "ɤ",
+ "\\trnh" : "ɥ",
+ "\\btdl" : "ɬ",
+ "\\rtll" : "ɭ",
+ "\\trnm" : "ɯ",
+ "\\trnmlr" : "ɰ",
+ "\\ltlmr" : "ɱ",
+ "\\ltln" : "ɲ",
+ "\\rtln" : "ɳ",
+ "\\clomeg" : "ɷ",
+ "\\ltphi" : "ɸ",
+ "\\trnr" : "ɹ",
+ "\\trnrl" : "ɺ",
+ "\\rttrnr" : "ɻ",
+ "\\rl" : "ɼ",
+ "\\rtlr" : "ɽ",
+ "\\fhr" : "ɾ",
+ "\\rtls" : "ʂ",
+ "\\esh" : "ʃ",
+ "\\trnt" : "ʇ",
+ "\\rtlt" : "ʈ",
+ "\\pupsil" : "ʊ",
+ "\\pscrv" : "ʋ",
+ "\\invv" : "ʌ",
+ "\\invw" : "ʍ",
+ "\\trny" : "ʎ",
+ "\\rtlz" : "ʐ",
+ "\\yogh" : "ʒ",
+ "\\glst" : "ʔ",
+ "\\reglst" : "ʕ",
+ "\\inglst" : "ʖ",
+ "\\turnk" : "ʞ",
+ "\\dyogh" : "ʤ",
+ "\\tesh" : "ʧ",
+ "\\rasp" : "ʼ",
+ "\\verts" : "ˈ",
+ "\\verti" : "ˌ",
+ "\\lmrk" : "ː",
+ "\\hlmrk" : "ˑ",
+ "\\grave" : "̀",
+ "\\acute" : "́",
+ "\\hat" : "̂",
+ "\\tilde" : "̃",
+ "\\bar" : "̄",
+ "\\breve" : "̆",
+ "\\dot" : "̇",
+ "\\ddot" : "̈",
+ "\\ocirc" : "̊",
+ "\\H" : "̋",
+ "\\check" : "̌",
+ "\\palh" : "̡",
+ "\\rh" : "̢",
+ "\\c" : "̧",
+ "\\k" : "̨",
+ "\\sbbrg" : "̪",
+ "\\strike" : "̶",
+ "\\Alpha" : "Α",
+ "\\Beta" : "Β",
+ "\\Gamma" : "Γ",
+ "\\Delta" : "Δ",
+ "\\Epsilon" : "Ε",
+ "\\Zeta" : "Ζ",
+ "\\Eta" : "Η",
+ "\\Theta" : "Θ",
+ "\\Iota" : "Ι",
+ "\\Kappa" : "Κ",
+ "\\Lambda" : "Λ",
+ "\\Xi" : "Ξ",
+ "\\Pi" : "Π",
+ "\\Rho" : "Ρ",
+ "\\Sigma" : "Σ",
+ "\\Tau" : "Τ",
+ "\\Upsilon" : "Υ",
+ "\\Phi" : "Φ",
+ "\\Chi" : "Χ",
+ "\\Psi" : "Ψ",
+ "\\Omega" : "Ω",
+ "\\alpha" : "α",
+ "\\beta" : "β",
+ "\\gamma" : "γ",
+ "\\delta" : "δ",
+ "\\zeta" : "ζ",
+ "\\eta" : "η",
+ "\\theta" : "θ",
+ "\\iota" : "ι",
+ "\\kappa" : "κ",
+ "\\lambda" : "λ",
+ "\\mu" : "μ",
+ "\\nu" : "ν",
+ "\\xi" : "ξ",
+ "\\pi" : "π",
+ "\\rho" : "ρ",
+ "\\varsigma" : "ς",
+ "\\sigma" : "σ",
+ "\\tau" : "τ",
+ "\\upsilon" : "υ",
+ "\\varphi" : "φ",
+ "\\chi" : "χ",
+ "\\psi" : "ψ",
+ "\\omega" : "ω",
+ "\\vartheta" : "ϑ",
+ "\\phi" : "ϕ",
+ "\\varpi" : "ϖ",
+ "\\Stigma" : "Ϛ",
+ "\\Digamma" : "Ϝ",
+ "\\digamma" : "ϝ",
+ "\\Koppa" : "Ϟ",
+ "\\Sampi" : "Ϡ",
+ "\\varkappa" : "ϰ",
+ "\\varrho" : "ϱ",
+ "\\varTheta" : "ϴ",
+ "\\epsilon" : "ϵ",
+ "\\dddot" : "⃛",
+ "\\ddddot" : "⃜",
+ "\\hslash" : "ℏ",
+ "\\Im" : "ℑ",
+ "\\ell" : "ℓ",
+ "\\wp" : "℘",
+ "\\Re" : "ℜ",
+ "\\aleph" : "ℵ",
+ "\\beth" : "ℶ",
+ "\\gimel" : "ℷ",
+ "\\daleth" : "ℸ",
+ "\\bbPi" : "ℿ",
+ "\\Zbar" : "Ƶ",
+ "\\overbar" : "̅",
+ "\\ovhook" : "̉",
+ "\\candra" : "̐",
+ "\\oturnedcomma" : "̒",
+ "\\ocommatopright" : "̕",
+ "\\droang" : "̚",
+ "\\wideutilde" : "̰",
+ "\\not" : "̸",
+ "\\upMu" : "Μ",
+ "\\upNu" : "Ν",
+ "\\upOmicron" : "Ο",
+ "\\upepsilon" : "ε",
+ "\\upomicron" : "ο",
+ "\\upvarbeta" : "ϐ",
+ "\\upoldKoppa" : "Ϙ",
+ "\\upoldkoppa" : "ϙ",
+ "\\upstigma" : "ϛ",
+ "\\upkoppa" : "ϟ",
+ "\\upsampi" : "ϡ",
+ "\\tieconcat" : "⁀",
+ "\\leftharpoonaccent" : "⃐",
+ "\\rightharpoonaccent" : "⃑",
+ "\\vertoverlay" : "⃒",
+ "\\overleftarrow" : "⃖",
+ "\\vec" : "⃗",
+ "\\overleftrightarrow" : "⃡",
+ "\\annuity" : "⃧",
+ "\\threeunderdot" : "⃨",
+ "\\widebridgeabove" : "⃩",
+ "\\bbC" : "ℂ",
+ "\\eulermascheroni" : "ℇ",
+ "\\scrg" : "ℊ",
+ "\\scrH" : "ℋ",
+ "\\frakH" : "ℌ",
+ "\\bbH" : "ℍ",
+ "\\planck" : "ℎ",
+ "\\scrI" : "ℐ",
+ "\\scrL" : "ℒ",
+ "\\bbN" : "ℕ",
+ "\\bbP" : "ℙ",
+ "\\bbQ" : "ℚ",
+ "\\scrR" : "ℛ",
+ "\\bbR" : "ℝ",
+ "\\bbZ" : "ℤ",
+ "\\frakZ" : "ℨ",
+ "\\Angstrom" : "Å",
+ "\\scrB" : "ℬ",
+ "\\frakC" : "ℭ",
+ "\\scre" : "ℯ",
+ "\\scrE" : "ℰ",
+ "\\scrF" : "ℱ",
+ "\\Finv" : "Ⅎ",
+ "\\scrM" : "ℳ",
+ "\\scro" : "ℴ",
+ "\\bbgamma" : "ℽ",
+ "\\bbGamma" : "ℾ",
+ "\\bbiD" : "ⅅ",
+ "\\bbid" : "ⅆ",
+ "\\bbie" : "ⅇ",
+ "\\bbii" : "ⅈ",
+ "\\bbij" : "ⅉ",
+ "\\bfA" : "𝐀",
+ "\\bfB" : "𝐁",
+ "\\bfC" : "𝐂",
+ "\\bfD" : "𝐃",
+ "\\bfE" : "𝐄",
+ "\\bfF" : "𝐅",
+ "\\bfG" : "𝐆",
+ "\\bfH" : "𝐇",
+ "\\bfI" : "𝐈",
+ "\\bfJ" : "𝐉",
+ "\\bfK" : "𝐊",
+ "\\bfL" : "𝐋",
+ "\\bfM" : "𝐌",
+ "\\bfN" : "𝐍",
+ "\\bfO" : "𝐎",
+ "\\bfP" : "𝐏",
+ "\\bfQ" : "𝐐",
+ "\\bfR" : "𝐑",
+ "\\bfS" : "𝐒",
+ "\\bfT" : "𝐓",
+ "\\bfU" : "𝐔",
+ "\\bfV" : "𝐕",
+ "\\bfW" : "𝐖",
+ "\\bfX" : "𝐗",
+ "\\bfY" : "𝐘",
+ "\\bfZ" : "𝐙",
+ "\\bfa" : "𝐚",
+ "\\bfb" : "𝐛",
+ "\\bfc" : "𝐜",
+ "\\bfd" : "𝐝",
+ "\\bfe" : "𝐞",
+ "\\bff" : "𝐟",
+ "\\bfg" : "𝐠",
+ "\\bfh" : "𝐡",
+ "\\bfi" : "𝐢",
+ "\\bfj" : "𝐣",
+ "\\bfk" : "𝐤",
+ "\\bfl" : "𝐥",
+ "\\bfm" : "𝐦",
+ "\\bfn" : "𝐧",
+ "\\bfo" : "𝐨",
+ "\\bfp" : "𝐩",
+ "\\bfq" : "𝐪",
+ "\\bfr" : "𝐫",
+ "\\bfs" : "𝐬",
+ "\\bft" : "𝐭",
+ "\\bfu" : "𝐮",
+ "\\bfv" : "𝐯",
+ "\\bfw" : "𝐰",
+ "\\bfx" : "𝐱",
+ "\\bfy" : "𝐲",
+ "\\bfz" : "𝐳",
+ "\\itA" : "𝐴",
+ "\\itB" : "𝐵",
+ "\\itC" : "𝐶",
+ "\\itD" : "𝐷",
+ "\\itE" : "𝐸",
+ "\\itF" : "𝐹",
+ "\\itG" : "𝐺",
+ "\\itH" : "𝐻",
+ "\\itI" : "𝐼",
+ "\\itJ" : "𝐽",
+ "\\itK" : "𝐾",
+ "\\itL" : "𝐿",
+ "\\itM" : "𝑀",
+ "\\itN" : "𝑁",
+ "\\itO" : "𝑂",
+ "\\itP" : "𝑃",
+ "\\itQ" : "𝑄",
+ "\\itR" : "𝑅",
+ "\\itS" : "𝑆",
+ "\\itT" : "𝑇",
+ "\\itU" : "𝑈",
+ "\\itV" : "𝑉",
+ "\\itW" : "𝑊",
+ "\\itX" : "𝑋",
+ "\\itY" : "𝑌",
+ "\\itZ" : "𝑍",
+ "\\ita" : "𝑎",
+ "\\itb" : "𝑏",
+ "\\itc" : "𝑐",
+ "\\itd" : "𝑑",
+ "\\ite" : "𝑒",
+ "\\itf" : "𝑓",
+ "\\itg" : "𝑔",
+ "\\iti" : "𝑖",
+ "\\itj" : "𝑗",
+ "\\itk" : "𝑘",
+ "\\itl" : "𝑙",
+ "\\itm" : "𝑚",
+ "\\itn" : "𝑛",
+ "\\ito" : "𝑜",
+ "\\itp" : "𝑝",
+ "\\itq" : "𝑞",
+ "\\itr" : "𝑟",
+ "\\its" : "𝑠",
+ "\\itt" : "𝑡",
+ "\\itu" : "𝑢",
+ "\\itv" : "𝑣",
+ "\\itw" : "𝑤",
+ "\\itx" : "𝑥",
+ "\\ity" : "𝑦",
+ "\\itz" : "𝑧",
+ "\\biA" : "𝑨",
+ "\\biB" : "𝑩",
+ "\\biC" : "𝑪",
+ "\\biD" : "𝑫",
+ "\\biE" : "𝑬",
+ "\\biF" : "𝑭",
+ "\\biG" : "𝑮",
+ "\\biH" : "𝑯",
+ "\\biI" : "𝑰",
+ "\\biJ" : "𝑱",
+ "\\biK" : "𝑲",
+ "\\biL" : "𝑳",
+ "\\biM" : "𝑴",
+ "\\biN" : "𝑵",
+ "\\biO" : "𝑶",
+ "\\biP" : "𝑷",
+ "\\biQ" : "𝑸",
+ "\\biR" : "𝑹",
+ "\\biS" : "𝑺",
+ "\\biT" : "𝑻",
+ "\\biU" : "𝑼",
+ "\\biV" : "𝑽",
+ "\\biW" : "𝑾",
+ "\\biX" : "𝑿",
+ "\\biY" : "𝒀",
+ "\\biZ" : "𝒁",
+ "\\bia" : "𝒂",
+ "\\bib" : "𝒃",
+ "\\bic" : "𝒄",
+ "\\bid" : "𝒅",
+ "\\bie" : "𝒆",
+ "\\bif" : "𝒇",
+ "\\big" : "𝒈",
+ "\\bih" : "𝒉",
+ "\\bii" : "𝒊",
+ "\\bij" : "𝒋",
+ "\\bik" : "𝒌",
+ "\\bil" : "𝒍",
+ "\\bim" : "𝒎",
+ "\\bin" : "𝒏",
+ "\\bio" : "𝒐",
+ "\\bip" : "𝒑",
+ "\\biq" : "𝒒",
+ "\\bir" : "𝒓",
+ "\\bis" : "𝒔",
+ "\\bit" : "𝒕",
+ "\\biu" : "𝒖",
+ "\\biv" : "𝒗",
+ "\\biw" : "𝒘",
+ "\\bix" : "𝒙",
+ "\\biy" : "𝒚",
+ "\\biz" : "𝒛",
+ "\\scrA" : "𝒜",
+ "\\scrC" : "𝒞",
+ "\\scrD" : "𝒟",
+ "\\scrG" : "𝒢",
+ "\\scrJ" : "𝒥",
+ "\\scrK" : "𝒦",
+ "\\scrN" : "𝒩",
+ "\\scrO" : "𝒪",
+ "\\scrP" : "𝒫",
+ "\\scrQ" : "𝒬",
+ "\\scrS" : "𝒮",
+ "\\scrT" : "𝒯",
+ "\\scrU" : "𝒰",
+ "\\scrV" : "𝒱",
+ "\\scrW" : "𝒲",
+ "\\scrX" : "𝒳",
+ "\\scrY" : "𝒴",
+ "\\scrZ" : "𝒵",
+ "\\scra" : "𝒶",
+ "\\scrb" : "𝒷",
+ "\\scrc" : "𝒸",
+ "\\scrd" : "𝒹",
+ "\\scrf" : "𝒻",
+ "\\scrh" : "𝒽",
+ "\\scri" : "𝒾",
+ "\\scrj" : "𝒿",
+ "\\scrk" : "𝓀",
+ "\\scrm" : "𝓂",
+ "\\scrn" : "𝓃",
+ "\\scrp" : "𝓅",
+ "\\scrq" : "𝓆",
+ "\\scrr" : "𝓇",
+ "\\scrs" : "𝓈",
+ "\\scrt" : "𝓉",
+ "\\scru" : "𝓊",
+ "\\scrv" : "𝓋",
+ "\\scrw" : "𝓌",
+ "\\scrx" : "𝓍",
+ "\\scry" : "𝓎",
+ "\\scrz" : "𝓏",
+ "\\bscrA" : "𝓐",
+ "\\bscrB" : "𝓑",
+ "\\bscrC" : "𝓒",
+ "\\bscrD" : "𝓓",
+ "\\bscrE" : "𝓔",
+ "\\bscrF" : "𝓕",
+ "\\bscrG" : "𝓖",
+ "\\bscrH" : "𝓗",
+ "\\bscrI" : "𝓘",
+ "\\bscrJ" : "𝓙",
+ "\\bscrK" : "𝓚",
+ "\\bscrL" : "𝓛",
+ "\\bscrM" : "𝓜",
+ "\\bscrN" : "𝓝",
+ "\\bscrO" : "𝓞",
+ "\\bscrP" : "𝓟",
+ "\\bscrQ" : "𝓠",
+ "\\bscrR" : "𝓡",
+ "\\bscrS" : "𝓢",
+ "\\bscrT" : "𝓣",
+ "\\bscrU" : "𝓤",
+ "\\bscrV" : "𝓥",
+ "\\bscrW" : "𝓦",
+ "\\bscrX" : "𝓧",
+ "\\bscrY" : "𝓨",
+ "\\bscrZ" : "𝓩",
+ "\\bscra" : "𝓪",
+ "\\bscrb" : "𝓫",
+ "\\bscrc" : "𝓬",
+ "\\bscrd" : "𝓭",
+ "\\bscre" : "𝓮",
+ "\\bscrf" : "𝓯",
+ "\\bscrg" : "𝓰",
+ "\\bscrh" : "𝓱",
+ "\\bscri" : "𝓲",
+ "\\bscrj" : "𝓳",
+ "\\bscrk" : "𝓴",
+ "\\bscrl" : "𝓵",
+ "\\bscrm" : "𝓶",
+ "\\bscrn" : "𝓷",
+ "\\bscro" : "𝓸",
+ "\\bscrp" : "𝓹",
+ "\\bscrq" : "𝓺",
+ "\\bscrr" : "𝓻",
+ "\\bscrs" : "𝓼",
+ "\\bscrt" : "𝓽",
+ "\\bscru" : "𝓾",
+ "\\bscrv" : "𝓿",
+ "\\bscrw" : "𝔀",
+ "\\bscrx" : "𝔁",
+ "\\bscry" : "𝔂",
+ "\\bscrz" : "𝔃",
+ "\\frakA" : "𝔄",
+ "\\frakB" : "𝔅",
+ "\\frakD" : "𝔇",
+ "\\frakE" : "𝔈",
+ "\\frakF" : "𝔉",
+ "\\frakG" : "𝔊",
+ "\\frakJ" : "𝔍",
+ "\\frakK" : "𝔎",
+ "\\frakL" : "𝔏",
+ "\\frakM" : "𝔐",
+ "\\frakN" : "𝔑",
+ "\\frakO" : "𝔒",
+ "\\frakP" : "𝔓",
+ "\\frakQ" : "𝔔",
+ "\\frakS" : "𝔖",
+ "\\frakT" : "𝔗",
+ "\\frakU" : "𝔘",
+ "\\frakV" : "𝔙",
+ "\\frakW" : "𝔚",
+ "\\frakX" : "𝔛",
+ "\\frakY" : "𝔜",
+ "\\fraka" : "𝔞",
+ "\\frakb" : "𝔟",
+ "\\frakc" : "𝔠",
+ "\\frakd" : "𝔡",
+ "\\frake" : "𝔢",
+ "\\frakf" : "𝔣",
+ "\\frakg" : "𝔤",
+ "\\frakh" : "𝔥",
+ "\\fraki" : "𝔦",
+ "\\frakj" : "𝔧",
+ "\\frakk" : "𝔨",
+ "\\frakl" : "𝔩",
+ "\\frakm" : "𝔪",
+ "\\frakn" : "𝔫",
+ "\\frako" : "𝔬",
+ "\\frakp" : "𝔭",
+ "\\frakq" : "𝔮",
+ "\\frakr" : "𝔯",
+ "\\fraks" : "𝔰",
+ "\\frakt" : "𝔱",
+ "\\fraku" : "𝔲",
+ "\\frakv" : "𝔳",
+ "\\frakw" : "𝔴",
+ "\\frakx" : "𝔵",
+ "\\fraky" : "𝔶",
+ "\\frakz" : "𝔷",
+ "\\bbA" : "𝔸",
+ "\\bbB" : "𝔹",
+ "\\bbD" : "𝔻",
+ "\\bbE" : "𝔼",
+ "\\bbF" : "𝔽",
+ "\\bbG" : "𝔾",
+ "\\bbI" : "𝕀",
+ "\\bbJ" : "𝕁",
+ "\\bbK" : "𝕂",
+ "\\bbL" : "𝕃",
+ "\\bbM" : "𝕄",
+ "\\bbO" : "𝕆",
+ "\\bbS" : "𝕊",
+ "\\bbT" : "𝕋",
+ "\\bbU" : "𝕌",
+ "\\bbV" : "𝕍",
+ "\\bbW" : "𝕎",
+ "\\bbX" : "𝕏",
+ "\\bbY" : "𝕐",
+ "\\bba" : "𝕒",
+ "\\bbb" : "𝕓",
+ "\\bbc" : "𝕔",
+ "\\bbd" : "𝕕",
+ "\\bbe" : "𝕖",
+ "\\bbf" : "𝕗",
+ "\\bbg" : "𝕘",
+ "\\bbh" : "𝕙",
+ "\\bbi" : "𝕚",
+ "\\bbj" : "𝕛",
+ "\\bbk" : "𝕜",
+ "\\bbl" : "𝕝",
+ "\\bbm" : "𝕞",
+ "\\bbn" : "𝕟",
+ "\\bbo" : "𝕠",
+ "\\bbp" : "𝕡",
+ "\\bbq" : "𝕢",
+ "\\bbr" : "𝕣",
+ "\\bbs" : "𝕤",
+ "\\bbt" : "𝕥",
+ "\\bbu" : "𝕦",
+ "\\bbv" : "𝕧",
+ "\\bbw" : "𝕨",
+ "\\bbx" : "𝕩",
+ "\\bby" : "𝕪",
+ "\\bbz" : "𝕫",
+ "\\bfrakA" : "𝕬",
+ "\\bfrakB" : "𝕭",
+ "\\bfrakC" : "𝕮",
+ "\\bfrakD" : "𝕯",
+ "\\bfrakE" : "𝕰",
+ "\\bfrakF" : "𝕱",
+ "\\bfrakG" : "𝕲",
+ "\\bfrakH" : "𝕳",
+ "\\bfrakI" : "𝕴",
+ "\\bfrakJ" : "𝕵",
+ "\\bfrakK" : "𝕶",
+ "\\bfrakL" : "𝕷",
+ "\\bfrakM" : "𝕸",
+ "\\bfrakN" : "𝕹",
+ "\\bfrakO" : "𝕺",
+ "\\bfrakP" : "𝕻",
+ "\\bfrakQ" : "𝕼",
+ "\\bfrakR" : "𝕽",
+ "\\bfrakS" : "𝕾",
+ "\\bfrakT" : "𝕿",
+ "\\bfrakU" : "𝖀",
+ "\\bfrakV" : "𝖁",
+ "\\bfrakW" : "𝖂",
+ "\\bfrakX" : "𝖃",
+ "\\bfrakY" : "𝖄",
+ "\\bfrakZ" : "𝖅",
+ "\\bfraka" : "𝖆",
+ "\\bfrakb" : "𝖇",
+ "\\bfrakc" : "𝖈",
+ "\\bfrakd" : "𝖉",
+ "\\bfrake" : "𝖊",
+ "\\bfrakf" : "𝖋",
+ "\\bfrakg" : "𝖌",
+ "\\bfrakh" : "𝖍",
+ "\\bfraki" : "𝖎",
+ "\\bfrakj" : "𝖏",
+ "\\bfrakk" : "𝖐",
+ "\\bfrakl" : "𝖑",
+ "\\bfrakm" : "𝖒",
+ "\\bfrakn" : "𝖓",
+ "\\bfrako" : "𝖔",
+ "\\bfrakp" : "𝖕",
+ "\\bfrakq" : "𝖖",
+ "\\bfrakr" : "𝖗",
+ "\\bfraks" : "𝖘",
+ "\\bfrakt" : "𝖙",
+ "\\bfraku" : "𝖚",
+ "\\bfrakv" : "𝖛",
+ "\\bfrakw" : "𝖜",
+ "\\bfrakx" : "𝖝",
+ "\\bfraky" : "𝖞",
+ "\\bfrakz" : "𝖟",
+ "\\sansA" : "𝖠",
+ "\\sansB" : "𝖡",
+ "\\sansC" : "𝖢",
+ "\\sansD" : "𝖣",
+ "\\sansE" : "𝖤",
+ "\\sansF" : "𝖥",
+ "\\sansG" : "𝖦",
+ "\\sansH" : "𝖧",
+ "\\sansI" : "𝖨",
+ "\\sansJ" : "𝖩",
+ "\\sansK" : "𝖪",
+ "\\sansL" : "𝖫",
+ "\\sansM" : "𝖬",
+ "\\sansN" : "𝖭",
+ "\\sansO" : "𝖮",
+ "\\sansP" : "𝖯",
+ "\\sansQ" : "𝖰",
+ "\\sansR" : "𝖱",
+ "\\sansS" : "𝖲",
+ "\\sansT" : "𝖳",
+ "\\sansU" : "𝖴",
+ "\\sansV" : "𝖵",
+ "\\sansW" : "𝖶",
+ "\\sansX" : "𝖷",
+ "\\sansY" : "𝖸",
+ "\\sansZ" : "𝖹",
+ "\\sansa" : "𝖺",
+ "\\sansb" : "𝖻",
+ "\\sansc" : "𝖼",
+ "\\sansd" : "𝖽",
+ "\\sanse" : "𝖾",
+ "\\sansf" : "𝖿",
+ "\\sansg" : "𝗀",
+ "\\sansh" : "𝗁",
+ "\\sansi" : "𝗂",
+ "\\sansj" : "𝗃",
+ "\\sansk" : "𝗄",
+ "\\sansl" : "𝗅",
+ "\\sansm" : "𝗆",
+ "\\sansn" : "𝗇",
+ "\\sanso" : "𝗈",
+ "\\sansp" : "𝗉",
+ "\\sansq" : "𝗊",
+ "\\sansr" : "𝗋",
+ "\\sanss" : "𝗌",
+ "\\sanst" : "𝗍",
+ "\\sansu" : "𝗎",
+ "\\sansv" : "𝗏",
+ "\\sansw" : "𝗐",
+ "\\sansx" : "𝗑",
+ "\\sansy" : "𝗒",
+ "\\sansz" : "𝗓",
+ "\\bsansA" : "𝗔",
+ "\\bsansB" : "𝗕",
+ "\\bsansC" : "𝗖",
+ "\\bsansD" : "𝗗",
+ "\\bsansE" : "𝗘",
+ "\\bsansF" : "𝗙",
+ "\\bsansG" : "𝗚",
+ "\\bsansH" : "𝗛",
+ "\\bsansI" : "𝗜",
+ "\\bsansJ" : "𝗝",
+ "\\bsansK" : "𝗞",
+ "\\bsansL" : "𝗟",
+ "\\bsansM" : "𝗠",
+ "\\bsansN" : "𝗡",
+ "\\bsansO" : "𝗢",
+ "\\bsansP" : "𝗣",
+ "\\bsansQ" : "𝗤",
+ "\\bsansR" : "𝗥",
+ "\\bsansS" : "𝗦",
+ "\\bsansT" : "𝗧",
+ "\\bsansU" : "𝗨",
+ "\\bsansV" : "𝗩",
+ "\\bsansW" : "𝗪",
+ "\\bsansX" : "𝗫",
+ "\\bsansY" : "𝗬",
+ "\\bsansZ" : "𝗭",
+ "\\bsansa" : "𝗮",
+ "\\bsansb" : "𝗯",
+ "\\bsansc" : "𝗰",
+ "\\bsansd" : "𝗱",
+ "\\bsanse" : "𝗲",
+ "\\bsansf" : "𝗳",
+ "\\bsansg" : "𝗴",
+ "\\bsansh" : "𝗵",
+ "\\bsansi" : "𝗶",
+ "\\bsansj" : "𝗷",
+ "\\bsansk" : "𝗸",
+ "\\bsansl" : "𝗹",
+ "\\bsansm" : "𝗺",
+ "\\bsansn" : "𝗻",
+ "\\bsanso" : "𝗼",
+ "\\bsansp" : "𝗽",
+ "\\bsansq" : "𝗾",
+ "\\bsansr" : "𝗿",
+ "\\bsanss" : "𝘀",
+ "\\bsanst" : "𝘁",
+ "\\bsansu" : "𝘂",
+ "\\bsansv" : "𝘃",
+ "\\bsansw" : "𝘄",
+ "\\bsansx" : "𝘅",
+ "\\bsansy" : "𝘆",
+ "\\bsansz" : "𝘇",
+ "\\isansA" : "𝘈",
+ "\\isansB" : "𝘉",
+ "\\isansC" : "𝘊",
+ "\\isansD" : "𝘋",
+ "\\isansE" : "𝘌",
+ "\\isansF" : "𝘍",
+ "\\isansG" : "𝘎",
+ "\\isansH" : "𝘏",
+ "\\isansI" : "𝘐",
+ "\\isansJ" : "𝘑",
+ "\\isansK" : "𝘒",
+ "\\isansL" : "𝘓",
+ "\\isansM" : "𝘔",
+ "\\isansN" : "𝘕",
+ "\\isansO" : "𝘖",
+ "\\isansP" : "𝘗",
+ "\\isansQ" : "𝘘",
+ "\\isansR" : "𝘙",
+ "\\isansS" : "𝘚",
+ "\\isansT" : "𝘛",
+ "\\isansU" : "𝘜",
+ "\\isansV" : "𝘝",
+ "\\isansW" : "𝘞",
+ "\\isansX" : "𝘟",
+ "\\isansY" : "𝘠",
+ "\\isansZ" : "𝘡",
+ "\\isansa" : "𝘢",
+ "\\isansb" : "𝘣",
+ "\\isansc" : "𝘤",
+ "\\isansd" : "𝘥",
+ "\\isanse" : "𝘦",
+ "\\isansf" : "𝘧",
+ "\\isansg" : "𝘨",
+ "\\isansh" : "𝘩",
+ "\\isansi" : "𝘪",
+ "\\isansj" : "𝘫",
+ "\\isansk" : "𝘬",
+ "\\isansl" : "𝘭",
+ "\\isansm" : "𝘮",
+ "\\isansn" : "𝘯",
+ "\\isanso" : "𝘰",
+ "\\isansp" : "𝘱",
+ "\\isansq" : "𝘲",
+ "\\isansr" : "𝘳",
+ "\\isanss" : "𝘴",
+ "\\isanst" : "𝘵",
+ "\\isansu" : "𝘶",
+ "\\isansv" : "𝘷",
+ "\\isansw" : "𝘸",
+ "\\isansx" : "𝘹",
+ "\\isansy" : "𝘺",
+ "\\isansz" : "𝘻",
+ "\\bisansA" : "𝘼",
+ "\\bisansB" : "𝘽",
+ "\\bisansC" : "𝘾",
+ "\\bisansD" : "𝘿",
+ "\\bisansE" : "𝙀",
+ "\\bisansF" : "𝙁",
+ "\\bisansG" : "𝙂",
+ "\\bisansH" : "𝙃",
+ "\\bisansI" : "𝙄",
+ "\\bisansJ" : "𝙅",
+ "\\bisansK" : "𝙆",
+ "\\bisansL" : "𝙇",
+ "\\bisansM" : "𝙈",
+ "\\bisansN" : "𝙉",
+ "\\bisansO" : "𝙊",
+ "\\bisansP" : "𝙋",
+ "\\bisansQ" : "𝙌",
+ "\\bisansR" : "𝙍",
+ "\\bisansS" : "𝙎",
+ "\\bisansT" : "𝙏",
+ "\\bisansU" : "𝙐",
+ "\\bisansV" : "𝙑",
+ "\\bisansW" : "𝙒",
+ "\\bisansX" : "𝙓",
+ "\\bisansY" : "𝙔",
+ "\\bisansZ" : "𝙕",
+ "\\bisansa" : "𝙖",
+ "\\bisansb" : "𝙗",
+ "\\bisansc" : "𝙘",
+ "\\bisansd" : "𝙙",
+ "\\bisanse" : "𝙚",
+ "\\bisansf" : "𝙛",
+ "\\bisansg" : "𝙜",
+ "\\bisansh" : "𝙝",
+ "\\bisansi" : "𝙞",
+ "\\bisansj" : "𝙟",
+ "\\bisansk" : "𝙠",
+ "\\bisansl" : "𝙡",
+ "\\bisansm" : "𝙢",
+ "\\bisansn" : "𝙣",
+ "\\bisanso" : "𝙤",
+ "\\bisansp" : "𝙥",
+ "\\bisansq" : "𝙦",
+ "\\bisansr" : "𝙧",
+ "\\bisanss" : "𝙨",
+ "\\bisanst" : "𝙩",
+ "\\bisansu" : "𝙪",
+ "\\bisansv" : "𝙫",
+ "\\bisansw" : "𝙬",
+ "\\bisansx" : "𝙭",
+ "\\bisansy" : "𝙮",
+ "\\bisansz" : "𝙯",
+ "\\ttA" : "𝙰",
+ "\\ttB" : "𝙱",
+ "\\ttC" : "𝙲",
+ "\\ttD" : "𝙳",
+ "\\ttE" : "𝙴",
+ "\\ttF" : "𝙵",
+ "\\ttG" : "𝙶",
+ "\\ttH" : "𝙷",
+ "\\ttI" : "𝙸",
+ "\\ttJ" : "𝙹",
+ "\\ttK" : "𝙺",
+ "\\ttL" : "𝙻",
+ "\\ttM" : "𝙼",
+ "\\ttN" : "𝙽",
+ "\\ttO" : "𝙾",
+ "\\ttP" : "𝙿",
+ "\\ttQ" : "𝚀",
+ "\\ttR" : "𝚁",
+ "\\ttS" : "𝚂",
+ "\\ttT" : "𝚃",
+ "\\ttU" : "𝚄",
+ "\\ttV" : "𝚅",
+ "\\ttW" : "𝚆",
+ "\\ttX" : "𝚇",
+ "\\ttY" : "𝚈",
+ "\\ttZ" : "𝚉",
+ "\\tta" : "𝚊",
+ "\\ttb" : "𝚋",
+ "\\ttc" : "𝚌",
+ "\\ttd" : "𝚍",
+ "\\tte" : "𝚎",
+ "\\ttf" : "𝚏",
+ "\\ttg" : "𝚐",
+ "\\tth" : "𝚑",
+ "\\tti" : "𝚒",
+ "\\ttj" : "𝚓",
+ "\\ttk" : "𝚔",
+ "\\ttl" : "𝚕",
+ "\\ttm" : "𝚖",
+ "\\ttn" : "𝚗",
+ "\\tto" : "𝚘",
+ "\\ttp" : "𝚙",
+ "\\ttq" : "𝚚",
+ "\\ttr" : "𝚛",
+ "\\tts" : "𝚜",
+ "\\ttt" : "𝚝",
+ "\\ttu" : "𝚞",
+ "\\ttv" : "𝚟",
+ "\\ttw" : "𝚠",
+ "\\ttx" : "𝚡",
+ "\\tty" : "𝚢",
+ "\\ttz" : "𝚣",
+ "\\bfAlpha" : "𝚨",
+ "\\bfBeta" : "𝚩",
+ "\\bfGamma" : "𝚪",
+ "\\bfDelta" : "𝚫",
+ "\\bfEpsilon" : "𝚬",
+ "\\bfZeta" : "𝚭",
+ "\\bfEta" : "𝚮",
+ "\\bfTheta" : "𝚯",
+ "\\bfIota" : "𝚰",
+ "\\bfKappa" : "𝚱",
+ "\\bfLambda" : "𝚲",
+ "\\bfMu" : "𝚳",
+ "\\bfNu" : "𝚴",
+ "\\bfXi" : "𝚵",
+ "\\bfOmicron" : "𝚶",
+ "\\bfPi" : "𝚷",
+ "\\bfRho" : "𝚸",
+ "\\bfvarTheta" : "𝚹",
+ "\\bfSigma" : "𝚺",
+ "\\bfTau" : "𝚻",
+ "\\bfUpsilon" : "𝚼",
+ "\\bfPhi" : "𝚽",
+ "\\bfChi" : "𝚾",
+ "\\bfPsi" : "𝚿",
+ "\\bfOmega" : "𝛀",
+ "\\bfalpha" : "𝛂",
+ "\\bfbeta" : "𝛃",
+ "\\bfgamma" : "𝛄",
+ "\\bfdelta" : "𝛅",
+ "\\bfepsilon" : "𝛆",
+ "\\bfzeta" : "𝛇",
+ "\\bfeta" : "𝛈",
+ "\\bftheta" : "𝛉",
+ "\\bfiota" : "𝛊",
+ "\\bfkappa" : "𝛋",
+ "\\bflambda" : "𝛌",
+ "\\bfmu" : "𝛍",
+ "\\bfnu" : "𝛎",
+ "\\bfxi" : "𝛏",
+ "\\bfomicron" : "𝛐",
+ "\\bfpi" : "𝛑",
+ "\\bfrho" : "𝛒",
+ "\\bfvarsigma" : "𝛓",
+ "\\bfsigma" : "𝛔",
+ "\\bftau" : "𝛕",
+ "\\bfupsilon" : "𝛖",
+ "\\bfvarphi" : "𝛗",
+ "\\bfchi" : "𝛘",
+ "\\bfpsi" : "𝛙",
+ "\\bfomega" : "𝛚",
+ "\\bfvarepsilon" : "𝛜",
+ "\\bfvartheta" : "𝛝",
+ "\\bfvarkappa" : "𝛞",
+ "\\bfphi" : "𝛟",
+ "\\bfvarrho" : "𝛠",
+ "\\bfvarpi" : "𝛡",
+ "\\itAlpha" : "𝛢",
+ "\\itBeta" : "𝛣",
+ "\\itGamma" : "𝛤",
+ "\\itDelta" : "𝛥",
+ "\\itEpsilon" : "𝛦",
+ "\\itZeta" : "𝛧",
+ "\\itEta" : "𝛨",
+ "\\itTheta" : "𝛩",
+ "\\itIota" : "𝛪",
+ "\\itKappa" : "𝛫",
+ "\\itLambda" : "𝛬",
+ "\\itMu" : "𝛭",
+ "\\itNu" : "𝛮",
+ "\\itXi" : "𝛯",
+ "\\itOmicron" : "𝛰",
+ "\\itPi" : "𝛱",
+ "\\itRho" : "𝛲",
+ "\\itvarTheta" : "𝛳",
+ "\\itSigma" : "𝛴",
+ "\\itTau" : "𝛵",
+ "\\itUpsilon" : "𝛶",
+ "\\itPhi" : "𝛷",
+ "\\itChi" : "𝛸",
+ "\\itPsi" : "𝛹",
+ "\\itOmega" : "𝛺",
+ "\\italpha" : "𝛼",
+ "\\itbeta" : "𝛽",
+ "\\itgamma" : "𝛾",
+ "\\itdelta" : "𝛿",
+ "\\itepsilon" : "𝜀",
+ "\\itzeta" : "𝜁",
+ "\\iteta" : "𝜂",
+ "\\ittheta" : "𝜃",
+ "\\itiota" : "𝜄",
+ "\\itkappa" : "𝜅",
+ "\\itlambda" : "𝜆",
+ "\\itmu" : "𝜇",
+ "\\itnu" : "𝜈",
+ "\\itxi" : "𝜉",
+ "\\itomicron" : "𝜊",
+ "\\itpi" : "𝜋",
+ "\\itrho" : "𝜌",
+ "\\itvarsigma" : "𝜍",
+ "\\itsigma" : "𝜎",
+ "\\ittau" : "𝜏",
+ "\\itupsilon" : "𝜐",
+ "\\itphi" : "𝜑",
+ "\\itchi" : "𝜒",
+ "\\itpsi" : "𝜓",
+ "\\itomega" : "𝜔",
+ "\\itvarepsilon" : "𝜖",
+ "\\itvartheta" : "𝜗",
+ "\\itvarkappa" : "𝜘",
+ "\\itvarphi" : "𝜙",
+ "\\itvarrho" : "𝜚",
+ "\\itvarpi" : "𝜛",
+ "\\biAlpha" : "𝜜",
+ "\\biBeta" : "𝜝",
+ "\\biGamma" : "𝜞",
+ "\\biDelta" : "𝜟",
+ "\\biEpsilon" : "𝜠",
+ "\\biZeta" : "𝜡",
+ "\\biEta" : "𝜢",
+ "\\biTheta" : "𝜣",
+ "\\biIota" : "𝜤",
+ "\\biKappa" : "𝜥",
+ "\\biLambda" : "𝜦",
+ "\\biMu" : "𝜧",
+ "\\biNu" : "𝜨",
+ "\\biXi" : "𝜩",
+ "\\biOmicron" : "𝜪",
+ "\\biPi" : "𝜫",
+ "\\biRho" : "𝜬",
+ "\\bivarTheta" : "𝜭",
+ "\\biSigma" : "𝜮",
+ "\\biTau" : "𝜯",
+ "\\biUpsilon" : "𝜰",
+ "\\biPhi" : "𝜱",
+ "\\biChi" : "𝜲",
+ "\\biPsi" : "𝜳",
+ "\\biOmega" : "𝜴",
+ "\\bialpha" : "𝜶",
+ "\\bibeta" : "𝜷",
+ "\\bigamma" : "𝜸",
+ "\\bidelta" : "𝜹",
+ "\\biepsilon" : "𝜺",
+ "\\bizeta" : "𝜻",
+ "\\bieta" : "𝜼",
+ "\\bitheta" : "𝜽",
+ "\\biiota" : "𝜾",
+ "\\bikappa" : "𝜿",
+ "\\bilambda" : "𝝀",
+ "\\bimu" : "𝝁",
+ "\\binu" : "𝝂",
+ "\\bixi" : "𝝃",
+ "\\biomicron" : "𝝄",
+ "\\bipi" : "𝝅",
+ "\\birho" : "𝝆",
+ "\\bivarsigma" : "𝝇",
+ "\\bisigma" : "𝝈",
+ "\\bitau" : "𝝉",
+ "\\biupsilon" : "𝝊",
+ "\\biphi" : "𝝋",
+ "\\bichi" : "𝝌",
+ "\\bipsi" : "𝝍",
+ "\\biomega" : "𝝎",
+ "\\bivarepsilon" : "𝝐",
+ "\\bivartheta" : "𝝑",
+ "\\bivarkappa" : "𝝒",
+ "\\bivarphi" : "𝝓",
+ "\\bivarrho" : "𝝔",
+ "\\bivarpi" : "𝝕",
+ "\\bsansAlpha" : "𝝖",
+ "\\bsansBeta" : "𝝗",
+ "\\bsansGamma" : "𝝘",
+ "\\bsansDelta" : "𝝙",
+ "\\bsansEpsilon" : "𝝚",
+ "\\bsansZeta" : "𝝛",
+ "\\bsansEta" : "𝝜",
+ "\\bsansTheta" : "𝝝",
+ "\\bsansIota" : "𝝞",
+ "\\bsansKappa" : "𝝟",
+ "\\bsansLambda" : "𝝠",
+ "\\bsansMu" : "𝝡",
+ "\\bsansNu" : "𝝢",
+ "\\bsansXi" : "𝝣",
+ "\\bsansOmicron" : "𝝤",
+ "\\bsansPi" : "𝝥",
+ "\\bsansRho" : "𝝦",
+ "\\bsansvarTheta" : "𝝧",
+ "\\bsansSigma" : "𝝨",
+ "\\bsansTau" : "𝝩",
+ "\\bsansUpsilon" : "𝝪",
+ "\\bsansPhi" : "𝝫",
+ "\\bsansChi" : "𝝬",
+ "\\bsansPsi" : "𝝭",
+ "\\bsansOmega" : "𝝮",
+ "\\bsansalpha" : "𝝰",
+ "\\bsansbeta" : "𝝱",
+ "\\bsansgamma" : "𝝲",
+ "\\bsansdelta" : "𝝳",
+ "\\bsansepsilon" : "𝝴",
+ "\\bsanszeta" : "𝝵",
+ "\\bsanseta" : "𝝶",
+ "\\bsanstheta" : "𝝷",
+ "\\bsansiota" : "𝝸",
+ "\\bsanskappa" : "𝝹",
+ "\\bsanslambda" : "𝝺",
+ "\\bsansmu" : "𝝻",
+ "\\bsansnu" : "𝝼",
+ "\\bsansxi" : "𝝽",
+ "\\bsansomicron" : "𝝾",
+ "\\bsanspi" : "𝝿",
+ "\\bsansrho" : "𝞀",
+ "\\bsansvarsigma" : "𝞁",
+ "\\bsanssigma" : "𝞂",
+ "\\bsanstau" : "𝞃",
+ "\\bsansupsilon" : "𝞄",
+ "\\bsansphi" : "𝞅",
+ "\\bsanschi" : "𝞆",
+ "\\bsanspsi" : "𝞇",
+ "\\bsansomega" : "𝞈",
+ "\\bsansvarepsilon" : "𝞊",
+ "\\bsansvartheta" : "𝞋",
+ "\\bsansvarkappa" : "𝞌",
+ "\\bsansvarphi" : "𝞍",
+ "\\bsansvarrho" : "𝞎",
+ "\\bsansvarpi" : "𝞏",
+ "\\bisansAlpha" : "𝞐",
+ "\\bisansBeta" : "𝞑",
+ "\\bisansGamma" : "𝞒",
+ "\\bisansDelta" : "𝞓",
+ "\\bisansEpsilon" : "𝞔",
+ "\\bisansZeta" : "𝞕",
+ "\\bisansEta" : "𝞖",
+ "\\bisansTheta" : "𝞗",
+ "\\bisansIota" : "𝞘",
+ "\\bisansKappa" : "𝞙",
+ "\\bisansLambda" : "𝞚",
+ "\\bisansMu" : "𝞛",
+ "\\bisansNu" : "𝞜",
+ "\\bisansXi" : "𝞝",
+ "\\bisansOmicron" : "𝞞",
+ "\\bisansPi" : "𝞟",
+ "\\bisansRho" : "𝞠",
+ "\\bisansvarTheta" : "𝞡",
+ "\\bisansSigma" : "𝞢",
+ "\\bisansTau" : "𝞣",
+ "\\bisansUpsilon" : "𝞤",
+ "\\bisansPhi" : "𝞥",
+ "\\bisansChi" : "𝞦",
+ "\\bisansPsi" : "𝞧",
+ "\\bisansOmega" : "𝞨",
+ "\\bisansalpha" : "𝞪",
+ "\\bisansbeta" : "𝞫",
+ "\\bisansgamma" : "𝞬",
+ "\\bisansdelta" : "𝞭",
+ "\\bisansepsilon" : "𝞮",
+ "\\bisanszeta" : "𝞯",
+ "\\bisanseta" : "𝞰",
+ "\\bisanstheta" : "𝞱",
+ "\\bisansiota" : "𝞲",
+ "\\bisanskappa" : "𝞳",
+ "\\bisanslambda" : "𝞴",
+ "\\bisansmu" : "𝞵",
+ "\\bisansnu" : "𝞶",
+ "\\bisansxi" : "𝞷",
+ "\\bisansomicron" : "𝞸",
+ "\\bisanspi" : "𝞹",
+ "\\bisansrho" : "𝞺",
+ "\\bisansvarsigma" : "𝞻",
+ "\\bisanssigma" : "𝞼",
+ "\\bisanstau" : "𝞽",
+ "\\bisansupsilon" : "𝞾",
+ "\\bisansphi" : "𝞿",
+ "\\bisanschi" : "𝟀",
+ "\\bisanspsi" : "𝟁",
+ "\\bisansomega" : "𝟂",
+ "\\bisansvarepsilon" : "𝟄",
+ "\\bisansvartheta" : "𝟅",
+ "\\bisansvarkappa" : "𝟆",
+ "\\bisansvarphi" : "𝟇",
+ "\\bisansvarrho" : "𝟈",
+ "\\bisansvarpi" : "𝟉",
+ "\\bfzero" : "𝟎",
+ "\\bfone" : "𝟏",
+ "\\bftwo" : "𝟐",
+ "\\bfthree" : "𝟑",
+ "\\bffour" : "𝟒",
+ "\\bffive" : "𝟓",
+ "\\bfsix" : "𝟔",
+ "\\bfseven" : "𝟕",
+ "\\bfeight" : "𝟖",
+ "\\bfnine" : "𝟗",
+ "\\bbzero" : "𝟘",
+ "\\bbone" : "𝟙",
+ "\\bbtwo" : "𝟚",
+ "\\bbthree" : "𝟛",
+ "\\bbfour" : "𝟜",
+ "\\bbfive" : "𝟝",
+ "\\bbsix" : "𝟞",
+ "\\bbseven" : "𝟟",
+ "\\bbeight" : "𝟠",
+ "\\bbnine" : "𝟡",
+ "\\sanszero" : "𝟢",
+ "\\sansone" : "𝟣",
+ "\\sanstwo" : "𝟤",
+ "\\sansthree" : "𝟥",
+ "\\sansfour" : "𝟦",
+ "\\sansfive" : "𝟧",
+ "\\sanssix" : "𝟨",
+ "\\sansseven" : "𝟩",
+ "\\sanseight" : "𝟪",
+ "\\sansnine" : "𝟫",
+ "\\bsanszero" : "𝟬",
+ "\\bsansone" : "𝟭",
+ "\\bsanstwo" : "𝟮",
+ "\\bsansthree" : "𝟯",
+ "\\bsansfour" : "𝟰",
+ "\\bsansfive" : "𝟱",
+ "\\bsanssix" : "𝟲",
+ "\\bsansseven" : "𝟳",
+ "\\bsanseight" : "𝟴",
+ "\\bsansnine" : "𝟵",
+ "\\ttzero" : "𝟶",
+ "\\ttone" : "𝟷",
+ "\\tttwo" : "𝟸",
+ "\\ttthree" : "𝟹",
+ "\\ttfour" : "𝟺",
+ "\\ttfive" : "𝟻",
+ "\\ttsix" : "𝟼",
+ "\\ttseven" : "𝟽",
+ "\\tteight" : "𝟾",
+ "\\ttnine" : "𝟿",
+ "\\underbar" : "̲",
+ "\\underleftrightarrow" : "͍",
+}
+
+
+reverse_latex_symbol = { v:k for k,v in latex_symbols.items()}
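
A minimal usage sketch of the two tables above (not part of the diff; it assumes the file is importable as IPython.core.latex_symbols): `latex_symbols` maps a LaTeX-style escape name to its Unicode character, and `reverse_latex_symbol` inverts that mapping, which is roughly what tab-completion needs to go from a character back to a name.

    from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol

    # Forward lookup: escape name -> Unicode character
    print(latex_symbols["\\alpha"])   # 'α'
    print(latex_symbols["\\bbR"])     # 'ℝ'

    # Reverse lookup: character -> escape name (last writer wins when
    # several names map to the same character, e.g. \eth and \dh)
    print(reverse_latex_symbol["ℝ"])  # expected '\\bbR'

    # Prefix search, roughly what completing "\bb<TAB>" would do
    blackboard = sorted(k for k in latex_symbols if k.startswith("\\bb"))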
diff --git a/contrib/python/ipython/py3/IPython/core/logger.py b/contrib/python/ipython/py3/IPython/core/logger.py
new file mode 100644
index 0000000000..99e7ce2918
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/logger.py
@@ -0,0 +1,227 @@
+"""Logger class for IPython's logging facilities.
+"""
+
+#*****************************************************************************
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
+# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+#****************************************************************************
+# Modules and globals
+
+# Python standard modules
+import glob
+import io
+import os
+import time
+
+
+#****************************************************************************
+# FIXME: This class isn't a mixin anymore, but it still needs attributes from
+# ipython and does input cache management. Finish cleanup later...
+
+class Logger(object):
+ """A Logfile class with different policies for file creation"""
+
+ def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
+ logmode='over'):
+
+ # this is the full ipython instance, we need some attributes from it
+ # which won't exist until later. What a mess, clean up later...
+ self.home_dir = home_dir
+
+ self.logfname = logfname
+ self.loghead = loghead
+ self.logmode = logmode
+ self.logfile = None
+
+ # Whether to log raw or processed input
+ self.log_raw_input = False
+
+ # whether to also log output
+ self.log_output = False
+
+ # whether to put timestamps before each log entry
+ self.timestamp = False
+
+ # activity control flags
+ self.log_active = False
+
+ # logmode is a validated property
+ def _set_mode(self,mode):
+ if mode not in ['append','backup','global','over','rotate']:
+ raise ValueError('invalid log mode %s given' % mode)
+ self._logmode = mode
+
+ def _get_mode(self):
+ return self._logmode
+
+ logmode = property(_get_mode,_set_mode)
+
+ def logstart(self, logfname=None, loghead=None, logmode=None,
+ log_output=False, timestamp=False, log_raw_input=False):
+ """Generate a new log-file with a default header.
+
+ Raises RuntimeError if the log has already been started"""
+
+ if self.logfile is not None:
+ raise RuntimeError('Log file is already active: %s' %
+ self.logfname)
+
+ # The parameters can override constructor defaults
+ if logfname is not None: self.logfname = logfname
+ if loghead is not None: self.loghead = loghead
+ if logmode is not None: self.logmode = logmode
+
+ # Parameters not part of the constructor
+ self.timestamp = timestamp
+ self.log_output = log_output
+ self.log_raw_input = log_raw_input
+
+ # init depending on the log mode requested
+ isfile = os.path.isfile
+ logmode = self.logmode
+
+ if logmode == 'append':
+ self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
+
+ elif logmode == 'backup':
+ if isfile(self.logfname):
+ backup_logname = self.logfname+'~'
+ # Manually remove any old backup, since os.rename may fail
+ # under Windows.
+ if isfile(backup_logname):
+ os.remove(backup_logname)
+ os.rename(self.logfname,backup_logname)
+ self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
+
+ elif logmode == 'global':
+ self.logfname = os.path.join(self.home_dir,self.logfname)
+ self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
+
+ elif logmode == 'over':
+ if isfile(self.logfname):
+ os.remove(self.logfname)
+ self.logfile = io.open(self.logfname,'w', encoding='utf-8')
+
+ elif logmode == 'rotate':
+ if isfile(self.logfname):
+ if isfile(self.logfname+'.001~'):
+ old = glob.glob(self.logfname+'.*~')
+ old.sort()
+ old.reverse()
+ for f in old:
+ root, ext = os.path.splitext(f)
+ num = int(ext[1:-1])+1
+ os.rename(f, root+'.'+repr(num).zfill(3)+'~')
+ os.rename(self.logfname, self.logfname+'.001~')
+ self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
+
+ if logmode != 'append':
+ self.logfile.write(self.loghead)
+
+ self.logfile.flush()
+ self.log_active = True
+
+ def switch_log(self,val):
+ """Switch logging on/off. val should be ONLY a boolean."""
+
+ if val not in [False,True,0,1]:
+ raise ValueError('Call switch_log ONLY with a boolean argument, '
+ 'not with: %s' % val)
+
+ label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
+
+ if self.logfile is None:
+ print("""
+Logging hasn't been started yet (use logstart for that).
+
+%logon/%logoff are for temporarily starting and stopping logging for a logfile
+which already exists. But you must first start the logging process with
+%logstart (optionally giving a logfile name).""")
+
+ else:
+ if self.log_active == val:
+ print('Logging is already',label[val])
+ else:
+ print('Switching logging',label[val])
+ self.log_active = not self.log_active
+ self.log_active_out = self.log_active
+
+ def logstate(self):
+ """Print a status message about the logger."""
+ if self.logfile is None:
+ print('Logging has not been activated.')
+ else:
+ state = self.log_active and 'active' or 'temporarily suspended'
+ print('Filename :', self.logfname)
+ print('Mode :', self.logmode)
+ print('Output logging :', self.log_output)
+ print('Raw input log :', self.log_raw_input)
+ print('Timestamping :', self.timestamp)
+ print('State :', state)
+
+ def log(self, line_mod, line_ori):
+ """Write the sources to a log.
+
+ Inputs:
+
+ - line_mod: possibly modified input, such as the transformations made
+ by input prefilters or input handlers of various kinds. This should
+ always be valid Python.
+
+ - line_ori: unmodified input line from the user. This is not
+ necessarily valid Python.
+ """
+
+ # Write the log line, but decide which one according to the
+ # log_raw_input flag, set when the log is started.
+ if self.log_raw_input:
+ self.log_write(line_ori)
+ else:
+ self.log_write(line_mod)
+
+ def log_write(self, data, kind='input'):
+ """Write data to the log file, if active"""
+
+ #print 'data: %r' % data # dbg
+ if self.log_active and data:
+ write = self.logfile.write
+ if kind=='input':
+ if self.timestamp:
+ write(time.strftime('# %a, %d %b %Y %H:%M:%S\n', time.localtime()))
+ write(data)
+ elif kind=='output' and self.log_output:
+ odata = u'\n'.join([u'#[Out]# %s' % s
+ for s in data.splitlines()])
+ write(u'%s\n' % odata)
+ try:
+ self.logfile.flush()
+ except OSError:
+ print("Failed to flush the log file.")
+ print(
+                f"Please check that {self.logfname} exists and has the right permissions."
+ )
+ print(
+ "Also consider turning off the log with `%logstop` to avoid this warning."
+ )
+
+ def logstop(self):
+ """Fully stop logging and close log file.
+
+ In order to start logging again, a new logstart() call needs to be
+ made, possibly (though not necessarily) with a new filename, mode and
+ other options."""
+
+ if self.logfile is not None:
+ self.logfile.close()
+ self.logfile = None
+ else:
+ print("Logging hadn't been started.")
+ self.log_active = False
+
+ # For backwards compatibility, in case anyone was using this.
+ close_log = logstop
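
A hedged usage sketch of the Logger class above (not part of the diff; the temporary directory and file names are illustrative only). The flow is: construct, logstart() to open the file and write the header, log()/log_write() while active, logstop() to close.

    import os
    import tempfile

    from IPython.core.logger import Logger

    logdir = tempfile.mkdtemp()
    lg = Logger(home_dir=logdir,
                logfname=os.path.join(logdir, "session.log"),
                loghead="# IPython log file\n",
                logmode="over")            # one of: append, backup, global, over, rotate

    lg.logstart(timestamp=True)            # opens the file and writes loghead
    lg.log("print('hi')\n", "print 'hi'")  # writes the processed form (log_raw_input=False)
    lg.log_write("2\n", kind="output")     # ignored here because log_output=False
    lg.logstate()                          # prints filename, mode and flags
    lg.logstop()                           # closes the file; call logstart() again to resume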
diff --git a/contrib/python/ipython/py3/IPython/core/macro.py b/contrib/python/ipython/py3/IPython/core/macro.py
new file mode 100644
index 0000000000..ce86898cac
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/macro.py
@@ -0,0 +1,53 @@
+"""Support for interactive macros in IPython"""
+
+#*****************************************************************************
+# Copyright (C) 2001-2005 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import re
+
+from IPython.utils.encoding import DEFAULT_ENCODING
+
+coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
+
+class Macro(object):
+ """Simple class to store the value of macros as strings.
+
+ Macro is just a callable that executes a string of IPython
+ input when called.
+ """
+
+ def __init__(self,code):
+ """store the macro value, as a single string which can be executed"""
+ lines = []
+ enc = None
+ for line in code.splitlines():
+ coding_match = coding_declaration.match(line)
+ if coding_match:
+ enc = coding_match.group(1)
+ else:
+ lines.append(line)
+ code = "\n".join(lines)
+ if isinstance(code, bytes):
+ code = code.decode(enc or DEFAULT_ENCODING)
+ self.value = code + '\n'
+
+ def __str__(self):
+ return self.value
+
+ def __repr__(self):
+ return 'IPython.macro.Macro(%s)' % repr(self.value)
+
+ def __getstate__(self):
+ """ needed for safe pickling via %store """
+ return {'value': self.value}
+
+ def __add__(self, other):
+ if isinstance(other, Macro):
+ return Macro(self.value + other.value)
+ elif isinstance(other, str):
+ return Macro(self.value + other)
+ raise TypeError
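
A small sketch of how Macro behaves (not part of the diff): the constructor strips `# coding: ...` declaration lines, guarantees a trailing newline, and `+` concatenates macros or plain strings into a new Macro.

    from IPython.core.macro import Macro

    m1 = Macro("# coding: utf-8\nx = 1\nprint(x)")
    print(repr(m1.value))           # 'x = 1\nprint(x)\n' -- coding line stripped, newline added

    m2 = Macro("y = 2")
    combined = m1 + m2              # Macro + Macro -> new Macro
    extended = combined + "z = 3"   # Macro + str   -> new Macro
    print(str(extended))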
diff --git a/contrib/python/ipython/py3/IPython/core/magic.py b/contrib/python/ipython/py3/IPython/core/magic.py
new file mode 100644
index 0000000000..4f9e4e548f
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magic.py
@@ -0,0 +1,757 @@
+# encoding: utf-8
+"""Magic functions for InteractiveShell.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
+# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
+# Copyright (C) 2008 The IPython Development Team
+
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+from getopt import getopt, GetoptError
+
+from traitlets.config.configurable import Configurable
+from . import oinspect
+from .error import UsageError
+from .inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
+from ..utils.ipstruct import Struct
+from ..utils.process import arg_split
+from ..utils.text import dedent
+from traitlets import Bool, Dict, Instance, observe
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# A dict we'll use for each class that has magics, used as temporary storage to
+# pass information between the @line/cell_magic method decorators and the
+# @magics_class class decorator, because the method decorators have no
+# access to the class when they run. See for more details:
+# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
+
+magics = dict(line={}, cell={})
+
+magic_kinds = ('line', 'cell')
+magic_spec = ('line', 'cell', 'line_cell')
+magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
+
+#-----------------------------------------------------------------------------
+# Utility classes and functions
+#-----------------------------------------------------------------------------
+
+class Bunch: pass
+
+
+def on_off(tag):
+ """Return an ON/OFF string for a 1/0 input. Simple utility function."""
+ return ['OFF','ON'][tag]
+
+
+def compress_dhist(dh):
+ """Compress a directory history into a new one with at most 20 entries.
+
+ Return a new list made from the first and last 10 elements of dhist after
+ removal of duplicates.
+ """
+ head, tail = dh[:-10], dh[-10:]
+
+ newhead = []
+ done = set()
+ for h in head:
+ if h in done:
+ continue
+ newhead.append(h)
+ done.add(h)
+
+ return newhead + tail
+
+
+def needs_local_scope(func):
+ """Decorator to mark magic functions which need to local scope to run."""
+ func.needs_local_scope = True
+ return func
+
+#-----------------------------------------------------------------------------
+# Class and method decorators for registering magics
+#-----------------------------------------------------------------------------
+
+def magics_class(cls):
+ """Class decorator for all subclasses of the main Magics class.
+
+ Any class that subclasses Magics *must* also apply this decorator, to
+ ensure that all the methods that have been decorated as line/cell magics
+ get correctly registered in the class instance. This is necessary because
+ when method decorators run, the class does not exist yet, so they
+ temporarily store their information into a module global. Application of
+ this class decorator copies that global data to the class instance and
+ clears the global.
+
+ Obviously, this mechanism is not thread-safe, which means that the
+    *creation* of subclasses of Magics should only be done in a single-thread
+ context. Instantiation of the classes has no restrictions. Given that
+ these classes are typically created at IPython startup time and before user
+ application code becomes active, in practice this should not pose any
+ problems.
+ """
+ cls.registered = True
+ cls.magics = dict(line = magics['line'],
+ cell = magics['cell'])
+ magics['line'] = {}
+ magics['cell'] = {}
+ return cls
+
+
+def record_magic(dct, magic_kind, magic_name, func):
+ """Utility function to store a function as a magic of a specific kind.
+
+ Parameters
+ ----------
+ dct : dict
+ A dictionary with 'line' and 'cell' subdicts.
+ magic_kind : str
+ Kind of magic to be stored.
+ magic_name : str
+ Key to store the magic as.
+ func : function
+ Callable object to store.
+ """
+ if magic_kind == 'line_cell':
+ dct['line'][magic_name] = dct['cell'][magic_name] = func
+ else:
+ dct[magic_kind][magic_name] = func
+
+
+def validate_type(magic_kind):
+ """Ensure that the given magic_kind is valid.
+
+ Check that the given magic_kind is one of the accepted spec types (stored
+ in the global `magic_spec`), raise ValueError otherwise.
+ """
+ if magic_kind not in magic_spec:
+        raise ValueError('magic_kind must be one of %s, %s given' %
+                         (magic_spec, magic_kind))
+
+
+# The docstrings for the decorator below will be fairly similar for the two
+# types (method and function), so we generate them here once and reuse the
+# templates below.
+_docstring_template = \
+"""Decorate the given {0} as {1} magic.
+
+The decorator can be used with or without arguments, as follows.
+
+i) without arguments: it will create a {1} magic named as the {0} being
+decorated::
+
+ @deco
+ def foo(...)
+
+will create a {1} magic named `foo`.
+
+ii) with one string argument: which will be used as the actual name of the
+resulting magic::
+
+ @deco('bar')
+ def foo(...)
+
+will create a {1} magic named `bar`.
+
+To register a class of magics use ``InteractiveShell.register_magics(class or instance)``.
+"""
+
+# These two are decorator factories. While they are conceptually very similar,
+# there are enough differences in the details that it's simpler to have them
+# written as completely standalone functions rather than trying to share code
+# and make a single one with convoluted logic.
+
+def _method_magic_marker(magic_kind):
+ """Decorator factory for methods in Magics subclasses.
+ """
+
+ validate_type(magic_kind)
+
+ # This is a closure to capture the magic_kind. We could also use a class,
+ # but it's overkill for just that one bit of state.
+ def magic_deco(arg):
+ if callable(arg):
+ # "Naked" decorator call (just @foo, no args)
+ func = arg
+ name = func.__name__
+ retval = arg
+ record_magic(magics, magic_kind, name, name)
+ elif isinstance(arg, str):
+ # Decorator called with arguments (@foo('bar'))
+ name = arg
+ def mark(func, *a, **kw):
+ record_magic(magics, magic_kind, name, func.__name__)
+ return func
+ retval = mark
+ else:
+ raise TypeError("Decorator can only be called with "
+ "string or function")
+ return retval
+
+ # Ensure the resulting decorator has a usable docstring
+ magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
+ return magic_deco
+
+
+def _function_magic_marker(magic_kind):
+ """Decorator factory for standalone functions.
+ """
+ validate_type(magic_kind)
+
+ # This is a closure to capture the magic_kind. We could also use a class,
+ # but it's overkill for just that one bit of state.
+ def magic_deco(arg):
+ # Find get_ipython() in the caller's namespace
+ caller = sys._getframe(1)
+ for ns in ['f_locals', 'f_globals', 'f_builtins']:
+ get_ipython = getattr(caller, ns).get('get_ipython')
+ if get_ipython is not None:
+ break
+ else:
+ raise NameError('Decorator can only run in context where '
+ '`get_ipython` exists')
+
+ ip = get_ipython()
+
+ if callable(arg):
+ # "Naked" decorator call (just @foo, no args)
+ func = arg
+ name = func.__name__
+ ip.register_magic_function(func, magic_kind, name)
+ retval = arg
+ elif isinstance(arg, str):
+ # Decorator called with arguments (@foo('bar'))
+ name = arg
+ def mark(func, *a, **kw):
+ ip.register_magic_function(func, magic_kind, name)
+ return func
+ retval = mark
+ else:
+ raise TypeError("Decorator can only be called with "
+ "string or function")
+ return retval
+
+ # Ensure the resulting decorator has a usable docstring
+ ds = _docstring_template.format('function', magic_kind)
+
+ ds += dedent("""
+ Note: this decorator can only be used in a context where IPython is already
+ active, so that the `get_ipython()` call succeeds. You can therefore use
+ it in your startup files loaded after IPython initializes, but *not* in the
+ IPython configuration file itself, which is executed before IPython is
+ fully up and running. Any file located in the `startup` subdirectory of
+ your configuration profile will be OK in this sense.
+ """)
+
+ magic_deco.__doc__ = ds
+ return magic_deco
+
+
+MAGIC_NO_VAR_EXPAND_ATTR = "_ipython_magic_no_var_expand"
+MAGIC_OUTPUT_CAN_BE_SILENCED = "_ipython_magic_output_can_be_silenced"
+
+
+def no_var_expand(magic_func):
+ """Mark a magic function as not needing variable expansion
+
+ By default, IPython interprets `{a}` or `$a` in the line passed to magics
+ as variables that should be interpolated from the interactive namespace
+ before passing the line to the magic function.
+ This is not always desirable, e.g. when the magic executes Python code
+ (%timeit, %time, etc.).
+ Decorate magics with `@no_var_expand` to opt-out of variable expansion.
+
+ .. versionadded:: 7.3
+ """
+ setattr(magic_func, MAGIC_NO_VAR_EXPAND_ATTR, True)
+ return magic_func
+
+
+def output_can_be_silenced(magic_func):
+ """Mark a magic function so its output may be silenced.
+
+ The output is silenced if the Python code used as a parameter of
+ the magic ends in a semicolon, not counting a Python comment that can
+ follow it.
+ """
+ setattr(magic_func, MAGIC_OUTPUT_CAN_BE_SILENCED, True)
+ return magic_func
+
+# Create the actual decorators for public use
+
+# These three are used to decorate methods in class definitions
+line_magic = _method_magic_marker('line')
+cell_magic = _method_magic_marker('cell')
+line_cell_magic = _method_magic_marker('line_cell')
+
+# These three decorate standalone functions and perform the decoration
+# immediately. They can only run where get_ipython() works
+register_line_magic = _function_magic_marker('line')
+register_cell_magic = _function_magic_marker('cell')
+register_line_cell_magic = _function_magic_marker('line_cell')
+
+#-----------------------------------------------------------------------------
+# Core Magic classes
+#-----------------------------------------------------------------------------
+
+class MagicsManager(Configurable):
+ """Object that handles all magic-related functionality for IPython.
+ """
+ # Non-configurable class attributes
+
+ # A two-level dict, first keyed by magic type, then by magic function, and
+ # holding the actual callable object as value. This is the dict used for
+ # magic function dispatch
+ magics = Dict()
+ lazy_magics = Dict(
+ help="""
+ Mapping from magic names to modules to load.
+
+ This can be used in IPython/IPykernel configuration to declare lazy magics
+ that will only be imported/registered on first use.
+
+ For example::
+
+ c.MagicsManager.lazy_magics = {
+ "my_magic": "slow.to.import",
+ "my_other_magic": "also.slow",
+ }
+
+        On first invocation of `%my_magic`, `%%my_magic`, `%my_other_magic` or
+        `%%my_other_magic`, the corresponding module will be loaded as an IPython
+        extension, as if you had previously run `%load_ext` on it.
+
+        Magic names should be given without percent sign(s), as magics can be
+        both cell and line magics.
+
+        Lazy loading happens relatively late in the execution process, so
+        complex extensions that manipulate Python/IPython internal state or
+        global state might not support lazy loading.
+ """
+ ).tag(
+ config=True,
+ )
+
+ # A registry of the original objects that we've been given holding magics.
+ registry = Dict()
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ auto_magic = Bool(True, help=
+ "Automatically call line magics without requiring explicit % prefix"
+ ).tag(config=True)
+ @observe('auto_magic')
+ def _auto_magic_changed(self, change):
+ self.shell.automagic = change['new']
+
+ _auto_status = [
+ 'Automagic is OFF, % prefix IS needed for line magics.',
+ 'Automagic is ON, % prefix IS NOT needed for line magics.']
+
+ user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True)
+
+ def __init__(self, shell=None, config=None, user_magics=None, **traits):
+
+ super(MagicsManager, self).__init__(shell=shell, config=config,
+ user_magics=user_magics, **traits)
+ self.magics = dict(line={}, cell={})
+ # Let's add the user_magics to the registry for uniformity, so *all*
+ # registered magic containers can be found there.
+ self.registry[user_magics.__class__.__name__] = user_magics
+
+ def auto_status(self):
+ """Return descriptive string with automagic status."""
+ return self._auto_status[self.auto_magic]
+
+ def lsmagic(self):
+ """Return a dict of currently available magic functions.
+
+ The return dict has the keys 'line' and 'cell', corresponding to the
+ two types of magics we support. Each value is a list of names.
+ """
+ return self.magics
+
+ def lsmagic_docs(self, brief=False, missing=''):
+ """Return dict of documentation of magic functions.
+
+ The return dict has the keys 'line' and 'cell', corresponding to the
+ two types of magics we support. Each value is a dict keyed by magic
+ name whose value is the function docstring. If a docstring is
+ unavailable, the value of `missing` is used instead.
+
+ If brief is True, only the first line of each docstring will be returned.
+ """
+ docs = {}
+ for m_type in self.magics:
+ m_docs = {}
+ for m_name, m_func in self.magics[m_type].items():
+ if m_func.__doc__:
+ if brief:
+ m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
+ else:
+ m_docs[m_name] = m_func.__doc__.rstrip()
+ else:
+ m_docs[m_name] = missing
+ docs[m_type] = m_docs
+ return docs
+
+ def register_lazy(self, name: str, fully_qualified_name: str):
+ """
+ Lazily register a magic via an extension.
+
+
+ Parameters
+ ----------
+ name : str
+ Name of the magic you wish to register.
+        fully_qualified_name : str
+            Fully qualified name of the module/submodule that should be loaded
+            as an extension when the magic is first called.
+            It is assumed that loading this extension will register the given
+ magic.
+ """
+
+ self.lazy_magics[name] = fully_qualified_name
+
+ def register(self, *magic_objects):
+ """Register one or more instances of Magics.
+
+ Take one or more classes or instances of classes that subclass the main
+ `core.Magic` class, and register them with IPython to use the magic
+ functions they provide. The registration process will then ensure that
+        any methods that have been decorated to provide line and/or cell magics will
+ be recognized with the `%x`/`%%x` syntax as a line/cell magic
+ respectively.
+
+ If classes are given, they will be instantiated with the default
+ constructor. If your classes need a custom constructor, you should
+        instantiate them first and pass the instance.
+
+ The provided arguments can be an arbitrary mix of classes and instances.
+
+ Parameters
+ ----------
+ *magic_objects : one or more classes or instances
+ """
+ # Start by validating them to ensure they have all had their magic
+ # methods registered at the instance level
+ for m in magic_objects:
+ if not m.registered:
+                raise ValueError("Class of magics %r was constructed without "
+                                 "the @magics_class class decorator" % m)
+ if isinstance(m, type):
+ # If we're given an uninstantiated class
+ m = m(shell=self.shell)
+
+ # Now that we have an instance, we can register it and update the
+ # table of callables
+ self.registry[m.__class__.__name__] = m
+ for mtype in magic_kinds:
+ self.magics[mtype].update(m.magics[mtype])
+
+ def register_function(self, func, magic_kind='line', magic_name=None):
+ """Expose a standalone function as magic function for IPython.
+
+ This will create an IPython magic (line, cell or both) from a
+ standalone function. The functions should have the following
+ signatures:
+
+ * For line magics: `def f(line)`
+ * For cell magics: `def f(line, cell)`
+ * For a function that does both: `def f(line, cell=None)`
+
+ In the latter case, the function will be called with `cell==None` when
+ invoked as `%f`, and with cell as a string when invoked as `%%f`.
+
+ Parameters
+ ----------
+ func : callable
+ Function to be registered as a magic.
+ magic_kind : str
+ Kind of magic, one of 'line', 'cell' or 'line_cell'
+ magic_name : optional str
+ If given, the name the magic will have in the IPython namespace. By
+ default, the name of the function itself is used.
+ """
+
+ # Create the new method in the user_magics and register it in the
+ # global table
+ validate_type(magic_kind)
+ magic_name = func.__name__ if magic_name is None else magic_name
+ setattr(self.user_magics, magic_name, func)
+ record_magic(self.magics, magic_kind, magic_name, func)
+
+ def register_alias(self, alias_name, magic_name, magic_kind='line', magic_params=None):
+ """Register an alias to a magic function.
+
+ The alias is an instance of :class:`MagicAlias`, which holds the
+ name and kind of the magic it should call. Binding is done at
+ call time, so if the underlying magic function is changed the alias
+ will call the new function.
+
+ Parameters
+ ----------
+ alias_name : str
+ The name of the magic to be registered.
+ magic_name : str
+ The name of an existing magic.
+ magic_kind : str
+ Kind of magic, one of 'line' or 'cell'
+ """
+
+ # `validate_type` is too permissive, as it allows 'line_cell'
+ # which we do not handle.
+ if magic_kind not in magic_kinds:
+            raise ValueError('magic_kind must be one of %s, %s given' %
+                             (magic_kinds, magic_kind))
+
+ alias = MagicAlias(self.shell, magic_name, magic_kind, magic_params)
+ setattr(self.user_magics, alias_name, alias)
+ record_magic(self.magics, magic_kind, alias_name, alias)
+
+# Key base class that provides the central functionality for magics.
+
+
+class Magics(Configurable):
+ """Base class for implementing magic functions.
+
+ Shell functions which can be reached as %function_name. All magic
+ functions should accept a string, which they can parse for their own
+    needs. This can make some functions easier to type, e.g. `%cd ../`
+ vs. `%cd("../")`
+
+ Classes providing magic functions need to subclass this class, and they
+ MUST:
+
+ - Use the method decorators `@line_magic` and `@cell_magic` to decorate
+ individual methods as magic functions, AND
+
+ - Use the class decorator `@magics_class` to ensure that the magic
+ methods are properly registered at the instance level upon instance
+ initialization.
+
+    See :mod:`IPython.core.magics` for examples of actual implementation classes.
+ """
+ # Dict holding all command-line options for each magic.
+ options_table = None
+ # Dict for the mapping of magic names to methods, set by class decorator
+ magics = None
+ # Flag to check that the class decorator was properly applied
+ registered = False
+ # Instance of IPython shell
+ shell = None
+
+ def __init__(self, shell=None, **kwargs):
+ if not(self.__class__.registered):
+ raise ValueError('Magics subclass without registration - '
+ 'did you forget to apply @magics_class?')
+ if shell is not None:
+ if hasattr(shell, 'configurables'):
+ shell.configurables.append(self)
+ if hasattr(shell, 'config'):
+ kwargs.setdefault('parent', shell)
+
+ self.shell = shell
+ self.options_table = {}
+ # The method decorators are run when the instance doesn't exist yet, so
+ # they can only record the names of the methods they are supposed to
+        # grab. Only now that the instance exists can we create the proper
+ # mapping to bound methods. So we read the info off the original names
+ # table and replace each method name by the actual bound method.
+ # But we mustn't clobber the *class* mapping, in case of multiple instances.
+ class_magics = self.magics
+ self.magics = {}
+ for mtype in magic_kinds:
+ tab = self.magics[mtype] = {}
+ cls_tab = class_magics[mtype]
+ for magic_name, meth_name in cls_tab.items():
+ if isinstance(meth_name, str):
+ # it's a method name, grab it
+ tab[magic_name] = getattr(self, meth_name)
+ else:
+ # it's the real thing
+ tab[magic_name] = meth_name
+ # Configurable **needs** to be initiated at the end or the config
+ # magics get screwed up.
+ super(Magics, self).__init__(**kwargs)
+
+ def arg_err(self,func):
+ """Print docstring if incorrect arguments were passed"""
+ print('Error in arguments:')
+ print(oinspect.getdoc(func))
+
+ def format_latex(self, strng):
+ """Format a string for latex inclusion."""
+
+ # Characters that need to be escaped for latex:
+ escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
+ # Magic command names as headers:
+ cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
+ re.MULTILINE)
+ # Magic commands
+ cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
+ re.MULTILINE)
+ # Paragraph continue
+ par_re = re.compile(r'\\$',re.MULTILINE)
+
+ # The "\n" symbol
+ newline_re = re.compile(r'\\n')
+
+ # Now build the string for output:
+ #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
+ strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
+ strng)
+ strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
+ strng = par_re.sub(r'\\\\',strng)
+ strng = escape_re.sub(r'\\\1',strng)
+ strng = newline_re.sub(r'\\textbackslash{}n',strng)
+ return strng
+
+ def parse_options(self, arg_str, opt_str, *long_opts, **kw):
+ """Parse options passed to an argument string.
+
+ The interface is similar to that of :func:`getopt.getopt`, but it
+ returns a :class:`~IPython.utils.struct.Struct` with the options as keys
+ and the stripped argument string still as a string.
+
+ arg_str is quoted as a true sys.argv vector by using shlex.split.
+ This allows us to easily expand variables, glob files, quote
+ arguments, etc.
+
+ Parameters
+ ----------
+ arg_str : str
+ The arguments to parse.
+ opt_str : str
+ The options specification.
+ mode : str, default 'string'
+ If given as 'list', the argument string is returned as a list (split
+ on whitespace) instead of a string.
+ list_all : bool, default False
+ Put all option values in lists. Normally only options
+ appearing more than once are put in a list.
+ posix : bool, default True
+ Whether to split the input line in POSIX mode or not, as per the
+ conventions outlined in the :mod:`shlex` module from the standard
+ library.
+ """
+
+ # inject default options at the beginning of the input line
+ caller = sys._getframe(1).f_code.co_name
+ arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
+
+ mode = kw.get('mode','string')
+ if mode not in ['string','list']:
+ raise ValueError('incorrect mode given: %s' % mode)
+ # Get options
+ list_all = kw.get('list_all',0)
+ posix = kw.get('posix', os.name == 'posix')
+ strict = kw.get('strict', True)
+
+ preserve_non_opts = kw.get("preserve_non_opts", False)
+ remainder_arg_str = arg_str
+
+        # Check if we have any arguments to warrant extra processing:
+ odict = {} # Dictionary with options
+ args = arg_str.split()
+ if len(args) >= 1:
+            # Only look for options when there is at least one token
+            # to parse in the argument string.
+ argv = arg_split(arg_str, posix, strict)
+ # Do regular option processing
+ try:
+ opts,args = getopt(argv, opt_str, long_opts)
+ except GetoptError as e:
+ raise UsageError(
+ '%s ( allowed: "%s" %s)' % (e.msg, opt_str, " ".join(long_opts))
+ ) from e
+ for o, a in opts:
+ if mode == "string" and preserve_non_opts:
+ # remove option-parts from the original args-string and preserve remaining-part.
+ # This relies on the arg_split(...) and getopt(...)'s impl spec, that the parsed options are
+ # returned in the original order.
+ remainder_arg_str = remainder_arg_str.replace(o, "", 1).replace(
+ a, "", 1
+ )
+ if o.startswith("--"):
+ o = o[2:]
+ else:
+ o = o[1:]
+ try:
+ odict[o].append(a)
+ except AttributeError:
+ odict[o] = [odict[o],a]
+ except KeyError:
+ if list_all:
+ odict[o] = [a]
+ else:
+ odict[o] = a
+
+ # Prepare opts,args for return
+ opts = Struct(odict)
+ if mode == 'string':
+ if preserve_non_opts:
+ args = remainder_arg_str.lstrip()
+ else:
+ args = " ".join(args)
+
+ return opts,args
+
+ def default_option(self, fn, optstr):
+ """Make an entry in the options_table for fn, with value optstr"""
+
+ if fn not in self.lsmagic():
+ error("%s is not a magic function" % fn)
+ self.options_table[fn] = optstr
+
+
+class MagicAlias(object):
+ """An alias to another magic function.
+
+ An alias is determined by its magic name and magic kind. Lookup
+ is done at call time, so if the underlying magic changes the alias
+ will call the new function.
+
+ Use the :meth:`MagicsManager.register_alias` method or the
+ `%alias_magic` magic function to create and register a new alias.
+ """
+ def __init__(self, shell, magic_name, magic_kind, magic_params=None):
+ self.shell = shell
+ self.magic_name = magic_name
+ self.magic_params = magic_params
+ self.magic_kind = magic_kind
+
+ self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
+ self.__doc__ = "Alias for `%s`." % self.pretty_target
+
+ self._in_call = False
+
+ def __call__(self, *args, **kwargs):
+ """Call the magic alias."""
+ fn = self.shell.find_magic(self.magic_name, self.magic_kind)
+ if fn is None:
+ raise UsageError("Magic `%s` not found." % self.pretty_target)
+
+ # Protect against infinite recursion.
+ if self._in_call:
+ raise UsageError("Infinite recursion detected; "
+ "magic aliases cannot call themselves.")
+ self._in_call = True
+ try:
+ if self.magic_params:
+ args_list = list(args)
+ args_list[0] = self.magic_params + " " + args[0]
+ args = tuple(args_list)
+ return fn(*args, **kwargs)
+ finally:
+ self._in_call = False
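
A minimal registration sketch for the decorators and manager defined above (not part of the diff). The ShoutMagics class, its methods and the "yell" alias are hypothetical names used only for illustration; the registration calls at the bottom assume a running IPython session where get_ipython() is available.

    from IPython.core.magic import Magics, magics_class, line_magic, cell_magic

    @magics_class                      # copies the recorded magics onto the class
    class ShoutMagics(Magics):
        """Toy magics illustrating the line/cell decorators."""

        @line_magic
        def shout(self, line):
            # %shout hello  ->  'HELLO'
            return line.upper()

        @cell_magic
        def count(self, line, cell):
            # %%count  ->  number of lines in the cell body
            return len(cell.splitlines())

    # Inside a running IPython session:
    #   ip = get_ipython()
    #   ip.register_magics(ShoutMagics)                    # instantiate + register
    #   ip.magics_manager.register_alias("yell", "shout")  # %yell forwards to %shout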
diff --git a/contrib/python/ipython/py3/IPython/core/magic_arguments.py b/contrib/python/ipython/py3/IPython/core/magic_arguments.py
new file mode 100644
index 0000000000..24dd541876
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magic_arguments.py
@@ -0,0 +1,310 @@
+''' A decorator-based method of constructing IPython magics with `argparse`
+option handling.
+
+New magic functions can be defined like so::
+
+ from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+
+ @magic_arguments()
+ @argument('-o', '--option', help='An optional argument.')
+ @argument('arg', type=int, help='An integer positional argument.')
+ def magic_cool(self, arg):
+ """ A really cool magic command.
+
+ """
+ args = parse_argstring(magic_cool, arg)
+ ...
+
+The `@magic_arguments` decorator marks the function as having argparse arguments.
+The `@argument` decorator adds an argument using the same syntax as argparse's
+`add_argument()` method. More sophisticated uses may also require the
+`@argument_group` or `@kwds` decorator to customize the formatting and the
+parsing.
+
+Help text for the magic is automatically generated from the docstring and the
+arguments::
+
+ In[1]: %cool?
+ %cool [-o OPTION] arg
+
+ A really cool magic command.
+
+ positional arguments:
+ arg An integer positional argument.
+
+ optional arguments:
+ -o OPTION, --option OPTION
+ An optional argument.
+
+Here is a more elaborate example that uses default parameters in `argument` and accesses the parsed `args` in the cell magic::
+
+ from IPython.core.magic import register_cell_magic
+ from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+
+
+ @magic_arguments()
+ @argument(
+ "--option",
+ "-o",
+ help=("Add an option here"),
+ )
+ @argument(
+ "--style",
+ "-s",
+ default="foo",
+ help=("Add some style arguments"),
+ )
+ @register_cell_magic
+ def my_cell_magic(line, cell):
+ args = parse_argstring(my_cell_magic, line)
+ print(f"{args.option=}")
+ print(f"{args.style=}")
+ print(f"{cell=}")
+
+In a jupyter notebook, this cell magic can be executed like this::
+
+ %%my_cell_magic -o Hello
+ print("bar")
+ i = 42
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.magic_arguments
+ :parts: 3
+
+'''
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import argparse
+import re
+
+# Our own imports
+from IPython.core.error import UsageError
+from IPython.utils.decorators import undoc
+from IPython.utils.process import arg_split
+from IPython.utils.text import dedent
+
+NAME_RE = re.compile(r"[a-zA-Z][a-zA-Z0-9_-]*$")
+
+@undoc
+class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
+ """A HelpFormatter with a couple of changes to meet our needs.
+ """
+ # Modified to dedent text.
+ def _fill_text(self, text, width, indent):
+ return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
+
+ # Modified to wrap argument placeholders in <> where necessary.
+ def _format_action_invocation(self, action):
+ if not action.option_strings:
+ metavar, = self._metavar_formatter(action, action.dest)(1)
+ return metavar
+
+ else:
+ parts = []
+
+ # if the Optional doesn't take a value, format is:
+ # -s, --long
+ if action.nargs == 0:
+ parts.extend(action.option_strings)
+
+ # if the Optional takes a value, format is:
+ # -s ARGS, --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ # IPYTHON MODIFICATION: If args_string is not a plain name, wrap
+ # it in <> so it's valid RST.
+ if not NAME_RE.match(args_string):
+ args_string = "<%s>" % args_string
+ for option_string in action.option_strings:
+ parts.append('%s %s' % (option_string, args_string))
+
+ return ', '.join(parts)
+
+ # Override the default prefix ('usage') to our % magic escape,
+ # in a code block.
+ def add_usage(self, usage, actions, groups, prefix="::\n\n %"):
+ super(MagicHelpFormatter, self).add_usage(usage, actions, groups, prefix)
+
+class MagicArgumentParser(argparse.ArgumentParser):
+ """ An ArgumentParser tweaked for use by IPython magics.
+ """
+ def __init__(self,
+ prog=None,
+ usage=None,
+ description=None,
+ epilog=None,
+ parents=None,
+ formatter_class=MagicHelpFormatter,
+ prefix_chars='-',
+ argument_default=None,
+ conflict_handler='error',
+ add_help=False):
+ if parents is None:
+ parents = []
+ super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
+ description=description, epilog=epilog,
+ parents=parents, formatter_class=formatter_class,
+ prefix_chars=prefix_chars, argument_default=argument_default,
+ conflict_handler=conflict_handler, add_help=add_help)
+
+ def error(self, message):
+ """ Raise a catchable error instead of exiting.
+ """
+ raise UsageError(message)
+
+ def parse_argstring(self, argstring):
+ """ Split a string into an argument list and parse that argument list.
+ """
+ argv = arg_split(argstring)
+ return self.parse_args(argv)
+
+
+def construct_parser(magic_func):
+ """ Construct an argument parser using the function decorations.
+ """
+ kwds = getattr(magic_func, 'argcmd_kwds', {})
+ if 'description' not in kwds:
+ kwds['description'] = getattr(magic_func, '__doc__', None)
+ arg_name = real_name(magic_func)
+ parser = MagicArgumentParser(arg_name, **kwds)
+ # Reverse the list of decorators in order to apply them in the
+ # order in which they appear in the source.
+ group = None
+ for deco in magic_func.decorators[::-1]:
+ result = deco.add_to_parser(parser, group)
+ if result is not None:
+ group = result
+
+ # Replace the magic function's docstring with the full help text.
+ magic_func.__doc__ = parser.format_help()
+
+ return parser
+
+
+def parse_argstring(magic_func, argstring):
+ """ Parse the string of arguments for the given magic function.
+ """
+ return magic_func.parser.parse_argstring(argstring)
+
+
+def real_name(magic_func):
+ """ Find the real name of the magic.
+ """
+ magic_name = magic_func.__name__
+ if magic_name.startswith('magic_'):
+ magic_name = magic_name[len('magic_'):]
+ return getattr(magic_func, 'argcmd_name', magic_name)
+
+
+class ArgDecorator(object):
+ """ Base class for decorators to add ArgumentParser information to a method.
+ """
+
+ def __call__(self, func):
+ if not getattr(func, 'has_arguments', False):
+ func.has_arguments = True
+ func.decorators = []
+ func.decorators.append(self)
+ return func
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser, if necessary.
+ """
+ pass
+
+
+class magic_arguments(ArgDecorator):
+ """ Mark the magic as having argparse arguments and possibly adjust the
+ name.
+ """
+
+ def __init__(self, name=None):
+ self.name = name
+
+ def __call__(self, func):
+ if not getattr(func, 'has_arguments', False):
+ func.has_arguments = True
+ func.decorators = []
+ if self.name is not None:
+ func.argcmd_name = self.name
+ # This should be the first decorator in the list of decorators, thus the
+ # last to execute. Build the parser.
+ func.parser = construct_parser(func)
+ return func
+
+
+class ArgMethodWrapper(ArgDecorator):
+
+ """
+ Base class to define a wrapper for ArgumentParser method.
+
+ Child class must define either `_method_name` or `add_to_parser`.
+
+ """
+
+ _method_name: str
+
+ def __init__(self, *args, **kwds):
+ self.args = args
+ self.kwds = kwds
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser.
+ """
+ if group is not None:
+ parser = group
+ getattr(parser, self._method_name)(*self.args, **self.kwds)
+ return None
+
+
+class argument(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to add_argument().
+
+ Instances also serve to decorate command methods.
+ """
+ _method_name = 'add_argument'
+
+
+class defaults(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to set_defaults().
+
+ Instances also serve to decorate command methods.
+ """
+ _method_name = 'set_defaults'
+
+
+class argument_group(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to add_argument_group().
+
+ Instances also serve to decorate command methods.
+ """
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser.
+ """
+ return parser.add_argument_group(*self.args, **self.kwds)
+
+
+class kwds(ArgDecorator):
+ """ Provide other keywords to the sub-parser constructor.
+ """
+ def __init__(self, **kwds):
+ self.kwds = kwds
+
+ def __call__(self, func):
+ func = super(kwds, self).__call__(func)
+ func.argcmd_kwds = self.kwds
+ return func
+
+
+__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
+ 'parse_argstring']
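+
+
+# NOTE: editorial illustration only, not part of upstream IPython. A hedged
+# sketch of the decorators the module docstring does not demonstrate
+# (`argument_group` and `defaults`). The names `_example_fetch`, `--host`,
+# `--port` and `retries` are hypothetical; the function is defined purely for
+# illustration and is never registered as a real magic here.
+@magic_arguments()
+@defaults(retries=3)
+@argument_group("Connection")
+@argument("--host", default="localhost", help="Server to contact.")
+@argument("--port", type=int, default=8080, help="Port to use.")
+def _example_fetch(self, line):
+    """A hypothetical line-magic body illustrating grouped arguments."""
+    args = parse_argstring(_example_fetch, line)
+    # e.g. line == "--port 9000"  ->  ("localhost", 9000, 3)
+    return args.host, args.port, args.retries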
diff --git a/contrib/python/ipython/py3/IPython/core/magics/__init__.py b/contrib/python/ipython/py3/IPython/core/magics/__init__.py
new file mode 100644
index 0000000000..a6c5f474c1
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/__init__.py
@@ -0,0 +1,42 @@
+"""Implementation of all the magic functions built into IPython.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from ..magic import Magics, magics_class
+from .auto import AutoMagics
+from .basic import BasicMagics, AsyncMagics
+from .code import CodeMagics, MacroToEdit
+from .config import ConfigMagics
+from .display import DisplayMagics
+from .execution import ExecutionMagics
+from .extension import ExtensionMagics
+from .history import HistoryMagics
+from .logging import LoggingMagics
+from .namespace import NamespaceMagics
+from .osm import OSMagics
+from .packaging import PackagingMagics
+from .pylab import PylabMagics
+from .script import ScriptMagics
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class UserMagics(Magics):
+ """Placeholder for user-defined magics to be added at runtime.
+
+ All magics are eventually merged into a single namespace at runtime, but we
+ use this class to isolate the magics defined dynamically by the user into
+ their own class.
+ """
diff --git a/contrib/python/ipython/py3/IPython/core/magics/auto.py b/contrib/python/ipython/py3/IPython/core/magics/auto.py
new file mode 100644
index 0000000000..56aa4f72eb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/auto.py
@@ -0,0 +1,144 @@
+"""Implementation of magic functions that control various automatic behaviors.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from IPython.core.magic import Bunch, Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class AutoMagics(Magics):
+ """Magics that control various autoX behaviors."""
+
+ def __init__(self, shell):
+ super(AutoMagics, self).__init__(shell)
+ # namespace for holding state we may need
+ self._magic_state = Bunch()
+
+ @line_magic
+ def automagic(self, parameter_s=''):
+ """Make magic functions callable without having to type the initial %.
+
+ Without arguments toggles on/off (when off, you must call it as
+ %automagic, of course). With arguments it sets the value, and you can
+ use any of (case insensitive):
+
+ - on, 1, True: to activate
+
+ - off, 0, False: to deactivate.
+
+ Note that magic functions have lowest priority, so if there's a
+ variable whose name collides with that of a magic fn, automagic won't
+ work for that function (you get the variable instead). However, if you
+ delete the variable (del var), the previously shadowed magic function
+ becomes visible to automagic again."""
+
+ arg = parameter_s.lower()
+ mman = self.shell.magics_manager
+ if arg in ('on', '1', 'true'):
+ val = True
+ elif arg in ('off', '0', 'false'):
+ val = False
+ else:
+ val = not mman.auto_magic
+ mman.auto_magic = val
+ print('\n' + self.shell.magics_manager.auto_status())
+
+ @skip_doctest
+ @line_magic
+ def autocall(self, parameter_s=''):
+ """Make functions callable without having to type parentheses.
+
+ Usage:
+
+ %autocall [mode]
+
+ The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
+ value is toggled on and off (remembering the previous state).
+
+ In more detail, these values mean:
+
+ 0 -> fully disabled
+
+ 1 -> active, but do not apply if there are no arguments on the line.
+
+ In this mode, you get::
+
+ In [1]: callable
+ Out[1]: <built-in function callable>
+
+ In [2]: callable 'hello'
+ ------> callable('hello')
+ Out[2]: False
+
+ 2 -> Active always. Even if no arguments are present, the callable
+ object is called::
+
+ In [2]: float
+ ------> float()
+ Out[2]: 0.0
+
+ Note that even with autocall off, you can still use '/' at the start of
+ a line to treat the first argument on the command line as a function
+ and add parentheses to it::
+
+ In [8]: /str 43
+ ------> str(43)
+ Out[8]: '43'
+
+ # all-random (note for auto-testing)
+ """
+
+ valid_modes = {
+ 0: "Off",
+ 1: "Smart",
+ 2: "Full",
+ }
+
+        def errorMessage() -> str:
+            # Use a local name that does not shadow the imported error() helper.
+            msg = "Valid modes: "
+            for k, v in valid_modes.items():
+                msg += str(k) + "->" + v + ", "
+            return msg[:-2]  # strip the trailing `, ` after the last element
+
+ if parameter_s:
+            if parameter_s not in map(str, valid_modes.keys()):
+ error(errorMessage())
+ return
+ arg = int(parameter_s)
+ else:
+ arg = 'toggle'
+
+        if arg not in (*valid_modes.keys(), "toggle"):
+ error(errorMessage())
+ return
+
+        if arg in valid_modes:
+ self.shell.autocall = arg
+ else: # toggle
+ if self.shell.autocall:
+ self._magic_state.autocall_save = self.shell.autocall
+ self.shell.autocall = 0
+ else:
+ try:
+ self.shell.autocall = self._magic_state.autocall_save
+ except AttributeError:
+ self.shell.autocall = self._magic_state.autocall_save = 1
+
+ print("Automatic calling is:", list(valid_modes.values())[self.shell.autocall])
diff --git a/contrib/python/ipython/py3/IPython/core/magics/basic.py b/contrib/python/ipython/py3/IPython/core/magics/basic.py
new file mode 100644
index 0000000000..814dec72e2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/basic.py
@@ -0,0 +1,663 @@
+"""Implementation of basic magic functions."""
+
+
+from logging import error
+import io
+import os
+from pprint import pformat
+import sys
+from warnings import warn
+
+from traitlets.utils.importstring import import_item
+from IPython.core import magic_arguments, page
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
+from IPython.utils.text import format_screen, dedent, indent
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.ipstruct import Struct
+
+
+class MagicsDisplay(object):
+ def __init__(self, magics_manager, ignore=None):
+ self.ignore = ignore if ignore else []
+ self.magics_manager = magics_manager
+
+ def _lsmagic(self):
+ """The main implementation of the %lsmagic"""
+ mesc = magic_escapes['line']
+ cesc = magic_escapes['cell']
+ mman = self.magics_manager
+ magics = mman.lsmagic()
+ out = ['Available line magics:',
+ mesc + (' '+mesc).join(sorted([m for m,v in magics['line'].items() if (v not in self.ignore)])),
+ '',
+ 'Available cell magics:',
+ cesc + (' '+cesc).join(sorted([m for m,v in magics['cell'].items() if (v not in self.ignore)])),
+ '',
+ mman.auto_status()]
+ return '\n'.join(out)
+
+ def _repr_pretty_(self, p, cycle):
+ p.text(self._lsmagic())
+
+ def __str__(self):
+ return self._lsmagic()
+
+ def _jsonable(self):
+ """turn magics dict into jsonable dict of the same structure
+
+ replaces object instances with their class names as strings
+ """
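+        # Shape of the returned mapping (class names shown are illustrative):
+        #   {"line": {"timeit": "ExecutionMagics", ...},
+        #    "cell": {"writefile": "OSMagics", ...}}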
+ magic_dict = {}
+ mman = self.magics_manager
+ magics = mman.lsmagic()
+ for key, subdict in magics.items():
+ d = {}
+ magic_dict[key] = d
+ for name, obj in subdict.items():
+ try:
+ classname = obj.__self__.__class__.__name__
+ except AttributeError:
+ classname = 'Other'
+
+ d[name] = classname
+ return magic_dict
+
+ def _repr_json_(self):
+ return self._jsonable()
+
+
+@magics_class
+class BasicMagics(Magics):
+ """Magics that provide central IPython functionality.
+
+ These are various magics that don't fit into specific categories but that
+ are all part of the base 'IPython experience'."""
+
+ @skip_doctest
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-l', '--line', action='store_true',
+ help="""Create a line magic alias."""
+ )
+ @magic_arguments.argument(
+ '-c', '--cell', action='store_true',
+ help="""Create a cell magic alias."""
+ )
+ @magic_arguments.argument(
+ 'name',
+ help="""Name of the magic to be created."""
+ )
+ @magic_arguments.argument(
+ 'target',
+ help="""Name of the existing line or cell magic."""
+ )
+ @magic_arguments.argument(
+ '-p', '--params', default=None,
+ help="""Parameters passed to the magic function."""
+ )
+ @line_magic
+ def alias_magic(self, line=''):
+ """Create an alias for an existing line or cell magic.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %alias_magic t timeit
+ Created `%t` as an alias for `%timeit`.
+ Created `%%t` as an alias for `%%timeit`.
+
+ In [2]: %t -n1 pass
+ 1 loops, best of 3: 954 ns per loop
+
+ In [3]: %%t -n1
+ ...: pass
+ ...:
+ 1 loops, best of 3: 954 ns per loop
+
+ In [4]: %alias_magic --cell whereami pwd
+ UsageError: Cell magic function `%%pwd` not found.
+ In [5]: %alias_magic --line whereami pwd
+ Created `%whereami` as an alias for `%pwd`.
+
+ In [6]: %whereami
+ Out[6]: u'/home/testuser'
+
+ In [7]: %alias_magic h history "-p -l 30" --line
+ Created `%h` as an alias for `%history -l 30`.
+ """
+
+ args = magic_arguments.parse_argstring(self.alias_magic, line)
+ shell = self.shell
+ mman = self.shell.magics_manager
+ escs = ''.join(magic_escapes.values())
+
+ target = args.target.lstrip(escs)
+ name = args.name.lstrip(escs)
+
+ params = args.params
+ if (params and
+ ((params.startswith('"') and params.endswith('"'))
+ or (params.startswith("'") and params.endswith("'")))):
+ params = params[1:-1]
+
+ # Find the requested magics.
+ m_line = shell.find_magic(target, 'line')
+ m_cell = shell.find_magic(target, 'cell')
+ if args.line and m_line is None:
+ raise UsageError('Line magic function `%s%s` not found.' %
+ (magic_escapes['line'], target))
+ if args.cell and m_cell is None:
+ raise UsageError('Cell magic function `%s%s` not found.' %
+ (magic_escapes['cell'], target))
+
+ # If --line and --cell are not specified, default to the ones
+ # that are available.
+ if not args.line and not args.cell:
+ if not m_line and not m_cell:
+ raise UsageError(
+ 'No line or cell magic with name `%s` found.' % target
+ )
+ args.line = bool(m_line)
+ args.cell = bool(m_cell)
+
+ params_str = "" if params is None else " " + params
+
+ if args.line:
+ mman.register_alias(name, target, 'line', params)
+ print('Created `%s%s` as an alias for `%s%s%s`.' % (
+ magic_escapes['line'], name,
+ magic_escapes['line'], target, params_str))
+
+ if args.cell:
+ mman.register_alias(name, target, 'cell', params)
+ print('Created `%s%s` as an alias for `%s%s%s`.' % (
+ magic_escapes['cell'], name,
+ magic_escapes['cell'], target, params_str))
+
+ @line_magic
+ def lsmagic(self, parameter_s=''):
+ """List currently available magic functions."""
+ return MagicsDisplay(self.shell.magics_manager, ignore=[])
+
+ def _magic_docs(self, brief=False, rest=False):
+ """Return docstrings from magic functions."""
+ mman = self.shell.magics_manager
+ docs = mman.lsmagic_docs(brief, missing='No documentation')
+
+ if rest:
+ format_string = '**%s%s**::\n\n%s\n\n'
+ else:
+ format_string = '%s%s:\n%s\n'
+
+ return ''.join(
+ [format_string % (magic_escapes['line'], fname,
+ indent(dedent(fndoc)))
+ for fname, fndoc in sorted(docs['line'].items())]
+ +
+ [format_string % (magic_escapes['cell'], fname,
+ indent(dedent(fndoc)))
+ for fname, fndoc in sorted(docs['cell'].items())]
+ )
+
+ @line_magic
+ def magic(self, parameter_s=''):
+ """Print information about the magic function system.
+
+ Supported formats: -latex, -brief, -rest
+ """
+
+ mode = ''
+ try:
+ mode = parameter_s.split()[0][1:]
+ except IndexError:
+ pass
+
+ brief = (mode == 'brief')
+ rest = (mode == 'rest')
+ magic_docs = self._magic_docs(brief, rest)
+
+ if mode == 'latex':
+ print(self.format_latex(magic_docs))
+ return
+ else:
+ magic_docs = format_screen(magic_docs)
+
+ out = ["""
+IPython's 'magic' functions
+===========================
+
+The magic function system provides a series of functions which allow you to
+control the behavior of IPython itself, plus a lot of system-type
+features. There are two kinds of magics, line-oriented and cell-oriented.
+
+Line magics are prefixed with the % character and work much like OS
+command-line calls: they get as an argument the rest of the line, where
+arguments are passed without parentheses or quotes. For example, this will
+time the given statement::
+
+ %timeit range(1000)
+
+Cell magics are prefixed with a double %%, and they are functions that get as
+an argument not only the rest of the line, but also the lines below it in a
+separate argument. These magics are called with two arguments: the rest of the
+call line and the body of the cell, consisting of the lines below the first.
+For example::
+
+    %%timeit x = numpy.random.randn(100, 100)
+ numpy.linalg.svd(x)
+
+will time the execution of the numpy svd routine, running the assignment of x
+as part of the setup phase, which is not timed.
+
+In a line-oriented client (the terminal or Qt console IPython), starting a new
+input with %% will automatically enter cell mode, and IPython will continue
+reading input until a blank line is given. In the notebook, simply type the
+whole cell as one entity, but keep in mind that the %% escape can only be at
+the very start of the cell.
+
+NOTE: If you have 'automagic' enabled (via the command line option or with the
+%automagic function), you don't need to type in the % explicitly for line
+magics; cell magics always require an explicit '%%' escape. By default,
+IPython ships with automagic on, so you should only rarely need the % escape.
+
+Example: typing '%cd mydir' (without the quotes) changes your working directory
+to 'mydir', if it exists.
+
+For a list of the available magic functions, use %lsmagic. For a description
+of any of them, type %magic_name?, e.g. '%cd?'.
+
+Currently the magic system has the following functions:""",
+ magic_docs,
+ "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
+ str(self.lsmagic()),
+ ]
+ page.page('\n'.join(out))
+
+
+ @line_magic
+ def page(self, parameter_s=''):
+ """Pretty print the object and display it through a pager.
+
+ %page [options] OBJECT
+
+ If no object is given, use _ (last output).
+
+ Options:
+
+ -r: page str(object), don't pretty-print it."""
+
+ # After a function contributed by Olivier Aubert, slightly modified.
+
+ # Process options/args
+ opts, args = self.parse_options(parameter_s, 'r')
+ raw = 'r' in opts
+
+        oname = args or '_'
+ info = self.shell._ofind(oname)
+ if info.found:
+ if raw:
+ txt = str(info.obj)
+ else:
+ txt = pformat(info.obj)
+ page.page(txt)
+ else:
+ print('Object `%s` not found' % oname)
+
+ @line_magic
+ def pprint(self, parameter_s=''):
+ """Toggle pretty printing on/off."""
+ ptformatter = self.shell.display_formatter.formatters['text/plain']
+        ptformatter.pprint = not ptformatter.pprint
+ print('Pretty printing has been turned',
+ ['OFF','ON'][ptformatter.pprint])
+
+ @line_magic
+ def colors(self, parameter_s=''):
+ """Switch color scheme for prompts, info system and exception handlers.
+
+ Currently implemented schemes: NoColor, Linux, LightBG.
+
+ Color scheme names are not case-sensitive.
+
+ Examples
+ --------
+ To get a plain black and white terminal::
+
+ %colors nocolor
+ """
+ def color_switch_err(name):
+ warn('Error changing %s color schemes.\n%s' %
+ (name, sys.exc_info()[1]), stacklevel=2)
+
+
+ new_scheme = parameter_s.strip()
+ if not new_scheme:
+ raise UsageError(
+ "%colors: you must specify a color scheme. See '%colors?'")
+ # local shortcut
+ shell = self.shell
+
+ # Set shell colour scheme
+ try:
+ shell.colors = new_scheme
+ shell.refresh_style()
+ except:
+ color_switch_err('shell')
+
+ # Set exception colors
+ try:
+ shell.InteractiveTB.set_colors(scheme = new_scheme)
+ shell.SyntaxTB.set_colors(scheme = new_scheme)
+ except:
+ color_switch_err('exception')
+
+ # Set info (for 'object?') colors
+ if shell.color_info:
+ try:
+ shell.inspector.set_active_scheme(new_scheme)
+ except:
+ color_switch_err('object inspector')
+ else:
+ shell.inspector.set_active_scheme('NoColor')
+
+ @line_magic
+ def xmode(self, parameter_s=''):
+ """Switch modes for the exception handlers.
+
+ Valid modes: Plain, Context, Verbose, and Minimal.
+
+ If called without arguments, acts as a toggle.
+
+        When in verbose mode, passing `--show` (or `--hide`) will respectively
+        show (or hide) frames that have ``__tracebackhide__ = True`` set.
+ """
+
+ def xmode_switch_err(name):
+ warn('Error changing %s exception modes.\n%s' %
+ (name,sys.exc_info()[1]))
+
+ shell = self.shell
+ if parameter_s.strip() == "--show":
+ shell.InteractiveTB.skip_hidden = False
+ return
+ if parameter_s.strip() == "--hide":
+ shell.InteractiveTB.skip_hidden = True
+ return
+
+ new_mode = parameter_s.strip().capitalize()
+ try:
+ shell.InteractiveTB.set_mode(mode=new_mode)
+ print('Exception reporting mode:',shell.InteractiveTB.mode)
+ except:
+ xmode_switch_err('user')
+
+ @line_magic
+ def quickref(self, arg):
+ """ Show a quick reference sheet """
+ from IPython.core.usage import quick_reference
+ qr = quick_reference + self._magic_docs(brief=True)
+ page.page(qr)
+
+ @line_magic
+ def doctest_mode(self, parameter_s=''):
+ """Toggle doctest mode on and off.
+
+ This mode is intended to make IPython behave as much as possible like a
+ plain Python shell, from the perspective of how its prompts, exceptions
+ and output look. This makes it easy to copy and paste parts of a
+ session into doctests. It does so by:
+
+ - Changing the prompts to the classic ``>>>`` ones.
+ - Changing the exception reporting mode to 'Plain'.
+ - Disabling pretty-printing of output.
+
+ Note that IPython also supports the pasting of code snippets that have
+ leading '>>>' and '...' prompts in them. This means that you can paste
+ doctests from files or docstrings (even if they have leading
+ whitespace), and the code will execute correctly. You can then use
+ '%history -t' to see the translated history; this will give you the
+ input after removal of all the leading prompts and whitespace, which
+ can be pasted back into an editor.
+
+ With these features, you can switch into this mode easily whenever you
+ need to do testing and changes to doctests, without having to leave
+ your existing IPython session.
+ """
+
+ # Shorthands
+ shell = self.shell
+ meta = shell.meta
+ disp_formatter = self.shell.display_formatter
+ ptformatter = disp_formatter.formatters['text/plain']
+ # dstore is a data store kept in the instance metadata bag to track any
+ # changes we make, so we can undo them later.
+ dstore = meta.setdefault('doctest_mode',Struct())
+ save_dstore = dstore.setdefault
+
+ # save a few values we'll need to recover later
+ mode = save_dstore('mode',False)
+ save_dstore('rc_pprint',ptformatter.pprint)
+ save_dstore('xmode',shell.InteractiveTB.mode)
+ save_dstore('rc_separate_out',shell.separate_out)
+ save_dstore('rc_separate_out2',shell.separate_out2)
+ save_dstore('rc_separate_in',shell.separate_in)
+ save_dstore('rc_active_types',disp_formatter.active_types)
+
+ if not mode:
+ # turn on
+
+ # Prompt separators like plain python
+ shell.separate_in = ''
+ shell.separate_out = ''
+ shell.separate_out2 = ''
+
+
+ ptformatter.pprint = False
+ disp_formatter.active_types = ['text/plain']
+
+ shell.magic('xmode Plain')
+ else:
+ # turn off
+ shell.separate_in = dstore.rc_separate_in
+
+ shell.separate_out = dstore.rc_separate_out
+ shell.separate_out2 = dstore.rc_separate_out2
+
+ ptformatter.pprint = dstore.rc_pprint
+ disp_formatter.active_types = dstore.rc_active_types
+
+ shell.magic('xmode ' + dstore.xmode)
+
+ # mode here is the state before we switch; switch_doctest_mode takes
+ # the mode we're switching to.
+ shell.switch_doctest_mode(not mode)
+
+ # Store new mode and inform
+ dstore.mode = bool(not mode)
+ mode_label = ['OFF','ON'][dstore.mode]
+ print('Doctest mode is:', mode_label)
+
+ @line_magic
+ def gui(self, parameter_s=''):
+ """Enable or disable IPython GUI event loop integration.
+
+ %gui [GUINAME]
+
+ This magic replaces IPython's threaded shells that were activated
+ using the (pylab/wthread/etc.) command line flags. GUI toolkits
+ can now be enabled at runtime and keyboard
+ interrupts should work without any problems. The following toolkits
+ are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
+
+ %gui wx # enable wxPython event loop integration
+ %gui qt # enable PyQt/PySide event loop integration
+ # with the latest version available.
+ %gui qt6 # enable PyQt6/PySide6 event loop integration
+ %gui qt5 # enable PyQt5/PySide2 event loop integration
+ %gui gtk # enable PyGTK event loop integration
+ %gui gtk3 # enable Gtk3 event loop integration
+ %gui gtk4 # enable Gtk4 event loop integration
+ %gui tk # enable Tk event loop integration
+ %gui osx # enable Cocoa event loop integration
+ # (requires %matplotlib 1.1)
+ %gui # disable all event loop integration
+
+ WARNING: after any of these has been called you can simply create
+ an application object, but DO NOT start the event loop yourself, as
+ we have already handled that.
+ """
+ opts, arg = self.parse_options(parameter_s, '')
+ if arg=='': arg = None
+ try:
+ return self.shell.enable_gui(arg)
+ except Exception as e:
+ # print simple error message, rather than traceback if we can't
+ # hook up the GUI
+ error(str(e))
+
+ @skip_doctest
+ @line_magic
+ def precision(self, s=''):
+ """Set floating point precision for pretty printing.
+
+ Can set either integer precision or a format string.
+
+ If numpy has been imported and precision is an int,
+ numpy display precision will also be set, via ``numpy.set_printoptions``.
+
+ If no argument is given, defaults will be restored.
+
+ Examples
+ --------
+ ::
+
+ In [1]: from math import pi
+
+ In [2]: %precision 3
+ Out[2]: u'%.3f'
+
+ In [3]: pi
+ Out[3]: 3.142
+
+ In [4]: %precision %i
+ Out[4]: u'%i'
+
+ In [5]: pi
+ Out[5]: 3
+
+ In [6]: %precision %e
+ Out[6]: u'%e'
+
+ In [7]: pi**10
+ Out[7]: 9.364805e+04
+
+ In [8]: %precision
+ Out[8]: u'%r'
+
+ In [9]: pi**10
+ Out[9]: 93648.047476082982
+ """
+ ptformatter = self.shell.display_formatter.formatters['text/plain']
+ ptformatter.float_precision = s
+ return ptformatter.float_format
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ 'filename', type=str,
+ help='Notebook name or filename'
+ )
+ @line_magic
+ def notebook(self, s):
+ """Export and convert IPython notebooks.
+
+ This function can export the current IPython history to a notebook file.
+ For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
+ """
+ args = magic_arguments.parse_argstring(self.notebook, s)
+ outfname = os.path.expanduser(args.filename)
+
+ from nbformat import write, v4
+
+ cells = []
+ hist = list(self.shell.history_manager.get_range())
+        if len(hist) <= 1:
+ raise ValueError('History is empty, cannot export')
+ for session, execution_count, source in hist[:-1]:
+ cells.append(v4.new_code_cell(
+ execution_count=execution_count,
+ source=source
+ ))
+ nb = v4.new_notebook(cells=cells)
+ with io.open(outfname, "w", encoding="utf-8") as f:
+ write(nb, f, version=4)
+
+@magics_class
+class AsyncMagics(BasicMagics):
+
+ @line_magic
+ def autoawait(self, parameter_s):
+ """
+        Change the status of the autoawait option.
+
+        This allows you to set a specific asynchronous code runner.
+
+        If no value is passed, print the currently used asynchronous integration
+        and whether it is activated.
+
+        It can take a number of values, evaluated in the following order:
+
+        - False/false/off deactivates autoawait integration.
+        - True/true/on activates autoawait integration using the configured
+          default loop.
+        - asyncio/curio/trio activates autoawait integration and uses the
+          integration with said library.
+
+        - `sync` turns on the pseudo-sync integration (mostly used for
+          `IPython.embed()`, which does not run IPython with a real event loop
+          and deactivates running asynchronous code). Turning on asynchronous
+          code with the pseudo-sync loop is undefined behavior and may lead
+          IPython to crash.
+
+ If the passed parameter does not match any of the above and is a python
+ identifier, get said object from user namespace and set it as the
+ runner, and activate autoawait.
+
+ If the object is a fully qualified object name, attempt to import it and
+ set it as the runner, and activate autoawait.
+
+ The exact behavior of autoawait is experimental and subject to change
+        across versions of IPython and Python.
+ """
+
+ param = parameter_s.strip()
+ d = {True: "on", False: "off"}
+
+ if not param:
+ print("IPython autoawait is `{}`, and set to use `{}`".format(
+ d[self.shell.autoawait],
+ self.shell.loop_runner
+ ))
+ return None
+
+ if param.lower() in ('false', 'off'):
+ self.shell.autoawait = False
+ return None
+ if param.lower() in ('true', 'on'):
+ self.shell.autoawait = True
+ return None
+
+ if param in self.shell.loop_runner_map:
+ self.shell.loop_runner, self.shell.autoawait = self.shell.loop_runner_map[param]
+ return None
+
+ if param in self.shell.user_ns :
+ self.shell.loop_runner = self.shell.user_ns[param]
+ self.shell.autoawait = True
+ return None
+
+ runner = import_item(param)
+
+ self.shell.loop_runner = runner
+ self.shell.autoawait = True
diff --git a/contrib/python/ipython/py3/IPython/core/magics/code.py b/contrib/python/ipython/py3/IPython/core/magics/code.py
new file mode 100644
index 0000000000..65ba52b8bb
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/code.py
@@ -0,0 +1,755 @@
+"""Implementation of code management magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import inspect
+import io
+import os
+import re
+import sys
+import ast
+from itertools import chain
+from urllib.request import Request, urlopen
+from urllib.parse import urlencode
+from pathlib import Path
+
+# Our own packages
+from IPython.core.error import TryNext, StdinNotImplementedError, UsageError
+from IPython.core.macro import Macro
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.oinspect import find_file, find_source_lines
+from IPython.core.release import version
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.path import get_py_filename
+from warnings import warn
+from logging import error
+from IPython.utils.text import get_text_list
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+# Used for exception handling in magic_edit
+class MacroToEdit(ValueError): pass
+
+ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
+
+# To match, e.g. 8-10 1:5 :10 3-
+range_re = re.compile(r"""
+(?P<start>\d+)?
+((?P<sep>[\-:])
+ (?P<end>\d+)?)?
+$""", re.VERBOSE)
+
+
+def extract_code_ranges(ranges_str):
+    """Turn a string of ranges for %load into 2-tuples of (start, stop),
+ ready to use as a slice of the content split by lines.
+
+ Examples
+ --------
+    list(extract_code_ranges("5-10 2"))
+ [(4, 10), (1, 2)]
+ """
+ for range_str in ranges_str.split():
+ rmatch = range_re.match(range_str)
+ if not rmatch:
+ continue
+ sep = rmatch.group("sep")
+ start = rmatch.group("start")
+ end = rmatch.group("end")
+
+ if sep == '-':
+ start = int(start) - 1 if start else None
+ end = int(end) if end else None
+ elif sep == ':':
+ start = int(start) - 1 if start else None
+ end = int(end) - 1 if end else None
+ else:
+ end = int(start)
+ start = int(start) - 1
+ yield (start, end)
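+
+
+# NOTE: editorial illustration only, not part of upstream IPython. It shows how
+# the (start, end) pairs yielded above are meant to be consumed; CodeMagics.load
+# below does essentially the same thing when handling its -r option.
+def _example_slice_by_ranges(text, ranges_str):
+    """Return only the lines of ``text`` selected by ``ranges_str``, e.g. "1-3 7"."""
+    lines = text.split("\n")
+    picked = [lines[slice(start, end)] for start, end in extract_code_ranges(ranges_str)]
+    return "\n".join(chain.from_iterable(picked))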
+
+
+def extract_symbols(code, symbols):
+ """
+ Return a tuple (blocks, not_found)
+ where ``blocks`` is a list of code fragments
+ for each symbol parsed from code, and ``not_found`` are
+ symbols not found in the code.
+
+ For example::
+
+ In [1]: code = '''a = 10
+ ...: def b(): return 42
+ ...: class A: pass'''
+
+ In [2]: extract_symbols(code, 'A,b,z')
+ Out[2]: (['class A: pass\\n', 'def b(): return 42\\n'], ['z'])
+ """
+ symbols = symbols.split(',')
+
+ # this will raise SyntaxError if code isn't valid Python
+ py_code = ast.parse(code)
+
+ marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
+ code = code.split('\n')
+
+ symbols_lines = {}
+
+    # We already know the start_lineno of each symbol (marks).
+    # To find each end_lineno, we traverse the symbols in reverse order and
+    # trim any trailing blank lines from each symbol's block.
+ end = len(code)
+ for name, start in reversed(marks):
+ while not code[end - 1].strip():
+ end -= 1
+ if name:
+ symbols_lines[name] = (start - 1, end)
+ end = start - 1
+
+ # Now symbols_lines is a map
+ # {'symbol_name': (start_lineno, end_lineno), ...}
+
+ # fill a list with chunks of codes for each requested symbol
+ blocks = []
+ not_found = []
+ for symbol in symbols:
+ if symbol in symbols_lines:
+ start, end = symbols_lines[symbol]
+ blocks.append('\n'.join(code[start:end]) + '\n')
+ else:
+ not_found.append(symbol)
+
+ return blocks, not_found
+
+def strip_initial_indent(lines):
+ """For %load, strip indent from lines until finding an unindented line.
+
+ https://github.com/ipython/ipython/issues/9775
+ """
+ indent_re = re.compile(r'\s+')
+
+ it = iter(lines)
+ first_line = next(it)
+ indent_match = indent_re.match(first_line)
+
+ if indent_match:
+ # First line was indented
+ indent = indent_match.group()
+ yield first_line[len(indent):]
+
+ for line in it:
+ if line.startswith(indent):
+ yield line[len(indent):]
+ else:
+ # Less indented than the first line - stop dedenting
+ yield line
+ break
+ else:
+ yield first_line
+
+ # Pass the remaining lines through without dedenting
+ for line in it:
+ yield line
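+
+
+# NOTE: editorial illustration only, not part of upstream IPython. A tiny,
+# never-called sanity check showing the dedenting behaviour of
+# strip_initial_indent.
+def _example_strip_initial_indent():
+    """Illustrative only: leading indented lines are dedented until an unindented one."""
+    lines = ["    a = 1", "    b = 2", "c = 3"]
+    assert list(strip_initial_indent(lines)) == ["a = 1", "b = 2", "c = 3"]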
+
+
+class InteractivelyDefined(Exception):
+ """Exception for interactively defined variable in magic_edit"""
+ def __init__(self, index):
+ self.index = index
+
+
+@magics_class
+class CodeMagics(Magics):
+ """Magics related to code management (loading, saving, editing, ...)."""
+
+ def __init__(self, *args, **kwargs):
+ self._knowntemps = set()
+ super(CodeMagics, self).__init__(*args, **kwargs)
+
+ @line_magic
+ def save(self, parameter_s=''):
+ """Save a set of lines or a macro to a given filename.
+
+ Usage:\\
+ %save [options] filename [history]
+
+ Options:
+
+ -r: use 'raw' input. By default, the 'processed' history is used,
+ so that magics are loaded in their transformed version to valid
+          Python. If this option is given, the raw input as typed at the
+          command line is used instead.
+
+ -f: force overwrite. If file exists, %save will prompt for overwrite
+ unless -f is given.
+
+ -a: append to the file instead of overwriting it.
+
+ The history argument uses the same syntax as %history for input ranges,
+ then saves the lines to the filename you specify.
+
+ If no ranges are specified, saves history of the current session up to
+ this point.
+
+ It adds a '.py' extension to the file if you don't do so yourself, and
+ it asks for confirmation before overwriting existing files.
+
+        If the `-r` option is used, the default extension is `.ipy`.
+ """
+
+ opts,args = self.parse_options(parameter_s,'fra',mode='list')
+ if not args:
+ raise UsageError('Missing filename.')
+ raw = 'r' in opts
+ force = 'f' in opts
+ append = 'a' in opts
+ mode = 'a' if append else 'w'
+ ext = '.ipy' if raw else '.py'
+ fname, codefrom = args[0], " ".join(args[1:])
+ if not fname.endswith(('.py','.ipy')):
+ fname += ext
+ fname = os.path.expanduser(fname)
+ file_exists = os.path.isfile(fname)
+ if file_exists and not force and not append:
+ try:
+ overwrite = self.shell.ask_yes_no('File `%s` exists. Overwrite (y/[N])? ' % fname, default='n')
+ except StdinNotImplementedError:
+ print("File `%s` exists. Use `%%save -f %s` to force overwrite" % (fname, parameter_s))
+ return
+ if not overwrite :
+ print('Operation cancelled.')
+ return
+ try:
+ cmds = self.shell.find_user_code(codefrom,raw)
+ except (TypeError, ValueError) as e:
+ print(e.args[0])
+ return
+ with io.open(fname, mode, encoding="utf-8") as f:
+ if not file_exists or not append:
+ f.write("# coding: utf-8\n")
+ f.write(cmds)
+ # make sure we end on a newline
+ if not cmds.endswith('\n'):
+ f.write('\n')
+ print('The following commands were written to file `%s`:' % fname)
+ print(cmds)
+
+ @line_magic
+ def pastebin(self, parameter_s=''):
+ """Upload code to dpaste.com, returning the URL.
+
+ Usage:\\
+ %pastebin [-d "Custom description"][-e 24] 1-7
+
+ The argument can be an input history range, a filename, or the name of a
+ string or macro.
+
+ If no arguments are given, uploads the history of this session up to
+ this point.
+
+ Options:
+
+ -d: Pass a custom description. The default will say
+ "Pasted from IPython".
+          -e: Pass the number of days after which the link expires.
+              The default is 7 days.
+ """
+ opts, args = self.parse_options(parameter_s, "d:e:")
+
+ try:
+ code = self.shell.find_user_code(args)
+ except (ValueError, TypeError) as e:
+ print(e.args[0])
+ return
+
+ expiry_days = 7
+ try:
+ expiry_days = int(opts.get("e", 7))
+ except ValueError as e:
+ print(e.args[0].capitalize())
+ return
+ if expiry_days < 1 or expiry_days > 365:
+ print("Expiry days should be in range of 1 to 365")
+ return
+
+ post_data = urlencode(
+ {
+ "title": opts.get("d", "Pasted from IPython"),
+ "syntax": "python",
+ "content": code,
+ "expiry_days": expiry_days,
+ }
+ ).encode("utf-8")
+
+ request = Request(
+ "https://dpaste.com/api/v2/",
+ headers={"User-Agent": "IPython v{}".format(version)},
+ )
+ response = urlopen(request, post_data)
+ return response.headers.get('Location')
+
+ @line_magic
+ def loadpy(self, arg_s):
+ """Alias of `%load`
+
+ `%loadpy` has gained some flexibility and dropped the requirement of a `.py`
+        extension. So it has been renamed simply to %load. You can look at
+ `%load`'s docstring for more info.
+ """
+ self.load(arg_s)
+
+ @line_magic
+ def load(self, arg_s):
+ """Load code into the current frontend.
+
+ Usage:\\
+ %load [options] source
+
+ where source can be a filename, URL, input history range, macro, or
+ element in the user namespace
+
+ If no arguments are given, loads the history of this session up to this
+ point.
+
+ Options:
+
+ -r <lines>: Specify lines or ranges of lines to load from the source.
+ Ranges could be specified as x-y (x..y) or in python-style x:y
+ (x..(y-1)). Both limits x and y can be left blank (meaning the
+ beginning and end of the file, respectively).
+
+ -s <symbols>: Specify function or classes to load from python source.
+
+ -y : Don't ask confirmation for loading source above 200 000 characters.
+
+ -n : Include the user's namespace when searching for source code.
+
+        This magic command can take a local filename, a URL, a history
+        range (see %history) or a macro as argument. It will prompt for
+        confirmation before loading source with more than 200 000 characters,
+        unless the -y flag is passed or the frontend does not support raw_input::
+
+ %load
+ %load myscript.py
+ %load 7-27
+ %load myMacro
+ %load http://www.example.com/myscript.py
+ %load -r 5-10 myscript.py
+ %load -r 10-20,30,40: foo.py
+ %load -s MyClass,wonder_function myscript.py
+ %load -n MyClass
+ %load -n my_module.wonder_function
+ """
+ opts,args = self.parse_options(arg_s,'yns:r:')
+ search_ns = 'n' in opts
+ contents = self.shell.find_user_code(args, search_ns=search_ns)
+
+ if 's' in opts:
+ try:
+ blocks, not_found = extract_symbols(contents, opts['s'])
+ except SyntaxError:
+ # non python code
+ error("Unable to parse the input as valid Python code")
+ return
+
+ if len(not_found) == 1:
+ warn('The symbol `%s` was not found' % not_found[0])
+ elif len(not_found) > 1:
+ warn('The symbols %s were not found' % get_text_list(not_found,
+ wrap_item_with='`')
+ )
+
+ contents = '\n'.join(blocks)
+
+ if 'r' in opts:
+ ranges = opts['r'].replace(',', ' ')
+ lines = contents.split('\n')
+ slices = extract_code_ranges(ranges)
+ contents = [lines[slice(*slc)] for slc in slices]
+ contents = '\n'.join(strip_initial_indent(chain.from_iterable(contents)))
+
+ l = len(contents)
+
+        # 200 000 characters is ~ 2500 full 80-character lines,
+        # so on average more than 5000 lines
+ if l > 200000 and 'y' not in opts:
+ try:
+ ans = self.shell.ask_yes_no(("The text you're trying to load seems pretty big"\
+ " (%d characters). Continue (y/[N]) ?" % l), default='n' )
+ except StdinNotImplementedError:
+ #assume yes if raw input not implemented
+ ans = True
+
+ if ans is False :
+ print('Operation cancelled.')
+ return
+
+ contents = "# %load {}\n".format(arg_s) + contents
+
+ self.shell.set_next_input(contents, replace=True)
+
+ @staticmethod
+ def _find_edit_target(shell, args, opts, last_call):
+ """Utility method used by magic_edit to find what to edit."""
+
+ def make_filename(arg):
+ "Make a filename from the given args"
+ try:
+ filename = get_py_filename(arg)
+ except IOError:
+ # If it ends with .py but doesn't already exist, assume we want
+ # a new file.
+ if arg.endswith('.py'):
+ filename = arg
+ else:
+ filename = None
+ return filename
+
+ # Set a few locals from the options for convenience:
+ opts_prev = 'p' in opts
+ opts_raw = 'r' in opts
+
+ # custom exceptions
+ class DataIsObject(Exception): pass
+
+ # Default line number value
+ lineno = opts.get('n',None)
+
+ if opts_prev:
+ args = '_%s' % last_call[0]
+ if args not in shell.user_ns:
+ args = last_call[1]
+
+ # by default this is done with temp files, except when the given
+ # arg is a filename
+ use_temp = True
+
+ data = ''
+
+ # First, see if the arguments should be a filename.
+ filename = make_filename(args)
+ if filename:
+ use_temp = False
+ elif args:
+ # Mode where user specifies ranges of lines, like in %macro.
+ data = shell.extract_input_lines(args, opts_raw)
+ if not data:
+ try:
+ # Load the parameter given as a variable. If not a string,
+ # process it as an object instead (below)
+
+ #print '*** args',args,'type',type(args) # dbg
+ data = eval(args, shell.user_ns)
+ if not isinstance(data, str):
+ raise DataIsObject
+
+ except (NameError,SyntaxError):
+ # given argument is not a variable, try as a filename
+ filename = make_filename(args)
+ if filename is None:
+ warn("Argument given (%s) can't be found as a variable "
+ "or as a filename." % args)
+ return (None, None, None)
+ use_temp = False
+
+ except DataIsObject as e:
+ # macros have a special edit function
+ if isinstance(data, Macro):
+ raise MacroToEdit(data) from e
+
+ # For objects, try to edit the file where they are defined
+ filename = find_file(data)
+ if filename:
+ if 'fakemodule' in filename.lower() and \
+ inspect.isclass(data):
+ # class created by %edit? Try to find source
+ # by looking for method definitions instead, the
+ # __module__ in those classes is FakeModule.
+ attrs = [getattr(data, aname) for aname in dir(data)]
+ for attr in attrs:
+ if not inspect.ismethod(attr):
+ continue
+ filename = find_file(attr)
+ if filename and \
+ 'fakemodule' not in filename.lower():
+ # change the attribute to be the edit
+ # target instead
+ data = attr
+ break
+
+ m = ipython_input_pat.match(os.path.basename(filename))
+ if m:
+ raise InteractivelyDefined(int(m.groups()[0])) from e
+
+ datafile = 1
+ if filename is None:
+ filename = make_filename(args)
+ datafile = 1
+ if filename is not None:
+ # only warn about this if we get a real name
+ warn('Could not find file where `%s` is defined.\n'
+ 'Opening a file named `%s`' % (args, filename))
+ # Now, make sure we can actually read the source (if it was
+ # in a temp file it's gone by now).
+ if datafile:
+ if lineno is None:
+ lineno = find_source_lines(data)
+ if lineno is None:
+ filename = make_filename(args)
+ if filename is None:
+ warn('The file where `%s` was defined '
+ 'cannot be read or found.' % data)
+ return (None, None, None)
+ use_temp = False
+
+ if use_temp:
+ filename = shell.mktempfile(data)
+ print('IPython will make a temporary file named:',filename)
+
+ # use last_call to remember the state of the previous call, but don't
+ # let it be clobbered by successive '-p' calls.
+ try:
+ last_call[0] = shell.displayhook.prompt_count
+ if not opts_prev:
+ last_call[1] = args
+ except:
+ pass
+
+
+ return filename, lineno, use_temp
+
+ def _edit_macro(self,mname,macro):
+ """open an editor with the macro data in a file"""
+ filename = self.shell.mktempfile(macro.value)
+ self.shell.hooks.editor(filename)
+
+ # and make a new macro object, to replace the old one
+ mvalue = Path(filename).read_text(encoding="utf-8")
+ self.shell.user_ns[mname] = Macro(mvalue)
+
+ @skip_doctest
+ @line_magic
+ def edit(self, parameter_s='',last_call=['','']):
+ """Bring up an editor and execute the resulting code.
+
+ Usage:
+ %edit [options] [args]
+
+ %edit runs IPython's editor hook. The default version of this hook is
+ set to call the editor specified by your $EDITOR environment variable.
+ If this isn't found, it will default to vi under Linux/Unix and to
+ notepad under Windows. See the end of this docstring for how to change
+ the editor hook.
+
+ You can also set the value of this editor via the
+ ``TerminalInteractiveShell.editor`` option in your configuration file.
+ This is useful if you wish to use a different editor from your typical
+ default with IPython (and for Windows users who typically don't set
+ environment variables).
+
+ This command allows you to conveniently edit multi-line code right in
+ your IPython session.
+
+ If called without arguments, %edit opens up an empty editor with a
+ temporary file and will execute the contents of this file when you
+ close it (don't forget to save it!).
+
+
+ Options:
+
+ -n <number>: open the editor at a specified line number. By default,
+ the IPython editor hook uses the unix syntax 'editor +N filename', but
+ you can configure this by providing your own modified hook if your
+ favorite editor supports line-number specifications with a different
+ syntax.
+
+ -p: this will call the editor with the same data as the previous time
+ it was used, regardless of how long ago (in your current session) it
+ was.
+
+ -r: use 'raw' input. This option only applies to input taken from the
+ user's history. By default, the 'processed' history is used, so that
+ magics are loaded in their transformed version to valid Python. If
+        this option is given, the raw input as typed at the command line is
+        used instead. When you exit the editor, it will be executed by
+ IPython's own processor.
+
+ -x: do not execute the edited code immediately upon exit. This is
+ mainly useful if you are editing programs which need to be called with
+ command line arguments, which you can then do using %run.
+
+
+ Arguments:
+
+ If arguments are given, the following possibilities exist:
+
+ - If the argument is a filename, IPython will load that into the
+ editor. It will execute its contents with execfile() when you exit,
+ loading any code in the file into your interactive namespace.
+
+ - The arguments are ranges of input history, e.g. "7 ~1/4-6".
+ The syntax is the same as in the %history magic.
+
+ - If the argument is a string variable, its contents are loaded
+ into the editor. You can thus edit any string which contains
+ python code (including the result of previous edits).
+
+ - If the argument is the name of an object (other than a string),
+ IPython will try to locate the file where it was defined and open the
+ editor at the point where it is defined. You can use `%edit function`
+ to load an editor exactly at the point where 'function' is defined,
+ edit it and have the file be executed automatically.
+
+ - If the object is a macro (see %macro for details), this opens up your
+ specified editor with a temporary file containing the macro's data.
+ Upon exit, the macro is reloaded with the contents of the file.
+
+ Note: opening at an exact line is only supported under Unix, and some
+ editors (like kedit and gedit up to Gnome 2.8) do not understand the
+ '+NUMBER' parameter necessary for this feature. Good editors like
+ (X)Emacs, vi, jed, pico and joe all do.
+
+ After executing your code, %edit will return as output the code you
+ typed in the editor (except when it was an existing file). This way
+ you can reload the code in further invocations of %edit as a variable,
+ via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
+ the output.
+
+ Note that %edit is also available through the alias %ed.
+
+ This is an example of creating a simple function inside the editor and
+ then modifying it. First, start up the editor::
+
+ In [1]: edit
+ Editing... done. Executing edited code...
+ Out[1]: 'def foo():\\n print "foo() was defined in an editing
+ session"\\n'
+
+ We can then call the function foo()::
+
+ In [2]: foo()
+ foo() was defined in an editing session
+
+ Now we edit foo. IPython automatically loads the editor with the
+ (temporary) file where foo() was previously defined::
+
+ In [3]: edit foo
+ Editing... done. Executing edited code...
+
+ And if we call foo() again we get the modified version::
+
+ In [4]: foo()
+ foo() has now been changed!
+
+ Here is an example of how to edit a code snippet successive
+ times. First we call the editor::
+
+ In [5]: edit
+ Editing... done. Executing edited code...
+ hello
+ Out[5]: "print 'hello'\\n"
+
+ Now we call it again with the previous output (stored in _)::
+
+ In [6]: edit _
+ Editing... done. Executing edited code...
+ hello world
+ Out[6]: "print 'hello world'\\n"
+
+ Now we call it with the output #8 (stored in _8, also as Out[8])::
+
+ In [7]: edit _8
+ Editing... done. Executing edited code...
+ hello again
+ Out[7]: "print 'hello again'\\n"
+
+
+ Changing the default editor hook:
+
+ If you wish to write your own editor hook, you can put it in a
+ configuration file which you load at startup time. The default hook
+ is defined in the IPython.core.hooks module, and you can use that as a
+ starting example for further modifications. That file also has
+ general instructions on how to set a new hook for use once you've
+ defined it."""
+ opts,args = self.parse_options(parameter_s,'prxn:')
+
+ try:
+ filename, lineno, is_temp = self._find_edit_target(self.shell,
+ args, opts, last_call)
+ except MacroToEdit as e:
+ self._edit_macro(args, e.args[0])
+ return
+ except InteractivelyDefined as e:
+ print("Editing In[%i]" % e.index)
+ args = str(e.index)
+ filename, lineno, is_temp = self._find_edit_target(self.shell,
+ args, opts, last_call)
+ if filename is None:
+ # nothing was found, warnings have already been issued,
+ # just give up.
+ return
+
+ if is_temp:
+ self._knowntemps.add(filename)
+ elif (filename in self._knowntemps):
+ is_temp = True
+
+
+ # do actual editing here
+ print('Editing...', end=' ')
+ sys.stdout.flush()
+ filepath = Path(filename)
+ try:
+ # Quote filenames that may have spaces in them when opening
+ # the editor
+ quoted = filename = str(filepath.absolute())
+ if " " in quoted:
+ quoted = "'%s'" % quoted
+ self.shell.hooks.editor(quoted, lineno)
+ except TryNext:
+ warn('Could not open editor')
+ return
+
+ # XXX TODO: should this be generalized for all string vars?
+ # For now, this is special-cased to blocks created by cpaste
+ if args.strip() == "pasted_block":
+ self.shell.user_ns["pasted_block"] = filepath.read_text(encoding="utf-8")
+
+ if 'x' in opts: # -x prevents actual execution
+ print()
+ else:
+ print('done. Executing edited code...')
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ if not is_temp:
+ self.shell.user_ns["__file__"] = filename
+ if "r" in opts: # Untranslated IPython code
+ source = filepath.read_text(encoding="utf-8")
+ self.shell.run_cell(source, store_history=False)
+ else:
+ self.shell.safe_execfile(filename, self.shell.user_ns,
+ self.shell.user_ns)
+
+ if is_temp:
+ try:
+ return filepath.read_text(encoding="utf-8")
+ except IOError as msg:
+ if Path(msg.filename) == filepath:
+ warn('File not found. Did you forget to save?')
+ return
+ else:
+ self.shell.showtraceback()
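+
+ # A minimal sketch of the custom editor hook described in the %edit
+ # docstring above; the function name and editor command are hypothetical,
+ # and assume a system where ``nano`` is available::
+ #
+ # import subprocess
+ # from IPython import get_ipython
+ #
+ # def my_editor(self, filename, linenum=None, wait=True):
+ # # ``self`` is the InteractiveShell the hook is bound to
+ # subprocess.call(["nano", "+%d" % (linenum or 1), filename])
+ #
+ # get_ipython().set_hook('editor', my_editor)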
diff --git a/contrib/python/ipython/py3/IPython/core/magics/config.py b/contrib/python/ipython/py3/IPython/core/magics/config.py
new file mode 100644
index 0000000000..9e1cb38c25
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/config.py
@@ -0,0 +1,140 @@
+"""Implementation of configuration-related magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import re
+
+# Our own packages
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from logging import error
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+reg = re.compile(r'^\w+\.\w+$')
+@magics_class
+class ConfigMagics(Magics):
+
+ def __init__(self, shell):
+ super(ConfigMagics, self).__init__(shell)
+ self.configurables = []
+
+ @line_magic
+ def config(self, s):
+ """configure IPython
+
+ %config Class[.trait=value]
+
+ This magic exposes most of the IPython config system. Any
+ Configurable class can be configured with a single line of the form::
+
+ %config Class.trait=value
+
+ Where `value` will be resolved in the user's namespace, if it is an
+ expression or variable name.
+
+ Examples
+ --------
+
+ To see what classes are available for config, pass no arguments::
+
+ In [1]: %config
+ Available objects for config:
+ AliasManager
+ DisplayFormatter
+ HistoryManager
+ IPCompleter
+ LoggingMagics
+ MagicsManager
+ OSMagics
+ PrefilterManager
+ ScriptMagics
+ TerminalInteractiveShell
+
+ To view what is configurable on a given class, just pass the class
+ name::
+
+ In [2]: %config LoggingMagics
+ LoggingMagics(Magics) options
+ ---------------------------
+ LoggingMagics.quiet=<Bool>
+ Suppress output of log state when logging is enabled
+ Current: False
+
+ but the real use is in setting values::
+
+ In [3]: %config LoggingMagics.quiet = True
+
+ and these values are read from the user_ns if they are variables::
+
+ In [4]: feeling_quiet=False
+
+ In [5]: %config LoggingMagics.quiet = feeling_quiet
+
+ """
+ from traitlets.config.loader import Config
+ # some IPython objects are Configurable, but do not yet have
+ # any configurable traits. Exclude them from the effects of
+ # this magic, as their presence is just noise:
+ configurables = sorted(set([ c for c in self.shell.configurables
+ if c.__class__.class_traits(config=True)
+ ]), key=lambda x: x.__class__.__name__)
+ classnames = [ c.__class__.__name__ for c in configurables ]
+
+ line = s.strip()
+ if not line:
+ # print available configurable names
+ print("Available objects for config:")
+ for name in classnames:
+ print(" ", name)
+ return
+ elif line in classnames:
+ # `%config TerminalInteractiveShell` will print trait info for
+ # TerminalInteractiveShell
+ c = configurables[classnames.index(line)]
+ cls = c.__class__
+ help = cls.class_get_help(c)
+ # strip leading '--' from cl-args:
+ help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
+ print(help)
+ return
+ elif reg.match(line):
+ cls, attr = line.split('.')
+ return getattr(configurables[classnames.index(cls)],attr)
+ elif '=' not in line:
+ msg = "Invalid config statement: %r, "\
+ "should be `Class.trait = value`."
+
+ ll = line.lower()
+ for classname in classnames:
+ if ll == classname.lower():
+ msg = msg + '\nDid you mean %s (note the case)?' % classname
+ break
+
+ raise UsageError( msg % line)
+
+ # otherwise, assume we are setting configurables.
+ # leave quotes on args when splitting, because we want
+ # unquoted args to eval in user_ns
+ cfg = Config()
+ exec("cfg."+line, self.shell.user_ns, locals())
+
+ for configurable in configurables:
+ try:
+ configurable.update_config(cfg)
+ except Exception as e:
+ error(e)
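+
+ # A short illustrative session (values are hypothetical): passing just
+ # ``Class.trait`` returns the current value, and right-hand sides are
+ # evaluated in the user namespace::
+ #
+ # In [1]: %config LoggingMagics.quiet
+ # Out[1]: False
+ #
+ # In [2]: quiet_pref = True
+ #
+ # In [3]: %config LoggingMagics.quiet = quiet_pref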
diff --git a/contrib/python/ipython/py3/IPython/core/magics/display.py b/contrib/python/ipython/py3/IPython/core/magics/display.py
new file mode 100644
index 0000000000..6c0eff6884
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/display.py
@@ -0,0 +1,93 @@
+"""Simple magics for display formats"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from IPython.display import display, Javascript, Latex, SVG, HTML, Markdown
+from IPython.core.magic import (
+ Magics, magics_class, cell_magic
+)
+from IPython.core import magic_arguments
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+
+@magics_class
+class DisplayMagics(Magics):
+ """Magics for displaying various output types with literals
+
+ Defines javascript/latex/svg/html/markdown cell magics for writing
+ blocks in those languages, to be rendered in the frontend.
+ """
+
+ @cell_magic
+ def js(self, line, cell):
+ """Run the cell block of Javascript code
+
+ Alias of `%%javascript`
+
+ Starting with IPython 8.0, %%javascript is pending deprecation, to be
+ replaced by a more flexible system.
+
+ Please see https://github.com/ipython/ipython/issues/13376
+ """
+ self.javascript(line, cell)
+
+ @cell_magic
+ def javascript(self, line, cell):
+ """Run the cell block of Javascript code
+
+ Starting with IPython 8.0, %%javascript is pending deprecation, to be
+ replaced by a more flexible system.
+
+ Please see https://github.com/ipython/ipython/issues/13376
+ """
+ display(Javascript(cell))
+
+
+ @cell_magic
+ def latex(self, line, cell):
+ """Render the cell as a block of LaTeX
+
+ The subset of LaTeX which is supported depends on the implementation in
+ the client. In the Jupyter Notebook, this magic only renders the subset
+ of LaTeX defined by MathJax
+ [here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
+ display(Latex(cell))
+
+ @cell_magic
+ def svg(self, line, cell):
+ """Render the cell as an SVG literal"""
+ display(SVG(cell))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '--isolated', action='store_true', default=False,
+ help="""Annotate the cell as 'isolated'.
+Isolated cells are rendered inside their own <iframe> tag"""
+ )
+ @cell_magic
+ def html(self, line, cell):
+ """Render the cell as a block of HTML"""
+ args = magic_arguments.parse_argstring(self.html, line)
+ html = HTML(cell)
+ if args.isolated:
+ display(html, metadata={'text/html':{'isolated':True}})
+ else:
+ display(html)
+
+ @cell_magic
+ def markdown(self, line, cell):
+ """Render the cell as Markdown text block"""
+ display(Markdown(cell))
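+
+ # An illustrative notebook session for the magics above (the snippets are
+ # arbitrary, and actual rendering depends on the frontend)::
+ #
+ # In [1]: %%html --isolated
+ # ...: <h1>Rendered inside its own iframe</h1>
+ #
+ # In [2]: %%latex
+ # ...: \begin{equation} e^{i\pi} + 1 = 0 \end{equation}
+ #
+ # In [3]: %%markdown
+ # ...: **bold** and `code`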
diff --git a/contrib/python/ipython/py3/IPython/core/magics/execution.py b/contrib/python/ipython/py3/IPython/core/magics/execution.py
new file mode 100644
index 0000000000..228cbd9da7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/execution.py
@@ -0,0 +1,1522 @@
+# -*- coding: utf-8 -*-
+"""Implementation of execution-related magic functions."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import ast
+import bdb
+import builtins as builtin_mod
+import cProfile as profile
+import gc
+import itertools
+import math
+import os
+import pstats
+import re
+import shlex
+import sys
+import time
+import timeit
+from ast import Module
+from io import StringIO
+from logging import error
+from pathlib import Path
+from pdb import Restart
+from warnings import warn
+
+from IPython.core import magic_arguments, oinspect, page
+from IPython.core.error import UsageError
+from IPython.core.macro import Macro
+from IPython.core.magic import (
+ Magics,
+ cell_magic,
+ line_cell_magic,
+ line_magic,
+ magics_class,
+ needs_local_scope,
+ no_var_expand,
+ output_can_be_silenced,
+ on_off,
+)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.capture import capture_output
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.ipstruct import Struct
+from IPython.utils.module_paths import find_mod
+from IPython.utils.path import get_py_filename, shellglob
+from IPython.utils.timing import clock, clock2
+from IPython.core.displayhook import DisplayHook
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+
+class TimeitResult(object):
+ """
+ Object returned by the timeit magic with info about the run.
+
+ Contains the following attributes:
+
+ loops: (int) number of loops done per measurement
+ repeat: (int) number of times the measurement has been repeated
+ best: (float) best execution time per loop (in s)
+ worst: (float) worst execution time per loop (in s)
+ all_runs: (list of float) execution time of each run (in s)
+ compile_time: (float) time of statement compilation (s)
+
+ """
+ def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
+ self.loops = loops
+ self.repeat = repeat
+ self.best = best
+ self.worst = worst
+ self.all_runs = all_runs
+ self.compile_time = compile_time
+ self._precision = precision
+ self.timings = [ dt / self.loops for dt in all_runs]
+
+ @property
+ def average(self):
+ return math.fsum(self.timings) / len(self.timings)
+
+ @property
+ def stdev(self):
+ mean = self.average
+ return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
+
+ def __str__(self):
+ pm = '+-'
+ if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+ try:
+ u'\xb1'.encode(sys.stdout.encoding)
+ pm = u'\xb1'
+ except:
+ pass
+ return "{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+ pm=pm,
+ runs=self.repeat,
+ loops=self.loops,
+ loop_plural="" if self.loops == 1 else "s",
+ run_plural="" if self.repeat == 1 else "s",
+ mean=_format_time(self.average, self._precision),
+ std=_format_time(self.stdev, self._precision),
+ )
+
+ def _repr_pretty_(self, p , cycle):
+ unic = self.__str__()
+ p.text(u'<TimeitResult : '+unic+u'>')
+
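+# A brief sketch of inspecting a TimeitResult when ``%timeit`` is invoked
+# with ``-o`` (the timed expression and the variable name are arbitrary)::
+#
+# In [1]: res = %timeit -o sum(range(1000))
+#
+# In [2]: res.best, res.worst, res.average, res.stdev
+#
+# In [3]: res.all_runs # total seconds for each repeat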
+
+class TimeitTemplateFiller(ast.NodeTransformer):
+ """Fill in the AST template for timing execution.
+
+ This is quite closely tied to the template definition, which is in
+ :meth:`ExecutionMagics.timeit`.
+ """
+ def __init__(self, ast_setup, ast_stmt):
+ self.ast_setup = ast_setup
+ self.ast_stmt = ast_stmt
+
+ def visit_FunctionDef(self, node):
+ "Fill in the setup statement"
+ self.generic_visit(node)
+ if node.name == "inner":
+ node.body[:1] = self.ast_setup.body
+
+ return node
+
+ def visit_For(self, node):
+ "Fill in the statement to be timed"
+ if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
+ node.body = self.ast_stmt.body
+ return node
+
+
+class Timer(timeit.Timer):
+ """Timer class that explicitly uses self.inner
+
+ which is an undocumented implementation detail of CPython,
+ not shared by PyPy.
+ """
+ # Timer.timeit copied from CPython 3.4.2
+ def timeit(self, number=timeit.default_number):
+ """Time 'number' executions of the main statement.
+
+ To be precise, this executes the setup statement once, and
+ then returns the time it takes to execute the main statement
+ a number of times, as a float measured in seconds. The
+ argument is the number of times through the loop, defaulting
+ to one million. The main statement, the setup statement and
+ the timer function to be used are passed to the constructor.
+ """
+ it = itertools.repeat(None, number)
+ gcold = gc.isenabled()
+ gc.disable()
+ try:
+ timing = self.inner(it, self.timer)
+ finally:
+ if gcold:
+ gc.enable()
+ return timing
+
+
+@magics_class
+class ExecutionMagics(Magics):
+ """Magics related to code execution, debugging, profiling, etc.
+
+ """
+
+ def __init__(self, shell):
+ super(ExecutionMagics, self).__init__(shell)
+ # Default execution function used to actually run user code.
+ self.default_runner = None
+
+ @skip_doctest
+ @no_var_expand
+ @line_cell_magic
+ def prun(self, parameter_s='', cell=None):
+
+ """Run a statement through the python code profiler.
+
+ Usage, in line mode:
+ %prun [options] statement
+
+ Usage, in cell mode:
+ %%prun [options] [statement]
+ code...
+ code...
+
+ In cell mode, the additional code lines are appended to the (possibly
+ empty) statement in the first line. Cell mode allows you to easily
+ profile multiline blocks without having to put them in a separate
+ function.
+
+ The given statement (which doesn't require quote marks) is run via the
+ python profiler in a manner similar to the profile.run() function.
+ Namespaces are internally managed to work correctly; profile.run
+ cannot be used in IPython because it makes certain assumptions about
+ namespaces which do not hold under IPython.
+
+ Options:
+
+ -l <limit>
+ you can place restrictions on what or how much of the
+ profile gets printed. The limit value can be:
+
+ * A string: only information for function names containing this string
+ is printed.
+
+ * An integer: only this many lines are printed.
+
+ * A float (between 0 and 1): this fraction of the report is printed
+ (for example, use a limit of 0.4 to see the topmost 40% only).
+
+ You can combine several limits with repeated use of the option. For
+ example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
+ information about class constructors.
+
+ -r
+ return the pstats.Stats object generated by the profiling. This
+ object has all the information about the profile in it, and you can
+ later use it for further analysis or in other functions.
+
+ -s <key>
+ sort profile by given key. You can provide more than one key
+ by using the option several times: '-s key1 -s key2 -s key3...'. The
+ default sorting key is 'time'.
+
+ The following is copied verbatim from the profile documentation
+ referenced below:
+
+ When more than one key is provided, additional keys are used as
+ secondary criteria when there is equality in all keys selected
+ before them.
+
+ Abbreviations can be used for any key names, as long as the
+ abbreviation is unambiguous. The following are the keys currently
+ defined:
+
+ ============ =====================
+ Valid Arg Meaning
+ ============ =====================
+ "calls" call count
+ "cumulative" cumulative time
+ "file" file name
+ "module" file name
+ "pcalls" primitive call count
+ "line" line number
+ "name" function name
+ "nfl" name/file/line
+ "stdname" standard name
+ "time" internal time
+ ============ =====================
+
+ Note that all sorts on statistics are in descending order (placing
+ most time-consuming items first), whereas name, file, and line number
+ searches are in ascending order (i.e., alphabetical). The subtle
+ distinction between "nfl" and "stdname" is that the standard name is a
+ sort of the name as printed, which means that the embedded line
+ numbers get compared in an odd way. For example, lines 3, 20, and 40
+ would (if the file names were the same) appear in the string order
+ "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
+ line numbers. In fact, sort_stats("nfl") is the same as
+ sort_stats("name", "file", "line").
+
+ -T <filename>
+ save profile results as shown on screen to a text
+ file. The profile is still shown on screen.
+
+ -D <filename>
+ save (via dump_stats) profile statistics to given
+ filename. This data is in a format understood by the pstats module, and
+ is generated by a call to the dump_stats() method of profile
+ objects. The profile is still shown on screen.
+
+ -q
+ suppress output to the pager. Best used with -T and/or -D above.
+
+ If you want to run complete programs under the profiler's control, use
+ ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
+ contains profiler specific options as described here.
+
+ You can read the complete documentation for the profile module with::
+
+ In [1]: import profile; profile.help()
+
+ .. versionchanged:: 7.3
+ User variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ """
+ opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
+ list_all=True, posix=False)
+ if cell is not None:
+ arg_str += '\n' + cell
+ arg_str = self.shell.transform_cell(arg_str)
+ return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
+
+ def _run_with_profiler(self, code, opts, namespace):
+ """
+ Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
+
+ Parameters
+ ----------
+ code : str
+ Code to be executed.
+ opts : Struct
+ Options parsed by `self.parse_options`.
+ namespace : dict
+ A dictionary for Python namespace (e.g., `self.shell.user_ns`).
+
+ """
+
+ # Fill default values for unspecified options:
+ opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
+
+ prof = profile.Profile()
+ try:
+ prof = prof.runctx(code, namespace, namespace)
+ sys_exit = ''
+ except SystemExit:
+ sys_exit = """*** SystemExit exception caught in code being profiled."""
+
+ stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
+
+ lims = opts.l
+ if lims:
+ lims = [] # rebuild lims with ints/floats/strings
+ for lim in opts.l:
+ try:
+ lims.append(int(lim))
+ except ValueError:
+ try:
+ lims.append(float(lim))
+ except ValueError:
+ lims.append(lim)
+
+ # Trap output.
+ stdout_trap = StringIO()
+ stats_stream = stats.stream
+ try:
+ stats.stream = stdout_trap
+ stats.print_stats(*lims)
+ finally:
+ stats.stream = stats_stream
+
+ output = stdout_trap.getvalue()
+ output = output.rstrip()
+
+ if 'q' not in opts:
+ page.page(output)
+ print(sys_exit, end=' ')
+
+ dump_file = opts.D[0]
+ text_file = opts.T[0]
+ if dump_file:
+ prof.dump_stats(dump_file)
+ print(
+ f"\n*** Profile stats marshalled to file {repr(dump_file)}.{sys_exit}"
+ )
+ if text_file:
+ pfile = Path(text_file)
+ pfile.touch(exist_ok=True)
+ pfile.write_text(output, encoding="utf-8")
+
+ print(
+ f"\n*** Profile printout saved to text file {repr(text_file)}.{sys_exit}"
+ )
+
+ if 'r' in opts:
+ return stats
+
+ return None
+
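+ # A rough standalone equivalent of the flow above, using only the stdlib
+ # (the profiled expression and the print limit are arbitrary examples)::
+ #
+ # import cProfile, io, pstats
+ #
+ # prof = cProfile.Profile()
+ # prof.runctx("sorted(range(100000))", globals(), locals())
+ # buf = io.StringIO()
+ # stats = pstats.Stats(prof, stream=buf).strip_dirs().sort_stats("time")
+ # stats.print_stats(5) # roughly what ``%prun -l 5`` pages
+ # print(buf.getvalue())
+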
+ @line_magic
+ def pdb(self, parameter_s=''):
+ """Control the automatic calling of the pdb interactive debugger.
+
+ Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
+ argument it works as a toggle.
+
+ When an exception is triggered, IPython can optionally call the
+ interactive pdb debugger after the traceback printout. %pdb toggles
+ this feature on and off.
+
+ The initial state of this feature is set in your configuration
+ file (the option is ``InteractiveShell.pdb``).
+
+ If you want to just activate the debugger AFTER an exception has fired,
+ without having to type '%pdb on' and rerunning your code, you can use
+ the %debug magic."""
+
+ par = parameter_s.strip().lower()
+
+ if par:
+ try:
+ new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
+ except KeyError:
+ print ('Incorrect argument. Use on/1, off/0, '
+ 'or nothing for a toggle.')
+ return
+ else:
+ # toggle
+ new_pdb = not self.shell.call_pdb
+
+ # set on the shell
+ self.shell.call_pdb = new_pdb
+ print('Automatic pdb calling has been turned',on_off(new_pdb))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
+ help="""
+ Set break point at LINE in FILE.
+ """
+ )
+ @magic_arguments.argument('statement', nargs='*',
+ help="""
+ Code to run in debugger.
+ You can omit this in cell magic mode.
+ """
+ )
+ @no_var_expand
+ @line_cell_magic
+ @needs_local_scope
+ def debug(self, line="", cell=None, local_ns=None):
+ """Activate the interactive debugger.
+
+ This magic command supports two ways of activating the debugger.
+ One is to activate the debugger before executing code. This way, you
+ can set a breakpoint and step through the code from that point.
+ You can use this mode by giving statements to execute and optionally
+ a breakpoint.
+
+ The other is to activate the debugger in post-mortem mode, which you
+ can do by simply running %debug without any argument.
+ If an exception has just occurred, this lets you inspect its stack
+ frames interactively. Note that this will always work only on the last
+ traceback that occurred, so you must call this quickly after an
+ exception that you wish to inspect has fired, because if another one
+ occurs, it clobbers the previous one.
+
+ If you want IPython to automatically do this on every exception, see
+ the %pdb magic for more details.
+
+ .. versionchanged:: 7.3
+ When running code, user variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ """
+ args = magic_arguments.parse_argstring(self.debug, line)
+
+ if not (args.breakpoint or args.statement or cell):
+ self._debug_post_mortem()
+ elif not (args.breakpoint or cell):
+ # If there are no breakpoints, the line is just code to execute
+ self._debug_exec(line, None, local_ns)
+ else:
+ # Here we try to reconstruct the code from the output of
+ # parse_argstring. This might not work if the code has spaces;
+ # for example, this fails for `print("a b")`.
+ code = "\n".join(args.statement)
+ if cell:
+ code += "\n" + cell
+ self._debug_exec(code, args.breakpoint, local_ns)
+
+ def _debug_post_mortem(self):
+ self.shell.debugger(force=True)
+
+ def _debug_exec(self, code, breakpoint, local_ns=None):
+ if breakpoint:
+ (filename, bp_line) = breakpoint.rsplit(':', 1)
+ bp_line = int(bp_line)
+ else:
+ (filename, bp_line) = (None, None)
+ self._run_with_debugger(
+ code, self.shell.user_ns, filename, bp_line, local_ns=local_ns
+ )
+
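+ # Two illustrative invocations of %debug (the file name and entry point
+ # are hypothetical)::
+ #
+ # In [1]: %debug # post-mortem on the last traceback
+ #
+ # In [2]: %debug -b myscript.py:10 main()
+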
+ @line_magic
+ def tb(self, s):
+ """Print the last traceback.
+
+ Optionally, specify an exception reporting mode, tuning the
+ verbosity of the traceback. By default the currently-active exception
+ mode is used. See %xmode for changing exception reporting modes.
+
+ Valid modes: Plain, Context, Verbose, and Minimal.
+ """
+ interactive_tb = self.shell.InteractiveTB
+ if s:
+ # Switch exception reporting mode for this one call.
+ # Ensure it is switched back.
+ def xmode_switch_err(name):
+ warn('Error changing %s exception modes.\n%s' %
+ (name,sys.exc_info()[1]))
+
+ new_mode = s.strip().capitalize()
+ original_mode = interactive_tb.mode
+ try:
+ try:
+ interactive_tb.set_mode(mode=new_mode)
+ except Exception:
+ xmode_switch_err('user')
+ else:
+ self.shell.showtraceback()
+ finally:
+ interactive_tb.set_mode(mode=original_mode)
+ else:
+ self.shell.showtraceback()
+
+ @skip_doctest
+ @line_magic
+ def run(self, parameter_s='', runner=None,
+ file_finder=get_py_filename):
+ """Run the named file inside IPython as a program.
+
+ Usage::
+
+ %run [-n -i -e -G]
+ [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
+ ( -m mod | filename ) [args]
+
+ The filename argument should be either a pure Python script (with
+ extension ``.py``), or a file with custom IPython syntax (such as
+ magics). If the latter, the file can be either a script with ``.ipy``
+ extension, or a Jupyter notebook with ``.ipynb`` extension. When running
+ a Jupyter notebook, the output from print statements and other
+ displayed objects will appear in the terminal (even matplotlib figures
+ will open, if a terminal-compliant backend is being used). Note that,
+ at the system command line, the ``jupyter run`` command offers similar
+ functionality for executing notebooks (albeit currently with some
+ differences in supported options).
+
+ Parameters after the filename are passed as command-line arguments to
+ the program (put in sys.argv). Then, control returns to IPython's
+ prompt.
+
+ This is similar to running at a system prompt ``python file args``,
+ but with the advantage of giving you IPython's tracebacks, and of
+ loading all variables into your interactive namespace for further use
+ (unless -p is used, see below).
+
+ The file is executed in a namespace initially consisting only of
+ ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
+ sees its environment as if it were being run as a stand-alone program
+ (except for sharing global objects such as previously imported
+ modules). But after execution, the IPython interactive namespace gets
+ updated with all variables defined in the program (except for __name__
+ and sys.argv). This allows for very convenient loading of code for
+ interactive work, while giving each program a 'clean sheet' to run in.
+
+ Arguments are expanded using shell-like glob matching. Patterns
+ '*', '?', '[seq]' and '[!seq]' can be used. Additionally,
+ tilde '~' will be expanded into the user's home directory. Unlike
+ real shells, quotation does not suppress expansions. Use
+ *two* backslashes (e.g. ``\\\\*``) to suppress expansions.
+ To completely disable these expansions, you can use the -G flag.
+
+ On Windows systems, the use of single quotes `'` when specifying
+ a file is not supported. Use double quotes `"`.
+
+ Options:
+
+ -n
+ __name__ is NOT set to '__main__', but to the running file's name
+ without extension (as python does under import). This allows running
+ scripts and reloading the definitions in them without calling code
+ protected by an ``if __name__ == "__main__"`` clause.
+
+ -i
+ run the file in IPython's namespace instead of an empty one. This
+ is useful if you are experimenting with code written in a text editor
+ which depends on variables defined interactively.
+
+ -e
+ ignore sys.exit() calls or SystemExit exceptions in the script
+ being run. This is particularly useful if IPython is being used to
+ run unittests, which always exit with a sys.exit() call. In such
+ cases you are interested in the output of the test results, not in
+ seeing a traceback of the unittest module.
+
+ -t
+ print timing information at the end of the run. IPython will give
+ you an estimated CPU time consumption for your script, which under
+ Unix uses the resource module to avoid the wraparound problems of
+ time.clock(). Under Unix, an estimate of time spent on system tasks
+ is also given (for Windows platforms this is reported as 0.0).
+
+ If -t is given, an additional ``-N<N>`` option can be given, where <N>
+ must be an integer indicating how many times you want the script to
+ run. The final timing report will include total and per run results.
+
+ For example (testing the script uniq_stable.py)::
+
+ In [1]: run -t uniq_stable
+
+ IPython CPU timings (estimated):
+ User : 0.19597 s.
+ System: 0.0 s.
+
+ In [2]: run -t -N5 uniq_stable
+
+ IPython CPU timings (estimated):
+ Total runs performed: 5
+ Times : Total Per run
+ User : 0.910862 s, 0.1821724 s.
+ System: 0.0 s, 0.0 s.
+
+ -d
+ run your program under the control of pdb, the Python debugger.
+ This allows you to execute your program step by step, watch variables,
+ etc. Internally, what IPython does is similar to calling::
+
+ pdb.run('execfile("YOURFILENAME")')
+
+ with a breakpoint set on line 1 of your file. You can change the line
+ number for this automatic breakpoint to be <N> by using the -bN option
+ (where N must be an integer). For example::
+
+ %run -d -b40 myscript
+
+ will set the first breakpoint at line 40 in myscript.py. Note that
+ the first breakpoint must be set on a line which actually does
+ something (not a comment or docstring) for it to stop execution.
+
+ Or you can specify a breakpoint in a different file::
+
+ %run -d -b myotherfile.py:20 myscript
+
+ When the pdb debugger starts, you will see a (Pdb) prompt. You must
+ first enter 'c' (without quotes) to start execution up to the first
+ breakpoint.
+
+ Entering 'help' gives information about the use of the debugger. You
+ can easily see pdb's full documentation with "import pdb;pdb.help()"
+ at a prompt.
+
+ -p
+ run program under the control of the Python profiler module (which
+ prints a detailed report of execution times, function calls, etc).
+
+ You can pass other options after -p which affect the behavior of the
+ profiler itself. See the docs for %prun for details.
+
+ In this mode, the program's variables do NOT propagate back to the
+ IPython interactive namespace (because they remain in the namespace
+ where the profiler executes them).
+
+ Internally this triggers a call to %prun, see its documentation for
+ details on the options available specifically for profiling.
+
+ There is one special usage for which the text above doesn't apply:
+ if the filename ends with .ipy[nb], the file is run as an IPython script,
+ just as if the commands had been typed at the IPython prompt.
+
+ -m
+ specify module name to load instead of script path. Similar to
+ the -m option for the python interpreter. Use this option last if you
+ want to combine it with other %run options. Unlike the python interpreter,
+ only source modules are allowed; no .pyc or .pyo files.
+ For example::
+
+ %run -m example
+
+ will run the example module.
+
+ -G
+ disable shell-like glob expansion of arguments.
+
+ """
+
+ # Logic to handle issue #3664
+ # Add '--' after '-m <module_name>' to ignore additional args passed to a module.
+ if '-m' in parameter_s and '--' not in parameter_s:
+ argv = shlex.split(parameter_s, posix=(os.name == 'posix'))
+ for idx, arg in enumerate(argv):
+ if arg and arg.startswith('-') and arg != '-':
+ if arg == '-m':
+ argv.insert(idx + 2, '--')
+ break
+ else:
+ # Positional arg, break
+ break
+ parameter_s = ' '.join(shlex.quote(arg) for arg in argv)
+
+ # get arguments and set sys.argv for program to be run.
+ opts, arg_lst = self.parse_options(parameter_s,
+ 'nidtN:b:pD:l:rs:T:em:G',
+ mode='list', list_all=1)
+ if "m" in opts:
+ modulename = opts["m"][0]
+ modpath = find_mod(modulename)
+ if modpath is None:
+ msg = '%r is not a valid modulename on sys.path'%modulename
+ raise Exception(msg)
+ arg_lst = [modpath] + arg_lst
+ try:
+ fpath = None # initialize to make sure fpath is in scope later
+ fpath = arg_lst[0]
+ filename = file_finder(fpath)
+ except IndexError as e:
+ msg = 'you must provide at least a filename.'
+ raise Exception(msg) from e
+ except IOError as e:
+ try:
+ msg = str(e)
+ except UnicodeError:
+ msg = e.message
+ if os.name == 'nt' and re.match(r"^'.*'$",fpath):
+ warn('For Windows, use double quotes to wrap a filename: %run "mypath\\myfile.py"')
+ raise Exception(msg) from e
+ except TypeError:
+ if fpath in sys.meta_path:
+ filename = ""
+ else:
+ raise
+
+ if filename.lower().endswith(('.ipy', '.ipynb')):
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns['__file__'] = filename
+ self.shell.safe_execfile_ipy(filename, raise_exceptions=True)
+ return
+
+ # Control the response to exit() calls made by the script being run
+ exit_ignore = 'e' in opts
+
+ # Make sure that the running script gets a proper sys.argv as if it
+ # were run from a system shell.
+ save_argv = sys.argv # save it for later restoring
+
+ if 'G' in opts:
+ args = arg_lst[1:]
+ else:
+ # tilde and glob expansion
+ args = shellglob(map(os.path.expanduser, arg_lst[1:]))
+
+ sys.argv = [filename] + args # put in the proper filename
+
+ if 'n' in opts:
+ name = Path(filename).stem
+ else:
+ name = '__main__'
+
+ if 'i' in opts:
+ # Run in user's interactive namespace
+ prog_ns = self.shell.user_ns
+ __name__save = self.shell.user_ns['__name__']
+ prog_ns['__name__'] = name
+ main_mod = self.shell.user_module
+
+ # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
+ # set the __file__ global in the script's namespace
+ # TK: Is this necessary in interactive mode?
+ prog_ns['__file__'] = filename
+ else:
+ # Run in a fresh, empty namespace
+
+ # The shell MUST hold a reference to prog_ns so after %run
+ # exits, the python deletion mechanism doesn't zero it out
+ # (leaving dangling references). See interactiveshell for details
+ main_mod = self.shell.new_main_mod(filename, name)
+ prog_ns = main_mod.__dict__
+
+ # pickle fix. See interactiveshell for an explanation. But we need to
+ # make sure that, if we overwrite __main__, we replace it at the end
+ main_mod_name = prog_ns['__name__']
+
+ if main_mod_name == '__main__':
+ restore_main = sys.modules['__main__']
+ else:
+ restore_main = False
+
+ # This needs to be undone at the end to prevent holding references to
+ # every single object ever created.
+ sys.modules[main_mod_name] = main_mod
+
+ if 'p' in opts or 'd' in opts:
+ if 'm' in opts:
+ code = 'run_module(modulename, prog_ns)'
+ code_ns = {
+ 'run_module': self.shell.safe_run_module,
+ 'prog_ns': prog_ns,
+ 'modulename': modulename,
+ }
+ else:
+ if 'd' in opts:
+ # allow exceptions to raise in debug mode
+ code = 'execfile(filename, prog_ns, raise_exceptions=True)'
+ else:
+ code = 'execfile(filename, prog_ns)'
+ code_ns = {
+ 'execfile': self.shell.safe_execfile,
+ 'prog_ns': prog_ns,
+ 'filename': get_py_filename(filename),
+ }
+
+ try:
+ stats = None
+ if 'p' in opts:
+ stats = self._run_with_profiler(code, opts, code_ns)
+ else:
+ if 'd' in opts:
+ bp_file, bp_line = parse_breakpoint(
+ opts.get('b', ['1'])[0], filename)
+ self._run_with_debugger(
+ code, code_ns, filename, bp_line, bp_file)
+ else:
+ if 'm' in opts:
+ def run():
+ self.shell.safe_run_module(modulename, prog_ns)
+ else:
+ if runner is None:
+ runner = self.default_runner
+ if runner is None:
+ runner = self.shell.safe_execfile
+
+ def run():
+ runner(filename, prog_ns, prog_ns,
+ exit_ignore=exit_ignore)
+
+ if 't' in opts:
+ # timed execution
+ try:
+ nruns = int(opts['N'][0])
+ if nruns < 1:
+ error('Number of runs must be >=1')
+ return
+ except KeyError:
+ nruns = 1
+ self._run_with_timing(run, nruns)
+ else:
+ # regular execution
+ run()
+
+ if 'i' in opts:
+ self.shell.user_ns['__name__'] = __name__save
+ else:
+ # update IPython interactive namespace
+
+ # Some forms of read errors on the file may mean the
+ # __name__ key was never set; using pop we don't have to
+ # worry about a possible KeyError.
+ prog_ns.pop('__name__', None)
+
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns.update(prog_ns)
+ finally:
+ # It's a bit of a mystery why, but __builtins__ can change from
+ # being a module to becoming a dict missing some key data after
+ # %run. As best I can see, this is NOT something IPython is doing
+ # at all, and similar problems have been reported before:
+ # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
+ # Since this seems to be done by the interpreter itself, the best
+ # we can do is to at least restore __builtins__ for the user on
+ # exit.
+ self.shell.user_ns['__builtins__'] = builtin_mod
+
+ # Ensure key global structures are restored
+ sys.argv = save_argv
+ if restore_main:
+ sys.modules['__main__'] = restore_main
+ if '__mp_main__' in sys.modules:
+ sys.modules['__mp_main__'] = restore_main
+ else:
+ # Remove from sys.modules the reference to main_mod we'd
+ # added. Otherwise it will trap references to objects
+ # contained therein.
+ del sys.modules[main_mod_name]
+
+ return stats
+
+ def _run_with_debugger(
+ self, code, code_ns, filename=None, bp_line=None, bp_file=None, local_ns=None
+ ):
+ """
+ Run `code` in debugger with a break point.
+
+ Parameters
+ ----------
+ code : str
+ Code to execute.
+ code_ns : dict
+ A namespace in which `code` is executed.
+ filename : str
+ `code` is run as if it were in `filename`.
+ bp_line : int, optional
+ Line number of the break point.
+ bp_file : str, optional
+ Path to the file in which break point is specified.
+ `filename` is used if not given.
+ local_ns : dict, optional
+ A local namespace in which `code` is executed.
+
+ Raises
+ ------
+ UsageError
+ If the break point given by `bp_line` is not valid.
+
+ """
+ deb = self.shell.InteractiveTB.pdb
+ if not deb:
+ self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
+ deb = self.shell.InteractiveTB.pdb
+
+ # deb.checkline() fails if deb.curframe exists but is None; it can
+ # handle it not existing. https://github.com/ipython/ipython/issues/10028
+ if hasattr(deb, 'curframe'):
+ del deb.curframe
+
+ # reset Breakpoint state, which is moronically kept
+ # in a class
+ bdb.Breakpoint.next = 1
+ bdb.Breakpoint.bplist = {}
+ bdb.Breakpoint.bpbynumber = [None]
+ deb.clear_all_breaks()
+ if bp_line is not None:
+ # Set an initial breakpoint to stop execution
+ maxtries = 10
+ bp_file = bp_file or filename
+ checkline = deb.checkline(bp_file, bp_line)
+ if not checkline:
+ for bp in range(bp_line + 1, bp_line + maxtries + 1):
+ if deb.checkline(bp_file, bp):
+ break
+ else:
+ msg = ("\nI failed to find a valid line to set "
+ "a breakpoint\n"
+ "after trying up to line: %s.\n"
+ "Please set a valid breakpoint manually "
+ "with the -b option." % bp)
+ raise UsageError(msg)
+ # if we find a good linenumber, set the breakpoint
+ deb.do_break('%s:%s' % (bp_file, bp_line))
+
+ if filename:
+ # Mimic Pdb._runscript(...)
+ deb._wait_for_mainpyfile = True
+ deb.mainpyfile = deb.canonic(filename)
+
+ # Start file run
+ print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
+ try:
+ if filename:
+ # save filename so it can be used by methods on the deb object
+ deb._exec_filename = filename
+ while True:
+ try:
+ trace = sys.gettrace()
+ deb.run(code, code_ns, local_ns)
+ except Restart:
+ print("Restarting")
+ if filename:
+ deb._wait_for_mainpyfile = True
+ deb.mainpyfile = deb.canonic(filename)
+ continue
+ else:
+ break
+ finally:
+ sys.settrace(trace)
+
+
+ except:
+ etype, value, tb = sys.exc_info()
+ # Skip three frames in the traceback: the %run one,
+ # one inside bdb.py, and the command-line typed by the
+ # user (run by exec in pdb itself).
+ self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
+
+ @staticmethod
+ def _run_with_timing(run, nruns):
+ """
+ Run function `run` and print timing information.
+
+ Parameters
+ ----------
+ run : callable
+ Any callable object which takes no argument.
+ nruns : int
+ Number of times to execute `run`.
+
+ """
+ twall0 = time.perf_counter()
+ if nruns == 1:
+ t0 = clock2()
+ run()
+ t1 = clock2()
+ t_usr = t1[0] - t0[0]
+ t_sys = t1[1] - t0[1]
+ print("\nIPython CPU timings (estimated):")
+ print(" User : %10.2f s." % t_usr)
+ print(" System : %10.2f s." % t_sys)
+ else:
+ runs = range(nruns)
+ t0 = clock2()
+ for nr in runs:
+ run()
+ t1 = clock2()
+ t_usr = t1[0] - t0[0]
+ t_sys = t1[1] - t0[1]
+ print("\nIPython CPU timings (estimated):")
+ print("Total runs performed:", nruns)
+ print(" Times : %10s %10s" % ('Total', 'Per run'))
+ print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
+ print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
+ twall1 = time.perf_counter()
+ print("Wall time: %10.2f s." % (twall1 - twall0))
+
+ @skip_doctest
+ @no_var_expand
+ @line_cell_magic
+ @needs_local_scope
+ def timeit(self, line='', cell=None, local_ns=None):
+ """Time execution of a Python statement or expression
+
+ Usage, in line mode:
+ %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
+ or in cell mode:
+ %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
+ code
+ code...
+
+ Time execution of a Python statement or expression using the timeit
+ module. This function can be used both as a line and cell magic:
+
+ - In line mode you can time a single-line statement (though multiple
+ ones can be chained using semicolons).
+
+ - In cell mode, the statement in the first line is used as setup code
+ (executed but not timed) and the body of the cell is timed. The cell
+ body has access to any variables created in the setup code.
+
+ Options:
+ -n<N>: execute the given statement <N> times in a loop. If <N> is not
+ provided, <N> is determined so as to get sufficient accuracy.
+
+ -r<R>: number of repeats <R>, each consisting of <N> loops; timing
+ statistics are computed across the repeats.
+ Default: 7
+
+ -t: use time.time to measure the time, which is the default on Unix.
+ This function measures wall time.
+
+ -c: use time.clock to measure the time, which is the default on
+ Windows and measures wall time. On Unix, resource.getrusage is used
+ instead and returns the CPU user time.
+
+ -p<P>: use a precision of <P> digits to display the timing result.
+ Default: 3
+
+ -q: Quiet, do not print result.
+
+ -o: return a TimeitResult that can be stored in a variable to inspect
+ the result in more detail.
+
+ .. versionchanged:: 7.3
+ User variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %timeit pass
+ 8.26 ns ± 0.12 ns per loop (mean ± std. dev. of 7 runs, 100000000 loops each)
+
+ In [2]: u = None
+
+ In [3]: %timeit u is None
+ 29.9 ns ± 0.643 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
+
+ In [4]: %timeit -r 4 u == None
+
+ In [5]: import time
+
+ In [6]: %timeit -n1 time.sleep(2)
+
+ The times reported by %timeit will be slightly higher than those
+ reported by the timeit.py script when variables are accessed. This is
+ due to the fact that %timeit executes the statement in the namespace
+ of the shell, compared with timeit.py, which uses a single setup
+ statement to import functions or create variables. Generally, the bias
+ does not matter as long as results from timeit.py are not mixed with
+ those from %timeit."""
+
+ opts, stmt = self.parse_options(
+ line, "n:r:tcp:qo", posix=False, strict=False, preserve_non_opts=True
+ )
+ if stmt == "" and cell is None:
+ return
+
+ timefunc = timeit.default_timer
+ number = int(getattr(opts, "n", 0))
+ default_repeat = 7 if timeit.default_repeat < 7 else timeit.default_repeat
+ repeat = int(getattr(opts, "r", default_repeat))
+ precision = int(getattr(opts, "p", 3))
+ quiet = 'q' in opts
+ return_result = 'o' in opts
+ if hasattr(opts, "t"):
+ timefunc = time.time
+ if hasattr(opts, "c"):
+ timefunc = clock
+
+ timer = Timer(timer=timefunc)
+ # this code has tight coupling to the inner workings of timeit.Timer,
+ # but is there a better way to give the code stmt access
+ # to the shell namespace?
+ transform = self.shell.transform_cell
+
+ if cell is None:
+ # called as line magic
+ ast_setup = self.shell.compile.ast_parse("pass")
+ ast_stmt = self.shell.compile.ast_parse(transform(stmt))
+ else:
+ ast_setup = self.shell.compile.ast_parse(transform(stmt))
+ ast_stmt = self.shell.compile.ast_parse(transform(cell))
+
+ ast_setup = self.shell.transform_ast(ast_setup)
+ ast_stmt = self.shell.transform_ast(ast_stmt)
+
+ # Check that these compile to valid Python code *outside* the timer func
+ # Invalid code may become valid when put inside the function & loop,
+ # which messes up error messages.
+ # https://github.com/ipython/ipython/issues/10636
+ self.shell.compile(ast_setup, "<magic-timeit-setup>", "exec")
+ self.shell.compile(ast_stmt, "<magic-timeit-stmt>", "exec")
+
+ # This codestring is taken from timeit.template - we fill it in as an
+ # AST, so that we can apply our AST transformations to the user code
+ # without affecting the timing code.
+ timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
+ ' setup\n'
+ ' _t0 = _timer()\n'
+ ' for _i in _it:\n'
+ ' stmt\n'
+ ' _t1 = _timer()\n'
+ ' return _t1 - _t0\n')
+
+ timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
+ timeit_ast = ast.fix_missing_locations(timeit_ast)
+
+ # Track compilation time so it can be reported if too long
+ # Minimum time above which compilation time will be reported
+ tc_min = 0.1
+
+ t0 = clock()
+ code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
+ tc = clock()-t0
+
+ ns = {}
+ glob = self.shell.user_ns
+ # handles global vars with the same name as local vars. We store them in conflict_globs.
+ conflict_globs = {}
+ if local_ns and cell is None:
+ for var_name, var_val in glob.items():
+ if var_name in local_ns:
+ conflict_globs[var_name] = var_val
+ glob.update(local_ns)
+
+ exec(code, glob, ns)
+ timer.inner = ns["inner"]
+
+ # This is used to check if there is a huge difference between the
+ # best and worst timings.
+ # Issue: https://github.com/ipython/ipython/issues/6471
+ if number == 0:
+ # determine number so that 0.2 <= total time < 2.0
+ for index in range(0, 10):
+ number = 10 ** index
+ time_number = timer.timeit(number)
+ if time_number >= 0.2:
+ break
+
+ all_runs = timer.repeat(repeat, number)
+ best = min(all_runs) / number
+ worst = max(all_runs) / number
+ timeit_result = TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
+
+ # Restore global vars from conflict_globs
+ if conflict_globs:
+ glob.update(conflict_globs)
+
+ if not quiet :
+ # Check best timing is greater than zero to avoid a
+ # ZeroDivisionError.
+ # In cases where the slowest timing is less than a microsecond
+ # we assume that it does not really matter whether the fastest
+ # timing is 4 times faster than the slowest timing or not.
+ if worst > 4 * best and best > 0 and worst > 1e-6:
+ print("The slowest run took %0.2f times longer than the "
+ "fastest. This could mean that an intermediate result "
+ "is being cached." % (worst / best))
+
+ print( timeit_result )
+
+ if tc > tc_min:
+ print("Compiler time: %.2f s" % tc)
+ if return_result:
+ return timeit_result
+
+ @skip_doctest
+ @no_var_expand
+ @needs_local_scope
+ @line_cell_magic
+ @output_can_be_silenced
+ def time(self,line='', cell=None, local_ns=None):
+ """Time execution of a Python statement or expression.
+
+ The CPU and wall clock times are printed, and the value of the
+ expression (if any) is returned. Note that under Win32, system time
+ is always reported as 0, since it cannot be measured.
+
+ This function can be used both as a line and cell magic:
+
+ - In line mode you can time a single-line statement (though multiple
+ ones can be chained using semicolons).
+
+ - In cell mode, you can time the cell body (a directly
+ following statement raises an error).
+
+ This function provides very basic timing functionality. Use the timeit
+ magic for more control over the measurement.
+
+ .. versionchanged:: 7.3
+ User variables are no longer expanded,
+ the magic line is always left unmodified.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %time 2**128
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00
+ Out[1]: 340282366920938463463374607431768211456
+
+ In [2]: n = 1000000
+
+ In [3]: %time sum(range(n))
+ CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
+ Wall time: 1.37
+ Out[3]: 499999500000
+
+ In [4]: %time print('hello world')
+ hello world
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00
+
+ .. note::
+ The time needed by Python to compile the given expression will be
+ reported if it is more than 0.1s.
+
+ In the example below, the actual exponentiation is done by Python
+ at compilation time, so while the expression can take a noticeable
+ amount of time to compute, that time is purely due to the
+ compilation::
+
+ In [5]: %time 3**9999;
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00 s
+
+ In [6]: %time 3**999999;
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00 s
+ Compiler : 0.78 s
+ """
+ # fail immediately if the given expression can't be compiled
+
+ if line and cell:
+ raise UsageError("Can't use statement directly after '%%time'!")
+
+ if cell:
+ expr = self.shell.transform_cell(cell)
+ else:
+ expr = self.shell.transform_cell(line)
+
+ # Minimum time above which parse time will be reported
+ tp_min = 0.1
+
+ t0 = clock()
+ expr_ast = self.shell.compile.ast_parse(expr)
+ tp = clock()-t0
+
+ # Apply AST transformations
+ expr_ast = self.shell.transform_ast(expr_ast)
+
+ # Minimum time above which compilation time will be reported
+ tc_min = 0.1
+
+ expr_val=None
+ if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
+ mode = 'eval'
+ source = '<timed eval>'
+ expr_ast = ast.Expression(expr_ast.body[0].value)
+ else:
+ mode = 'exec'
+ source = '<timed exec>'
+ # multi-line %%time case
+ if len(expr_ast.body) > 1 and isinstance(expr_ast.body[-1], ast.Expr):
+ expr_val= expr_ast.body[-1]
+ expr_ast = expr_ast.body[:-1]
+ expr_ast = Module(expr_ast, [])
+ expr_val = ast.Expression(expr_val.value)
+
+ t0 = clock()
+ code = self.shell.compile(expr_ast, source, mode)
+ tc = clock()-t0
+
+ # skew measurement as little as possible
+ glob = self.shell.user_ns
+ wtime = time.time
+ # time execution
+ wall_st = wtime()
+ if mode=='eval':
+ st = clock2()
+ try:
+ out = eval(code, glob, local_ns)
+ except:
+ self.shell.showtraceback()
+ return
+ end = clock2()
+ else:
+ st = clock2()
+ try:
+ exec(code, glob, local_ns)
+ out=None
+ # multi-line %%time case
+ if expr_val is not None:
+ code_2 = self.shell.compile(expr_val, source, 'eval')
+ out = eval(code_2, glob, local_ns)
+ except:
+ self.shell.showtraceback()
+ return
+ end = clock2()
+
+ wall_end = wtime()
+ # Compute actual times and report
+ wall_time = wall_end - wall_st
+ cpu_user = end[0] - st[0]
+ cpu_sys = end[1] - st[1]
+ cpu_tot = cpu_user + cpu_sys
+ # On windows cpu_sys is always zero, so only total is displayed
+ if sys.platform != "win32":
+ print(
+ f"CPU times: user {_format_time(cpu_user)}, sys: {_format_time(cpu_sys)}, total: {_format_time(cpu_tot)}"
+ )
+ else:
+ print(f"CPU times: total: {_format_time(cpu_tot)}")
+ print(f"Wall time: {_format_time(wall_time)}")
+ if tc > tc_min:
+ print(f"Compiler : {_format_time(tc)}")
+ if tp > tp_min:
+ print(f"Parser : {_format_time(tp)}")
+ return out
+
+ @skip_doctest
+ @line_magic
+ def macro(self, parameter_s=''):
+ """Define a macro for future re-execution. It accepts ranges of history,
+ filenames or string objects.
+
+ Usage:\\
+ %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
+
+ Options:
+
+ -r: use 'raw' input. By default, the 'processed' history is used,
+ so that magics are loaded in their transformed version, as valid
+ Python. If this option is given, the raw input as typed at the
+ command line is used instead.
+
+ -q: quiet macro definition. By default, a tag line is printed
+ to indicate the macro has been created, and then the contents of
+ the macro are printed. If this option is given, then no printout
+ is produced once the macro is created.
+
+ This will define a global variable called `name` which is a string
+ made of joining the slices and lines you specify (n1,n2,... numbers
+ above) from your input history into a single string. This variable
+ acts like an automatic function which re-executes those lines as if
+ you had typed them. You just type 'name' at the prompt and the code
+ executes.
+
+ The syntax for indicating input ranges is described in %history.
+
+ Note: as a 'hidden' feature, you can also use traditional python slice
+ notation, where N:M means numbers N through M-1.
+
+ For example, if your history contains (printed using %hist -n)::
+
+ 44: x=1
+ 45: y=3
+ 46: z=x+y
+ 47: print(x)
+ 48: a=5
+ 49: print('x', x, 'y', y)
+
+ you can create a macro with lines 44 through 47 (included) and line 49
+ called my_macro with::
+
+ In [55]: %macro my_macro 44-47 49
+
+ Now, typing `my_macro` (without quotes) will re-execute all this code
+ in one pass.
+
+ You don't need to give the line-numbers in order, and any given line
+ number can appear multiple times. You can assemble macros with any
+ lines from your input history in any order.
+
+ The macro is a simple object which holds its value in an attribute,
+ but IPython's display system checks for macros and executes them as
+ code instead of printing them when you type their name.
+
+ You can view a macro's contents by explicitly printing it with::
+
+ print(macro_name)
+
+ """
+ opts,args = self.parse_options(parameter_s,'rq',mode='list')
+ if not args: # List existing macros
+ return sorted(k for k,v in self.shell.user_ns.items() if isinstance(v, Macro))
+ if len(args) == 1:
+ raise UsageError(
+ "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
+ name, codefrom = args[0], " ".join(args[1:])
+
+ #print 'rng',ranges # dbg
+ try:
+ lines = self.shell.find_user_code(codefrom, 'r' in opts)
+ except (ValueError, TypeError) as e:
+ print(e.args[0])
+ return
+ macro = Macro(lines)
+ self.shell.define_macro(name, macro)
+ if 'q' not in opts:
+ print('Macro `%s` created. To execute, type its name (without quotes).' % name)
+ print('=== Macro contents: ===')
+ print(macro, end=' ')
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('output', type=str, default='', nargs='?',
+ help="""The name of the variable in which to store output.
+ This is a utils.io.CapturedIO object with stdout/err attributes
+ for the text of the captured output.
+
+ The CapturedIO object also has a show() method for displaying the output,
+ and __call__ as well, so you can use that to quickly display the
+ output.
+
+ If unspecified, captured output is discarded.
+ """
+ )
+ @magic_arguments.argument('--no-stderr', action="store_true",
+ help="""Don't capture stderr."""
+ )
+ @magic_arguments.argument('--no-stdout', action="store_true",
+ help="""Don't capture stdout."""
+ )
+ @magic_arguments.argument('--no-display', action="store_true",
+ help="""Don't capture IPython's rich display."""
+ )
+ @cell_magic
+ def capture(self, line, cell):
+ """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
+ args = magic_arguments.parse_argstring(self.capture, line)
+ out = not args.no_stdout
+ err = not args.no_stderr
+ disp = not args.no_display
+ with capture_output(out, err, disp) as io:
+ self.shell.run_cell(cell)
+ if DisplayHook.semicolon_at_end_of_expression(cell):
+ if args.output in self.shell.user_ns:
+ del self.shell.user_ns[args.output]
+ elif args.output:
+ self.shell.user_ns[args.output] = io
+
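+# An illustrative use of %%capture (the variable name ``captured`` is
+# arbitrary)::
+#
+# In [1]: %%capture captured --no-stderr
+# ...: print("hello")
+#
+# In [2]: captured.stdout
+# Out[2]: 'hello\n'
+#
+# In [3]: captured.show() # replay the captured output
+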
+def parse_breakpoint(text, current_file):
+ '''Returns (file, line) for file:line and (current_file, line) for line'''
+ colon = text.find(':')
+ if colon == -1:
+ return current_file, int(text)
+ else:
+ return text[:colon], int(text[colon+1:])
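+
+# For example, parse_breakpoint above behaves as follows::
+#
+# parse_breakpoint("other.py:20", "main.py") # -> ("other.py", 20)
+# parse_breakpoint("15", "main.py") # -> ("main.py", 15)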
+
+def _format_time(timespan, precision=3):
+ """Formats the timespan in a human readable form"""
+
+ if timespan >= 60.0:
+ # we have more than a minute, format that in a human readable form
+ # Idea from http://snipplr.com/view/5713/
+ parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
+ time = []
+ leftover = timespan
+ for suffix, length in parts:
+ value = int(leftover / length)
+ if value > 0:
+ leftover = leftover % length
+ time.append(u'%s%s' % (str(value), suffix))
+ if leftover < 1:
+ break
+ return " ".join(time)
+
+
+ # Unfortunately the unicode 'micro' symbol can cause problems in
+ # certain terminals.
+ # See bug: https://bugs.launchpad.net/ipython/+bug/348466
+ # Try to prevent crashes by being more conservative than it needs to be.
+ # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
+ units = [u"s", u"ms",u'us',"ns"] # the save value
+ if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+ try:
+ u'\xb5'.encode(sys.stdout.encoding)
+ units = [u"s", u"ms",u'\xb5s',"ns"]
+ except:
+ pass
+ scaling = [1, 1e3, 1e6, 1e9]
+
+ if timespan > 0.0:
+ order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
+ else:
+ order = 3
+ return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
diff --git a/contrib/python/ipython/py3/IPython/core/magics/extension.py b/contrib/python/ipython/py3/IPython/core/magics/extension.py
new file mode 100644
index 0000000000..2bc76b2d55
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/extension.py
@@ -0,0 +1,63 @@
+"""Implementation of magic functions for the extension machinery.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+
+# Our own packages
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class ExtensionMagics(Magics):
+ """Magics to manage the IPython extensions system."""
+
+ @line_magic
+ def load_ext(self, module_str):
+ """Load an IPython extension by its module name."""
+ if not module_str:
+ raise UsageError('Missing module name.')
+ res = self.shell.extension_manager.load_extension(module_str)
+
+ if res == 'already loaded':
+ print("The %s extension is already loaded. To reload it, use:" % module_str)
+ print(" %reload_ext", module_str)
+ elif res == 'no load function':
+ print("The %s module is not an IPython extension." % module_str)
+
+ @line_magic
+ def unload_ext(self, module_str):
+ """Unload an IPython extension by its module name.
+
+ Not all extensions can be unloaded, only those which define an
+ ``unload_ipython_extension`` function.
+ """
+ if not module_str:
+ raise UsageError('Missing module name.')
+
+ res = self.shell.extension_manager.unload_extension(module_str)
+
+ if res == 'no unload function':
+ print("The %s extension doesn't define how to unload it." % module_str)
+ elif res == "not loaded":
+ print("The %s extension is not loaded." % module_str)
+
+ @line_magic
+ def reload_ext(self, module_str):
+ """Reload an IPython extension by its module name."""
+ if not module_str:
+ raise UsageError('Missing module name.')
+ self.shell.extension_manager.reload_extension(module_str)
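As a usage sketch of the three magics above (``myext`` is a placeholder for any importable module that defines ``load_ipython_extension``):

    %load_ext myext       # imports myext and calls its load_ipython_extension(ip)
    %load_ext myext       # a second call prints the "already loaded" hint
    %reload_ext myext     # reload the extension
    %unload_ext myext     # only works if myext defines unload_ipython_extension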
diff --git a/contrib/python/ipython/py3/IPython/core/magics/history.py b/contrib/python/ipython/py3/IPython/core/magics/history.py
new file mode 100644
index 0000000000..faa4335faa
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/history.py
@@ -0,0 +1,338 @@
+"""Implementation of magic functions related to History.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import os
+import sys
+from io import open as io_open
+import fnmatch
+
+# Our own packages
+from IPython.core.error import StdinNotImplementedError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import io
+
+#-----------------------------------------------------------------------------
+# Magics class implementation
+#-----------------------------------------------------------------------------
+
+
+_unspecified = object()
+
+
+@magics_class
+class HistoryMagics(Magics):
+
+ @magic_arguments()
+ @argument(
+ '-n', dest='print_nums', action='store_true', default=False,
+ help="""
+ print line numbers for each input.
+ This feature is only available if numbered prompts are in use.
+ """)
+ @argument(
+ '-o', dest='get_output', action='store_true', default=False,
+ help="also print outputs for each input.")
+ @argument(
+ '-p', dest='pyprompts', action='store_true', default=False,
+ help="""
+ print classic '>>>' python prompts before each input.
+ This is useful for making documentation, and in conjunction
+ with -o, for producing doctest-ready output.
+ """)
+ @argument(
+ '-t', dest='raw', action='store_false', default=True,
+ help="""
+ print the 'translated' history, as IPython understands it.
+ IPython filters your input and converts it all into valid Python
+ source before executing it (things like magics or aliases are turned
+ into function calls, for example). With this option, you'll see the
+ native history instead of the user-entered version: '%%cd /' will be
+ seen as 'get_ipython().run_line_magic("cd", "/")' instead of '%%cd /'.
+ """)
+ @argument(
+ '-f', dest='filename',
+ help="""
+ FILENAME: instead of printing the output to the screen, redirect
+ it to the given file. The file is always overwritten, though *when
+ it can*, IPython asks for confirmation first. In particular, running
+ the command 'history -f FILENAME' from the IPython Notebook
+ interface will replace FILENAME even if it already exists *without*
+ confirmation.
+ """)
+ @argument(
+ '-g', dest='pattern', nargs='*', default=None,
+ help="""
+ treat the arg as a glob pattern to search for in (full) history.
+ This includes the saved history (almost all commands ever written).
+ The pattern may contain '?' to match one unknown character and '*'
+ to match any number of unknown characters. Use '%%hist -g' to show
+ full saved history (may be very long).
+ """)
+ @argument(
+ '-l', dest='limit', type=int, nargs='?', default=_unspecified,
+ help="""
+ get the last n lines from all sessions. Specify n as a single
+ arg, or the default is the last 10 lines.
+ """)
+ @argument(
+ '-u', dest='unique', action='store_true',
+ help="""
+ when searching history using `-g`, show only unique history.
+ """)
+ @argument('range', nargs='*')
+ @skip_doctest
+ @line_magic
+ def history(self, parameter_s = ''):
+ """Print input history (_i<n> variables), with most recent last.
+
+ By default, input history is printed without line numbers so it can be
+ directly pasted into an editor. Use -n to show them.
+
+ By default, all input history from the current session is displayed.
+ Ranges of history can be indicated using the syntax:
+
+ ``4``
+ Line 4, current session
+ ``4-6``
+ Lines 4-6, current session
+ ``243/1-5``
+ Lines 1-5, session 243
+ ``~2/7``
+ Line 7, session 2 before current
+ ``~8/1-~6/5``
+ From the first line of 8 sessions ago, to the fifth line of 6
+ sessions ago.
+
+ Multiple ranges can be entered, separated by spaces.
+
+ The same syntax is used by %macro, %save, %edit, %rerun.
+
+ Examples
+ --------
+ ::
+
+ In [6]: %history -n 4-6
+ 4:a = 12
+ 5:print a**2
+ 6:%history -n 4-6
+
+ """
+
+ args = parse_argstring(self.history, parameter_s)
+
+ # For brevity
+ history_manager = self.shell.history_manager
+
+ def _format_lineno(session, line):
+ """Helper function to format line numbers properly."""
+ if session in (0, history_manager.session_number):
+ return str(line)
+ return "%s/%s" % (session, line)
+
+ # Check if output to specific file was requested.
+ outfname = args.filename
+ if not outfname:
+ outfile = sys.stdout # default
+ # We don't want to close stdout at the end!
+ close_at_end = False
+ else:
+ outfname = os.path.expanduser(outfname)
+ if os.path.exists(outfname):
+ try:
+ ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Aborting.')
+ return
+ print("Overwriting file.")
+ outfile = io_open(outfname, 'w', encoding='utf-8')
+ close_at_end = True
+
+ print_nums = args.print_nums
+ get_output = args.get_output
+ pyprompts = args.pyprompts
+ raw = args.raw
+
+ pattern = None
+ limit = None if args.limit is _unspecified else args.limit
+
+ range_pattern = False
+ if args.pattern is not None and not args.range:
+ if args.pattern:
+ pattern = "*" + " ".join(args.pattern) + "*"
+ else:
+ pattern = "*"
+ hist = history_manager.search(pattern, raw=raw, output=get_output,
+ n=limit, unique=args.unique)
+ print_nums = True
+ elif args.limit is not _unspecified:
+ n = 10 if limit is None else limit
+ hist = history_manager.get_tail(n, raw=raw, output=get_output)
+ else:
+ if args.pattern:
+ range_pattern = "*" + " ".join(args.pattern) + "*"
+ print_nums = True
+ hist = history_manager.get_range_by_str(
+ " ".join(args.range), raw, get_output
+ )
+
+ # We could be displaying the entire history, so let's not try to pull
+ # it into a list in memory. Anything that needs more space will just
+ # misalign.
+ width = 4
+
+ for session, lineno, inline in hist:
+ # Print user history with tabs expanded to 4 spaces. The GUI
+ # clients use hard tabs for easier usability in auto-indented code,
+ # but we want to produce PEP-8 compliant history for safe pasting
+ # into an editor.
+ if get_output:
+ inline, output = inline
+ if range_pattern:
+ if not fnmatch.fnmatch(inline, range_pattern):
+ continue
+ inline = inline.expandtabs(4).rstrip()
+
+ multiline = "\n" in inline
+ line_sep = '\n' if multiline else ' '
+ if print_nums:
+ print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
+ line_sep), file=outfile, end=u'')
+ if pyprompts:
+ print(u">>> ", end=u"", file=outfile)
+ if multiline:
+ inline = "\n... ".join(inline.splitlines()) + "\n..."
+ print(inline, file=outfile)
+ if get_output and output:
+ print(output, file=outfile)
+
+ if close_at_end:
+ outfile.close()
+
+ @line_magic
+ def recall(self, arg):
+ r"""Repeat a command, or get command to input line for editing.
+
+ %recall and %rep are equivalent.
+
+ - %recall (no arguments):
+
+ Place a string version of last computation result (stored in the
+ special '_' variable) to the next input prompt. Allows you to create
+ elaborate command lines without using copy-paste::
+
+ In[1]: l = ["hei", "vaan"]
+ In[2]: "".join(l)
+ Out[2]: heivaan
+ In[3]: %recall
+ In[4]: heivaan_ <== cursor blinking
+
+ %recall 45
+
+ Place history line 45 on the next input prompt. Use %hist to find
+ out the number.
+
+ %recall 1-4
+
+ Combine the specified lines into one cell, and place it on the next
+ input prompt. See %history for the slice syntax.
+
+ %recall foo+bar
+
+ If foo+bar can be evaluated in the user namespace, the result is
+ placed at the next input prompt. Otherwise, the history is searched
+ for lines which contain that substring, and the most recent one is
+ placed at the next input prompt.
+ """
+ if not arg: # Last output
+ self.shell.set_next_input(str(self.shell.user_ns["_"]))
+ return
+ # Get history range
+ histlines = self.shell.history_manager.get_range_by_str(arg)
+ cmd = "\n".join(x[2] for x in histlines)
+ if cmd:
+ self.shell.set_next_input(cmd.rstrip())
+ return
+
+ try: # Variable in user namespace
+ cmd = str(eval(arg, self.shell.user_ns))
+ except Exception: # Search for term in history
+ histlines = self.shell.history_manager.search("*"+arg+"*")
+ for h in reversed([x[2] for x in histlines]):
+ if 'recall' in h or 'rep' in h:
+ continue
+ self.shell.set_next_input(h.rstrip())
+ return
+ else:
+ self.shell.set_next_input(cmd.rstrip())
+ return
+ print("Couldn't evaluate or find in history:", arg)
+
+ @line_magic
+ def rerun(self, parameter_s=''):
+ """Re-run previous input
+
+ By default, you can specify ranges of input history to be repeated
+ (as with %history). With no arguments, it will repeat the last line.
+
+ Options:
+
+ -l <n> : Repeat the last n lines of input, not including the
+ current command.
+
+ -g foo : Repeat the most recent line which contains foo
+ """
+ opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')
+ if "l" in opts: # Last n lines
+ try:
+ n = int(opts["l"])
+ except ValueError:
+ print("Number of lines must be an integer")
+ return
+
+ if n == 0:
+ print("Requested 0 last lines - nothing to run")
+ return
+ elif n < 0:
+ print("Number of lines to rerun cannot be negative")
+ return
+
+ hist = self.shell.history_manager.get_tail(n)
+ elif "g" in opts: # Search
+ p = "*"+opts['g']+"*"
+ hist = list(self.shell.history_manager.search(p))
+ for l in reversed(hist):
+ if "rerun" not in l[2]:
+ hist = [l] # The last match which isn't a %rerun
+ break
+ else:
+ hist = [] # No matches except %rerun
+ elif args: # Specify history ranges
+ hist = self.shell.history_manager.get_range_by_str(args)
+ else: # Last line
+ hist = self.shell.history_manager.get_tail(1)
+ hist = [x[2] for x in hist]
+ if not hist:
+ print("No lines in history match specification")
+ return
+ histlines = "\n".join(hist)
+ print("=== Executing: ===")
+ print(histlines)
+ print("=== Output: ===")
+ self.shell.run_cell("\n".join(hist), store_history=False)
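A few representative invocations of the magics above (file and pattern names are illustrative):

    %history -n 4-6          # lines 4-6 of the current session, with line numbers
    %history -g numpy        # glob-search the full saved history for 'numpy'
    %history -l 20           # last 20 lines across all sessions
    %history -f session.py   # write this session's input to a file
    %rerun -g plot           # re-run the most recent line containing 'plot'
    %recall 12               # place history line 12 on the next input prompt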
diff --git a/contrib/python/ipython/py3/IPython/core/magics/logging.py b/contrib/python/ipython/py3/IPython/core/magics/logging.py
new file mode 100644
index 0000000000..b6b8d8a5af
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/logging.py
@@ -0,0 +1,195 @@
+"""Implementation of magic functions for IPython's own logging.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import os
+import sys
+
+# Our own packages
+from IPython.core.magic import Magics, magics_class, line_magic
+from warnings import warn
+from traitlets import Bool
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class LoggingMagics(Magics):
+ """Magics related to all logging machinery."""
+
+ quiet = Bool(False, help=
+ """
+ Suppress output of log state when logging is enabled
+ """
+ ).tag(config=True)
+
+ @line_magic
+ def logstart(self, parameter_s=''):
+ """Start logging anywhere in a session.
+
+ %logstart [-o|-r|-t|-q] [log_name [log_mode]]
+
+ If no name is given, it defaults to a file named 'ipython_log.py' in your
+ current directory, in 'rotate' mode (see below).
+
+ '%logstart name' saves to file 'name' in 'backup' mode. It saves your
+ history up to that point and then continues logging.
+
+ %logstart takes a second optional parameter: logging mode. This can be one
+ of (note that the modes are given unquoted):
+
+ append
+ Keep logging at the end of any existing file.
+
+ backup
+ Rename any existing file to name~ and start name.
+
+ global
+ Append to a single logfile in your home directory.
+
+ over
+ Overwrite any existing log.
+
+ rotate
+ Create rotating logs: name.1~, name.2~, etc.
+
+ Options:
+
+ -o
+ log also IPython's output. In this mode, all commands which
+ generate an Out[NN] prompt are recorded to the logfile, right after
+ their corresponding input line. The output lines are always
+ prepended with a '#[Out]# ' marker, so that the log remains valid
+ Python code.
+
+ Since this marker is always the same, filtering only the output from
+ a log is very easy, using for example a simple awk call::
+
+ awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py
+
+ -r
+ log 'raw' input. Normally, IPython's logs contain the processed
+ input, so that user lines are logged in their final form, converted
+ into valid Python. For example, %Exit is logged as
+ _ip.magic("Exit"). If the -r flag is given, all input is logged
+ exactly as typed, with no transformations applied.
+
+ -t
+ put timestamps before each input line logged (these are put in
+ comments).
+
+ -q
+ suppress output of logstate message when logging is invoked
+ """
+
+ opts,par = self.parse_options(parameter_s,'ortq')
+ log_output = 'o' in opts
+ log_raw_input = 'r' in opts
+ timestamp = 't' in opts
+ quiet = 'q' in opts
+
+ logger = self.shell.logger
+
+ # if no args are given, the defaults set in the logger constructor by
+ # ipython remain valid
+ if par:
+ try:
+ logfname,logmode = par.split()
+ except:
+ logfname = par
+ logmode = 'backup'
+ else:
+ logfname = logger.logfname
+ logmode = logger.logmode
+ # put logfname into rc struct as if it had been called on the command
+ # line, so it ends up saved in the log header. Save the old value in
+ # case we need to restore it...
+ old_logfile = self.shell.logfile
+ if logfname:
+ logfname = os.path.expanduser(logfname)
+ self.shell.logfile = logfname
+
+ loghead = u'# IPython log file\n\n'
+ try:
+ logger.logstart(logfname, loghead, logmode, log_output, timestamp,
+ log_raw_input)
+ except:
+ self.shell.logfile = old_logfile
+ warn("Couldn't start log: %s" % sys.exc_info()[1])
+ else:
+ # log input history up to this point, optionally interleaving
+ # output if requested
+
+ if timestamp:
+ # disable timestamping for the previous history, since we've
+ # lost those already (no time machine here).
+ logger.timestamp = False
+
+ if log_raw_input:
+ input_hist = self.shell.history_manager.input_hist_raw
+ else:
+ input_hist = self.shell.history_manager.input_hist_parsed
+
+ if log_output:
+ log_write = logger.log_write
+ output_hist = self.shell.history_manager.output_hist
+ for n in range(1,len(input_hist)-1):
+ log_write(input_hist[n].rstrip() + u'\n')
+ if n in output_hist:
+ log_write(repr(output_hist[n]),'output')
+ else:
+ logger.log_write(u'\n'.join(input_hist[1:]))
+ logger.log_write(u'\n')
+ if timestamp:
+ # re-enable timestamping
+ logger.timestamp = True
+
+ if not (self.quiet or quiet):
+ print ('Activating auto-logging. '
+ 'Current session state plus future input saved.')
+ logger.logstate()
+
+ @line_magic
+ def logstop(self, parameter_s=''):
+ """Fully stop logging and close log file.
+
+ In order to start logging again, a new %logstart call needs to be made,
+ possibly (though not necessarily) with a new filename, mode and other
+ options."""
+ self.shell.logger.logstop()
+
+ @line_magic
+ def logoff(self, parameter_s=''):
+ """Temporarily stop logging.
+
+ You must have previously started logging."""
+ self.shell.logger.switch_log(0)
+
+ @line_magic
+ def logon(self, parameter_s=''):
+ """Restart logging.
+
+ This function is for restarting logging which you've temporarily
+ stopped with %logoff. For starting logging for the first time, you
+ must use the %logstart function, which allows you to specify an
+ optional log filename."""
+
+ self.shell.logger.switch_log(1)
+
+ @line_magic
+ def logstate(self, parameter_s=''):
+ """Print the status of the logging system."""
+
+ self.shell.logger.logstate()
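Putting the options above together, a typical logging session looks roughly like this (the file name is illustrative):

    %logstart -o -t mylog.py rotate   # log input and output, timestamped, rotating mylog.py
    %logstate                         # show what is being logged and where
    %logoff                           # pause logging temporarily
    %logon                            # resume it
    %logstop                          # close the log file; a new %logstart is needed to log again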
diff --git a/contrib/python/ipython/py3/IPython/core/magics/namespace.py b/contrib/python/ipython/py3/IPython/core/magics/namespace.py
new file mode 100644
index 0000000000..5da8f7161a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/namespace.py
@@ -0,0 +1,711 @@
+"""Implementation of namespace-related magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import gc
+import re
+import sys
+
+# Our own packages
+from IPython.core import page
+from IPython.core.error import StdinNotImplementedError, UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.encoding import DEFAULT_ENCODING
+from IPython.utils.openpy import read_py_file
+from IPython.utils.path import get_py_filename
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class NamespaceMagics(Magics):
+ """Magics to manage various aspects of the user's namespace.
+
+ These include listing variables, introspecting into them, etc.
+ """
+
+ @line_magic
+ def pinfo(self, parameter_s='', namespaces=None):
+ """Provide detailed information about an object.
+
+ '%pinfo object' is just a synonym for object? or ?object."""
+
+ #print 'pinfo par: <%s>' % parameter_s # dbg
+ # detail_level: 0 -> obj? , 1 -> obj??
+ detail_level = 0
+ # We need to detect if we got called as 'pinfo pinfo foo', which can
+ # happen if the user types 'pinfo foo?' at the cmd line.
+ pinfo,qmark1,oname,qmark2 = \
+ re.match(r'(pinfo )?(\?*)(.*?)(\??$)',parameter_s).groups()
+ if pinfo or qmark1 or qmark2:
+ detail_level = 1
+ if "*" in oname:
+ self.psearch(oname)
+ else:
+ self.shell._inspect('pinfo', oname, detail_level=detail_level,
+ namespaces=namespaces)
+
+ @line_magic
+ def pinfo2(self, parameter_s='', namespaces=None):
+ """Provide extra detailed information about an object.
+
+ '%pinfo2 object' is just a synonym for object?? or ??object."""
+ self.shell._inspect('pinfo', parameter_s, detail_level=1,
+ namespaces=namespaces)
+
+ @skip_doctest
+ @line_magic
+ def pdef(self, parameter_s='', namespaces=None):
+ """Print the call signature for any callable object.
+
+ If the object is a class, print the constructor information.
+
+ Examples
+ --------
+ ::
+
+ In [3]: %pdef urllib.urlopen
+ urllib.urlopen(url, data=None, proxies=None)
+ """
+ self.shell._inspect('pdef',parameter_s, namespaces)
+
+ @line_magic
+ def pdoc(self, parameter_s='', namespaces=None):
+ """Print the docstring for an object.
+
+ If the given object is a class, it will print both the class and the
+ constructor docstrings."""
+ self.shell._inspect('pdoc',parameter_s, namespaces)
+
+ @line_magic
+ def psource(self, parameter_s='', namespaces=None):
+ """Print (or run through pager) the source code for an object."""
+ if not parameter_s:
+ raise UsageError('Missing object name.')
+ self.shell._inspect('psource',parameter_s, namespaces)
+
+ @line_magic
+ def pfile(self, parameter_s='', namespaces=None):
+ """Print (or run through pager) the file where an object is defined.
+
+ The file opens at the line where the object definition begins. IPython
+ will honor the environment variable PAGER if set, and otherwise will
+ do its best to print the file in a convenient form.
+
+ If the given argument is not an object currently defined, IPython will
+ try to interpret it as a filename (automatically adding a .py extension
+ if needed). You can thus use %pfile as a syntax highlighting code
+ viewer."""
+
+ # first interpret argument as an object name
+ out = self.shell._inspect('pfile',parameter_s, namespaces)
+ # if not, try the input as a filename
+ if out == 'not found':
+ try:
+ filename = get_py_filename(parameter_s)
+ except IOError as msg:
+ print(msg)
+ return
+ page.page(self.shell.pycolorize(read_py_file(filename, skip_encoding_cookie=False)))
+
+ @line_magic
+ def psearch(self, parameter_s=''):
+ """Search for object in namespaces by wildcard.
+
+ %psearch [options] PATTERN [OBJECT TYPE]
+
+ Note: ? can be used as a synonym for %psearch, at the beginning or at
+ the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
+ rest of the command line must be unchanged (options come first), so
+ for example the following forms are equivalent
+
+ %psearch -i a* function
+ -i a* function?
+ ?-i a* function
+
+ Arguments:
+
+ PATTERN
+
+ where PATTERN is a string containing * as a wildcard similar to its
+ use in a shell. The pattern is matched in all namespaces on the
+ search path. By default, objects starting with a single _ are not
+ matched; many IPython-generated objects have a single underscore.
+ The default is case-insensitive matching. Matching is also done on
+ the attributes of objects, not only on the objects in a module.
+
+ [OBJECT TYPE]
+
+ Is the name of a python type from the types module. The name is
+ given in lowercase without the trailing 'Type', e.g. StringType is
+ written as string. If a type is given, only objects matching that
+ type are matched. Using all here makes the pattern match all
+ types (this is the default).
+
+ Options:
+
+ -a: makes the pattern match even objects whose names start with a
+ single underscore. These names are normally omitted from the
+ search.
+
+ -i/-c: make the pattern case insensitive/sensitive. If neither of
+ these options are given, the default is read from your configuration
+ file, with the option ``InteractiveShell.wildcards_case_sensitive``.
+ If this option is not specified in your configuration file, IPython's
+ internal default is to do a case sensitive search.
+
+ -e/-s NAMESPACE: exclude/search a given namespace. The pattern you
+ specify can be searched in any of the following namespaces:
+ 'builtin', 'user', 'user_global','internal', 'alias', where
+ 'builtin' and 'user' are the search defaults. Note that you should
+ not use quotes when specifying namespaces.
+
+ -l: List all available object types for object matching. This function
+ can be used without arguments.
+
+ 'Builtin' contains the python builtins module, 'user' contains all
+ user data, 'alias' only contains the shell aliases and no python
+ objects, 'internal' contains objects used by IPython. The
+ 'user_global' namespace is only used by embedded IPython instances,
+ and it contains module-level globals. You can add namespaces to the
+ search with -s or exclude them with -e (these options can be given
+ more than once).
+
+ Examples
+ --------
+ ::
+
+ %psearch a* -> objects beginning with an a
+ %psearch -e builtin a* -> objects NOT in the builtin space starting in a
+ %psearch a* function -> all functions beginning with an a
+ %psearch re.e* -> objects beginning with an e in module re
+ %psearch r*.e* -> objects that start with e in modules starting in r
+ %psearch r*.* string -> all strings in modules beginning with r
+
+ Case sensitive search::
+
+ %psearch -c a* list all objects beginning with a lowercase a
+
+ Show objects beginning with a single _::
+
+ %psearch -a _* list objects beginning with a single underscore
+
+ List available objects::
+
+ %psearch -l list all available object types
+ """
+ # default namespaces to be searched
+ def_search = ['user_local', 'user_global', 'builtin']
+
+ # Process options/args
+ opts,args = self.parse_options(parameter_s,'cias:e:l',list_all=True)
+ opt = opts.get
+ shell = self.shell
+ psearch = shell.inspector.psearch
+
+ # select list object types
+ list_types = False
+ if 'l' in opts:
+ list_types = True
+
+ # select case options
+ if 'i' in opts:
+ ignore_case = True
+ elif 'c' in opts:
+ ignore_case = False
+ else:
+ ignore_case = not shell.wildcards_case_sensitive
+
+ # Build list of namespaces to search from user options
+ def_search.extend(opt('s',[]))
+ ns_exclude = opt('e',[])
+ ns_search = [nm for nm in def_search if nm not in ns_exclude]
+
+ # Call the actual search
+ try:
+ psearch(args,shell.ns_table,ns_search,
+ show_all=opt('a'),ignore_case=ignore_case, list_types=list_types)
+ except:
+ shell.showtraceback()
+
+ @skip_doctest
+ @line_magic
+ def who_ls(self, parameter_s=''):
+ """Return a sorted list of all interactive variables.
+
+ If arguments are given, only variables of types matching these
+ arguments are returned.
+
+ Examples
+ --------
+ Define two variables and list them with who_ls::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %who_ls
+ Out[3]: ['alpha', 'beta']
+
+ In [4]: %who_ls int
+ Out[4]: ['alpha']
+
+ In [5]: %who_ls str
+ Out[5]: ['beta']
+ """
+
+ user_ns = self.shell.user_ns
+ user_ns_hidden = self.shell.user_ns_hidden
+ nonmatching = object() # This can never be in user_ns
+ out = [ i for i in user_ns
+ if not i.startswith('_') \
+ and (user_ns[i] is not user_ns_hidden.get(i, nonmatching)) ]
+
+ typelist = parameter_s.split()
+ if typelist:
+ typeset = set(typelist)
+ out = [i for i in out if type(user_ns[i]).__name__ in typeset]
+
+ out.sort()
+ return out
+
+ @skip_doctest
+ @line_magic
+ def who(self, parameter_s=''):
+ """Print all interactive variables, with some minimal formatting.
+
+ If any arguments are given, only variables whose type matches one of
+ these are printed. For example::
+
+ %who function str
+
+ will only list functions and strings, excluding all other types of
+ variables. To find the proper type names, simply use type(var) at a
+ command line to see how python prints type names. For example:
+
+ ::
+
+ In [1]: type('hello')\\
+ Out[1]: <type 'str'>
+
+ indicates that the type name for strings is 'str'.
+
+ ``%who`` always excludes executed names loaded through your configuration
+ file and things which are internal to IPython.
+
+ This is deliberate, as typically you may load many modules and the
+ purpose of %who is to show you only what you've manually defined.
+
+ Examples
+ --------
+
+ Define two variables and list them with who::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %who
+ alpha beta
+
+ In [4]: %who int
+ alpha
+
+ In [5]: %who str
+ beta
+ """
+
+ varlist = self.who_ls(parameter_s)
+ if not varlist:
+ if parameter_s:
+ print('No variables match your requested type.')
+ else:
+ print('Interactive namespace is empty.')
+ return
+
+ # if we have variables, move on...
+ count = 0
+ for i in varlist:
+ print(i+'\t', end=' ')
+ count += 1
+ if count > 8:
+ count = 0
+ print()
+ print()
+
+ @skip_doctest
+ @line_magic
+ def whos(self, parameter_s=''):
+ """Like %who, but gives some extra information about each variable.
+
+ The same type filtering of %who can be applied here.
+
+ For all variables, the type is printed. Additionally it prints:
+
+ - For {},[],(): their length.
+
+ - For numpy arrays, a summary with shape, number of
+ elements, typecode and size in memory.
+
+ - Everything else: a string representation, snipping their middle if
+ too long.
+
+ Examples
+ --------
+ Define two variables and list them with whos::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %whos
+ Variable Type Data/Info
+ --------------------------------
+ alpha int 123
+ beta str test
+ """
+
+ varnames = self.who_ls(parameter_s)
+ if not varnames:
+ if parameter_s:
+ print('No variables match your requested type.')
+ else:
+ print('Interactive namespace is empty.')
+ return
+
+ # if we have variables, move on...
+
+ # for these types, show len() instead of data:
+ seq_types = ['dict', 'list', 'tuple']
+
+ # for numpy arrays, display summary info
+ ndarray_type = None
+ if 'numpy' in sys.modules:
+ try:
+ from numpy import ndarray
+ except ImportError:
+ pass
+ else:
+ ndarray_type = ndarray.__name__
+
+ # Find all variable names and types so we can figure out column sizes
+
+ # some types are well known and can be shorter
+ abbrevs = {'IPython.core.macro.Macro' : 'Macro'}
+ def type_name(v):
+ tn = type(v).__name__
+ return abbrevs.get(tn,tn)
+
+ varlist = [self.shell.user_ns[n] for n in varnames]
+
+ typelist = []
+ for vv in varlist:
+ tt = type_name(vv)
+
+ if tt=='instance':
+ typelist.append( abbrevs.get(str(vv.__class__),
+ str(vv.__class__)))
+ else:
+ typelist.append(tt)
+
+ # column labels and # of spaces as separator
+ varlabel = 'Variable'
+ typelabel = 'Type'
+ datalabel = 'Data/Info'
+ colsep = 3
+ # variable format strings
+ vformat = "{0:<{varwidth}}{1:<{typewidth}}"
+ aformat = "%s: %s elems, type `%s`, %s bytes"
+ # find the size of the columns to format the output nicely
+ varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
+ typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
+ # table header
+ print(varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
+ ' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1))
+ # and the table itself
+ kb = 1024
+ Mb = 1048576 # kb**2
+ for vname,var,vtype in zip(varnames,varlist,typelist):
+ print(vformat.format(vname, vtype, varwidth=varwidth, typewidth=typewidth), end=' ')
+ if vtype in seq_types:
+ print("n="+str(len(var)))
+ elif vtype == ndarray_type:
+ vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
+ if vtype==ndarray_type:
+ # numpy
+ vsize = var.size
+ vbytes = vsize*var.itemsize
+ vdtype = var.dtype
+
+ if vbytes < 100000:
+ print(aformat % (vshape, vsize, vdtype, vbytes))
+ else:
+ print(aformat % (vshape, vsize, vdtype, vbytes), end=' ')
+ if vbytes < Mb:
+ print('(%s kb)' % (vbytes/kb,))
+ else:
+ print('(%s Mb)' % (vbytes/Mb,))
+ else:
+ try:
+ vstr = str(var)
+ except UnicodeEncodeError:
+ vstr = var.encode(DEFAULT_ENCODING,
+ 'backslashreplace')
+ except:
+ vstr = "<object with id %d (str() failed)>" % id(var)
+ vstr = vstr.replace('\n', '\\n')
+ if len(vstr) < 50:
+ print(vstr)
+ else:
+ print(vstr[:25] + "<...>" + vstr[-25:])
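The table layout above reduces to left-justifying each column to a width computed from its longest entry; a minimal sketch of the same format string (the widths are example values):

    vformat = "{0:<{varwidth}}{1:<{typewidth}}"
    vformat.format("alpha", "int", varwidth=11, typewidth=7) + "123"
    # -> 'alpha      int    123'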
+
+ @line_magic
+ def reset(self, parameter_s=''):
+ """Resets the namespace by removing all names defined by the user, if
+ called without arguments, or by removing some types of objects, such
+ as everything currently in IPython's In[] and Out[] containers (see
+ the parameters for details).
+
+ Parameters
+ ----------
+ -f
+ force reset without asking for confirmation.
+ -s
+ 'Soft' reset: Only clears your namespace, leaving history intact.
+ References to objects may be kept. By default (without this option),
+ we do a 'hard' reset, giving you a new session and removing all
+ references to objects from the current session.
+ --aggressive
+ Try to aggressively remove modules from sys.modules; this
+ may allow you to reimport Python modules that have been updated and
+ pick up changes, but can have unintended consequences.
+
+ in
+ reset input history
+ out
+ reset output history
+ dhist
+ reset directory history
+ array
+ reset only variables that are NumPy arrays
+
+ See Also
+ --------
+ reset_selective : invoked as ``%reset_selective``
+
+ Examples
+ --------
+ ::
+
+ In [6]: a = 1
+
+ In [7]: a
+ Out[7]: 1
+
+ In [8]: 'a' in get_ipython().user_ns
+ Out[8]: True
+
+ In [9]: %reset -f
+
+ In [1]: 'a' in get_ipython().user_ns
+ Out[1]: False
+
+ In [2]: %reset -f in
+ Flushing input history
+
+ In [3]: %reset -f dhist in
+ Flushing directory history
+ Flushing input history
+
+ Notes
+ -----
+ Calling this magic from clients that do not implement standard input,
+ such as the ipython notebook interface, will reset the namespace
+ without confirmation.
+ """
+ opts, args = self.parse_options(parameter_s, "sf", "aggressive", mode="list")
+ if "f" in opts:
+ ans = True
+ else:
+ try:
+ ans = self.shell.ask_yes_no(
+ "Once deleted, variables cannot be recovered. Proceed (y/[n])?",
+ default='n')
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Nothing done.')
+ return
+
+ if 's' in opts: # Soft reset
+ user_ns = self.shell.user_ns
+ for i in self.who_ls():
+ del(user_ns[i])
+ elif len(args) == 0: # Hard reset
+ self.shell.reset(new_session=False, aggressive=("aggressive" in opts))
+
+ # reset in/out/dhist/array: previously extensions/clearcmd.py
+ ip = self.shell
+ user_ns = self.shell.user_ns # local lookup, heavily used
+
+ for target in args:
+ target = target.lower() # make matches case insensitive
+ if target == 'out':
+ print("Flushing output cache (%d entries)" % len(user_ns['_oh']))
+ self.shell.displayhook.flush()
+
+ elif target == 'in':
+ print("Flushing input history")
+ pc = self.shell.displayhook.prompt_count + 1
+ for n in range(1, pc):
+ key = '_i'+repr(n)
+ user_ns.pop(key,None)
+ user_ns.update(dict(_i=u'',_ii=u'',_iii=u''))
+ hm = ip.history_manager
+ # don't delete these, as %save and %macro depend on the
+ # length of these lists being preserved
+ hm.input_hist_parsed[:] = [''] * pc
+ hm.input_hist_raw[:] = [''] * pc
+ # hm has internal machinery for _i,_ii,_iii, clear it out
+ hm._i = hm._ii = hm._iii = hm._i00 = u''
+
+ elif target == 'array':
+ # Support cleaning up numpy arrays
+ try:
+ from numpy import ndarray
+ # This must be done with items and not iteritems because
+ # we're going to modify the dict in-place.
+ for x,val in list(user_ns.items()):
+ if isinstance(val,ndarray):
+ del user_ns[x]
+ except ImportError:
+ print("reset array only works if Numpy is available.")
+
+ elif target == 'dhist':
+ print("Flushing directory history")
+ del user_ns['_dh'][:]
+
+ else:
+ print("Don't know how to reset ", end=' ')
+ print(target + ", please run `%reset?` for details")
+
+ gc.collect()
+
+ @line_magic
+ def reset_selective(self, parameter_s=''):
+ """Resets the namespace by removing names defined by the user.
+
+ Input/Output history are left around in case you need them.
+
+ %reset_selective [-f] regex
+
+ No action is taken if regex is not included
+
+ Options
+ -f : force reset without asking for confirmation.
+
+ See Also
+ --------
+ reset : invoked as ``%reset``
+
+ Examples
+ --------
+ We first fully reset the namespace so your output looks identical to
+ this example for pedagogical reasons; in practice you do not need a
+ full reset::
+
+ In [1]: %reset -f
+
+ Now, with a clean namespace we can make a few variables and use
+ ``%reset_selective`` to only delete names that match our regexp::
+
+ In [2]: a=1; b=2; c=3; b1m=4; b2m=5; b3m=6; b4m=7; b2s=8
+
+ In [3]: who_ls
+ Out[3]: ['a', 'b', 'b1m', 'b2m', 'b2s', 'b3m', 'b4m', 'c']
+
+ In [4]: %reset_selective -f b[2-3]m
+
+ In [5]: who_ls
+ Out[5]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
+
+ In [6]: %reset_selective -f d
+
+ In [7]: who_ls
+ Out[7]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
+
+ In [8]: %reset_selective -f c
+
+ In [9]: who_ls
+ Out[9]: ['a', 'b', 'b1m', 'b2s', 'b4m']
+
+ In [10]: %reset_selective -f b
+
+ In [11]: who_ls
+ Out[11]: ['a']
+
+ Notes
+ -----
+ Calling this magic from clients that do not implement standard input,
+ such as the ipython notebook interface, will reset the namespace
+ without confirmation.
+ """
+
+ opts, regex = self.parse_options(parameter_s,'f')
+
+ if 'f' in opts:
+ ans = True
+ else:
+ try:
+ ans = self.shell.ask_yes_no(
+ "Once deleted, variables cannot be recovered. Proceed (y/[n])? ",
+ default='n')
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Nothing done.')
+ return
+ user_ns = self.shell.user_ns
+ if not regex:
+ print('No regex pattern specified. Nothing done.')
+ return
+ else:
+ try:
+ m = re.compile(regex)
+ except TypeError as e:
+ raise TypeError('regex must be a string or compiled pattern') from e
+ for i in self.who_ls():
+ if m.search(i):
+ del(user_ns[i])
+
+ @line_magic
+ def xdel(self, parameter_s=''):
+ """Delete a variable, trying to clear it from anywhere that
+ IPython's machinery has references to it. By default, this uses
+ the identity of the named object in the user namespace to remove
+ references held under other names. The object is also removed
+ from the output history.
+
+ Options
+ -n : Delete the specified name from all namespaces, without
+ checking their identity.
+ """
+ opts, varname = self.parse_options(parameter_s,'n')
+ try:
+ self.shell.del_var(varname, ('n' in opts))
+ except (NameError, ValueError) as e:
+ print(type(e).__name__ +": "+ str(e))
diff --git a/contrib/python/ipython/py3/IPython/core/magics/osm.py b/contrib/python/ipython/py3/IPython/core/magics/osm.py
new file mode 100644
index 0000000000..f64f1bce6a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/osm.py
@@ -0,0 +1,855 @@
+"""Implementation of magic functions for interaction with the OS.
+
+Note: this module is named 'osm' instead of 'os' to avoid a collision with the
+builtin.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import io
+import os
+import pathlib
+import re
+import sys
+from pprint import pformat
+
+from IPython.core import magic_arguments
+from IPython.core import oinspect
+from IPython.core import page
+from IPython.core.alias import AliasError, Alias
+from IPython.core.error import UsageError
+from IPython.core.magic import (
+ Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic
+)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.openpy import source_to_unicode
+from IPython.utils.process import abbrev_cwd
+from IPython.utils.terminal import set_term_title
+from traitlets import Bool
+from warnings import warn
+
+
+@magics_class
+class OSMagics(Magics):
+ """Magics to interact with the underlying OS (shell-type functionality).
+ """
+
+ cd_force_quiet = Bool(False,
+ help="Force %cd magic to be quiet even if -q is not passed."
+ ).tag(config=True)
+
+ def __init__(self, shell=None, **kwargs):
+
+ # Now define isexec in a cross platform manner.
+ self.is_posix = False
+ self.execre = None
+ if os.name == 'posix':
+ self.is_posix = True
+ else:
+ try:
+ winext = os.environ['pathext'].replace(';','|').replace('.','')
+ except KeyError:
+ winext = 'exe|com|bat|py'
+ try:
+ self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
+ except re.error:
+ warn("Seems like your pathext environmental "
+ "variable is malformed. Please check it to "
+ "enable a proper handle of file extensions "
+ "managed for your system")
+ winext = 'exe|com|bat|py'
+ self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
+
+ # call up the chain
+ super().__init__(shell=shell, **kwargs)
+
+
+ def _isexec_POSIX(self, file):
+ """
+ Test for executable on a POSIX system
+ """
+ if os.access(file.path, os.X_OK):
+ # will fail on macOS if access is not X_OK
+ return file.is_file()
+ return False
+
+
+
+ def _isexec_WIN(self, file):
+ """
+ Test for executable file on non POSIX system
+ """
+ return file.is_file() and self.execre.match(file.name) is not None
+
+ def isexec(self, file):
+ """
+ Test for executable file on non POSIX system
+ """
+ if self.is_posix:
+ return self._isexec_POSIX(file)
+ else:
+ return self._isexec_WIN(file)
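On Windows the executability test reduces to a case-insensitive extension match compiled from PATHEXT (or the fallback list), roughly:

    import re
    execre = re.compile(r'(.*)\.(exe|com|bat|py)$', re.IGNORECASE)
    bool(execre.match("Tool.EXE"))    # True
    bool(execre.match("notes.txt"))   # False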
+
+
+ @skip_doctest
+ @line_magic
+ def alias(self, parameter_s=''):
+ """Define an alias for a system command.
+
+ '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
+
+ Then, typing 'alias_name params' will execute the system command 'cmd
+ params' (from your underlying operating system).
+
+ Aliases have lower precedence than magic functions and Python normal
+ variables, so if 'foo' is both a Python variable and an alias, the
+ alias can not be executed until 'del foo' removes the Python variable.
+
+ You can use the %l specifier in an alias definition to represent the
+ whole line when the alias is called. For example::
+
+ In [2]: alias bracket echo "Input in brackets: <%l>"
+ In [3]: bracket hello world
+ Input in brackets: <hello world>
+
+ You can also define aliases with parameters using %s specifiers (one
+ per parameter)::
+
+ In [1]: alias parts echo first %s second %s
+ In [2]: %parts A B
+ first A second B
+ In [3]: %parts A
+ Incorrect number of arguments: 2 expected.
+ parts is an alias to: 'echo first %s second %s'
+
+ Note that %l and %s are mutually exclusive. You can only use one or
+ the other in your aliases.
+
+ Aliases expand Python variables just like system calls using ! or !!
+ do: all expressions prefixed with '$' get expanded. For details of
+ the semantic rules, see PEP-215:
+ https://peps.python.org/pep-0215/. This is the library used by
+ IPython for variable expansion. If you want to access a true shell
+ variable, an extra $ is necessary to prevent its expansion by
+ IPython::
+
+ In [6]: alias show echo
+ In [7]: PATH='A Python string'
+ In [8]: show $PATH
+ A Python string
+ In [9]: show $$PATH
+ /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
+
+ You can use the alias facility to access all of $PATH. See the %rehashx
+ function, which automatically creates aliases for the contents of your
+ $PATH.
+
+ If called with no parameters, %alias prints the current alias table
+ for your system. For posix systems, the default aliases are 'cat',
+ 'cp', 'mv', 'rm', 'rmdir', and 'mkdir', and other platform-specific
+ aliases are added. For windows-based systems, the default aliases are
+ 'copy', 'ddir', 'echo', 'ls', 'ldir', 'mkdir', 'ren', and 'rmdir'.
+
+ You can see the definition of alias by adding a question mark in the
+ end::
+
+ In [1]: cat?
+ Repr: <alias cat for 'cat'>"""
+
+ par = parameter_s.strip()
+ if not par:
+ aliases = sorted(self.shell.alias_manager.aliases)
+ # stored = self.shell.db.get('stored_aliases', {} )
+ # for k, v in stored:
+ # atab.append(k, v[0])
+
+ print("Total number of aliases:", len(aliases))
+ sys.stdout.flush()
+ return aliases
+
+ # Now try to define a new one
+ try:
+ alias,cmd = par.split(None, 1)
+ except TypeError:
+ print(oinspect.getdoc(self.alias))
+ return
+
+ try:
+ self.shell.alias_manager.define_alias(alias, cmd)
+ except AliasError as e:
+ print(e)
+ # end magic_alias
+
+ @line_magic
+ def unalias(self, parameter_s=''):
+ """Remove an alias"""
+
+ aname = parameter_s.strip()
+ try:
+ self.shell.alias_manager.undefine_alias(aname)
+ except ValueError as e:
+ print(e)
+ return
+
+ stored = self.shell.db.get('stored_aliases', {} )
+ if aname in stored:
+ print("Removing %stored alias",aname)
+ del stored[aname]
+ self.shell.db['stored_aliases'] = stored
+
+ @line_magic
+ def rehashx(self, parameter_s=''):
+ """Update the alias table with all executable files in $PATH.
+
+ rehashx explicitly checks that every entry in $PATH is a file
+ with execute access (os.X_OK).
+
+ Under Windows, it checks executability as a match against a
+ '|'-separated string of extensions derived from the PATHEXT
+ environment variable, falling back to 'exe|com|bat|py'.
+
+ This function also resets the root module cache of module completer,
+ used on slow filesystems.
+ """
+ from IPython.core.alias import InvalidAliasError
+
+ # for the benefit of module completer in ipy_completers.py
+ del self.shell.db['rootmodules_cache']
+
+ path = [os.path.abspath(os.path.expanduser(p)) for p in
+ os.environ.get('PATH','').split(os.pathsep)]
+
+ syscmdlist = []
+ savedir = os.getcwd()
+
+ # Now walk the paths looking for executables to alias.
+ try:
+ # write the whole loop for posix/Windows so we don't have an if in
+ # the innermost part
+ if self.is_posix:
+ for pdir in path:
+ try:
+ os.chdir(pdir)
+ except OSError:
+ continue
+
+ # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
+ dirlist = os.scandir(path=pdir)
+ for ff in dirlist:
+ if self.isexec(ff):
+ fname = ff.name
+ try:
+ # Removes dots from the name since ipython
+ # will assume names with dots to be python.
+ if not self.shell.alias_manager.is_alias(fname):
+ self.shell.alias_manager.define_alias(
+ fname.replace('.',''), fname)
+ except InvalidAliasError:
+ pass
+ else:
+ syscmdlist.append(fname)
+ else:
+ no_alias = Alias.blacklist
+ for pdir in path:
+ try:
+ os.chdir(pdir)
+ except OSError:
+ continue
+
+ # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
+ dirlist = os.scandir(pdir)
+ for ff in dirlist:
+ fname = ff.name
+ base, ext = os.path.splitext(fname)
+ if self.isexec(ff) and base.lower() not in no_alias:
+ if ext.lower() == '.exe':
+ fname = base
+ try:
+ # Removes dots from the name since ipython
+ # will assume names with dots to be python.
+ self.shell.alias_manager.define_alias(
+ base.lower().replace('.',''), fname)
+ except InvalidAliasError:
+ pass
+ syscmdlist.append(fname)
+
+ self.shell.db['syscmdlist'] = syscmdlist
+ finally:
+ os.chdir(savedir)
+
+ @skip_doctest
+ @line_magic
+ def pwd(self, parameter_s=''):
+ """Return the current working directory path.
+
+ Examples
+ --------
+ ::
+
+ In [9]: pwd
+ Out[9]: '/home/tsuser/sprint/ipython'
+ """
+ try:
+ return os.getcwd()
+ except FileNotFoundError as e:
+ raise UsageError("CWD no longer exists - please use %cd to change directory.") from e
+
+ @skip_doctest
+ @line_magic
+ def cd(self, parameter_s=''):
+ """Change the current working directory.
+
+ This command automatically maintains an internal list of directories
+ you visit during your IPython session, in the variable ``_dh``. The
+ command :magic:`%dhist` shows this history nicely formatted. You can
+ also do ``cd -<tab>`` to see directory history conveniently.
+ Usage:
+
+ - ``cd 'dir'``: changes to directory 'dir'.
+ - ``cd -``: changes to the last visited directory.
+ - ``cd -<n>``: changes to the n-th directory in the directory history.
+ - ``cd --foo``: change to directory that matches 'foo' in history
+ - ``cd -b <bookmark_name>``: jump to a bookmark set by %bookmark
+ - Hitting a tab key after ``cd -b`` allows you to tab-complete
+ bookmark names.
+
+ .. note::
+ ``cd <bookmark_name>`` is enough if there is no directory
+ ``<bookmark_name>``, but a bookmark with the name exists.
+
+ Options:
+
+ -q Be quiet. Do not print the working directory after the
+ cd command is executed. By default IPython's cd
+ command does print this directory, since the default
+ prompts do not display path information.
+
+ .. note::
+ Note that ``!cd`` doesn't work for this purpose because the shell
+ where ``!command`` runs is immediately discarded after executing
+ 'command'.
+
+ Examples
+ --------
+ ::
+
+ In [10]: cd parent/child
+ /home/tsuser/parent/child
+ """
+
+ try:
+ oldcwd = os.getcwd()
+ except FileNotFoundError:
+ # Happens if the CWD has been deleted.
+ oldcwd = None
+
+ numcd = re.match(r'(-)(\d+)$',parameter_s)
+ # jump in directory history by number
+ if numcd:
+ nn = int(numcd.group(2))
+ try:
+ ps = self.shell.user_ns['_dh'][nn]
+ except IndexError:
+ print('The requested directory does not exist in history.')
+ return
+ else:
+ opts = {}
+ elif parameter_s.startswith('--'):
+ ps = None
+ fallback = None
+ pat = parameter_s[2:]
+ dh = self.shell.user_ns['_dh']
+ # first search only by basename (last component)
+ for ent in reversed(dh):
+ if pat in os.path.basename(ent) and os.path.isdir(ent):
+ ps = ent
+ break
+
+ if fallback is None and pat in ent and os.path.isdir(ent):
+ fallback = ent
+
+ # if we have no last part match, pick the first full path match
+ if ps is None:
+ ps = fallback
+
+ if ps is None:
+ print("No matching entry in directory history")
+ return
+ else:
+ opts = {}
+
+
+ else:
+ opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
+ # jump to previous
+ if ps == '-':
+ try:
+ ps = self.shell.user_ns['_dh'][-2]
+ except IndexError as e:
+ raise UsageError('%cd -: No previous directory to change to.') from e
+ # jump to bookmark if needed
+ else:
+ if not os.path.isdir(ps) or 'b' in opts:
+ bkms = self.shell.db.get('bookmarks', {})
+
+ if ps in bkms:
+ target = bkms[ps]
+ print('(bookmark:%s) -> %s' % (ps, target))
+ ps = target
+ else:
+ if 'b' in opts:
+ raise UsageError("Bookmark '%s' not found. "
+ "Use '%%bookmark -l' to see your bookmarks." % ps)
+
+ # at this point ps should point to the target dir
+ if ps:
+ try:
+ os.chdir(os.path.expanduser(ps))
+ if hasattr(self.shell, 'term_title') and self.shell.term_title:
+ set_term_title(self.shell.term_title_format.format(cwd=abbrev_cwd()))
+ except OSError:
+ print(sys.exc_info()[1])
+ else:
+ cwd = pathlib.Path.cwd()
+ dhist = self.shell.user_ns['_dh']
+ if oldcwd != cwd:
+ dhist.append(cwd)
+ self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
+
+ else:
+ os.chdir(self.shell.home_dir)
+ if hasattr(self.shell, 'term_title') and self.shell.term_title:
+ set_term_title(self.shell.term_title_format.format(cwd="~"))
+ cwd = pathlib.Path.cwd()
+ dhist = self.shell.user_ns['_dh']
+
+ if oldcwd != cwd:
+ dhist.append(cwd)
+ self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
+ if not 'q' in opts and not self.cd_force_quiet and self.shell.user_ns['_dh']:
+ print(self.shell.user_ns['_dh'][-1])
+
+ @line_magic
+ def env(self, parameter_s=''):
+ """Get, set, or list environment variables.
+
+ Usage:\\
+
+ :``%env``: lists all environment variables/values
+ :``%env var``: get value for var
+ :``%env var val``: set value for var
+ :``%env var=val``: set value for var
+ :``%env var=$val``: set value for var, using python expansion if possible
+ """
+ if parameter_s.strip():
+ split = '=' if '=' in parameter_s else ' '
+ bits = parameter_s.split(split)
+ if len(bits) == 1:
+ key = parameter_s.strip()
+ if key in os.environ:
+ return os.environ[key]
+ else:
+ err = "Environment does not have key: {0}".format(key)
+ raise UsageError(err)
+ if len(bits) > 1:
+ return self.set_env(parameter_s)
+ env = dict(os.environ)
+ # hide likely secrets when printing the whole environment
+ for key in list(env):
+ if any(s in key.lower() for s in ('key', 'token', 'secret')):
+ env[key] = '<hidden>'
+
+ return env
+
+ @line_magic
+ def set_env(self, parameter_s):
+ """Set environment variables. Assumptions are that either "val" is a
+ name in the user namespace, or val is something that evaluates to a
+ string.
+
+ Usage:\\
+ :``%set_env var val``: set value for var
+ :``%set_env var=val``: set value for var
+ :``%set_env var=$val``: set value for var, using python expansion if possible
+ """
+ split = '=' if '=' in parameter_s else ' '
+ bits = parameter_s.split(split, 1)
+ if not parameter_s.strip() or len(bits)<2:
+ raise UsageError("usage is 'set_env var=val'")
+ var = bits[0].strip()
+ val = bits[1].strip()
+ if re.match(r'.*\s.*', var):
+ # an environment variable with whitespace is almost certainly
+ # not what the user intended. what's more likely is the wrong
+ # split was chosen, ie for "set_env cmd_args A=B", we chose
+ # '=' for the split and should have chosen ' '. to get around
+ # this, users should just assign directly to os.environ or use
+ # standard magic {var} expansion.
+ err = "refusing to set env var with whitespace: '{0}'"
+ err = err.format(var)
+ raise UsageError(err)
+ os.environ[var] = val
+ print('env: {0}={1}'.format(var,val))
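A usage sketch of the two magics (variable names and values are illustrative); note that the full %env listing masks any variable whose name contains 'key', 'token' or 'secret':

    %set_env MY_VAR=hello    # sets os.environ['MY_VAR'] and echoes it
    %env MY_VAR              # -> 'hello'
    %env MY_TOKEN=abc123     # with an '=', %env behaves like %set_env
    %env                     # full listing; MY_TOKEN is shown as '<hidden>'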
+
+ @line_magic
+ def pushd(self, parameter_s=''):
+ """Place the current dir on stack and change directory.
+
+ Usage:\\
+ %pushd ['dirname']
+ """
+
+ dir_s = self.shell.dir_stack
+ tgt = os.path.expanduser(parameter_s)
+ cwd = os.getcwd().replace(self.shell.home_dir,'~')
+ if tgt:
+ self.cd(parameter_s)
+ dir_s.insert(0,cwd)
+ return self.shell.run_line_magic('dirs', '')
+
+ @line_magic
+ def popd(self, parameter_s=''):
+ """Change to directory popped off the top of the stack.
+ """
+ if not self.shell.dir_stack:
+ raise UsageError("%popd on empty stack")
+ top = self.shell.dir_stack.pop(0)
+ self.cd(top)
+ print("popd ->",top)
+
+ @line_magic
+ def dirs(self, parameter_s=''):
+ """Return the current directory stack."""
+
+ return self.shell.dir_stack
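The three directory-stack magics above compose as follows (paths are illustrative):

    %pushd /tmp    # remember the current directory, then cd to /tmp
    %dirs          # list the remembered directories
    %popd          # cd back to the most recently pushed directory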
+
+ @line_magic
+ def dhist(self, parameter_s=''):
+ """Print your history of visited directories.
+
+ %dhist -> print full history\\
+ %dhist n -> print last n entries only\\
+ %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\
+
+ This history is automatically maintained by the %cd command, and
+ always available as the global list variable _dh. You can use %cd -<n>
+ to go to directory number <n>.
+
+ Note that most of the time, you should view directory history by entering
+ cd -<TAB>.
+
+ """
+
+ dh = self.shell.user_ns['_dh']
+ if parameter_s:
+ try:
+ args = list(map(int,parameter_s.split()))  # materialize so len() and indexing work below
+ except:
+ self.arg_err(self.dhist)
+ return
+ if len(args) == 1:
+ ini,fin = max(len(dh)-(args[0]),0),len(dh)
+ elif len(args) == 2:
+ ini,fin = args
+ fin = min(fin, len(dh))
+ else:
+ self.arg_err(self.dhist)
+ return
+ else:
+ ini,fin = 0,len(dh)
+ print('Directory history (kept in _dh)')
+ for i in range(ini, fin):
+ print("%d: %s" % (i, dh[i]))
+
+ @skip_doctest
+ @line_magic
+ def sc(self, parameter_s=''):
+ """Shell capture - run shell command and capture output (DEPRECATED use !).
+
+ DEPRECATED. Suboptimal, retained for backwards compatibility.
+
+ You should use the form 'var = !command' instead. Example:
+
+ "%sc -l myfiles = ls ~" should now be written as
+
+ "myfiles = !ls ~"
+
+ myfiles.s, myfiles.l and myfiles.n still apply as documented
+ below.
+
+ --
+ %sc [options] varname=command
+
+ IPython will run the given command using commands.getoutput(), and
+ will then update the user's interactive namespace with a variable
+ called varname, containing the value of the call. Your command can
+ contain shell wildcards, pipes, etc.
+
+ The '=' sign in the syntax is mandatory, and the variable name you
+ supply must follow Python's standard conventions for valid names.
+
+ (A special format without variable name exists for internal use)
+
+ Options:
+
+ -l: list output. Split the output on newlines into a list before
+ assigning it to the given variable. By default the output is stored
+ as a single string.
+
+ -v: verbose. Print the contents of the variable.
+
+ In most cases you should not need to split as a list, because the
+ returned value is a special type of string which can automatically
+ provide its contents either as a list (split on newlines) or as a
+ space-separated string. These are convenient, respectively, either
+ for sequential processing or to be passed to a shell command.
+
+ For example::
+
+ # Capture into variable a
+ In [1]: sc a=ls *py
+
+ # a is a string with embedded newlines
+ In [2]: a
+ Out[2]: 'setup.py\\nwin32_manual_post_install.py'
+
+ # which can be seen as a list:
+ In [3]: a.l
+ Out[3]: ['setup.py', 'win32_manual_post_install.py']
+
+ # or as a whitespace-separated string:
+ In [4]: a.s
+ Out[4]: 'setup.py win32_manual_post_install.py'
+
+ # a.s is useful to pass as a single command line:
+ In [5]: !wc -l $a.s
+ 146 setup.py
+ 130 win32_manual_post_install.py
+ 276 total
+
+ # while the list form is useful to loop over:
+ In [6]: for f in a.l:
+ ...: !wc -l $f
+ ...:
+ 146 setup.py
+ 130 win32_manual_post_install.py
+
+ Similarly, the lists returned by the -l option are also special, in
+ the sense that you can equally invoke the .s attribute on them to
+ automatically get a whitespace-separated string from their contents::
+
+ In [7]: sc -l b=ls *py
+
+ In [8]: b
+ Out[8]: ['setup.py', 'win32_manual_post_install.py']
+
+ In [9]: b.s
+ Out[9]: 'setup.py win32_manual_post_install.py'
+
+ In summary, both the lists and strings used for output capture have
+ the following special attributes::
+
+ .l (or .list) : value as list.
+ .n (or .nlstr): value as newline-separated string.
+ .s (or .spstr): value as space-separated string.
+ """
+
+ opts,args = self.parse_options(parameter_s, 'lv')
+ # Try to get a variable name and command to run
+ try:
+ # the variable name must be obtained from the parse_options
+ # output, which uses shlex.split to strip options out.
+ var,_ = args.split('=', 1)
+ var = var.strip()
+ # But the command has to be extracted from the original input
+ # parameter_s, not on what parse_options returns, to avoid the
+ # quote stripping which shlex.split performs on it.
+ _,cmd = parameter_s.split('=', 1)
+ except ValueError:
+ var,cmd = '',''
+ # If all looks ok, proceed
+ split = 'l' in opts
+ out = self.shell.getoutput(cmd, split=split)
+ if 'v' in opts:
+ print('%s ==\n%s' % (var, pformat(out)))
+ if var:
+ self.shell.user_ns.update({var:out})
+ else:
+ return out
+
+ @line_cell_magic
+ def sx(self, line='', cell=None):
+ """Shell execute - run shell command and capture output (!! is short-hand).
+
+ %sx command
+
+ IPython will run the given command using commands.getoutput(), and
+ return the result formatted as a list (split on '\\n'). Since the
+ output is _returned_, it will be stored in ipython's regular output
+ cache Out[N] and in the '_N' automatic variables.
+
+ Notes:
+
+ 1) If an input line begins with '!!', then %sx is automatically
+ invoked. That is, while::
+
+ !ls
+
+ causes ipython to simply issue system('ls'), typing::
+
+ !!ls
+
+ is a shorthand equivalent to::
+
+ %sx ls
+
+ 2) %sx differs from %sc in that %sx automatically splits into a list,
+ like '%sc -l'. The reason for this is to make it as easy as possible
+ to process line-oriented shell output via further python commands.
+ %sc is meant to provide much finer control, but requires more
+ typing.
+
+ 3) Just like %sc -l, this is a list with special attributes:
+ ::
+
+ .l (or .list) : value as list.
+ .n (or .nlstr): value as newline-separated string.
+ .s (or .spstr): value as whitespace-separated string.
+
+ This is very useful when trying to use such lists as arguments to
+ system commands."""
+
+ if cell is None:
+ # line magic
+ return self.shell.getoutput(line)
+ else:
+ opts,args = self.parse_options(line, '', 'out=')
+ output = self.shell.getoutput(cell)
+ out_name = opts.get('out', opts.get('o'))
+ if out_name:
+ self.shell.user_ns[out_name] = output
+ else:
+ return output
+
+ system = line_cell_magic('system')(sx)
+ bang = cell_magic('!')(sx)
+
+ @line_magic
+ def bookmark(self, parameter_s=''):
+ """Manage IPython's bookmark system.
+
+ %bookmark <name> - set bookmark to current dir
+ %bookmark <name> <dir> - set bookmark to <dir>
+ %bookmark -l - list all bookmarks
+ %bookmark -d <name> - remove bookmark
+ %bookmark -r - remove all bookmarks
+
+ You can later on access a bookmarked folder with::
+
+ %cd -b <name>
+
+ or simply '%cd <name>' if there is no directory called <name> AND
+ there is such a bookmark defined.
+
+ Your bookmarks persist through IPython sessions, but they are
+ associated with each profile."""
+
+ opts,args = self.parse_options(parameter_s,'drl',mode='list')
+ if len(args) > 2:
+ raise UsageError("%bookmark: too many arguments")
+
+ bkms = self.shell.db.get('bookmarks',{})
+
+ if 'd' in opts:
+ try:
+ todel = args[0]
+ except IndexError as e:
+ raise UsageError(
+ "%bookmark -d: must provide a bookmark to delete") from e
+ else:
+ try:
+ del bkms[todel]
+ except KeyError as e:
+ raise UsageError(
+ "%%bookmark -d: Can't delete bookmark '%s'" % todel) from e
+
+ elif 'r' in opts:
+ bkms = {}
+ elif 'l' in opts:
+ bks = sorted(bkms)
+ if bks:
+ size = max(map(len, bks))
+ else:
+ size = 0
+ fmt = '%-'+str(size)+'s -> %s'
+ print('Current bookmarks:')
+ for bk in bks:
+ print(fmt % (bk, bkms[bk]))
+ else:
+ if not args:
+ raise UsageError("%bookmark: You must specify the bookmark name")
+ elif len(args)==1:
+ bkms[args[0]] = os.getcwd()
+ elif len(args)==2:
+ bkms[args[0]] = args[1]
+ self.shell.db['bookmarks'] = bkms
+
+ @line_magic
+ def pycat(self, parameter_s=''):
+ """Show a syntax-highlighted file through a pager.
+
+ This magic is similar to the cat utility, but it will assume the file
+ to be Python source and will show it with syntax highlighting.
+
+ This magic command can take a local filename, a URL, a history
+ range (see %history) or a macro as its argument.
+
+ If no parameter is given, it prints the history of the current session up
+ to this point. ::
+
+ %pycat myscript.py
+ %pycat 7-27
+ %pycat myMacro
+ %pycat http://www.example.com/myscript.py
+ """
+ try:
+ cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
+ except (ValueError, IOError):
+ print("Error: no such file, variable, URL, history range or macro")
+ return
+
+ page.page(self.shell.pycolorize(source_to_unicode(cont)))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-a', '--append', action='store_true', default=False,
+ help='Append contents of the cell to an existing file. '
+ 'The file will be created if it does not exist.'
+ )
+ @magic_arguments.argument(
+ 'filename', type=str,
+ help='file to write'
+ )
+ @cell_magic
+ def writefile(self, line, cell):
+ """Write the contents of the cell to a file.
+
+ The file will be overwritten unless the -a (--append) flag is specified.
+ """
+ args = magic_arguments.parse_argstring(self.writefile, line)
+ if re.match(r'^(\'.*\')|(".*")$', args.filename):
+ filename = os.path.expanduser(args.filename[1:-1])
+ else:
+ filename = os.path.expanduser(args.filename)
+
+ if os.path.exists(filename):
+ if args.append:
+ print("Appending to %s" % filename)
+ else:
+ print("Overwriting %s" % filename)
+ else:
+ print("Writing %s" % filename)
+
+ mode = 'a' if args.append else 'w'
+ with io.open(filename, mode, encoding='utf-8') as f:
+ f.write(cell)
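+
+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A minimal, hypothetical illustration of the quoted-filename handling used by
+# %%writefile above: names wrapped in matching quotes are unquoted before user
+# expansion, everything else is expanded as-is. The sample names are made up.
+def _writefile_filename_sketch():
+    for raw in ('"my notes.txt"', "'~/notes.txt'", "plain.txt"):
+        if re.match(r'^(\'.*\')|(".*")$', raw):
+            resolved = os.path.expanduser(raw[1:-1])  # strip surrounding quotes
+        else:
+            resolved = os.path.expanduser(raw)
+        print("%s -> %s" % (raw, resolved))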
diff --git a/contrib/python/ipython/py3/IPython/core/magics/packaging.py b/contrib/python/ipython/py3/IPython/core/magics/packaging.py
new file mode 100644
index 0000000000..2f7652c169
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/packaging.py
@@ -0,0 +1,112 @@
+"""Implementation of packaging-related magic functions.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2018 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import re
+import shlex
+import sys
+from pathlib import Path
+
+from IPython.core.magic import Magics, magics_class, line_magic
+
+
+def _is_conda_environment():
+ """Return True if the current Python executable is in a conda env"""
+ # TODO: does this need to change on windows?
+ return Path(sys.prefix, "conda-meta", "history").exists()
+
+
+def _get_conda_executable():
+ """Find the path to the conda executable"""
+ # Check if there is a conda executable in the same directory as the Python executable.
+ # This is the case within conda's root environment.
+ conda = Path(sys.executable).parent / "conda"
+ if conda.is_file():
+ return str(conda)
+
+ # Otherwise, attempt to extract the executable from conda history.
+ # This applies in any conda environment.
+ history = Path(sys.prefix, "conda-meta", "history").read_text(encoding="utf-8")
+ match = re.search(
+ r"^#\s*cmd:\s*(?P<command>.*conda)\s[create|install]",
+ history,
+ flags=re.MULTILINE,
+ )
+ if match:
+ return match.groupdict()["command"]
+
+ # Fallback: assume conda is available on the system path.
+ return "conda"
+
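+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A hedged example of how the conda-history regex above is expected to behave.
+# The sample history line (and the miniconda path in it) is fabricated purely
+# for illustration.
+def _conda_history_regex_sketch():
+    sample = "# cmd: /opt/miniconda3/bin/conda create -n demo python=3.11\n"
+    match = re.search(
+        r"^#\s*cmd:\s*(?P<command>.*conda)\s[create|install]",
+        sample,
+        flags=re.MULTILINE,
+    )
+    # With the sample line this yields '/opt/miniconda3/bin/conda'.
+    return match.groupdict()["command"] if match else "conda"
+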
+
+CONDA_COMMANDS_REQUIRING_PREFIX = {
+ 'install', 'list', 'remove', 'uninstall', 'update', 'upgrade',
+}
+CONDA_COMMANDS_REQUIRING_YES = {
+ 'install', 'remove', 'uninstall', 'update', 'upgrade',
+}
+CONDA_ENV_FLAGS = {'-p', '--prefix', '-n', '--name'}
+CONDA_YES_FLAGS = {'-y', '--y'}
+
+
+@magics_class
+class PackagingMagics(Magics):
+ """Magics related to packaging & installation"""
+
+ @line_magic
+ def pip(self, line):
+ """Run the pip package manager within the current kernel.
+
+ Usage:
+ %pip install [pkgs]
+ """
+ python = sys.executable
+ if sys.platform == "win32":
+ python = '"' + python + '"'
+ else:
+ python = shlex.quote(python)
+
+ self.shell.system(" ".join([python, "-m", "pip", line]))
+
+ print("Note: you may need to restart the kernel to use updated packages.")
+
+ @line_magic
+ def conda(self, line):
+ """Run the conda package manager within the current kernel.
+
+ Usage:
+ %conda install [pkgs]
+ """
+ if not _is_conda_environment():
+ raise ValueError("The python kernel does not appear to be a conda environment. "
+ "Please use ``%pip install`` instead.")
+
+ conda = _get_conda_executable()
+ args = shlex.split(line)
+ command = args[0] if len(args) > 0 else ""
+ args = args[1:] if len(args) > 1 else [""]
+
+ extra_args = []
+
+ # When the subprocess does not allow us to respond "yes" during the installation,
+ # we need to insert --yes in the argument list for some commands
+ stdin_disabled = getattr(self.shell, 'kernel', None) is not None
+ needs_yes = command in CONDA_COMMANDS_REQUIRING_YES
+ has_yes = set(args).intersection(CONDA_YES_FLAGS)
+ if stdin_disabled and needs_yes and not has_yes:
+ extra_args.append("--yes")
+
+ # Add --prefix to point conda installation to the current environment
+ needs_prefix = command in CONDA_COMMANDS_REQUIRING_PREFIX
+ has_prefix = set(args).intersection(CONDA_ENV_FLAGS)
+ if needs_prefix and not has_prefix:
+ extra_args.extend(["--prefix", sys.prefix])
+
+ self.shell.system(' '.join([conda, command] + extra_args + args))
+ print("\nNote: you may need to restart the kernel to use updated packages.")
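+
+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A minimal illustration of the flag-injection logic used by %conda above:
+# environment-modifying commands get --yes when stdin is unavailable, and
+# --prefix when no environment was named explicitly. The argument values are
+# hypothetical.
+def _conda_extra_args_sketch(command="install", args=("numpy",), stdin_disabled=True):
+    extra_args = []
+    if (stdin_disabled and command in CONDA_COMMANDS_REQUIRING_YES
+            and not set(args).intersection(CONDA_YES_FLAGS)):
+        extra_args.append("--yes")
+    if (command in CONDA_COMMANDS_REQUIRING_PREFIX
+            and not set(args).intersection(CONDA_ENV_FLAGS)):
+        extra_args.extend(["--prefix", sys.prefix])
+    # e.g. ['--yes', '--prefix', sys.prefix] for "%conda install numpy" in a kernel
+    return extra_args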
diff --git a/contrib/python/ipython/py3/IPython/core/magics/pylab.py b/contrib/python/ipython/py3/IPython/core/magics/pylab.py
new file mode 100644
index 0000000000..2a69453ac9
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/pylab.py
@@ -0,0 +1,169 @@
+"""Implementation of magic functions for matplotlib/pylab support.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from traitlets.config.application import Application
+from IPython.core import magic_arguments
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from warnings import warn
+from IPython.core.pylabtools import backends
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+magic_gui_arg = magic_arguments.argument(
+ 'gui', nargs='?',
+ help="""Name of the matplotlib backend to use %s.
+ If given, the corresponding matplotlib backend is used,
+ otherwise it will be matplotlib's default
+ (which you can set in your matplotlib config file).
+ """ % str(tuple(sorted(backends.keys())))
+)
+
+
+@magics_class
+class PylabMagics(Magics):
+ """Magics related to matplotlib's pylab support"""
+
+ @skip_doctest
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('-l', '--list', action='store_true',
+ help='Show available matplotlib backends')
+ @magic_gui_arg
+ def matplotlib(self, line=''):
+ """Set up matplotlib to work interactively.
+
+ This function lets you activate matplotlib interactive support
+ at any point during an IPython session. It does not import anything
+ into the interactive namespace.
+
+ If you are using the inline matplotlib backend in the IPython Notebook
+ you can set which figure formats are enabled using the following::
+
+ In [1]: from matplotlib_inline.backend_inline import set_matplotlib_formats
+
+ In [2]: set_matplotlib_formats('pdf', 'svg')
+
+ The default for inline figures sets `bbox_inches` to 'tight'. This can
+ cause discrepancies between the displayed image and the identical
+ image created using `savefig`. This behavior can be disabled using the
+ `%config` magic::
+
+ In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
+
+ In addition, see the docstrings of
+ `matplotlib_inline.backend_inline.set_matplotlib_formats` and
+ `matplotlib_inline.backend_inline.set_matplotlib_close` for more information on
+ changing additional behaviors of the inline backend.
+
+ Examples
+ --------
+ To enable the inline backend for usage with the IPython Notebook::
+
+ In [1]: %matplotlib inline
+
+ In this case, where the matplotlib default is TkAgg::
+
+ In [2]: %matplotlib
+ Using matplotlib backend: TkAgg
+
+ But you can explicitly request a different GUI backend::
+
+ In [3]: %matplotlib qt
+
+ You can list the available backends using the -l/--list option::
+
+ In [4]: %matplotlib --list
+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'gtk4', 'notebook', 'wx', 'qt', 'nbagg',
+ 'gtk', 'tk', 'inline']
+ """
+ args = magic_arguments.parse_argstring(self.matplotlib, line)
+ if args.list:
+ backends_list = list(backends.keys())
+ print("Available matplotlib backends: %s" % backends_list)
+ else:
+ gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
+ self._show_matplotlib_backend(args.gui, backend)
+
+ @skip_doctest
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '--no-import-all', action='store_true', default=None,
+ help="""Prevent IPython from performing ``import *`` into the interactive namespace.
+
+ You can govern the default behavior of this flag with the
+ InteractiveShellApp.pylab_import_all configurable.
+ """
+ )
+ @magic_gui_arg
+ def pylab(self, line=''):
+ """Load numpy and matplotlib to work interactively.
+
+ This function lets you activate pylab (matplotlib, numpy and
+ interactive support) at any point during an IPython session.
+
+ %pylab makes the following imports::
+
+ import numpy
+ import matplotlib
+ from matplotlib import pylab, mlab, pyplot
+ np = numpy
+ plt = pyplot
+
+ from IPython.display import display
+ from IPython.core.pylabtools import figsize, getfigs
+
+ from pylab import *
+ from numpy import *
+
+ If you pass `--no-import-all`, the last two `*` imports will be excluded.
+
+ See the %matplotlib magic for more details about activating matplotlib
+ without affecting the interactive namespace.
+ """
+ args = magic_arguments.parse_argstring(self.pylab, line)
+ if args.no_import_all is None:
+ # get default from Application
+ if Application.initialized():
+ app = Application.instance()
+ try:
+ import_all = app.pylab_import_all
+ except AttributeError:
+ import_all = True
+ else:
+ # nothing specified, no app - default True
+ import_all = True
+ else:
+ # invert no-import flag
+ import_all = not args.no_import_all
+
+ gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
+ self._show_matplotlib_backend(args.gui, backend)
+ print(
+ "%pylab is deprecated, use %matplotlib inline and import the required libraries."
+ )
+ print("Populating the interactive namespace from numpy and matplotlib")
+ if clobbered:
+ warn("pylab import has clobbered these variables: %s" % clobbered +
+ "\n`%matplotlib` prevents importing * from pylab and numpy"
+ )
+
+ def _show_matplotlib_backend(self, gui, backend):
+ """Print which matplotlib backend is in use (when none was requested explicitly)."""
+ if not gui or gui == 'auto':
+ print("Using matplotlib backend: %s" % backend)
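+
+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A hedged example of driving these magics programmatically. It assumes a
+# running IPython shell and an installed matplotlib (plus matplotlib-inline
+# for the 'inline' backend); the backend names are only examples.
+def _matplotlib_magic_usage_sketch():
+    from IPython import get_ipython
+    ip = get_ipython()
+    if ip is None:
+        return
+    ip.run_line_magic("matplotlib", "--list")    # print the available backends
+    ip.run_line_magic("matplotlib", "inline")    # activate the inline backend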
diff --git a/contrib/python/ipython/py3/IPython/core/magics/script.py b/contrib/python/ipython/py3/IPython/core/magics/script.py
new file mode 100644
index 0000000000..a858c6489c
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/magics/script.py
@@ -0,0 +1,371 @@
+"""Magic functions for running cells in various scripts."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import asyncio
+import asyncio.exceptions
+import atexit
+import errno
+import os
+import signal
+import sys
+import time
+from subprocess import CalledProcessError
+from threading import Thread
+
+from traitlets import Any, Dict, List, default
+
+from IPython.core import magic_arguments
+from IPython.core.async_helpers import _AsyncIOProxy
+from IPython.core.magic import Magics, cell_magic, line_magic, magics_class
+from IPython.utils.process import arg_split
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+def script_args(f):
+ """single decorator for adding script args"""
+ args = [
+ magic_arguments.argument(
+ '--out', type=str,
+ help="""The variable in which to store stdout from the script.
+ If the script is backgrounded, this will be the stdout *pipe*,
+ instead of the stdout text itself, and will not be auto-closed.
+ """
+ ),
+ magic_arguments.argument(
+ '--err', type=str,
+ help="""The variable in which to store stderr from the script.
+ If the script is backgrounded, this will be the stderr *pipe*,
+ instead of the stderr text itself and will not be autoclosed.
+ """
+ ),
+ magic_arguments.argument(
+ '--bg', action="store_true",
+ help="""Whether to run the script in the background.
+ If given, the only way to see the output of the command is
+ with --out/err.
+ """
+ ),
+ magic_arguments.argument(
+ '--proc', type=str,
+ help="""The variable in which to store Popen instance.
+ This is used only when --bg option is given.
+ """
+ ),
+ magic_arguments.argument(
+ '--no-raise-error', action="store_false", dest='raise_error',
+ help="""Whether to raise an error in addition to streaming the
+ script's stderr if it exits with a nonzero status code.
+ """,
+ ),
+ ]
+ for arg in args:
+ f = arg(f)
+ return f
+
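+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A hedged illustration of how script_args is meant to be used: it stacks the
+# --out/--err/--bg/--proc/--no-raise-error options onto a magic function so
+# that parse_argstring() can read them later. The stub below exists only for
+# illustration and is never registered as a magic.
+@magic_arguments.magic_arguments()
+@script_args
+def _script_args_usage_sketch(line, cell):
+    # e.g. line="--bg --out result" gives args.bg == True and args.out == "result"
+    args = magic_arguments.parse_argstring(_script_args_usage_sketch, line)
+    return args
+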
+
+@magics_class
+class ScriptMagics(Magics):
+ """Magics for talking to scripts
+
+ This defines a base `%%script` cell magic for running a cell
+ with a program in a subprocess, and registers a few top-level
+ magics that call %%script with common interpreters.
+ """
+
+ event_loop = Any(
+ help="""
+ The event loop on which to run subprocesses
+
+ Not the main event loop,
+ because we want to be able to make blocking calls
+ and have certain requirements we don't want to impose on the main loop.
+ """
+ )
+
+ script_magics = List(
+ help="""Extra script cell magics to define
+
+ This generates simple wrappers of `%%script foo` as `%%foo`.
+
+ If you want to add script magics that aren't on your path,
+ specify them in script_paths
+ """,
+ ).tag(config=True)
+ @default('script_magics')
+ def _script_magics_default(self):
+ """default to a common list of programs"""
+
+ defaults = [
+ 'sh',
+ 'bash',
+ 'perl',
+ 'ruby',
+ 'python',
+ 'python2',
+ 'python3',
+ 'pypy',
+ ]
+ if os.name == 'nt':
+ defaults.extend([
+ 'cmd',
+ ])
+
+ return defaults
+
+ script_paths = Dict(
+ help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
+
+ Only necessary for items in script_magics where the default path will not
+ find the right interpreter.
+ """
+ ).tag(config=True)
+
+ def __init__(self, shell=None):
+ super(ScriptMagics, self).__init__(shell=shell)
+ self._generate_script_magics()
+ self.bg_processes = []
+ atexit.register(self.kill_bg_processes)
+
+ def __del__(self):
+ self.kill_bg_processes()
+
+ def _generate_script_magics(self):
+ cell_magics = self.magics['cell']
+ for name in self.script_magics:
+ cell_magics[name] = self._make_script_magic(name)
+
+ def _make_script_magic(self, name):
+ """Make a named magic that calls %%script with a particular program."""
+ # expand to explicit path if necessary:
+ script = self.script_paths.get(name, name)
+
+ @magic_arguments.magic_arguments()
+ @script_args
+ def named_script_magic(line, cell):
+ # if line, add it as cl-flags
+ if line:
+ line = "%s %s" % (script, line)
+ else:
+ line = script
+ return self.shebang(line, cell)
+
+ # write a basic docstring:
+ named_script_magic.__doc__ = \
+ """%%{name} script magic
+
+ Run cells with {script} in a subprocess.
+
+ This is a shortcut for `%%script {script}`
+ """.format(**locals())
+
+ return named_script_magic
+
+ @magic_arguments.magic_arguments()
+ @script_args
+ @cell_magic("script")
+ def shebang(self, line, cell):
+ """Run a cell via a shell command
+
+ The `%%script` line is like the #! line of a script,
+ specifying a program (bash, perl, ruby, etc.) with which to run the cell.
+
+ The rest of the cell is run by that program.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %%script bash
+ ...: for i in 1 2 3; do
+ ...: echo $i
+ ...: done
+ 1
+ 2
+ 3
+ """
+
+ # Create the event loop in which to run script magics
+ # this operates on a background thread
+ if self.event_loop is None:
+ if sys.platform == "win32":
+ # don't override the current policy,
+ # just create an event loop
+ event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop()
+ else:
+ event_loop = asyncio.new_event_loop()
+ self.event_loop = event_loop
+
+ # start the loop in a background thread
+ asyncio_thread = Thread(target=event_loop.run_forever, daemon=True)
+ asyncio_thread.start()
+ else:
+ event_loop = self.event_loop
+
+ def in_thread(coro):
+ """Call a coroutine on the asyncio thread"""
+ return asyncio.run_coroutine_threadsafe(coro, event_loop).result()
+
+ async def _readchunk(stream):
+ try:
+ return await stream.readuntil(b"\n")
+ except asyncio.exceptions.IncompleteReadError as e:
+ return e.partial
+ except asyncio.exceptions.LimitOverrunError as e:
+ return await stream.read(e.consumed)
+
+ async def _handle_stream(stream, stream_arg, file_object):
+ while True:
+ chunk = (await _readchunk(stream)).decode("utf8", errors="replace")
+ if not chunk:
+ break
+ if stream_arg:
+ self.shell.user_ns[stream_arg] = chunk
+ else:
+ file_object.write(chunk)
+ file_object.flush()
+
+ async def _stream_communicate(process, cell):
+ process.stdin.write(cell)
+ process.stdin.close()
+ stdout_task = asyncio.create_task(
+ _handle_stream(process.stdout, args.out, sys.stdout)
+ )
+ stderr_task = asyncio.create_task(
+ _handle_stream(process.stderr, args.err, sys.stderr)
+ )
+ await asyncio.wait([stdout_task, stderr_task])
+ await process.wait()
+
+ argv = arg_split(line, posix=not sys.platform.startswith("win"))
+ args, cmd = self.shebang.parser.parse_known_args(argv)
+
+ try:
+ p = in_thread(
+ asyncio.create_subprocess_exec(
+ *cmd,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ stdin=asyncio.subprocess.PIPE,
+ )
+ )
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ print("Couldn't find program: %r" % cmd[0])
+ return
+ else:
+ raise
+
+ if not cell.endswith('\n'):
+ cell += '\n'
+ cell = cell.encode('utf8', 'replace')
+ if args.bg:
+ self.bg_processes.append(p)
+ self._gc_bg_processes()
+ to_close = []
+ if args.out:
+ self.shell.user_ns[args.out] = _AsyncIOProxy(p.stdout, event_loop)
+ else:
+ to_close.append(p.stdout)
+ if args.err:
+ self.shell.user_ns[args.err] = _AsyncIOProxy(p.stderr, event_loop)
+ else:
+ to_close.append(p.stderr)
+ event_loop.call_soon_threadsafe(
+ lambda: asyncio.Task(self._run_script(p, cell, to_close))
+ )
+ if args.proc:
+ proc_proxy = _AsyncIOProxy(p, event_loop)
+ proc_proxy.stdout = _AsyncIOProxy(p.stdout, event_loop)
+ proc_proxy.stderr = _AsyncIOProxy(p.stderr, event_loop)
+ self.shell.user_ns[args.proc] = proc_proxy
+ return
+
+ try:
+ in_thread(_stream_communicate(p, cell))
+ except KeyboardInterrupt:
+ try:
+ p.send_signal(signal.SIGINT)
+ in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
+ if p.returncode is not None:
+ print("Process is interrupted.")
+ return
+ p.terminate()
+ in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
+ if p.returncode is not None:
+ print("Process is terminated.")
+ return
+ p.kill()
+ print("Process is killed.")
+ except OSError:
+ pass
+ except Exception as e:
+ print("Error while terminating subprocess (pid=%i): %s" % (p.pid, e))
+ return
+
+ if args.raise_error and p.returncode != 0:
+ # If we get here and p.returncode is still None, we must have
+ # killed it but not yet seen its return code. We don't wait for it,
+ # in case it's stuck in uninterruptible sleep. -9 = SIGKILL
+ rc = p.returncode or -9
+ raise CalledProcessError(rc, cell)
+
+ shebang.__skip_doctest__ = os.name != "posix"
+
+ async def _run_script(self, p, cell, to_close):
+ """callback for running the script in the background"""
+
+ p.stdin.write(cell)
+ await p.stdin.drain()
+ p.stdin.close()
+ await p.stdin.wait_closed()
+ await p.wait()
+ # asyncio read pipes have no close
+ # but we should drain the data anyway
+ for s in to_close:
+ await s.read()
+ self._gc_bg_processes()
+
+ @line_magic("killbgscripts")
+ def killbgscripts(self, _nouse_=''):
+ """Kill all BG processes started by %%script and its family."""
+ self.kill_bg_processes()
+ print("All background processes were killed.")
+
+ def kill_bg_processes(self):
+ """Kill all BG processes which are still running."""
+ if not self.bg_processes:
+ return
+ for p in self.bg_processes:
+ if p.returncode is None:
+ try:
+ p.send_signal(signal.SIGINT)
+ except:
+ pass
+ time.sleep(0.1)
+ self._gc_bg_processes()
+ if not self.bg_processes:
+ return
+ for p in self.bg_processes:
+ if p.returncode is None:
+ try:
+ p.terminate()
+ except:
+ pass
+ time.sleep(0.1)
+ self._gc_bg_processes()
+ if not self.bg_processes:
+ return
+ for p in self.bg_processes:
+ if p.returncode is None:
+ try:
+ p.kill()
+ except:
+ pass
+ self._gc_bg_processes()
+
+ def _gc_bg_processes(self):
+ self.bg_processes = [p for p in self.bg_processes if p.returncode is None]
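+
+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A hedged example of driving the %%script machinery programmatically. It
+# assumes a running IPython shell with bash available; the cell body and the
+# "out_pipe" variable name are hypothetical.
+def _script_bg_usage_sketch():
+    from IPython import get_ipython
+    ip = get_ipython()
+    if ip is None:
+        return None
+    # Run a bash cell in the background; per the --bg/--out handling in
+    # shebang() above, user_ns["out_pipe"] receives a proxy of the stdout pipe.
+    ip.run_cell_magic("script", "bash --bg --out out_pipe", "echo hello\n")
+    return ip.user_ns.get("out_pipe")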
diff --git a/contrib/python/ipython/py3/IPython/core/oinspect.py b/contrib/python/ipython/py3/IPython/core/oinspect.py
new file mode 100644
index 0000000000..ef6a0d02d7
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/oinspect.py
@@ -0,0 +1,1171 @@
+# -*- coding: utf-8 -*-
+"""Tools for inspecting Python objects.
+
+Uses syntax highlighting for presenting the various information elements.
+
+Similar in spirit to the inspect module, but all calls take a name argument to
+reference the name under which an object is being read.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+__all__ = ['Inspector','InspectColors']
+
+# stdlib modules
+from dataclasses import dataclass
+from inspect import signature
+from textwrap import dedent
+import ast
+import html
+import inspect
+import io as stdlib_io
+import linecache
+import os
+import sys
+import types
+import warnings
+
+from typing import Any, Optional, Dict, Union, List, Tuple
+
+if sys.version_info <= (3, 10):
+ from typing_extensions import TypeAlias
+else:
+ from typing import TypeAlias
+
+# IPython's own
+from IPython.core import page
+from IPython.lib.pretty import pretty
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import PyColorize
+from IPython.utils import openpy
+from IPython.utils.dir2 import safe_hasattr
+from IPython.utils.path import compress_user
+from IPython.utils.text import indent
+from IPython.utils.wildcard import list_namespace
+from IPython.utils.wildcard import typestr2type
+from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
+from IPython.utils.py3compat import cast_unicode
+from IPython.utils.colorable import Colorable
+from IPython.utils.decorators import undoc
+
+from pygments import highlight
+from pygments.lexers import PythonLexer
+from pygments.formatters import HtmlFormatter
+
+HOOK_NAME = "__custom_documentations__"
+
+
+UnformattedBundle: TypeAlias = Dict[str, List[Tuple[str, str]]] # List of (title, body)
+Bundle: TypeAlias = Dict[str, str]
+
+
+@dataclass
+class OInfo:
+ ismagic: bool
+ isalias: bool
+ found: bool
+ namespace: Optional[str]
+ parent: Any
+ obj: Any
+
+ def get(self, field):
+ """Get a field from the object, for backward compatibility with versions before 8.12.
+
+ see https://github.com/h5py/h5py/issues/2253
+ """
+ # We need to deprecate this at some point, but the warning will show in completion.
+ # Let's comment this for now and uncomment end of 2023 ish
+ # warnings.warn(
+ # f"OInfo dataclass with fields access since IPython 8.12 please use OInfo.{field} instead."
+ # "OInfo used to be a dict but a dataclass provide static fields verification with mypy."
+ # "This warning and backward compatibility `get()` method were added in 8.13.",
+ # DeprecationWarning,
+ # stacklevel=2,
+ # )
+ return getattr(self, field)
+
+
+def pylight(code):
+ return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))
+
+# builtin docstrings to ignore
+_func_call_docstring = types.FunctionType.__call__.__doc__
+_object_init_docstring = object.__init__.__doc__
+_builtin_type_docstrings = {
+ inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
+ types.FunctionType, property)
+}
+
+_builtin_func_type = type(all)
+_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
+#****************************************************************************
+# Builtin color schemes
+
+Colors = TermColors # just a shorthand
+
+InspectColors = PyColorize.ANSICodeColors
+
+#****************************************************************************
+# Auxiliary functions and objects
+
+# See the messaging spec for the definition of all these fields. This list
+# effectively defines the order of display
+info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
+ 'length', 'file', 'definition', 'docstring', 'source',
+ 'init_definition', 'class_docstring', 'init_docstring',
+ 'call_def', 'call_docstring',
+ # These won't be printed but will be used to determine how to
+ # format the object
+ 'ismagic', 'isalias', 'isclass', 'found', 'name'
+ ]
+
+
+def object_info(**kw):
+ """Make an object info dict with all fields present."""
+ infodict = {k:None for k in info_fields}
+ infodict.update(kw)
+ return infodict
+
+
+def get_encoding(obj):
+ """Get encoding for python source file defining obj
+
+ Returns None if obj is not defined in a sourcefile.
+ """
+ ofile = find_file(obj)
+ # Only try to detect an encoding if the object comes from a real text
+ # file on the filesystem (not a binary extension module).
+ if ofile is None:
+ return None
+ elif ofile.endswith(('.so', '.dll', '.pyd')):
+ return None
+ elif not os.path.isfile(ofile):
+ return None
+ else:
+ # Read the file in binary mode and let openpy detect the PEP 263
+ # encoding cookie from its first lines.
+ with stdlib_io.open(ofile, 'rb') as buffer:
+ encoding, lines = openpy.detect_encoding(buffer.readline)
+ return encoding
+
+def getdoc(obj) -> Union[str,None]:
+ """Stable wrapper around inspect.getdoc.
+
+ This can't crash because of attribute problems.
+
+ It also attempts to call a getdoc() method on the given object. This
+ allows objects which provide their docstrings via non-standard mechanisms
+ (like Pyro proxies) to still be inspected by ipython's ? system.
+ """
+ # Allow objects to offer customized documentation via a getdoc method:
+ try:
+ ds = obj.getdoc()
+ except Exception:
+ pass
+ else:
+ if isinstance(ds, str):
+ return inspect.cleandoc(ds)
+ docstr = inspect.getdoc(obj)
+ return docstr
+
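+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A hedged example of the getdoc() hook handled above: an object may expose
+# documentation through a getdoc() method, and IPython's `?` machinery will
+# pick it up. The class below is hypothetical and for illustration only.
+class _CustomDocSketch:
+    def getdoc(self):
+        return """Documentation produced at runtime,
+            e.g. fetched from a remote proxy object."""
+
+
+def _getdoc_hook_sketch():
+    # getdoc() above calls the object's getdoc() and cleans its indentation.
+    return getdoc(_CustomDocSketch())
+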
+
+def getsource(obj, oname='') -> Union[str,None]:
+ """Wrapper around inspect.getsource.
+
+ This can be modified by other projects to provide customized source
+ extraction.
+
+ Parameters
+ ----------
+ obj : object
+ an object whose source code we will attempt to extract
+ oname : str
+ (optional) a name under which the object is known
+
+ Returns
+ -------
+ src : str or None
+
+ """
+
+ if isinstance(obj, property):
+ sources = []
+ for attrname in ['fget', 'fset', 'fdel']:
+ fn = getattr(obj, attrname)
+ if fn is not None:
+ encoding = get_encoding(fn)
+ oname_prefix = ('%s.' % oname) if oname else ''
+ sources.append(''.join(('# ', oname_prefix, attrname)))
+ if inspect.isfunction(fn):
+ _src = getsource(fn)
+ if _src:
+ # assert _src is not None, "please mypy"
+ sources.append(dedent(_src))
+ else:
+ # Default str/repr only prints function name,
+ # pretty.pretty prints module name too.
+ sources.append(
+ '%s%s = %s\n' % (oname_prefix, attrname, pretty(fn))
+ )
+ if sources:
+ return '\n'.join(sources)
+ else:
+ return None
+
+ else:
+ # Get source for non-property objects.
+
+ obj = _get_wrapped(obj)
+
+ try:
+ src = inspect.getsource(obj)
+ except TypeError:
+ # The object itself provided no meaningful source, try looking for
+ # its class definition instead.
+ try:
+ src = inspect.getsource(obj.__class__)
+ except (OSError, TypeError):
+ return None
+ except OSError:
+ return None
+
+ return src
+
+
+def is_simple_callable(obj):
+ """True if obj is a function-like callable (function, method, or builtin)."""
+ return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
+ isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
+
+@undoc
+def getargspec(obj):
+ """Wrapper around :func:`inspect.getfullargspec`
+
+ In addition to functions and methods, this can also handle objects with a
+ ``__call__`` attribute.
+
+ DEPRECATED: Deprecated since 7.10. Do not use, will be removed.
+ """
+
+ warnings.warn('`getargspec` function is deprecated as of IPython 7.10'
+ 'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
+
+ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
+ obj = obj.__call__
+
+ return inspect.getfullargspec(obj)
+
+@undoc
+def format_argspec(argspec):
+ """Format an argspec; convenience wrapper around inspect's formatting.
+
+ This takes a dict instead of ordered arguments and calls
+ inspect.format_argspec with the arguments in the necessary order.
+
+ DEPRECATED (since 7.10): Do not use; will be removed in future versions.
+ """
+
+ warnings.warn('`format_argspec` function is deprecated as of IPython 7.10'
+ 'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
+
+
+ return inspect.formatargspec(argspec['args'], argspec['varargs'],
+ argspec['varkw'], argspec['defaults'])
+
+@undoc
+def call_tip(oinfo, format_call=True):
+ """DEPRECATED since 6.0. Extract call tip data from an oinfo dict."""
+ warnings.warn(
+ "`call_tip` function is deprecated as of IPython 6.0"
+ "and will be removed in future versions.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ # Get call definition
+ argspec = oinfo.get('argspec')
+ if argspec is None:
+ call_line = None
+ else:
+ # Callable objects will have 'self' as their first argument, prune
+ # it out if it's there for clarity (since users do *not* pass an
+ # extra first argument explicitly).
+ try:
+ has_self = argspec['args'][0] == 'self'
+ except (KeyError, IndexError):
+ pass
+ else:
+ if has_self:
+ argspec['args'] = argspec['args'][1:]
+
+ call_line = oinfo['name']+format_argspec(argspec)
+
+ # Now get docstring.
+ # The priority is: call docstring, constructor docstring, main one.
+ doc = oinfo.get('call_docstring')
+ if doc is None:
+ doc = oinfo.get('init_docstring')
+ if doc is None:
+ doc = oinfo.get('docstring','')
+
+ return call_line, doc
+
+
+def _get_wrapped(obj):
+ """Get the original object if wrapped in one or more @decorators
+
+ Some objects automatically construct similar objects on any unrecognised
+ attribute access (e.g. unittest.mock.call). To protect against infinite loops,
+ this will arbitrarily cut off after 100 levels of obj.__wrapped__
+ attribute access. --TK, Jan 2016
+ """
+ orig_obj = obj
+ i = 0
+ while safe_hasattr(obj, '__wrapped__'):
+ obj = obj.__wrapped__
+ i += 1
+ if i > 100:
+ # __wrapped__ is probably a lie, so return the thing we started with
+ return orig_obj
+ return obj
+
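+
+# --- Editor's sketch (not part of upstream IPython) --------------------------
+# A hedged illustration of _get_wrapped(): functools.wraps records the original
+# function on __wrapped__, so the helper can walk back through decorators. The
+# decorator and function below are made up for this example.
+def _get_wrapped_sketch():
+    import functools
+
+    def noisy(fn):
+        @functools.wraps(fn)
+        def wrapper(*args, **kwargs):
+            print("calling", fn.__name__)
+            return fn(*args, **kwargs)
+        return wrapper
+
+    @noisy
+    def add(a, b):
+        return a + b
+
+    # _get_wrapped(add) follows add.__wrapped__ back to the undecorated add.
+    return _get_wrapped(add) is add.__wrapped__
+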
+def find_file(obj) -> str:
+ """Find the absolute path to the file where an object was defined.
+
+ This is essentially a robust wrapper around `inspect.getabsfile`.
+
+ Returns None if no file can be found.
+
+ Parameters
+ ----------
+ obj : any Python object
+
+ Returns
+ -------
+ fname : str
+ The absolute path to the file where the object was defined.
+ """
+ obj = _get_wrapped(obj)
+
+ fname = None
+ try:
+ fname = inspect.getabsfile(obj)
+ except TypeError:
+ # For an instance, the file that matters is where its class was
+ # declared.
+ try:
+ fname = inspect.getabsfile(obj.__class__)
+ except (OSError, TypeError):
+ # Can happen for builtins
+ pass
+ except OSError:
+ pass
+
+ return cast_unicode(fname)
+
+
+def find_source_lines(obj):
+ """Find the line number in a file where an object was defined.
+
+ This is essentially a robust wrapper around `inspect.getsourcelines`.
+
+ Returns None if no file can be found.
+
+ Parameters
+ ----------
+ obj : any Python object
+
+ Returns
+ -------
+ lineno : int
+ The line number where the object definition starts.
+ """
+ obj = _get_wrapped(obj)
+
+ try:
+ lineno = inspect.getsourcelines(obj)[1]
+ except TypeError:
+ # For instances, try the class object like getsource() does
+ try:
+ lineno = inspect.getsourcelines(obj.__class__)[1]
+ except (OSError, TypeError):
+ return None
+ except OSError:
+ return None
+
+ return lineno
+
+class Inspector(Colorable):
+
+ def __init__(self, color_table=InspectColors,
+ code_color_table=PyColorize.ANSICodeColors,
+ scheme=None,
+ str_detail_level=0,
+ parent=None, config=None):
+ super(Inspector, self).__init__(parent=parent, config=config)
+ self.color_table = color_table
+ self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
+ self.format = self.parser.format
+ self.str_detail_level = str_detail_level
+ self.set_active_scheme(scheme)
+
+ def _getdef(self,obj,oname='') -> Union[str,None]:
+ """Return the call signature for any callable object.
+
+ If any exception is generated, None is returned instead and the
+ exception is suppressed."""
+ try:
+ return _render_signature(signature(obj), oname)
+ except:
+ return None
+
+ def __head(self,h) -> str:
+ """Return a header string with proper colors."""
+ return '%s%s%s' % (self.color_table.active_colors.header,h,
+ self.color_table.active_colors.normal)
+
+ def set_active_scheme(self, scheme):
+ if scheme is not None:
+ self.color_table.set_active_scheme(scheme)
+ self.parser.color_table.set_active_scheme(scheme)
+
+ def noinfo(self, msg, oname):
+ """Generic message when no information is found."""
+ print('No %s found' % msg, end=' ')
+ if oname:
+ print('for %s' % oname)
+ else:
+ print()
+
+ def pdef(self, obj, oname=''):
+ """Print the call signature for any callable object.
+
+ If the object is a class, print the constructor information."""
+
+ if not callable(obj):
+ print('Object is not callable.')
+ return
+
+ header = ''
+
+ if inspect.isclass(obj):
+ header = self.__head('Class constructor information:\n')
+
+
+ output = self._getdef(obj,oname)
+ if output is None:
+ self.noinfo('definition header',oname)
+ else:
+ print(header,self.format(output), end=' ')
+
+ # In Python 3, all classes are new-style, so they all have __init__.
+ @skip_doctest
+ def pdoc(self, obj, oname='', formatter=None):
+ """Print the docstring for any object.
+
+ Optional:
+ -formatter: a function to run the docstring through for specially
+ formatted docstrings.
+
+ Examples
+ --------
+ In [1]: class NoInit:
+ ...: pass
+
+ In [2]: class NoDoc:
+ ...: def __init__(self):
+ ...: pass
+
+ In [3]: %pdoc NoDoc
+ No documentation found for NoDoc
+
+ In [4]: %pdoc NoInit
+ No documentation found for NoInit
+
+ In [5]: obj = NoInit()
+
+ In [6]: %pdoc obj
+ No documentation found for obj
+
+ In [5]: obj2 = NoDoc()
+
+ In [6]: %pdoc obj2
+ No documentation found for obj2
+ """
+
+ head = self.__head # For convenience
+ lines = []
+ ds = getdoc(obj)
+ if formatter:
+ ds = formatter(ds).get('text/plain', ds)
+ if ds:
+ lines.append(head("Class docstring:"))
+ lines.append(indent(ds))
+ if inspect.isclass(obj) and hasattr(obj, '__init__'):
+ init_ds = getdoc(obj.__init__)
+ if init_ds is not None:
+ lines.append(head("Init docstring:"))
+ lines.append(indent(init_ds))
+ elif hasattr(obj,'__call__'):
+ call_ds = getdoc(obj.__call__)
+ if call_ds:
+ lines.append(head("Call docstring:"))
+ lines.append(indent(call_ds))
+
+ if not lines:
+ self.noinfo('documentation',oname)
+ else:
+ page.page('\n'.join(lines))
+
+ def psource(self, obj, oname=''):
+ """Print the source code for an object."""
+
+ # Flush the source cache because inspect can return out-of-date source
+ linecache.checkcache()
+ try:
+ src = getsource(obj, oname=oname)
+ except Exception:
+ src = None
+
+ if src is None:
+ self.noinfo('source', oname)
+ else:
+ page.page(self.format(src))
+
+ def pfile(self, obj, oname=''):
+ """Show the whole file where an object was defined."""
+
+ lineno = find_source_lines(obj)
+ if lineno is None:
+ self.noinfo('file', oname)
+ return
+
+ ofile = find_file(obj)
+ # run contents of file through pager starting at line where the object
+ # is defined, as long as the file isn't binary and is actually on the
+ # filesystem.
+ if ofile.endswith(('.so', '.dll', '.pyd')):
+ print('File %r is binary, not printing.' % ofile)
+ elif not os.path.isfile(ofile):
+ print('File %r does not exist, not printing.' % ofile)
+ else:
+ # Print only text files, not extension binaries. Note that
+ # getsourcelines returns lineno with 1-offset and page() uses
+ # 0-offset, so we must adjust.
+ page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
+
+
+ def _mime_format(self, text:str, formatter=None) -> dict:
+ """Return a mime bundle representation of the input text.
+
+ - if `formatter` is None, the returned mime bundle has
+ a ``text/plain`` field, with the input text.
+ a ``text/html`` field with a ``<pre>`` tag containing the input text.
+
+ - if ``formatter`` is not None, it must be a callable transforming the
+ input text into a mime bundle. Default values for ``text/plain`` and
+ ``text/html`` representations are the ones described above.
+
+ Note:
+
+ Formatters returning strings are supported but this behavior is deprecated.
+
+ """
+ defaults = {
+ "text/plain": text,
+ "text/html": f"<pre>{html.escape(text)}</pre>",
+ }
+
+ if formatter is None:
+ return defaults
+ else:
+ formatted = formatter(text)
+
+ if not isinstance(formatted, dict):
+ # Handle the deprecated behavior of a formatter returning
+ # a string instead of a mime bundle.
+ return {"text/plain": formatted, "text/html": f"<pre>{formatted}</pre>"}
+
+ else:
+ return dict(defaults, **formatted)
+
+ def format_mime(self, bundle: UnformattedBundle) -> Bundle:
+ """Format a mimebundle being created by _make_info_unformatted into a real mimebundle"""
+ # Format text/plain mimetype
+ assert isinstance(bundle["text/plain"], list)
+ for item in bundle["text/plain"]:
+ assert isinstance(item, tuple)
+
+ new_b: Bundle = {}
+ lines = []
+ _len = max(len(h) for h, _ in bundle["text/plain"])
+
+ for head, body in bundle["text/plain"]:
+ body = body.strip("\n")
+ delim = "\n" if "\n" in body else " "
+ lines.append(
+ f"{self.__head(head+':')}{(_len - len(head))*' '}{delim}{body}"
+ )
+
+ new_b["text/plain"] = "\n".join(lines)
+
+ if "text/html" in bundle:
+ assert isinstance(bundle["text/html"], list)
+ for item in bundle["text/html"]:
+ assert isinstance(item, tuple)
+ # Format the text/html mimetype
+ if isinstance(bundle["text/html"], (list, tuple)):
+ # bundle['text/html'] is a list of (head, formatted body) pairs
+ new_b["text/html"] = "\n".join(
+ (f"<h1>{head}</h1>\n{body}" for (head, body) in bundle["text/html"])
+ )
+
+ for k in bundle.keys():
+ if k in ("text/html", "text/plain"):
+ continue
+ else:
+ new_b[k] = bundle[k] # type:ignore
+ return new_b
+
+ def _append_info_field(
+ self,
+ bundle: UnformattedBundle,
+ title: str,
+ key: str,
+ info,
+ omit_sections,
+ formatter,
+ ):
+ """Append an info value to the unformatted mimebundle being constructed by _make_info_unformatted"""
+ if title in omit_sections or key in omit_sections:
+ return
+ field = info[key]
+ if field is not None:
+ formatted_field = self._mime_format(field, formatter)
+ bundle["text/plain"].append((title, formatted_field["text/plain"]))
+ bundle["text/html"].append((title, formatted_field["text/html"]))
+
+ def _make_info_unformatted(
+ self, obj, info, formatter, detail_level, omit_sections
+ ) -> UnformattedBundle:
+ """Assemble the mimebundle as unformatted lists of information"""
+ bundle: UnformattedBundle = {
+ "text/plain": [],
+ "text/html": [],
+ }
+
+ # A convenience function to simplify calls below
+ def append_field(
+ bundle: UnformattedBundle, title: str, key: str, formatter=None
+ ):
+ self._append_info_field(
+ bundle,
+ title=title,
+ key=key,
+ info=info,
+ omit_sections=omit_sections,
+ formatter=formatter,
+ )
+
+ def code_formatter(text) -> Bundle:
+ return {
+ 'text/plain': self.format(text),
+ 'text/html': pylight(text)
+ }
+
+ if info["isalias"]:
+ append_field(bundle, "Repr", "string_form")
+
+ elif info['ismagic']:
+ if detail_level > 0:
+ append_field(bundle, "Source", "source", code_formatter)
+ else:
+ append_field(bundle, "Docstring", "docstring", formatter)
+ append_field(bundle, "File", "file")
+
+ elif info['isclass'] or is_simple_callable(obj):
+ # Functions, methods, classes
+ append_field(bundle, "Signature", "definition", code_formatter)
+ append_field(bundle, "Init signature", "init_definition", code_formatter)
+ append_field(bundle, "Docstring", "docstring", formatter)
+ if detail_level > 0 and info["source"]:
+ append_field(bundle, "Source", "source", code_formatter)
+ else:
+ append_field(bundle, "Init docstring", "init_docstring", formatter)
+
+ append_field(bundle, "File", "file")
+ append_field(bundle, "Type", "type_name")
+ append_field(bundle, "Subclasses", "subclasses")
+
+ else:
+ # General Python objects
+ append_field(bundle, "Signature", "definition", code_formatter)
+ append_field(bundle, "Call signature", "call_def", code_formatter)
+ append_field(bundle, "Type", "type_name")
+ append_field(bundle, "String form", "string_form")
+
+ # Namespace
+ if info["namespace"] != "Interactive":
+ append_field(bundle, "Namespace", "namespace")
+
+ append_field(bundle, "Length", "length")
+ append_field(bundle, "File", "file")
+
+ # Source or docstring, depending on detail level and whether
+ # source found.
+ if detail_level > 0 and info["source"]:
+ append_field(bundle, "Source", "source", code_formatter)
+ else:
+ append_field(bundle, "Docstring", "docstring", formatter)
+
+ append_field(bundle, "Class docstring", "class_docstring", formatter)
+ append_field(bundle, "Init docstring", "init_docstring", formatter)
+ append_field(bundle, "Call docstring", "call_docstring", formatter)
+ return bundle
+
+
+ def _get_info(
+ self,
+ obj: Any,
+ oname: str = "",
+ formatter=None,
+ info: Optional[OInfo] = None,
+ detail_level=0,
+ omit_sections=(),
+ ) -> Bundle:
+ """Retrieve an info dict and format it.
+
+ Parameters
+ ----------
+ obj : any
+ Object to inspect and return info from
+ oname : str (default: '')
+ Name of the variable pointing to `obj`.
+ formatter : callable
+ info
+ already computed information
+ detail_level : integer
+ Granularity of detail level, if set to 1, give more information.
+ omit_sections : container[str]
+ Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
+ """
+
+ info_dict = self.info(obj, oname=oname, info=info, detail_level=detail_level)
+ bundle = self._make_info_unformatted(
+ obj,
+ info_dict,
+ formatter,
+ detail_level=detail_level,
+ omit_sections=omit_sections,
+ )
+ return self.format_mime(bundle)
+
+ def pinfo(
+ self,
+ obj,
+ oname="",
+ formatter=None,
+ info: Optional[OInfo] = None,
+ detail_level=0,
+ enable_html_pager=True,
+ omit_sections=(),
+ ):
+ """Show detailed information about an object.
+
+ Optional arguments:
+
+ - oname: name of the variable pointing to the object.
+
+ - formatter: callable (optional)
+ A special formatter for docstrings.
+
+ The formatter is a callable that takes a string as an input
+ and returns either a formatted string or a mime type bundle
+ in the form of a dictionary.
+
+ Note that support for a custom formatter returning a plain string
+ instead of a mime type bundle is deprecated.
+
+ - info: a structure with some information fields which may have been
+ precomputed already.
+
+ - detail_level: if set to 1, more information is given.
+
+ - omit_sections: set of section keys and titles to omit
+ """
+ assert info is not None
+ info_b: Bundle = self._get_info(
+ obj, oname, formatter, info, detail_level, omit_sections=omit_sections
+ )
+ if not enable_html_pager:
+ del info_b["text/html"]
+ page.page(info_b)
+
+ def _info(self, obj, oname="", info=None, detail_level=0):
+ """
+ Inspector.info() was likely improperly marked as deprecated
+ while only a parameter was deprecated. We "un-deprecate" it.
+ """
+
+ warnings.warn(
+ "The `Inspector.info()` method has been un-deprecated as of 8.0 "
+ "and the `formatter=` keyword removed. `Inspector._info` is now "
+ "an alias, and you can just call `.info()` directly.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.info(obj, oname=oname, info=info, detail_level=detail_level)
+
+ def info(self, obj, oname="", info=None, detail_level=0) -> Dict[str, Any]:
+ """Compute a dict with detailed information about an object.
+
+ Parameters
+ ----------
+ obj : any
+ An object to find information about
+ oname : str (default: '')
+ Name of the variable pointing to `obj`.
+ info : (default: None)
+ A struct (dict like with attr access) with some information fields
+ which may have been precomputed already.
+ detail_level : int (default:0)
+ If set to 1, more information is given.
+
+ Returns
+ -------
+ An object info dict with known fields from `info_fields`. Keys are
+ strings, values are string or None.
+ """
+
+ if info is None:
+ ismagic = False
+ isalias = False
+ ospace = ''
+ else:
+ ismagic = info.ismagic
+ isalias = info.isalias
+ ospace = info.namespace
+
+ # Get docstring, special-casing aliases:
+ att_name = oname.split(".")[-1]
+ parents_docs = None
+ prelude = ""
+ if info and info.parent is not None and hasattr(info.parent, HOOK_NAME):
+ parents_docs_dict = getattr(info.parent, HOOK_NAME)
+ parents_docs = parents_docs_dict.get(att_name, None)
+ out = dict(
+ name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None
+ )
+
+ if parents_docs:
+ ds = parents_docs
+ elif isalias:
+ if not callable(obj):
+ try:
+ ds = "Alias to the system command:\n %s" % obj[1]
+ except:
+ ds = "Alias: " + str(obj)
+ else:
+ ds = "Alias to " + str(obj)
+ if obj.__doc__:
+ ds += "\nDocstring:\n" + obj.__doc__
+ else:
+ ds_or_None = getdoc(obj)
+ if ds_or_None is None:
+ ds = '<no docstring>'
+ else:
+ ds = ds_or_None
+
+ ds = prelude + ds
+
+ # store output in a dict, we initialize it here and fill it as we go
+
+ string_max = 200 # max size of strings to show (snipped if longer)
+ shalf = int((string_max - 5) / 2)
+
+ if ismagic:
+ out['type_name'] = 'Magic function'
+ elif isalias:
+ out['type_name'] = 'System alias'
+ else:
+ out['type_name'] = type(obj).__name__
+
+ try:
+ bclass = obj.__class__
+ out['base_class'] = str(bclass)
+ except:
+ pass
+
+ # String form, but snip if too long in ? form (full in ??)
+ if detail_level >= self.str_detail_level:
+ try:
+ ostr = str(obj)
+ str_head = 'string_form'
+ if not detail_level and len(ostr)>string_max:
+ ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
+ ostr = ("\n" + " " * len(str_head.expandtabs())).\
+ join(q.strip() for q in ostr.split("\n"))
+ out[str_head] = ostr
+ except:
+ pass
+
+ if ospace:
+ out['namespace'] = ospace
+
+ # Length (for strings and lists)
+ try:
+ out['length'] = str(len(obj))
+ except Exception:
+ pass
+
+ # Filename where object was defined
+ binary_file = False
+ fname = find_file(obj)
+ if fname is None:
+ # if anything goes wrong, we don't want to show source, so it's as
+ # if the file was binary
+ binary_file = True
+ else:
+ if fname.endswith(('.so', '.dll', '.pyd')):
+ binary_file = True
+ elif fname.endswith('<string>'):
+ fname = 'Dynamically generated function. No source code available.'
+ out['file'] = compress_user(fname)
+
+ # Original source code for a callable, class or property.
+ if detail_level:
+ # Flush the source cache because inspect can return out-of-date
+ # source
+ linecache.checkcache()
+ try:
+ if isinstance(obj, property) or not binary_file:
+ src = getsource(obj, oname)
+ if src is not None:
+ src = src.rstrip()
+ out['source'] = src
+
+ except Exception:
+ pass
+
+ # Add docstring only if no source is to be shown (avoid repetitions).
+ if ds and not self._source_contains_docstring(out.get('source'), ds):
+ out['docstring'] = ds
+
+ # Constructor docstring for classes
+ if inspect.isclass(obj):
+ out['isclass'] = True
+
+ # get the init signature:
+ try:
+ init_def = self._getdef(obj, oname)
+ except AttributeError:
+ init_def = None
+
+ # get the __init__ docstring
+ try:
+ obj_init = obj.__init__
+ except AttributeError:
+ init_ds = None
+ else:
+ if init_def is None:
+ # Get signature from init if top-level sig failed.
+ # Can happen for built-in types (list, etc.).
+ try:
+ init_def = self._getdef(obj_init, oname)
+ except AttributeError:
+ pass
+ init_ds = getdoc(obj_init)
+ # Skip Python's auto-generated docstrings
+ if init_ds == _object_init_docstring:
+ init_ds = None
+
+ if init_def:
+ out['init_definition'] = init_def
+
+ if init_ds:
+ out['init_docstring'] = init_ds
+
+ names = [sub.__name__ for sub in type.__subclasses__(obj)]
+ if len(names) < 10:
+ all_names = ', '.join(names)
+ else:
+ all_names = ', '.join(names[:10]+['...'])
+ out['subclasses'] = all_names
+ # and class docstring for instances:
+ else:
+ # reconstruct the function definition and print it:
+ defln = self._getdef(obj, oname)
+ if defln:
+ out['definition'] = defln
+
+ # First, check whether the instance docstring is identical to the
+ # class one, and print it separately if they don't coincide. In
+ # most cases they will, but it's nice to print all the info for
+ # objects which use instance-customized docstrings.
+ if ds:
+ try:
+ cls = getattr(obj,'__class__')
+ except:
+ class_ds = None
+ else:
+ class_ds = getdoc(cls)
+ # Skip Python's auto-generated docstrings
+ if class_ds in _builtin_type_docstrings:
+ class_ds = None
+ if class_ds and ds != class_ds:
+ out['class_docstring'] = class_ds
+
+ # Next, try to show constructor docstrings
+ try:
+ init_ds = getdoc(obj.__init__)
+ # Skip Python's auto-generated docstrings
+ if init_ds == _object_init_docstring:
+ init_ds = None
+ except AttributeError:
+ init_ds = None
+ if init_ds:
+ out['init_docstring'] = init_ds
+
+ # Call form docstring for callable instances
+ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
+ call_def = self._getdef(obj.__call__, oname)
+ if call_def and (call_def != out.get('definition')):
+ # it may never be the case that call def and definition differ,
+ # but don't include the same signature twice
+ out['call_def'] = call_def
+ call_ds = getdoc(obj.__call__)
+ # Skip Python's auto-generated docstrings
+ if call_ds == _func_call_docstring:
+ call_ds = None
+ if call_ds:
+ out['call_docstring'] = call_ds
+
+ return object_info(**out)
+
+ @staticmethod
+ def _source_contains_docstring(src, doc):
+ """
+ Check whether the source *src* contains the docstring *doc*.
+
+ This is a helper function to skip displaying the docstring if the
+ source already contains it, avoiding repetition of information.
+ """
+ try:
+ (def_node,) = ast.parse(dedent(src)).body
+ return ast.get_docstring(def_node) == doc # type: ignore[arg-type]
+ except Exception:
+ # The source can become invalid or even non-existent (because it
+            # is re-fetched from the source file), so the above code can fail
+            # in arbitrary ways.
+ return False
+
+ def psearch(self,pattern,ns_table,ns_search=[],
+ ignore_case=False,show_all=False, *, list_types=False):
+ """Search namespaces with wildcards for objects.
+
+ Arguments:
+
+ - pattern: string containing shell-like wildcards to use in namespace
+ searches and optionally a type specification to narrow the search to
+ objects of that type.
+
+ - ns_table: dict of name->namespaces for search.
+
+ Optional arguments:
+
+ - ns_search: list of namespace names to include in search.
+
+ - ignore_case(False): make the search case-insensitive.
+
+ - show_all(False): show all names, including those starting with
+ underscores.
+
+ - list_types(False): list all available object types for object matching.
+ """
+ #print 'ps pattern:<%r>' % pattern # dbg
+
+ # defaults
+ type_pattern = 'all'
+ filter = ''
+
+ # list all object types
+ if list_types:
+ page.page('\n'.join(sorted(typestr2type)))
+ return
+
+ cmds = pattern.split()
+ len_cmds = len(cmds)
+ if len_cmds == 1:
+ # Only filter pattern given
+ filter = cmds[0]
+ elif len_cmds == 2:
+ # Both filter and type specified
+ filter,type_pattern = cmds
+ else:
+ raise ValueError('invalid argument string for psearch: <%s>' %
+ pattern)
+
+ # filter search namespaces
+ for name in ns_search:
+ if name not in ns_table:
+ raise ValueError('invalid namespace <%s>. Valid names: %s' %
+ (name,ns_table.keys()))
+
+ #print 'type_pattern:',type_pattern # dbg
+ search_result, namespaces_seen = set(), set()
+ for ns_name in ns_search:
+ ns = ns_table[ns_name]
+ # Normally, locals and globals are the same, so we just check one.
+ if id(ns) in namespaces_seen:
+ continue
+ namespaces_seen.add(id(ns))
+ tmp_res = list_namespace(ns, type_pattern, filter,
+ ignore_case=ignore_case, show_all=show_all)
+ search_result.update(tmp_res)
+
+ page.page('\n'.join(sorted(search_result)))
+
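+# Illustrative usage of psearch (a sketch, not part of the upstream module;
+# `inspector` and `user_ns` below stand for the shell's Inspector instance
+# and user namespace).  The pattern is a shell-style wildcard, optionally
+# followed by a type name from `typestr2type`:
+#
+#     inspector.psearch('a*', ns_table={'user': user_ns}, ns_search=['user'])
+#     inspector.psearch('a* function', ns_table={'user': user_ns},
+#                       ns_search=['user'])    # restrict matches to functions
+#
+# This is the backend used by the `%psearch` line magic.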
+
+def _render_signature(obj_signature, obj_name) -> str:
+ """
+ This was mostly taken from inspect.Signature.__str__.
+ Look there for the comments.
+ The only change is to add linebreaks when this gets too long.
+ """
+ result = []
+ pos_only = False
+ kw_only = True
+ for param in obj_signature.parameters.values():
+ if param.kind == inspect.Parameter.POSITIONAL_ONLY:
+ pos_only = True
+ elif pos_only:
+ result.append('/')
+ pos_only = False
+
+ if param.kind == inspect.Parameter.VAR_POSITIONAL:
+ kw_only = False
+ elif param.kind == inspect.Parameter.KEYWORD_ONLY and kw_only:
+ result.append('*')
+ kw_only = False
+
+ result.append(str(param))
+
+ if pos_only:
+ result.append('/')
+
+ # add up name, parameters, braces (2), and commas
+ if len(obj_name) + sum(len(r) + 2 for r in result) > 75:
+ # This doesn’t fit behind “Signature: ” in an inspect window.
+ rendered = '{}(\n{})'.format(obj_name, ''.join(
+ ' {},\n'.format(r) for r in result)
+ )
+ else:
+ rendered = '{}({})'.format(obj_name, ', '.join(result))
+
+ if obj_signature.return_annotation is not inspect._empty:
+ anno = inspect.formatannotation(obj_signature.return_annotation)
+ rendered += ' -> {}'.format(anno)
+
+ return rendered
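+
+# Illustrative example for _render_signature (a sketch, not part of the
+# upstream module): short signatures stay on one line, anything that would
+# not fit behind "Signature: " (roughly 75 characters) is wrapped:
+#
+#     >>> import inspect
+#     >>> def f(alpha, beta=1, *args, gamma, **kwargs) -> int: ...
+#     >>> _render_signature(inspect.signature(f), 'f')
+#     'f(alpha, beta=1, *args, gamma, **kwargs) -> int'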
diff --git a/contrib/python/ipython/py3/IPython/core/page.py b/contrib/python/ipython/py3/IPython/core/page.py
new file mode 100644
index 0000000000..d3e6a9eef5
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/page.py
@@ -0,0 +1,348 @@
+# encoding: utf-8
+"""
+Paging capabilities for IPython.core
+
+Notes
+-----
+
+For now this uses IPython hooks, so it can't be in IPython.utils. If we can get
+rid of that dependency, we could move it there.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+import os
+import io
+import re
+import sys
+import tempfile
+import subprocess
+
+from io import UnsupportedOperation
+from pathlib import Path
+
+from IPython import get_ipython
+from IPython.display import display
+from IPython.core.error import TryNext
+from IPython.utils.data import chop
+from IPython.utils.process import system
+from IPython.utils.terminal import get_terminal_size
+from IPython.utils import py3compat
+
+
+def display_page(strng, start=0, screen_lines=25):
+ """Just display, no paging. screen_lines is ignored."""
+ if isinstance(strng, dict):
+ data = strng
+ else:
+ if start:
+ strng = u'\n'.join(strng.splitlines()[start:])
+ data = { 'text/plain': strng }
+ display(data, raw=True)
+
+
+def as_hook(page_func):
+ """Wrap a pager func to strip the `self` arg
+
+ so it can be called as a hook.
+ """
+ return lambda self, *args, **kwargs: page_func(*args, **kwargs)
+
+
+esc_re = re.compile(r"(\x1b[^m]+m)")
+
+def page_dumb(strng, start=0, screen_lines=25):
+ """Very dumb 'pager' in Python, for when nothing else works.
+
+ Only moves forward, same interface as page(), except for pager_cmd and
+ mode.
+ """
+ if isinstance(strng, dict):
+ strng = strng.get('text/plain', '')
+ out_ln = strng.splitlines()[start:]
+ screens = chop(out_ln,screen_lines-1)
+ if len(screens) == 1:
+ print(os.linesep.join(screens[0]))
+ else:
+ last_escape = ""
+ for scr in screens[0:-1]:
+ hunk = os.linesep.join(scr)
+ print(last_escape + hunk)
+ if not page_more():
+ return
+ esc_list = esc_re.findall(hunk)
+ if len(esc_list) > 0:
+ last_escape = esc_list[-1]
+ print(last_escape + os.linesep.join(screens[-1]))
+
+def _detect_screen_size(screen_lines_def):
+ """Attempt to work out the number of lines on the screen.
+
+ This is called by page(). It can raise an error (e.g. when run in the
+ test suite), so it's separated out so it can easily be called in a try block.
+ """
+ TERM = os.environ.get('TERM',None)
+ if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
+ # curses causes problems on many terminals other than xterm, and
+ # some termios calls lock up on Sun OS5.
+ return screen_lines_def
+
+ try:
+ import termios
+ import curses
+ except ImportError:
+ return screen_lines_def
+
+ # There is a bug in curses, where *sometimes* it fails to properly
+ # initialize, and then after the endwin() call is made, the
+ # terminal is left in an unusable state. Rather than trying to
+ # check every time for this (by requesting and comparing termios
+ # flags each time), we just save the initial terminal state and
+ # unconditionally reset it every time. It's cheaper than making
+ # the checks.
+ try:
+ term_flags = termios.tcgetattr(sys.stdout)
+ except termios.error as err:
+ # can fail on Linux 2.6, pager_page will catch the TypeError
+ raise TypeError('termios error: {0}'.format(err)) from err
+
+ try:
+ scr = curses.initscr()
+ except AttributeError:
+ # Curses on Solaris may not be complete, so we can't use it there
+ return screen_lines_def
+
+ screen_lines_real,screen_cols = scr.getmaxyx()
+ curses.endwin()
+
+ # Restore terminal state in case endwin() didn't.
+ termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
+ # Now we have what we needed: the screen size in rows/columns
+ return screen_lines_real
+ #print '***Screen size:',screen_lines_real,'lines x',\
+ #screen_cols,'columns.' # dbg
+
+def pager_page(strng, start=0, screen_lines=0, pager_cmd=None):
+ """Display a string, piping through a pager after a certain length.
+
+ strng can be a mime-bundle dict, supplying multiple representations,
+ keyed by mime-type.
+
+ The screen_lines parameter specifies the number of *usable* lines of your
+ terminal screen (total lines minus lines you need to reserve to show other
+ information).
+
+ If you set screen_lines to a number <=0, page() will try to auto-determine
+ your screen size and will only use up to (screen_size+screen_lines) for
+ printing, paging after that. That is, if you want auto-detection but need
+ to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
+ auto-detection without any lines reserved simply use screen_lines = 0.
+
+ If a string won't fit in the allowed lines, it is sent through the
+ specified pager command. If none given, look for PAGER in the environment,
+ and ultimately default to less.
+
+    If no system pager works, the string is sent through a very simplistic
+    'dumb pager' written in Python.
+ """
+
+ # for compatibility with mime-bundle form:
+ if isinstance(strng, dict):
+ strng = strng['text/plain']
+
+ # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
+ TERM = os.environ.get('TERM','dumb')
+ if TERM in ['dumb','emacs'] and os.name != 'nt':
+ print(strng)
+ return
+ # chop off the topmost part of the string we don't want to see
+ str_lines = strng.splitlines()[start:]
+ str_toprint = os.linesep.join(str_lines)
+ num_newlines = len(str_lines)
+ len_str = len(str_toprint)
+
+ # Dumb heuristics to guesstimate number of on-screen lines the string
+ # takes. Very basic, but good enough for docstrings in reasonable
+ # terminals. If someone later feels like refining it, it's not hard.
+ numlines = max(num_newlines,int(len_str/80)+1)
+
+ screen_lines_def = get_terminal_size()[1]
+
+ # auto-determine screen size
+ if screen_lines <= 0:
+ try:
+ screen_lines += _detect_screen_size(screen_lines_def)
+ except (TypeError, UnsupportedOperation):
+ print(str_toprint)
+ return
+
+ #print 'numlines',numlines,'screenlines',screen_lines # dbg
+ if numlines <= screen_lines :
+ #print '*** normal print' # dbg
+ print(str_toprint)
+ else:
+ # Try to open pager and default to internal one if that fails.
+ # All failure modes are tagged as 'retval=1', to match the return
+ # value of a failed system command. If any intermediate attempt
+ # sets retval to 1, at the end we resort to our own page_dumb() pager.
+ pager_cmd = get_pager_cmd(pager_cmd)
+ pager_cmd += ' ' + get_pager_start(pager_cmd,start)
+ if os.name == 'nt':
+ if pager_cmd.startswith('type'):
+                # The default WinXP 'type' command fails on complex strings.
+ retval = 1
+ else:
+ fd, tmpname = tempfile.mkstemp('.txt')
+ tmppath = Path(tmpname)
+ try:
+ os.close(fd)
+ with tmppath.open("wt", encoding="utf-8") as tmpfile:
+ tmpfile.write(strng)
+ cmd = "%s < %s" % (pager_cmd, tmppath)
+ # tmpfile needs to be closed for windows
+ if os.system(cmd):
+ retval = 1
+ else:
+ retval = None
+ finally:
+ Path.unlink(tmppath)
+ else:
+ try:
+ retval = None
+ # Emulate os.popen, but redirect stderr
+ proc = subprocess.Popen(
+ pager_cmd,
+ shell=True,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.DEVNULL,
+ )
+ pager = os._wrap_close(
+ io.TextIOWrapper(proc.stdin, encoding="utf-8"), proc
+ )
+ try:
+ pager_encoding = pager.encoding or sys.stdout.encoding
+ pager.write(strng)
+ finally:
+ retval = pager.close()
+ except IOError as msg: # broken pipe when user quits
+ if msg.args == (32, 'Broken pipe'):
+ retval = None
+ else:
+ retval = 1
+ except OSError:
+ # Other strange problems, sometimes seen in Win2k/cygwin
+ retval = 1
+ if retval is not None:
+ page_dumb(strng,screen_lines=screen_lines)
+
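+# Illustrative usage of pager_page (a sketch, not part of the upstream
+# module): a screen_lines value <= 0 asks for auto-detection, with its
+# absolute value kept free at the bottom of the terminal.  Paging a long
+# text while reserving three lines would look like:
+#
+#     pager_page('\n'.join(str(i) for i in range(500)), screen_lines=-3)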
+
+def page(data, start=0, screen_lines=0, pager_cmd=None):
+ """Display content in a pager, piping through a pager after a certain length.
+
+ data can be a mime-bundle dict, supplying multiple representations,
+ keyed by mime-type, or text.
+
+ Pager is dispatched via the `show_in_pager` IPython hook.
+ If no hook is registered, `pager_page` will be used.
+ """
+ # Some routines may auto-compute start offsets incorrectly and pass a
+ # negative value. Offset to 0 for robustness.
+ start = max(0, start)
+
+ # first, try the hook
+ ip = get_ipython()
+ if ip:
+ try:
+ ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines)
+ return
+ except TryNext:
+ pass
+
+ # fallback on default pager
+ return pager_page(data, start, screen_lines, pager_cmd)
+
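+# Illustrative hook registration (a sketch, not part of the upstream module):
+# a frontend can take over paging by registering a callable on the
+# `show_in_pager` hook, wrapped with `as_hook` so the implicit `self`
+# argument is stripped; `ip` stands for the running InteractiveShell:
+#
+#     def my_pager(data, start=0, screen_lines=0):
+#         text = data if isinstance(data, str) else data['text/plain']
+#         print(text)
+#
+#     ip.set_hook('show_in_pager', as_hook(my_pager))
+#     page('some long text')        # now dispatched to my_pager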
+
+def page_file(fname, start=0, pager_cmd=None):
+ """Page a file, using an optional pager command and starting line.
+ """
+
+ pager_cmd = get_pager_cmd(pager_cmd)
+ pager_cmd += ' ' + get_pager_start(pager_cmd,start)
+
+ try:
+ if os.environ['TERM'] in ['emacs','dumb']:
+ raise EnvironmentError
+ system(pager_cmd + ' ' + fname)
+ except:
+ try:
+ if start > 0:
+ start -= 1
+ page(open(fname, encoding="utf-8").read(), start)
+ except:
+ print('Unable to show file',repr(fname))
+
+
+def get_pager_cmd(pager_cmd=None):
+ """Return a pager command.
+
+ Makes some attempts at finding an OS-correct one.
+ """
+ if os.name == 'posix':
+ default_pager_cmd = 'less -R' # -R for color control sequences
+ elif os.name in ['nt','dos']:
+ default_pager_cmd = 'type'
+
+ if pager_cmd is None:
+ try:
+ pager_cmd = os.environ['PAGER']
+ except:
+ pager_cmd = default_pager_cmd
+
+ if pager_cmd == 'less' and '-r' not in os.environ.get('LESS', '').lower():
+ pager_cmd += ' -R'
+
+ return pager_cmd
+
+
+def get_pager_start(pager, start):
+ """Return the string for paging files with an offset.
+
+ This is the '+N' argument which less and more (under Unix) accept.
+ """
+
+ if pager in ['less','more']:
+ if start:
+ start_string = '+' + str(start)
+ else:
+ start_string = ''
+ else:
+ start_string = ''
+ return start_string
+
+
+# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
+if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
+ import msvcrt
+ def page_more():
+ """ Smart pausing between pages
+
+ @return: True if need print more lines, False if quit
+ """
+ sys.stdout.write('---Return to continue, q to quit--- ')
+ ans = msvcrt.getwch()
+ if ans in ("q", "Q"):
+ result = False
+ else:
+ result = True
+ sys.stdout.write("\b"*37 + " "*37 + "\b"*37)
+ return result
+else:
+ def page_more():
+ ans = py3compat.input('---Return to continue, q to quit--- ')
+ if ans.lower().startswith('q'):
+ return False
+ else:
+ return True
diff --git a/contrib/python/ipython/py3/IPython/core/payload.py b/contrib/python/ipython/py3/IPython/core/payload.py
new file mode 100644
index 0000000000..6818be1537
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/payload.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+"""Payload system for IPython.
+
+Authors:
+
+* Fernando Perez
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from traitlets.config.configurable import Configurable
+from traitlets import List
+
+#-----------------------------------------------------------------------------
+# Main payload class
+#-----------------------------------------------------------------------------
+
+class PayloadManager(Configurable):
+
+ _payload = List([])
+
+ def write_payload(self, data, single=True):
+ """Include or update the specified `data` payload in the PayloadManager.
+
+ If a previous payload with the same source exists and `single` is True,
+ it will be overwritten with the new one.
+ """
+
+ if not isinstance(data, dict):
+ raise TypeError('Each payload write must be a dict, got: %r' % data)
+
+ if single and 'source' in data:
+ source = data['source']
+ for i, pl in enumerate(self._payload):
+ if 'source' in pl and pl['source'] == source:
+ self._payload[i] = data
+ return
+
+ self._payload.append(data)
+
+ def read_payload(self):
+ return self._payload
+
+ def clear_payload(self):
+ self._payload = []
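+
+# Illustrative usage (a sketch, not part of the upstream module): payloads
+# sharing the same 'source' replace each other while single=True (the
+# default), so only the second 'page' payload below survives:
+#
+#     pm = PayloadManager()
+#     pm.write_payload({'source': 'page', 'data': {'text/plain': 'first'}})
+#     pm.write_payload({'source': 'page', 'data': {'text/plain': 'second'}})
+#     pm.read_payload()     # -> one entry, carrying 'second'
+#     pm.clear_payload()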
diff --git a/contrib/python/ipython/py3/IPython/core/payloadpage.py b/contrib/python/ipython/py3/IPython/core/payloadpage.py
new file mode 100644
index 0000000000..4958108076
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/payloadpage.py
@@ -0,0 +1,51 @@
+# encoding: utf-8
+"""A payload based version of page."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import warnings
+from IPython.core.getipython import get_ipython
+
+
+def page(strng, start=0, screen_lines=0, pager_cmd=None):
+ """Print a string, piping through a pager.
+
+ This version ignores the screen_lines and pager_cmd arguments and uses
+ IPython's payload system instead.
+
+ Parameters
+ ----------
+ strng : str or mime-dict
+ Text to page, or a mime-type keyed dict of already formatted data.
+ start : int
+ Starting line at which to place the display.
+ """
+
+ # Some routines may auto-compute start offsets incorrectly and pass a
+ # negative value. Offset to 0 for robustness.
+ start = max(0, start)
+ shell = get_ipython()
+
+ if isinstance(strng, dict):
+ data = strng
+ else:
+ data = {'text/plain' : strng}
+ payload = dict(
+ source='page',
+ data=data,
+ start=start,
+ )
+ shell.payload_manager.write_payload(payload)
+
+
+def install_payload_page():
+ """DEPRECATED, use show_in_pager hook
+
+ Install this version of page as IPython.core.page.page.
+ """
+ warnings.warn("""install_payload_page is deprecated.
+    Use `ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))`
+ """)
+ from IPython.core import page as corepage
+ corepage.page = page
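+
+# Illustrative replacement for install_payload_page (a sketch, not part of
+# the upstream module), following the hook-based approach suggested in the
+# deprecation warning above; `ip` stands for the running InteractiveShell:
+#
+#     from IPython.core import page, payloadpage
+#     ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))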
diff --git a/contrib/python/ipython/py3/IPython/core/prefilter.py b/contrib/python/ipython/py3/IPython/core/prefilter.py
new file mode 100644
index 0000000000..e7e82e3377
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/prefilter.py
@@ -0,0 +1,700 @@
+# encoding: utf-8
+"""
+Prefiltering components.
+
+Prefilters transform user input before it is exec'd by Python. These
+transforms are used to implement additional syntax such as !ls and %magic.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from keyword import iskeyword
+import re
+
+from .autocall import IPyAutocall
+from traitlets.config.configurable import Configurable
+from .inputtransformer2 import (
+ ESC_MAGIC,
+ ESC_QUOTE,
+ ESC_QUOTE2,
+ ESC_PAREN,
+)
+from .macro import Macro
+from .splitinput import LineInfo
+
+from traitlets import (
+ List, Integer, Unicode, Bool, Instance, CRegExp
+)
+
+#-----------------------------------------------------------------------------
+# Global utilities, errors and constants
+#-----------------------------------------------------------------------------
+
+
+class PrefilterError(Exception):
+ pass
+
+
+# RegExp to identify potential function names
+re_fun_name = re.compile(r'[^\W\d]([\w.]*) *$')
+
+# RegExp to exclude strings with this start from autocalling. In
+# particular, all binary operators should be excluded, so that if foo is
+# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
+# characters '!=()' don't need to be checked for, as the checkPythonChars
+# routine explicitly does so, to catch direct calls and rebindings of
+# existing names.
+
+# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
+# it affects the rest of the group in square brackets.
+re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
+ r'|^is |^not |^in |^and |^or ')
+
+# try to catch also methods for stuff in lists/tuples/dicts: off
+# (experimental). For this to work, the line_split regexp would need
+# to be modified so it wouldn't break things at '['. That line is
+# nasty enough that I shouldn't change it until I can test it _well_.
+#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
+
+
+# Handler Check Utilities
+def is_shadowed(identifier, ip):
+ """Is the given identifier defined in one of the namespaces which shadow
+ the alias and magic namespaces? Note that an identifier is different
+    from ifun, because it cannot contain a '.' character."""
+ # This is much safer than calling ofind, which can change state
+ return (identifier in ip.user_ns \
+ or identifier in ip.user_global_ns \
+ or identifier in ip.ns_table['builtin']\
+ or iskeyword(identifier))
+
+
+#-----------------------------------------------------------------------------
+# Main Prefilter manager
+#-----------------------------------------------------------------------------
+
+
+class PrefilterManager(Configurable):
+ """Main prefilter component.
+
+    The IPython prefilter is run on all user input before it is executed.
+    The prefilter consumes lines of input and produces transformed lines of
+ input.
+
+ The implementation consists of two phases:
+
+ 1. Transformers
+ 2. Checkers and handlers
+
+ Over time, we plan on deprecating the checkers and handlers and doing
+ everything in the transformers.
+
+ The transformers are instances of :class:`PrefilterTransformer` and have
+ a single method :meth:`transform` that takes a line and returns a
+ transformed line. The transformation can be accomplished using any
+ tool, but our current ones use regular expressions for speed.
+
+ After all the transformers have been run, the line is fed to the checkers,
+ which are instances of :class:`PrefilterChecker`. The line is passed to
+ the :meth:`check` method, which either returns `None` or a
+ :class:`PrefilterHandler` instance. If `None` is returned, the other
+    checkers are tried. If a :class:`PrefilterHandler` instance is returned,
+ the line is passed to the :meth:`handle` method of the returned
+ handler and no further checkers are tried.
+
+ Both transformers and checkers have a `priority` attribute, that determines
+ the order in which they are called. Smaller priorities are tried first.
+
+    Both transformers and checkers also have an `enabled` attribute, which is
+    a boolean that determines whether the instance is used.
+
+ Users or developers can change the priority or enabled attribute of
+ transformers or checkers, but they must call the :meth:`sort_checkers`
+ or :meth:`sort_transformers` method after changing the priority.
+ """
+
+ multi_line_specials = Bool(True).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(PrefilterManager, self).__init__(shell=shell, **kwargs)
+ self.shell = shell
+ self._transformers = []
+ self.init_handlers()
+ self.init_checkers()
+
+ #-------------------------------------------------------------------------
+ # API for managing transformers
+ #-------------------------------------------------------------------------
+
+ def sort_transformers(self):
+ """Sort the transformers by priority.
+
+ This must be called after the priority of a transformer is changed.
+ The :meth:`register_transformer` method calls this automatically.
+ """
+ self._transformers.sort(key=lambda x: x.priority)
+
+ @property
+ def transformers(self):
+        """Return a list of transformers, sorted by priority."""
+ return self._transformers
+
+ def register_transformer(self, transformer):
+ """Register a transformer instance."""
+ if transformer not in self._transformers:
+ self._transformers.append(transformer)
+ self.sort_transformers()
+
+ def unregister_transformer(self, transformer):
+ """Unregister a transformer instance."""
+ if transformer in self._transformers:
+ self._transformers.remove(transformer)
+
+ #-------------------------------------------------------------------------
+ # API for managing checkers
+ #-------------------------------------------------------------------------
+
+ def init_checkers(self):
+ """Create the default checkers."""
+ self._checkers = []
+ for checker in _default_checkers:
+ checker(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ def sort_checkers(self):
+ """Sort the checkers by priority.
+
+ This must be called after the priority of a checker is changed.
+ The :meth:`register_checker` method calls this automatically.
+ """
+ self._checkers.sort(key=lambda x: x.priority)
+
+ @property
+ def checkers(self):
+ """Return a list of checkers, sorted by priority."""
+ return self._checkers
+
+ def register_checker(self, checker):
+ """Register a checker instance."""
+ if checker not in self._checkers:
+ self._checkers.append(checker)
+ self.sort_checkers()
+
+ def unregister_checker(self, checker):
+ """Unregister a checker instance."""
+ if checker in self._checkers:
+ self._checkers.remove(checker)
+
+ #-------------------------------------------------------------------------
+ # API for managing handlers
+ #-------------------------------------------------------------------------
+
+ def init_handlers(self):
+ """Create the default handlers."""
+ self._handlers = {}
+ self._esc_handlers = {}
+ for handler in _default_handlers:
+ handler(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ @property
+ def handlers(self):
+ """Return a dict of all the handlers."""
+ return self._handlers
+
+ def register_handler(self, name, handler, esc_strings):
+ """Register a handler instance by name with esc_strings."""
+ self._handlers[name] = handler
+ for esc_str in esc_strings:
+ self._esc_handlers[esc_str] = handler
+
+ def unregister_handler(self, name, handler, esc_strings):
+ """Unregister a handler instance by name with esc_strings."""
+ try:
+ del self._handlers[name]
+ except KeyError:
+ pass
+ for esc_str in esc_strings:
+ h = self._esc_handlers.get(esc_str)
+ if h is handler:
+ del self._esc_handlers[esc_str]
+
+ def get_handler_by_name(self, name):
+ """Get a handler by its name."""
+ return self._handlers.get(name)
+
+ def get_handler_by_esc(self, esc_str):
+ """Get a handler by its escape string."""
+ return self._esc_handlers.get(esc_str)
+
+ #-------------------------------------------------------------------------
+ # Main prefiltering API
+ #-------------------------------------------------------------------------
+
+ def prefilter_line_info(self, line_info):
+ """Prefilter a line that has been converted to a LineInfo object.
+
+ This implements the checker/handler part of the prefilter pipe.
+ """
+ # print "prefilter_line_info: ", line_info
+ handler = self.find_handler(line_info)
+ return handler.handle(line_info)
+
+ def find_handler(self, line_info):
+ """Find a handler for the line_info by trying checkers."""
+ for checker in self.checkers:
+ if checker.enabled:
+ handler = checker.check(line_info)
+ if handler:
+ return handler
+ return self.get_handler_by_name('normal')
+
+ def transform_line(self, line, continue_prompt):
+ """Calls the enabled transformers in order of increasing priority."""
+ for transformer in self.transformers:
+ if transformer.enabled:
+ line = transformer.transform(line, continue_prompt)
+ return line
+
+ def prefilter_line(self, line, continue_prompt=False):
+ """Prefilter a single input line as text.
+
+ This method prefilters a single line of text by calling the
+ transformers and then the checkers/handlers.
+ """
+
+ # print "prefilter_line: ", line, continue_prompt
+ # All handlers *must* return a value, even if it's blank ('').
+
+ # save the line away in case we crash, so the post-mortem handler can
+ # record it
+ self.shell._last_input_line = line
+
+ if not line:
+ # Return immediately on purely empty lines, so that if the user
+ # previously typed some whitespace that started a continuation
+ # prompt, he can break out of that loop with just an empty line.
+ # This is how the default python prompt works.
+ return ''
+
+ # At this point, we invoke our transformers.
+ if not continue_prompt or (continue_prompt and self.multi_line_specials):
+ line = self.transform_line(line, continue_prompt)
+
+ # Now we compute line_info for the checkers and handlers
+ line_info = LineInfo(line, continue_prompt)
+
+ # the input history needs to track even empty lines
+ stripped = line.strip()
+
+ normal_handler = self.get_handler_by_name('normal')
+ if not stripped:
+ return normal_handler.handle(line_info)
+
+ # special handlers are only allowed for single line statements
+ if continue_prompt and not self.multi_line_specials:
+ return normal_handler.handle(line_info)
+
+ prefiltered = self.prefilter_line_info(line_info)
+ # print "prefiltered line: %r" % prefiltered
+ return prefiltered
+
+ def prefilter_lines(self, lines, continue_prompt=False):
+ """Prefilter multiple input lines of text.
+
+ This is the main entry point for prefiltering multiple lines of
+ input. This simply calls :meth:`prefilter_line` for each line of
+ input.
+
+ This covers cases where there are multiple lines in the user entry,
+ which is the case when the user goes back to a multiline history
+ entry and presses enter.
+ """
+ llines = lines.rstrip('\n').split('\n')
+ # We can get multiple lines in one shot, where multiline input 'blends'
+ # into one line, in cases like recalling from the readline history
+ # buffer. We need to make sure that in such cases, we correctly
+ # communicate downstream which line is first and which are continuation
+ # ones.
+ if len(llines) > 1:
+ out = '\n'.join([self.prefilter_line(line, lnum>0)
+ for lnum, line in enumerate(llines) ])
+ else:
+ out = self.prefilter_line(llines[0], continue_prompt)
+
+ return out
+
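+# Illustrative use of the prefilter pipeline (a sketch, not part of the
+# upstream module): with automagic enabled and nothing shadowing the name,
+# the AutoMagicChecker routes a bare magic call to the MagicHandler, which
+# rewrites it into an explicit run_line_magic call (`ip` is the running
+# InteractiveShell):
+#
+#     pm = ip.prefilter_manager
+#     pm.prefilter_line('time 1 + 1')
+#     # -> "get_ipython().run_line_magic('time', '1 + 1')"
+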
+#-----------------------------------------------------------------------------
+# Prefilter transformers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterTransformer(Configurable):
+ """Transform a line of user input."""
+
+ priority = Integer(100).tag(config=True)
+ # Transformers don't currently use shell or prefilter_manager, but as we
+ # move away from checkers and handlers, they will need them.
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ enabled = Bool(True).tag(config=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterTransformer, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_transformer(self)
+
+ def transform(self, line, continue_prompt):
+ """Transform a line, returning the new one."""
+ return None
+
+ def __repr__(self):
+ return "<%s(priority=%r, enabled=%r)>" % (
+ self.__class__.__name__, self.priority, self.enabled)
+
+
+#-----------------------------------------------------------------------------
+# Prefilter checkers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterChecker(Configurable):
+ """Inspect an input line and return a handler for that line."""
+
+ priority = Integer(100).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ enabled = Bool(True).tag(config=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterChecker, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_checker(self)
+
+ def check(self, line_info):
+ """Inspect line_info and return a handler instance or None."""
+ return None
+
+ def __repr__(self):
+ return "<%s(priority=%r, enabled=%r)>" % (
+ self.__class__.__name__, self.priority, self.enabled)
+
+
+class EmacsChecker(PrefilterChecker):
+
+ priority = Integer(100).tag(config=True)
+ enabled = Bool(False).tag(config=True)
+
+ def check(self, line_info):
+ "Emacs ipython-mode tags certain input lines."
+ if line_info.line.endswith('# PYTHON-MODE'):
+ return self.prefilter_manager.get_handler_by_name('emacs')
+ else:
+ return None
+
+
+class MacroChecker(PrefilterChecker):
+
+ priority = Integer(250).tag(config=True)
+
+ def check(self, line_info):
+ obj = self.shell.user_ns.get(line_info.ifun)
+ if isinstance(obj, Macro):
+ return self.prefilter_manager.get_handler_by_name('macro')
+ else:
+ return None
+
+
+class IPyAutocallChecker(PrefilterChecker):
+
+ priority = Integer(300).tag(config=True)
+
+ def check(self, line_info):
+ "Instances of IPyAutocall in user_ns get autocalled immediately"
+ obj = self.shell.user_ns.get(line_info.ifun, None)
+ if isinstance(obj, IPyAutocall):
+ obj.set_ip(self.shell)
+ return self.prefilter_manager.get_handler_by_name('auto')
+ else:
+ return None
+
+
+class AssignmentChecker(PrefilterChecker):
+
+ priority = Integer(600).tag(config=True)
+
+ def check(self, line_info):
+ """Check to see if user is assigning to a var for the first time, in
+ which case we want to avoid any sort of automagic / autocall games.
+
+        This allows users to rebind alias or magic names to true Python
+        variables (the magic/alias systems always take second seat to true
+        Python code). E.g. ls='hi', or ls,that=1,2"""
+ if line_info.the_rest:
+ if line_info.the_rest[0] in '=,':
+ return self.prefilter_manager.get_handler_by_name('normal')
+ else:
+ return None
+
+
+class AutoMagicChecker(PrefilterChecker):
+
+ priority = Integer(700).tag(config=True)
+
+ def check(self, line_info):
+ """If the ifun is magic, and automagic is on, run it. Note: normal,
+ non-auto magic would already have been triggered via '%' in
+ check_esc_chars. This just checks for automagic. Also, before
+ triggering the magic handler, make sure that there is nothing in the
+ user namespace which could shadow it."""
+ if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
+ return None
+
+ # We have a likely magic method. Make sure we should actually call it.
+ if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
+ return None
+
+ head = line_info.ifun.split('.',1)[0]
+ if is_shadowed(head, self.shell):
+ return None
+
+ return self.prefilter_manager.get_handler_by_name('magic')
+
+
+class PythonOpsChecker(PrefilterChecker):
+
+ priority = Integer(900).tag(config=True)
+
+ def check(self, line_info):
+ """If the 'rest' of the line begins with a function call or pretty much
+ any python operator, we should simply execute the line (regardless of
+ whether or not there's a possible autocall expansion). This avoids
+        spurious (and very confusing) getattr() accesses."""
+ if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
+ return self.prefilter_manager.get_handler_by_name('normal')
+ else:
+ return None
+
+
+class AutocallChecker(PrefilterChecker):
+
+ priority = Integer(1000).tag(config=True)
+
+ function_name_regexp = CRegExp(re_fun_name,
+ help="RegExp to identify potential function names."
+ ).tag(config=True)
+ exclude_regexp = CRegExp(re_exclude_auto,
+ help="RegExp to exclude strings with this start from autocalling."
+ ).tag(config=True)
+
+ def check(self, line_info):
+ "Check if the initial word/function is callable and autocall is on."
+ if not self.shell.autocall:
+ return None
+
+ oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
+ if not oinfo.found:
+ return None
+
+ ignored_funs = ['b', 'f', 'r', 'u', 'br', 'rb', 'fr', 'rf']
+ ifun = line_info.ifun
+ line = line_info.line
+ if ifun.lower() in ignored_funs and (line.startswith(ifun + "'") or line.startswith(ifun + '"')):
+ return None
+
+ if (
+ callable(oinfo.obj)
+ and (not self.exclude_regexp.match(line_info.the_rest))
+ and self.function_name_regexp.match(line_info.ifun)
+ ):
+ return self.prefilter_manager.get_handler_by_name("auto")
+ else:
+ return None
+
+
+#-----------------------------------------------------------------------------
+# Prefilter handlers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterHandler(Configurable):
+
+ handler_name = Unicode('normal')
+ esc_strings = List([])
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterHandler, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_handler(
+ self.handler_name,
+ self,
+ self.esc_strings
+ )
+
+ def handle(self, line_info):
+ # print "normal: ", line_info
+ """Handle normal input lines. Use as a template for handlers."""
+
+ # With autoindent on, we need some way to exit the input loop, and I
+ # don't want to force the user to have to backspace all the way to
+ # clear the line. The rule will be in this case, that either two
+ # lines of pure whitespace in a row, or a line of pure whitespace but
+ # of a size different to the indent level, will exit the input loop.
+ line = line_info.line
+ continue_prompt = line_info.continue_prompt
+
+ if (continue_prompt and
+ self.shell.autoindent and
+ line.isspace() and
+ 0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
+ line = ''
+
+ return line
+
+ def __str__(self):
+ return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
+
+
+class MacroHandler(PrefilterHandler):
+ handler_name = Unicode("macro")
+
+ def handle(self, line_info):
+ obj = self.shell.user_ns.get(line_info.ifun)
+ pre_space = line_info.pre_whitespace
+ line_sep = "\n" + pre_space
+ return pre_space + line_sep.join(obj.value.splitlines())
+
+
+class MagicHandler(PrefilterHandler):
+
+ handler_name = Unicode('magic')
+ esc_strings = List([ESC_MAGIC])
+
+ def handle(self, line_info):
+ """Execute magic functions."""
+ ifun = line_info.ifun
+ the_rest = line_info.the_rest
+ #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
+ t_arg_s = ifun + " " + the_rest
+ t_magic_name, _, t_magic_arg_s = t_arg_s.partition(' ')
+ t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
+ cmd = '%sget_ipython().run_line_magic(%r, %r)' % (line_info.pre_whitespace, t_magic_name, t_magic_arg_s)
+ return cmd
+
+
+class AutoHandler(PrefilterHandler):
+
+ handler_name = Unicode('auto')
+ esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
+
+ def handle(self, line_info):
+ """Handle lines which can be auto-executed, quoting if requested."""
+ line = line_info.line
+ ifun = line_info.ifun
+ the_rest = line_info.the_rest
+ esc = line_info.esc
+ continue_prompt = line_info.continue_prompt
+ obj = line_info.ofind(self.shell).obj
+
+ # This should only be active for single-line input!
+ if continue_prompt:
+ return line
+
+ force_auto = isinstance(obj, IPyAutocall)
+
+ # User objects sometimes raise exceptions on attribute access other
+ # than AttributeError (we've seen it in the past), so it's safest to be
+ # ultra-conservative here and catch all.
+ try:
+ auto_rewrite = obj.rewrite
+ except Exception:
+ auto_rewrite = True
+
+ if esc == ESC_QUOTE:
+ # Auto-quote splitting on whitespace
+ newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
+ elif esc == ESC_QUOTE2:
+ # Auto-quote whole string
+ newcmd = '%s("%s")' % (ifun,the_rest)
+ elif esc == ESC_PAREN:
+ newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
+ else:
+ # Auto-paren.
+ if force_auto:
+ # Don't rewrite if it is already a call.
+ do_rewrite = not the_rest.startswith('(')
+ else:
+ if not the_rest:
+ # We only apply it to argument-less calls if the autocall
+ # parameter is set to 2.
+ do_rewrite = (self.shell.autocall >= 2)
+ elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
+ # Don't autocall in this case: item access for an object
+ # which is BOTH callable and implements __getitem__.
+ do_rewrite = False
+ else:
+ do_rewrite = True
+
+ # Figure out the rewritten command
+ if do_rewrite:
+ if the_rest.endswith(';'):
+ newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
+ else:
+ newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
+ else:
+ normal_handler = self.prefilter_manager.get_handler_by_name('normal')
+ return normal_handler.handle(line_info)
+
+ # Display the rewritten call
+ if auto_rewrite:
+ self.shell.auto_rewrite_input(newcmd)
+
+ return newcmd
+
+
+class EmacsHandler(PrefilterHandler):
+
+ handler_name = Unicode('emacs')
+ esc_strings = List([])
+
+ def handle(self, line_info):
+ """Handle input lines marked by python-mode."""
+
+ # Currently, nothing is done. Later more functionality can be added
+ # here if needed.
+
+ # The input cache shouldn't be updated
+ return line_info.line
+
+
+#-----------------------------------------------------------------------------
+# Defaults
+#-----------------------------------------------------------------------------
+
+
+_default_checkers = [
+ EmacsChecker,
+ MacroChecker,
+ IPyAutocallChecker,
+ AssignmentChecker,
+ AutoMagicChecker,
+ PythonOpsChecker,
+ AutocallChecker
+]
+
+_default_handlers = [
+ PrefilterHandler,
+ MacroHandler,
+ MagicHandler,
+ AutoHandler,
+ EmacsHandler
+]
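+
+# Illustrative extension sketch (not part of the upstream module): a custom
+# checker registers itself with the manager on instantiation and only needs
+# a priority plus a check() returning an already-registered handler (`ip` is
+# the running InteractiveShell):
+#
+#     class ShoutChecker(PrefilterChecker):
+#         priority = Integer(50).tag(config=True)
+#
+#         def check(self, line_info):
+#             if line_info.line.endswith('!!'):
+#                 return self.prefilter_manager.get_handler_by_name('normal')
+#             return None
+#
+#     ShoutChecker(shell=ip, prefilter_manager=ip.prefilter_manager)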
diff --git a/contrib/python/ipython/py3/IPython/core/profile/README_STARTUP b/contrib/python/ipython/py3/IPython/core/profile/README_STARTUP
new file mode 100644
index 0000000000..61d4700042
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/profile/README_STARTUP
@@ -0,0 +1,11 @@
+This is the IPython startup directory
+
+.py and .ipy files in this directory will be run *prior* to any code or files specified
+via the exec_lines or exec_files configurables whenever you load this profile.
+
+Files will be run in lexicographical order, so you can control the execution order of files
+with a prefix, e.g.::
+
+ 00-first.py
+ 50-middle.py
+ 99-last.ipy
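+
+For example, a hypothetical 00-first.py could pre-import modules you always
+want available (a sketch, not shipped with IPython)::
+
+    import os
+    import sys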
diff --git a/contrib/python/ipython/py3/IPython/core/profileapp.py b/contrib/python/ipython/py3/IPython/core/profileapp.py
new file mode 100644
index 0000000000..9a1bae55ac
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/profileapp.py
@@ -0,0 +1,312 @@
+# encoding: utf-8
+"""
+An application for managing IPython profiles.
+
+To be invoked as the `ipython profile` subcommand.
+
+Authors:
+
+* Min RK
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+
+from traitlets.config.application import Application
+from IPython.core.application import (
+ BaseIPythonApplication, base_flags
+)
+from IPython.core.profiledir import ProfileDir
+from IPython.utils.importstring import import_item
+from IPython.paths import get_ipython_dir, get_ipython_package_dir
+from traitlets import Unicode, Bool, Dict, observe
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+create_help = """Create an IPython profile by name
+
+Create an ipython profile directory by its name or
+profile directory path. Profile directories contain
+configuration, log and security related files and are named
+using the convention 'profile_<name>'. By default they are
+located in your ipython directory. Once created, you can
+edit the configuration files in the profile
+directory to configure IPython. Most users will create a
+profile directory by name,
+`ipython profile create myprofile`, which will put the directory
+in `<ipython_dir>/profile_myprofile`.
+"""
+list_help = """List available IPython profiles
+
+List all available profiles, by profile location, that can
+be found in the current working directory or in the ipython
+directory. Profile directories are named using the convention
+'profile_<profile>'.
+"""
+profile_help = """Manage IPython profiles
+
+Profile directories contain
+configuration, log and security related files and are named
+using the convention 'profile_<name>'. By default they are
+located in your ipython directory. You can create profiles
+with `ipython profile create <name>`, or see the profiles you
+already have with `ipython profile list`
+
+To get started configuring IPython, simply do:
+
+$> ipython profile create
+
+and IPython will create the default profile in <ipython_dir>/profile_default,
+where you can edit ipython_config.py to start configuring IPython.
+
+"""
+
+_list_examples = "ipython profile list # list all profiles"
+
+_create_examples = """
+ipython profile create foo # create profile foo w/ default config files
+ipython profile create foo --reset # restage default config files over current
+ipython profile create foo --parallel # also stage parallel config files
+"""
+
+_main_examples = """
+ipython profile create -h # show the help string for the create subcommand
+ipython profile list -h # show the help string for the list subcommand
+
+ipython locate profile foo # print the path to the directory for profile 'foo'
+"""
+
+#-----------------------------------------------------------------------------
+# Profile Application Class (for `ipython profile` subcommand)
+#-----------------------------------------------------------------------------
+
+
+def list_profiles_in(path):
+ """list profiles in a given root directory"""
+ profiles = []
+
+ # for python 3.6+ rewrite to: with os.scandir(path) as dirlist:
+ files = os.scandir(path)
+ for f in files:
+ if f.is_dir() and f.name.startswith('profile_'):
+ profiles.append(f.name.split('_', 1)[-1])
+ return profiles
+
+
+def list_bundled_profiles():
+ """list profiles that are bundled with IPython."""
+ path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
+ profiles = []
+
+ # for python 3.6+ rewrite to: with os.scandir(path) as dirlist:
+ files = os.scandir(path)
+ for profile in files:
+ if profile.is_dir() and profile.name != "__pycache__":
+ profiles.append(profile.name)
+ return profiles
+
+
+class ProfileLocate(BaseIPythonApplication):
+ description = """print the path to an IPython profile dir"""
+
+ def parse_command_line(self, argv=None):
+ super(ProfileLocate, self).parse_command_line(argv)
+ if self.extra_args:
+ self.profile = self.extra_args[0]
+
+ def start(self):
+ print(self.profile_dir.location)
+
+
+class ProfileList(Application):
+ name = u'ipython-profile'
+ description = list_help
+ examples = _list_examples
+
+ aliases = Dict({
+ 'ipython-dir' : 'ProfileList.ipython_dir',
+ 'log-level' : 'Application.log_level',
+ })
+ flags = Dict(dict(
+ debug = ({'Application' : {'log_level' : 0}},
+ "Set Application.log_level to 0, maximizing log output."
+ )
+ ))
+
+ ipython_dir = Unicode(get_ipython_dir(),
+ help="""
+ The name of the IPython directory. This directory is used for logging
+ configuration (through profiles), history storage, etc. The default
+        is usually $HOME/.ipython. This option can also be specified through
+ the environment variable IPYTHONDIR.
+ """
+ ).tag(config=True)
+
+
+ def _print_profiles(self, profiles):
+ """print list of profiles, indented."""
+ for profile in profiles:
+ print(' %s' % profile)
+
+ def list_profile_dirs(self):
+ profiles = list_bundled_profiles()
+ if profiles:
+ print()
+ print("Available profiles in IPython:")
+ self._print_profiles(profiles)
+ print()
+ print(" The first request for a bundled profile will copy it")
+ print(" into your IPython directory (%s)," % self.ipython_dir)
+ print(" where you can customize it.")
+
+ profiles = list_profiles_in(self.ipython_dir)
+ if profiles:
+ print()
+ print("Available profiles in %s:" % self.ipython_dir)
+ self._print_profiles(profiles)
+
+ profiles = list_profiles_in(os.getcwd())
+ if profiles:
+ print()
+ print(
+                "Profiles from CWD have been removed for security reasons, see CVE-2022-21699:"
+ )
+
+ print()
+ print("To use any of the above profiles, start IPython with:")
+ print(" ipython --profile=<name>")
+ print()
+
+ def start(self):
+ self.list_profile_dirs()
+
+
+create_flags = {}
+create_flags.update(base_flags)
+# don't include '--init' flag, which implies running profile create in other apps
+create_flags.pop('init')
+create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
+ "reset config files in this profile to the defaults.")
+create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
+ "Include the config files for parallel "
+ "computing apps (ipengine, ipcontroller, etc.)")
+
+
+class ProfileCreate(BaseIPythonApplication):
+ name = u'ipython-profile'
+ description = create_help
+ examples = _create_examples
+ auto_create = Bool(True)
+ def _log_format_default(self):
+ return "[%(name)s] %(message)s"
+
+ def _copy_config_files_default(self):
+ return True
+
+ parallel = Bool(False,
+ help="whether to include parallel computing config files"
+ ).tag(config=True)
+
+ @observe('parallel')
+ def _parallel_changed(self, change):
+ parallel_files = [ 'ipcontroller_config.py',
+ 'ipengine_config.py',
+ 'ipcluster_config.py'
+ ]
+ if change['new']:
+ for cf in parallel_files:
+ self.config_files.append(cf)
+ else:
+ for cf in parallel_files:
+ if cf in self.config_files:
+ self.config_files.remove(cf)
+
+ def parse_command_line(self, argv):
+ super(ProfileCreate, self).parse_command_line(argv)
+ # accept positional arg as profile name
+ if self.extra_args:
+ self.profile = self.extra_args[0]
+
+ flags = Dict(create_flags)
+
+ classes = [ProfileDir]
+
+ def _import_app(self, app_path):
+ """import an app class"""
+ app = None
+ name = app_path.rsplit('.', 1)[-1]
+ try:
+ app = import_item(app_path)
+ except ImportError:
+ self.log.info("Couldn't import %s, config file will be excluded", name)
+ except Exception:
+ self.log.warning('Unexpected error importing %s', name, exc_info=True)
+ return app
+
+ def init_config_files(self):
+ super(ProfileCreate, self).init_config_files()
+ # use local imports, since these classes may import from here
+ from IPython.terminal.ipapp import TerminalIPythonApp
+ apps = [TerminalIPythonApp]
+ for app_path in (
+ 'ipykernel.kernelapp.IPKernelApp',
+ ):
+ app = self._import_app(app_path)
+ if app is not None:
+ apps.append(app)
+ if self.parallel:
+ from ipyparallel.apps.ipcontrollerapp import IPControllerApp
+ from ipyparallel.apps.ipengineapp import IPEngineApp
+ from ipyparallel.apps.ipclusterapp import IPClusterStart
+ apps.extend([
+ IPControllerApp,
+ IPEngineApp,
+ IPClusterStart,
+ ])
+ for App in apps:
+ app = App()
+ app.config.update(self.config)
+ app.log = self.log
+ app.overwrite = self.overwrite
+ app.copy_config_files=True
+ app.ipython_dir=self.ipython_dir
+ app.profile_dir=self.profile_dir
+ app.init_config_files()
+
+ def stage_default_config_file(self):
+ pass
+
+
+class ProfileApp(Application):
+ name = u'ipython profile'
+ description = profile_help
+ examples = _main_examples
+
+ subcommands = Dict(dict(
+ create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
+ list = (ProfileList, ProfileList.description.splitlines()[0]),
+ locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
+ ))
+
+ def start(self):
+ if self.subapp is None:
+ print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
+ print()
+ self.print_description()
+ self.print_subcommands()
+ self.exit(1)
+ else:
+ return self.subapp.start()
diff --git a/contrib/python/ipython/py3/IPython/core/profiledir.py b/contrib/python/ipython/py3/IPython/core/profiledir.py
new file mode 100644
index 0000000000..cb4d39339a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/profiledir.py
@@ -0,0 +1,223 @@
+# encoding: utf-8
+"""An object for managing IPython profile directories."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import shutil
+import errno
+from pathlib import Path
+
+from traitlets.config.configurable import LoggingConfigurable
+from ..paths import get_ipython_package_dir
+from ..utils.path import expand_path, ensure_dir_exists
+from traitlets import Unicode, Bool, observe
+
+#-----------------------------------------------------------------------------
+# Module errors
+#-----------------------------------------------------------------------------
+
+class ProfileDirError(Exception):
+ pass
+
+
+#-----------------------------------------------------------------------------
+# Class for managing profile directories
+#-----------------------------------------------------------------------------
+
+class ProfileDir(LoggingConfigurable):
+ """An object to manage the profile directory and its resources.
+
+ The profile directory is used by all IPython applications, to manage
+ configuration, logging and security.
+
+ This object knows how to find, create and manage these directories. This
+ should be used by any code that wants to handle profiles.
+ """
+
+ security_dir_name = Unicode('security')
+ log_dir_name = Unicode('log')
+ startup_dir_name = Unicode('startup')
+ pid_dir_name = Unicode('pid')
+ static_dir_name = Unicode('static')
+ security_dir = Unicode(u'')
+ log_dir = Unicode(u'')
+ startup_dir = Unicode(u'')
+ pid_dir = Unicode(u'')
+ static_dir = Unicode(u'')
+
+ location = Unicode(u'',
+ help="""Set the profile location directly. This overrides the logic used by the
+ `profile` option.""",
+ ).tag(config=True)
+
+ _location_isset = Bool(False) # flag for detecting multiply set location
+ @observe('location')
+ def _location_changed(self, change):
+ if self._location_isset:
+ raise RuntimeError("Cannot set profile location more than once.")
+ self._location_isset = True
+ new = change['new']
+ ensure_dir_exists(new)
+
+ # ensure config files exist:
+ self.security_dir = os.path.join(new, self.security_dir_name)
+ self.log_dir = os.path.join(new, self.log_dir_name)
+ self.startup_dir = os.path.join(new, self.startup_dir_name)
+ self.pid_dir = os.path.join(new, self.pid_dir_name)
+ self.static_dir = os.path.join(new, self.static_dir_name)
+ self.check_dirs()
+
+ def _mkdir(self, path, mode=None):
+ """ensure a directory exists at a given path
+
+ This is a version of os.mkdir, with the following differences:
+
+ - returns True if it created the directory, False otherwise
+ - ignores EEXIST, protecting against race conditions where
+ the dir may have been created in between the check and
+ the creation
+ - sets permissions if requested and the dir already exists
+ """
+ if os.path.exists(path):
+ if mode and os.stat(path).st_mode != mode:
+ try:
+ os.chmod(path, mode)
+ except OSError:
+ self.log.warning(
+ "Could not set permissions on %s",
+ path
+ )
+ return False
+ try:
+ if mode:
+ os.mkdir(path, mode)
+ else:
+ os.mkdir(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ return False
+ else:
+ raise
+
+ return True
+
+ @observe('log_dir')
+ def check_log_dir(self, change=None):
+ self._mkdir(self.log_dir)
+
+ @observe('startup_dir')
+ def check_startup_dir(self, change=None):
+ self._mkdir(self.startup_dir)
+
+ readme = os.path.join(self.startup_dir, 'README')
+
+ if not os.path.exists(readme):
+ import pkgutil
+ with open(readme, 'wb') as f:
+ f.write(pkgutil.get_data(__name__, 'profile/README_STARTUP'))
+
+ @observe('security_dir')
+ def check_security_dir(self, change=None):
+ self._mkdir(self.security_dir, 0o40700)
+
+ @observe('pid_dir')
+ def check_pid_dir(self, change=None):
+ self._mkdir(self.pid_dir, 0o40700)
+
+ def check_dirs(self):
+ self.check_security_dir()
+ self.check_log_dir()
+ self.check_pid_dir()
+ self.check_startup_dir()
+
+ def copy_config_file(self, config_file: str, path: Path, overwrite=False) -> bool:
+ """Copy a default config file into the active profile directory.
+
+ Default configuration files are kept in :mod:`IPython.core.profile`.
+        This function copies them from that location to the working profile
+        directory.
+ """
+ dst = Path(os.path.join(self.location, config_file))
+ if dst.exists() and not overwrite:
+ return False
+        if path is None:
+            # build a Path so the assertion and the `/` join below hold
+            path = Path(get_ipython_package_dir()) / 'core' / 'profile' / 'default'
+ assert isinstance(path, Path)
+ src = path / config_file
+ shutil.copy(src, dst)
+ return True
+
+ @classmethod
+ def create_profile_dir(cls, profile_dir, config=None):
+ """Create a new profile directory given a full path.
+
+ Parameters
+ ----------
+ profile_dir : str
+ The full path to the profile directory. If it does exist, it will
+ be used. If not, it will be created.
+ """
+ return cls(location=profile_dir, config=config)
+
+ @classmethod
+ def create_profile_dir_by_name(cls, path, name=u'default', config=None):
+ """Create a profile dir by profile name and path.
+
+ Parameters
+ ----------
+ path : unicode
+ The path (directory) to put the profile directory in.
+ name : unicode
+ The name of the profile. The name of the profile directory will
+ be "profile_<profile>".
+ """
+ if not os.path.isdir(path):
+ raise ProfileDirError('Directory not found: %s' % path)
+ profile_dir = os.path.join(path, u'profile_' + name)
+ return cls(location=profile_dir, config=config)
+
+ @classmethod
+ def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
+ """Find an existing profile dir by profile name, return its ProfileDir.
+
+ This searches through a sequence of paths for a profile dir. If it
+ is not found, a :class:`ProfileDirError` exception will be raised.
+
+ The search path algorithm is:
+ 1. ``os.getcwd()``  # removed for security reasons.
+ 2. ``ipython_dir``
+
+ Parameters
+ ----------
+ ipython_dir : unicode or str
+ The IPython directory to use.
+ name : unicode or str
+ The name of the profile. The name of the profile directory
+ will be "profile_<profile>".
+ """
+ dirname = u'profile_' + name
+ paths = [ipython_dir]
+ for p in paths:
+ profile_dir = os.path.join(p, dirname)
+ if os.path.isdir(profile_dir):
+ return cls(location=profile_dir, config=config)
+ else:
+ raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
+
+ @classmethod
+ def find_profile_dir(cls, profile_dir, config=None):
+ """Find/create a profile dir and return its ProfileDir.
+
+ This will create the profile directory if it doesn't exist.
+
+ Parameters
+ ----------
+ profile_dir : unicode or str
+ The path of the profile directory.
+ """
+ profile_dir = expand_path(profile_dir)
+ if not os.path.isdir(profile_dir):
+ raise ProfileDirError('Profile directory not found: %s' % profile_dir)
+ return cls(location=profile_dir, config=config)
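+
+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# how the classmethods above fit together. The temporary directory stands in
+# for a real IPython directory and is an assumption for illustration only.
+def _example_profile_dir_roundtrip():
+    import tempfile
+    ipython_dir = tempfile.mkdtemp()
+    # creates <ipython_dir>/profile_default and its log/startup/pid/... subdirs
+    created = ProfileDir.create_profile_dir_by_name(ipython_dir, u'default')
+    # raises ProfileDirError if the profile directory cannot be found
+    found = ProfileDir.find_profile_dir_by_name(ipython_dir, u'default')
+    return created.location == found.location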
diff --git a/contrib/python/ipython/py3/IPython/core/prompts.py b/contrib/python/ipython/py3/IPython/core/prompts.py
new file mode 100644
index 0000000000..7fd218d37a
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/prompts.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+"""Being removed
+"""
+
+class LazyEvaluate(object):
+ """This is used for formatting strings with values that need to be updated
+ at that time, such as the current time or working directory."""
+ def __init__(self, func, *args, **kwargs):
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ def __call__(self, **kwargs):
+ self.kwargs.update(kwargs)
+ return self.func(*self.args, **self.kwargs)
+
+ def __str__(self):
+ return str(self())
+
+ def __format__(self, format_spec):
+ return format(self(), format_spec)
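+
+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# LazyEvaluate defers calling `func` until the value is actually rendered, so
+# e.g. a timestamp reflects the moment of formatting, not of definition.
+def _example_lazy_evaluate():
+    import time
+    clock = LazyEvaluate(time.strftime, '%H:%M:%S')
+    # str()/format() re-invoke time.strftime with the stored arguments
+    return "current time: {}".format(clock)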
diff --git a/contrib/python/ipython/py3/IPython/core/pylabtools.py b/contrib/python/ipython/py3/IPython/core/pylabtools.py
new file mode 100644
index 0000000000..deadf038ea
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/pylabtools.py
@@ -0,0 +1,425 @@
+# -*- coding: utf-8 -*-
+"""Pylab (matplotlib) support utilities."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from io import BytesIO
+from binascii import b2a_base64
+from functools import partial
+import warnings
+
+from IPython.core.display import _pngxy
+from IPython.utils.decorators import flag_calls
+
+# If user specifies a GUI, that dictates the backend, otherwise we read the
+# user's mpl default from the mpl rc structure
+backends = {
+ "tk": "TkAgg",
+ "gtk": "GTKAgg",
+ "gtk3": "GTK3Agg",
+ "gtk4": "GTK4Agg",
+ "wx": "WXAgg",
+ "qt4": "Qt4Agg",
+ "qt5": "Qt5Agg",
+ "qt6": "QtAgg",
+ "qt": "Qt5Agg",
+ "osx": "MacOSX",
+ "nbagg": "nbAgg",
+ "webagg": "WebAgg",
+ "notebook": "nbAgg",
+ "agg": "agg",
+ "svg": "svg",
+ "pdf": "pdf",
+ "ps": "ps",
+ "inline": "module://matplotlib_inline.backend_inline",
+ "ipympl": "module://ipympl.backend_nbagg",
+ "widget": "module://ipympl.backend_nbagg",
+}
+
+# We also need a reverse backends2guis mapping that will properly choose which
+# GUI support to activate based on the desired matplotlib backend. For the
+# most part it's just a reverse of the above dict, but we also need to add a
+# few others that map to the same GUI manually:
+backend2gui = dict(zip(backends.values(), backends.keys()))
+# In the reverse mapping, there are a few extra valid matplotlib backends that
+# map to the same GUI support
+backend2gui["GTK"] = backend2gui["GTKCairo"] = "gtk"
+backend2gui["GTK3Cairo"] = "gtk3"
+backend2gui["GTK4Cairo"] = "gtk4"
+backend2gui["WX"] = "wx"
+backend2gui["CocoaAgg"] = "osx"
+# There needs to be some flexibility here, as the new QtAgg Matplotlib backend
+# supports either Qt5 or Qt6, while the IPython qt event loop supports Qt4,
+# Qt5, and Qt6; map all of them to the generic "qt" key.
+backend2gui["QtAgg"] = "qt"
+backend2gui["Qt4Agg"] = "qt"
+backend2gui["Qt5Agg"] = "qt"
+
+# And some backends that don't need GUI integration
+del backend2gui["nbAgg"]
+del backend2gui["agg"]
+del backend2gui["svg"]
+del backend2gui["pdf"]
+del backend2gui["ps"]
+del backend2gui["module://matplotlib_inline.backend_inline"]
+del backend2gui["module://ipympl.backend_nbagg"]
+
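+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# the two tables above are used as plain dict lookups.
+def _example_backend_lookup():
+    # gui name -> matplotlib backend
+    assert backends["qt"] == "Qt5Agg"
+    # matplotlib backend -> gui key for event-loop integration
+    assert backend2gui.get("Qt5Agg") == "qt"
+    # backends that need no GUI loop were deleted from the reverse mapping
+    assert backend2gui.get("agg") is None
+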
+#-----------------------------------------------------------------------------
+# Matplotlib utilities
+#-----------------------------------------------------------------------------
+
+
+def getfigs(*fig_nums):
+ """Get a list of matplotlib figures by figure numbers.
+
+ If no arguments are given, all available figures are returned. If the
+ argument list contains references to invalid figures, a warning is printed
+ but the function continues processing the remaining figures.
+
+ Parameters
+ ----------
+ *fig_nums : int
+ Figure numbers of the figures to return.
+ """
+ from matplotlib._pylab_helpers import Gcf
+ if not fig_nums:
+ fig_managers = Gcf.get_all_fig_managers()
+ return [fm.canvas.figure for fm in fig_managers]
+ else:
+ figs = []
+ for num in fig_nums:
+ f = Gcf.figs.get(num)
+ if f is None:
+ print('Warning: figure %s not available.' % num)
+ else:
+ figs.append(f.canvas.figure)
+ return figs
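+
+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# assumes matplotlib is importable; figures 1 and 2 are created here purely
+# for demonstration.
+def _example_getfigs():
+    import matplotlib.pyplot as plt
+    f1 = plt.figure(1)
+    f2 = plt.figure(2)
+    # no arguments -> every active figure; explicit numbers -> just those
+    assert set(getfigs()) == {f1, f2}
+    assert getfigs(2) == [f2]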
+
+
+def figsize(sizex, sizey):
+ """Set the default figure size to be [sizex, sizey].
+
+ This is just an easy-to-remember convenience wrapper that sets::
+
+ matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
+ """
+ import matplotlib
+ matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
+
+
+def print_figure(fig, fmt="png", bbox_inches="tight", base64=False, **kwargs):
+ """Print a figure to an image, and return the resulting file data
+
+ Returned data will be bytes unless ``fmt='svg'``,
+ in which case it will be unicode.
+
+ Any keyword args are passed to fig.canvas.print_figure,
+ such as ``quality`` or ``bbox_inches``.
+
+ If `base64` is True, a base64-encoded str is returned instead of raw
+ bytes for binary-encoded image formats.
+
+ .. versionadded:: 7.29
+ base64 argument
+ """
+ # When there's an empty figure, we shouldn't return anything, otherwise we
+ # get big blank areas in the qt console.
+ if not fig.axes and not fig.lines:
+ return
+
+ dpi = fig.dpi
+ if fmt == 'retina':
+ dpi = dpi * 2
+ fmt = 'png'
+
+ # build keyword args
+ kw = {
+ "format":fmt,
+ "facecolor":fig.get_facecolor(),
+ "edgecolor":fig.get_edgecolor(),
+ "dpi":dpi,
+ "bbox_inches":bbox_inches,
+ }
+ # **kwargs get higher priority
+ kw.update(kwargs)
+
+ bytes_io = BytesIO()
+ if fig.canvas is None:
+ from matplotlib.backend_bases import FigureCanvasBase
+ FigureCanvasBase(fig)
+
+ fig.canvas.print_figure(bytes_io, **kw)
+ data = bytes_io.getvalue()
+ if fmt == 'svg':
+ data = data.decode('utf-8')
+ elif base64:
+ data = b2a_base64(data, newline=False).decode("ascii")
+ return data
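+
+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# rendering a figure to raw PNG bytes and to a base64 string, mirroring what
+# the inline formatters registered in select_figure_formats below do.
+def _example_print_figure():
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    fig.add_subplot().plot([0, 1], [0, 1])
+    raw = print_figure(fig, fmt="png")                    # bytes
+    encoded = print_figure(fig, fmt="png", base64=True)   # ascii str
+    assert raw.startswith(b"\x89PNG")
+    assert isinstance(encoded, str)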
+
+def retina_figure(fig, base64=False, **kwargs):
+ """format a figure as a pixel-doubled (retina) PNG
+
+ If `base64` is True, a base64-encoded str is returned instead of raw
+ bytes for binary-encoded image formats.
+
+ .. versionadded:: 7.29
+ base64 argument
+ """
+ pngdata = print_figure(fig, fmt="retina", base64=False, **kwargs)
+ # Make sure that retina_figure acts just like print_figure and returns
+ # None when the figure is empty.
+ if pngdata is None:
+ return
+ w, h = _pngxy(pngdata)
+ metadata = {"width": w//2, "height":h//2}
+ if base64:
+ pngdata = b2a_base64(pngdata, newline=False).decode("ascii")
+ return pngdata, metadata
+
+
+# We need a little factory function here to create the closure where
+# safe_execfile can live.
+def mpl_runner(safe_execfile):
+ """Factory to return a matplotlib-enabled runner for %run.
+
+ Parameters
+ ----------
+ safe_execfile : function
+ This must be a function with the same interface as the
+ :meth:`safe_execfile` method of IPython.
+
+ Returns
+ -------
+ A function suitable for use as the ``runner`` argument of the %run magic
+ function.
+ """
+
+ def mpl_execfile(fname,*where,**kw):
+ """matplotlib-aware wrapper around safe_execfile.
+
+ Its interface is identical to that of the :func:`execfile` builtin.
+
+ This is ultimately a call to execfile(), but wrapped in safeties to
+ properly handle interactive rendering."""
+
+ import matplotlib
+ import matplotlib.pyplot as plt
+
+ #print '*** Matplotlib runner ***' # dbg
+ # turn off rendering until end of script
+ is_interactive = matplotlib.rcParams['interactive']
+ matplotlib.interactive(False)
+ safe_execfile(fname,*where,**kw)
+ matplotlib.interactive(is_interactive)
+ # make rendering call now, if the user tried to do it
+ if plt.draw_if_interactive.called:
+ plt.draw()
+ plt.draw_if_interactive.called = False
+
+ # re-draw everything that is stale
+ try:
+ da = plt.draw_all
+ except AttributeError:
+ pass
+ else:
+ da()
+
+ return mpl_execfile
+
+
+def _reshow_nbagg_figure(fig):
+ """reshow an nbagg figure"""
+ try:
+ reshow = fig.canvas.manager.reshow
+ except AttributeError as e:
+ raise NotImplementedError() from e
+ else:
+ reshow()
+
+
+def select_figure_formats(shell, formats, **kwargs):
+ """Select figure formats for the inline backend.
+
+ Parameters
+ ----------
+ shell : InteractiveShell
+ The main IPython instance.
+ formats : str or set
+ One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
+ **kwargs : any
+ Extra keyword arguments to be passed to fig.canvas.print_figure.
+ """
+ import matplotlib
+ from matplotlib.figure import Figure
+
+ svg_formatter = shell.display_formatter.formatters['image/svg+xml']
+ png_formatter = shell.display_formatter.formatters['image/png']
+ jpg_formatter = shell.display_formatter.formatters['image/jpeg']
+ pdf_formatter = shell.display_formatter.formatters['application/pdf']
+
+ if isinstance(formats, str):
+ formats = {formats}
+ # cast in case of list / tuple
+ formats = set(formats)
+
+ for f in shell.display_formatter.formatters.values():
+     f.pop(Figure, None)
+ mplbackend = matplotlib.get_backend().lower()
+ if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
+ formatter = shell.display_formatter.ipython_display_formatter
+ formatter.for_type(Figure, _reshow_nbagg_figure)
+
+ supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
+ bad = formats.difference(supported)
+ if bad:
+ bs = "%s" % ','.join([repr(f) for f in bad])
+ gs = "%s" % ','.join([repr(f) for f in supported])
+ raise ValueError("supported formats are: %s not %s" % (gs, bs))
+
+ if "png" in formats:
+ png_formatter.for_type(
+ Figure, partial(print_figure, fmt="png", base64=True, **kwargs)
+ )
+ if "retina" in formats or "png2x" in formats:
+ png_formatter.for_type(Figure, partial(retina_figure, base64=True, **kwargs))
+ if "jpg" in formats or "jpeg" in formats:
+ jpg_formatter.for_type(
+ Figure, partial(print_figure, fmt="jpg", base64=True, **kwargs)
+ )
+ if "svg" in formats:
+ svg_formatter.for_type(Figure, partial(print_figure, fmt="svg", **kwargs))
+ if "pdf" in formats:
+ pdf_formatter.for_type(
+ Figure, partial(print_figure, fmt="pdf", base64=True, **kwargs)
+ )
+
+#-----------------------------------------------------------------------------
+# Code for initializing matplotlib and importing pylab
+#-----------------------------------------------------------------------------
+
+
+def find_gui_and_backend(gui=None, gui_select=None):
+ """Given a gui string return the gui and mpl backend.
+
+ Parameters
+ ----------
+ gui : str
+ Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
+ gui_select : str
+ Can be one of ('tk','gtk','wx','qt','qt4','inline').
+ This is any gui already selected by the shell.
+
+ Returns
+ -------
+ A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
+ 'WXAgg','Qt4Agg','module://matplotlib_inline.backend_inline','agg').
+ """
+
+ import matplotlib
+
+ if gui and gui != 'auto':
+ # select backend based on requested gui
+ backend = backends[gui]
+ if gui == 'agg':
+ gui = None
+ else:
+ # We need to read the backend from the original data structure, *not*
+ # from mpl.rcParams, since a prior invocation of %matplotlib may have
+ # overwritten that.
+ # WARNING: this assumes matplotlib 1.1 or newer!!
+ backend = matplotlib.rcParamsOrig['backend']
+ # In this case, we need to find what the appropriate gui selection call
+ # should be for IPython, so we can activate inputhook accordingly
+ gui = backend2gui.get(backend, None)
+
+ # If a GUI was already active, only that GUI and 'inline' are allowed,
+ # so fall back to the previously selected one.
+ if gui_select and gui != gui_select:
+ gui = gui_select
+ backend = backends[gui]
+
+ return gui, backend
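+
+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# an explicit gui request is looked up in `backends`, while 'auto' falls back
+# to matplotlib's configured rc backend and the reverse `backend2gui` table.
+def _example_find_gui_and_backend():
+    gui, backend = find_gui_and_backend('qt')
+    assert (gui, backend) == ('qt', 'Qt5Agg')
+    gui, backend = find_gui_and_backend('inline')
+    assert backend == 'module://matplotlib_inline.backend_inline'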
+
+
+def activate_matplotlib(backend):
+ """Activate the given backend and set interactive to True."""
+
+ import matplotlib
+ matplotlib.interactive(True)
+
+ # Matplotlib had a bug where even switch_backend could not force
+ # the rcParam to update. This needs to be set *before* the module
+ # magic of switch_backend().
+ matplotlib.rcParams['backend'] = backend
+
+ # Due to circular imports, pyplot may be only partially initialised
+ # when this function runs.
+ # So avoid needing matplotlib attribute-lookup to access pyplot.
+ from matplotlib import pyplot as plt
+
+ plt.switch_backend(backend)
+
+ plt.show._needmain = False
+ # We need to detect at runtime whether show() is called by the user.
+ # For this, we wrap it into a decorator which adds a 'called' flag.
+ plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
+
+
+def import_pylab(user_ns, import_all=True):
+ """Populate the namespace with pylab-related values.
+
+ Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
+
+ Also imports a few names from IPython (figsize, display, getfigs)
+
+ """
+
+ # Import numpy as np/pyplot as plt are conventions we're trying to
+ # somewhat standardize on. Making them available to users by default
+ # will greatly help this.
+ s = ("import numpy\n"
+ "import matplotlib\n"
+ "from matplotlib import pylab, mlab, pyplot\n"
+ "np = numpy\n"
+ "plt = pyplot\n"
+ )
+ exec(s, user_ns)
+
+ if import_all:
+ s = ("from matplotlib.pylab import *\n"
+ "from numpy import *\n")
+ exec(s, user_ns)
+
+ # IPython symbols to add
+ user_ns['figsize'] = figsize
+ from IPython.display import display
+ # Add display and getfigs to the user's namespace
+ user_ns['display'] = display
+ user_ns['getfigs'] = getfigs
+
+
+def configure_inline_support(shell, backend):
+ """
+ .. deprecated:: 7.23
+
+ use `matplotlib_inline.backend_inline.configure_inline_support()`
+
+ Configure an IPython shell object for matplotlib use.
+
+ Parameters
+ ----------
+ shell : InteractiveShell instance
+ backend : matplotlib backend
+ """
+ warnings.warn(
+ "`configure_inline_support` is deprecated since IPython 7.23, directly "
+ "use `matplotlib_inline.backend_inline.configure_inline_support()`",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ from matplotlib_inline.backend_inline import (
+ configure_inline_support as configure_inline_support_orig,
+ )
+
+ configure_inline_support_orig(shell, backend)
diff --git a/contrib/python/ipython/py3/IPython/core/release.py b/contrib/python/ipython/py3/IPython/core/release.py
new file mode 100644
index 0000000000..50080642ee
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/release.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+"""Release data for the IPython project."""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2008, IPython Development Team.
+# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
+# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+# IPython version information. An empty _version_extra corresponds to a full
+# release. 'dev' as a _version_extra string means this is a development
+# version
+_version_major = 8
+_version_minor = 14
+_version_patch = 0
+_version_extra = ".dev"
+# _version_extra = "rc1"
+_version_extra = "" # Uncomment this for full releases
+
+# Construct full version string from these.
+_ver = [_version_major, _version_minor, _version_patch]
+
+__version__ = '.'.join(map(str, _ver))
+if _version_extra:
+ __version__ = __version__ + _version_extra
+
+version = __version__ # backwards compatibility name
+version_info = (_version_major, _version_minor, _version_patch, _version_extra)
+
+# Change this when incrementing the kernel protocol version
+kernel_protocol_version_info = (5, 0)
+kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
+
+license = "BSD-3-Clause"
+
+authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
+ 'Janko' : ('Janko Hauser','jhauser@zscout.de'),
+ 'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
+ 'Ville' : ('Ville Vainio','vivainio@gmail.com'),
+ 'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
+ 'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
+ 'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
+ 'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
+ 'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
+ }
+
+author = 'The IPython Development Team'
+
+author_email = 'ipython-dev@python.org'
diff --git a/contrib/python/ipython/py3/IPython/core/shellapp.py b/contrib/python/ipython/py3/IPython/core/shellapp.py
new file mode 100644
index 0000000000..29325a0ad2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/shellapp.py
@@ -0,0 +1,451 @@
+# encoding: utf-8
+"""
+A mixin for :class:`~IPython.core.application.Application` classes that
+launch InteractiveShell instances, load extensions, etc.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import glob
+from itertools import chain
+import os
+import sys
+
+from traitlets.config.application import boolean_flag
+from traitlets.config.configurable import Configurable
+from traitlets.config.loader import Config
+from IPython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
+from IPython.core import pylabtools
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.path import filefind
+from traitlets import (
+ Unicode, Instance, List, Bool, CaselessStrEnum, observe,
+ DottedObjectName,
+)
+from IPython.terminal import pt_inputhooks
+
+#-----------------------------------------------------------------------------
+# Aliases and Flags
+#-----------------------------------------------------------------------------
+
+gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
+
+backend_keys = sorted(pylabtools.backends.keys())
+backend_keys.insert(0, 'auto')
+
+shell_flags = {}
+
+addflag = lambda *args: shell_flags.update(boolean_flag(*args))
+addflag('autoindent', 'InteractiveShell.autoindent',
+ 'Turn on autoindenting.', 'Turn off autoindenting.'
+)
+addflag('automagic', 'InteractiveShell.automagic',
+ """Turn on the auto calling of magic commands. Type %%magic at the
+ IPython prompt for more information.""",
+ 'Turn off the auto calling of magic commands.'
+)
+addflag('pdb', 'InteractiveShell.pdb',
+ "Enable auto calling the pdb debugger after every exception.",
+ "Disable auto calling the pdb debugger after every exception."
+)
+addflag('pprint', 'PlainTextFormatter.pprint',
+ "Enable auto pretty printing of results.",
+ "Disable auto pretty printing of results."
+)
+addflag('color-info', 'InteractiveShell.color_info',
+ """IPython can display information about objects via a set of functions,
+ and optionally can use colors for this, syntax highlighting
+ source code and various other elements. This is on by default, but can cause
+ problems with some pagers. If you see such problems, you can disable the
+ colours.""",
+ "Disable using colors for info related things."
+)
+addflag('ignore-cwd', 'InteractiveShellApp.ignore_cwd',
+ "Exclude the current working directory from sys.path",
+ "Include the current working directory in sys.path",
+)
+nosep_config = Config()
+nosep_config.InteractiveShell.separate_in = ''
+nosep_config.InteractiveShell.separate_out = ''
+nosep_config.InteractiveShell.separate_out2 = ''
+
+shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
+shell_flags['pylab'] = (
+ {'InteractiveShellApp' : {'pylab' : 'auto'}},
+ """Pre-load matplotlib and numpy for interactive use with
+ the default matplotlib backend."""
+)
+shell_flags['matplotlib'] = (
+ {'InteractiveShellApp' : {'matplotlib' : 'auto'}},
+ """Configure matplotlib for interactive use with
+ the default matplotlib backend."""
+)
+
+# it's possible we don't want short aliases for *all* of these:
+shell_aliases = dict(
+ autocall='InteractiveShell.autocall',
+ colors='InteractiveShell.colors',
+ logfile='InteractiveShell.logfile',
+ logappend='InteractiveShell.logappend',
+ c='InteractiveShellApp.code_to_run',
+ m='InteractiveShellApp.module_to_run',
+ ext="InteractiveShellApp.extra_extensions",
+ gui='InteractiveShellApp.gui',
+ pylab='InteractiveShellApp.pylab',
+ matplotlib='InteractiveShellApp.matplotlib',
+)
+shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
+
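+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# `ipython --matplotlib=qt` resolves through the alias table above to the
+# config assignment shown here (same pattern as nosep_config further up).
+def _example_shell_alias_resolution():
+    section, trait = shell_aliases['matplotlib'].split('.')
+    assert (section, trait) == ('InteractiveShellApp', 'matplotlib')
+    cfg = Config()
+    cfg.InteractiveShellApp.matplotlib = 'qt'
+    return cfg
+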
+#-----------------------------------------------------------------------------
+# Main classes and functions
+#-----------------------------------------------------------------------------
+
+class InteractiveShellApp(Configurable):
+ """A Mixin for applications that start InteractiveShell instances.
+
+ Provides configurables for loading extensions and executing files
+ as part of configuring a Shell environment.
+
+ The following methods should be called by the :meth:`initialize` method
+ of the subclass:
+
+ - :meth:`init_path`
+ - :meth:`init_shell` (to be implemented by the subclass)
+ - :meth:`init_gui_pylab`
+ - :meth:`init_extensions`
+ - :meth:`init_code`
+ """
+ extensions = List(Unicode(),
+ help="A list of dotted module names of IPython extensions to load."
+ ).tag(config=True)
+
+ extra_extensions = List(
+ DottedObjectName(),
+ help="""
+ Dotted module name(s) of one or more IPython extensions to load.
+
+ For specifying extra extensions to load on the command-line.
+
+ .. versionadded:: 7.10
+ """,
+ ).tag(config=True)
+
+ reraise_ipython_extension_failures = Bool(False,
+ help="Reraise exceptions encountered loading IPython extensions?",
+ ).tag(config=True)
+
+ # Extensions that are always loaded (not configurable)
+ default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
+
+ hide_initial_ns = Bool(True,
+ help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
+ be hidden from tools like %who?"""
+ ).tag(config=True)
+
+ exec_files = List(Unicode(),
+ help="""List of files to run at IPython startup."""
+ ).tag(config=True)
+ exec_PYTHONSTARTUP = Bool(True,
+ help="""Run the file referenced by the PYTHONSTARTUP environment
+ variable at IPython startup."""
+ ).tag(config=True)
+ file_to_run = Unicode('',
+ help="""A file to be run""").tag(config=True)
+
+ exec_lines = List(Unicode(),
+ help="""lines of code to run at IPython startup."""
+ ).tag(config=True)
+ code_to_run = Unicode('',
+ help="Execute the given command string."
+ ).tag(config=True)
+ module_to_run = Unicode('',
+ help="Run the module as a script."
+ ).tag(config=True)
+ gui = CaselessStrEnum(gui_keys, allow_none=True,
+ help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
+ ).tag(config=True)
+ matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
+ help="""Configure matplotlib for interactive use with
+ the default matplotlib backend."""
+ ).tag(config=True)
+ pylab = CaselessStrEnum(backend_keys, allow_none=True,
+ help="""Pre-load matplotlib and numpy for interactive use,
+ selecting a particular matplotlib backend and loop integration.
+ """
+ ).tag(config=True)
+ pylab_import_all = Bool(True,
+ help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
+ and an ``import *`` is done from numpy and pylab, when using pylab mode.
+
+ When False, pylab mode should not import any names into the user namespace.
+ """
+ ).tag(config=True)
+ ignore_cwd = Bool(
+ False,
+ help="""If True, IPython will not add the current working directory to sys.path.
+ When False, the current working directory is added to sys.path, allowing imports
+ of modules defined in the current directory."""
+ ).tag(config=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ # whether interact-loop should start
+ interact = Bool(True)
+
+ user_ns = Instance(dict, args=None, allow_none=True)
+ @observe('user_ns')
+ def _user_ns_changed(self, change):
+ if self.shell is not None:
+ self.shell.user_ns = change['new']
+ self.shell.init_user_ns()
+
+ def init_path(self):
+ """Add current working directory, '', to sys.path
+
+ Unlike Python's default, we insert before the first `site-packages`
+ or `dist-packages` directory,
+ so that it is after the standard library.
+
+ .. versionchanged:: 7.2
+ Try to insert after the standard library, instead of first.
+ .. versionchanged:: 8.0
+ Allow optionally not including the current directory in sys.path
+ """
+ if '' in sys.path or self.ignore_cwd:
+ return
+ for idx, path in enumerate(sys.path):
+ parent, last_part = os.path.split(path)
+ if last_part in {'site-packages', 'dist-packages'}:
+ break
+ else:
+ # no site-packages or dist-packages found (?!)
+ # back to original behavior of inserting at the front
+ idx = 0
+ sys.path.insert(idx, '')
+
+ def init_shell(self):
+ raise NotImplementedError("Override in subclasses")
+
+ def init_gui_pylab(self):
+ """Enable GUI event loop integration, taking pylab into account."""
+ enable = False
+ shell = self.shell
+ if self.pylab:
+ enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
+ key = self.pylab
+ elif self.matplotlib:
+ enable = shell.enable_matplotlib
+ key = self.matplotlib
+ elif self.gui:
+ enable = shell.enable_gui
+ key = self.gui
+
+ if not enable:
+ return
+
+ try:
+ r = enable(key)
+ except ImportError:
+ self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
+ self.shell.showtraceback()
+ return
+ except Exception:
+ self.log.warning("GUI event loop or pylab initialization failed")
+ self.shell.showtraceback()
+ return
+
+ if isinstance(r, tuple):
+ gui, backend = r[:2]
+ self.log.info("Enabling GUI event loop integration, "
+ "eventloop=%s, matplotlib=%s", gui, backend)
+ if key == "auto":
+ print("Using matplotlib backend: %s" % backend)
+ else:
+ gui = r
+ self.log.info("Enabling GUI event loop integration, "
+ "eventloop=%s", gui)
+
+ def init_extensions(self):
+ """Load all IPython extensions in IPythonApp.extensions.
+
+ This uses the :meth:`ExtensionManager.load_extensions` to load all
+ the extensions listed in ``self.extensions``.
+ """
+ try:
+ self.log.debug("Loading IPython extensions...")
+ extensions = (
+ self.default_extensions + self.extensions + self.extra_extensions
+ )
+ for ext in extensions:
+ try:
+ self.log.info("Loading IPython extension: %s", ext)
+ self.shell.extension_manager.load_extension(ext)
+ except:
+ if self.reraise_ipython_extension_failures:
+ raise
+ msg = ("Error in loading extension: {ext}\n"
+ "Check your config files in {location}".format(
+ ext=ext,
+ location=self.profile_dir.location
+ ))
+ self.log.warning(msg, exc_info=True)
+ except:
+ if self.reraise_ipython_extension_failures:
+ raise
+ self.log.warning("Unknown error in loading extensions:", exc_info=True)
+
+ def init_code(self):
+ """run the pre-flight code, specified via exec_lines"""
+ self._run_startup_files()
+ self._run_exec_lines()
+ self._run_exec_files()
+
+ # Hide variables defined here from %who etc.
+ if self.hide_initial_ns:
+ self.shell.user_ns_hidden.update(self.shell.user_ns)
+
+ # command-line execution (ipython -i script.py, ipython -m module)
+ # should *not* be excluded from %whos
+ self._run_cmd_line_code()
+ self._run_module()
+
+ # flush output, so it won't be attached to the first cell
+ sys.stdout.flush()
+ sys.stderr.flush()
+ self.shell._sys_modules_keys = set(sys.modules.keys())
+
+ def _run_exec_lines(self):
+ """Run lines of code in IPythonApp.exec_lines in the user's namespace."""
+ if not self.exec_lines:
+ return
+ try:
+ self.log.debug("Running code from IPythonApp.exec_lines...")
+ for line in self.exec_lines:
+ try:
+ self.log.info("Running code in user namespace: %s" %
+ line)
+ self.shell.run_cell(line, store_history=False)
+ except:
+ self.log.warning("Error in executing line in user "
+ "namespace: %s" % line)
+ self.shell.showtraceback()
+ except:
+ self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
+ self.shell.showtraceback()
+
+ def _exec_file(self, fname, shell_futures=False):
+ try:
+ full_filename = filefind(fname, [u'.', self.ipython_dir])
+ except IOError:
+ self.log.warning("File not found: %r"%fname)
+ return
+ # Make sure that the running script gets a proper sys.argv as if it
+ # were run from a system shell.
+ save_argv = sys.argv
+ sys.argv = [full_filename] + self.extra_args[1:]
+ try:
+ if os.path.isfile(full_filename):
+ self.log.info("Running file in user namespace: %s" %
+ full_filename)
+ # Ensure that __file__ is always defined to match Python
+ # behavior.
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns['__file__'] = fname
+ if full_filename.endswith('.ipy') or full_filename.endswith('.ipynb'):
+ self.shell.safe_execfile_ipy(full_filename,
+ shell_futures=shell_futures)
+ else:
+ # default to python, even without extension
+ self.shell.safe_execfile(full_filename,
+ self.shell.user_ns,
+ shell_futures=shell_futures,
+ raise_exceptions=True)
+ finally:
+ sys.argv = save_argv
+
+ def _run_startup_files(self):
+ """Run files from profile startup directory"""
+ startup_dirs = [self.profile_dir.startup_dir] + [
+ os.path.join(p, 'startup') for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
+ ]
+ startup_files = []
+
+ if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
+ not (self.file_to_run or self.code_to_run or self.module_to_run):
+ python_startup = os.environ['PYTHONSTARTUP']
+ self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
+ try:
+ self._exec_file(python_startup)
+ except:
+ self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
+ self.shell.showtraceback()
+ for startup_dir in startup_dirs[::-1]:
+ startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
+ startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
+ if not startup_files:
+ return
+
+ self.log.debug("Running startup files from %s...", startup_dir)
+ try:
+ for fname in sorted(startup_files):
+ self._exec_file(fname)
+ except:
+ self.log.warning("Unknown error in handling startup files:")
+ self.shell.showtraceback()
+
+ def _run_exec_files(self):
+ """Run files from IPythonApp.exec_files"""
+ if not self.exec_files:
+ return
+
+ self.log.debug("Running files in IPythonApp.exec_files...")
+ try:
+ for fname in self.exec_files:
+ self._exec_file(fname)
+ except:
+ self.log.warning("Unknown error in handling IPythonApp.exec_files:")
+ self.shell.showtraceback()
+
+ def _run_cmd_line_code(self):
+ """Run code or file specified at the command-line"""
+ if self.code_to_run:
+ line = self.code_to_run
+ try:
+ self.log.info("Running code given at command line (c=): %s" %
+ line)
+ self.shell.run_cell(line, store_history=False)
+ except:
+ self.log.warning("Error in executing line in user namespace: %s" %
+ line)
+ self.shell.showtraceback()
+ if not self.interact:
+ self.exit(1)
+
+ # Like Python itself, ignore the second if the first of these is present
+ elif self.file_to_run:
+ fname = self.file_to_run
+ if os.path.isdir(fname):
+ fname = os.path.join(fname, "__main__.py")
+ if not os.path.exists(fname):
+ self.log.warning("File '%s' doesn't exist", fname)
+ if not self.interact:
+ self.exit(2)
+ try:
+ self._exec_file(fname, shell_futures=True)
+ except:
+ self.shell.showtraceback(tb_offset=4)
+ if not self.interact:
+ self.exit(1)
+
+ def _run_module(self):
+ """Run module specified at the command-line."""
+ if self.module_to_run:
+ # Make sure that the module gets a proper sys.argv as if it were
+ # run using `python -m`.
+ save_argv = sys.argv
+ sys.argv = [sys.executable] + self.extra_args
+ try:
+ self.shell.safe_run_module(self.module_to_run,
+ self.shell.user_ns)
+ finally:
+ sys.argv = save_argv
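+
+
+# Illustrative sketch (editor's addition, not part of the upstream module): the
+# call order documented in the InteractiveShellApp docstring, as it would look
+# in a hypothetical subclass that also derives from BaseIPythonApplication
+# (which supplies self.log, self.profile_dir and self.extra_args):
+#
+#     class MyShellApp(BaseIPythonApplication, InteractiveShellApp):
+#         def init_shell(self):
+#             self.shell = TerminalInteractiveShell.instance(parent=self)
+#
+#         def initialize(self, argv=None):
+#             super().initialize(argv)
+#             self.init_path()
+#             self.init_shell()
+#             self.init_gui_pylab()
+#             self.init_extensions()
+#             self.init_code()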
diff --git a/contrib/python/ipython/py3/IPython/core/splitinput.py b/contrib/python/ipython/py3/IPython/core/splitinput.py
new file mode 100644
index 0000000000..5bc3e32542
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/splitinput.py
@@ -0,0 +1,138 @@
+# encoding: utf-8
+"""
+Simple utility for splitting user input. This is used by both inputsplitter and
+prefilter.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import re
+import sys
+
+from IPython.utils import py3compat
+from IPython.utils.encoding import get_stream_enc
+from IPython.core.oinspect import OInfo
+
+#-----------------------------------------------------------------------------
+# Main function
+#-----------------------------------------------------------------------------
+
+# RegExp for splitting line contents into pre-char//first word-method//rest.
+# For clarity, each group is on one line.
+
+# WARNING: update the regexp if the escapes in interactiveshell are changed, as
+# they are hardwired in.
+
+# Although it's not solely driven by the regex, note that:
+# ,;/% only trigger if they are the first character on the line
+# ! and !! trigger if they are first char(s) *or* follow an indent
+# ? triggers as first or last char.
+
+line_split = re.compile(r"""
+ ^(\s*) # any leading space
+ ([,;/%]|!!?|\?\??)? # escape character or characters
+ \s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading %
+ # to correctly treat things like '?%magic'
+ (.*?$|$) # rest of line
+ """, re.VERBOSE)
+
+
+def split_user_input(line, pattern=None):
+ """Split user input into initial whitespace, escape character, function part
+ and the rest.
+ """
+ # We need to ensure that the rest of this routine deals only with unicode
+ encoding = get_stream_enc(sys.stdin, 'utf-8')
+ line = py3compat.cast_unicode(line, encoding)
+
+ if pattern is None:
+ pattern = line_split
+ match = pattern.match(line)
+ if not match:
+ # print "match failed for line '%s'" % line
+ try:
+ ifun, the_rest = line.split(None,1)
+ except ValueError:
+ # print "split failed for line '%s'" % line
+ ifun, the_rest = line, u''
+ pre = re.match(r'^(\s*)(.*)',line).groups()[0]
+ esc = ""
+ else:
+ pre, esc, ifun, the_rest = match.groups()
+
+ #print 'line:<%s>' % line # dbg
+ #print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg
+ return pre, esc or '', ifun.strip(), the_rest.lstrip()
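+
+
+# Illustrative sketch (editor's addition, not part of the upstream module):
+# expected splits for a few representative inputs, following the regex groups
+# documented above.
+def _example_split_user_input():
+    assert split_user_input('!!ls -la') == ('', '!!', 'ls', '-la')
+    assert split_user_input('  %timeit f(x)') == ('  ', '%', 'timeit', 'f(x)')
+    assert split_user_input('obj.method?') == ('', '', 'obj.method', '?')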
+
+
+class LineInfo(object):
+ """A single line of input and associated info.
+
+ Includes the following as properties:
+
+ line
+ The original, raw line
+
+ continue_prompt
+ Is this line a continuation in a sequence of multiline input?
+
+ pre
+ Any leading whitespace.
+
+ esc
+ The escape character(s) in pre or the empty string if there isn't one.
+ Note that '!!' and '??' are possible values for esc. Otherwise it will
+ always be a single character.
+
+ ifun
+ The 'function part', which is basically the maximal initial sequence
+ of valid python identifiers and the '.' character. This is what is
+ checked for alias and magic transformations, used for auto-calling,
+ etc. In contrast to Python identifiers, it may start with "%" and contain
+ "*".
+
+ the_rest
+ Everything else on the line.
+ """
+ def __init__(self, line, continue_prompt=False):
+ self.line = line
+ self.continue_prompt = continue_prompt
+ self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line)
+
+ self.pre_char = self.pre.strip()
+ if self.pre_char:
+ self.pre_whitespace = '' # No whitespace allowed before esc chars
+ else:
+ self.pre_whitespace = self.pre
+
+ def ofind(self, ip) -> OInfo:
+ """Do a full, attribute-walking lookup of the ifun in the various
+ namespaces for the given IPython InteractiveShell instance.
+
+ Return an OInfo describing the result: whether the object was found,
+ the object itself, the namespace it lives in, and whether it is a magic.
+
+ Note: can cause state changes because of calling getattr, but should
+ only be run if autocall is on and if the line hasn't matched any
+ other, less dangerous handlers.
+
+ Does cache the results of the call, so can be called multiple times
+ without worrying about *further* damaging state.
+ """
+ return ip._ofind(self.ifun)
+
+ def __str__(self):
+ return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)
diff --git a/contrib/python/ipython/py3/IPython/core/ultratb.py b/contrib/python/ipython/py3/IPython/core/ultratb.py
new file mode 100644
index 0000000000..61b5939398
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/ultratb.py
@@ -0,0 +1,1518 @@
+# -*- coding: utf-8 -*-
+"""
+Verbose and colourful traceback formatting.
+
+**ColorTB**
+
+I've always found it a bit hard to visually parse tracebacks in Python. The
+ColorTB class is a solution to that problem. It colors the different parts of a
+traceback in a manner similar to what you would expect from a syntax-highlighting
+text editor.
+
+Installation instructions for ColorTB::
+
+ import sys,ultratb
+ sys.excepthook = ultratb.ColorTB()
+
+**VerboseTB**
+
+I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
+of useful info when a traceback occurs. Ping originally had it spit out HTML
+and intended it for CGI programmers, but why should they have all the fun? I
+altered it to spit out colored text to the terminal. It's a bit overwhelming,
+but kind of neat, and maybe useful for long-running programs that you believe
+are bug-free. If a crash *does* occur in that type of program you want details.
+Give it a shot--you'll love it or you'll hate it.
+
+.. note::
+
+ The Verbose mode prints the variables currently visible where the exception
+ happened (shortening their strings if too long). This can potentially be
+ very slow, if you happen to have a huge data structure whose string
+ representation is complex to compute. Your computer may appear to freeze for
+ a while with cpu usage at 100%. If this occurs, you can cancel the traceback
+ with Ctrl-C (maybe hitting it more than once).
+
+ If you encounter this kind of situation often, you may want to use the
+ Verbose_novars mode instead of the regular Verbose, which avoids formatting
+ variables (but otherwise includes the information and context given by
+ Verbose).
+
+.. note::
+
+ The verbose mode prints all variables in the stack, which means it can
+ potentially leak sensitive information such as access keys or unencrypted
+ passwords.
+
+Installation instructions for VerboseTB::
+
+ import sys,ultratb
+ sys.excepthook = ultratb.VerboseTB()
+
+Note: Much of the code in this module was lifted verbatim from the standard
+library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
+
+Color schemes
+-------------
+
+The colors are defined in the class TBTools through the use of the
+ColorSchemeTable class. Currently the following exist:
+
+ - NoColor: allows all of this module to be used in any terminal (the color
+ escapes are just dummy blank strings).
+
+ - Linux: is meant to look good in a terminal like the Linux console (black
+ or very dark background).
+
+ - LightBG: similar to Linux but swaps dark/light colors to be more readable
+ in light background terminals.
+
+ - Neutral: a neutral color scheme that should be readable on both light and
+ dark background
+
+You can implement other color schemes easily; the syntax is fairly
+self-explanatory. Please send back new schemes you develop to the author for
+possible inclusion in future releases.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.ultratb
+ :parts: 3
+"""
+
+#*****************************************************************************
+# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
+# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+
+from collections.abc import Sequence
+import functools
+import inspect
+import linecache
+import pydoc
+import sys
+import time
+import traceback
+import types
+from types import TracebackType
+from typing import Any, List, Optional, Tuple
+
+import stack_data
+from pygments.formatters.terminal256 import Terminal256Formatter
+from pygments.styles import get_style_by_name
+
+import IPython.utils.colorable as colorable
+# IPython's own modules
+from IPython import get_ipython
+from IPython.core import debugger
+from IPython.core.display_trap import DisplayTrap
+from IPython.core.excolors import exception_colors
+from IPython.utils import PyColorize
+from IPython.utils import path as util_path
+from IPython.utils import py3compat
+from IPython.utils.terminal import get_terminal_size
+
+# Globals
+# amount of space to put line numbers before verbose tracebacks
+INDENT_SIZE = 8
+
+# Default color scheme. This is used, for example, by the traceback
+# formatter. When running in an actual IPython instance, the user's rc.colors
+# value is used, but having a module global makes this functionality available
+# to users of ultratb who are NOT running inside ipython.
+DEFAULT_SCHEME = 'NoColor'
+FAST_THRESHOLD = 10_000
+
+# ---------------------------------------------------------------------------
+# Code begins
+
+# Helper function -- largely belongs to VerboseTB, but we need the same
+# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
+# can be recognized properly by ipython.el's py-traceback-line-re
+# (SyntaxErrors have to be treated specially because they have no traceback)
+
+
+@functools.lru_cache()
+def count_lines_in_py_file(filename: str) -> int:
+ """
+ Given a filename, returns the number of lines in the file
+ if it ends with the extension ".py". Otherwise, returns 0.
+ """
+ if not filename.endswith(".py"):
+ return 0
+ else:
+ try:
+ with open(filename, "r") as file:
+ s = sum(1 for line in file)
+ except UnicodeError:
+ return 0
+ return s
+
+
+def get_line_number_of_frame(frame: types.FrameType) -> int:
+ """
+ Given a frame object, returns the total number of lines in the file
+ containing the frame's code object, or the number of lines in the
+ frame's source code if the file is not available.
+
+ Parameters
+ ----------
+ frame : FrameType
+ The frame object whose line number is to be determined.
+
+ Returns
+ -------
+ int
+ The total number of lines in the file containing the frame's
+ code object, or the number of lines in the frame's source code
+ if the file is not available.
+ """
+ filename = frame.f_code.co_filename
+ if filename is None:
+ print("No file....")
+ lines, first = inspect.getsourcelines(frame)
+ return first + len(lines)
+ return count_lines_in_py_file(filename)
+
+
+def _safe_string(value, what, func=str):
+ # Copied from cpython/Lib/traceback.py
+ try:
+ return func(value)
+ except:
+ return f"<{what} {func.__name__}() failed>"
+
+
+def _format_traceback_lines(lines, Colors, has_colors: bool, lvals):
+ """
+ Format tracebacks lines with pointing arrow, leading numbers...
+
+ Parameters
+ ----------
+ lines : list[Line]
+ Colors
+ ColorScheme used.
+ lvals : str
+ Values of local variables, already colored, to inject just after the error line.
+ """
+ numbers_width = INDENT_SIZE - 1
+ res = []
+
+ for stack_line in lines:
+ if stack_line is stack_data.LINE_GAP:
+ res.append('%s (...)%s\n' % (Colors.linenoEm, Colors.Normal))
+ continue
+
+ line = stack_line.render(pygmented=has_colors).rstrip('\n') + '\n'
+ lineno = stack_line.lineno
+ if stack_line.is_current:
+ # This is the line with the error
+ pad = numbers_width - len(str(lineno))
+ num = '%s%s' % (debugger.make_arrow(pad), str(lineno))
+ start_color = Colors.linenoEm
+ else:
+ num = '%*s' % (numbers_width, lineno)
+ start_color = Colors.lineno
+
+ line = '%s%s%s %s' % (start_color, num, Colors.Normal, line)
+
+ res.append(line)
+ if lvals and stack_line.is_current:
+ res.append(lvals + '\n')
+ return res
+
+def _simple_format_traceback_lines(lnum, index, lines, Colors, lvals, _line_format):
+ """
+ Format tracebacks lines with pointing arrow, leading numbers...
+
+ Parameters
+ ----------
+ lnum : int
+ Number of the target line of code.
+ index : int
+ Which line in the list should be highlighted.
+ lines : list[str]
+ Source lines to render.
+ Colors
+ ColorScheme used.
+ lvals : str
+ Values of local variables, already colored, to inject just after the error line.
+ _line_format : callable (str) -> (str, bool)
+ Returns (colorized version of the line, whether colorization failed).
+ """
+ numbers_width = INDENT_SIZE - 1
+ res = []
+ for i, line in enumerate(lines, lnum - index):
+ # assert isinstance(line, str)
+ line = py3compat.cast_unicode(line)
+
+ new_line, err = _line_format(line, "str")
+ if not err:
+ line = new_line
+
+ if i == lnum:
+ # This is the line with the error
+ pad = numbers_width - len(str(i))
+ num = "%s%s" % (debugger.make_arrow(pad), str(lnum))
+ line = "%s%s%s %s%s" % (
+ Colors.linenoEm,
+ num,
+ Colors.line,
+ line,
+ Colors.Normal,
+ )
+ else:
+ num = "%*s" % (numbers_width, i)
+ line = "%s%s%s %s" % (Colors.lineno, num, Colors.Normal, line)
+
+ res.append(line)
+ if lvals and i == lnum:
+ res.append(lvals + "\n")
+ return res
+
+
+def _format_filename(file, ColorFilename, ColorNormal, *, lineno=None):
+ """
+ Format filename lines with custom formatting from caching compiler or `File *.py` by default
+
+ Parameters
+ ----------
+ file : str
+ ColorFilename
+ ColorScheme's filename coloring to be used.
+ ColorNormal
+ ColorScheme's normal coloring to be used.
+ """
+ ipinst = get_ipython()
+ if (
+ ipinst is not None
+ and (data := ipinst.compile.format_code_name(file)) is not None
+ ):
+ label, name = data
+ if lineno is None:
+ tpl_link = f"{{label}} {ColorFilename}{{name}}{ColorNormal}"
+ else:
+ tpl_link = (
+ f"{{label}} {ColorFilename}{{name}}, line {{lineno}}{ColorNormal}"
+ )
+ else:
+ label = "File"
+ name = util_path.compress_user(
+ py3compat.cast_unicode(file, util_path.fs_encoding)
+ )
+ if lineno is None:
+ tpl_link = f"{{label}} {ColorFilename}{{name}}{ColorNormal}"
+ else:
+ # can we make this the more friendly ", line {{lineno}}", or do we need to preserve the formatting with the colon?
+ tpl_link = f"{{label}} {ColorFilename}{{name}}:{{lineno}}{ColorNormal}"
+
+ return tpl_link.format(label=label, name=name, lineno=lineno)
+
+#---------------------------------------------------------------------------
+# Module classes
+class TBTools(colorable.Colorable):
+ """Basic tools used by all traceback printer classes."""
+
+ # Number of frames to skip when reporting tracebacks
+ tb_offset = 0
+
+ def __init__(
+ self,
+ color_scheme="NoColor",
+ call_pdb=False,
+ ostream=None,
+ parent=None,
+ config=None,
+ *,
+ debugger_cls=None,
+ ):
+ # Whether to call the interactive pdb debugger after printing
+ # tracebacks or not
+ super(TBTools, self).__init__(parent=parent, config=config)
+ self.call_pdb = call_pdb
+
+ # Output stream to write to. Note that we store the original value in
+ # a private attribute and then make the public ostream a property, so
+ # that we can delay accessing sys.stdout until runtime. The way
+ # things are written now, the sys.stdout object is dynamically managed
+ # so a reference to it should NEVER be stored statically. This
+ # property approach confines this detail to a single location, and all
+ # subclasses can simply access self.ostream for writing.
+ self._ostream = ostream
+
+ # Create color table
+ self.color_scheme_table = exception_colors()
+
+ self.set_colors(color_scheme)
+ self.old_scheme = color_scheme # save initial value for toggles
+ self.debugger_cls = debugger_cls or debugger.Pdb
+
+ if call_pdb:
+ self.pdb = self.debugger_cls()
+ else:
+ self.pdb = None
+
+ def _get_ostream(self):
+ """Output stream that exceptions are written to.
+
+ Valid values are:
+
+ - None: the default, which means that IPython will dynamically resolve
+ to sys.stdout. This ensures compatibility with most tools, including
+ Windows (where plain stdout doesn't recognize ANSI escapes).
+
+ - Any object with 'write' and 'flush' attributes.
+ """
+ return sys.stdout if self._ostream is None else self._ostream
+
+ def _set_ostream(self, val):
+ assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
+ self._ostream = val
+
+ ostream = property(_get_ostream, _set_ostream)
+
+ @staticmethod
+ def _get_chained_exception(exception_value):
+ cause = getattr(exception_value, "__cause__", None)
+ if cause:
+ return cause
+ if getattr(exception_value, "__suppress_context__", False):
+ return None
+ return getattr(exception_value, "__context__", None)
+
+ def get_parts_of_chained_exception(
+ self, evalue
+ ) -> Optional[Tuple[type, BaseException, TracebackType]]:
+ chained_evalue = self._get_chained_exception(evalue)
+
+ if chained_evalue:
+ return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
+ return None
+
+ def prepare_chained_exception_message(self, cause) -> List[Any]:
+ direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
+ exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
+
+ if cause:
+ message = [[direct_cause]]
+ else:
+ message = [[exception_during_handling]]
+ return message
+
+ @property
+ def has_colors(self) -> bool:
+ return self.color_scheme_table.active_scheme_name.lower() != "nocolor"
+
+ def set_colors(self, *args, **kw):
+ """Shorthand access to the color table scheme selector method."""
+
+ # Set own color table
+ self.color_scheme_table.set_active_scheme(*args, **kw)
+ # for convenience, set Colors to the active scheme
+ self.Colors = self.color_scheme_table.active_colors
+ # Also set colors of debugger
+ if hasattr(self, 'pdb') and self.pdb is not None:
+ self.pdb.set_colors(*args, **kw)
+
+ def color_toggle(self):
+ """Toggle between the currently active color scheme and NoColor."""
+
+ if self.color_scheme_table.active_scheme_name == 'NoColor':
+ self.color_scheme_table.set_active_scheme(self.old_scheme)
+ self.Colors = self.color_scheme_table.active_colors
+ else:
+ self.old_scheme = self.color_scheme_table.active_scheme_name
+ self.color_scheme_table.set_active_scheme('NoColor')
+ self.Colors = self.color_scheme_table.active_colors
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return '\n'.join(stb)
+
+ def text(self, etype, value, tb, tb_offset: Optional[int] = None, context=5):
+ """Return formatted traceback.
+
+ Subclasses may override this if they add extra arguments.
+ """
+ tb_list = self.structured_traceback(etype, value, tb,
+ tb_offset, context)
+ return self.stb2text(tb_list)
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ number_of_lines_of_context: int = 5,
+ ):
+ """Return a list of traceback frames.
+
+ Must be implemented by each class.
+ """
+ raise NotImplementedError()
+
+
+#---------------------------------------------------------------------------
+class ListTB(TBTools):
+ """Print traceback information from a traceback list, with optional color.
+
+ Calling requires 3 arguments: (etype, evalue, elist)
+ as would be obtained by::
+
+ etype, evalue, tb = sys.exc_info()
+ if tb:
+ elist = traceback.extract_tb(tb)
+ else:
+ elist = None
+
+ It can thus be used by programs which need to process the traceback before
+ printing (such as console replacements based on the code module from the
+ standard library).
+
+ Because they are meant to be called without a full traceback (only a
+ list), instances of this class can't call the interactive pdb debugger."""
+
+
+ def __call__(self, etype, value, elist):
+ self.ostream.flush()
+ self.ostream.write(self.text(etype, value, elist))
+ self.ostream.write('\n')
+
+ def _extract_tb(self, tb):
+ if tb:
+ return traceback.extract_tb(tb)
+ else:
+ return None
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ context=5,
+ ):
+ """Return a color formatted string with the traceback info.
+
+ Parameters
+ ----------
+ etype : exception type
+ Type of the exception raised.
+ evalue : object
+ Data stored in the exception
+ etb : list | TracebackType | None
+ If list: List of frames, see class docstring for details.
+ If Traceback: Traceback of the exception.
+ tb_offset : int, optional
+ Number of frames in the traceback to skip. If not given, the
+ instance evalue is used (set in constructor).
+ context : int, optional
+ Number of lines of context information to print.
+
+ Returns
+ -------
+ String with formatted exception.
+ """
+ # This is a workaround to get chained_exc_ids in recursive calls
+ # etb should not be a tuple if structured_traceback is not recursive
+ if isinstance(etb, tuple):
+ etb, chained_exc_ids = etb
+ else:
+ chained_exc_ids = set()
+
+ if isinstance(etb, list):
+ elist = etb
+ elif etb is not None:
+ elist = self._extract_tb(etb)
+ else:
+ elist = []
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ assert isinstance(tb_offset, int)
+ Colors = self.Colors
+ out_list = []
+ if elist:
+
+ if tb_offset and len(elist) > tb_offset:
+ elist = elist[tb_offset:]
+
+ out_list.append('Traceback %s(most recent call last)%s:' %
+ (Colors.normalEm, Colors.Normal) + '\n')
+ out_list.extend(self._format_list(elist))
+ # The exception info should be a single entry in the list.
+ lines = ''.join(self._format_exception_only(etype, evalue))
+ out_list.append(lines)
+
+ exception = self.get_parts_of_chained_exception(evalue)
+
+ if exception and (id(exception[1]) not in chained_exc_ids):
+ chained_exception_message = (
+ self.prepare_chained_exception_message(evalue.__cause__)[0]
+ if evalue is not None
+ else ""
+ )
+ etype, evalue, etb = exception
+ # Trace exception to avoid infinite 'cause' loop
+ chained_exc_ids.add(id(exception[1]))
+ chained_exceptions_tb_offset = 0
+ out_list = (
+ self.structured_traceback(
+ etype,
+ evalue,
+ (etb, chained_exc_ids), # type: ignore
+ chained_exceptions_tb_offset,
+ context,
+ )
+ + chained_exception_message
+ + out_list)
+
+ return out_list
+
+ def _format_list(self, extracted_list):
+ """Format a list of traceback entry tuples for printing.
+
+ Given a list of tuples as returned by extract_tb() or
+ extract_stack(), return a list of strings ready for printing.
+ Each string in the resulting list corresponds to the item with the
+ same index in the argument list. Each string ends in a newline;
+ the strings may contain internal newlines as well, for those items
+ whose source text line is not None.
+
+ Lifted almost verbatim from traceback.py
+ """
+
+ Colors = self.Colors
+ output_list = []
+ for ind, (filename, lineno, name, line) in enumerate(extracted_list):
+ normalCol, nameCol, fileCol, lineCol = (
+ # Emphasize the last entry
+ (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line)
+ if ind == len(extracted_list) - 1
+ else (Colors.Normal, Colors.name, Colors.filename, "")
+ )
+
+ fns = _format_filename(filename, fileCol, normalCol, lineno=lineno)
+ item = f"{normalCol} {fns}"
+
+ if name != "<module>":
+ item += f" in {nameCol}{name}{normalCol}\n"
+ else:
+ item += "\n"
+ if line:
+ item += f"{lineCol} {line.strip()}{normalCol}\n"
+ output_list.append(item)
+
+ return output_list
+
+ def _format_exception_only(self, etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.exc_info()[:2]. The return value is a list of strings, each ending
+ in a newline. Normally, the list contains a single string; however,
+ for SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax error
+        occurred. The message indicating which exception occurred is always
+        the last string in the list.
+
+ Also lifted nearly verbatim from traceback.py
+ """
+ have_filedata = False
+ Colors = self.Colors
+ output_list = []
+ stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal)
+ if value is None:
+            # Not sure whether this can still happen on supported Python versions
+ output_list.append(stype + "\n")
+ else:
+ if issubclass(etype, SyntaxError):
+ have_filedata = True
+                if not value.filename:
+                    value.filename = "<string>"
+ if value.lineno:
+ lineno = value.lineno
+ textline = linecache.getline(value.filename, value.lineno)
+ else:
+ lineno = "unknown"
+ textline = ""
+ output_list.append(
+ "%s %s%s\n"
+ % (
+ Colors.normalEm,
+ _format_filename(
+ value.filename,
+ Colors.filenameEm,
+ Colors.normalEm,
+ lineno=(None if lineno == "unknown" else lineno),
+ ),
+ Colors.Normal,
+ )
+ )
+ if textline == "":
+ textline = py3compat.cast_unicode(value.text, "utf-8")
+
+ if textline is not None:
+ i = 0
+ while i < len(textline) and textline[i].isspace():
+ i += 1
+ output_list.append(
+ "%s %s%s\n" % (Colors.line, textline.strip(), Colors.Normal)
+ )
+ if value.offset is not None:
+ s = ' '
+ for c in textline[i:value.offset - 1]:
+ if c.isspace():
+ s += c
+ else:
+ s += " "
+ output_list.append(
+ "%s%s^%s\n" % (Colors.caret, s, Colors.Normal)
+ )
+
+ try:
+ s = value.msg
+ except Exception:
+ s = self._some_str(value)
+ if s:
+ output_list.append(
+ "%s%s:%s %s\n" % (stype, Colors.excName, Colors.Normal, s)
+ )
+ else:
+ output_list.append("%s\n" % stype)
+
+ # PEP-678 notes
+ output_list.extend(f"{x}\n" for x in getattr(value, "__notes__", []))
+
+ # sync with user hooks
+ if have_filedata:
+ ipinst = get_ipython()
+ if ipinst is not None:
+ ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
+
+ return output_list
+
+ def get_exception_only(self, etype, value):
+ """Only print the exception type and message, without a traceback.
+
+ Parameters
+ ----------
+ etype : exception type
+ value : exception value
+ """
+ return ListTB.structured_traceback(self, etype, value)
+
+ def show_exception_only(self, etype, evalue):
+ """Only print the exception type and message, without a traceback.
+
+ Parameters
+ ----------
+ etype : exception type
+ evalue : exception value
+ """
+ # This method needs to use __call__ from *this* class, not the one from
+ # a subclass whose signature or behavior may be different
+ ostream = self.ostream
+ ostream.flush()
+ ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
+ ostream.flush()
+
+ def _some_str(self, value):
+ # Lifted from traceback.py
+ try:
+ return py3compat.cast_unicode(str(value))
+ except:
+ return u'<unprintable %s object>' % type(value).__name__
+
+
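+
+# Illustrative sketch (hypothetical helper, for illustration only): ListTB can
+# be used on its own as a compact, list-based exception formatter.
+def _demo_list_tb_usage():
+    printer = ListTB()
+    try:
+        1 / 0
+    except ZeroDivisionError:
+        etype, evalue, etb = sys.exc_info()
+        # structured_traceback returns a list of (possibly colorized) strings.
+        return "".join(printer.structured_traceback(etype, evalue, etb))
+
+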
+class FrameInfo:
+ """
+ Mirror of stack data's FrameInfo, but so that we can bypass highlighting on
+ really long frames.
+ """
+
+ description: Optional[str]
+ filename: Optional[str]
+ lineno: Tuple[int]
+ # number of context lines to use
+ context: Optional[int]
+
+ @classmethod
+ def _from_stack_data_FrameInfo(cls, frame_info):
+ return cls(
+ getattr(frame_info, "description", None),
+ getattr(frame_info, "filename", None), # type: ignore[arg-type]
+ getattr(frame_info, "lineno", None), # type: ignore[arg-type]
+ getattr(frame_info, "frame", None),
+ getattr(frame_info, "code", None),
+ sd=frame_info,
+ context=None,
+ )
+
+ def __init__(
+ self,
+ description: Optional[str],
+ filename: str,
+ lineno: Tuple[int],
+ frame,
+ code,
+ *,
+ sd=None,
+ context=None,
+ ):
+ self.description = description
+ self.filename = filename
+ self.lineno = lineno
+ self.frame = frame
+ self.code = code
+ self._sd = sd
+ self.context = context
+
+ # self.lines = []
+ if sd is None:
+ ix = inspect.getsourcelines(frame)
+ self.raw_lines = ix[0]
+
+ @property
+ def variables_in_executing_piece(self):
+ if self._sd:
+ return self._sd.variables_in_executing_piece
+ else:
+ return []
+
+ @property
+ def lines(self):
+ return self._sd.lines
+
+ @property
+ def executing(self):
+ if self._sd:
+ return self._sd.executing
+ else:
+ return None
+
+
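+
+# Hypothetical sketch: building a FrameInfo by hand from a live frame, the
+# same way VerboseTB.get_records() does on its fast path for very long files.
+def _demo_frame_info():
+    frame = sys._getframe()
+    code = frame.f_code
+    return FrameInfo(
+        "Raw frame", code.co_filename, (frame.f_lineno,), frame, code, context=3
+    )
+
+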
+# ----------------------------------------------------------------------------
+class VerboseTB(TBTools):
+ """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
+ of HTML. Requires inspect and pydoc. Crazy, man.
+
+ Modified version which optionally strips the topmost entries from the
+ traceback, to be used with alternate interpreters (because their own code
+ would appear in the traceback)."""
+
+ _tb_highlight = ""
+
+ def __init__(
+ self,
+ color_scheme: str = "Linux",
+ call_pdb: bool = False,
+ ostream=None,
+ tb_offset: int = 0,
+ long_header: bool = False,
+ include_vars: bool = True,
+ check_cache=None,
+ debugger_cls=None,
+ parent=None,
+ config=None,
+ ):
+ """Specify traceback offset, headers and color scheme.
+
+ Define how many frames to drop from the tracebacks. Calling it with
+ tb_offset=1 allows use of this handler in interpreters which will have
+ their own code at the top of the traceback (VerboseTB will first
+ remove that frame before printing the traceback info)."""
+ TBTools.__init__(
+ self,
+ color_scheme=color_scheme,
+ call_pdb=call_pdb,
+ ostream=ostream,
+ parent=parent,
+ config=config,
+ debugger_cls=debugger_cls,
+ )
+ self.tb_offset = tb_offset
+ self.long_header = long_header
+ self.include_vars = include_vars
+ # By default we use linecache.checkcache, but the user can provide a
+ # different check_cache implementation. This was formerly used by the
+ # IPython kernel for interactive code, but is no longer necessary.
+ if check_cache is None:
+ check_cache = linecache.checkcache
+ self.check_cache = check_cache
+
+ self.skip_hidden = True
+
+ def format_record(self, frame_info: FrameInfo):
+ """Format a single stack frame"""
+ assert isinstance(frame_info, FrameInfo)
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+
+ if isinstance(frame_info._sd, stack_data.RepeatedFrames):
+ return ' %s[... skipping similar frames: %s]%s\n' % (
+ Colors.excName, frame_info.description, ColorsNormal)
+
+ indent = " " * INDENT_SIZE
+ em_normal = "%s\n%s%s" % (Colors.valEm, indent, ColorsNormal)
+ tpl_call = f"in {Colors.vName}{{file}}{Colors.valEm}{{scope}}{ColorsNormal}"
+ tpl_call_fail = "in %s%%s%s(***failed resolving arguments***)%s" % (
+ Colors.vName,
+ Colors.valEm,
+ ColorsNormal,
+ )
+ tpl_name_val = "%%s %s= %%s%s" % (Colors.valEm, ColorsNormal)
+
+ link = _format_filename(
+ frame_info.filename,
+ Colors.filenameEm,
+ ColorsNormal,
+ lineno=frame_info.lineno,
+ )
+ args, varargs, varkw, locals_ = inspect.getargvalues(frame_info.frame)
+ if frame_info.executing is not None:
+ func = frame_info.executing.code_qualname()
+ else:
+ func = "?"
+ if func == "<module>":
+ call = ""
+ else:
+ # Decide whether to include variable details or not
+ var_repr = eqrepr if self.include_vars else nullrepr
+ try:
+ scope = inspect.formatargvalues(
+ args, varargs, varkw, locals_, formatvalue=var_repr
+ )
+ call = tpl_call.format(file=func, scope=scope)
+ except KeyError:
+ # This happens in situations like errors inside generator
+ # expressions, where local variables are listed in the
+ # line, but can't be extracted from the frame. I'm not
+ # 100% sure this isn't actually a bug in inspect itself,
+ # but since there's no info for us to compute with, the
+ # best we can do is report the failure and move on. Here
+ # we must *not* call any traceback construction again,
+ # because that would mess up use of %debug later on. So we
+ # simply report the failure and move on. The only
+ # limitation will be that this frame won't have locals
+ # listed in the call signature. Quite subtle problem...
+ # I can't think of a good way to validate this in a unit
+ # test, but running a script consisting of:
+ # dict( (k,v.strip()) for (k,v) in range(10) )
+ # will illustrate the error, if this exception catch is
+ # disabled.
+ call = tpl_call_fail % func
+
+ lvals = ''
+ lvals_list = []
+ if self.include_vars:
+ try:
+ # we likely want to fix stackdata at some point, but
+ # still need a workaround.
+ fibp = frame_info.variables_in_executing_piece
+ for var in fibp:
+ lvals_list.append(tpl_name_val % (var.name, repr(var.value)))
+ except Exception:
+ lvals_list.append(
+ "Exception trying to inspect frame. No more locals available."
+ )
+ if lvals_list:
+ lvals = '%s%s' % (indent, em_normal.join(lvals_list))
+
+ result = f'{link}{", " if call else ""}{call}\n'
+ if frame_info._sd is None:
+ # fast fallback if file is too long
+ tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
+ link = tpl_link % util_path.compress_user(frame_info.filename)
+ level = "%s %s\n" % (link, call)
+ _line_format = PyColorize.Parser(
+ style=self.color_scheme_table.active_scheme_name, parent=self
+ ).format2
+ first_line = frame_info.code.co_firstlineno
+ current_line = frame_info.lineno[0]
+ raw_lines = frame_info.raw_lines
+ index = current_line - first_line
+
+ if index >= frame_info.context:
+ start = max(index - frame_info.context, 0)
+ stop = index + frame_info.context
+ index = frame_info.context
+ else:
+ start = 0
+ stop = index + frame_info.context
+ raw_lines = raw_lines[start:stop]
+
+ return "%s%s" % (
+ level,
+ "".join(
+ _simple_format_traceback_lines(
+ current_line,
+ index,
+ raw_lines,
+ Colors,
+ lvals,
+ _line_format,
+ )
+ ),
+ )
+ # result += "\n".join(frame_info.raw_lines)
+ else:
+ result += "".join(
+ _format_traceback_lines(
+ frame_info.lines, Colors, self.has_colors, lvals
+ )
+ )
+ return result
+
+ def prepare_header(self, etype: str, long_version: bool = False):
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
+ width = min(75, get_terminal_size()[0])
+ if long_version:
+ # Header with the exception type, python version, and date
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+
+ head = "%s%s%s\n%s%s%s\n%s" % (
+ colors.topline,
+ "-" * width,
+ colorsnormal,
+ exc,
+ " " * (width - len(etype) - len(pyver)),
+ pyver,
+ date.rjust(width),
+ )
+ head += (
+ "\nA problem occurred executing Python code. Here is the sequence of function"
+ "\ncalls leading up to the error, with the most recent (innermost) call last."
+ )
+ else:
+ # Simplified header
+ head = "%s%s" % (
+ exc,
+ "Traceback (most recent call last)".rjust(width - len(etype)),
+ )
+
+ return head
+
+ def format_exception(self, etype, evalue):
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ # Get (safely) a string form of the exception info
+ try:
+ etype_str, evalue_str = map(str, (etype, evalue))
+ except:
+ # User exception is improperly defined.
+ etype, evalue = str, sys.exc_info()[:2]
+ etype_str, evalue_str = map(str, (etype, evalue))
+
+ # PEP-678 notes
+ notes = getattr(evalue, "__notes__", [])
+ if not isinstance(notes, Sequence) or isinstance(notes, (str, bytes)):
+ notes = [_safe_string(notes, "__notes__", func=repr)]
+
+ # ... and format it
+ return [
+ "{}{}{}: {}".format(
+ colors.excName,
+ etype_str,
+ colorsnormal,
+ py3compat.cast_unicode(evalue_str),
+ ),
+ *(
+ "{}{}".format(
+ colorsnormal, _safe_string(py3compat.cast_unicode(n), "note")
+ )
+ for n in notes
+ ),
+ ]
+
+ def format_exception_as_a_whole(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType],
+ number_of_lines_of_context,
+ tb_offset: Optional[int],
+ ):
+ """Formats the header, traceback and exception message for a single exception.
+
+ This may be called multiple times by Python 3 exception chaining
+ (PEP 3134).
+ """
+ # some locals
+ orig_etype = etype
+ try:
+ etype = etype.__name__ # type: ignore
+ except AttributeError:
+ pass
+
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ assert isinstance(tb_offset, int)
+ head = self.prepare_header(str(etype), self.long_header)
+ records = (
+ self.get_records(etb, number_of_lines_of_context, tb_offset) if etb else []
+ )
+
+ frames = []
+ skipped = 0
+ lastrecord = len(records) - 1
+ for i, record in enumerate(records):
+ if (
+ not isinstance(record._sd, stack_data.RepeatedFrames)
+ and self.skip_hidden
+ ):
+ if (
+ record.frame.f_locals.get("__tracebackhide__", 0)
+ and i != lastrecord
+ ):
+ skipped += 1
+ continue
+ if skipped:
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+ frames.append(
+ " %s[... skipping hidden %s frame]%s\n"
+ % (Colors.excName, skipped, ColorsNormal)
+ )
+ skipped = 0
+ frames.append(self.format_record(record))
+ if skipped:
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+ frames.append(
+ " %s[... skipping hidden %s frame]%s\n"
+ % (Colors.excName, skipped, ColorsNormal)
+ )
+
+ formatted_exception = self.format_exception(etype, evalue)
+ if records:
+ frame_info = records[-1]
+ ipinst = get_ipython()
+ if ipinst is not None:
+ ipinst.hooks.synchronize_with_editor(frame_info.filename, frame_info.lineno, 0)
+
+ return [[head] + frames + formatted_exception]
+
+ def get_records(
+ self, etb: TracebackType, number_of_lines_of_context: int, tb_offset: int
+ ):
+ assert etb is not None
+ context = number_of_lines_of_context - 1
+ after = context // 2
+ before = context - after
+ if self.has_colors:
+ style = get_style_by_name("default")
+ style = stack_data.style_with_executing_node(style, self._tb_highlight)
+ formatter = Terminal256Formatter(style=style)
+ else:
+ formatter = None
+ options = stack_data.Options(
+ before=before,
+ after=after,
+ pygments_formatter=formatter,
+ )
+
+ # Let's estimate the amount of code we will have to parse/highlight.
+ cf: Optional[TracebackType] = etb
+ max_len = 0
+ tbs = []
+ while cf is not None:
+ try:
+ mod = inspect.getmodule(cf.tb_frame)
+ if mod is not None:
+ mod_name = mod.__name__
+ root_name, *_ = mod_name.split(".")
+ if root_name == "IPython":
+ cf = cf.tb_next
+ continue
+                frame_len = get_line_number_of_frame(cf.tb_frame)
+            except OSError:
+                frame_len = 0
+            # Keep the longest file seen so far; a single oversized frame is
+            # enough to push us onto the fast formatting path below.
+            max_len = max(max_len, frame_len)
+ tbs.append(cf)
+ cf = getattr(cf, "tb_next", None)
+
+ if max_len > FAST_THRESHOLD:
+ FIs = []
+ for tb in tbs:
+ frame = tb.tb_frame # type: ignore
+ lineno = (frame.f_lineno,)
+ code = frame.f_code
+ filename = code.co_filename
+ # TODO: Here we need to use before/after/
+ FIs.append(
+ FrameInfo(
+ "Raw frame", filename, lineno, frame, code, context=context
+ )
+ )
+ return FIs
+ res = list(stack_data.FrameInfo.stack_data(etb, options=options))[tb_offset:]
+ res = [FrameInfo._from_stack_data_FrameInfo(r) for r in res]
+ return res
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ number_of_lines_of_context: int = 5,
+ ):
+ """Return a nice text document describing the traceback."""
+ formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
+ tb_offset)
+
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
+ structured_traceback_parts = [head]
+ chained_exceptions_tb_offset = 0
+ lines_of_context = 3
+ formatted_exceptions = formatted_exception
+ exception = self.get_parts_of_chained_exception(evalue)
+ if exception:
+ assert evalue is not None
+ formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
+ etype, evalue, etb = exception
+ else:
+ evalue = None
+ chained_exc_ids = set()
+ while evalue:
+ formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
+ chained_exceptions_tb_offset)
+ exception = self.get_parts_of_chained_exception(evalue)
+
+ if exception and not id(exception[1]) in chained_exc_ids:
+ chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
+ formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
+ etype, evalue, etb = exception
+ else:
+ evalue = None
+
+ # we want to see exceptions in a reversed order:
+ # the first exception should be on top
+ for formatted_exception in reversed(formatted_exceptions):
+ structured_traceback_parts += formatted_exception
+
+ return structured_traceback_parts
+
+ def debugger(self, force: bool = False):
+ """Call up the pdb debugger if desired, always clean up the tb
+ reference.
+
+ Keywords:
+
+ - force(False): by default, this routine checks the instance call_pdb
+ flag and does not actually invoke the debugger if the flag is false.
+ The 'force' option forces the debugger to activate even if the flag
+ is false.
+
+ If the call_pdb flag is set, the pdb interactive debugger is
+ invoked. In all cases, the self.tb reference to the current traceback
+ is deleted to prevent lingering references which hamper memory
+ management.
+
+ Note that each call to pdb() does an 'import readline', so if your app
+ requires a special setup for the readline completers, you'll have to
+ fix that by hand after invoking the exception handler."""
+
+ if force or self.call_pdb:
+ if self.pdb is None:
+ self.pdb = self.debugger_cls()
+ # the system displayhook may have changed, restore the original
+ # for pdb
+ display_trap = DisplayTrap(hook=sys.__displayhook__)
+ with display_trap:
+ self.pdb.reset()
+ # Find the right frame so we don't pop up inside ipython itself
+ if hasattr(self, "tb") and self.tb is not None: # type: ignore[has-type]
+ etb = self.tb # type: ignore[has-type]
+ else:
+ etb = self.tb = sys.last_traceback
+ while self.tb is not None and self.tb.tb_next is not None:
+ assert self.tb.tb_next is not None
+ self.tb = self.tb.tb_next
+ if etb and etb.tb_next:
+ etb = etb.tb_next
+ self.pdb.botframe = etb.tb_frame
+ self.pdb.interaction(None, etb)
+
+ if hasattr(self, 'tb'):
+ del self.tb
+
+ def handler(self, info=None):
+ (etype, evalue, etb) = info or sys.exc_info()
+ self.tb = etb
+ ostream = self.ostream
+ ostream.flush()
+ ostream.write(self.text(etype, evalue, etb))
+ ostream.write('\n')
+ ostream.flush()
+
+ # Changed so an instance can just be called as VerboseTB_inst() and print
+ # out the right info on its own.
+ def __call__(self, etype=None, evalue=None, etb=None):
+        """This hook can replace sys.excepthook."""
+ if etb is None:
+ self.handler()
+ else:
+ self.handler((etype, evalue, etb))
+ try:
+ self.debugger()
+ except KeyboardInterrupt:
+ print("\nKeyboardInterrupt")
+
+
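+# Illustrative sketch (hypothetical helper): a VerboseTB instance is callable
+# with (etype, evalue, etb), so it can stand in directly for sys.excepthook.
+def _demo_install_verbose_excepthook():
+    handler = VerboseTB(color_scheme="Linux", include_vars=True, tb_offset=0)
+    sys.excepthook = handler
+    return handler
+
+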
+#----------------------------------------------------------------------------
+class FormattedTB(VerboseTB, ListTB):
+ """Subclass ListTB but allow calling with a traceback.
+
+    It can thus be used as a sys.excepthook.
+
+ Also adds 'Context' and 'Verbose' modes, not available in ListTB.
+
+ Allows a tb_offset to be specified. This is useful for situations where
+ one needs to remove a number of topmost frames from the traceback (such as
+ occurs with python programs that themselves execute other python code,
+ like Python shells). """
+
+ mode: str
+
+ def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
+ ostream=None,
+ tb_offset=0, long_header=False, include_vars=False,
+ check_cache=None, debugger_cls=None,
+ parent=None, config=None):
+
+ # NEVER change the order of this list. Put new modes at the end:
+ self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal']
+ self.verbose_modes = self.valid_modes[1:3]
+
+ VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
+ ostream=ostream, tb_offset=tb_offset,
+ long_header=long_header, include_vars=include_vars,
+ check_cache=check_cache, debugger_cls=debugger_cls,
+ parent=parent, config=config)
+
+ # Different types of tracebacks are joined with different separators to
+ # form a single string. They are taken from this dict
+ self._join_chars = dict(Plain='', Context='\n', Verbose='\n',
+ Minimal='')
+ # set_mode also sets the tb_join_char attribute
+ self.set_mode(mode)
+
+ def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ mode = self.mode
+ if mode in self.verbose_modes:
+ # Verbose modes need a full traceback
+ return VerboseTB.structured_traceback(
+ self, etype, value, tb, tb_offset, number_of_lines_of_context
+ )
+ elif mode == 'Minimal':
+ return ListTB.get_exception_only(self, etype, value)
+ else:
+ # We must check the source cache because otherwise we can print
+ # out-of-date source code.
+ self.check_cache()
+ # Now we can extract and format the exception
+ return ListTB.structured_traceback(
+ self, etype, value, tb, tb_offset, number_of_lines_of_context
+ )
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return self.tb_join_char.join(stb)
+
+ def set_mode(self, mode: Optional[str] = None):
+ """Switch to the desired mode.
+
+ If mode is not specified, cycles through the available modes."""
+
+ if not mode:
+ new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
+ len(self.valid_modes)
+ self.mode = self.valid_modes[new_idx]
+ elif mode not in self.valid_modes:
+ raise ValueError(
+ "Unrecognized mode in FormattedTB: <" + mode + ">\n"
+ "Valid modes: " + str(self.valid_modes)
+ )
+ else:
+ assert isinstance(mode, str)
+ self.mode = mode
+ # include variable details only in 'Verbose' mode
+ self.include_vars = (self.mode == self.valid_modes[2])
+ # Set the join character for generating text tracebacks
+ self.tb_join_char = self._join_chars[self.mode]
+
+ # some convenient shortcuts
+ def plain(self):
+ self.set_mode(self.valid_modes[0])
+
+ def context(self):
+ self.set_mode(self.valid_modes[1])
+
+ def verbose(self):
+ self.set_mode(self.valid_modes[2])
+
+ def minimal(self):
+ self.set_mode(self.valid_modes[3])
+
+
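+# Hypothetical sketch: switching FormattedTB verbosity at runtime, which is
+# the mechanism the %xmode magic builds on.
+def _demo_formatted_tb_modes():
+    ftb = FormattedTB(mode="Context", color_scheme="Linux")
+    try:
+        {}["missing"]
+    except KeyError:
+        etype, evalue, etb = sys.exc_info()
+        ftb.set_mode("Minimal")   # just the exception line
+        minimal = ftb.text(etype, evalue, etb)
+        ftb.set_mode("Verbose")   # full frames, including local variables
+        verbose = ftb.text(etype, evalue, etb)
+        return minimal, verbose
+
+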
+#----------------------------------------------------------------------------
+class AutoFormattedTB(FormattedTB):
+ """A traceback printer which can be called on the fly.
+
+ It will find out about exceptions by itself.
+
+ A brief example::
+
+ AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux')
+ try:
+ ...
+ except:
+ AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
+ """
+
+ def __call__(self, etype=None, evalue=None, etb=None,
+ out=None, tb_offset=None):
+ """Print out a formatted exception traceback.
+
+ Optional arguments:
+ - out: an open file-like object to direct output to.
+
+ - tb_offset: the number of frames to skip over in the stack, on a
+          per-call basis (this temporarily overrides the instance's tb_offset
+          given at initialization time)."""
+
+ if out is None:
+ out = self.ostream
+ out.flush()
+ out.write(self.text(etype, evalue, etb, tb_offset))
+ out.write('\n')
+ out.flush()
+ # FIXME: we should remove the auto pdb behavior from here and leave
+ # that to the clients.
+ try:
+ self.debugger()
+ except KeyboardInterrupt:
+ print("\nKeyboardInterrupt")
+
+ def structured_traceback(
+ self,
+ etype: type,
+ evalue: Optional[BaseException],
+ etb: Optional[TracebackType] = None,
+ tb_offset: Optional[int] = None,
+ number_of_lines_of_context: int = 5,
+ ):
+        # tb: TracebackType or tuple of tb types?
+ if etype is None:
+ etype, evalue, etb = sys.exc_info()
+ if isinstance(etb, tuple):
+ # tb is a tuple if this is a chained exception.
+ self.tb = etb[0]
+ else:
+ self.tb = etb
+ return FormattedTB.structured_traceback(
+ self, etype, evalue, etb, tb_offset, number_of_lines_of_context
+ )
+
+
+#---------------------------------------------------------------------------
+
+# A simple class to preserve Nathan's original functionality.
+class ColorTB(FormattedTB):
+ """Shorthand to initialize a FormattedTB in Linux colors mode."""
+
+ def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
+ FormattedTB.__init__(self, color_scheme=color_scheme,
+ call_pdb=call_pdb, **kwargs)
+
+
+class SyntaxTB(ListTB):
+ """Extension which holds some state: the last exception value"""
+
+ def __init__(self, color_scheme='NoColor', parent=None, config=None):
+ ListTB.__init__(self, color_scheme, parent=parent, config=config)
+ self.last_syntax_error = None
+
+ def __call__(self, etype, value, elist):
+ self.last_syntax_error = value
+
+ ListTB.__call__(self, etype, value, elist)
+
+ def structured_traceback(self, etype, value, elist, tb_offset=None,
+ context=5):
+ # If the source file has been edited, the line in the syntax error can
+ # be wrong (retrieved from an outdated cache). This replaces it with
+ # the current value.
+ if isinstance(value, SyntaxError) \
+ and isinstance(value.filename, str) \
+ and isinstance(value.lineno, int):
+ linecache.checkcache(value.filename)
+ newtext = linecache.getline(value.filename, value.lineno)
+ if newtext:
+ value.text = newtext
+ self.last_syntax_error = value
+ return super(SyntaxTB, self).structured_traceback(etype, value, elist,
+ tb_offset=tb_offset, context=context)
+
+ def clear_err_state(self):
+ """Return the current error state and clear it"""
+ e = self.last_syntax_error
+ self.last_syntax_error = None
+ return e
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return ''.join(stb)
+
+
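+# Hypothetical sketch: SyntaxTB remembers the most recent SyntaxError so the
+# shell can later point an editor at the offending file (e.g. via %edit).
+def _demo_syntax_tb():
+    printer = SyntaxTB(color_scheme="NoColor")
+    try:
+        compile("def broken(:\n    pass", "<demo>", "exec")
+    except SyntaxError as err:
+        printer(SyntaxError, err, [])
+    return printer.clear_err_state()
+
+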
+# some internal-use functions
+def text_repr(value):
+ """Hopefully pretty robust repr equivalent."""
+ # this is pretty horrible but should always return *something*
+ try:
+ return pydoc.text.repr(value) # type: ignore[call-arg]
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ return repr(value)
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ # all still in an except block so we catch
+ # getattr raising
+ name = getattr(value, '__name__', None)
+ if name:
+ # ick, recursion
+ return text_repr(name)
+ klass = getattr(value, '__class__', None)
+ if klass:
+ return '%s instance' % text_repr(klass)
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 'UNRECOVERABLE REPR FAILURE'
+
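+
+# Hypothetical sketch: text_repr degrades gracefully when an object's __repr__
+# itself raises, which is why eqrepr builds its value strings on top of it.
+def _demo_text_repr():
+    class _BrokenRepr:
+        def __repr__(self):
+            raise RuntimeError("boom")
+
+    # Yields a "<class ...> instance" description instead of propagating
+    # the RuntimeError.
+    return text_repr(_BrokenRepr())
+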
+
+def eqrepr(value, repr=text_repr):
+ return '=%s' % repr(value)
+
+
+def nullrepr(value, repr=text_repr):
+ return ''
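+
+
+# Hypothetical sketch: eqrepr and nullrepr are meant as `formatvalue` hooks
+# for inspect.formatargvalues, which is how VerboseTB.format_record renders
+# (or suppresses) argument values in a frame's call-signature line.
+def _demo_formatvalue_hooks(a=1, b="two"):
+    frame = sys._getframe()
+    args, varargs, varkw, locals_ = inspect.getargvalues(frame)
+    with_values = inspect.formatargvalues(
+        args, varargs, varkw, locals_, formatvalue=eqrepr
+    )
+    without_values = inspect.formatargvalues(
+        args, varargs, varkw, locals_, formatvalue=nullrepr
+    )
+    return with_values, without_values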
diff --git a/contrib/python/ipython/py3/IPython/core/usage.py b/contrib/python/ipython/py3/IPython/core/usage.py
new file mode 100644
index 0000000000..53219bceb2
--- /dev/null
+++ b/contrib/python/ipython/py3/IPython/core/usage.py
@@ -0,0 +1,341 @@
+# -*- coding: utf-8 -*-
+"""Usage information for the main IPython applications.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import sys
+from IPython.core import release
+
+cl_usage = """\
+=========
+ IPython
+=========
+
+Tools for Interactive Computing in Python
+=========================================
+
+ A Python shell with automatic history (input and output), dynamic object
+ introspection, easier configuration, command completion, access to the
+ system shell and more. IPython can also be embedded in running programs.
+
+
+Usage
+
+ ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
+
+ If invoked with no options, it executes the file and exits, passing the
+ remaining arguments to the script, just as if you had specified the same
+ command with python. You may need to specify `--` before args to be passed
+ to the script, to prevent IPython from attempting to parse them. If you
+ specify the option `-i` before the filename, it will enter an interactive
+ IPython session after running the script, rather than exiting. Files ending
+ in .py will be treated as normal Python, but files ending in .ipy can
+ contain special IPython syntax (magic commands, shell expansions, etc.).
+
+ Almost all configuration in IPython is available via the command-line. Do
+ `ipython --help-all` to see all available options. For persistent
+ configuration, look into your `ipython_config.py` configuration file for
+ details.
+
+ This file is typically installed in the `IPYTHONDIR` directory, and there
+ is a separate configuration directory for each profile. The default profile
+ directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
+    defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
+ C:\\Users\\YourUserName in most instances.
+
+ To initialize a profile with the default configuration file, do::
+
+ $> ipython profile create
+
+ and start editing `IPYTHONDIR/profile_default/ipython_config.py`
+
+ In IPython's documentation, we will refer to this directory as
+    `IPYTHONDIR`; you can change its default location by creating an
+ environment variable with this name and setting it to the desired path.
+
+ For more information, see the manual available in HTML and PDF in your
+ installation, or online at https://ipython.org/documentation.html.
+"""
+
+interactive_usage = """
+IPython -- An enhanced Interactive Python
+=========================================
+
+IPython offers a fully compatible replacement for the standard Python
+interpreter, with convenient shell features, special commands, command
+history mechanism and output results caching.
+
+At your system command line, type 'ipython -h' to see the command line
+options available. This document only describes interactive features.
+
+GETTING HELP
+------------
+
+Within IPython you have various ways to access help:
+
+ ? -> Introduction and overview of IPython's features (this screen).
+ object? -> Details about 'object'.
+ object?? -> More detailed, verbose information about 'object'.
+ %quickref -> Quick reference of all IPython specific syntax and magics.
+ help -> Access Python's own help system.
+
+If you are in terminal IPython you can quit this screen by pressing `q`.
+
+
+MAIN FEATURES
+-------------
+
+* Access to the standard Python help with object docstrings and the Python
+ manuals. Simply type 'help' (no quotes) to invoke it.
+
+* Magic commands: type %magic for information on the magic subsystem.
+
+* System command aliases, via the %alias command or the configuration file(s).
+
+* Dynamic object information:
+
+ Typing ?word or word? prints detailed information about an object. Certain
+ long strings (code, etc.) get snipped in the center for brevity.
+
+ Typing ??word or word?? gives access to the full information without
+ snipping long strings. Strings that are longer than the screen are printed
+ through the less pager.
+
+ The ?/?? system gives access to the full source code for any object (if
+ available), shows function prototypes and other useful information.
+
+ If you just want to see an object's docstring, type '%pdoc object' (without
+ quotes, and without % if you have automagic on).
+
+* Tab completion in the local namespace:
+
+ At any time, hitting tab will complete any available python commands or
+ variable names, and show you a list of the possible completions if there's
+ no unambiguous one. It will also complete filenames in the current directory.
+
+* Search previous command history in multiple ways:
+
+ - Start typing, and then use arrow keys up/down or (Ctrl-p/Ctrl-n) to search
+ through the history items that match what you've typed so far.
+
+ - Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
+ your history for lines that match what you've typed so far, completing as
+ much as it can.
+
+ - %hist: search history by index.
+
+* Persistent command history across sessions.
+
+* Logging of input with the ability to save and restore a working session.
+
+* System shell with !. Typing !ls will run 'ls' in the current directory.
+
+* The reload command does a 'deep' reload of a module: changes made to the
+  module since you imported it will actually be available without having to exit.
+
+* Verbose and colored exception traceback printouts. See the %xmode and
+  %colors magics for details (just type %magic).
+
+* Input caching system:
+
+ IPython offers numbered prompts (In/Out) with input and output caching. All
+ input is saved and can be retrieved as variables (besides the usual arrow
+ key recall).
+
+ The following GLOBAL variables always exist (so don't overwrite them!):
+ _i: stores previous input.
+ _ii: next previous.
+ _iii: next-next previous.
+  _ih : a list of all inputs; _ih[n] is the input from line n.
+
+ Additionally, global variables named _i<n> are dynamically created (<n>
+ being the prompt counter), such that _i<n> == _ih[<n>]
+
+ For example, what you typed at prompt 14 is available as _i14 and _ih[14].
+
+ You can create macros which contain multiple input lines from this history,
+ for later re-execution, with the %macro function.
+
+ The history function %hist allows you to see any part of your input history
+ by printing a range of the _i variables. Note that inputs which contain
+ magic functions (%) appear in the history with a prepended comment. This is
+ because they aren't really valid Python code, so you can't exec them.
+
+* Output caching system:
+
+ For output that is returned from actions, a system similar to the input
+ cache exists but using _ instead of _i. Only actions that produce a result
+ (NOT assignments, for example) are cached. If you are familiar with
+ Mathematica, IPython's _ variables behave exactly like Mathematica's %
+ variables.
+
+ The following GLOBAL variables always exist (so don't overwrite them!):
+ _ (one underscore): previous output.
+ __ (two underscores): next previous.
+ ___ (three underscores): next-next previous.
+
+ Global variables named _<n> are dynamically created (<n> being the prompt
+ counter), such that the result of output <n> is always available as _<n>.
+
+ Finally, a global dictionary named _oh exists with entries for all lines
+ which generated output.
+
+* Directory history:
+
+ Your history of visited directories is kept in the global list _dh, and the
+ magic %cd command can be used to go to any entry in that list.
+
+* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
+
+ 1. Auto-parentheses
+
+ Callable objects (i.e. functions, methods, etc) can be invoked like
+ this (notice the commas between the arguments)::
+
+ In [1]: callable_ob arg1, arg2, arg3
+
+ and the input will be translated to this::
+
+ callable_ob(arg1, arg2, arg3)
+
+ This feature is off by default (in rare cases it can produce
+ undesirable side-effects), but you can activate it at the command-line
+ by starting IPython with `--autocall 1`, set it permanently in your
+     configuration file, or turn it on at runtime with `%autocall 1`.
+
+ You can force auto-parentheses by using '/' as the first character
+ of a line. For example::
+
+ In [1]: /globals # becomes 'globals()'
+
+ Note that the '/' MUST be the first character on the line! This
+ won't work::
+
+ In [2]: print /globals # syntax error
+
+ In most cases the automatic algorithm should work, so you should
+ rarely need to explicitly invoke /. One notable exception is if you
+ are trying to call a function with a list of tuples as arguments (the
+     parentheses will confuse IPython)::
+
+ In [1]: zip (1,2,3),(4,5,6) # won't work
+
+ but this will work::
+
+ In [2]: /zip (1,2,3),(4,5,6)
+ ------> zip ((1,2,3),(4,5,6))
+ Out[2]= [(1, 4), (2, 5), (3, 6)]
+
+ IPython tells you that it has altered your command line by
+ displaying the new command line preceded by -->. e.g.::
+
+ In [18]: callable list
+ -------> callable (list)
+
+ 2. Auto-Quoting
+
+ You can force auto-quoting of a function's arguments by using ',' as
+ the first character of a line. For example::
+
+ In [1]: ,my_function /home/me # becomes my_function("/home/me")
+
+ If you use ';' instead, the whole argument is quoted as a single
+ string (while ',' splits on whitespace)::
+
+ In [2]: ,my_function a b c # becomes my_function("a","b","c")
+ In [3]: ;my_function a b c # becomes my_function("a b c")
+
+ Note that the ',' MUST be the first character on the line! This
+ won't work::
+
+ In [4]: x = ,my_function /home/me # syntax error
+"""
+
+interactive_usage_min = """\
+An enhanced console for Python.
+Some of its features are:
+- Tab completion in the local namespace.
+- Logging of input, see command-line options.
+- System shell escape via !, e.g. !ls.
+- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
+- Keeps track of locally defined variables via %who, %whos.
+- Show object information with a ?, e.g. ?x or x? (use ?? for more info).
+"""
+
+quick_reference = r"""
+IPython -- An enhanced Interactive Python - Quick Reference Card
+================================================================
+
+obj?, obj?? : Get help, or more help for object (also works as
+ ?obj, ??obj).
+?foo.*abc* : List names in 'foo' containing 'abc' in them.
+%magic : Information about IPython's 'magic' % functions.
+
+Magic functions are prefixed by % or %%, and typically take their arguments
+without parentheses, quotes or even commas for convenience. Line magics take a
+single % and cell magics are prefixed with two %%.
+
+Example magic function calls:
+
+%alias d ls -F : 'd' is now an alias for 'ls -F'
+alias d ls -F : Works if 'alias' is not a python name
+alist = %alias : Get list of aliases to 'alist'
+cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
+%cd?? : See help AND source for magic %cd
+%timeit x=10 : time the 'x=10' statement with high precision.
+%%timeit x=2**100
+x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
+ counted. This is an example of a cell magic.
+
+System commands:
+
+!cp a.txt b/ : System command escape, calls os.system()
+cp a.txt b/ : after %rehashx, most system commands work without !
+cp ${f}.txt $bar : Variable expansion in magics and system commands
+files = !ls /usr : Capture system command output
+files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
+
+History:
+
+_i, _ii, _iii : Previous, next previous, next next previous input
+_i4, _ih[2:5] : Input history line 4, lines 2-4
+exec(_i81) : Execute input history line #81 again
+%rep 81 : Edit input history line #81
+_, __, ___ : previous, next previous, next next previous output
+_dh : Directory history
+_oh : Output history
+%hist : Command history of current session.
+%hist -g foo : Search command history of (almost) all sessions for 'foo'.
+%hist -g : Command history of (almost) all sessions.
+%hist 1/2-8 : Command history containing lines 2-8 of session 1.
+%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
+%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
+ line 5 of 6 sessions ago.
+%edit 0/ : Open editor to execute code with history of current session.
+
+Autocall:
+
+f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
+/f 1,2 : f(1,2) (forced autoparen)
+,f 1 2 : f("1","2")
+;f 1 2 : f("1 2")
+
+Remember: TAB completion works in many contexts, not just file names
+or python names.
+
+The following magic functions are currently available:
+
+"""
+
+default_banner_parts = ["Python %s\n"%sys.version.split("\n")[0],
+ "Type 'copyright', 'credits' or 'license' for more information\n" ,
+ "IPython {version} -- An enhanced Interactive Python. Type '?' for help.\n".format(version=release.version),
+]
+
+default_banner = ''.join(default_banner_parts)