diff options
| author | Devtools Arcadia <[email protected]> | 2022-02-07 18:08:42 +0300 | 
|---|---|---|
| committer | Devtools Arcadia <[email protected]> | 2022-02-07 18:08:42 +0300 | 
| commit | 1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch) | |
| tree | e26c9fed0de5d9873cce7e00bc214573dc2195b7 /contrib/python/ipython/py2/IPython/utils | |
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/python/ipython/py2/IPython/utils')
55 files changed, 8595 insertions, 0 deletions
diff --git a/contrib/python/ipython/py2/IPython/utils/PyColorize.py b/contrib/python/ipython/py2/IPython/utils/PyColorize.py new file mode 100644 index 00000000000..124eb2d4e3c --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/PyColorize.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +""" +Class and program to colorize python source code for ANSI terminals. + +Based on an HTML code highlighter by Jurgen Hermann found at: +http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298 + +Modifications by Fernando Perez ([email protected]). + +Information on the original HTML highlighter follows: + +MoinMoin - Python Source Parser + +Title: Colorize Python source using the built-in tokenizer + +Submitter: Jurgen Hermann +Last Updated:2001/04/06 + +Version no:1.2 + +Description: + +This code is part of MoinMoin (http://moin.sourceforge.net/) and converts +Python source code to HTML markup, rendering comments, keywords, +operators, numeric and string literals in different colors. + +It shows how to use the built-in keyword, token and tokenize modules to +scan Python source code and re-emit it with no changes to its original +formatting (which is the hard part). +""" +from __future__ import print_function +from __future__ import absolute_import +from __future__ import unicode_literals + +__all__ = ['ANSICodeColors','Parser'] + +_scheme_default = 'Linux' + + +# Imports +import keyword +import os +import sys +import token +import tokenize + +try: +    generate_tokens = tokenize.generate_tokens +except AttributeError: +    # Python 3. Note that we use the undocumented _tokenize because it expects +    # strings, not bytes. See also Python issue #9969. 
+    generate_tokens = tokenize._tokenize + +from IPython.utils.coloransi import TermColors, InputTermColors ,ColorScheme, ColorSchemeTable +from IPython.utils.py3compat import PY3 + +from .colorable import Colorable + +if PY3: +    from io import StringIO +else: +    from StringIO import StringIO + +############################################################################# +### Python Source Parser (does Hilighting) +############################################################################# + +_KEYWORD = token.NT_OFFSET + 1 +_TEXT    = token.NT_OFFSET + 2 + +#**************************************************************************** +# Builtin color schemes + +Colors = TermColors  # just a shorthand + +# Build a few color schemes +NoColor = ColorScheme( +    'NoColor',{ +    'header'         : Colors.NoColor, +    token.NUMBER     : Colors.NoColor, +    token.OP         : Colors.NoColor, +    token.STRING     : Colors.NoColor, +    tokenize.COMMENT : Colors.NoColor, +    token.NAME       : Colors.NoColor, +    token.ERRORTOKEN : Colors.NoColor, + +    _KEYWORD         : Colors.NoColor, +    _TEXT            : Colors.NoColor, + +    'in_prompt'      : InputTermColors.NoColor,  # Input prompt +    'in_number'      : InputTermColors.NoColor,  # Input prompt number +    'in_prompt2'     : InputTermColors.NoColor, # Continuation prompt +    'in_normal'      : InputTermColors.NoColor,  # color off (usu. Colors.Normal) + +    'out_prompt'     : Colors.NoColor, # Output prompt +    'out_number'     : Colors.NoColor, # Output prompt number + +    'normal'         : Colors.NoColor  # color off (usu. 
Colors.Normal) +    }  ) + +LinuxColors = ColorScheme( +    'Linux',{ +    'header'         : Colors.LightRed, +    token.NUMBER     : Colors.LightCyan, +    token.OP         : Colors.Yellow, +    token.STRING     : Colors.LightBlue, +    tokenize.COMMENT : Colors.LightRed, +    token.NAME       : Colors.Normal, +    token.ERRORTOKEN : Colors.Red, + +    _KEYWORD         : Colors.LightGreen, +    _TEXT            : Colors.Yellow, + +    'in_prompt'      : InputTermColors.Green, +    'in_number'      : InputTermColors.LightGreen, +    'in_prompt2'     : InputTermColors.Green, +    'in_normal'      : InputTermColors.Normal,  # color off (usu. Colors.Normal) + +    'out_prompt'     : Colors.Red, +    'out_number'     : Colors.LightRed, + +    'normal'         : Colors.Normal  # color off (usu. Colors.Normal) +    } ) + +NeutralColors = ColorScheme( +    'Neutral',{ +    'header'         : Colors.Red, +    token.NUMBER     : Colors.Cyan, +    token.OP         : Colors.Blue, +    token.STRING     : Colors.Blue, +    tokenize.COMMENT : Colors.Red, +    token.NAME       : Colors.Normal, +    token.ERRORTOKEN : Colors.Red, + +    _KEYWORD         : Colors.Green, +    _TEXT            : Colors.Blue, + +    'in_prompt'      : InputTermColors.Blue, +    'in_number'      : InputTermColors.LightBlue, +    'in_prompt2'     : InputTermColors.Blue, +    'in_normal'      : InputTermColors.Normal,  # color off (usu. Colors.Normal) + +    'out_prompt'     : Colors.Red, +    'out_number'     : Colors.LightRed, + +    'normal'         : Colors.Normal  # color off (usu. Colors.Normal) +    }  ) + +# Hack: the 'neutral' colours are not very visible on a dark background on +# Windows. Since Windows command prompts have a dark background by default, and +# relatively few users are likely to alter that, we will use the 'Linux' colours, +# designed for a dark background, as the default on Windows. 
Changing it here +# avoids affecting the prompt colours rendered by prompt_toolkit, where the +# neutral defaults do work OK. + +if os.name == 'nt': +    NeutralColors = LinuxColors.copy(name='Neutral') + +LightBGColors = ColorScheme( +    'LightBG',{ +    'header'         : Colors.Red, +    token.NUMBER     : Colors.Cyan, +    token.OP         : Colors.Blue, +    token.STRING     : Colors.Blue, +    tokenize.COMMENT : Colors.Red, +    token.NAME       : Colors.Normal, +    token.ERRORTOKEN : Colors.Red, + + +    _KEYWORD         : Colors.Green, +    _TEXT            : Colors.Blue, + +    'in_prompt'      : InputTermColors.Blue, +    'in_number'      : InputTermColors.LightBlue, +    'in_prompt2'     : InputTermColors.Blue, +    'in_normal'      : InputTermColors.Normal,  # color off (usu. Colors.Normal) + +    'out_prompt'     : Colors.Red, +    'out_number'     : Colors.LightRed, + +    'normal'         : Colors.Normal  # color off (usu. Colors.Normal) +    }  ) + +# Build table of color schemes (needed by the parser) +ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors, NeutralColors], +                                  _scheme_default) + +class Parser(Colorable): +    """ Format colored Python source. +    """ + +    def __init__(self, color_table=None, out = sys.stdout, parent=None, style=None): +        """ Create a parser with a specified color table and output channel. + +        Call format() to process code. +        """ + +        super(Parser, self).__init__(parent=parent) + +        self.color_table = color_table and color_table or ANSICodeColors +        self.out = out + +    def format(self, raw, out = None, scheme = ''): +        return self.format2(raw, out, scheme)[0] + +    def format2(self, raw, out = None, scheme = ''): +        """ Parse and send the colored source. + +        If out and scheme are not specified, the defaults (given to +        constructor) are used. + +        out should be a file-type object. 
Optionally, out can be given as the +        string 'str' and the parser will automatically return the output in a +        string.""" + +        string_output = 0 +        if out == 'str' or self.out == 'str' or \ +           isinstance(self.out,StringIO): +            # XXX - I don't really like this state handling logic, but at this +            # point I don't want to make major changes, so adding the +            # isinstance() check is the simplest I can do to ensure correct +            # behavior. +            out_old = self.out +            self.out = StringIO() +            string_output = 1 +        elif out is not None: +            self.out = out + +        # Fast return of the unmodified input for NoColor scheme +        if scheme == 'NoColor': +            error = False +            self.out.write(raw) +            if string_output: +                return raw,error +            else: +                return None,error + +        # local shorthands +        colors = self.color_table[scheme].colors +        self.colors = colors # put in object so __call__ sees it + +        # Remove trailing whitespace and normalize tabs +        self.raw = raw.expandtabs().rstrip() + +        # store line offsets in self.lines +        self.lines = [0, 0] +        pos = 0 +        raw_find = self.raw.find +        lines_append = self.lines.append +        while 1: +            pos = raw_find('\n', pos) + 1 +            if not pos: break +            lines_append(pos) +        lines_append(len(self.raw)) + +        # parse the source and write it +        self.pos = 0 +        text = StringIO(self.raw) + +        error = False +        try: +            for atoken in generate_tokens(text.readline): +                self(*atoken) +        except tokenize.TokenError as ex: +            msg = ex.args[0] +            line = ex.args[1][0] +            self.out.write("%s\n\n*** ERROR: %s%s%s\n" % +                           (colors[token.ERRORTOKEN], +                       
     msg, self.raw[self.lines[line]:], +                            colors.normal) +                           ) +            error = True +        self.out.write(colors.normal+'\n') +        if string_output: +            output = self.out.getvalue() +            self.out = out_old +            return (output, error) +        return (None, error) + +    def __call__(self, toktype, toktext, start_pos, end_pos, line): +        """ Token handler, with syntax highlighting.""" +        (srow,scol) = start_pos +        (erow,ecol) = end_pos +        colors = self.colors +        owrite = self.out.write + +        # line separator, so this works across platforms +        linesep = os.linesep + +        # calculate new positions +        oldpos = self.pos +        newpos = self.lines[srow] + scol +        self.pos = newpos + len(toktext) + +        # send the original whitespace, if needed +        if newpos > oldpos: +            owrite(self.raw[oldpos:newpos]) + +        # skip indenting tokens +        if toktype in [token.INDENT, token.DEDENT]: +            self.pos = newpos +            return + +        # map token type to a color group +        if token.LPAR <= toktype <= token.OP: +            toktype = token.OP +        elif toktype == token.NAME and keyword.iskeyword(toktext): +            toktype = _KEYWORD +        color = colors.get(toktype, colors[_TEXT]) + +        #print '<%s>' % toktext,    # dbg + +        # Triple quoted strings must be handled carefully so that backtracking +        # in pagers works correctly. We need color terminators on _each_ line. +        if linesep in toktext: +            toktext = toktext.replace(linesep, '%s%s%s' % +                                      (colors.normal,linesep,color)) + +        # send text +        owrite('%s%s%s' % (color,toktext,colors.normal)) + +def main(argv=None): +    """Run as a command-line script: colorize a python file or stdin using ANSI +    color escapes and print to stdout. 
+ +    Inputs: + +      - argv(None): a list of strings like sys.argv[1:] giving the command-line +        arguments. If None, use sys.argv[1:]. +    """ + +    usage_msg = """%prog [options] [filename] + +Colorize a python file or stdin using ANSI color escapes and print to stdout. +If no filename is given, or if filename is -, read standard input.""" + +    import optparse +    parser = optparse.OptionParser(usage=usage_msg) +    newopt = parser.add_option +    newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store', +           choices=['Linux','LightBG','NoColor'],default=_scheme_default, +           help="give the color scheme to use. Currently only 'Linux'\ + (default) and 'LightBG' and 'NoColor' are implemented (give without\ + quotes)") + +    opts,args = parser.parse_args(argv) + +    if len(args) > 1: +        parser.error("you must give at most one filename.") + +    if len(args) == 0: +        fname = '-' # no filename given; setup to read from stdin +    else: +        fname = args[0] + +    if fname == '-': +        stream = sys.stdin +    else: +        try: +            stream = open(fname) +        except IOError as msg: +            print(msg, file=sys.stderr) +            sys.exit(1) + +    parser = Parser() + +    # we need nested try blocks because pre-2.5 python doesn't support unified +    # try-except-finally +    try: +        try: +            # write colorized version to stdout +            parser.format(stream.read(),scheme=opts.scheme_name) +        except IOError as msg: +            # if user reads through a pager and quits, don't print traceback +            if msg.args != (32,'Broken pipe'): +                raise +    finally: +        if stream is not sys.stdin: +            stream.close() # in case a non-handled exception happened above + +if __name__ == "__main__": +    main() diff --git a/contrib/python/ipython/py2/IPython/utils/__init__.py b/contrib/python/ipython/py2/IPython/utils/__init__.py new file mode 
100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/__init__.py diff --git a/contrib/python/ipython/py2/IPython/utils/_get_terminal_size.py b/contrib/python/ipython/py2/IPython/utils/_get_terminal_size.py new file mode 100644 index 00000000000..b2c989e7aa7 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_get_terminal_size.py @@ -0,0 +1,131 @@ +# vendored version of backports.get_terminal_size as nemesapece package are a +# mess and break, especially on ubuntu. This file is under MIT Licence. +# See https://pypi.python.org/pypi/backports.shutil_get_terminal_size +# +# commit: afc5714b1545a5a3aa44cfb5e078d39165bf76ab (Feb 20, 2016) +# from +# https://github.com/chrippa/backports.shutil_get_terminal_size +# +# The MIT License (MIT) +# +# Copyright (c) 2014 Christopher Rosell +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +"""This is a backport of shutil.get_terminal_size from Python 3.3. 
+ +The original implementation is in C, but here we use the ctypes and +fcntl modules to create a pure Python version of os.get_terminal_size. +""" + +import os +import struct +import sys + +from collections import namedtuple + +__all__ = ["get_terminal_size"] + + +terminal_size = namedtuple("terminal_size", "columns lines") + +try: +    from ctypes import windll, create_string_buffer, WinError + +    _handle_ids = { +        0: -10, +        1: -11, +        2: -12, +    } + +    def _get_terminal_size(fd): +        handle = windll.kernel32.GetStdHandle(_handle_ids[fd]) +        if handle == 0: +            raise OSError('handle cannot be retrieved') +        if handle == -1: +            raise WinError() +        csbi = create_string_buffer(22) +        res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi) +        if res: +            res = struct.unpack("hhhhHhhhhhh", csbi.raw) +            left, top, right, bottom = res[5:9] +            columns = right - left + 1 +            lines = bottom - top + 1 +            return terminal_size(columns, lines) +        else: +            raise WinError() + +except ImportError: +    import fcntl +    import termios + +    def _get_terminal_size(fd): +        try: +            res = fcntl.ioctl(fd, termios.TIOCGWINSZ, b"\x00" * 4) +        except IOError as e: +            raise OSError(e) +        lines, columns = struct.unpack("hh", res) + +        return terminal_size(columns, lines) + + +def get_terminal_size(fallback=(80, 24)): +    """Get the size of the terminal window. + +    For each of the two dimensions, the environment variable, COLUMNS +    and LINES respectively, is checked. If the variable is defined and +    the value is a positive integer, it is used. + +    When COLUMNS or LINES is not defined, which is the common case, +    the terminal connected to sys.__stdout__ is queried +    by invoking os.get_terminal_size. 
+ +    If the terminal size cannot be successfully queried, either because +    the system doesn't support querying, or because we are not +    connected to a terminal, the value given in fallback parameter +    is used. Fallback defaults to (80, 24) which is the default +    size used by many terminal emulators. + +    The value returned is a named tuple of type os.terminal_size. +    """ +    # Try the environment first +    try: +        columns = int(os.environ["COLUMNS"]) +    except (KeyError, ValueError): +        columns = 0 + +    try: +        lines = int(os.environ["LINES"]) +    except (KeyError, ValueError): +        lines = 0 + +    # Only query if necessary +    if columns <= 0 or lines <= 0: +        try: +            size = _get_terminal_size(sys.__stdout__.fileno()) +        except (NameError, OSError): +            size = terminal_size(*fallback) + +        if columns <= 0: +            columns = size.columns +        if lines <= 0: +            lines = size.lines + +    return terminal_size(columns, lines) + diff --git a/contrib/python/ipython/py2/IPython/utils/_process_cli.py b/contrib/python/ipython/py2/IPython/utils/_process_cli.py new file mode 100644 index 00000000000..a7b7b90b68e --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_process_cli.py @@ -0,0 +1,78 @@ +"""cli-specific implementation of process utilities. + +cli - Common Language Infrastructure for IronPython. Code +      can run on any operating system. Check os.name for os- +      specific settings. + +This file is only meant to be imported by process.py, not by end-users. + +This file is largely untested. To become a full drop-in process +interface for IronPython will probably require you to help fill +in the details.  
+""" + +# Import cli libraries: +import clr +import System + +# Import Python libraries: +import os + +# Import IPython libraries: +from IPython.utils import py3compat +from ._process_common import arg_split + +def _find_cmd(cmd): +    """Find the full path to a command using which.""" +    paths = System.Environment.GetEnvironmentVariable("PATH").Split(os.pathsep) +    for path in paths: +        filename = os.path.join(path, cmd) +        if System.IO.File.Exists(filename): +            return py3compat.bytes_to_str(filename) +    raise OSError("command %r not found" % cmd) + +def system(cmd): +    """ +    system(cmd) should work in a cli environment on Mac OSX, Linux, +    and Windows +    """ +    psi = System.Diagnostics.ProcessStartInfo(cmd) +    psi.RedirectStandardOutput = True +    psi.RedirectStandardError = True +    psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal +    psi.UseShellExecute = False +    # Start up process: +    reg = System.Diagnostics.Process.Start(psi) + +def getoutput(cmd): +    """ +    getoutput(cmd) should work in a cli environment on Mac OSX, Linux, +    and Windows +    """ +    psi = System.Diagnostics.ProcessStartInfo(cmd) +    psi.RedirectStandardOutput = True +    psi.RedirectStandardError = True +    psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal +    psi.UseShellExecute = False +    # Start up process: +    reg = System.Diagnostics.Process.Start(psi) +    myOutput = reg.StandardOutput +    output = myOutput.ReadToEnd() +    myError = reg.StandardError +    error = myError.ReadToEnd() +    return output + +def check_pid(pid): +    """ +    Check if a process with the given PID (pid) exists +    """ +    try: +        System.Diagnostics.Process.GetProcessById(pid) +        # process with given pid is running +        return True +    except System.InvalidOperationException: +        # process wasn't started by this object (but is running) +        return True +    except System.ArgumentException: 
+        # process with given pid isn't running +        return False  diff --git a/contrib/python/ipython/py2/IPython/utils/_process_common.py b/contrib/python/ipython/py2/IPython/utils/_process_common.py new file mode 100644 index 00000000000..9ede30d3f8a --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_process_common.py @@ -0,0 +1,223 @@ +"""Common utilities for the various process_* implementations. + +This file is only meant to be imported by the platform-specific implementations +of subprocess utilities, and it contains tools that are common to all of them. +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2010-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- +import subprocess +import shlex +import sys +import os + +from IPython.utils import py3compat + +#----------------------------------------------------------------------------- +# Function definitions +#----------------------------------------------------------------------------- + +def read_no_interrupt(p): +    """Read from a pipe ignoring EINTR errors. + +    This is necessary because when reading from pipes with GUI event loops +    running in the background, often interrupts are raised that stop the +    command from completing.""" +    import errno + +    try: +        return p.read() +    except IOError as err: +        if err.errno != errno.EINTR: +            raise + + +def process_handler(cmd, callback, stderr=subprocess.PIPE): +    """Open a command in a shell subprocess and execute a callback. 
+ +    This function provides common scaffolding for creating subprocess.Popen() +    calls.  It creates a Popen object and then calls the callback with it. + +    Parameters +    ---------- +    cmd : str or list +      A command to be executed by the system, using :class:`subprocess.Popen`. +      If a string is passed, it will be run in the system shell. If a list is +      passed, it will be used directly as arguments. + +    callback : callable +      A one-argument function that will be called with the Popen object. + +    stderr : file descriptor number, optional +      By default this is set to ``subprocess.PIPE``, but you can also pass the +      value ``subprocess.STDOUT`` to force the subprocess' stderr to go into +      the same file descriptor as its stdout.  This is useful to read stdout +      and stderr combined in the order they are generated. + +    Returns +    ------- +    The return value of the provided callback is returned. +    """ +    sys.stdout.flush() +    sys.stderr.flush() +    # On win32, close_fds can't be true when using pipes for stdin/out/err +    close_fds = sys.platform != 'win32' +    # Determine if cmd should be run with system shell. +    shell = isinstance(cmd, py3compat.string_types) +    # On POSIX systems run shell commands with user-preferred shell. 
+    executable = None +    if shell and os.name == 'posix' and 'SHELL' in os.environ: +        executable = os.environ['SHELL'] +    p = subprocess.Popen(cmd, shell=shell, +                         executable=executable, +                         stdin=subprocess.PIPE, +                         stdout=subprocess.PIPE, +                         stderr=stderr, +                         close_fds=close_fds) + +    try: +        out = callback(p) +    except KeyboardInterrupt: +        print('^C') +        sys.stdout.flush() +        sys.stderr.flush() +        out = None +    finally: +        # Make really sure that we don't leave processes behind, in case the +        # call above raises an exception +        # We start by assuming the subprocess finished (to avoid NameErrors +        # later depending on the path taken) +        if p.returncode is None: +            try: +                p.terminate() +                p.poll() +            except OSError: +                pass +        # One last try on our way out +        if p.returncode is None: +            try: +                p.kill() +            except OSError: +                pass + +    return out + + +def getoutput(cmd): +    """Run a command and return its stdout/stderr as a string. + +    Parameters +    ---------- +    cmd : str or list +      A command to be executed in the system shell. + +    Returns +    ------- +    output : str +      A string containing the combination of stdout and stderr from the +    subprocess, in whatever order the subprocess originally wrote to its +    file descriptors (so the order of the information in this string is the +    correct order as would be seen if running the command in a terminal). 
+    """ +    out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT) +    if out is None: +        return '' +    return py3compat.bytes_to_str(out) + + +def getoutputerror(cmd): +    """Return (standard output, standard error) of executing cmd in a shell. + +    Accepts the same arguments as os.system(). + +    Parameters +    ---------- +    cmd : str or list +      A command to be executed in the system shell. + +    Returns +    ------- +    stdout : str +    stderr : str +    """ +    return get_output_error_code(cmd)[:2] + +def get_output_error_code(cmd): +    """Return (standard output, standard error, return code) of executing cmd +    in a shell. + +    Accepts the same arguments as os.system(). + +    Parameters +    ---------- +    cmd : str or list +      A command to be executed in the system shell. + +    Returns +    ------- +    stdout : str +    stderr : str +    returncode: int +    """ + +    out_err, p = process_handler(cmd, lambda p: (p.communicate(), p)) +    if out_err is None: +        return '', '', p.returncode +    out, err = out_err +    return py3compat.bytes_to_str(out), py3compat.bytes_to_str(err), p.returncode + +def arg_split(s, posix=False, strict=True): +    """Split a command line's arguments in a shell-like manner. + +    This is a modified version of the standard library's shlex.split() +    function, but with a default of posix=False for splitting, so that quotes +    in inputs are respected. + +    if strict=False, then any errors shlex.split would raise will result in the +    unparsed remainder being the last element of the list, rather than raising. +    This is because we sometimes use arg_split to parse things other than +    command-line args. +    """ + +    # Unfortunately, python's shlex module is buggy with unicode input: +    # http://bugs.python.org/issue1170 +    # At least encoding the input when it's unicode seems to help, but there +    # may be more problems lurking.  
Apparently this is fixed in python3. +    is_unicode = False +    if (not py3compat.PY3) and isinstance(s, unicode): +        is_unicode = True +        s = s.encode('utf-8') +    lex = shlex.shlex(s, posix=posix) +    lex.whitespace_split = True +    # Extract tokens, ensuring that things like leaving open quotes +    # does not cause this to raise.  This is important, because we +    # sometimes pass Python source through this (e.g. %timeit f(" ")), +    # and it shouldn't raise an exception. +    # It may be a bad idea to parse things that are not command-line args +    # through this function, but we do, so let's be safe about it. +    lex.commenters='' #fix for GH-1269 +    tokens = [] +    while True: +        try: +            tokens.append(next(lex)) +        except StopIteration: +            break +        except ValueError: +            if strict: +                raise +            # couldn't parse, get remaining blob as last token +            tokens.append(lex.token) +            break +     +    if is_unicode: +        # Convert the tokens back to unicode. +        tokens = [x.decode('utf-8') for x in tokens] +    return tokens diff --git a/contrib/python/ipython/py2/IPython/utils/_process_posix.py b/contrib/python/ipython/py2/IPython/utils/_process_posix.py new file mode 100644 index 00000000000..ac3a9a0507f --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_process_posix.py @@ -0,0 +1,225 @@ +"""Posix-specific implementation of process utilities. + +This file is only meant to be imported by process.py, not by end-users. +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2010-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. 
+#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- +from __future__ import print_function + +# Stdlib +import errno +import os +import subprocess as sp +import sys + +import pexpect + +# Our own +from ._process_common import getoutput, arg_split +from IPython.utils import py3compat +from IPython.utils.encoding import DEFAULT_ENCODING + +#----------------------------------------------------------------------------- +# Function definitions +#----------------------------------------------------------------------------- + +def _find_cmd(cmd): +    """Find the full path to a command using which.""" + +    path = sp.Popen(['/usr/bin/env', 'which', cmd], +                    stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] +    return py3compat.bytes_to_str(path) + + +class ProcessHandler(object): +    """Execute subprocesses under the control of pexpect. +    """ +    # Timeout in seconds to wait on each reading of the subprocess' output. +    # This should not be set too low to avoid cpu overusage from our side, +    # since we read in a loop whose period is controlled by this timeout. +    read_timeout = 0.05 + +    # Timeout to give a process if we receive SIGINT, between sending the +    # SIGINT to the process and forcefully terminating it. 
+    terminate_timeout = 0.2 + +    # File object where stdout and stderr of the subprocess will be written +    logfile = None + +    # Shell to call for subprocesses to execute +    _sh = None + +    @property +    def sh(self): +        if self._sh is None:         +            self._sh = pexpect.which('sh') +            if self._sh is None: +                raise OSError('"sh" shell not found') +         +        return self._sh + +    def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None): +        """Arguments are used for pexpect calls.""" +        self.read_timeout = (ProcessHandler.read_timeout if read_timeout is +                             None else read_timeout) +        self.terminate_timeout = (ProcessHandler.terminate_timeout if +                                  terminate_timeout is None else +                                  terminate_timeout) +        self.logfile = sys.stdout if logfile is None else logfile + +    def getoutput(self, cmd): +        """Run a command and return its stdout/stderr as a string. + +        Parameters +        ---------- +        cmd : str +          A command to be executed in the system shell. + +        Returns +        ------- +        output : str +          A string containing the combination of stdout and stderr from the +        subprocess, in whatever order the subprocess originally wrote to its +        file descriptors (so the order of the information in this string is the +        correct order as would be seen if running the command in a terminal). +        """ +        try: +            return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') +        except KeyboardInterrupt: +            print('^C', file=sys.stderr, end='') + +    def getoutput_pexpect(self, cmd): +        """Run a command and return its stdout/stderr as a string. + +        Parameters +        ---------- +        cmd : str +          A command to be executed in the system shell. 
+ +        Returns +        ------- +        output : str +          A string containing the combination of stdout and stderr from the +        subprocess, in whatever order the subprocess originally wrote to its +        file descriptors (so the order of the information in this string is the +        correct order as would be seen if running the command in a terminal). +        """ +        try: +            return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') +        except KeyboardInterrupt: +            print('^C', file=sys.stderr, end='') + +    def system(self, cmd): +        """Execute a command in a subshell. + +        Parameters +        ---------- +        cmd : str +          A command to be executed in the system shell. + +        Returns +        ------- +        int : child's exitstatus +        """ +        # Get likely encoding for the output. +        enc = DEFAULT_ENCODING +         +        # Patterns to match on the output, for pexpect.  We read input and +        # allow either a short timeout or EOF +        patterns = [pexpect.TIMEOUT, pexpect.EOF] +        # the index of the EOF pattern in the list. +        # even though we know it's 1, this call means we don't have to worry if +        # we change the above list, and forget to change this value: +        EOF_index = patterns.index(pexpect.EOF) +        # The size of the output stored so far in the process output buffer. +        # Since pexpect only appends to this buffer, each time we print we +        # record how far we've printed, so that next time we only print *new* +        # content from the buffer. +        out_size = 0 +        try: +            # Since we're not really searching the buffer for text patterns, we +            # can set pexpect's search window to be tiny and it won't matter. +            # We only search for the 'patterns' timeout or EOF, which aren't in +            # the text itself. 
+            #child = pexpect.spawn(pcmd, searchwindowsize=1) +            if hasattr(pexpect, 'spawnb'): +                child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U +            else: +                child = pexpect.spawn(self.sh, args=['-c', cmd])  # Vanilla Pexpect +            flush = sys.stdout.flush +            while True: +                # res is the index of the pattern that caused the match, so we +                # know whether we've finished (if we matched EOF) or not +                res_idx = child.expect_list(patterns, self.read_timeout) +                print(child.before[out_size:].decode(enc, 'replace'), end='') +                flush() +                if res_idx==EOF_index: +                    break +                # Update the pointer to what we've already printed +                out_size = len(child.before) +        except KeyboardInterrupt: +            # We need to send ^C to the process.  The ascii code for '^C' is 3 +            # (the character is known as ETX for 'End of Text', see +            # curses.ascii.ETX). +            child.sendline(chr(3)) +            # Read and print any more output the program might produce on its +            # way out. +            try: +                out_size = len(child.before) +                child.expect_list(patterns, self.terminate_timeout) +                print(child.before[out_size:].decode(enc, 'replace'), end='') +                sys.stdout.flush() +            except KeyboardInterrupt: +                # Impatient users tend to type it multiple times +                pass +            finally: +                # Ensure the subprocess really is terminated +                child.terminate(force=True) +        # add isalive check, to ensure exitstatus is set: +        child.isalive() + +        # We follow the subprocess pattern, returning either the exit status +        # as a positive number, or the terminating signal as a negative +        # number. 
+        # on Linux, sh returns 128+n for signals terminating child processes on Linux +        # on BSD (OS X), the signal code is set instead +        if child.exitstatus is None: +            # on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None +            if child.signalstatus is None: +                # this condition may never occur, +                # but let's be certain we always return an integer. +                return 0 +            return -child.signalstatus +        if child.exitstatus > 128: +            return -(child.exitstatus - 128) +        return child.exitstatus + + +# Make system() with a functional interface for outside use.  Note that we use +# getoutput() from the _common utils, which is built on top of popen(). Using +# pexpect to get subprocess output produces difficult to parse output, since +# programs think they are talking to a tty and produce highly formatted output +# (ls is a good example) that makes them hard. +system = ProcessHandler().system + +def check_pid(pid): +    try: +        os.kill(pid, 0) +    except OSError as err: +        if err.errno == errno.ESRCH: +            return False +        elif err.errno == errno.EPERM: +            # Don't have permission to signal the process - probably means it exists +            return True +        raise +    else: +        return True diff --git a/contrib/python/ipython/py2/IPython/utils/_process_win32.py b/contrib/python/ipython/py2/IPython/utils/_process_win32.py new file mode 100644 index 00000000000..3ac59b2c299 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_process_win32.py @@ -0,0 +1,192 @@ +"""Windows-specific implementation of process utilities. + +This file is only meant to be imported by process.py, not by end-users. +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2010-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  
The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- +from __future__ import print_function + +# stdlib +import os +import sys +import ctypes + +from ctypes import c_int, POINTER +from ctypes.wintypes import LPCWSTR, HLOCAL +from subprocess import STDOUT + +# our own imports +from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split +from . import py3compat +from .encoding import DEFAULT_ENCODING + +#----------------------------------------------------------------------------- +# Function definitions +#----------------------------------------------------------------------------- + +class AvoidUNCPath(object): +    """A context manager to protect command execution from UNC paths. + +    In the Win32 API, commands can't be invoked with the cwd being a UNC path. +    This context manager temporarily changes directory to the 'C:' drive on +    entering, and restores the original working directory on exit. + +    The context manager returns the starting working directory *if* it made a +    change and None otherwise, so that users can apply the necessary adjustment +    to their system calls in the event of a change. 
+ +    Examples +    -------- +    :: +        cmd = 'dir' +        with AvoidUNCPath() as path: +            if path is not None: +                cmd = '"pushd %s &&"%s' % (path, cmd) +            os.system(cmd) +    """ +    def __enter__(self): +        self.path = py3compat.getcwd() +        self.is_unc_path = self.path.startswith(r"\\") +        if self.is_unc_path: +            # change to c drive (as cmd.exe cannot handle UNC addresses) +            os.chdir("C:") +            return self.path +        else: +            # We return None to signal that there was no change in the working +            # directory +            return None + +    def __exit__(self, exc_type, exc_value, traceback): +        if self.is_unc_path: +            os.chdir(self.path) + + +def _find_cmd(cmd): +    """Find the full path to a .bat or .exe using the win32api module.""" +    try: +        from win32api import SearchPath +    except ImportError: +        raise ImportError('you need to have pywin32 installed for this to work') +    else: +        PATH = os.environ['PATH'] +        extensions = ['.exe', '.com', '.bat', '.py'] +        path = None +        for ext in extensions: +            try: +                path = SearchPath(PATH, cmd, ext)[0] +            except: +                pass +        if path is None: +            raise OSError("command %r not found" % cmd) +        else: +            return path + + +def _system_body(p): +    """Callback for _system.""" +    enc = DEFAULT_ENCODING +    for line in read_no_interrupt(p.stdout).splitlines(): +        line = line.decode(enc, 'replace') +        print(line, file=sys.stdout) +    for line in read_no_interrupt(p.stderr).splitlines(): +        line = line.decode(enc, 'replace') +        print(line, file=sys.stderr) + +    # Wait to finish for returncode +    return p.wait() + + +def system(cmd): +    """Win32 version of os.system() that works with network shares. 
+ +    Note that this implementation returns None, as meant for use in IPython. + +    Parameters +    ---------- +    cmd : str or list +      A command to be executed in the system shell. + +    Returns +    ------- +    None : we explicitly do NOT return the subprocess status code, as this +    utility is meant to be used extensively in IPython, where any return value +    would trigger :func:`sys.displayhook` calls. +    """ +    # The controller provides interactivity with both +    # stdin and stdout +    #import _process_win32_controller +    #_process_win32_controller.system(cmd) + +    with AvoidUNCPath() as path: +        if path is not None: +            cmd = '"pushd %s &&"%s' % (path, cmd) +        return process_handler(cmd, _system_body) + +def getoutput(cmd): +    """Return standard output of executing cmd in a shell. + +    Accepts the same arguments as os.system(). + +    Parameters +    ---------- +    cmd : str or list +      A command to be executed in the system shell. + +    Returns +    ------- +    stdout : str +    """ + +    with AvoidUNCPath() as path: +        if path is not None: +            cmd = '"pushd %s &&"%s' % (path, cmd) +        out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT) + +    if out is None: +        out = b'' +    return py3compat.bytes_to_str(out) + +try: +    CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW +    CommandLineToArgvW.arg_types = [LPCWSTR, POINTER(c_int)] +    CommandLineToArgvW.restype = POINTER(LPCWSTR) +    LocalFree = ctypes.windll.kernel32.LocalFree +    LocalFree.res_type = HLOCAL +    LocalFree.arg_types = [HLOCAL] +     +    def arg_split(commandline, posix=False, strict=True): +        """Split a command line's arguments in a shell-like manner. + +        This is a special version for windows that use a ctypes call to CommandLineToArgvW +        to do the argv splitting. The posix paramter is ignored. 
+         +        If strict=False, process_common.arg_split(...strict=False) is used instead. +        """ +        #CommandLineToArgvW returns path to executable if called with empty string. +        if commandline.strip() == "": +            return [] +        if not strict: +            # not really a cl-arg, fallback on _process_common +            return py_arg_split(commandline, posix=posix, strict=strict) +        argvn = c_int() +        result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn)) +        result_array_type = LPCWSTR * argvn.value +        result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))] +        retval = LocalFree(result_pointer) +        return result +except AttributeError: +    arg_split = py_arg_split + +def check_pid(pid): +    # OpenProcess returns 0 if no such process (of ours) exists +    # positive int otherwise +    return bool(ctypes.windll.kernel32.OpenProcess(1,0,pid)) diff --git a/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py b/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py new file mode 100644 index 00000000000..555eec23b38 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py @@ -0,0 +1,577 @@ +"""Windows-specific implementation of process utilities with direct WinAPI. + +This file is meant to be used by process.py +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2010-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +from __future__ import print_function + +# stdlib +import os, sys, threading +import ctypes, msvcrt + +# local imports +from . 
import py3compat + +# Win32 API types needed for the API calls +from ctypes import POINTER +from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \ +        ULONG, LPCWSTR +LPDWORD = POINTER(DWORD) +LPHANDLE = POINTER(HANDLE) +ULONG_PTR = POINTER(ULONG) +class SECURITY_ATTRIBUTES(ctypes.Structure): +    _fields_ = [("nLength", DWORD), +                ("lpSecurityDescriptor", LPVOID), +                ("bInheritHandle", BOOL)] +LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES) +class STARTUPINFO(ctypes.Structure): +    _fields_ = [("cb", DWORD), +                ("lpReserved", LPCWSTR), +                ("lpDesktop", LPCWSTR), +                ("lpTitle", LPCWSTR), +                ("dwX", DWORD), +                ("dwY", DWORD), +                ("dwXSize", DWORD), +                ("dwYSize", DWORD), +                ("dwXCountChars", DWORD), +                ("dwYCountChars", DWORD), +                ("dwFillAttribute", DWORD), +                ("dwFlags", DWORD), +                ("wShowWindow", WORD), +                ("cbReserved2", WORD), +                ("lpReserved2", LPVOID), +                ("hStdInput", HANDLE), +                ("hStdOutput", HANDLE), +                ("hStdError", HANDLE)] +LPSTARTUPINFO = POINTER(STARTUPINFO) +class PROCESS_INFORMATION(ctypes.Structure): +    _fields_ = [("hProcess", HANDLE), +                ("hThread", HANDLE), +                ("dwProcessId", DWORD), +                ("dwThreadId", DWORD)] +LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION) + +# Win32 API constants needed +ERROR_HANDLE_EOF = 38 +ERROR_BROKEN_PIPE = 109 +ERROR_NO_DATA = 232 +HANDLE_FLAG_INHERIT = 0x0001 +STARTF_USESTDHANDLES = 0x0100 +CREATE_SUSPENDED = 0x0004 +CREATE_NEW_CONSOLE = 0x0010 +CREATE_NO_WINDOW = 0x08000000 +STILL_ACTIVE = 259 +WAIT_TIMEOUT = 0x0102 +WAIT_FAILED = 0xFFFFFFFF +INFINITE = 0xFFFFFFFF +DUPLICATE_SAME_ACCESS = 0x00000002 +ENABLE_ECHO_INPUT = 0x0004 +ENABLE_LINE_INPUT = 0x0002 +ENABLE_PROCESSED_INPUT = 
0x0001 + +# Win32 API functions needed +GetLastError = ctypes.windll.kernel32.GetLastError +GetLastError.argtypes = [] +GetLastError.restype = DWORD + +CreateFile = ctypes.windll.kernel32.CreateFileW +CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE] +CreateFile.restype = HANDLE + +CreatePipe = ctypes.windll.kernel32.CreatePipe +CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE), +        LPSECURITY_ATTRIBUTES, DWORD] +CreatePipe.restype = BOOL + +CreateProcess = ctypes.windll.kernel32.CreateProcessW +CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES, +        LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO, +        LPPROCESS_INFORMATION] +CreateProcess.restype = BOOL + +GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess +GetExitCodeProcess.argtypes = [HANDLE, LPDWORD] +GetExitCodeProcess.restype = BOOL + +GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess +GetCurrentProcess.argtypes = [] +GetCurrentProcess.restype = HANDLE + +ResumeThread = ctypes.windll.kernel32.ResumeThread +ResumeThread.argtypes = [HANDLE] +ResumeThread.restype = DWORD + +ReadFile = ctypes.windll.kernel32.ReadFile +ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID] +ReadFile.restype = BOOL + +WriteFile = ctypes.windll.kernel32.WriteFile +WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID] +WriteFile.restype = BOOL + +GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode +GetConsoleMode.argtypes = [HANDLE, LPDWORD] +GetConsoleMode.restype = BOOL + +SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode +SetConsoleMode.argtypes = [HANDLE, DWORD] +SetConsoleMode.restype = BOOL + +FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer +FlushConsoleInputBuffer.argtypes = [HANDLE] +FlushConsoleInputBuffer.restype = BOOL + +WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject +WaitForSingleObject.argtypes = [HANDLE, DWORD] +WaitForSingleObject.restype = DWORD + 
+DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle +DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE, +        DWORD, BOOL, DWORD] +DuplicateHandle.restype = BOOL + +SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation +SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD] +SetHandleInformation.restype = BOOL + +CloseHandle = ctypes.windll.kernel32.CloseHandle +CloseHandle.argtypes = [HANDLE] +CloseHandle.restype = BOOL + +CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW +CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)] +CommandLineToArgvW.restype = POINTER(LPCWSTR) + +LocalFree = ctypes.windll.kernel32.LocalFree +LocalFree.argtypes = [HLOCAL] +LocalFree.restype = HLOCAL + +class AvoidUNCPath(object): +    """A context manager to protect command execution from UNC paths. + +    In the Win32 API, commands can't be invoked with the cwd being a UNC path. +    This context manager temporarily changes directory to the 'C:' drive on +    entering, and restores the original working directory on exit. + +    The context manager returns the starting working directory *if* it made a +    change and None otherwise, so that users can apply the necessary adjustment +    to their system calls in the event of a change. 
+ +    Examples +    -------- +    :: +        cmd = 'dir' +        with AvoidUNCPath() as path: +            if path is not None: +                cmd = '"pushd %s &&"%s' % (path, cmd) +            os.system(cmd) +    """ +    def __enter__(self): +        self.path = py3compat.getcwd() +        self.is_unc_path = self.path.startswith(r"\\") +        if self.is_unc_path: +            # change to c drive (as cmd.exe cannot handle UNC addresses) +            os.chdir("C:") +            return self.path +        else: +            # We return None to signal that there was no change in the working +            # directory +            return None + +    def __exit__(self, exc_type, exc_value, traceback): +        if self.is_unc_path: +            os.chdir(self.path) + + +class Win32ShellCommandController(object): +    """Runs a shell command in a 'with' context. + +    This implementation is Win32-specific. + +    Example: +        # Runs the command interactively with default console stdin/stdout +        with ShellCommandController('python -i') as scc: +            scc.run() + +        # Runs the command using the provided functions for stdin/stdout +        def my_stdout_func(s): +            # print or save the string 's' +            write_to_stdout(s) +        def my_stdin_func(): +            # If input is available, return it as a string. +            if input_available(): +                return get_input() +            # If no input available, return None after a short delay to +            # keep from blocking. +            else: +                time.sleep(0.01) +                return None +       +        with ShellCommandController('python -i') as scc: +            scc.run(my_stdout_func, my_stdin_func) +    """ + +    def __init__(self, cmd, mergeout = True): +        """Initializes the shell command controller. + +        The cmd is the program to execute, and mergeout is +        whether to blend stdout and stderr into one output +        in stdout. 
Merging them together in this fashion more +        reliably keeps stdout and stderr in the correct order +        especially for interactive shell usage. +        """ +        self.cmd = cmd +        self.mergeout = mergeout + +    def __enter__(self): +        cmd = self.cmd +        mergeout = self.mergeout + +        self.hstdout, self.hstdin, self.hstderr = None, None, None +        self.piProcInfo = None +        try: +            p_hstdout, c_hstdout, p_hstderr, \ +                    c_hstderr, p_hstdin, c_hstdin = [None]*6 + +            # SECURITY_ATTRIBUTES with inherit handle set to True +            saAttr = SECURITY_ATTRIBUTES() +            saAttr.nLength = ctypes.sizeof(saAttr) +            saAttr.bInheritHandle = True +            saAttr.lpSecurityDescriptor = None + +            def create_pipe(uninherit): +                """Creates a Windows pipe, which consists of two handles. + +                The 'uninherit' parameter controls which handle is not +                inherited by the child process. +                """ +                handles = HANDLE(), HANDLE() +                if not CreatePipe(ctypes.byref(handles[0]), +                            ctypes.byref(handles[1]), ctypes.byref(saAttr), 0): +                    raise ctypes.WinError() +                if not SetHandleInformation(handles[uninherit], +                            HANDLE_FLAG_INHERIT, 0): +                    raise ctypes.WinError() +                return handles[0].value, handles[1].value + +            p_hstdout, c_hstdout = create_pipe(uninherit=0) +            # 'mergeout' signals that stdout and stderr should be merged. +            # We do that by using one pipe for both of them. 
+            if mergeout: +                c_hstderr = HANDLE() +                if not DuplicateHandle(GetCurrentProcess(), c_hstdout, +                                GetCurrentProcess(), ctypes.byref(c_hstderr), +                                0, True, DUPLICATE_SAME_ACCESS): +                    raise ctypes.WinError() +            else: +                p_hstderr, c_hstderr = create_pipe(uninherit=0) +            c_hstdin,  p_hstdin  = create_pipe(uninherit=1) + +            # Create the process object +            piProcInfo = PROCESS_INFORMATION() +            siStartInfo = STARTUPINFO() +            siStartInfo.cb = ctypes.sizeof(siStartInfo) +            siStartInfo.hStdInput = c_hstdin +            siStartInfo.hStdOutput = c_hstdout +            siStartInfo.hStdError = c_hstderr +            siStartInfo.dwFlags = STARTF_USESTDHANDLES +            dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE + +            if not CreateProcess(None, +                    u"cmd.exe /c " + cmd, +                    None, None, True, dwCreationFlags, +                    None, None, ctypes.byref(siStartInfo), +                    ctypes.byref(piProcInfo)): +                raise ctypes.WinError() + +            # Close this process's versions of the child handles +            CloseHandle(c_hstdin) +            c_hstdin = None +            CloseHandle(c_hstdout) +            c_hstdout = None +            if c_hstderr is not None: +                CloseHandle(c_hstderr) +                c_hstderr = None + +            # Transfer ownership of the parent handles to the object +            self.hstdin = p_hstdin +            p_hstdin = None +            self.hstdout = p_hstdout +            p_hstdout = None +            if not mergeout: +                self.hstderr = p_hstderr +                p_hstderr = None +            self.piProcInfo = piProcInfo + +        finally: +            if p_hstdin: +                CloseHandle(p_hstdin) +            
if c_hstdin: +                CloseHandle(c_hstdin) +            if p_hstdout: +                CloseHandle(p_hstdout) +            if c_hstdout: +                CloseHandle(c_hstdout) +            if p_hstderr: +                CloseHandle(p_hstderr) +            if c_hstderr: +                CloseHandle(c_hstderr) + +        return self + +    def _stdin_thread(self, handle, hprocess, func, stdout_func): +        exitCode = DWORD() +        bytesWritten = DWORD(0) +        while True: +            #print("stdin thread loop start") +            # Get the input string (may be bytes or unicode) +            data = func() + +            # None signals to poll whether the process has exited +            if data is None: +                #print("checking for process completion") +                if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)): +                    raise ctypes.WinError() +                if exitCode.value != STILL_ACTIVE: +                    return +                # TESTING: Does zero-sized writefile help? +                if not WriteFile(handle, "", 0, +                        ctypes.byref(bytesWritten), None): +                    raise ctypes.WinError() +                continue +            #print("\nGot str %s\n" % repr(data), file=sys.stderr) + +            # Encode the string to the console encoding +            if isinstance(data, unicode): #FIXME: Python3 +                data = data.encode('utf_8') + +            # What we have now must be a string of bytes +            if not isinstance(data, str): #FIXME: Python3 +                raise RuntimeError("internal stdin function string error") + +            # An empty string signals EOF +            if len(data) == 0: +                return + +            # In a windows console, sometimes the input is echoed, +            # but sometimes not. How do we determine when to do this? +            stdout_func(data) +            # WriteFile may not accept all the data at once. 
+            # Loop until everything is processed +            while len(data) != 0: +                #print("Calling writefile") +                if not WriteFile(handle, data, len(data), +                        ctypes.byref(bytesWritten), None): +                    # This occurs at exit +                    if GetLastError() == ERROR_NO_DATA: +                        return +                    raise ctypes.WinError() +                #print("Called writefile") +                data = data[bytesWritten.value:] + +    def _stdout_thread(self, handle, func): +        # Allocate the output buffer +        data = ctypes.create_string_buffer(4096) +        while True: +            bytesRead = DWORD(0) +            if not ReadFile(handle, data, 4096, +                        ctypes.byref(bytesRead), None): +                le = GetLastError() +                if le == ERROR_BROKEN_PIPE: +                    return +                else: +                    raise ctypes.WinError() +            # FIXME: Python3 +            s = data.value[0:bytesRead.value] +            #print("\nv: %s" % repr(s), file=sys.stderr) +            func(s.decode('utf_8', 'replace')) + +    def run(self, stdout_func = None, stdin_func = None, stderr_func = None): +        """Runs the process, using the provided functions for I/O. + +        The function stdin_func should return strings whenever a +        character or characters become available. +        The functions stdout_func and stderr_func are called whenever +        something is printed to stdout or stderr, respectively. +        These functions are called from different threads (but not +        concurrently, because of the GIL). 
+        """ +        if stdout_func is None and stdin_func is None and stderr_func is None: +            return self._run_stdio() + +        if stderr_func is not None and self.mergeout: +            raise RuntimeError("Shell command was initiated with " +                    "merged stdin/stdout, but a separate stderr_func " +                    "was provided to the run() method") + +        # Create a thread for each input/output handle +        stdin_thread = None +        threads = [] +        if stdin_func: +            stdin_thread = threading.Thread(target=self._stdin_thread, +                                args=(self.hstdin, self.piProcInfo.hProcess, +                                stdin_func, stdout_func)) +        threads.append(threading.Thread(target=self._stdout_thread, +                                    args=(self.hstdout, stdout_func))) +        if not self.mergeout: +            if stderr_func is None: +                stderr_func = stdout_func +            threads.append(threading.Thread(target=self._stdout_thread, +                                        args=(self.hstderr, stderr_func))) +        # Start the I/O threads and the process +        if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF: +            raise ctypes.WinError() +        if stdin_thread is not None: +            stdin_thread.start() +        for thread in threads: +            thread.start() +        # Wait for the process to complete +        if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \ +                    WAIT_FAILED: +            raise ctypes.WinError() +        # Wait for the I/O threads to complete +        for thread in threads: +            thread.join() + +        # Wait for the stdin thread to complete +        if stdin_thread is not None: +            stdin_thread.join() + +    def _stdin_raw_nonblock(self): +        """Use the raw Win32 handle of sys.stdin to do non-blocking reads""" +        # WARNING: This is experimental, and produces 
inconsistent results. +        #          It's possible for the handle not to be appropriate for use +        #          with WaitForSingleObject, among other things. +        handle = msvcrt.get_osfhandle(sys.stdin.fileno()) +        result = WaitForSingleObject(handle, 100) +        if result == WAIT_FAILED: +            raise ctypes.WinError() +        elif result == WAIT_TIMEOUT: +            print(".", end='') +            return None +        else: +            data = ctypes.create_string_buffer(256) +            bytesRead = DWORD(0) +            print('?', end='') + +            if not ReadFile(handle, data, 256, +                        ctypes.byref(bytesRead), None): +                raise ctypes.WinError() +            # This ensures the non-blocking works with an actual console +            # Not checking the error, so the processing will still work with +            # other handle types +            FlushConsoleInputBuffer(handle) + +            data = data.value +            data = data.replace('\r\n', '\n') +            data = data.replace('\r', '\n') +            print(repr(data) + " ", end='') +            return data + +    def _stdin_raw_block(self): +        """Use a blocking stdin read""" +        # The big problem with the blocking read is that it doesn't +        # exit when it's supposed to in all contexts. An extra +        # key-press may be required to trigger the exit. 
+        try: +            data = sys.stdin.read(1) +            data = data.replace('\r', '\n') +            return data +        except WindowsError as we: +            if we.winerror == ERROR_NO_DATA: +                # This error occurs when the pipe is closed +                return None +            else: +                # Otherwise let the error propagate +                raise we + +    def _stdout_raw(self, s): +        """Writes the string to stdout""" +        print(s, end='', file=sys.stdout) +        sys.stdout.flush() + +    def _stderr_raw(self, s): +        """Writes the string to stdout""" +        print(s, end='', file=sys.stderr) +        sys.stderr.flush() + +    def _run_stdio(self): +        """Runs the process using the system standard I/O. + +        IMPORTANT: stdin needs to be asynchronous, so the Python +                   sys.stdin object is not used. Instead, +                   msvcrt.kbhit/getwch are used asynchronously. +        """ +        # Disable Line and Echo mode +        #lpMode = DWORD() +        #handle = msvcrt.get_osfhandle(sys.stdin.fileno()) +        #if GetConsoleMode(handle, ctypes.byref(lpMode)): +        #    set_console_mode = True +        #    if not SetConsoleMode(handle, lpMode.value & +        #            ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)): +        #        raise ctypes.WinError() + +        if self.mergeout: +            return self.run(stdout_func = self._stdout_raw, +                    stdin_func = self._stdin_raw_block) +        else: +            return self.run(stdout_func = self._stdout_raw, +                    stdin_func = self._stdin_raw_block, +                    stderr_func = self._stderr_raw) + +        # Restore the previous console mode +        #if set_console_mode: +        #    if not SetConsoleMode(handle, lpMode.value): +        #        raise ctypes.WinError() + +    def __exit__(self, exc_type, exc_value, traceback): +        if self.hstdin: +          
  CloseHandle(self.hstdin) +            self.hstdin = None +        if self.hstdout: +            CloseHandle(self.hstdout) +            self.hstdout = None +        if self.hstderr: +            CloseHandle(self.hstderr) +            self.hstderr = None +        if self.piProcInfo is not None: +            CloseHandle(self.piProcInfo.hProcess) +            CloseHandle(self.piProcInfo.hThread) +            self.piProcInfo = None + + +def system(cmd): +    """Win32 version of os.system() that works with network shares. + +    Note that this implementation returns None, as meant for use in IPython. + +    Parameters +    ---------- +    cmd : str +      A command to be executed in the system shell. + +    Returns +    ------- +    None : we explicitly do NOT return the subprocess status code, as this +    utility is meant to be used extensively in IPython, where any return value +    would trigger :func:`sys.displayhook` calls. +    """ +    with AvoidUNCPath() as path: +        if path is not None: +            cmd = '"pushd %s &&"%s' % (path, cmd) +        with Win32ShellCommandController(cmd) as scc: +            scc.run() + + +if __name__ == "__main__": +    print("Test starting!") +    #system("cmd") +    system("python -i") +    print("Test finished!") diff --git a/contrib/python/ipython/py2/IPython/utils/_signatures.py b/contrib/python/ipython/py2/IPython/utils/_signatures.py new file mode 100644 index 00000000000..20f52b98ed9 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_signatures.py @@ -0,0 +1,818 @@ +"""Function signature objects for callables. + +Back port of Python 3.3's function signature tools from the inspect module, +modified to be compatible with Python 2.7 and 3.2+. 
+""" + +#----------------------------------------------------------------------------- +#  Python 3.3 stdlib inspect.py is public domain +#  +#  Backports Copyright (C) 2013 Aaron Iles +#  Used under Apache License Version 2.0 +# +#  Further Changes are Copyright (C) 2013 The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function +import itertools +import functools +import re +import types +import inspect + + +# patch for single-file +# we don't support 2.6, so we can just import OrderedDict +from collections import OrderedDict + +__version__ = '0.3' +# end patch + +__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature'] + + +_WrapperDescriptor = type(type.__call__) +_MethodWrapper = type(all.__call__) + +_NonUserDefinedCallables = (_WrapperDescriptor, +                            _MethodWrapper, +                            types.BuiltinFunctionType) + + +def formatannotation(annotation, base_module=None): +    if isinstance(annotation, type): +        if annotation.__module__ in ('builtins', '__builtin__', base_module): +            return annotation.__name__ +        return annotation.__module__+'.'+annotation.__name__ +    return repr(annotation) + + +def _get_user_defined_method(cls, method_name, *nested): +    try: +        if cls is type: +            return +        meth = getattr(cls, method_name) +        for name in nested: +            meth = getattr(meth, name, meth) +    except AttributeError: +        return +    else: +        if not isinstance(meth, _NonUserDefinedCallables): +            # Once '__signature__' will be added to 'C'-level +            # callables, this check won't be necessary +            return meth + + +def signature(obj): +    '''Get a signature object for the 
passed callable.''' + +    if not callable(obj): +        raise TypeError('{0!r} is not a callable object'.format(obj)) + +    if inspect.ismethod(obj): +        if obj.__self__ is None: +            # Unbound method - treat it as a function (no distinction in Py 3) +            obj = obj.__func__ +        else: +            # Bound method: trim off the first parameter (typically self or cls) +            sig = signature(obj.__func__) +            return sig.replace(parameters=tuple(sig.parameters.values())[1:]) + +    try: +        sig = obj.__signature__ +    except AttributeError: +        pass +    else: +        if sig is not None: +            return sig + +    try: +        # Was this function wrapped by a decorator? +        wrapped = obj.__wrapped__ +    except AttributeError: +        pass +    else: +        return signature(wrapped) + +    if inspect.isfunction(obj): +        return Signature.from_function(obj) + +    if isinstance(obj, functools.partial): +        sig = signature(obj.func) + +        new_params = OrderedDict(sig.parameters.items()) + +        partial_args = obj.args or () +        partial_keywords = obj.keywords or {} +        try: +            ba = sig.bind_partial(*partial_args, **partial_keywords) +        except TypeError as ex: +            msg = 'partial object {0!r} has incorrect arguments'.format(obj) +            raise ValueError(msg) + +        for arg_name, arg_value in ba.arguments.items(): +            param = new_params[arg_name] +            if arg_name in partial_keywords: +                # We set a new default value, because the following code +                # is correct: +                # +                #   >>> def foo(a): print(a) +                #   >>> print(partial(partial(foo, a=10), a=20)()) +                #   20 +                #   >>> print(partial(partial(foo, a=10), a=20)(a=30)) +                #   30 +                # +                # So, with 'partial' objects, passing a keyword argument is + 
               # like setting a new default value for the corresponding +                # parameter +                # +                # We also mark this parameter with '_partial_kwarg' +                # flag.  Later, in '_bind', the 'default' value of this +                # parameter will be added to 'kwargs', to simulate +                # the 'functools.partial' real call. +                new_params[arg_name] = param.replace(default=arg_value, +                                                     _partial_kwarg=True) + +            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and +                            not param._partial_kwarg): +                new_params.pop(arg_name) + +        return sig.replace(parameters=new_params.values()) + +    sig = None +    if isinstance(obj, type): +        # obj is a class or a metaclass + +        # First, let's see if it has an overloaded __call__ defined +        # in its metaclass +        call = _get_user_defined_method(type(obj), '__call__') +        if call is not None: +            sig = signature(call) +        else: +            # Now we check if the 'obj' class has a '__new__' method +            new = _get_user_defined_method(obj, '__new__') +            if new is not None: +                sig = signature(new) +            else: +                # Finally, we should have at least __init__ implemented +                init = _get_user_defined_method(obj, '__init__') +                if init is not None: +                    sig = signature(init) +    elif not isinstance(obj, _NonUserDefinedCallables): +        # An object with __call__ +        # We also check that the 'obj' is not an instance of +        # _WrapperDescriptor or _MethodWrapper to avoid +        # infinite recursion (and even potential segfault) +        call = _get_user_defined_method(type(obj), '__call__', 'im_func') +        if call is not None: +            sig = signature(call) + +    if sig is not None: +        return sig + 
+    if isinstance(obj, types.BuiltinFunctionType): +        # Raise a nicer error message for builtins +        msg = 'no signature found for builtin function {0!r}'.format(obj) +        raise ValueError(msg) + +    raise ValueError('callable {0!r} is not supported by signature'.format(obj)) + + +class _void(object): +    '''A private marker - used in Parameter & Signature''' + + +class _empty(object): +    pass + + +class _ParameterKind(int): +    def __new__(self, *args, **kwargs): +        obj = int.__new__(self, *args) +        obj._name = kwargs['name'] +        return obj + +    def __str__(self): +        return self._name + +    def __repr__(self): +        return '<_ParameterKind: {0!r}>'.format(self._name) + + +_POSITIONAL_ONLY        = _ParameterKind(0, name='POSITIONAL_ONLY') +_POSITIONAL_OR_KEYWORD  = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD') +_VAR_POSITIONAL         = _ParameterKind(2, name='VAR_POSITIONAL') +_KEYWORD_ONLY           = _ParameterKind(3, name='KEYWORD_ONLY') +_VAR_KEYWORD            = _ParameterKind(4, name='VAR_KEYWORD') + + +class Parameter(object): +    '''Represents a parameter in a function signature. + +    Has the following public attributes: + +    * name : str +        The name of the parameter as a string. +    * default : object +        The default value for the parameter if specified.  If the +        parameter has no default value, this attribute is not set. +    * annotation +        The annotation for the parameter if specified.  If the +        parameter has no annotation, this attribute is not set. +    * kind : str +        Describes how argument values are bound to the parameter. +        Possible values: `Parameter.POSITIONAL_ONLY`, +        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, +        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. 
+    ''' + +    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg') + +    POSITIONAL_ONLY         = _POSITIONAL_ONLY +    POSITIONAL_OR_KEYWORD   = _POSITIONAL_OR_KEYWORD +    VAR_POSITIONAL          = _VAR_POSITIONAL +    KEYWORD_ONLY            = _KEYWORD_ONLY +    VAR_KEYWORD             = _VAR_KEYWORD + +    empty = _empty + +    def __init__(self, name, kind, default=_empty, annotation=_empty, +                 _partial_kwarg=False): + +        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD, +                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD): +            raise ValueError("invalid value for 'Parameter.kind' attribute") +        self._kind = kind + +        if default is not _empty: +            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD): +                msg = '{0} parameters cannot have default values'.format(kind) +                raise ValueError(msg) +        self._default = default +        self._annotation = annotation + +        if name is None: +            if kind != _POSITIONAL_ONLY: +                raise ValueError("None is not a valid name for a " +                                 "non-positional-only parameter") +            self._name = name +        else: +            name = str(name) +            if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I): +                msg = '{0!r} is not a valid parameter name'.format(name) +                raise ValueError(msg) +            self._name = name + +        self._partial_kwarg = _partial_kwarg + +    @property +    def name(self): +        return self._name + +    @property +    def default(self): +        return self._default + +    @property +    def annotation(self): +        return self._annotation + +    @property +    def kind(self): +        return self._kind + +    def replace(self, name=_void, kind=_void, annotation=_void, +                default=_void, _partial_kwarg=_void): +        '''Creates a customized copy of 
the Parameter.''' + +        if name is _void: +            name = self._name + +        if kind is _void: +            kind = self._kind + +        if annotation is _void: +            annotation = self._annotation + +        if default is _void: +            default = self._default + +        if _partial_kwarg is _void: +            _partial_kwarg = self._partial_kwarg + +        return type(self)(name, kind, default=default, annotation=annotation, +                          _partial_kwarg=_partial_kwarg) + +    def __str__(self): +        kind = self.kind + +        formatted = self._name +        if kind == _POSITIONAL_ONLY: +            if formatted is None: +                formatted = '' +            formatted = '<{0}>'.format(formatted) + +        # Add annotation and default value +        if self._annotation is not _empty: +            formatted = '{0}:{1}'.format(formatted, +                                       formatannotation(self._annotation)) + +        if self._default is not _empty: +            formatted = '{0}={1}'.format(formatted, repr(self._default)) + +        if kind == _VAR_POSITIONAL: +            formatted = '*' + formatted +        elif kind == _VAR_KEYWORD: +            formatted = '**' + formatted + +        return formatted + +    def __repr__(self): +        return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__, +                                           id(self), self.name) + +    def __hash__(self): +        msg = "unhashable type: '{0}'".format(self.__class__.__name__) +        raise TypeError(msg) + +    def __eq__(self, other): +        return (issubclass(other.__class__, Parameter) and +                self._name == other._name and +                self._kind == other._kind and +                self._default == other._default and +                self._annotation == other._annotation) + +    def __ne__(self, other): +        return not self.__eq__(other) + + +class BoundArguments(object): +    '''Result of 
:meth:`Signature.bind` call.  Holds the mapping of arguments +    to the function's parameters. + +    Has the following public attributes: + +    arguments : :class:`collections.OrderedDict` +      An ordered mutable mapping of parameters' names to arguments' values. +      Does not contain arguments' default values. +    signature : :class:`Signature` +      The Signature object that created this instance. +    args : tuple +      Tuple of positional arguments values. +    kwargs : dict +      Dict of keyword arguments values. +    ''' + +    def __init__(self, signature, arguments): +        self.arguments = arguments +        self._signature = signature + +    @property +    def signature(self): +        return self._signature + +    @property +    def args(self): +        args = [] +        for param_name, param in self._signature.parameters.items(): +            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or +                                                    param._partial_kwarg): +                # Keyword arguments mapped by 'functools.partial' +                # (Parameter._partial_kwarg is True) are mapped +                # in 'BoundArguments.kwargs', along with VAR_KEYWORD & +                # KEYWORD_ONLY +                break + +            try: +                arg = self.arguments[param_name] +            except KeyError: +                # We're done here. 
Other arguments +                # will be mapped in 'BoundArguments.kwargs' +                break +            else: +                if param.kind == _VAR_POSITIONAL: +                    # *args +                    args.extend(arg) +                else: +                    # plain argument +                    args.append(arg) + +        return tuple(args) + +    @property +    def kwargs(self): +        kwargs = {} +        kwargs_started = False +        for param_name, param in self._signature.parameters.items(): +            if not kwargs_started: +                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or +                                                param._partial_kwarg): +                    kwargs_started = True +                else: +                    if param_name not in self.arguments: +                        kwargs_started = True +                        continue + +            if not kwargs_started: +                continue + +            try: +                arg = self.arguments[param_name] +            except KeyError: +                pass +            else: +                if param.kind == _VAR_KEYWORD: +                    # **kwargs +                    kwargs.update(arg) +                else: +                    # plain keyword argument +                    kwargs[param_name] = arg + +        return kwargs + +    def __hash__(self): +        msg = "unhashable type: '{0}'".format(self.__class__.__name__) +        raise TypeError(msg) + +    def __eq__(self, other): +        return (issubclass(other.__class__, BoundArguments) and +                self.signature == other.signature and +                self.arguments == other.arguments) + +    def __ne__(self, other): +        return not self.__eq__(other) + + +class Signature(object): +    '''A Signature object represents the overall signature of a function. 
+    It stores a Parameter object for each parameter accepted by the +    function, as well as information specific to the function itself. + +    A Signature object has the following public attributes: + +    parameters : :class:`collections.OrderedDict` +      An ordered mapping of parameters' names to the corresponding +      Parameter objects (keyword-only arguments are in the same order +      as listed in `code.co_varnames`). +    return_annotation +      The annotation for the return type of the function if specified. +      If the function has no annotation for its return type, this +      attribute is not set. +    ''' + +    __slots__ = ('_return_annotation', '_parameters') + +    _parameter_cls = Parameter +    _bound_arguments_cls = BoundArguments + +    empty = _empty + +    def __init__(self, parameters=None, return_annotation=_empty, +                 __validate_parameters__=True): +        '''Constructs Signature from the given list of Parameter +        objects and 'return_annotation'.  All arguments are optional. 
+        ''' + +        if parameters is None: +            params = OrderedDict() +        else: +            if __validate_parameters__: +                params = OrderedDict() +                top_kind = _POSITIONAL_ONLY + +                for idx, param in enumerate(parameters): +                    kind = param.kind +                    if kind < top_kind: +                        msg = 'wrong parameter order: {0} before {1}' +                        msg = msg.format(top_kind, param.kind) +                        raise ValueError(msg) +                    else: +                        top_kind = kind + +                    name = param.name +                    if name is None: +                        name = str(idx) +                        param = param.replace(name=name) + +                    if name in params: +                        msg = 'duplicate parameter name: {0!r}'.format(name) +                        raise ValueError(msg) +                    params[name] = param +            else: +                params = OrderedDict(((param.name, param) +                                                for param in parameters)) + +        self._parameters = params +        self._return_annotation = return_annotation + +    @classmethod +    def from_function(cls, func): +        '''Constructs Signature for the given python function''' + +        if not inspect.isfunction(func): +            raise TypeError('{0!r} is not a Python function'.format(func)) + +        Parameter = cls._parameter_cls + +        # Parameter information. 
+        func_code = func.__code__ +        pos_count = func_code.co_argcount +        arg_names = func_code.co_varnames +        positional = tuple(arg_names[:pos_count]) +        keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0) +        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)] +        annotations = getattr(func, '__annotations__', {}) +        defaults = func.__defaults__ +        kwdefaults = getattr(func, '__kwdefaults__', None) + +        if defaults: +            pos_default_count = len(defaults) +        else: +            pos_default_count = 0 + +        parameters = [] + +        # Non-keyword-only parameters w/o defaults. +        non_default_count = pos_count - pos_default_count +        for name in positional[:non_default_count]: +            annotation = annotations.get(name, _empty) +            parameters.append(Parameter(name, annotation=annotation, +                                        kind=_POSITIONAL_OR_KEYWORD)) + +        # ... w/ defaults. +        for offset, name in enumerate(positional[non_default_count:]): +            annotation = annotations.get(name, _empty) +            parameters.append(Parameter(name, annotation=annotation, +                                        kind=_POSITIONAL_OR_KEYWORD, +                                        default=defaults[offset])) + +        # *args +        if func_code.co_flags & 0x04: +            name = arg_names[pos_count + keyword_only_count] +            annotation = annotations.get(name, _empty) +            parameters.append(Parameter(name, annotation=annotation, +                                        kind=_VAR_POSITIONAL)) + +        # Keyword-only parameters. 
+        for name in keyword_only: +            default = _empty +            if kwdefaults is not None: +                default = kwdefaults.get(name, _empty) + +            annotation = annotations.get(name, _empty) +            parameters.append(Parameter(name, annotation=annotation, +                                        kind=_KEYWORD_ONLY, +                                        default=default)) +        # **kwargs +        if func_code.co_flags & 0x08: +            index = pos_count + keyword_only_count +            if func_code.co_flags & 0x04: +                index += 1 + +            name = arg_names[index] +            annotation = annotations.get(name, _empty) +            parameters.append(Parameter(name, annotation=annotation, +                                        kind=_VAR_KEYWORD)) + +        return cls(parameters, +                   return_annotation=annotations.get('return', _empty), +                   __validate_parameters__=False) + +    @property +    def parameters(self): +        try: +            return types.MappingProxyType(self._parameters) +        except AttributeError: +            return OrderedDict(self._parameters.items()) + +    @property +    def return_annotation(self): +        return self._return_annotation + +    def replace(self, parameters=_void, return_annotation=_void): +        '''Creates a customized copy of the Signature. +        Pass 'parameters' and/or 'return_annotation' arguments +        to override them in the new copy. 
+        ''' + +        if parameters is _void: +            parameters = self.parameters.values() + +        if return_annotation is _void: +            return_annotation = self._return_annotation + +        return type(self)(parameters, +                          return_annotation=return_annotation) + +    def __hash__(self): +        msg = "unhashable type: '{0}'".format(self.__class__.__name__) +        raise TypeError(msg) + +    def __eq__(self, other): +        if (not issubclass(type(other), Signature) or +                    self.return_annotation != other.return_annotation or +                    len(self.parameters) != len(other.parameters)): +            return False + +        other_positions = dict((param, idx) +                           for idx, param in enumerate(other.parameters.keys())) + +        for idx, (param_name, param) in enumerate(self.parameters.items()): +            if param.kind == _KEYWORD_ONLY: +                try: +                    other_param = other.parameters[param_name] +                except KeyError: +                    return False +                else: +                    if param != other_param: +                        return False +            else: +                try: +                    other_idx = other_positions[param_name] +                except KeyError: +                    return False +                else: +                    if (idx != other_idx or +                                    param != other.parameters[param_name]): +                        return False + +        return True + +    def __ne__(self, other): +        return not self.__eq__(other) + +    def _bind(self, args, kwargs, partial=False): +        '''Private method.  
Don't use directly.''' + +        arguments = OrderedDict() + +        parameters = iter(self.parameters.values()) +        parameters_ex = () +        arg_vals = iter(args) + +        if partial: +            # Support for binding arguments to 'functools.partial' objects. +            # See 'functools.partial' case in 'signature()' implementation +            # for details. +            for param_name, param in self.parameters.items(): +                if (param._partial_kwarg and param_name not in kwargs): +                    # Simulating 'functools.partial' behavior +                    kwargs[param_name] = param.default + +        while True: +            # Let's iterate through the positional arguments and corresponding +            # parameters +            try: +                arg_val = next(arg_vals) +            except StopIteration: +                # No more positional arguments +                try: +                    param = next(parameters) +                except StopIteration: +                    # No more parameters. That's it. Just need to check that +                    # we have no `kwargs` after this while loop +                    break +                else: +                    if param.kind == _VAR_POSITIONAL: +                        # That's OK, just empty *args.  
Let's start parsing +                        # kwargs +                        break +                    elif param.name in kwargs: +                        if param.kind == _POSITIONAL_ONLY: +                            msg = '{arg!r} parameter is positional only, ' \ +                                  'but was passed as a keyword' +                            msg = msg.format(arg=param.name) +                            raise TypeError(msg) +                        parameters_ex = (param,) +                        break +                    elif (param.kind == _VAR_KEYWORD or +                                                param.default is not _empty): +                        # That's fine too - we have a default value for this +                        # parameter.  So, lets start parsing `kwargs`, starting +                        # with the current parameter +                        parameters_ex = (param,) +                        break +                    else: +                        if partial: +                            parameters_ex = (param,) +                            break +                        else: +                            msg = '{arg!r} parameter lacking default value' +                            msg = msg.format(arg=param.name) +                            raise TypeError(msg) +            else: +                # We have a positional argument to process +                try: +                    param = next(parameters) +                except StopIteration: +                    raise TypeError('too many positional arguments') +                else: +                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): +                        # Looks like we have no parameter for this positional +                        # argument +                        raise TypeError('too many positional arguments') + +                    if param.kind == _VAR_POSITIONAL: +                        # We have an '*args'-like argument, let's fill 
it with +                        # all positional arguments we have left and move on to +                        # the next phase +                        values = [arg_val] +                        values.extend(arg_vals) +                        arguments[param.name] = tuple(values) +                        break + +                    if param.name in kwargs: +                        raise TypeError('multiple values for argument ' +                                        '{arg!r}'.format(arg=param.name)) + +                    arguments[param.name] = arg_val + +        # Now, we iterate through the remaining parameters to process +        # keyword arguments +        kwargs_param = None +        for param in itertools.chain(parameters_ex, parameters): +            if param.kind == _POSITIONAL_ONLY: +                # This should never happen in case of a properly built +                # Signature object (but let's have this check here +                # to ensure correct behaviour just in case) +                raise TypeError('{arg!r} parameter is positional only, ' +                                'but was passed as a keyword'. \ +                                format(arg=param.name)) + +            if param.kind == _VAR_KEYWORD: +                # Memorize that we have a '**kwargs'-like parameter +                kwargs_param = param +                continue + +            param_name = param.name +            try: +                arg_val = kwargs.pop(param_name) +            except KeyError: +                # We have no value for this parameter.  It's fine though, +                # if it has a default value, or it is an '*args'-like +                # parameter, left alone by the processing of positional +                # arguments. 
+                if (not partial and param.kind != _VAR_POSITIONAL and +                                                    param.default is _empty): +                    raise TypeError('{arg!r} parameter lacking default value'. \ +                                    format(arg=param_name)) + +            else: +                arguments[param_name] = arg_val + +        if kwargs: +            if kwargs_param is not None: +                # Process our '**kwargs'-like parameter +                arguments[kwargs_param.name] = kwargs +            else: +                raise TypeError('too many keyword arguments') + +        return self._bound_arguments_cls(self, arguments) + +    def bind(self, *args, **kwargs): +        '''Get a :class:`BoundArguments` object, that maps the passed `args` +        and `kwargs` to the function's signature.  Raises :exc:`TypeError` +        if the passed arguments can not be bound. +        ''' +        return self._bind(args, kwargs) + +    def bind_partial(self, *args, **kwargs): +        '''Get a :class:`BoundArguments` object, that partially maps the +        passed `args` and `kwargs` to the function's signature. +        Raises :exc:`TypeError` if the passed arguments can not be bound. 
+        ''' +        return self._bind(args, kwargs, partial=True) + +    def __str__(self): +        result = [] +        render_kw_only_separator = True +        for idx, param in enumerate(self.parameters.values()): +            formatted = str(param) + +            kind = param.kind +            if kind == _VAR_POSITIONAL: +                # OK, we have an '*args'-like parameter, so we won't need +                # a '*' to separate keyword-only arguments +                render_kw_only_separator = False +            elif kind == _KEYWORD_ONLY and render_kw_only_separator: +                # We have a keyword-only parameter to render and we haven't +                # rendered an '*args'-like parameter before, so add a '*' +                # separator to the parameters list ("foo(arg1, *, arg2)" case) +                result.append('*') +                # This condition should be only triggered once, so +                # reset the flag +                render_kw_only_separator = False + +            result.append(formatted) + +        rendered = '({0})'.format(', '.join(result)) + +        if self.return_annotation is not _empty: +            anno = formatannotation(self.return_annotation) +            rendered += ' -> {0}'.format(anno) + +        return rendered + diff --git a/contrib/python/ipython/py2/IPython/utils/_sysinfo.py b/contrib/python/ipython/py2/IPython/utils/_sysinfo.py new file mode 100644 index 00000000000..21dd2fccebb --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_sysinfo.py @@ -0,0 +1,2 @@ +# GENERATED BY setup.py +commit = u"2348ebbe4" diff --git a/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py b/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py new file mode 100644 index 00000000000..195df96ee50 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py @@ -0,0 +1,439 @@ +"""Patched version of standard library tokenize, to deal with various bugs. 
+ +Patches + +- Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing), +  manually applied. +- Newlines in comments and blank lines should be either NL or NEWLINE, depending +  on whether they are in a multi-line statement. Filed as Python issue #17061. + +------------------------------------------------------------------------------- +Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens.  It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF).  It generates +5-tuples with these members: + +    the token type (see token.py) +    the token (a string) +    the starting (row, column) indices of the token (a 2-tuple of ints) +    the ending (row, column) indices of the token (a 2-tuple of ints) +    the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points +    tokenize_loop(readline, tokeneater) +    tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.""" +from __future__ import print_function + +__author__ = 'Ka-Ping Yee <[email protected]>' +__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' +               'Skip Montanaro, Raymond Hettinger') + +import string, re +from token import * + +import token +__all__ = [x for x in dir(token) if not x.startswith("_")] +__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"] +del x +del token + +__all__ += ["TokenError"] + +COMMENT = N_TOKENS +tok_name[COMMENT] = 'COMMENT' +NL = N_TOKENS + 1 +tok_name[NL] = 'NL' +N_TOKENS += 2 + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return 
group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' + +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'[a-zA-Z_]\w*' + +Hexnumber = r'0[xX][\da-fA-F]+[lL]?' +Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?' +Binnumber = r'0[bB][01]+[lL]?' +Decnumber = r'[1-9]\d*[lL]?' +Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?\d+' +Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) +Expfloat = r'\d+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""') +# Single-line ' or " string. +String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", +               r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", +                 r"//=?", +                 r"[+\-*/%&|^=<>]=?", +                 r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'[:;.,`@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. 
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + +                group("'", r'\\\r?\n'), +                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + +                group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +tokenprog, pseudoprog, single3prog, double3prog = map( +    re.compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": re.compile(Single), '"': re.compile(Double), +            "'''": single3prog, '"""': double3prog, +            "r'''": single3prog, 'r"""': double3prog, +            "u'''": single3prog, 'u"""': double3prog, +            "ur'''": single3prog, 'ur"""': double3prog, +            "R'''": single3prog, 'R"""': double3prog, +            "U'''": single3prog, 'U"""': double3prog, +            "uR'''": single3prog, 'uR"""': double3prog, +            "Ur'''": single3prog, 'Ur"""': double3prog, +            "UR'''": single3prog, 'UR"""': double3prog, +            "b'''": single3prog, 'b"""': double3prog, +            "br'''": single3prog, 'br"""': double3prog, +            "B'''": single3prog, 'B"""': double3prog, +            "bR'''": single3prog, 'bR"""': double3prog, +            "Br'''": single3prog, 'Br"""': double3prog, +            "BR'''": single3prog, 'BR"""': double3prog, +            'r': None, 'R': None, 'u': None, 'U': None, +            'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', +          "r'''", 'r"""', "R'''", 'R"""', +          "u'''", 'u"""', "U'''", 'U"""', +          "ur'''", 'ur"""', "Ur'''", 'Ur"""', +          "uR'''", 'uR"""', "UR'''", 'UR"""', +          "b'''", 'b"""', "B'''", 'B"""', +          "br'''", 'br"""', "Br'''", 'Br"""', +          "bR'''", 'bR"""', "BR'''", 'BR"""'): +    triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', +          "r'", 'r"', "R'", 'R"', +          "u'", 'u"', "U'", 'U"', +          "ur'", 'ur"', "Ur'", 'Ur"', +          
"uR'", 'uR"', "UR'", 'UR"', +          "b'", 'b"', "B'", 'B"', +          "br'", 'br"', "Br'", 'Br"', +          "bR'", 'bR"', "BR'", 'BR"' ): +    single_quoted[t] = t + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + +def printtoken(type, token, srow_scol, erow_ecol, line): # for testing +    srow, scol = srow_scol +    erow, ecol = erow_ecol +    print("%d,%d-%d,%d:\t%s\t%s" % \ +        (srow, scol, erow, ecol, tok_name[type], repr(token))) + +def tokenize(readline, tokeneater=printtoken): +    """ +    The tokenize() function accepts two parameters: one representing the +    input stream, and one providing an output mechanism for tokenize(). + +    The first parameter, readline, must be a callable object which provides +    the same interface as the readline() method of built-in file objects. +    Each call to the function should return one line of input as a string. + +    The second parameter, tokeneater, must also be a callable object. It is +    called once for each token, with five arguments, corresponding to the +    tuples generated by generate_tokens(). 
+    """ +    try: +        tokenize_loop(readline, tokeneater) +    except StopTokenizing: +        pass + +# backwards compatible interface +def tokenize_loop(readline, tokeneater): +    for token_info in generate_tokens(readline): +        tokeneater(*token_info) + +class Untokenizer: + +    def __init__(self): +        self.tokens = [] +        self.prev_row = 1 +        self.prev_col = 0 + +    def add_whitespace(self, start): +        row, col = start +        assert row >= self.prev_row +        col_offset = col - self.prev_col +        if col_offset > 0: +            self.tokens.append(" " * col_offset) +        elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER): +            # Line was backslash-continued +            self.tokens.append(" ") + +    def untokenize(self, tokens): +        iterable = iter(tokens) +        for t in iterable: +            if len(t) == 2: +                self.compat(t, iterable) +                break +            tok_type, token, start, end = t[:4] +            self.add_whitespace(start) +            self.tokens.append(token) +            self.prev_row, self.prev_col = end +            if tok_type in (NEWLINE, NL): +                self.prev_row += 1 +                self.prev_col = 0 +        return "".join(self.tokens) + +    def compat(self, token, iterable): +        # This import is here to avoid problems when the itertools +        # module is not built yet and tokenize is imported. 
+        from itertools import chain +        startline = False +        prevstring = False +        indents = [] +        toks_append = self.tokens.append +        for tok in chain([token], iterable): +            toknum, tokval = tok[:2] + +            if toknum in (NAME, NUMBER): +                tokval += ' ' + +            # Insert a space between two consecutive strings +            if toknum == STRING: +                if prevstring: +                    tokval = ' ' + tokval +                prevstring = True +            else: +                prevstring = False + +            if toknum == INDENT: +                indents.append(tokval) +                continue +            elif toknum == DEDENT: +                indents.pop() +                continue +            elif toknum in (NEWLINE, NL): +                startline = True +            elif startline and indents: +                toks_append(indents[-1]) +                startline = False +            toks_append(tokval) + +def untokenize(iterable): +    """Transform tokens back into Python source code. + +    Each element returned by the iterable must be a token sequence +    with at least two elements, a token number and token value.  If +    only two tokens are passed, the resulting output is poor. 
+
+    Round-trip invariant for full input:
+        Untokenized source will match input source exactly
+
+    Round-trip invariant for limited input:
+        # Output text will tokenize back to match the input
+        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+        newcode = untokenize(t1)
+        readline = iter(newcode.splitlines(1)).next
+        t2 = [tok[:2] for tok in generate_tokens(readline)]
+        assert t1 == t2
+    """
+    ut = Untokenizer()
+    return ut.untokenize(iterable)
+
+def generate_tokens(readline):
+    """
+    The generate_tokens() generator requires one argument, readline, which
+    must be a callable object which provides the same interface as the
+    readline() method of built-in file objects. Each call to the function
+    should return one line of input as a string.  Alternately, readline
+    can be a callable function terminating with StopIteration:
+        readline = open(myfile).next    # Example of alternate readline
+
+    The generator produces 5-tuples with these members: the token type; the
+    token string; a 2-tuple (srow, scol) of ints specifying the row and
+    column where the token begins in the source; a 2-tuple (erow, ecol) of
+    ints specifying the row and column where the token ends in the source;
+    and the line on which the token was found. The line passed is the
+    logical line; continuation lines are included.
+    """ +    lnum = parenlev = continued = 0 +    namechars, numchars = string.ascii_letters + '_', '0123456789' +    contstr, needcont = '', 0 +    contline = None +    indents = [0] + +    while 1:                                   # loop over lines in stream +        try: +            line = readline() +        except StopIteration: +            line = '' +        lnum += 1 +        pos, max = 0, len(line) + +        if contstr:                            # continued string +            if not line: +                raise TokenError("EOF in multi-line string", strstart) +            endmatch = endprog.match(line) +            if endmatch: +                pos = end = endmatch.end(0) +                yield (STRING, contstr + line[:end], +                       strstart, (lnum, end), contline + line) +                contstr, needcont = '', 0 +                contline = None +            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': +                yield (ERRORTOKEN, contstr + line, +                           strstart, (lnum, len(line)), contline) +                contstr = '' +                contline = None +                continue +            else: +                contstr = contstr + line +                contline = contline + line +                continue + +        elif parenlev == 0 and not continued:  # new statement +            if not line: break +            column = 0 +            while pos < max:                   # measure leading whitespace +                if line[pos] == ' ': +                    column += 1 +                elif line[pos] == '\t': +                    column = (column//tabsize + 1)*tabsize +                elif line[pos] == '\f': +                    column = 0 +                else: +                    break +                pos += 1 +            if pos == max: +                break + +            if line[pos] in '#\r\n':           # skip comments or blank lines +                if line[pos] == '#': +  
                  comment_token = line[pos:].rstrip('\r\n') +                    nl_pos = pos + len(comment_token) +                    yield (COMMENT, comment_token, +                           (lnum, pos), (lnum, pos + len(comment_token)), line) +                    yield (NEWLINE, line[nl_pos:], +                           (lnum, nl_pos), (lnum, len(line)), line) +                else: +                    yield (NEWLINE, line[pos:], +                           (lnum, pos), (lnum, len(line)), line) +                continue + +            if column > indents[-1]:           # count indents or dedents +                indents.append(column) +                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) +            while column < indents[-1]: +                if column not in indents: +                    raise IndentationError( +                        "unindent does not match any outer indentation level", +                        ("<tokenize>", lnum, pos, line)) +                indents = indents[:-1] +                yield (DEDENT, '', (lnum, pos), (lnum, pos), line) + +        else:                                  # continued statement +            if not line: +                raise TokenError("EOF in multi-line statement", (lnum, 0)) +            continued = 0 + +        while pos < max: +            pseudomatch = pseudoprog.match(line, pos) +            if pseudomatch:                                # scan for tokens +                start, end = pseudomatch.span(1) +                spos, epos, pos = (lnum, start), (lnum, end), end +                token, initial = line[start:end], line[start] + +                if initial in numchars or \ +                   (initial == '.' 
and token != '.'):      # ordinary number +                    yield (NUMBER, token, spos, epos, line) +                elif initial in '\r\n': +                    yield (NL if parenlev > 0 else NEWLINE, +                           token, spos, epos, line) +                elif initial == '#': +                    assert not token.endswith("\n") +                    yield (COMMENT, token, spos, epos, line) +                elif token in triple_quoted: +                    endprog = endprogs[token] +                    endmatch = endprog.match(line, pos) +                    if endmatch:                           # all on one line +                        pos = endmatch.end(0) +                        token = line[start:pos] +                        yield (STRING, token, spos, (lnum, pos), line) +                    else: +                        strstart = (lnum, start)           # multiple lines +                        contstr = line[start:] +                        contline = line +                        break +                elif initial in single_quoted or \ +                    token[:2] in single_quoted or \ +                    token[:3] in single_quoted: +                    if token[-1] == '\n':                  # continued string +                        strstart = (lnum, start) +                        endprog = (endprogs[initial] or endprogs[token[1]] or +                                   endprogs[token[2]]) +                        contstr, needcont = line[start:], 1 +                        contline = line +                        break +                    else:                                  # ordinary string +                        yield (STRING, token, spos, epos, line) +                elif initial in namechars:                 # ordinary name +                    yield (NAME, token, spos, epos, line) +                elif initial == '\\':                      # continued stmt +                    continued = 1 +                else: +    
                if initial in '([{': +                        parenlev += 1 +                    elif initial in ')]}': +                        parenlev -= 1 +                    yield (OP, token, spos, epos, line) +            else: +                yield (ERRORTOKEN, line[pos], +                           (lnum, pos), (lnum, pos+1), line) +                pos += 1 + +    for indent in indents[1:]:                 # pop remaining indent levels +        yield (DEDENT, '', (lnum, 0), (lnum, 0), '') +    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') + +if __name__ == '__main__':                     # testing +    import sys +    if len(sys.argv) > 1: +        tokenize(open(sys.argv[1]).readline) +    else: +        tokenize(sys.stdin.readline) diff --git a/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py b/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py new file mode 100644 index 00000000000..ee1fd9e639b --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py @@ -0,0 +1,595 @@ +"""Patched version of standard library tokenize, to deal with various bugs. + +Based on Python 3.2 code. + +Patches: + +- Gareth Rees' patch for Python issue #12691 (untokenizing) +  - Except we don't encode the output of untokenize +  - Python 2 compatible syntax, so that it can be byte-compiled at installation +- Newlines in comments and blank lines should be either NL or NEWLINE, depending +  on whether they are in a multi-line statement. Filed as Python issue #17061. +- Export generate_tokens & TokenError +- u and rb literals are allowed under Python 3.3 and above. + +------------------------------------------------------------------------------ +Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens.  It decodes the bytes according to PEP-0263 for +determining source file encoding. 
+ +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF).  It generates 5-tuples with these +members: + +    the token type (see token.py) +    the token (a string) +    the starting (row, column) indices of the token (a 2-tuple of ints) +    the ending (row, column) indices of the token (a 2-tuple of ints) +    the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators.  Additionally, all token lists start with an ENCODING token +which tells you which encoding was used to decode the bytes stream. +""" +from __future__ import absolute_import + +__author__ = 'Ka-Ping Yee <[email protected]>' +__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' +               'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' +               'Michael Foord') +import builtins +import re +import sys +from token import * +from codecs import lookup, BOM_UTF8 +import collections +from io import TextIOWrapper +cookie_re = re.compile("coding[:=]\s*([-\w.]+)") + +import token +__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", +                           "NL", "untokenize", "ENCODING", "TokenInfo"] +del token + +__all__ += ["generate_tokens", "TokenError"] + +COMMENT = N_TOKENS +tok_name[COMMENT] = 'COMMENT' +NL = N_TOKENS + 1 +tok_name[NL] = 'NL' +ENCODING = N_TOKENS + 2 +tok_name[ENCODING] = 'ENCODING' +N_TOKENS += 3 + +class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): +    def __repr__(self): +        annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) +        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % +                self._replace(type=annotated_type)) + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return 
group(*choices) + '?' + +# Note: we use unicode matching for names ("\w") but ascii matching for +# number literals. +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'\w+' + +Hexnumber = r'0[xX][0-9a-fA-F]+' +Binnumber = r'0[bB][01]+' +Octnumber = r'0[oO][0-7]+' +Decnumber = r'(?:0+|[1-9][0-9]*)' +Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?[0-9]+' +Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) +Expfloat = r'[0-9]+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +if sys.version_info.minor >= 3: +    StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' +else: +    StringPrefix = r'(?:[bB]?[rR]?)?' + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group(StringPrefix + "'''", StringPrefix + '"""') +# Single-line ' or " string. +String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", +               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", +                 r"//=?", r"->", +                 r"[+\-*/%&|^=<>]=?", +                 r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. 
+ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + +                group("'", r'\\\r?\n'), +                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + +                group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +def _compile(expr): +    return re.compile(expr, re.UNICODE) + +tokenprog, pseudoprog, single3prog, double3prog = map( +    _compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": _compile(Single), '"': _compile(Double), +            "'''": single3prog, '"""': double3prog, +            "r'''": single3prog, 'r"""': double3prog, +            "b'''": single3prog, 'b"""': double3prog, +            "R'''": single3prog, 'R"""': double3prog, +            "B'''": single3prog, 'B"""': double3prog, +            "br'''": single3prog, 'br"""': double3prog, +            "bR'''": single3prog, 'bR"""': double3prog, +            "Br'''": single3prog, 'Br"""': double3prog, +            "BR'''": single3prog, 'BR"""': double3prog, +            'r': None, 'R': None, 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', +          "r'''", 'r"""', "R'''", 'R"""', +          "b'''", 'b"""', "B'''", 'B"""', +          "br'''", 'br"""', "Br'''", 'Br"""', +          "bR'''", 'bR"""', "BR'''", 'BR"""'): +    triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', +          "r'", 'r"', "R'", 'R"', +          "b'", 'b"', "B'", 'B"', +          "br'", 'br"', "Br'", 'Br"', +          "bR'", 'bR"', "BR'", 'BR"' ): +    single_quoted[t] = t + +if sys.version_info.minor >= 3: +    # Python 3.3 +    for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']: +        _t2 = _prefix+'"""' +        endprogs[_t2] = double3prog +        triple_quoted[_t2] = _t2 +        _t1 = _prefix + "'''" +        endprogs[_t1] = single3prog +        triple_quoted[_t1] = _t1 +        single_quoted[_prefix+'"'] = _prefix+'"' +        
single_quoted[_prefix+"'"] = _prefix+"'" +    del _prefix, _t2, _t1 +    endprogs['u'] = None +    endprogs['U'] = None + +del _compile + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + + +class Untokenizer: + +    def __init__(self): +        self.tokens = [] +        self.prev_row = 1 +        self.prev_col = 0 +        self.encoding = 'utf-8' + +    def add_whitespace(self, tok_type, start): +        row, col = start +        assert row >= self.prev_row +        col_offset = col - self.prev_col +        if col_offset > 0: +            self.tokens.append(" " * col_offset) +        elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER): +            # Line was backslash-continued. +            self.tokens.append(" ") + +    def untokenize(self, tokens): +        iterable = iter(tokens) +        for t in iterable: +            if len(t) == 2: +                self.compat(t, iterable) +                break +            tok_type, token, start, end = t[:4] +            if tok_type == ENCODING: +                self.encoding = token +                continue +            self.add_whitespace(tok_type, start) +            self.tokens.append(token) +            self.prev_row, self.prev_col = end +            if tok_type in (NEWLINE, NL): +                self.prev_row += 1 +                self.prev_col = 0 +        return "".join(self.tokens) + +    def compat(self, token, iterable): +        # This import is here to avoid problems when the itertools +        # module is not built yet and tokenize is imported. 
+        from itertools import chain +        startline = False +        prevstring = False +        indents = [] +        toks_append = self.tokens.append + +        for tok in chain([token], iterable): +            toknum, tokval = tok[:2] +            if toknum == ENCODING: +                self.encoding = tokval +                continue + +            if toknum in (NAME, NUMBER): +                tokval += ' ' + +            # Insert a space between two consecutive strings +            if toknum == STRING: +                if prevstring: +                    tokval = ' ' + tokval +                prevstring = True +            else: +                prevstring = False + +            if toknum == INDENT: +                indents.append(tokval) +                continue +            elif toknum == DEDENT: +                indents.pop() +                continue +            elif toknum in (NEWLINE, NL): +                startline = True +            elif startline and indents: +                toks_append(indents[-1]) +                startline = False +            toks_append(tokval) + + +def untokenize(tokens): +    """ +    Convert ``tokens`` (an iterable) back into Python source code. Return +    a bytes object, encoded using the encoding specified by the last +    ENCODING token in ``tokens``, or UTF-8 if no ENCODING token is found. + +    The result is guaranteed to tokenize back to match the input so that +    the conversion is lossless and round-trips are assured.  The +    guarantee applies only to the token type and token string as the +    spacing between tokens (column positions) may change. + +    :func:`untokenize` has two modes. If the input tokens are sequences +    of length 2 (``type``, ``string``) then spaces are added as necessary to +    preserve the round-trip property. 
+ +    If the input tokens are sequences of length 4 or more (``type``, +    ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then +    spaces are added so that each token appears in the result at the +    position indicated by ``start`` and ``end``, if possible. +    """ +    return Untokenizer().untokenize(tokens) + + +def _get_normal_name(orig_enc): +    """Imitates get_normal_name in tokenizer.c.""" +    # Only care about the first 12 characters. +    enc = orig_enc[:12].lower().replace("_", "-") +    if enc == "utf-8" or enc.startswith("utf-8-"): +        return "utf-8" +    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ +       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): +        return "iso-8859-1" +    return orig_enc + +def detect_encoding(readline): +    """ +    The detect_encoding() function is used to detect the encoding that should +    be used to decode a Python source file.  It requires one argment, readline, +    in the same way as the tokenize() generator. + +    It will call readline a maximum of twice, and return the encoding used +    (as a string) and a list of any lines (left as bytes) it has read in. + +    It detects the encoding from the presence of a utf-8 bom or an encoding +    cookie as specified in pep-0263.  If both a bom and a cookie are present, +    but disagree, a SyntaxError will be raised.  If the encoding cookie is an +    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found, +    'utf-8-sig' is returned. + +    If no encoding is specified, then the default of 'utf-8' will be returned. +    """ +    bom_found = False +    encoding = None +    default = 'utf-8' +    def read_or_stop(): +        try: +            return readline() +        except StopIteration: +            return b'' + +    def find_cookie(line): +        try: +            # Decode as UTF-8. 
Either the line is an encoding declaration, +            # in which case it should be pure ASCII, or it must be UTF-8 +            # per default encoding. +            line_string = line.decode('utf-8') +        except UnicodeDecodeError: +            raise SyntaxError("invalid or missing encoding declaration") + +        matches = cookie_re.findall(line_string) +        if not matches: +            return None +        encoding = _get_normal_name(matches[0]) +        try: +            codec = lookup(encoding) +        except LookupError: +            # This behaviour mimics the Python interpreter +            raise SyntaxError("unknown encoding: " + encoding) + +        if bom_found: +            if encoding != 'utf-8': +                # This behaviour mimics the Python interpreter +                raise SyntaxError('encoding problem: utf-8') +            encoding += '-sig' +        return encoding + +    first = read_or_stop() +    if first.startswith(BOM_UTF8): +        bom_found = True +        first = first[3:] +        default = 'utf-8-sig' +    if not first: +        return default, [] + +    encoding = find_cookie(first) +    if encoding: +        return encoding, [first] + +    second = read_or_stop() +    if not second: +        return default, [first] + +    encoding = find_cookie(second) +    if encoding: +        return encoding, [first, second] + +    return default, [first, second] + + +def open(filename): +    """Open a file in read only mode using the encoding detected by +    detect_encoding(). +    """ +    buffer = builtins.open(filename, 'rb') +    encoding, lines = detect_encoding(buffer.readline) +    buffer.seek(0) +    text = TextIOWrapper(buffer, encoding, line_buffering=True) +    text.mode = 'r' +    return text + + +def tokenize(readline): +    """ +    The tokenize() generator requires one argment, readline, which +    must be a callable object which provides the same interface as the +    readline() method of built-in file objects.  
Each call to the function +    should return one line of input as bytes.  Alternately, readline +    can be a callable function terminating with StopIteration: +        readline = open(myfile, 'rb').__next__  # Example of alternate readline + +    The generator produces 5-tuples with these members: the token type; the +    token string; a 2-tuple (srow, scol) of ints specifying the row and +    column where the token begins in the source; a 2-tuple (erow, ecol) of +    ints specifying the row and column where the token ends in the source; +    and the line on which the token was found.  The line passed is the +    logical line; continuation lines are included. + +    The first token sequence will always be an ENCODING token +    which tells you which encoding was used to decode the bytes stream. +    """ +    # This import is here to avoid problems when the itertools module is not +    # built yet and tokenize is imported. +    from itertools import chain, repeat +    encoding, consumed = detect_encoding(readline) +    rl_gen = iter(readline, b"") +    empty = repeat(b"") +    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) + + +def _tokenize(readline, encoding): +    lnum = parenlev = continued = 0 +    numchars = '0123456789' +    contstr, needcont = '', 0 +    contline = None +    indents = [0] + +    if encoding is not None: +        if encoding == "utf-8-sig": +            # BOM will already have been stripped. 
+            encoding = "utf-8" +        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') +    while True:             # loop over lines in stream +        try: +            line = readline() +        except StopIteration: +            line = b'' + +        if encoding is not None: +            line = line.decode(encoding) +        lnum += 1 +        pos, max = 0, len(line) + +        if contstr:                            # continued string +            if not line: +                raise TokenError("EOF in multi-line string", strstart) +            endmatch = endprog.match(line) +            if endmatch: +                pos = end = endmatch.end(0) +                yield TokenInfo(STRING, contstr + line[:end], +                       strstart, (lnum, end), contline + line) +                contstr, needcont = '', 0 +                contline = None +            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': +                yield TokenInfo(ERRORTOKEN, contstr + line, +                           strstart, (lnum, len(line)), contline) +                contstr = '' +                contline = None +                continue +            else: +                contstr = contstr + line +                contline = contline + line +                continue + +        elif parenlev == 0 and not continued:  # new statement +            if not line: break +            column = 0 +            while pos < max:                   # measure leading whitespace +                if line[pos] == ' ': +                    column += 1 +                elif line[pos] == '\t': +                    column = (column//tabsize + 1)*tabsize +                elif line[pos] == '\f': +                    column = 0 +                else: +                    break +                pos += 1 +            if pos == max: +                break + +            if line[pos] in '#\r\n':           # skip comments or blank lines +                if line[pos] == '#': +              
      comment_token = line[pos:].rstrip('\r\n') +                    nl_pos = pos + len(comment_token) +                    yield TokenInfo(COMMENT, comment_token, +                           (lnum, pos), (lnum, pos + len(comment_token)), line) +                    yield TokenInfo(NEWLINE, line[nl_pos:], +                           (lnum, nl_pos), (lnum, len(line)), line) +                else: +                    yield TokenInfo(NEWLINE, line[pos:], +                           (lnum, pos), (lnum, len(line)), line) +                continue + +            if column > indents[-1]:           # count indents or dedents +                indents.append(column) +                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) +            while column < indents[-1]: +                if column not in indents: +                    raise IndentationError( +                        "unindent does not match any outer indentation level", +                        ("<tokenize>", lnum, pos, line)) +                indents = indents[:-1] +                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) + +        else:                                  # continued statement +            if not line: +                raise TokenError("EOF in multi-line statement", (lnum, 0)) +            continued = 0 + +        while pos < max: +            pseudomatch = pseudoprog.match(line, pos) +            if pseudomatch:                                # scan for tokens +                start, end = pseudomatch.span(1) +                spos, epos, pos = (lnum, start), (lnum, end), end +                token, initial = line[start:end], line[start] + +                if (initial in numchars or                  # ordinary number +                    (initial == '.' and token != '.' 
and token != '...')): +                    yield TokenInfo(NUMBER, token, spos, epos, line) +                elif initial in '\r\n': +                    yield TokenInfo(NL if parenlev > 0 else NEWLINE, +                           token, spos, epos, line) +                elif initial == '#': +                    assert not token.endswith("\n") +                    yield TokenInfo(COMMENT, token, spos, epos, line) +                elif token in triple_quoted: +                    endprog = endprogs[token] +                    endmatch = endprog.match(line, pos) +                    if endmatch:                           # all on one line +                        pos = endmatch.end(0) +                        token = line[start:pos] +                        yield TokenInfo(STRING, token, spos, (lnum, pos), line) +                    else: +                        strstart = (lnum, start)           # multiple lines +                        contstr = line[start:] +                        contline = line +                        break +                elif initial in single_quoted or \ +                    token[:2] in single_quoted or \ +                    token[:3] in single_quoted: +                    if token[-1] == '\n':                  # continued string +                        strstart = (lnum, start) +                        endprog = (endprogs[initial] or endprogs[token[1]] or +                                   endprogs[token[2]]) +                        contstr, needcont = line[start:], 1 +                        contline = line +                        break +                    else:                                  # ordinary string +                        yield TokenInfo(STRING, token, spos, epos, line) +                elif initial.isidentifier():               # ordinary name +                    yield TokenInfo(NAME, token, spos, epos, line) +                elif initial == '\\':                      # continued stmt +                    
continued = 1 +                else: +                    if initial in '([{': +                        parenlev += 1 +                    elif initial in ')]}': +                        parenlev -= 1 +                    yield TokenInfo(OP, token, spos, epos, line) +            else: +                yield TokenInfo(ERRORTOKEN, line[pos], +                           (lnum, pos), (lnum, pos+1), line) +                pos += 1 + +    for indent in indents[1:]:                 # pop remaining indent levels +        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') +    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') + + +# An undocumented, backwards compatible, API for all the places in the standard +# library that expect to be able to use tokenize with strings +def generate_tokens(readline): +    return _tokenize(readline, None) + +if __name__ == "__main__": +    # Quick sanity check +    s = b'''def parseline(self, line): +            """Parse the line into a command name and a string containing +            the arguments.  Returns a tuple containing (command, args, line). +            'command' and 'args' may be None if the line couldn't be parsed. 
+            """ +            line = line.strip() +            if not line: +                return None, None, line +            elif line[0] == '?': +                line = 'help ' + line[1:] +            elif line[0] == '!': +                if hasattr(self, 'do_shell'): +                    line = 'shell ' + line[1:] +                else: +                    return None, None, line +            i, n = 0, len(line) +            while i < n and line[i] in self.identchars: i = i+1 +            cmd, arg = line[:i], line[i:].strip() +            return cmd, arg, line +    ''' +    for tok in tokenize(iter(s.splitlines()).__next__): +        print(tok) diff --git a/contrib/python/ipython/py2/IPython/utils/capture.py b/contrib/python/ipython/py2/IPython/utils/capture.py new file mode 100644 index 00000000000..d8f919568cf --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/capture.py @@ -0,0 +1,176 @@ +# encoding: utf-8 +"""IO capturing utilities.""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. 
+ +from __future__ import print_function, absolute_import + +import sys + +from IPython.utils.py3compat import PY3 + +if PY3: +    from io import StringIO +else: +    from StringIO import StringIO + +#----------------------------------------------------------------------------- +# Classes and functions +#----------------------------------------------------------------------------- + + +class RichOutput(object): +    def __init__(self, data=None, metadata=None, transient=None, update=False): +        self.data = data or {} +        self.metadata = metadata or {} +        self.transient = transient or {} +        self.update = update + +    def display(self): +        from IPython.display import publish_display_data +        publish_display_data(data=self.data, metadata=self.metadata, +                             transient=self.transient, update=self.update) + +    def _repr_mime_(self, mime): +        if mime not in self.data: +            return +        data = self.data[mime] +        if mime in self.metadata: +            return data, self.metadata[mime] +        else: +            return data +             +    def _repr_html_(self): +        return self._repr_mime_("text/html") + +    def _repr_latex_(self): +        return self._repr_mime_("text/latex") + +    def _repr_json_(self): +        return self._repr_mime_("application/json") + +    def _repr_javascript_(self): +        return self._repr_mime_("application/javascript") + +    def _repr_png_(self): +        return self._repr_mime_("image/png") + +    def _repr_jpeg_(self): +        return self._repr_mime_("image/jpeg") + +    def _repr_svg_(self): +        return self._repr_mime_("image/svg+xml") + + +class CapturedIO(object): +    """Simple object for containing captured stdout/err and rich display StringIO objects + +    Each instance `c` has three attributes: + +    - ``c.stdout`` : standard output as a string +    - ``c.stderr`` : standard error as a string +    - ``c.outputs``: a list of rich 
display outputs + +    Additionally, there's a ``c.show()`` method which will print all of the +    above in the same order, and can be invoked simply via ``c()``. +    """ + +    def __init__(self, stdout, stderr, outputs=None): +        self._stdout = stdout +        self._stderr = stderr +        if outputs is None: +            outputs = [] +        self._outputs = outputs + +    def __str__(self): +        return self.stdout + +    @property +    def stdout(self): +        "Captured standard output" +        if not self._stdout: +            return '' +        return self._stdout.getvalue() + +    @property +    def stderr(self): +        "Captured standard error" +        if not self._stderr: +            return '' +        return self._stderr.getvalue() + +    @property +    def outputs(self): +        """A list of the captured rich display outputs, if any. + +        If you have a CapturedIO object ``c``, these can be displayed in IPython +        using:: + +            from IPython.display import display +            for o in c.outputs: +                display(o) +        """ +        return [ RichOutput(**kargs) for kargs in self._outputs ] + +    def show(self): +        """write my output to sys.stdout/err as appropriate""" +        sys.stdout.write(self.stdout) +        sys.stderr.write(self.stderr) +        sys.stdout.flush() +        sys.stderr.flush() +        for kargs in self._outputs: +            RichOutput(**kargs).display() + +    __call__ = show + + +class capture_output(object): +    """context manager for capturing stdout/err""" +    stdout = True +    stderr = True +    display = True + +    def __init__(self, stdout=True, stderr=True, display=True): +        self.stdout = stdout +        self.stderr = stderr +        self.display = display +        self.shell = None + +    def __enter__(self): +        from IPython.core.getipython import get_ipython +        from IPython.core.displaypub import CapturingDisplayPublisher +        from 
IPython.core.displayhook import CapturingDisplayHook + +        self.sys_stdout = sys.stdout +        self.sys_stderr = sys.stderr + +        if self.display: +            self.shell = get_ipython() +            if self.shell is None: +                self.save_display_pub = None +                self.display = False + +        stdout = stderr = outputs = None +        if self.stdout: +            stdout = sys.stdout = StringIO() +        if self.stderr: +            stderr = sys.stderr = StringIO() +        if self.display: +            self.save_display_pub = self.shell.display_pub +            self.shell.display_pub = CapturingDisplayPublisher() +            outputs = self.shell.display_pub.outputs +            self.save_display_hook = sys.displayhook +            sys.displayhook = CapturingDisplayHook(shell=self.shell, +                                                   outputs=outputs) + +        return CapturedIO(stdout, stderr, outputs) + +    def __exit__(self, exc_type, exc_value, traceback): +        sys.stdout = self.sys_stdout +        sys.stderr = self.sys_stderr +        if self.display and self.shell: +            self.shell.display_pub = self.save_display_pub +            sys.displayhook = self.save_display_hook + + diff --git a/contrib/python/ipython/py2/IPython/utils/colorable.py b/contrib/python/ipython/py2/IPython/utils/colorable.py new file mode 100644 index 00000000000..9f7c5ac213c --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/colorable.py @@ -0,0 +1,26 @@ +#***************************************************************************** +# Copyright (C) 2016 The IPython Team <[email protected]> +# +# Distributed under the terms of the BSD License.  The full license is in +# the file COPYING, distributed as part of this software. 
#*****************************************************************************
"""
Color managing related utilities
"""
from __future__ import absolute_import

import pygments

from traitlets.config import Configurable
from traitlets import Unicode


def available_themes():
    """Return the list of valid highlighting style names.

    This is every style shipped with pygments, plus the historical
    IPython color-scheme names ('NoColor', 'LightBG', 'Linux', 'Neutral').
    """
    # PEP 8 (E731): a named callable should be a def, not a lambda.
    return [s for s in pygments.styles.get_all_styles()] + \
           ['NoColor', 'LightBG', 'Linux', 'Neutral']


class Colorable(Configurable):
    """
    A subclass of configurable for all the classes that have a `default_scheme`
    """
    # Name of the pygments/IPython style used by default.
    default_style = Unicode('lightbg').tag(config=True)


# -*- coding: utf-8 -*-
"""Tools for coloring text in ANSI terminals.
"""

#*****************************************************************************
#       Copyright (C) 2002-2006 Fernando Perez. <[email protected]>
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file COPYING, distributed as part of this software.
#*****************************************************************************

__all__ = ['TermColors','InputTermColors','ColorScheme','ColorSchemeTable']

import os

from IPython.utils.ipstruct import Struct

# (attribute name, ANSI SGR parameter string) pairs used to populate the
# color classes below.
color_templates = (
        # Dark colors
        ("Black"       , "0;30"),
        ("Red"         , "0;31"),
        ("Green"       , "0;32"),
        ("Brown"       , "0;33"),
        ("Blue"        , "0;34"),
        ("Purple"      , "0;35"),
        ("Cyan"        , "0;36"),
        ("LightGray"   , "0;37"),
        # Light colors
        ("DarkGray"    , "1;30"),
        ("LightRed"    , "1;31"),
        ("LightGreen"  , "1;32"),
        ("Yellow"      , "1;33"),
        ("LightBlue"   , "1;34"),
        ("LightPurple" , "1;35"),
        ("LightCyan"   , "1;36"),
        ("White"       , "1;37"),
        # Blinking colors.  Probably should not be used in anything serious.
        ("BlinkBlack"  , "5;30"),
        ("BlinkRed"    , "5;31"),
        ("BlinkGreen"  , "5;32"),
        ("BlinkYellow" , "5;33"),
        ("BlinkBlue"   , "5;34"),
        ("BlinkPurple" , "5;35"),
        ("BlinkCyan"   , "5;36"),
        ("BlinkLightGray", "5;37"),
        )

def make_color_table(in_class):
    """Attach one escape-sequence attribute per ``color_templates`` entry.

    Helper used to populate :class:`TermColors` and
    :class:`InputTermColors`.
    """
    for color_name, sgr_code in color_templates:
        setattr(in_class, color_name, in_class._base % sgr_code)

class TermColors:
    """Color escape sequences.

    Defines the escape sequences for the standard (ANSI?) terminal colors,
    plus a ``NoColor`` escape that is just the empty string — handy for
    'dummy' color schemes on terminals that get confused by color escapes.

    This class should be used as a mixin for building color schemes."""

    NoColor = ''  # for color schemes in color-less terminals.
    Normal = '\033[0m'   # Reset normal coloring
    _base  = '\033[%sm'  # Template for all other colors

# Build the actual color table as a set of class attributes:
make_color_table(TermColors)

class InputTermColors:
    r"""Color escape sequences for input prompts.

    Like :class:`TermColors`, but each escape is wrapped in \001 and \002
    markers so readline can compute the printable length of every line and
    wrap lines accordingly.  Use this class for any colored text that goes
    into input prompts, such as in calls to raw_input().

    Also defines a ``NoColor`` null escape for color-less terminals, and is
    meant to be used as a mixin for building color schemes."""

    NoColor = ''  # for color schemes in color-less terminals.

    if os.name == 'nt' and os.environ.get('TERM','dumb') == 'emacs':
        # (X)emacs on W32 gets confused with \001 and \002 so we remove them
        Normal = '\033[0m'   # Reset normal coloring
        _base  = '\033[%sm'  # Template for all other colors
    else:
        Normal = '\001\033[0m\002'   # Reset normal coloring
        _base  = '\001\033[%sm\002'  # Template for all other colors

# Build the actual color table as a set of class attributes:
make_color_table(InputTermColors)

class NoColors:
    """Mirror of the color classes with every name mapped to the empty
    string, so it can easily be substituted to turn off colours."""
    NoColor = ''
    Normal  = ''

for name, value in color_templates:
    setattr(NoColors, name, '')

class ColorScheme:
    """Generic color scheme: just a name plus a Struct of color values."""
    def __init__(self,__scheme_name_,colordict=None,**colormap):
        self.name = __scheme_name_
        # Colors may come in as a ready-made dict or as keyword arguments.
        self.colors = Struct(colordict) if colordict is not None \
                      else Struct(**colormap)

    def copy(self,name=None):
        """Return a full copy of the object, optionally renaming it."""
        return ColorScheme(self.name if name is None else name,
                           self.colors.dict())

class ColorSchemeTable(dict):
    """A dict of color schemes plus the notion of an 'active' scheme.

    Two shorthand attributes are maintained:

    active_scheme_name -> name of the currently active scheme
    active_colors -> actual color table of the active scheme"""

    def __init__(self, scheme_list=None, default_scheme=''):
        """Create a table of color schemes.

        The table may be created empty and filled manually, or created from
        a list of valid color schemes TOGETHER WITH the name of the default
        active scheme.
        """

        # Filled in by set_active_scheme() below.
        self.active_scheme_name = ''
        self.active_colors = None

        if scheme_list:
            if default_scheme == '':
                raise ValueError('you must specify the default color scheme')
            for scheme in scheme_list:
                self.add_scheme(scheme)
            self.set_active_scheme(default_scheme)

    def copy(self):
        """Return full copy of object"""
        return ColorSchemeTable(self.values(),self.active_scheme_name)

    def add_scheme(self,new_scheme):
        """Add a new color scheme to the table."""
        if not isinstance(new_scheme,ColorScheme):
            raise ValueError('ColorSchemeTable only accepts ColorScheme instances')
        self[new_scheme.name] = new_scheme

    def set_active_scheme(self,scheme,case_sensitive=0):
        """Set the currently active scheme.

        Names are compared case-insensitively by default; pass a true
        case_sensitive to require an exact match."""

        scheme_names = list(self.keys())
        if case_sensitive:
            candidates = scheme_names
            target = scheme
        else:
            candidates = [nm.lower() for nm in scheme_names]
            target = scheme.lower()
        try:
            idx = candidates.index(target)
        except ValueError:
            raise ValueError('Unrecognized color scheme: ' + scheme + \
                  '\nValid schemes: '+str(scheme_names).replace("'', ",''))
        active = scheme_names[idx]
        self.active_scheme_name = active
        self.active_colors = self[active].colors
        # Allow '' as an index for the currently active scheme.
        self[''] = self[active]
# encoding: utf-8
"""Miscellaneous context managers.
"""

import warnings

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

class preserve_keys(object):
    """Preserve a set of keys in a dictionary.

    On entry the current values of the given keys are recorded; on exit
    the dictionary is restored for those keys: preserved keys that existed
    get their original values back, and preserved keys that did not exist
    on entry are deleted.

    Examples
    --------

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> with preserve_keys(d, 'b', 'c', 'd'):
    ...     del d['a']
    ...     del d['b']      # will be reset to 2
    ...     d['c'] = None   # will be reset to 3
    ...     d['d'] = 4      # will be deleted
    ...     d['e'] = 5
    ...     print(sorted(d.items()))
    ...
    [('c', None), ('d', 4), ('e', 5)]
    >>> print(sorted(d.items()))
    [('b', 2), ('c', 3), ('e', 5)]
    """

    def __init__(self, dictionary, *keys):
        self.dictionary = dictionary
        self.keys = keys

    def __enter__(self):
        # Snapshot the tracked keys so __exit__ can undo any changes.
        to_update = {}
        to_delete = []
        d = self.dictionary
        for key in self.keys:
            if key in d:
                to_update[key] = d[key]
            else:
                to_delete.append(key)

        self.to_delete = to_delete
        self.to_update = to_update

    def __exit__(self, *exc_info):
        d = self.dictionary

        for key in self.to_delete:
            d.pop(key, None)
        d.update(self.to_update)


class NoOpContext(object):
    """
    Deprecated

    Context manager that does nothing."""

    def __init__(self):
        warnings.warn("""NoOpContext is deprecated since IPython 5.0 """,
                                            DeprecationWarning, stacklevel=2)

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass


# --- daemonize.py: deprecation shim, moved to ipyparallel -------------------
from warnings import warn

warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize")
from ipyparallel.apps.daemonize import daemonize


# encoding: utf-8
"""Utilities for working with data structures like lists, dicts and tuples.
"""

#-----------------------------------------------------------------------------
#  Copyright (C) 2008-2011  The IPython Development Team
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------

def uniq_stable(elems):
    """uniq_stable(elems) -> list

    Return from an iterable, a list of all the unique elements in the input,
    but maintaining the order in which they first appear.

    Note: All elements in the input must be hashable for this routine
    to work, as it internally uses a set for efficiency reasons.
    """
    seen = set()
    # set.add() returns None (falsy), so the membership test and the
    # insertion both happen in a single pass over ``elems``.
    return [x for x in elems if x not in seen and not seen.add(x)]


def flatten(seq):
    """Flatten a list of lists (NOT recursive, only works for 2d lists)."""

    return [x for subseq in seq for x in subseq]


def chop(seq, size):
    """Chop a sequence into chunks of the given size.

    The last chunk holds whatever remains, so it may be shorter than
    ``size``.
    """
    # Plain ``range`` (instead of the py2-only ``xrange`` shim) produces
    # identical results and keeps this module self-contained on both
    # Python 2 and Python 3; the indices are only iterated once.
    return [seq[i:i + size] for i in range(0, len(seq), size)]


# encoding: utf-8
"""Decorators that don't go anywhere else.

This module contains misc. decorators that don't really go with another module
in :mod:`IPython.utils`. Before putting something here please see if it should
go into another topical module in :mod:`IPython.utils`.
"""

#-----------------------------------------------------------------------------
#  Copyright (C) 2008-2011  The IPython Development Team
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

from functools import wraps

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

def flag_calls(func):
    """Wrap a function to detect and flag when it gets called.

    This is a decorator which takes a function and wraps it in a function with
    a 'called' attribute. wrapper.called is initialized to False.

    The wrapper.called attribute is set to False right before each call to the
    wrapped function, so if the call fails it remains False.  After the call
    completes, wrapper.called is set to True and the output is returned.

    Testing for truth in wrapper.called allows you to determine if a call to
    func() was attempted and succeeded."""

    # don't wrap twice
    if hasattr(func, 'called'):
        return func

    # functools.wraps preserves __name__, __module__, __doc__, etc. of the
    # wrapped function (the original code only copied __doc__ by hand).
    @wraps(func)
    def wrapper(*args, **kw):
        wrapper.called = False
        out = func(*args, **kw)
        wrapper.called = True
        return out

    wrapper.called = False
    return wrapper


def undoc(func):
    """Mark a function or class as undocumented.

    This is found by inspecting the AST, so for now it must be used directly
    as @undoc, not as e.g. @decorators.undoc
    """
    return func


# encoding: utf-8
"""A fancy version of Python's builtin :func:`dir` function.
"""

# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License. + +import inspect +from .py3compat import string_types + + +def safe_hasattr(obj, attr): +    """In recent versions of Python, hasattr() only catches AttributeError. +    This catches all errors. +    """ +    try: +        getattr(obj, attr) +        return True +    except: +        return False + + +def dir2(obj): +    """dir2(obj) -> list of strings + +    Extended version of the Python builtin dir(), which does a few extra +    checks. + +    This version is guaranteed to return only a list of true strings, whereas +    dir() returns anything that objects inject into themselves, even if they +    are later not really valid for attribute access (many extension libraries +    have such bugs). +    """ + +    # Start building the attribute list via dir(), and then complete it +    # with a few extra special-purpose calls. + +    try: +        words = set(dir(obj)) +    except Exception: +        # TypeError: dir(obj) does not return a list +        words = set() + +    # filter out non-string attributes which may be stuffed by dir() calls +    # and poor coding in third-party modules + +    words = [w for w in words if isinstance(w, string_types)] +    return sorted(words) + + +def get_real_method(obj, name): +    """Like getattr, but with a few extra sanity checks: + +    - If obj is a class, ignore its methods +    - Check if obj is a proxy that claims to have all attributes +    - Catch attribute access failing with any exception +    - Check that the attribute is a callable object + +    Returns the method or None. 
+    """ +    if inspect.isclass(obj): +        return None + +    try: +        canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None) +    except Exception: +        return None + +    if canary is not None: +        # It claimed to have an attribute it should never have +        return None + +    try: +        m = getattr(obj, name, None) +    except Exception: +        return None + +    if callable(m): +        return m + +    return None diff --git a/contrib/python/ipython/py2/IPython/utils/encoding.py b/contrib/python/ipython/py2/IPython/utils/encoding.py new file mode 100644 index 00000000000..387a24700cf --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/encoding.py @@ -0,0 +1,71 @@ +# coding: utf-8 +""" +Utilities for dealing with text encodings +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2008-2012  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- +import sys +import locale +import warnings + +# to deal with the possibility of sys.std* not being a stream at all +def get_stream_enc(stream, default=None): +    """Return the given stream's encoding or a default. + +    There are cases where ``sys.std*`` might not actually be a stream, so +    check for the encoding attribute prior to returning it, and return +    a default if it doesn't exist or evaluates as False. ``default`` +    is None if not provided. 
+    """ +    if not hasattr(stream, 'encoding') or not stream.encoding: +        return default +    else: +        return stream.encoding + +# Less conservative replacement for sys.getdefaultencoding, that will try +# to match the environment. +# Defined here as central function, so if we find better choices, we +# won't need to make changes all over IPython. +def getdefaultencoding(prefer_stream=True): +    """Return IPython's guess for the default encoding for bytes as text. +     +    If prefer_stream is True (default), asks for stdin.encoding first, +    to match the calling Terminal, but that is often None for subprocesses. +     +    Then fall back on locale.getpreferredencoding(), +    which should be a sensible platform default (that respects LANG environment), +    and finally to sys.getdefaultencoding() which is the most conservative option, +    and usually ASCII on Python 2 or UTF8 on Python 3. +    """ +    enc = None +    if prefer_stream: +        enc = get_stream_enc(sys.stdin) +    if not enc or enc=='ascii': +        try: +            # There are reports of getpreferredencoding raising errors +            # in some cases, which may well be fixed, but let's be conservative here. +            enc = locale.getpreferredencoding() +        except Exception: +            pass +    enc = enc or sys.getdefaultencoding() +    # On windows `cp0` can be returned to indicate that there is no code page. +    # Since cp0 is an invalid encoding return instead cp1252 which is the +    # Western European default. +    if enc == 'cp0': +        warnings.warn( +            "Invalid code page cp0 detected - using cp1252 instead." 
+            "If cp1252 is incorrect please ensure a valid code page " +            "is defined for the process.", RuntimeWarning) +        return 'cp1252' +    return enc + +DEFAULT_ENCODING = getdefaultencoding() diff --git a/contrib/python/ipython/py2/IPython/utils/eventful.py b/contrib/python/ipython/py2/IPython/utils/eventful.py new file mode 100644 index 00000000000..fc0f7aee4f6 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/eventful.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import + +from warnings import warn + +warn("IPython.utils.eventful has moved to traitlets.eventful") + +from traitlets.eventful import * diff --git a/contrib/python/ipython/py2/IPython/utils/frame.py b/contrib/python/ipython/py2/IPython/utils/frame.py new file mode 100644 index 00000000000..76ccc71c446 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/frame.py @@ -0,0 +1,98 @@ +# encoding: utf-8 +""" +Utilities for working with stack frames. +""" +from __future__ import print_function + +#----------------------------------------------------------------------------- +#  Copyright (C) 2008-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +import sys +from IPython.utils import py3compat + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + [email protected]_refactor_print +def extract_vars(*names,**kw): +    """Extract a set of variables by name from another frame. 
+ +    Parameters +    ---------- +    *names : str +        One or more variable names which will be extracted from the caller's +        frame. + +    depth : integer, optional +        How many frames in the stack to walk when looking for your variables. +        The default is 0, which will use the frame where the call was made. + + +    Examples +    -------- +    :: + +        In [2]: def func(x): +           ...:     y = 1 +           ...:     print(sorted(extract_vars('x','y').items())) +           ...: + +        In [3]: func('hello') +        [('x', 'hello'), ('y', 1)] +    """ + +    depth = kw.get('depth',0) +     +    callerNS = sys._getframe(depth+1).f_locals +    return dict((k,callerNS[k]) for k in names) + + +def extract_vars_above(*names): +    """Extract a set of variables by name from another frame. + +    Similar to extractVars(), but with a specified depth of 1, so that names +    are exctracted exactly from above the caller. + +    This is simply a convenience function so that the very common case (for us) +    of skipping exactly 1 frame doesn't have to construct a special dict for +    keyword passing.""" + +    callerNS = sys._getframe(2).f_locals +    return dict((k,callerNS[k]) for k in names) + + +def debugx(expr,pre_msg=''): +    """Print the value of an expression from the caller's frame. + +    Takes an expression, evaluates it in the caller's frame and prints both +    the given expression and the resulting value (as well as a debug mark +    indicating the name of the calling function.  The input must be of a form +    suitable for eval(). 
+ +    An optional message can be passed, which will be prepended to the printed +    expr->value pair.""" + +    cf = sys._getframe(1) +    print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr, +                                   eval(expr,cf.f_globals,cf.f_locals))) + + +# deactivate it by uncommenting the following line, which makes it a no-op +#def debugx(expr,pre_msg=''): pass + +def extract_module_locals(depth=0): +    """Returns (module, locals) of the function `depth` frames away from the caller""" +    f = sys._getframe(depth + 1) +    global_ns = f.f_globals +    module = sys.modules[global_ns['__name__']] +    return (module, f.f_locals) + diff --git a/contrib/python/ipython/py2/IPython/utils/generics.py b/contrib/python/ipython/py2/IPython/utils/generics.py new file mode 100644 index 00000000000..5ffdc86ebda --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/generics.py @@ -0,0 +1,34 @@ +# encoding: utf-8 +"""Generic functions for extending IPython. + +See http://pypi.python.org/pypi/simplegeneric. +""" + +from IPython.core.error import TryNext +from simplegeneric import generic + + +@generic +def inspect_object(obj): +    """Called when you do obj?""" +    raise TryNext + + +@generic +def complete_object(obj, prev_completions): +    """Custom completer dispatching for python objects. + +    Parameters +    ---------- +    obj : object +        The object to complete. +    prev_completions : list +        List of attributes discovered so far. + +    This should return the list of attributes in obj. If you only wish to +    add to the attributes already discovered normally, return +    own_attrs + prev_completions. 
+    """ +    raise TryNext + + diff --git a/contrib/python/ipython/py2/IPython/utils/importstring.py b/contrib/python/ipython/py2/IPython/utils/importstring.py new file mode 100644 index 00000000000..c8e1840eb37 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/importstring.py @@ -0,0 +1,39 @@ +# encoding: utf-8 +""" +A simple utility to import something by its string name. +""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. + + +def import_item(name): +    """Import and return ``bar`` given the string ``foo.bar``. + +    Calling ``bar = import_item("foo.bar")`` is the functional equivalent of +    executing the code ``from foo import bar``. + +    Parameters +    ---------- +    name : string +      The fully qualified name of the module/package being imported. + +    Returns +    ------- +    mod : module object +       The module that was imported. +    """ +     +    parts = name.rsplit('.', 1) +    if len(parts) == 2: +        # called with 'foo.bar....' +        package, obj = parts +        module = __import__(package, fromlist=[obj]) +        try: +            pak = getattr(module, obj) +        except AttributeError: +            raise ImportError('No module named %s' % obj) +        return pak +    else: +        # called with un-dotted string +        return __import__(parts[0]) diff --git a/contrib/python/ipython/py2/IPython/utils/io.py b/contrib/python/ipython/py2/IPython/utils/io.py new file mode 100644 index 00000000000..036d6e3926a --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/io.py @@ -0,0 +1,246 @@ +# encoding: utf-8 +""" +IO related utilities. +""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. 
+ +from __future__ import print_function +from __future__ import absolute_import + + +import atexit +import os +import sys +import tempfile +import warnings +from warnings import warn + +from IPython.utils.decorators import undoc +from .capture import CapturedIO, capture_output +from .py3compat import string_types, input, PY3 + +@undoc +class IOStream: + +    def __init__(self, stream, fallback=None): +        warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead', +             DeprecationWarning, stacklevel=2) +        if not hasattr(stream,'write') or not hasattr(stream,'flush'): +            if fallback is not None: +                stream = fallback +            else: +                raise ValueError("fallback required, but not specified") +        self.stream = stream +        self._swrite = stream.write + +        # clone all methods not overridden: +        def clone(meth): +            return not hasattr(self, meth) and not meth.startswith('_') +        for meth in filter(clone, dir(stream)): +            try: +                val = getattr(stream, meth) +            except AttributeError: +                pass +            else: +                setattr(self, meth, val) + +    def __repr__(self): +        cls = self.__class__ +        tpl = '{mod}.{cls}({args})' +        return tpl.format(mod=cls.__module__, cls=cls.__name__, args=self.stream) + +    def write(self,data): +        warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead', +             DeprecationWarning, stacklevel=2) +        try: +            self._swrite(data) +        except: +            try: +                # print handles some unicode issues which may trip a plain +                # write() call.  Emulate write() by using an empty end +                # argument. +                print(data, end='', file=self.stream) +            except: +                # if we get here, something is seriously broken. 
+                print('ERROR - failed to write data to stream:', self.stream, +                      file=sys.stderr) + +    def writelines(self, lines): +        warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead', +             DeprecationWarning, stacklevel=2) +        if isinstance(lines, string_types): +            lines = [lines] +        for line in lines: +            self.write(line) + +    # This class used to have a writeln method, but regular files and streams +    # in Python don't have this method. We need to keep this completely +    # compatible so we removed it. + +    @property +    def closed(self): +        return self.stream.closed + +    def close(self): +        pass + +# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr +devnull = open(os.devnull, 'w') +atexit.register(devnull.close) + +# io.std* are deprecated, but don't show our own deprecation warnings +# during initialization of the deprecated API. +with warnings.catch_warnings(): +    warnings.simplefilter('ignore', DeprecationWarning) +    stdin = IOStream(sys.stdin, fallback=devnull) +    stdout = IOStream(sys.stdout, fallback=devnull) +    stderr = IOStream(sys.stderr, fallback=devnull) + +class Tee(object): +    """A class to duplicate an output stream to stdout/err. + +    This works in a manner very similar to the Unix 'tee' command. + +    When the object is closed or deleted, it closes the original file given to +    it for duplication. +    """ +    # Inspired by: +    # http://mail.python.org/pipermail/python-list/2007-May/442737.html + +    def __init__(self, file_or_name, mode="w", channel='stdout'): +        """Construct a new Tee object. + +        Parameters +        ---------- +        file_or_name : filename or open filehandle (writable) +          File that will be duplicated + +        mode : optional, valid mode for open(). +          If a filename was give, open with this mode. 
+ +        channel : str, one of ['stdout', 'stderr'] +        """ +        if channel not in ['stdout', 'stderr']: +            raise ValueError('Invalid channel spec %s' % channel) + +        if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'): +            self.file = file_or_name +        else: +            self.file = open(file_or_name, mode) +        self.channel = channel +        self.ostream = getattr(sys, channel) +        setattr(sys, channel, self) +        self._closed = False + +    def close(self): +        """Close the file and restore the channel.""" +        self.flush() +        setattr(sys, self.channel, self.ostream) +        self.file.close() +        self._closed = True + +    def write(self, data): +        """Write data to both channels.""" +        self.file.write(data) +        self.ostream.write(data) +        self.ostream.flush() + +    def flush(self): +        """Flush both channels.""" +        self.file.flush() +        self.ostream.flush() + +    def __del__(self): +        if not self._closed: +            self.close() + + +def ask_yes_no(prompt, default=None, interrupt=None): +    """Asks a question and returns a boolean (y/n) answer. + +    If default is given (one of 'y','n'), it is used if the user input is +    empty. If interrupt is given (one of 'y','n'), it is used if the user +    presses Ctrl-C. Otherwise the question is repeated until an answer is +    given. + +    An EOF is treated as the default answer.  If there is no default, an +    exception is raised to prevent infinite loops. 
+ +    Valid answers are: y/yes/n/no (match is not case sensitive).""" + +    answers = {'y':True,'n':False,'yes':True,'no':False} +    ans = None +    while ans not in answers.keys(): +        try: +            ans = input(prompt+' ').lower() +            if not ans:  # response was an empty string +                ans = default +        except KeyboardInterrupt: +            if interrupt: +                ans = interrupt +        except EOFError: +            if default in answers.keys(): +                ans = default +                print() +            else: +                raise + +    return answers[ans] + + +def temp_pyfile(src, ext='.py'): +    """Make a temporary python file, return filename and filehandle. + +    Parameters +    ---------- +    src : string or list of strings (no need for ending newlines if list) +      Source code to be written to the file. + +    ext : optional, string +      Extension for the generated file. + +    Returns +    ------- +    (filename, open filehandle) +      It is the caller's responsibility to close the open file and unlink it. 
+    """ +    fname = tempfile.mkstemp(ext)[1] +    f = open(fname,'w') +    f.write(src) +    f.flush() +    return fname, f + +def atomic_writing(*args, **kwargs): +    """DEPRECATED: moved to notebook.services.contents.fileio""" +    warn("IPython.utils.io.atomic_writing has moved to notebook.services.contents.fileio") +    from notebook.services.contents.fileio import atomic_writing +    return atomic_writing(*args, **kwargs) + +def raw_print(*args, **kw): +    """Raw print to sys.__stdout__, otherwise identical interface to print().""" + +    print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'), +          file=sys.__stdout__) +    sys.__stdout__.flush() + + +def raw_print_err(*args, **kw): +    """Raw print to sys.__stderr__, otherwise identical interface to print().""" + +    print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'), +          file=sys.__stderr__) +    sys.__stderr__.flush() + + +# Short aliases for quick debugging, do NOT use these in production code. +rprint = raw_print +rprinte = raw_print_err + + +def unicode_std_stream(stream='stdout'): +    """DEPRECATED, moved to nbconvert.utils.io""" +    warn("IPython.utils.io.unicode_std_stream has moved to nbconvert.utils.io") +    from nbconvert.utils.io import unicode_std_stream +    return unicode_std_stream(stream) diff --git a/contrib/python/ipython/py2/IPython/utils/ipstruct.py b/contrib/python/ipython/py2/IPython/utils/ipstruct.py new file mode 100644 index 00000000000..e2b3e8fa4c5 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/ipstruct.py @@ -0,0 +1,391 @@ +# encoding: utf-8 +"""A dict subclass that supports attribute style access. + +Authors: + +* Fernando Perez (original) +* Brian Granger (refactoring to a dict subclass) +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2008-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  
The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +__all__ = ['Struct'] + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + + +class Struct(dict): +    """A dict subclass with attribute style access. + +    This dict subclass has a a few extra features: + +    * Attribute style access. +    * Protection of class members (like keys, items) when using attribute +      style access. +    * The ability to restrict assignment to only existing keys. +    * Intelligent merging. +    * Overloaded operators. +    """ +    _allownew = True +    def __init__(self, *args, **kw): +        """Initialize with a dictionary, another Struct, or data. + +        Parameters +        ---------- +        args : dict, Struct +            Initialize with one dict or Struct +        kw : dict +            Initialize with key, value pairs. + +        Examples +        -------- + +        >>> s = Struct(a=10,b=30) +        >>> s.a +        10 +        >>> s.b +        30 +        >>> s2 = Struct(s,c=30) +        >>> sorted(s2.keys()) +        ['a', 'b', 'c'] +        """ +        object.__setattr__(self, '_allownew', True) +        dict.__init__(self, *args, **kw) + +    def __setitem__(self, key, value): +        """Set an item with check for allownew. + +        Examples +        -------- + +        >>> s = Struct() +        >>> s['a'] = 10 +        >>> s.allow_new_attr(False) +        >>> s['a'] = 10 +        >>> s['a'] +        10 +        >>> try: +        ...     s['b'] = 20 +        ... except KeyError: +        ...     print('this is not allowed') +        ... 
+        this is not allowed +        """ +        if not self._allownew and key not in self: +            raise KeyError( +                "can't create new attribute %s when allow_new_attr(False)" % key) +        dict.__setitem__(self, key, value) + +    def __setattr__(self, key, value): +        """Set an attr with protection of class members. + +        This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to +        :exc:`AttributeError`. + +        Examples +        -------- + +        >>> s = Struct() +        >>> s.a = 10 +        >>> s.a +        10 +        >>> try: +        ...     s.get = 10 +        ... except AttributeError: +        ...     print("you can't set a class member") +        ... +        you can't set a class member +        """ +        # If key is an str it might be a class member or instance var +        if isinstance(key, str): +            # I can't simply call hasattr here because it calls getattr, which +            # calls self.__getattr__, which returns True for keys in +            # self._data.  But I only want keys in the class and in +            # self.__dict__ +            if key in self.__dict__ or hasattr(Struct, key): +                raise AttributeError( +                    'attr %s is a protected member of class Struct.' % key +                ) +        try: +            self.__setitem__(key, value) +        except KeyError as e: +            raise AttributeError(e) + +    def __getattr__(self, key): +        """Get an attr by calling :meth:`dict.__getitem__`. + +        Like :meth:`__setattr__`, this method converts :exc:`KeyError` to +        :exc:`AttributeError`. + +        Examples +        -------- + +        >>> s = Struct(a=10) +        >>> s.a +        10 +        >>> type(s.get) +        <... 'builtin_function_or_method'> +        >>> try: +        ...     s.b +        ... except AttributeError: +        ...     print("I don't have that key") +        ... 
+        I don't have that key +        """ +        try: +            result = self[key] +        except KeyError: +            raise AttributeError(key) +        else: +            return result + +    def __iadd__(self, other): +        """s += s2 is a shorthand for s.merge(s2). + +        Examples +        -------- + +        >>> s = Struct(a=10,b=30) +        >>> s2 = Struct(a=20,c=40) +        >>> s += s2 +        >>> sorted(s.keys()) +        ['a', 'b', 'c'] +        """ +        self.merge(other) +        return self + +    def __add__(self,other): +        """s + s2 -> New Struct made from s.merge(s2). + +        Examples +        -------- + +        >>> s1 = Struct(a=10,b=30) +        >>> s2 = Struct(a=20,c=40) +        >>> s = s1 + s2 +        >>> sorted(s.keys()) +        ['a', 'b', 'c'] +        """ +        sout = self.copy() +        sout.merge(other) +        return sout + +    def __sub__(self,other): +        """s1 - s2 -> remove keys in s2 from s1. + +        Examples +        -------- + +        >>> s1 = Struct(a=10,b=30) +        >>> s2 = Struct(a=40) +        >>> s = s1 - s2 +        >>> s +        {'b': 30} +        """ +        sout = self.copy() +        sout -= other +        return sout + +    def __isub__(self,other): +        """Inplace remove keys from self that are in other. + +        Examples +        -------- + +        >>> s1 = Struct(a=10,b=30) +        >>> s2 = Struct(a=40) +        >>> s1 -= s2 +        >>> s1 +        {'b': 30} +        """ +        for k in other.keys(): +            if k in self: +                del self[k] +        return self + +    def __dict_invert(self, data): +        """Helper function for merge. + +        Takes a dictionary whose values are lists and returns a dict with +        the elements of each list as keys and the original keys as values. 
+        """ +        outdict = {} +        for k,lst in data.items(): +            if isinstance(lst, str): +                lst = lst.split() +            for entry in lst: +                outdict[entry] = k +        return outdict + +    def dict(self): +        return self + +    def copy(self): +        """Return a copy as a Struct. + +        Examples +        -------- + +        >>> s = Struct(a=10,b=30) +        >>> s2 = s.copy() +        >>> type(s2) is Struct +        True +        """ +        return Struct(dict.copy(self)) + +    def hasattr(self, key): +        """hasattr function available as a method. + +        Implemented like has_key. + +        Examples +        -------- + +        >>> s = Struct(a=10) +        >>> s.hasattr('a') +        True +        >>> s.hasattr('b') +        False +        >>> s.hasattr('get') +        False +        """ +        return key in self + +    def allow_new_attr(self, allow = True): +        """Set whether new attributes can be created in this Struct. + +        This can be used to catch typos by verifying that the attribute user +        tries to change already exists in this Struct. +        """ +        object.__setattr__(self, '_allownew', allow) + +    def merge(self, __loc_data__=None, __conflict_solve=None, **kw): +        """Merge two Structs with customizable conflict resolution. + +        This is similar to :meth:`update`, but much more flexible. First, a +        dict is made from data+key=value pairs. When merging this dict with +        the Struct S, the optional dictionary 'conflict' is used to decide +        what to do. + +        If conflict is not given, the default behavior is to preserve any keys +        with their current value (the opposite of the :meth:`update` method's +        behavior). + +        Parameters +        ---------- +        __loc_data : dict, Struct +            The data to merge into self +        __conflict_solve : dict +            The conflict policy dict.  
The keys are binary functions used to +            resolve the conflict and the values are lists of strings naming +            the keys the conflict resolution function applies to.  Instead of +            a list of strings a space separated string can be used, like +            'a b c'. +        kw : dict +            Additional key, value pairs to merge in + +        Notes +        ----- + +        The `__conflict_solve` dict is a dictionary of binary functions which will be used to +        solve key conflicts.  Here is an example:: + +            __conflict_solve = dict( +                func1=['a','b','c'], +                func2=['d','e'] +            ) + +        In this case, the function :func:`func1` will be used to resolve +        keys 'a', 'b' and 'c' and the function :func:`func2` will be used for +        keys 'd' and 'e'.  This could also be written as:: + +            __conflict_solve = dict(func1='a b c',func2='d e') + +        These functions will be called for each key they apply to with the +        form:: + +            func1(self['a'], other['a']) + +        The return value is used as the final merged value. + +        As a convenience, merge() provides five (the most commonly needed) +        pre-defined policies: preserve, update, add, add_flip and add_s. The +        easiest explanation is their implementation:: + +            preserve = lambda old,new: old +            update   = lambda old,new: new +            add      = lambda old,new: old + new +            add_flip = lambda old,new: new + old  # note change of order! +            add_s    = lambda old,new: old + ' ' + new  # only for str! + +        You can use those four words (as strings) as keys instead +        of defining them as functions, and the merge method will substitute +        the appropriate functions for you. + +        For more complicated conflict resolution policies, you still need to +        construct your own functions. 
+ +        Examples +        -------- + +        This show the default policy: + +        >>> s = Struct(a=10,b=30) +        >>> s2 = Struct(a=20,c=40) +        >>> s.merge(s2) +        >>> sorted(s.items()) +        [('a', 10), ('b', 30), ('c', 40)] + +        Now, show how to specify a conflict dict: + +        >>> s = Struct(a=10,b=30) +        >>> s2 = Struct(a=20,b=40) +        >>> conflict = {'update':'a','add':'b'} +        >>> s.merge(s2,conflict) +        >>> sorted(s.items()) +        [('a', 20), ('b', 70)] +        """ + +        data_dict = dict(__loc_data__,**kw) + +        # policies for conflict resolution: two argument functions which return +        # the value that will go in the new struct +        preserve = lambda old,new: old +        update   = lambda old,new: new +        add      = lambda old,new: old + new +        add_flip = lambda old,new: new + old  # note change of order! +        add_s    = lambda old,new: old + ' ' + new + +        # default policy is to keep current keys when there's a conflict +        conflict_solve = dict.fromkeys(self, preserve) + +        # the conflict_solve dictionary is given by the user 'inverted': we +        # need a name-function mapping, it comes as a function -> names +        # dict. Make a local copy (b/c we'll make changes), replace user +        # strings for the three builtin policies and invert it. 
+        if __conflict_solve: +            inv_conflict_solve_user = __conflict_solve.copy() +            for name, func in [('preserve',preserve), ('update',update), +                               ('add',add), ('add_flip',add_flip), +                               ('add_s',add_s)]: +                if name in inv_conflict_solve_user.keys(): +                    inv_conflict_solve_user[func] = inv_conflict_solve_user[name] +                    del inv_conflict_solve_user[name] +            conflict_solve.update(self.__dict_invert(inv_conflict_solve_user)) +        for key in data_dict: +            if key not in self: +                self[key] = data_dict[key] +            else: +                self[key] = conflict_solve[key](self[key],data_dict[key]) + diff --git a/contrib/python/ipython/py2/IPython/utils/jsonutil.py b/contrib/python/ipython/py2/IPython/utils/jsonutil.py new file mode 100644 index 00000000000..c3ee93859e3 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/jsonutil.py @@ -0,0 +1,5 @@ +from warnings import warn + +warn("IPython.utils.jsonutil has moved to jupyter_client.jsonutil") + +from jupyter_client.jsonutil import * diff --git a/contrib/python/ipython/py2/IPython/utils/localinterfaces.py b/contrib/python/ipython/py2/IPython/utils/localinterfaces.py new file mode 100644 index 00000000000..89b8fdeb54d --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/localinterfaces.py @@ -0,0 +1,5 @@ +from warnings import warn + +warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces") + +from jupyter_client.localinterfaces import * diff --git a/contrib/python/ipython/py2/IPython/utils/log.py b/contrib/python/ipython/py2/IPython/utils/log.py new file mode 100644 index 00000000000..3eb9bdadd80 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/log.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import + +from warnings import warn + +warn("IPython.utils.log has moved to traitlets.log") + +from 
"""Utility functions for finding modules

Utility functions for finding modules on sys.path.

`find_mod` finds named module on sys.path.

`get_init` helper function that finds __init__ file in a directory.

`find_module` variant of imp.find_module in std_lib that only returns
path to module and not an open file object as well.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------

# Stdlib imports
import os

# `imp` is deprecated since Python 3.4 and removed in 3.12; import it lazily
# so the module stays importable and only the imp-based lookups fail.
try:
    import imp
except ImportError:
    imp = None


def find_module(name, path=None):
    """imp.find_module variant that only returns the path of a module.

    The `imp.find_module` returns a filehandle that we are not interested in.
    Also we ignore any bytecode files that `imp.find_module` finds.

    Parameters
    ----------
    name : str
        name of module to locate
    path : list of str
        list of paths to search for `name`. If path=None then search sys.path

    Returns
    -------
    filename : str or None
        Full path of module, or None if the module is missing or does not
        have a .py or .pyw extension
    """
    if name is None:
        return None
    try:
        file, filename, _ = imp.find_module(name, path)
    except ImportError:
        return None
    if file is None:
        # Package directory: there is no file handle to close.
        return filename
    file.close()
    # BUG FIX: the original accepted ".pyc" here, contradicting both the
    # docstring and the "ignore any bytecode files" contract above.
    if os.path.splitext(filename)[1] in (".py", ".pyw"):
        return filename
    return None


def get_init(dirname):
    """Get __init__ file path for module directory

    Parameters
    ----------
    dirname : str
        Find the __init__ file in directory `dirname`

    Returns
    -------
    init_path : str or None
        Path to the __init__ file, or None if no __init__.py/.pyw exists.
    """
    fbase = os.path.join(dirname, "__init__")
    for ext in (".py", ".pyw"):
        fname = fbase + ext
        if os.path.isfile(fname):
            return fname
    return None


def find_mod(module_name):
    """Find module `module_name` on sys.path

    Return the path to module `module_name`. If `module_name` refers to
    a module directory then return the path to its __init__ file. Return
    None if the module is missing or does not have a .py or .pyw extension.
    We are not interested in running bytecode.

    Parameters
    ----------
    module_name : str

    Returns
    -------
    modulepath : str or None
        Path to module `module_name`, or None if any dotted component
        cannot be found.
    """
    parts = module_name.split(".")
    basepath = find_module(parts[0])
    for submodname in parts[1:]:
        if basepath is None:
            # BUG FIX: a missing parent package used to be passed on as
            # [None] to imp.find_module; bail out early instead.
            return None
        basepath = find_module(submodname, [basepath])
    if basepath and os.path.isdir(basepath):
        basepath = get_init(basepath)
    return basepath
+    """ +    parts = module_name.split(".") +    basepath = find_module(parts[0]) +    for submodname in parts[1:]: +        basepath = find_module(submodname, [basepath]) +    if basepath and os.path.isdir(basepath): +        basepath = get_init(basepath) +    return basepath diff --git a/contrib/python/ipython/py2/IPython/utils/openpy.py b/contrib/python/ipython/py2/IPython/utils/openpy.py new file mode 100644 index 00000000000..0a7cc0f00e3 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/openpy.py @@ -0,0 +1,249 @@ +""" +Tools to open .py files as Unicode, using the encoding specified within the file, +as per PEP 263. + +Much of the code is taken from the tokenize module in Python 3.2. +""" +from __future__ import absolute_import + +import io +from io import TextIOWrapper, BytesIO +import os.path +import re + +from .py3compat import unicode_type + +cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE) +cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE) + +try: +    # Available in Python 3 +    from tokenize import detect_encoding +except ImportError: +    from codecs import lookup, BOM_UTF8 +     +    # Copied from Python 3.2 tokenize +    def _get_normal_name(orig_enc): +        """Imitates get_normal_name in tokenizer.c.""" +        # Only care about the first 12 characters. +        enc = orig_enc[:12].lower().replace("_", "-") +        if enc == "utf-8" or enc.startswith("utf-8-"): +            return "utf-8" +        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ +           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): +            return "iso-8859-1" +        return orig_enc +     +    # Copied from Python 3.2 tokenize +    def detect_encoding(readline): +        """ +        The detect_encoding() function is used to detect the encoding that should +        be used to decode a Python source file.  It requires one argment, readline, +        in the same way as the tokenize() generator. 
+ +        It will call readline a maximum of twice, and return the encoding used +        (as a string) and a list of any lines (left as bytes) it has read in. + +        It detects the encoding from the presence of a utf-8 bom or an encoding +        cookie as specified in pep-0263.  If both a bom and a cookie are present, +        but disagree, a SyntaxError will be raised.  If the encoding cookie is an +        invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found, +        'utf-8-sig' is returned. + +        If no encoding is specified, then the default of 'utf-8' will be returned. +        """ +        bom_found = False +        encoding = None +        default = 'utf-8' +        def read_or_stop(): +            try: +                return readline() +            except StopIteration: +                return b'' + +        def find_cookie(line): +            try: +                line_string = line.decode('ascii') +            except UnicodeDecodeError: +                return None + +            matches = cookie_re.findall(line_string) +            if not matches: +                return None +            encoding = _get_normal_name(matches[0]) +            try: +                codec = lookup(encoding) +            except LookupError: +                # This behaviour mimics the Python interpreter +                raise SyntaxError("unknown encoding: " + encoding) + +            if bom_found: +                if codec.name != 'utf-8': +                    # This behaviour mimics the Python interpreter +                    raise SyntaxError('encoding problem: utf-8') +                encoding += '-sig' +            return encoding + +        first = read_or_stop() +        if first.startswith(BOM_UTF8): +            bom_found = True +            first = first[3:] +            default = 'utf-8-sig' +        if not first: +            return default, [] + +        encoding = find_cookie(first) +        if encoding: +            return 
encoding, [first] + +        second = read_or_stop() +        if not second: +            return default, [first] + +        encoding = find_cookie(second) +        if encoding: +            return encoding, [first, second] + +        return default, [first, second] + +try: +    # Available in Python 3.2 and above. +    from tokenize import open +except ImportError: +    # Copied from Python 3.2 tokenize +    def open(filename): +        """Open a file in read only mode using the encoding detected by +        detect_encoding(). +        """ +        buffer = io.open(filename, 'rb')   # Tweaked to use io.open for Python 2 +        encoding, lines = detect_encoding(buffer.readline) +        buffer.seek(0) +        text = TextIOWrapper(buffer, encoding, line_buffering=True) +        text.mode = 'r' +        return text    + +def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): +    """Converts a bytes string with python source code to unicode. + +    Unicode strings are passed through unchanged. Byte strings are checked +    for the python source file encoding cookie to determine encoding. +    txt can be either a bytes buffer or a string containing the source +    code. +    """ +    if isinstance(txt, unicode_type): +        return txt +    if isinstance(txt, bytes): +        buffer = BytesIO(txt) +    else: +        buffer = txt +    try: +        encoding, _ = detect_encoding(buffer.readline) +    except SyntaxError: +        encoding = "ascii" +    buffer.seek(0) +    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) +    text.mode = 'r' +    if skip_encoding_cookie: +        return u"".join(strip_encoding_cookie(text)) +    else: +        return text.read() + +def strip_encoding_cookie(filelike): +    """Generator to pull lines from a text-mode file, skipping the encoding +    cookie if it is found in the first two lines. 
+    """ +    it = iter(filelike) +    try: +        first = next(it) +        if not cookie_comment_re.match(first): +            yield first +        second = next(it) +        if not cookie_comment_re.match(second): +            yield second +    except StopIteration: +        return +     +    for line in it: +        yield line + +def read_py_file(filename, skip_encoding_cookie=True): +    """Read a Python file, using the encoding declared inside the file. +     +    Parameters +    ---------- +    filename : str +      The path to the file to read. +    skip_encoding_cookie : bool +      If True (the default), and the encoding declaration is found in the first +      two lines, that line will be excluded from the output - compiling a +      unicode string with an encoding declaration is a SyntaxError in Python 2. +     +    Returns +    ------- +    A unicode string containing the contents of the file. +    """ +    with open(filename) as f:   # the open function defined in this module. +        if skip_encoding_cookie: +            return "".join(strip_encoding_cookie(f)) +        else: +            return f.read() + +def read_py_url(url, errors='replace', skip_encoding_cookie=True): +    """Read a Python file from a URL, using the encoding declared inside the file. +     +    Parameters +    ---------- +    url : str +      The URL from which to fetch the file. +    errors : str +      How to handle decoding errors in the file. Options are the same as for +      bytes.decode(), but here 'replace' is the default. +    skip_encoding_cookie : bool +      If True (the default), and the encoding declaration is found in the first +      two lines, that line will be excluded from the output - compiling a +      unicode string with an encoding declaration is a SyntaxError in Python 2. +     +    Returns +    ------- +    A unicode string containing the contents of the file. 
+    """ +    # Deferred import for faster start +    try: +        from urllib.request import urlopen # Py 3 +    except ImportError: +        from urllib import urlopen +    response = urlopen(url) +    buffer = io.BytesIO(response.read()) +    return source_to_unicode(buffer, errors, skip_encoding_cookie) + +def _list_readline(x): +    """Given a list, returns a readline() function that returns the next element +    with each call. +    """ +    x = iter(x) +    def readline(): +        return next(x) +    return readline + +# Code for going between .py files and cached .pyc files ---------------------- + +try:    # Python 3.2, see PEP 3147 +    try:  +        from importlib.util import source_from_cache, cache_from_source +    except ImportError : +        ## deprecated since 3.4 +        from imp import source_from_cache, cache_from_source +except ImportError: +    # Python <= 3.1: .pyc files go next to .py +    def source_from_cache(path): +        basename, ext = os.path.splitext(path) +        if ext not in ('.pyc', '.pyo'): +            raise ValueError('Not a cached Python file extension', ext) +        # Should we look for .pyw files? +        return basename + '.py' +     +    def cache_from_source(path, debug_override=None): +        if debug_override is None: +            debug_override = __debug__ +        basename, ext = os.path.splitext(path) +        return basename + '.pyc' if debug_override else '.pyo' diff --git a/contrib/python/ipython/py2/IPython/utils/path.py b/contrib/python/ipython/py2/IPython/utils/path.py new file mode 100644 index 00000000000..fa850812c7f --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/path.py @@ -0,0 +1,447 @@ +# encoding: utf-8 +""" +Utilities for path handling. +""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. 
# encoding: utf-8
"""
Utilities for path handling.
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import errno
import glob
import os
import random
import shutil
import sys
from hashlib import md5
from warnings import warn

from IPython.utils.process import system
from IPython.utils import py3compat
from IPython.utils.decorators import undoc

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

fs_encoding = sys.getfilesystemencoding()

def _writable_dir(path):
    """Whether `path` is a directory, to which the user has write access."""
    return os.path.isdir(path) and os.access(path, os.W_OK)

if sys.platform == 'win32':
    def _get_long_path_name(path):
        """Get a long path name (expand ~) on Windows using ctypes.

        Examples
        --------

        >>> get_long_path_name('c:\\docume~1')
        u'c:\\\\Documents and Settings'

        """
        try:
            import ctypes
        except ImportError:
            raise ImportError('you need to have ctypes installed for this to work')
        _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
        _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
            ctypes.c_uint ]

        buf = ctypes.create_unicode_buffer(260)
        rv = _GetLongPathName(path, buf, 260)
        # 0 means the call failed, >260 means the buffer was too small;
        # fall back to the unexpanded path in both cases.
        if rv == 0 or rv > 260:
            return path
        else:
            return buf.value
else:
    def _get_long_path_name(path):
        """Dummy no-op."""
        return path


def get_long_path_name(path):
    """Expand a path into its long form.

    On Windows this expands any ~ in the paths. On other platforms, it is
    a null operation.
    """
    return _get_long_path_name(path)


def unquote_filename(name, win32=(sys.platform=='win32')):
    """ On Windows, remove leading and trailing quotes from filenames.

    This function has been deprecated and should not be used any more:
    unquoting is now taken care of by :func:`IPython.utils.process.arg_split`.
    """
    warn("'unquote_filename' is deprecated since IPython 5.0 and should not "
         "be used anymore", DeprecationWarning, stacklevel=2)
    if win32:
        if name.startswith(("'", '"')) and name.endswith(("'", '"')):
            name = name[1:-1]
    return name


def compress_user(path):
    """Reverse of :func:`os.path.expanduser`
    """
    path = py3compat.unicode_to_str(path, sys.getfilesystemencoding())
    home = os.path.expanduser('~')
    if path.startswith(home):
        path = "~" + path[len(home):]
    return path

def get_py_filename(name, force_win32=None):
    """Return a valid python filename in the current directory.

    If the given name is not a file, it adds '.py' and searches again.
    Raises IOError with an informative message if the file isn't found.
    """

    name = os.path.expanduser(name)
    if force_win32 is not None:
        warn("The 'force_win32' argument to 'get_py_filename' is deprecated "
             "since IPython 5.0 and should not be used anymore",
            DeprecationWarning, stacklevel=2)
    if not os.path.isfile(name) and not name.endswith('.py'):
        name += '.py'
    if os.path.isfile(name):
        return name
    else:
        raise IOError('File `%r` not found.' % name)


def filefind(filename, path_dirs=None):
    """Find a file by looking through a sequence of paths.

    This iterates through a sequence of paths looking for a file and returns
    the full, absolute path of the first occurrence of the file.  If no set of
    path dirs is given, the filename is tested as is, after running through
    :func:`expandvars` and :func:`expanduser`.  Thus a simple call::

        filefind('myfile.txt')

    will find the file in the current working dir, but::

        filefind('~/myfile.txt')

    Will find the file in the users home directory.  This function does not
    automatically try any paths, such as the cwd or the user's home directory.

    Parameters
    ----------
    filename : str
        The filename to look for.
    path_dirs : str, None or sequence of str
        The sequence of paths to look for the file in.  If None, the filename
        need to be absolute or be in the cwd.  If a string, the string is
        put into a sequence and the searched.  If a sequence, walk through
        each element and join with ``filename``, calling :func:`expandvars`
        and :func:`expanduser` before testing for existence.

    Returns
    -------
    Raises :exc:`IOError` or returns absolute path to file.
    """

    # If paths are quoted, abspath gets confused, strip them...
    filename = filename.strip('"').strip("'")
    # If the input is an absolute path, just check it exists
    if os.path.isabs(filename) and os.path.isfile(filename):
        return filename

    if path_dirs is None:
        path_dirs = ("",)
    elif isinstance(path_dirs, py3compat.string_types):
        path_dirs = (path_dirs,)

    for path in path_dirs:
        if path == '.':
            path = py3compat.getcwd()
        testname = expand_path(os.path.join(path, filename))
        if os.path.isfile(testname):
            return os.path.abspath(testname)

    raise IOError("File %r does not exist in any of the search paths: %r" %
                  (filename, path_dirs) )


class HomeDirError(Exception):
    pass


def get_home_dir(require_writable=False):
    """Return the 'home' directory, as a unicode string.

    Uses os.path.expanduser('~'), and checks for writability.

    See stdlib docs for how this is determined.
    $HOME is first priority on *ALL* platforms.

    Parameters
    ----------

    require_writable : bool [default: False]
        if True:
            guarantees the return value is a writable directory, otherwise
            raises HomeDirError
        if False:
            The path is resolved, but it is not guaranteed to exist or be writable.
    """

    homedir = os.path.expanduser('~')
    # Next line will make things work even when /home/ is a symlink to
    # /usr/home as it is on FreeBSD, for example
    homedir = os.path.realpath(homedir)

    if not _writable_dir(homedir) and os.name == 'nt':
        # expanduser failed, use the registry to get the 'My Documents' folder.
        try:
            try:
                import winreg as wreg  # Py 3
            except ImportError:
                import _winreg as wreg  # Py 2
            key = wreg.OpenKey(
                wreg.HKEY_CURRENT_USER,
                # BUG FIX: raw string -- "\M", "\W", "\C", "\E", "\S" are
                # invalid escape sequences that only worked by accident.
                r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
            )
            homedir = wreg.QueryValueEx(key,'Personal')[0]
            key.Close()
        except Exception:
            # Best effort only; but no longer a bare except that would also
            # swallow KeyboardInterrupt/SystemExit.
            pass

    if (not require_writable) or _writable_dir(homedir):
        return py3compat.cast_unicode(homedir, fs_encoding)
    else:
        raise HomeDirError('%s is not a writable dir, '
                'set $HOME environment variable to override' % homedir)

def get_xdg_dir():
    """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.

    This is only for non-OS X posix (Linux,Unix,etc.) systems.
    """

    env = os.environ

    if os.name == 'posix' and sys.platform != 'darwin':
        # Linux, Unix, AIX, etc.
        # use ~/.config if empty OR not set
        xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
        if xdg and _writable_dir(xdg):
            return py3compat.cast_unicode(xdg, fs_encoding)

    return None


def get_xdg_cache_dir():
    """Return the XDG_CACHE_HOME, if it is defined and exists, else None.

    This is only for non-OS X posix (Linux,Unix,etc.) systems.
    """

    env = os.environ

    if os.name == 'posix' and sys.platform != 'darwin':
        # Linux, Unix, AIX, etc.
        # use ~/.cache if empty OR not set
        xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
        if xdg and _writable_dir(xdg):
            return py3compat.cast_unicode(xdg, fs_encoding)

    return None


@undoc
def get_ipython_dir():
    warn("get_ipython_dir has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
    from IPython.paths import get_ipython_dir
    return get_ipython_dir()

@undoc
def get_ipython_cache_dir():
    warn("get_ipython_cache_dir has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
    from IPython.paths import get_ipython_cache_dir
    return get_ipython_cache_dir()

@undoc
def get_ipython_package_dir():
    warn("get_ipython_package_dir has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
    from IPython.paths import get_ipython_package_dir
    return get_ipython_package_dir()

@undoc
def get_ipython_module_path(module_str):
    warn("get_ipython_module_path has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
    from IPython.paths import get_ipython_module_path
    return get_ipython_module_path(module_str)

@undoc
def locate_profile(profile='default'):
    warn("locate_profile has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
    from IPython.paths import locate_profile
    return locate_profile(profile=profile)
+ +def expand_path(s): +    """Expand $VARS and ~names in a string, like a shell + +    :Examples: + +       In [2]: os.environ['FOO']='test' + +       In [3]: expand_path('variable FOO is $FOO') +       Out[3]: 'variable FOO is test' +    """ +    # This is a pretty subtle hack. When expand user is given a UNC path +    # on Windows (\\server\share$\%username%), os.path.expandvars, removes +    # the $ to get (\\server\share\%username%). I think it considered $ +    # alone an empty var. But, we need the $ to remains there (it indicates +    # a hidden share). +    if os.name=='nt': +        s = s.replace('$\\', 'IPYTHON_TEMP') +    s = os.path.expandvars(os.path.expanduser(s)) +    if os.name=='nt': +        s = s.replace('IPYTHON_TEMP', '$\\') +    return s + + +def unescape_glob(string): +    """Unescape glob pattern in `string`.""" +    def unescape(s): +        for pattern in '*[]!?': +            s = s.replace(r'\{0}'.format(pattern), pattern) +        return s +    return '\\'.join(map(unescape, string.split('\\\\'))) + + +def shellglob(args): +    """ +    Do glob expansion for each element in `args` and return a flattened list. + +    Unmatched glob pattern will remain as-is in the returned list. + +    """ +    expanded = [] +    # Do not unescape backslash in Windows as it is interpreted as +    # path separator: +    unescape = unescape_glob if sys.platform != 'win32' else lambda x: x +    for a in args: +        expanded.extend(glob.glob(a) or [unescape(a)]) +    return expanded + + +def target_outdated(target,deps): +    """Determine whether a target is out of date. + +    target_outdated(target,deps) -> 1/0 + +    deps: list of filenames which MUST exist. +    target: single filename which may or may not exist. + +    If target doesn't exist or is older than any file listed in deps, return +    true, otherwise return false. 
+    """ +    try: +        target_time = os.path.getmtime(target) +    except os.error: +        return 1 +    for dep in deps: +        dep_time = os.path.getmtime(dep) +        if dep_time > target_time: +            #print "For target",target,"Dep failed:",dep # dbg +            #print "times (dep,tar):",dep_time,target_time # dbg +            return 1 +    return 0 + + +def target_update(target,deps,cmd): +    """Update a target with a given command given a list of dependencies. + +    target_update(target,deps,cmd) -> runs cmd if target is outdated. + +    This is just a wrapper around target_outdated() which calls the given +    command if target is outdated.""" + +    if target_outdated(target,deps): +        system(cmd) + +@undoc +def filehash(path): +    """Make an MD5 hash of a file, ignoring any differences in line +    ending characters.""" +    warn("filehash() is deprecated since IPython 4.0", DeprecationWarning, stacklevel=2) +    with open(path, "rU") as f: +        return md5(py3compat.str_to_bytes(f.read())).hexdigest() + +ENOLINK = 1998 + +def link(src, dst): +    """Hard links ``src`` to ``dst``, returning 0 or errno. + +    Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't +    supported by the operating system. +    """ + +    if not hasattr(os, "link"): +        return ENOLINK +    link_errno = 0 +    try: +        os.link(src, dst) +    except OSError as e: +        link_errno = e.errno +    return link_errno + + +def link_or_copy(src, dst): +    """Attempts to hardlink ``src`` to ``dst``, copying if the link fails. + +    Attempts to maintain the semantics of ``shutil.copy``. + +    Because ``os.link`` does not overwrite files, a unique temporary file +    will be used if the target already exists, then that file will be moved +    into place. 
+    """ + +    if os.path.isdir(dst): +        dst = os.path.join(dst, os.path.basename(src)) + +    link_errno = link(src, dst) +    if link_errno == errno.EEXIST: +        if os.stat(src).st_ino == os.stat(dst).st_ino: +            # dst is already a hard link to the correct file, so we don't need +            # to do anything else. If we try to link and rename the file +            # anyway, we get duplicate files - see http://bugs.python.org/issue21876 +            return + +        new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), ) +        try: +            link_or_copy(src, new_dst) +        except: +            try: +                os.remove(new_dst) +            except OSError: +                pass +            raise +        os.rename(new_dst, dst) +    elif link_errno != 0: +        # Either link isn't supported, or the filesystem doesn't support +        # linking, or 'src' and 'dst' are on different filesystems. +        shutil.copy(src, dst) + +def ensure_dir_exists(path, mode=0o755): +    """ensure that a directory exists + +    If it doesn't exist, try to create it and protect against a race condition +    if another process is doing the same. + +    The default permissions are 755, which differ from os.makedirs default of 777. 
+    """ +    if not os.path.exists(path): +        try: +            os.makedirs(path, mode=mode) +        except OSError as e: +            if e.errno != errno.EEXIST: +                raise +    elif not os.path.isdir(path): +        raise IOError("%r exists but is not a directory" % path) diff --git a/contrib/python/ipython/py2/IPython/utils/pickleutil.py b/contrib/python/ipython/py2/IPython/utils/pickleutil.py new file mode 100644 index 00000000000..665ff09f2d4 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/pickleutil.py @@ -0,0 +1,5 @@ +from warnings import warn + +warn("IPython.utils.pickleutil has moved to ipykernel.pickleutil") + +from ipykernel.pickleutil import * diff --git a/contrib/python/ipython/py2/IPython/utils/process.py b/contrib/python/ipython/py2/IPython/utils/process.py new file mode 100644 index 00000000000..a274f43f3a4 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/process.py @@ -0,0 +1,106 @@ +# encoding: utf-8 +""" +Utilities for working with external processes. +""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. + +from __future__ import print_function + +import os +import sys + +if sys.platform == 'win32': +    from ._process_win32 import system, getoutput, arg_split, check_pid +elif sys.platform == 'cli': +    from ._process_cli import system, getoutput, arg_split, check_pid +else: +    from ._process_posix import system, getoutput, arg_split, check_pid + +from ._process_common import getoutputerror, get_output_error_code, process_handler +from . import py3compat + + +class FindCmdError(Exception): +    pass + + +def find_cmd(cmd): +    """Find absolute path to executable cmd in a cross platform manner. + +    This function tries to determine the full path to a command line program +    using `which` on Unix/Linux/OS X and `win32api` on Windows.  Most of the +    time it will use the version that is first on the users `PATH`. 
def find_cmd(cmd):
    """Find absolute path to executable cmd in a cross platform manner.

    This function tries to determine the full path to a command line program
    using `which` on Unix/Linux/OS X and `win32api` on Windows.  Most of the
    time it will use the version that is first on the users `PATH`.

    Warning, don't use this to find IPython command line programs as there
    is a risk you will find the wrong one.  Instead find those using the
    following code and looking for the application itself::

        from IPython.utils.path import get_ipython_module_path
        from IPython.utils.process import pycmd2argv
        argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))

    Parameters
    ----------
    cmd : str
        The command line program to look for.
    """
    located = py3compat.which(cmd)
    if located is None:
        raise FindCmdError('command could not be found: %s' % cmd)
    return located


def is_cmd_found(cmd):
    """Check whether executable `cmd` exists or not and return a bool."""
    try:
        find_cmd(cmd)
    except FindCmdError:
        return False
    return True


def pycmd2argv(cmd):
    r"""Take the path of a python command and return a list (argv-style).

    This only works on Python based command line programs and will find the
    location of the ``python`` executable using ``sys.executable`` to make
    sure the right version is used.

    For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
    .com or .bat, and [, cmd] otherwise.

    Parameters
    ----------
    cmd : string
      The path of the command.

    Returns
    -------
    argv-style list.
    """
    _, ext = os.path.splitext(cmd)
    if ext in ('.exe', '.com', '.bat'):
        return [cmd]
    return [sys.executable, cmd]
def abbrev_cwd():
    """ Return abbreviated version of cwd, e.g. d:mydir """
    cwd = py3compat.getcwd().replace('\\', '/')
    drive = ''
    tail = cwd
    if sys.platform == 'win32':
        if len(cwd) < 4:
            return cwd
        drive, tail = os.path.splitdrive(cwd)

    segments = tail.split('/')
    if len(segments) > 2:
        # Keep only the last two path components.
        tail = '/'.join(segments[-2:])

    if cwd == '/':
        return drive + '/'
    return drive + tail
# coding: utf-8
"""Compatibility tricks for Python 3. Mainly to do with unicode."""
import functools
import os
import sys
import re
import shutil
import types
import platform

from .encoding import DEFAULT_ENCODING

def no_code(x, encoding=None):
    """Identity codec: return `x` unchanged."""
    return x

def decode(s, encoding=None):
    """Decode bytes `s` to text, replacing undecodable characters."""
    return s.decode(encoding or DEFAULT_ENCODING, "replace")

def encode(u, encoding=None):
    """Encode text `u` to bytes, replacing unencodable characters."""
    return u.encode(encoding or DEFAULT_ENCODING, "replace")


def cast_unicode(s, encoding=None):
    """Coerce `s` to text, decoding only if it is bytes."""
    return decode(s, encoding) if isinstance(s, bytes) else s

def cast_bytes(s, encoding=None):
    """Coerce `s` to bytes, encoding only if it is not bytes already."""
    return s if isinstance(s, bytes) else encode(s, encoding)

def buffer_to_bytes(buf):
    """Cast a buffer object to bytes"""
    return buf if isinstance(buf, bytes) else bytes(buf)

def _modify_str_or_docstring(str_change_func):
    """Decorator factory: apply `str_change_func` either to a plain string or
    to a function's docstring, returning the string or function respectively."""
    @functools.wraps(str_change_func)
    def wrapper(func_or_str):
        if isinstance(func_or_str, string_types):
            func, doc = None, func_or_str
        else:
            func, doc = func_or_str, func_or_str.__doc__

        # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly
        if doc is not None:
            doc = str_change_func(doc)

        if func:
            func.__doc__ = doc
            return func
        return doc
    return wrapper

def safe_unicode(e):
    """unicode(e) with various fallbacks. Used for exceptions, which may not be
    safe to call unicode() on.
    """
    for converter in (unicode_type,
                      lambda obj: str_to_unicode(str(obj)),
                      lambda obj: str_to_unicode(repr(obj))):
        try:
            return converter(e)
        except UnicodeError:
            pass

    return u'Unrecoverably corrupt evalue'

# shutil.which from Python 3.4
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.

    This is a backport of shutil.which from Python 3.4
    """
    def _access_check(fn, mode):
        # Reject directories explicitly: on Windows they pass os.access.
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # A command with a directory part is checked directly (covers ./script)
    # rather than searched on PATH.
    if os.path.dirname(cmd):
        return cmd if _access_check(cmd, mode) else None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    dirs = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in dirs:
            dirs.insert(0, os.curdir)

        # PATHEXT tells us which suffixes are executable on Windows.  If cmd
        # already ends with one (e.g. "python.exe"), try it as-is only;
        # otherwise try each suffix in turn.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            candidates = [cmd]
        else:
            candidates = [cmd + ext for ext in pathext]
    else:
        # Elsewhere there is no PATHEXT equivalent: test cmd as-is.
        candidates = [cmd]

    seen = set()
    for directory in dirs:
        normdir = os.path.normcase(directory)
        if normdir in seen:
            continue
        seen.add(normdir)
        for candidate in candidates:
            full = os.path.join(directory, candidate)
            if _access_check(full, mode):
                return full
    return None
+    def input(prompt=''): +        return builtin_mod.input(prompt) +     +    builtin_mod_name = "builtins" +    import builtins as builtin_mod +     +    str_to_unicode = no_code +    unicode_to_str = no_code +    str_to_bytes = encode +    bytes_to_str = decode +    cast_bytes_py2 = no_code +    cast_unicode_py2 = no_code +    buffer_to_bytes_py2 = no_code +     +    string_types = (str,) +    unicode_type = str +     +    which = shutil.which +     +    def isidentifier(s, dotted=False): +        if dotted: +            return all(isidentifier(a) for a in s.split(".")) +        return s.isidentifier() + +    xrange = range +    def iteritems(d): return iter(d.items()) +    def itervalues(d): return iter(d.values()) +    getcwd = os.getcwd +     +    MethodType = types.MethodType + +    def execfile(fname, glob, loc=None, compiler=None): +        loc = loc if (loc is not None) else glob +        with open(fname, 'rb') as f: +            compiler = compiler or compile +            exec(compiler(f.read(), fname, 'exec'), glob, loc) +     +    # Refactor print statements in doctests. +    _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE) +    def _print_statement_sub(match): +        expr = match.groups('expr') +        return "print(%s)" % expr +     +    @_modify_str_or_docstring +    def doctest_refactor_print(doc): +        """Refactor 'print x' statements in a doctest to print(x) style. 2to3 +        unfortunately doesn't pick up on our doctests. 
+         +        Can accept a string or a function, so it can be used as a decorator.""" +        return _print_statement_re.sub(_print_statement_sub, doc) +     +    # Abstract u'abc' syntax: +    @_modify_str_or_docstring +    def u_format(s): +        """"{u}'abc'" --> "'abc'" (Python 3) +         +        Accepts a string or a function, so it can be used as a decorator.""" +        return s.format(u='') +     +    def get_closure(f): +        """Get a function's closure attribute""" +        return f.__closure__ + +else: +    PY3 = False +     +    # keep reference to builtin_mod because the kernel overrides that value +    # to forward requests to a frontend. +    def input(prompt=''): +        return builtin_mod.raw_input(prompt) +     +    builtin_mod_name = "__builtin__" +    import __builtin__ as builtin_mod +     +    str_to_unicode = decode +    unicode_to_str = encode +    str_to_bytes = no_code +    bytes_to_str = no_code +    cast_bytes_py2 = cast_bytes +    cast_unicode_py2 = cast_unicode +    buffer_to_bytes_py2 = buffer_to_bytes +     +    string_types = (str, unicode) +    unicode_type = unicode +     +    import re +    _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") +    def isidentifier(s, dotted=False): +        if dotted: +            return all(isidentifier(a) for a in s.split(".")) +        return bool(_name_re.match(s)) +     +    xrange = xrange +    def iteritems(d): return d.iteritems() +    def itervalues(d): return d.itervalues() +    getcwd = os.getcwdu + +    def MethodType(func, instance): +        return types.MethodType(func, instance, type(instance)) +     +    def doctest_refactor_print(func_or_str): +        return func_or_str + +    def get_closure(f): +        """Get a function's closure attribute""" +        return f.func_closure +     +    which = _shutil_which + +    # Abstract u'abc' syntax: +    @_modify_str_or_docstring +    def u_format(s): +        """"{u}'abc'" --> "u'abc'" (Python 2) +         +        Accepts 
a string or a function, so it can be used as a decorator.""" +        return s.format(u='u') + +    if sys.platform == 'win32': +        def execfile(fname, glob=None, loc=None, compiler=None): +            loc = loc if (loc is not None) else glob +            scripttext = builtin_mod.open(fname).read()+ '\n' +            # compile converts unicode filename to str assuming +            # ascii. Let's do the conversion before calling compile +            if isinstance(fname, unicode): +                filename = unicode_to_str(fname) +            else: +                filename = fname +            compiler = compiler or compile +            exec(compiler(scripttext, filename, 'exec'), glob, loc) + +    else: +        def execfile(fname, glob=None, loc=None, compiler=None): +            if isinstance(fname, unicode): +                filename = fname.encode(sys.getfilesystemencoding()) +            else: +                filename = fname +            where = [ns for ns in [glob, loc] if ns is not None] +            if compiler is None: +                builtin_mod.execfile(filename, *where) +            else: +                scripttext = builtin_mod.open(fname).read().rstrip() + '\n' +                exec(compiler(scripttext, filename, 'exec'), glob, loc) + + +PY2 = not PY3 +PYPY = platform.python_implementation() == "PyPy" + + +def annotate(**kwargs): +    """Python 3 compatible function annotation for Python 2.""" +    if not kwargs: +        raise ValueError('annotations must be provided as keyword arguments') +    def dec(f): +        if hasattr(f, '__annotations__'): +            for k, v in kwargs.items(): +                f.__annotations__[k] = v +        else: +            f.__annotations__ = kwargs +        return f +    return dec + + +# Parts below taken from six: +# Copyright (c) 2010-2013 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the 
"Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +def with_metaclass(meta, *bases): +    """Create a base class with a metaclass.""" +    return meta("_NewBase", bases, {}) diff --git a/contrib/python/ipython/py2/IPython/utils/rlineimpl.py b/contrib/python/ipython/py2/IPython/utils/rlineimpl.py new file mode 100644 index 00000000000..e1cf03942cd --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/rlineimpl.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +""" Imports and provides the 'correct' version of readline for the platform. + +Readline is used throughout IPython as:: + +    import IPython.utils.rlineimpl as readline + +In addition to normal readline stuff, this module provides have_readline +boolean and _outputfile variable used in IPython.utils. 
+""" + +import sys +import warnings + +_rlmod_names = ['gnureadline', 'readline'] + +have_readline = False +for _rlmod_name in _rlmod_names: +    try: +        # import readline as _rl +        _rl = __import__(_rlmod_name) +        # from readline import * +        globals().update({k:v for k,v in _rl.__dict__.items() if not k.startswith('_')}) +    except ImportError: +        pass +    else: +        have_readline = True +        break + +if have_readline and (sys.platform == 'win32' or sys.platform == 'cli'): +    try: +        _outputfile=_rl.GetOutputFile() +    except AttributeError: +        warnings.warn("Failed GetOutputFile") +        have_readline = False + +# Test to see if libedit is being used instead of GNU readline. +# Thanks to Boyd Waters for the original patch. +uses_libedit = False + +if have_readline: +    # Official Python docs state that 'libedit' is in the docstring for libedit readline: +    uses_libedit = _rl.__doc__ and 'libedit' in _rl.__doc__ +    # Note that many non-System Pythons also do not use proper readline, +    # but do not report libedit at all, nor are they linked dynamically against libedit. +    # known culprits of this include: EPD, Fink +    # There is not much we can do to detect this, until we find a specific failure +    # case, rather than relying on the readline module to self-identify as broken. 
+ +if uses_libedit and sys.platform == 'darwin': +    _rl.parse_and_bind("bind ^I rl_complete") +    warnings.warn('\n'.join(['', "*"*78, +        "libedit detected - readline will not be well behaved, including but not limited to:", +        "   * crashes on tab completion", +        "   * incorrect history navigation", +        "   * corrupting long-lines", +        "   * failure to wrap or indent lines properly", +        "It is highly recommended that you install gnureadline, which is installable with:", +        "     pip install gnureadline", +        "*"*78]), +        RuntimeWarning) + +# the clear_history() function was only introduced in Python 2.4 and is +# actually optional in the readline API, so we must explicitly check for its +# existence.  Some known platforms actually don't have it.  This thread: +# http://mail.python.org/pipermail/python-dev/2003-August/037845.html +# has the original discussion. + +if have_readline: +    try: +        _rl.clear_history +    except AttributeError: +        def clear_history(): pass +        _rl.clear_history = clear_history diff --git a/contrib/python/ipython/py2/IPython/utils/sentinel.py b/contrib/python/ipython/py2/IPython/utils/sentinel.py new file mode 100644 index 00000000000..dc57a2591ca --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/sentinel.py @@ -0,0 +1,17 @@ +"""Sentinel class for constants with useful reprs""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. 
+ +class Sentinel(object): + +    def __init__(self, name, module, docstring=None): +        self.name = name +        self.module = module +        if docstring: +            self.__doc__ = docstring + + +    def __repr__(self): +        return str(self.module)+'.'+self.name + diff --git a/contrib/python/ipython/py2/IPython/utils/shimmodule.py b/contrib/python/ipython/py2/IPython/utils/shimmodule.py new file mode 100644 index 00000000000..8b74f5011a7 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/shimmodule.py @@ -0,0 +1,92 @@ +"""A shim module for deprecated imports +""" +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. + +import sys +import types + +from .importstring import import_item + +class ShimWarning(Warning): +    """A warning to show when a module has moved, and a shim is in its place.""" + +class ShimImporter(object): +    """Import hook for a shim. +     +    This ensures that submodule imports return the real target module, +    not a clone that will confuse `is` and `isinstance` checks. 
+    """ +    def __init__(self, src, mirror): +        self.src = src +        self.mirror = mirror +     +    def _mirror_name(self, fullname): +        """get the name of the mirrored module""" +         +        return self.mirror + fullname[len(self.src):] + +    def find_module(self, fullname, path=None): +        """Return self if we should be used to import the module.""" +        if fullname.startswith(self.src + '.'): +            mirror_name = self._mirror_name(fullname) +            try: +                mod = import_item(mirror_name) +            except ImportError: +                return +            else: +                if not isinstance(mod, types.ModuleType): +                    # not a module +                    return None +                return self + +    def load_module(self, fullname): +        """Import the mirrored module, and insert it into sys.modules""" +        mirror_name = self._mirror_name(fullname) +        mod = import_item(mirror_name) +        sys.modules[fullname] = mod +        return mod + + +class ShimModule(types.ModuleType): + +    def __init__(self, *args, **kwargs): +        self._mirror = kwargs.pop("mirror") +        src = kwargs.pop("src", None) +        if src: +            kwargs['name'] = src.rsplit('.', 1)[-1] +        super(ShimModule, self).__init__(*args, **kwargs) +        # add import hook for descendent modules +        if src: +            sys.meta_path.append( +                ShimImporter(src=src, mirror=self._mirror) +            ) +     +    @property +    def __path__(self): +        return [] +     +    @property +    def __spec__(self): +        """Don't produce __spec__ until requested""" +        return __import__(self._mirror).__spec__ +     +    def __dir__(self): +        return dir(__import__(self._mirror)) +     +    @property +    def __all__(self): +        """Ensure __all__ is always defined""" +        mod = __import__(self._mirror) +        try: +            return mod.__all__ +      
  except AttributeError: +            return [name for name in dir(mod) if not name.startswith('_')] + +    def __getattr__(self, key): +        # Use the equivalent of import_item(name), see below +        name = "%s.%s" % (self._mirror, key) +        try: +            return import_item(name) +        except ImportError: +            raise AttributeError(key) diff --git a/contrib/python/ipython/py2/IPython/utils/signatures.py b/contrib/python/ipython/py2/IPython/utils/signatures.py new file mode 100644 index 00000000000..dedc51cfda5 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/signatures.py @@ -0,0 +1,11 @@ +"""Function signature objects for callables. + +Use the standard library version if available, as it is more up to date. +Fallback on backport otherwise. +""" + + +try: +    from inspect import BoundArguments, Parameter, Signature, signature +except ImportError: +    from ._signatures import  BoundArguments, Parameter, Signature, signature diff --git a/contrib/python/ipython/py2/IPython/utils/strdispatch.py b/contrib/python/ipython/py2/IPython/utils/strdispatch.py new file mode 100644 index 00000000000..d6bf510535e --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/strdispatch.py @@ -0,0 +1,68 @@ +"""String dispatch class to match regexps and dispatch commands. +""" + +# Stdlib imports +import re + +# Our own modules +from IPython.core.hooks import CommandChainDispatcher + +# Code begins +class StrDispatch(object): +    """Dispatch (lookup) a set of strings / regexps for match. 
+ +    Example: + +    >>> dis = StrDispatch() +    >>> dis.add_s('hei',34, priority = 4) +    >>> dis.add_s('hei',123, priority = 2) +    >>> dis.add_re('h.i', 686) +    >>> print(list(dis.flat_matches('hei'))) +    [123, 34, 686] +    """ + +    def __init__(self): +        self.strs = {} +        self.regexs = {} + +    def add_s(self, s, obj, priority= 0 ): +        """ Adds a target 'string' for dispatching """ + +        chain = self.strs.get(s, CommandChainDispatcher()) +        chain.add(obj,priority) +        self.strs[s] = chain + +    def add_re(self, regex, obj, priority= 0 ): +        """ Adds a target regexp for dispatching """ + +        chain = self.regexs.get(regex, CommandChainDispatcher()) +        chain.add(obj,priority) +        self.regexs[regex] = chain + +    def dispatch(self, key): +        """ Get a seq of Commandchain objects that match key """ +        if key in self.strs: +            yield self.strs[key] + +        for r, obj in self.regexs.items(): +            if re.match(r, key): +                yield obj +            else: +                #print "nomatch",key  # dbg +                pass + +    def __repr__(self): +        return "<Strdispatch %s, %s>" % (self.strs, self.regexs) + +    def s_matches(self, key): +        if key not in self.strs: +             return +        for el in self.strs[key]: +            yield el[1] + +    def flat_matches(self, key): +        """ Yield all 'value' targets, without priority """ +        for val in self.dispatch(key): +            for el in val: +                yield el[1] # only value, no priority +        return diff --git a/contrib/python/ipython/py2/IPython/utils/sysinfo.py b/contrib/python/ipython/py2/IPython/utils/sysinfo.py new file mode 100644 index 00000000000..db7f2914d40 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/sysinfo.py @@ -0,0 +1,167 @@ +# encoding: utf-8 +""" +Utilities for getting information about IPython and the system it's running in. 
+""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2008-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +import os +import platform +import pprint +import sys +import subprocess + +from IPython.core import release +from IPython.utils import py3compat, _sysinfo, encoding + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + +def pkg_commit_hash(pkg_path): +    """Get short form of commit hash given directory `pkg_path` + +    We get the commit hash from (in order of preference): + +    * IPython.utils._sysinfo.commit +    * git output, if we are in a git repository + +    If these fail, we return a not-found placeholder tuple + +    Parameters +    ---------- +    pkg_path : str +       directory containing package +       only used for getting commit from active repo + +    Returns +    ------- +    hash_from : str +       Where we got the hash from - description +    hash_str : str +       short form of hash +    """ +    # Try and get commit from written commit text file +    if _sysinfo.commit: +        return "installation", _sysinfo.commit + +    # maybe we are in a repository +    proc = subprocess.Popen('git rev-parse --short HEAD', +                            stdout=subprocess.PIPE, +                            stderr=subprocess.PIPE, +                            cwd=pkg_path, shell=True) +    repo_commit, _ = proc.communicate() +    if repo_commit: +        return 'repository', repo_commit.strip().decode('ascii') 
+    return '(none found)', u'<not found>' + + +def pkg_info(pkg_path): +    """Return dict describing the context of this package + +    Parameters +    ---------- +    pkg_path : str +       path containing __init__.py for package + +    Returns +    ------- +    context : dict +       with named parameters of interest +    """ +    src, hsh = pkg_commit_hash(pkg_path) +    return dict( +        ipython_version=release.version, +        ipython_path=pkg_path, +        commit_source=src, +        commit_hash=hsh, +        sys_version=sys.version, +        sys_executable=sys.executable, +        sys_platform=sys.platform, +        platform=platform.platform(), +        os_name=os.name, +        default_encoding=encoding.DEFAULT_ENCODING, +        ) + +def get_sys_info(): +    """Return useful information about IPython and the system, as a dict.""" +    p = os.path +    path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..')))) +    return pkg_info(path) + [email protected]_refactor_print +def sys_info(): +    """Return useful information about IPython and the system, as a string. 
+ +    Examples +    -------- +    :: +     +        In [2]: print sys_info() +        {'commit_hash': '144fdae',      # random +         'commit_source': 'repository', +         'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython', +         'ipython_version': '0.11.dev', +         'os_name': 'posix', +         'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick', +         'sys_executable': '/usr/bin/python', +         'sys_platform': 'linux2', +         'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'} +    """ +    return pprint.pformat(get_sys_info()) + +def _num_cpus_unix(): +    """Return the number of active CPUs on a Unix system.""" +    return os.sysconf("SC_NPROCESSORS_ONLN") + + +def _num_cpus_darwin(): +    """Return the number of active CPUs on a Darwin system.""" +    p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE) +    return p.stdout.read() + + +def _num_cpus_windows(): +    """Return the number of active CPUs on a Windows system.""" +    return os.environ.get("NUMBER_OF_PROCESSORS") + + +def num_cpus(): +   """Return the effective number of CPUs in the system as an integer. + +   This cross-platform function makes an attempt at finding the total number of +   available CPUs in the system, as returned by various underlying system and +   python calls. + +   If it can't find a sensible answer, it returns 1 (though an error *may* make +   it return a large positive number that's actually incorrect). +   """ + +   # Many thanks to the Parallel Python project (http://www.parallelpython.com) +   # for the names of the keys we needed to look up for this function.  This +   # code was inspired by their equivalent function. 
+ +   ncpufuncs = {'Linux':_num_cpus_unix, +                'Darwin':_num_cpus_darwin, +                'Windows':_num_cpus_windows +                } + +   ncpufunc = ncpufuncs.get(platform.system(), +                            # default to unix version (Solaris, AIX, etc) +                            _num_cpus_unix) + +   try: +       ncpus = max(1,int(ncpufunc())) +   except: +       ncpus = 1 +   return ncpus + diff --git a/contrib/python/ipython/py2/IPython/utils/syspathcontext.py b/contrib/python/ipython/py2/IPython/utils/syspathcontext.py new file mode 100644 index 00000000000..89612038ff1 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/syspathcontext.py @@ -0,0 +1,71 @@ +# encoding: utf-8 +""" +Context managers for adding things to sys.path temporarily. + +Authors: + +* Brian Granger +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2008-2011  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. 
+#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +import sys + +from IPython.utils.py3compat import cast_bytes_py2 + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + +class appended_to_syspath(object): +    """A context for appending a directory to sys.path for a second.""" + +    def __init__(self, dir): +        self.dir = cast_bytes_py2(dir, sys.getdefaultencoding()) + +    def __enter__(self): +        if self.dir not in sys.path: +            sys.path.append(self.dir) +            self.added = True +        else: +            self.added = False + +    def __exit__(self, type, value, traceback): +        if self.added: +            try: +                sys.path.remove(self.dir) +            except ValueError: +                pass +        # Returning False causes any exceptions to be re-raised. +        return False + +class prepended_to_syspath(object): +    """A context for prepending a directory to sys.path for a second.""" + +    def __init__(self, dir): +        self.dir = cast_bytes_py2(dir, sys.getdefaultencoding()) + +    def __enter__(self): +        if self.dir not in sys.path: +            sys.path.insert(0,self.dir) +            self.added = True +        else: +            self.added = False + +    def __exit__(self, type, value, traceback): +        if self.added: +            try: +                sys.path.remove(self.dir) +            except ValueError: +                pass +        # Returning False causes any exceptions to be re-raised. 
+        return False diff --git a/contrib/python/ipython/py2/IPython/utils/tempdir.py b/contrib/python/ipython/py2/IPython/utils/tempdir.py new file mode 100644 index 00000000000..951abd65c9b --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/tempdir.py @@ -0,0 +1,145 @@ +"""TemporaryDirectory class, copied from Python 3.2. + +This is copied from the stdlib and will be standard in Python 3.2 and onwards. +""" +from __future__ import print_function + +import os as _os +import warnings as _warnings +import sys as _sys + +# This code should only be used in Python versions < 3.2, since after that we +# can rely on the stdlib itself. +try: +    from tempfile import TemporaryDirectory + +except ImportError: +    from tempfile import mkdtemp, template + +    class TemporaryDirectory(object): +        """Create and return a temporary directory.  This has the same +        behavior as mkdtemp but can be used as a context manager.  For +        example: + +            with TemporaryDirectory() as tmpdir: +                ... + +        Upon exiting the context, the directory and everthing contained +        in it are removed. 
+        """ + +        def __init__(self, suffix="", prefix=template, dir=None): +            self.name = mkdtemp(suffix, prefix, dir) +            self._closed = False + +        def __enter__(self): +            return self.name + +        def cleanup(self, _warn=False): +            if self.name and not self._closed: +                try: +                    self._rmtree(self.name) +                except (TypeError, AttributeError) as ex: +                    # Issue #10188: Emit a warning on stderr +                    # if the directory could not be cleaned +                    # up due to missing globals +                    if "None" not in str(ex): +                        raise +                    print("ERROR: {!r} while cleaning up {!r}".format(ex, self,), +                          file=_sys.stderr) +                    return +                self._closed = True +                if _warn: +                    self._warn("Implicitly cleaning up {!r}".format(self), +                               Warning) + +        def __exit__(self, exc, value, tb): +            self.cleanup() + +        def __del__(self): +            # Issue a ResourceWarning if implicit cleanup needed +            self.cleanup(_warn=True) + + +        # XXX (ncoghlan): The following code attempts to make +        # this class tolerant of the module nulling out process +        # that happens during CPython interpreter shutdown +        # Alas, it doesn't actually manage it. See issue #10188 +        _listdir = staticmethod(_os.listdir) +        _path_join = staticmethod(_os.path.join) +        _isdir = staticmethod(_os.path.isdir) +        _remove = staticmethod(_os.remove) +        _rmdir = staticmethod(_os.rmdir) +        _os_error = _os.error +        _warn = _warnings.warn + +        def _rmtree(self, path): +            # Essentially a stripped down version of shutil.rmtree.  We can't +            # use globals because they may be None'ed out at shutdown. 
+            for name in self._listdir(path): +                fullname = self._path_join(path, name) +                try: +                    isdir = self._isdir(fullname) +                except self._os_error: +                    isdir = False +                if isdir: +                    self._rmtree(fullname) +                else: +                    try: +                        self._remove(fullname) +                    except self._os_error: +                        pass +            try: +                self._rmdir(path) +            except self._os_error: +                pass + + +class NamedFileInTemporaryDirectory(object): + +    def __init__(self, filename, mode='w+b', bufsize=-1, **kwds): +        """ +        Open a file named `filename` in a temporary directory. + +        This context manager is preferred over `NamedTemporaryFile` in +        stdlib `tempfile` when one needs to reopen the file. + +        Arguments `mode` and `bufsize` are passed to `open`. +        Rest of the arguments are passed to `TemporaryDirectory`. + +        """ +        self._tmpdir = TemporaryDirectory(**kwds) +        path = _os.path.join(self._tmpdir.name, filename) +        self.file = open(path, mode, bufsize) + +    def cleanup(self): +        self.file.close() +        self._tmpdir.cleanup() + +    __del__ = cleanup + +    def __enter__(self): +        return self.file + +    def __exit__(self, type, value, traceback): +        self.cleanup() + + +class TemporaryWorkingDirectory(TemporaryDirectory): +    """ +    Creates a temporary directory and sets the cwd to that directory. +    Automatically reverts to previous cwd upon cleanup. +    Usage example: + +        with TemporaryWorkingDirectory() as tmpdir: +            ... 
+    """ +    def __enter__(self): +        self.old_wd = _os.getcwd() +        _os.chdir(self.name) +        return super(TemporaryWorkingDirectory, self).__enter__() + +    def __exit__(self, exc, value, tb): +        _os.chdir(self.old_wd) +        return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb) + diff --git a/contrib/python/ipython/py2/IPython/utils/terminal.py b/contrib/python/ipython/py2/IPython/utils/terminal.py new file mode 100644 index 00000000000..e92c410c79f --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/terminal.py @@ -0,0 +1,125 @@ +# encoding: utf-8 +""" +Utilities for working with terminals. + +Authors: + +* Brian E. Granger +* Fernando Perez +* Alexander Belchenko (e-mail: bialix AT ukr.net) +""" + +from __future__ import absolute_import + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. + +import os +import sys +import warnings +try: +    from shutil import get_terminal_size as _get_terminal_size +except ImportError: +    # use backport on Python 2 +    try: +        from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size +    except ImportError: +        from ._get_terminal_size import get_terminal_size as _get_terminal_size + +from . import py3compat + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + +# This variable is part of the expected API of the module: +ignore_termtitle = True + + + +if os.name == 'posix': +    def _term_clear(): +        os.system('clear') +elif sys.platform == 'win32': +    def _term_clear(): +        os.system('cls') +else: +    def _term_clear(): +        pass + + + +def toggle_set_term_title(val): +    """Control whether set_term_title is active or not. + +    set_term_title() allows writing to the console titlebar.  
# Module-level switch: when True, set_term_title() is a no-op.
ignore_termtitle = True


def toggle_set_term_title(val):
    """Control whether set_term_title is active or not.

    set_term_title() allows writing to the console titlebar.  In embedded
    widgets this can cause problems, so this call can be used to toggle it on
    or off as needed.

    The default state of the module is for the function to be disabled.

    Parameters
    ----------
      val : bool
        If True, set_term_title() actually writes to the terminal (using the
        appropriate platform-specific module).  If False, it is a no-op.
    """
    global ignore_termtitle
    ignore_termtitle = not val


def _set_term_title(*args, **kw):
    """Dummy no-op."""
    pass


def _set_term_title_xterm(title):
    """ Change virtual terminal title in xterm-workalikes """
    sys.stdout.write('\033]0;%s\007' % title)


# Pick the real implementation for this platform; the dummy above stays
# in place for terminals we do not know how to retitle.
if os.name == 'posix':
    TERM = os.environ.get('TERM', '')
    if TERM.startswith('xterm'):
        _set_term_title = _set_term_title_xterm
elif sys.platform == 'win32':
    try:
        import ctypes

        SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
        SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]

        def _set_term_title(title):
            """Set terminal title using ctypes to access the Win32 APIs."""
            SetConsoleTitleW(title)
    except ImportError:
        def _set_term_title(title):
            """Set terminal title using the 'title' command."""
            global ignore_termtitle

            try:
                # Cannot be on network share when issuing system commands
                curr = py3compat.getcwd()
                os.chdir("C:")
                ret = os.system("title " + title)
            finally:
                os.chdir(curr)
            if ret:
                # non-zero return code signals error, don't try again
                ignore_termtitle = True


def set_term_title(title):
    """Set terminal title using the necessary platform-dependent calls."""
    if ignore_termtitle:
        return
    _set_term_title(title)
def freeze_term_title():
    """Deprecated: permanently disable terminal retitling."""
    warnings.warn("This function is deprecated, use toggle_set_term_title()")
    global ignore_termtitle
    ignore_termtitle = True


def get_terminal_size(defaultx=80, defaulty=25):
    """Return the terminal size as a ``(columns, lines)`` pair.

    ``defaultx``/``defaulty`` are used as the fallback when the real size
    cannot be determined (e.g. output is not attached to a terminal).
    """
    fallback = (defaultx, defaulty)
    return _get_terminal_size(fallback)
class LSString(str):
    """String subclass with shell-friendly accessor attributes.

    Behaves exactly like ``str`` but adds lazily computed views:

        .l (or .list) : value as list (split on newlines).
        .n (or .nlstr): original value (the string itself).
        .s (or .spstr): value as whitespace-separated string.
        .p (or .paths): list of path objects (requires path.py package)

    Each derived view is computed at most once and cached on the
    instance.  Such strings are very useful to efficiently interact with
    the shell, which typically only understands whitespace-separated
    options for commands."""

    def get_list(self):
        # Cache under the name-mangled attribute so the first access
        # pays for the split and later accesses are free.
        if not hasattr(self, '_LSString__list'):
            self.__list = self.split('\n')
        return self.__list

    l = list = property(get_list)

    def get_spstr(self):
        if not hasattr(self, '_LSString__spstr'):
            self.__spstr = self.replace('\n', ' ')
        return self.__spstr

    s = spstr = property(get_spstr)

    def get_nlstr(self):
        # The string itself already is the newline-joined form.
        return self

    n = nlstr = property(get_nlstr)

    def get_paths(self):
        if not hasattr(self, '_LSString__paths'):
            self.__paths = [Path(p) for p in self.split('\n')
                            if os.path.exists(p)]
        return self.__paths

    p = paths = property(get_paths)
class SList(list):
    """List subclass with shell-friendly accessor attributes.

    Behaves like a normal list of strings, with extra views:

    * .l (or .list) : value as list (the list itself).
    * .n (or .nlstr): value as a string, joined on newlines.
    * .s (or .spstr): value as a string, joined on spaces.
    * .p (or .paths): list of path objects (requires path.py package)

    Each derived view is computed at most once and cached."""

    def get_list(self):
        return self

    l = list = property(get_list)

    def get_spstr(self):
        if not hasattr(self, '_SList__spstr'):
            self.__spstr = ' '.join(self)
        return self.__spstr

    s = spstr = property(get_spstr)

    def get_nlstr(self):
        if not hasattr(self, '_SList__nlstr'):
            self.__nlstr = '\n'.join(self)
        return self.__nlstr

    n = nlstr = property(get_nlstr)

    def get_paths(self):
        if not hasattr(self, '_SList__paths'):
            self.__paths = [Path(p) for p in self if os.path.exists(p)]
        return self.__paths

    p = paths = property(get_paths)

    def grep(self, pattern, prune=False, field=None):
        """ Return all strings matching 'pattern' (a regex or callable)

        This is case-insensitive. If prune is true, return all items
        NOT matching the pattern.

        If field is specified, the match must occur in the specified
        whitespace-separated field.

        Examples::

            a.grep( lambda x: x.startswith('C') )
            a.grep('Cha.*log', prune=1)
            a.grep('chm', field=-1)
        """

        def pick_field(line):
            # Select the field the match must occur in; out-of-range
            # fields match nothing (empty string).
            if field is None:
                return line
            try:
                return line.split()[field]
            except IndexError:
                return ""

        if isinstance(pattern, py3compat.string_types):
            matcher = lambda s: re.search(pattern, s, re.IGNORECASE)
        else:
            matcher = pattern
        if prune:
            return SList([ln for ln in self if not matcher(pick_field(ln))])
        return SList([ln for ln in self if matcher(pick_field(ln))])

    def fields(self, *fields):
        """ Collect whitespace-separated fields from string list

        Allows quick awk-like usage of string lists.

        Example data (in var a, created by 'a = !ls -l')::

            -rwxrwxrwx  1 ville None      18 Dec 14  2006 ChangeLog
            drwxrwxrwx+ 6 ville None       0 Oct 24 18:05 IPython

        * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
        * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
          (note the joining by space).
        * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``

        IndexErrors are ignored.

        Without args, fields() just split()'s the strings.
        """
        if not fields:
            return [line.split() for line in self]

        out = SList()
        for parts in (line.split() for line in self):
            picked = []
            for idx in fields:
                try:
                    picked.append(parts[idx])
                except IndexError:
                    pass
            if picked:
                out.append(" ".join(picked))
        return out

    def sort(self, field=None, nums=False):
        """ sort by specified fields (see fields())

        Example::

            a.sort(1, nums = True)

        Sorts a by second field, in numerical order (so that 21 > 3)

        """
        # Decorate-sort-undecorate: pair each line with its sort key.
        if field is None:
            keyed = [[line, line] for line in self]
        else:
            keyed = [[SList([line]).fields(field), line] for line in self]
        if nums:
            # Reduce each key to the integer formed by its digits;
            # non-numeric keys sort first as 0.
            for entry in keyed:
                digits = "".join(ch for ch in entry[0] if ch.isdigit())
                try:
                    entry[0] = int(digits)
                except ValueError:
                    entry[0] = 0

        keyed.sort()
        return SList([pair[1] for pair in keyed])
def indent(instr, nspaces=4, ntabs=0, flatten=False):
    """Indent a string a given number of spaces or tabstops.

    indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.

    Parameters
    ----------

    instr : basestring
        The string to be indented.
    nspaces : int (default: 4)
        The number of spaces to be indented.
    ntabs : int (default: 0)
        The number of tabs to be indented.
    flatten : bool (default: False)
        Whether to scrub existing indentation.  If True, all lines will be
        aligned to the same indentation.  If False, existing indentation will
        be strictly increased.

    Returns
    -------

    str|unicode : string indented by ntabs and nspaces.

    """
    if instr is None:
        return
    ind = '\t' * ntabs + ' ' * nspaces
    if flatten:
        # Replace any existing leading whitespace with the new indent.
        pat = re.compile(r'^\s*', re.MULTILINE)
    else:
        pat = re.compile(r'^', re.MULTILINE)
    outstr = re.sub(pat, ind, instr)
    # A trailing newline picks up a dangling indent; trim it off.
    # BUGFIX: guard on a non-empty indent -- with ntabs=0 and nspaces=0
    # the old code did outstr[:-0], which returned '' for any
    # newline-terminated input.
    if ind and outstr.endswith(os.linesep + ind):
        return outstr[:-len(ind)]
    else:
        return outstr


def list_strings(arg):
    """Always return a list of strings, given a string or list of strings
    as input.

    Examples
    --------
    ::

        In [7]: list_strings('A single string')
        Out[7]: ['A single string']

        In [8]: list_strings(['A single string in a list'])
        Out[8]: ['A single string in a list']

        In [9]: list_strings(['A','list','of','strings'])
        Out[9]: ['A', 'list', 'of', 'strings']
    """
    if isinstance(arg, py3compat.string_types):
        return [arg]
    else:
        return arg
def marquee(txt='', width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    Examples
    --------
    ::

        In [16]: marquee('A test',40)
        Out[16]: '**************** A test ****************'

        In [17]: marquee('A test',40,'-')
        Out[17]: '---------------- A test ----------------'

        In [18]: marquee('A test',40,' ')
        Out[18]: '                 A test                 '

    """
    if not txt:
        return (mark * width)[:width]
    # Room left for marks on each side, in units of the mark string.
    nmark = (width - len(txt) - 2) // len(mark) // 2
    if nmark < 0:
        nmark = 0
    banner = mark * nmark
    return '%s %s %s' % (banner, txt, banner)


ini_spaces_re = re.compile(r'^(\s+)')


def num_ini_spaces(strng):
    """Return the number of initial spaces in a string"""
    match = ini_spaces_re.match(strng)
    return match.end() if match else 0


def format_screen(strng):
    """Format a string for screen printing.

    This removes some latex-type format codes."""
    # Drop backslash line-continuations at end of line.
    par_re = re.compile(r'\\$', re.MULTILINE)
    return par_re.sub('', strng)


def dedent(text):
    """Equivalent of textwrap.dedent that ignores unindented first line.

    This means it will still dedent strings like:
    '''foo
    is a bar
    '''

    For use in wrap_paragraphs.
    """
    if text.startswith('\n'):
        # Text starts with a blank line: plain dedent works.
        return textwrap.dedent(text)

    splits = text.split('\n', 1)
    if len(splits) == 1:
        # Single line, nothing to align against.
        return textwrap.dedent(text)

    first, rest = splits
    # Dedent everything except the (unindented) first line.
    return '\n'.join([first, textwrap.dedent(rest)])
def wrap_paragraphs(text, ncols=80):
    """Wrap multiple paragraphs to fit a specified width.

    This is equivalent to textwrap.wrap, but with support for multiple
    paragraphs, as separated by empty lines.

    Returns
    -------

    list of complete paragraphs, wrapped to fill `ncols` columns.
    """
    paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
    text = dedent(text).strip()
    # split() alternates paragraphs and separator groups; keep paragraphs.
    paragraphs = paragraph_re.split(text)[::2]
    indent_re = re.compile(r'\n\s+', re.MULTILINE)
    wrapped = []
    for para in paragraphs:
        # Indentation that survived dedent() is treated as deliberate
        # formatting; only flush paragraphs get re-filled.
        if indent_re.search(para) is None:
            para = textwrap.fill(para, ncols)
        wrapped.append(para)
    return wrapped


def long_substr(data):
    """Return the longest common substring in a list of strings.
    
    Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
    """
    if len(data) == 1:
        return data[0]
    best = ''
    if len(data) > 1 and data[0]:
        first = data[0]
        # Try every start/length pair in the first string, keeping the
        # longest candidate present in every other string.
        for start in range(len(first)):
            for size in range(len(first) - start + 1):
                candidate = first[start:start + size]
                if size > len(best) and all(candidate in x for x in data):
                    best = candidate
    return best
def strip_email_quotes(text):
    """Strip leading email quotation characters ('>').

    Removes any combination of leading '>' interspersed with whitespace that
    appears *identically* in all lines of the input text.  Only the common
    prefix shared by every line is stripped; if any line carries no quote
    mark at all, the text is returned unchanged.

    Parameters
    ----------
    text : str
    """
    lines = text.splitlines()
    prefixes = set()
    for line in lines:
        m = re.match(r'^(\s*>[ >]*)', line)
        if not m:
            # One unquoted line means there is no common quote prefix.
            break
        prefixes.add(m.group(1))
    else:
        # Every line was quoted; strip the longest shared prefix.
        common = long_substr(list(prefixes))
        if common:
            width = len(common)
            text = '\n'.join(line[width:] for line in lines)
    return text


def strip_ansi(source):
    """
    Remove ansi escape codes from text.
    
    Parameters
    ----------
    source : str
        Source to remove the ansi from
    """
    return re.sub(r'\033\[(\d|;)+?m', '', source)


class EvalFormatter(Formatter):
    """A String Formatter that allows evaluation of simple expressions.
    
    Note that this version interprets a : as specifying a format string (as per
    standard string formatting), so if slicing is required, you must explicitly
    create a slice.
    
    This is to be used in templating cases, such as the parallel batch
    script templates, where simple arithmetic on arguments is useful.

    Examples
    --------
    ::

        In [1]: f = EvalFormatter()
        In [2]: f.format('{n//4}', n=8)
        Out[2]: '2'

        In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
        Out[3]: 'll'
    """
    def get_field(self, name, args, kwargs):
        # Evaluate the field as a Python expression in the kwargs namespace.
        value = eval(name, kwargs)
        return value, name
@skip_doctest_py3
class FullEvalFormatter(Formatter):
    """A String Formatter that allows evaluation of simple expressions.
    
    Any time a format key is not found in the kwargs,
    it will be tried as an expression in the kwargs namespace.
    
    Note that this version allows slicing using [1:2], so you cannot specify
    a format string. Use :class:`EvalFormatter` to permit format strings.
    
    Examples
    --------
    ::

        In [1]: f = FullEvalFormatter()
        In [2]: f.format('{n//4}', n=8)
        Out[2]: u'2'

        In [3]: f.format('{list(range(5))[2:4]}')
        Out[3]: u'[2, 3]'

        In [4]: f.format('{3*2}')
        Out[4]: u'6'
    """
    # Derived from Formatter._vformat, altered to eval() each field and to
    # fold any format spec back into the evaluated expression (so slice
    # syntax like [1:2] works inside the braces).
    def vformat(self, format_string, args, kwargs):
        pieces = []
        for literal_text, field_name, format_spec, conversion in \
                self.parse(format_string):

            if literal_text:
                pieces.append(literal_text)

            if field_name is None:
                continue

            expr = field_name
            if format_spec:
                # Re-attach the spec: '{a:b}' becomes the expression 'a:b'.
                expr = ':'.join([expr, format_spec])

            # Evaluate the field in the kwargs namespace.
            obj = eval(expr, kwargs)

            # Apply any !r / !s style conversion.
            obj = self.convert_field(obj, conversion)

            pieces.append(self.format_field(obj, ''))

        return u''.join(py3compat.cast_unicode(s) for s in pieces)
@skip_doctest_py3
class DollarFormatter(FullEvalFormatter):
    """Formatter allowing Itpl style $foo replacement, for names and attribute
    access only. Standard {foo} replacement also works, and allows full
    evaluation of its arguments.

    Examples
    --------
    ::

        In [1]: f = DollarFormatter()
        In [2]: f.format('{n//4}', n=8)
        Out[2]: u'2'

        In [3]: f.format('23 * 76 is $result', result=23*76)
        Out[3]: u'23 * 76 is 1748'

        In [4]: f.format('$a or {b}', a=1, b=2)
        Out[4]: u'1 or 2'
    """
    # BUGFIX: raw string -- '\$', '\w' and '\.' are regex escapes, not
    # string escapes.  As a plain string literal this triggered
    # "invalid escape sequence" DeprecationWarnings on Python 3.6+
    # (a SyntaxError in future versions).
    _dollar_pattern = re.compile(r"(.*?)\$(\$?[\w\.]+)")

    def parse(self, fmt_string):
        """Yield (literal, field, spec, conversion) tuples, turning each
        bare ``$name`` in the literal text into a field of its own."""
        for literal_txt, field_name, format_spec, conversion \
                in Formatter.parse(self, fmt_string):

            # Find $foo patterns in the literal text.
            continue_from = 0
            txt = ""
            for m in self._dollar_pattern.finditer(literal_txt):
                new_txt, new_field = m.group(1, 2)
                # $$foo --> $foo (escaped dollar sign, not a field)
                if new_field.startswith("$"):
                    txt += new_txt + new_field
                else:
                    yield (txt + new_txt, new_field, "", None)
                    txt = ""
                continue_from = m.end()

            # Re-yield the {foo} style pattern
            yield (txt + literal_txt[continue_from:],
                   field_name, format_spec, conversion)
py3compat.xrange(0, len(l), max_rows): +            yield l[i:(i + max_rows)] + + +def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80): +    """Calculate optimal info to columnize a list of string""" +    for max_rows in range(1, len(rlist) + 1): +        col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first))) +        sumlength = sum(col_widths) +        ncols = len(col_widths) +        if sumlength + separator_size * (ncols - 1) <= displaywidth: +            break +    return {'num_columns': ncols, +            'optimal_separator_width': (displaywidth - sumlength) / (ncols - 1) if (ncols - 1) else 0, +            'max_rows': max_rows, +            'column_widths': col_widths +            } + + +def _get_or_default(mylist, i, default=None): +    """return list item number, or default if don't exist""" +    if i >= len(mylist): +        return default +    else : +        return mylist[i] + + +def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) : +    """Returns a nested list, and info to columnize items + +    Parameters +    ---------- + +    items +        list of strings to columize +    row_first : (default False) +        Whether to compute columns for a row-first matrix instead of +        column-first (default). +    empty : (default None) +        default value to fill list if needed +    separator_size : int (default=2) +        How much caracters will be used as a separation between each columns. +    displaywidth : int (default=80) +        The width of the area onto wich the columns should enter + +    Returns +    ------- + +    strings_matrix + +        nested list of string, the outer most list contains as many list as +        rows, the innermost lists have each as many element as colums. If the +        total number of elements in `items` does not equal the product of +        rows*columns, the last element of some lists are filled with `None`. 
def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs):
    """Returns a nested list, and info to columnize items

    Parameters
    ----------

    items
        list of strings to columize
    row_first : (default False)
        Whether to compute columns for a row-first matrix instead of
        column-first (default).
    empty : (default None)
        default value to fill list if needed
    separator_size : int (default=2)
        How much caracters will be used as a separation between each columns.
    displaywidth : int (default=80)
        The width of the area onto wich the columns should enter

    Returns
    -------

    strings_matrix
        nested list of string; the outer list has one entry per row, each
        inner list one entry per column.  When len(items) is smaller than
        rows*columns, the tail entries are filled with `empty`.

    dict_info
        layout info from _find_optimal: num_columns, max_rows,
        column_widths and optimal_separator_width.
    """
    # Layout is decided purely from the string lengths.
    info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
    nrow, ncol = info['max_rows'], info['num_columns']
    if row_first:
        matrix = [[_get_or_default(items, r * ncol + c, default=empty)
                   for c in range(ncol)] for r in range(nrow)]
    else:
        matrix = [[_get_or_default(items, c * nrow + r, default=empty)
                   for c in range(ncol)] for r in range(nrow)]
    return (matrix, info)
def columnize(items, row_first=False, separator='  ', displaywidth=80, spread=False):
    """ Transform a list of strings into a single string with columns.

    Parameters
    ----------
    items : sequence of strings
        The strings to process.

    row_first : (default False)
        Whether to compute columns for a row-first matrix instead of
        column-first (default).

    separator : str, optional [default is two spaces]
        The string that separates columns.

    displaywidth : int, optional [default is 80]
        Width of the display in number of characters.

    Returns
    -------
    The formatted string.
    """
    if not items:
        return '\n'
    matrix, info = compute_item_matrix(items, row_first=row_first,
                                       separator_size=len(separator),
                                       displaywidth=displaywidth)
    if spread:
        separator = separator.ljust(int(info['optimal_separator_width']))
    # Drop the `None` fillers from the trailing cells of each row.
    fmatrix = [filter(None, row) for row in matrix]

    def join_row(row):
        return separator.join(cell.ljust(width, ' ')
                              for cell, width in zip(row, info['column_widths']))

    return '\n'.join(map(join_row, fmatrix)) + '\n'


def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
    """
    Return a string with a natural enumeration of items

    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c and d'
    >>> get_text_list(['a', 'b', 'c'], ' or ')
    'a or b or c'
    >>> get_text_list(['a', 'b'], ' or ')
    'a or b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    >>> get_text_list(['a', 'b'], wrap_item_with="`")
    '`a` and `b`'
    >>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
    'a + b + c = d'
    """
    if not list_:
        return ''
    if wrap_item_with:
        list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with)
                 for item in list_]
    if len(list_) == 1:
        return list_[0]
    # Join all but the last item with `sep`, then attach the last with
    # `last_sep` ('a, b and c').
    return '%s%s%s' % (sep.join(i for i in list_[:-1]),
                       last_sep, list_[-1])
# If possible (Unix), use the resource module instead of time.clock()
try:
    import resource

    def clocku():
        """clocku() -> floating point number

        Return the *USER* CPU time in seconds since the start of the process.
        This is done via a call to resource.getrusage, so it avoids the
        wraparound problems in time.clock()."""
        return resource.getrusage(resource.RUSAGE_SELF)[0]

    def clocks():
        """clocks() -> floating point number

        Return the *SYSTEM* CPU time in seconds since the start of the process.
        This is done via a call to resource.getrusage, so it avoids the
        wraparound problems in time.clock()."""
        return resource.getrusage(resource.RUSAGE_SELF)[1]

    def clock():
        """clock() -> floating point number

        Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
        the process.  This is done via a call to resource.getrusage, so it
        avoids the wraparound problems in time.clock()."""
        usage = resource.getrusage(resource.RUSAGE_SELF)
        return usage[0] + usage[1]

    def clock2():
        """clock2() -> (t_user,t_system)

        Similar to clock(), but return a tuple of user/system times."""
        usage = resource.getrusage(resource.RUSAGE_SELF)
        return usage[0], usage[1]

except ImportError:
    # There is no distinction of user/system time under windows, so we just use
    # time.clock() for everything...
    clocku = clocks = clock = time.clock

    def clock2():
        """Under windows, system CPU time can't be measured.

        This just returns clock() and zero."""
        return time.clock(), 0.0
def timings_out(reps, func, *args, **kw):
    """timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)

    Execute a function reps times, return a tuple with the elapsed total
    CPU time in seconds, the time per call and the function's output.

    Under Unix, the return value is the sum of user+system time consumed by
    the process, computed via the resource module.  This prevents problems
    related to the wraparound effect which the time.clock() function has.

    Under Windows the return value is in wall clock seconds.  See the
    documentation for the time module for more details."""
    reps = int(reps)
    assert reps >= 1, 'reps must be >= 1'
    if reps == 1:
        start = clock()
        out = func(*args, **kw)
        tot_time = clock() - start
    else:
        # Run reps-1 throwaway calls, then one final call whose output
        # we keep; all reps are inside the timed window.
        rng = xrange(reps - 1)
        start = clock()
        for dummy in rng:
            func(*args, **kw)
        out = func(*args, **kw)  # one last time
        tot_time = clock() - start
    av_time = tot_time / reps
    return tot_time, av_time, out


def timings(reps, func, *args, **kw):
    """timings(reps,func,*args,**kw) -> (t_total,t_per_call)

    Execute a function reps times, return a tuple with the elapsed total CPU
    time in seconds and the time per call. These are just the first two values
    in timings_out()."""
    return timings_out(reps, func, *args, **kw)[0:2]


def timing(func, *args, **kw):
    """timing(func,*args,**kw) -> t_total

    Execute a function once, return the elapsed total CPU time in
    seconds. This is just the first value in timings_out()."""
    return timings_out(1, func, *args, **kw)[0]
+ +from __future__ import absolute_import, print_function + +from collections import namedtuple +from io import StringIO +from keyword import iskeyword + +from . import tokenize2 +from .py3compat import cast_unicode_py2 + +Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line']) + +def generate_tokens(readline): +    """wrap generate_tokens to catch EOF errors""" +    try: +        for token in tokenize2.generate_tokens(readline): +            yield token +    except tokenize2.TokenError: +        # catch EOF error +        return + +def line_at_cursor(cell, cursor_pos=0): +    """Return the line in a cell at a given cursor position +     +    Used for calling line-based APIs that don't support multi-line input, yet. +     +    Parameters +    ---------- +     +    cell: str +        multiline block of text +    cursor_pos: integer +        the cursor position +     +    Returns +    ------- +     +    (line, offset): (text, integer) +        The line with the current cursor, and the character offset of the start of the line. +    """ +    offset = 0 +    lines = cell.splitlines(True) +    for line in lines: +        next_offset = offset + len(line) +        if next_offset >= cursor_pos: +            break +        offset = next_offset +    else: +        line = "" +    return (line, offset) + +def token_at_cursor(cell, cursor_pos=0): +    """Get the token at a given cursor +     +    Used for introspection. +     +    Function calls are prioritized, so the token for the callable will be returned +    if the cursor is anywhere inside the call. 
+     +    Parameters +    ---------- +     +    cell : unicode +        A block of Python code +    cursor_pos : int +        The location of the cursor in the block where the token should be found +    """ +    cell = cast_unicode_py2(cell) +    names = [] +    tokens = [] +    call_names = [] +     +    offsets = {1: 0} # lines start at 1 +    for tup in generate_tokens(StringIO(cell).readline): +         +        tok = Token(*tup) +         +        # token, text, start, end, line = tup +        start_line, start_col = tok.start +        end_line, end_col = tok.end +        if end_line + 1 not in offsets: +            # keep track of offsets for each line +            lines = tok.line.splitlines(True) +            for lineno, line in zip(range(start_line + 1, end_line + 2), lines): +                if lineno not in offsets: +                    offsets[lineno] = offsets[lineno-1] + len(line) +         +        offset = offsets[start_line] +        # allow '|foo' to find 'foo' at the beginning of a line +        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos +        if offset + start_col >= boundary: +            # current token starts after the cursor, +            # don't consume it +            break +         +        if tok.token == tokenize2.NAME and not iskeyword(tok.text): +            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.': +                names[-1] = "%s.%s" % (names[-1], tok.text) +            else: +                names.append(tok.text) +        elif tok.token == tokenize2.OP: +            if tok.text == '=' and names: +                # don't inspect the lhs of an assignment +                names.pop(-1) +            if tok.text == '(' and names: +                # if we are inside a function call, inspect the function +                call_names.append(names[-1]) +            elif tok.text == ')' and call_names: +                call_names.pop(-1) +         +        tokens.append(tok) +  
       +        if offsets[end_line] + end_col > cursor_pos: +            # we found the cursor, stop reading +            break +         +    if call_names: +        return call_names[-1] +    elif names: +        return names[-1] +    else: +        return '' +     + diff --git a/contrib/python/ipython/py2/IPython/utils/traitlets.py b/contrib/python/ipython/py2/IPython/utils/traitlets.py new file mode 100644 index 00000000000..b4ff7a2689f --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/traitlets.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import + +from warnings import warn + +warn("IPython.utils.traitlets has moved to a top-level traitlets package.") + +from traitlets import * diff --git a/contrib/python/ipython/py2/IPython/utils/tz.py b/contrib/python/ipython/py2/IPython/utils/tz.py new file mode 100644 index 00000000000..b315d532d12 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/tz.py @@ -0,0 +1,46 @@ +# encoding: utf-8 +""" +Timezone utilities + +Just UTC-awareness right now +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2013 The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. 
+#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +from datetime import tzinfo, timedelta, datetime + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- +# constant for zero offset +ZERO = timedelta(0) + +class tzUTC(tzinfo): +    """tzinfo object for UTC (zero offset)""" + +    def utcoffset(self, d): +        return ZERO + +    def dst(self, d): +        return ZERO + +UTC = tzUTC() + +def utc_aware(unaware): +    """decorator for adding UTC tzinfo to datetime's utcfoo methods""" +    def utc_method(*args, **kwargs): +        dt = unaware(*args, **kwargs) +        return dt.replace(tzinfo=UTC) +    return utc_method + +utcfromtimestamp = utc_aware(datetime.utcfromtimestamp) +utcnow = utc_aware(datetime.utcnow) diff --git a/contrib/python/ipython/py2/IPython/utils/ulinecache.py b/contrib/python/ipython/py2/IPython/utils/ulinecache.py new file mode 100644 index 00000000000..f53b0dde693 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/ulinecache.py @@ -0,0 +1,45 @@ +"""Wrapper around linecache which decodes files to unicode according to PEP 263. + +This is only needed for Python 2 - linecache in Python 3 does the same thing +itself. +""" +import functools +import linecache +import sys + +from IPython.utils import py3compat +from IPython.utils import openpy + +if py3compat.PY3: +    getline = linecache.getline +     +    # getlines has to be looked up at runtime, because doctests monkeypatch it. 
+    @functools.wraps(linecache.getlines) +    def getlines(filename, module_globals=None): +        return linecache.getlines(filename, module_globals=module_globals) + +else: +    def getlines(filename, module_globals=None): +        """Get the lines (as unicode) for a file from the cache. +        Update the cache if it doesn't contain an entry for this file already.""" +        filename = py3compat.cast_bytes(filename, sys.getfilesystemencoding()) +        lines = linecache.getlines(filename, module_globals=module_globals) +         +        # The bits we cache ourselves can be unicode. +        if (not lines) or isinstance(lines[0], py3compat.unicode_type): +            return lines +         +        readline = openpy._list_readline(lines) +        try: +            encoding, _ = openpy.detect_encoding(readline) +        except SyntaxError: +            encoding = 'ascii' +        return [l.decode(encoding, 'replace') for l in lines] + +    # This is a straight copy of linecache.getline +    def getline(filename, lineno, module_globals=None): +        lines = getlines(filename, module_globals) +        if 1 <= lineno <= len(lines): +            return lines[lineno-1] +        else: +            return '' diff --git a/contrib/python/ipython/py2/IPython/utils/version.py b/contrib/python/ipython/py2/IPython/utils/version.py new file mode 100644 index 00000000000..1de0047e6b4 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/version.py @@ -0,0 +1,36 @@ +# encoding: utf-8 +""" +Utilities for version comparison + +It is a bit ridiculous that we need these. +""" + +#----------------------------------------------------------------------------- +#  Copyright (C) 2013  The IPython Development Team +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. 
+#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +from distutils.version import LooseVersion + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + +def check_version(v, check): +    """check version string v >= check + +    If dev/prerelease tags result in TypeError for string-number comparison, +    it is assumed that the dependency is satisfied. +    Users on dev branches are responsible for keeping their own packages up to date. +    """ +    try: +        return LooseVersion(v) >= LooseVersion(check) +    except TypeError: +        return True + diff --git a/contrib/python/ipython/py2/IPython/utils/warn.py b/contrib/python/ipython/py2/IPython/utils/warn.py new file mode 100644 index 00000000000..dd4852227ba --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/warn.py @@ -0,0 +1,65 @@ +# encoding: utf-8 +""" +Utilities for warnings.  Shoudn't we just use the built in warnings module. +""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. + +from __future__ import print_function + +import sys +import warnings + +warnings.warn("The module IPython.utils.warn is deprecated since IPython 4.0, use the standard warnings module instead", DeprecationWarning) + +def warn(msg,level=2,exit_val=1): +    """Deprecated + +    Standard warning printer. Gives formatting consistency. + +    Output is sent to sys.stderr. + +    Options: + +    -level(2): allows finer control: +      0 -> Do nothing, dummy function. +      1 -> Print message. +      2 -> Print 'WARNING:' + message. (Default level). +      3 -> Print 'ERROR:' + message. 
+      4 -> Print 'FATAL ERROR:' + message and trigger a sys.exit(exit_val). + +    -exit_val (1): exit value returned by sys.exit() for a level 4 +    warning. Ignored for all other levels.""" +     +    warnings.warn("The module IPython.utils.warn is deprecated since IPython 4.0, use the standard warnings module instead", DeprecationWarning) +    if level>0: +        header = ['','','WARNING: ','ERROR: ','FATAL ERROR: '] +        print(header[level], msg, sep='', file=sys.stderr) +        if level == 4: +            print('Exiting.\n', file=sys.stderr) +            sys.exit(exit_val) + +             +def info(msg): +    """Deprecated  +     +    Equivalent to warn(msg,level=1).""" + +    warn(msg,level=1) + +     +def error(msg): +    """Deprecated  +     +    Equivalent to warn(msg,level=3).""" + +    warn(msg,level=3) + +     +def fatal(msg,exit_val=1): +    """Deprecated  +     +    Equivalent to warn(msg,exit_val=exit_val,level=4).""" + +    warn(msg,exit_val=exit_val,level=4) diff --git a/contrib/python/ipython/py2/IPython/utils/wildcard.py b/contrib/python/ipython/py2/IPython/utils/wildcard.py new file mode 100644 index 00000000000..d22491bd964 --- /dev/null +++ b/contrib/python/ipython/py2/IPython/utils/wildcard.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +"""Support for wildcard pattern matching in object inspection. + +Authors +------- +- Jörgen Stenarson <[email protected]> +- Thomas Kluyver +""" + +#***************************************************************************** +#       Copyright (C) 2005 Jörgen Stenarson <[email protected]> +# +#  Distributed under the terms of the BSD License.  The full license is in +#  the file COPYING, distributed as part of this software. 
+#***************************************************************************** + +import re +import types + +from IPython.utils.dir2 import dir2 +from .py3compat import iteritems + +def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]): +    """Return dictionaries mapping lower case typename (e.g. 'tuple') to type +    objects from the types package, and vice versa.""" +    typenamelist = [tname for tname in dir(types) if tname.endswith("Type")] +    typestr2type, type2typestr = {}, {} + +    for tname in typenamelist: +        name = tname[:-4].lower()          # Cut 'Type' off the end of the name +        obj = getattr(types, tname) +        typestr2type[name] = obj +        if name not in dont_include_in_type2typestr: +            type2typestr[obj] = name +    return typestr2type, type2typestr + +typestr2type, type2typestr = create_typestr2type_dicts() + +def is_type(obj, typestr_or_type): +    """is_type(obj, typestr_or_type) verifies if obj is of a certain type. It +    can take strings or actual python types for the second argument, i.e. +    'tuple'<->TupleType. 'all' matches all types. + +    TODO: Should be extended for choosing more than one type.""" +    if typestr_or_type == "all": +        return True +    if type(typestr_or_type) == type: +        test_type = typestr_or_type +    else: +        test_type = typestr2type.get(typestr_or_type, False) +    if test_type: +        return isinstance(obj, test_type) +    return False + +def show_hidden(str, show_all=False): +    """Return true for strings starting with single _ if show_all is true.""" +    return show_all or str.startswith("__") or not str.startswith("_") + +def dict_dir(obj): +    """Produce a dictionary of an object's attributes. 
Builds on dir2 by +    checking that a getattr() call actually succeeds.""" +    ns = {} +    for key in dir2(obj): +       # This seemingly unnecessary try/except is actually needed +       # because there is code out there with metaclasses that +       # create 'write only' attributes, where a getattr() call +       # will fail even if the attribute appears listed in the +       # object's dictionary.  Properties can actually do the same +       # thing.  In particular, Traits use this pattern +       try: +           ns[key] = getattr(obj, key) +       except AttributeError: +           pass +    return ns + +def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True, +            show_all=True): +    """Filter a namespace dictionary by name pattern and item type.""" +    pattern = name_pattern.replace("*",".*").replace("?",".") +    if ignore_case: +        reg = re.compile(pattern+"$", re.I) +    else: +        reg = re.compile(pattern+"$") + +    # Check each one matches regex; shouldn't be hidden; of correct type. +    return dict((key,obj) for key, obj in iteritems(ns) if reg.match(key) \ +                                            and show_hidden(key, show_all) \ +                                            and is_type(obj, type_pattern) ) + +def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False): +    """Return dictionary of all objects in a namespace dictionary that match +    type_pattern and filter.""" +    pattern_list=filter.split(".") +    if len(pattern_list) == 1: +       return filter_ns(namespace, name_pattern=pattern_list[0], +                        type_pattern=type_pattern, +                        ignore_case=ignore_case, show_all=show_all) +    else: +        # This is where we can change if all objects should be searched or +        # only modules. 
Just change the type_pattern to module to search only +        # modules +        filtered = filter_ns(namespace, name_pattern=pattern_list[0], +                            type_pattern="all", +                            ignore_case=ignore_case, show_all=show_all) +        results = {} +        for name, obj in iteritems(filtered): +            ns = list_namespace(dict_dir(obj), type_pattern, +                                ".".join(pattern_list[1:]), +                                ignore_case=ignore_case, show_all=show_all) +            for inner_name, inner_obj in iteritems(ns): +                results["%s.%s"%(name,inner_name)] = inner_obj +        return results  | 
