author | Mikhail Borisov <borisov.mikhail@gmail.com> | 2022-02-10 16:45:39 +0300 |
---|---|---|
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:45:39 +0300 |
commit | a6a92afe03e02795227d2641b49819b687f088f8 (patch) | |
tree | f6984a1d27d5a7ec88a6fdd6e20cd5b7693b6ece /contrib/python/jedi | |
parent | c6dc8b8bd530985bc4cce0137e9a5de32f1087cb (diff) | |
download | ydb-a6a92afe03e02795227d2641b49819b687f088f8.tar.gz | |
Restoring authorship annotation for Mikhail Borisov <borisov.mikhail@gmail.com>. Commit 1 of 2.
Diffstat (limited to 'contrib/python/jedi')
39 files changed, 3321 insertions(+), 3321 deletions(-)
diff --git a/contrib/python/jedi/LICENSE.txt b/contrib/python/jedi/LICENSE.txt index 94f954567b..06be7c0b0b 100644 --- a/contrib/python/jedi/LICENSE.txt +++ b/contrib/python/jedi/LICENSE.txt @@ -1,24 +1,24 @@ All contributions towards Jedi are MIT licensed. ------------------------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) <2013> <David Halter and others, see AUTHORS.txt> - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +The MIT License (MIT) + +Copyright (c) <2013> <David Halter and others, see AUTHORS.txt> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/contrib/python/jedi/jedi/__init__.py b/contrib/python/jedi/jedi/__init__.py index d23739bee3..6f423b4ed8 100644 --- a/contrib/python/jedi/jedi/__init__.py +++ b/contrib/python/jedi/jedi/__init__.py @@ -1,46 +1,46 @@ -""" -Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its -historic focus is autocompletion, but does static analysis for now as well. -Jedi is fast and is very well tested. It understands Python on a deeper level -than all other static analysis frameworks for Python. - -Jedi has support for two different goto functions. It's possible to search for -related names and to list all names in a Python file and infer them. Jedi -understands docstrings and you can use Jedi autocompletion in your REPL as -well. - -Jedi uses a very simple API to connect with IDE's. There's a reference -implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_, -which uses Jedi's autocompletion. 
We encourage you to use Jedi in your IDEs. -It's really easy. - -To give you a simple example how you can use the Jedi library, here is an -example for the autocompletion feature: - ->>> import jedi ->>> source = ''' -... import datetime -... datetime.da''' ->>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') ->>> script +""" +Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its +historic focus is autocompletion, but does static analysis for now as well. +Jedi is fast and is very well tested. It understands Python on a deeper level +than all other static analysis frameworks for Python. + +Jedi has support for two different goto functions. It's possible to search for +related names and to list all names in a Python file and infer them. Jedi +understands docstrings and you can use Jedi autocompletion in your REPL as +well. + +Jedi uses a very simple API to connect with IDE's. There's a reference +implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_, +which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. +It's really easy. + +To give you a simple example how you can use the Jedi library, here is an +example for the autocompletion feature: + +>>> import jedi +>>> source = ''' +... import datetime +... datetime.da''' +>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') +>>> script <Script: 'example.py' ...> ->>> completions = script.completions() ->>> completions #doctest: +ELLIPSIS -[<Completion: date>, <Completion: datetime>, ...] ->>> print(completions[0].complete) -te ->>> print(completions[0].name) -date - -As you see Jedi is pretty simple and allows you to concentrate on writing a -good text editor, while still having very good IDE features for Python. -""" - +>>> completions = script.completions() +>>> completions #doctest: +ELLIPSIS +[<Completion: date>, <Completion: datetime>, ...] +>>> print(completions[0].complete) +te +>>> print(completions[0].name) +date + +As you see Jedi is pretty simple and allows you to concentrate on writing a +good text editor, while still having very good IDE features for Python. +""" + __version__ = '0.13.3' - + from jedi.api import Script, Interpreter, set_debug_function, \ preload_module, names -from jedi import settings +from jedi import settings from jedi.api.environment import find_virtualenvs, find_system_environments, \ get_default_environment, InvalidPythonEnvironment, create_environment, \ get_system_environment diff --git a/contrib/python/jedi/jedi/__main__.py b/contrib/python/jedi/jedi/__main__.py index f2ee047769..8498f90762 100644 --- a/contrib/python/jedi/jedi/__main__.py +++ b/contrib/python/jedi/jedi/__main__.py @@ -1,43 +1,43 @@ import sys -from os.path import join, dirname, abspath, isdir - - +from os.path import join, dirname, abspath, isdir + + def _start_linter(): - """ - This is a pre-alpha API. You're not supposed to use it at all, except for - testing. It will very likely change. - """ - import jedi - - if '--debug' in sys.argv: - jedi.set_debug_function() - - for path in sys.argv[2:]: - if path.startswith('--'): - continue - if isdir(path): - import fnmatch - import os - - paths = [] - for root, dirnames, filenames in os.walk(path): - for filename in fnmatch.filter(filenames, '*.py'): - paths.append(os.path.join(root, filename)) - else: - paths = [path] - - try: - for path in paths: - for error in jedi.Script(path=path)._analysis(): - print(error) - except Exception: - if '--pdb' in sys.argv: + """ + This is a pre-alpha API. 
You're not supposed to use it at all, except for + testing. It will very likely change. + """ + import jedi + + if '--debug' in sys.argv: + jedi.set_debug_function() + + for path in sys.argv[2:]: + if path.startswith('--'): + continue + if isdir(path): + import fnmatch + import os + + paths = [] + for root, dirnames, filenames in os.walk(path): + for filename in fnmatch.filter(filenames, '*.py'): + paths.append(os.path.join(root, filename)) + else: + paths = [path] + + try: + for path in paths: + for error in jedi.Script(path=path)._analysis(): + print(error) + except Exception: + if '--pdb' in sys.argv: import traceback traceback.print_exc() - import pdb - pdb.post_mortem() - else: - raise + import pdb + pdb.post_mortem() + else: + raise if len(sys.argv) == 2 and sys.argv[1] == 'repl': diff --git a/contrib/python/jedi/jedi/_compatibility.py b/contrib/python/jedi/jedi/_compatibility.py index f8f93c1063..a14b126ec6 100644 --- a/contrib/python/jedi/jedi/_compatibility.py +++ b/contrib/python/jedi/jedi/_compatibility.py @@ -1,30 +1,30 @@ -""" +""" To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been -created. Clearly there is huge need to use conforming syntax. -""" +created. Clearly there is huge need to use conforming syntax. +""" import errno -import sys -import os -import re +import sys +import os +import re import pkgutil import warnings import inspect import subprocess -try: - import importlib -except ImportError: - pass - -is_py3 = sys.version_info[0] >= 3 +try: + import importlib +except ImportError: + pass + +is_py3 = sys.version_info[0] >= 3 is_py35 = is_py3 and sys.version_info[1] >= 5 py_version = int(str(sys.version_info[0]) + str(sys.version_info[1])) - - + + class DummyFile(object): def __init__(self, loader, string): self.loader = loader self.string = string - + def read(self): return self.loader.get_source(self.string) @@ -64,8 +64,8 @@ def find_module_py34(string, path=None, full_name=None, is_global_search=True): def find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True): loader = loader or importlib.machinery.PathFinder.find_module(string, path) - if loader is None and path is None: # Fallback to find builtins - try: + if loader is None and path is None: # Fallback to find builtins + try: with warnings.catch_warnings(record=True): # Mute "DeprecationWarning: Use importlib.util.find_spec() # instead." While we should replace that in the future, it's @@ -73,17 +73,17 @@ def find_module_py33(string, path=None, loader=None, full_name=None, is_global_s # it was added in Python 3.4 and find_loader hasn't been # removed in 3.6. loader = importlib.find_loader(string) - except ValueError as e: - # See #491. Importlib might raise a ValueError, to avoid this, we - # just raise an ImportError to fix the issue. + except ValueError as e: + # See #491. Importlib might raise a ValueError, to avoid this, we + # just raise an ImportError to fix the issue. 
raise ImportError("Originally " + repr(e)) - - if loader is None: + + if loader is None: raise ImportError("Couldn't find a loader for {}".format(string)) - - try: - is_package = loader.is_package(string) - if is_package: + + try: + is_package = loader.is_package(string) + if is_package: if hasattr(loader, 'path'): module_path = os.path.dirname(loader.path) else: @@ -93,27 +93,27 @@ def find_module_py33(string, path=None, loader=None, full_name=None, is_global_s module_file = DummyFile(loader, string) else: module_file = None - else: - module_path = loader.get_filename(string) + else: + module_path = loader.get_filename(string) module_file = DummyFile(loader, string) - except AttributeError: - # ExtensionLoader has not attribute get_filename, instead it has a - # path attribute that we can use to retrieve the module path - try: - module_path = loader.path + except AttributeError: + # ExtensionLoader has not attribute get_filename, instead it has a + # path attribute that we can use to retrieve the module path + try: + module_path = loader.path module_file = DummyFile(loader, string) - except AttributeError: - module_path = string - module_file = None - finally: - is_package = False - + except AttributeError: + module_path = string + module_file = None + finally: + is_package = False + if hasattr(loader, 'archive'): module_path = loader.archive - return module_file, module_path, is_package - - + return module_file, module_path, is_package + + def find_module_pre_py34(string, path=None, full_name=None, is_global_search=True): # This import is here, because in other places it will raise a # DeprecationWarning. @@ -124,7 +124,7 @@ def find_module_pre_py34(string, path=None, full_name=None, is_global_search=Tru return module_file, module_path, module_type is imp.PKG_DIRECTORY except ImportError: pass - + if path is None: path = sys.path for item in path: @@ -147,20 +147,20 @@ def find_module_pre_py34(string, path=None, full_name=None, is_global_search=Tru except ImportError: pass raise ImportError("No module named {}".format(string)) - + find_module = find_module_py34 if is_py3 else find_module_pre_py34 -find_module.__doc__ = """ -Provides information about a module. - -This function isolates the differences in importing libraries introduced with -python 3.3 on; it gets a module name and optionally a path. It will return a -tuple containin an open file for the module (if not builtin), the filename -or the name of the module if it is a builtin one and a boolean indicating -if the module is contained in a package. -""" - - +find_module.__doc__ = """ +Provides information about a module. + +This function isolates the differences in importing libraries introduced with +python 3.3 on; it gets a module name and optionally a path. It will return a +tuple containin an open file for the module (if not builtin), the filename +or the name of the module if it is a builtin one and a boolean indicating +if the module is contained in a package. 
+""" + + def _iter_modules(paths, prefix=''): # Copy of pkgutil.iter_modules adapted to work with namespaces @@ -236,69 +236,69 @@ else: return [suffix for suffix, _, _ in imp.get_suffixes()] -# unicode function -try: - unicode = unicode -except NameError: - unicode = str - - -# re-raise function -if is_py3: - def reraise(exception, traceback): - raise exception.with_traceback(traceback) -else: - eval(compile(""" -def reraise(exception, traceback): - raise exception, None, traceback -""", 'blub', 'exec')) - -reraise.__doc__ = """ -Re-raise `exception` with a `traceback` object. - -Usage:: - - reraise(Exception, sys.exc_info()[2]) - -""" - - -class Python3Method(object): - def __init__(self, func): - self.func = func - - def __get__(self, obj, objtype): - if obj is None: - return lambda *args, **kwargs: self.func(*args, **kwargs) - else: - return lambda *args, **kwargs: self.func(obj, *args, **kwargs) - - -def use_metaclass(meta, *bases): - """ Create a class with a metaclass. """ - if not bases: - bases = (object,) +# unicode function +try: + unicode = unicode +except NameError: + unicode = str + + +# re-raise function +if is_py3: + def reraise(exception, traceback): + raise exception.with_traceback(traceback) +else: + eval(compile(""" +def reraise(exception, traceback): + raise exception, None, traceback +""", 'blub', 'exec')) + +reraise.__doc__ = """ +Re-raise `exception` with a `traceback` object. + +Usage:: + + reraise(Exception, sys.exc_info()[2]) + +""" + + +class Python3Method(object): + def __init__(self, func): + self.func = func + + def __get__(self, obj, objtype): + if obj is None: + return lambda *args, **kwargs: self.func(*args, **kwargs) + else: + return lambda *args, **kwargs: self.func(obj, *args, **kwargs) + + +def use_metaclass(meta, *bases): + """ Create a class with a metaclass. """ + if not bases: + bases = (object,) return meta("Py2CompatibilityMetaClass", bases, {}) - - -try: - encoding = sys.stdout.encoding - if encoding is None: - encoding = 'utf-8' -except AttributeError: - encoding = 'ascii' - - + + +try: + encoding = sys.stdout.encoding + if encoding is None: + encoding = 'utf-8' +except AttributeError: + encoding = 'ascii' + + def u(string, errors='strict'): - """Cast to unicode DAMMIT! - Written because Python2 repr always implicitly casts to a string, so we - have to cast back to a unicode (and we now that we always deal with valid - unicode, because we check that in the beginning). - """ + """Cast to unicode DAMMIT! + Written because Python2 repr always implicitly casts to a string, so we + have to cast back to a unicode (and we now that we always deal with valid + unicode, because we check that in the beginning). 
+ """ if isinstance(string, bytes): return unicode(string, encoding='UTF-8', errors=errors) - return string - + return string + def cast_path(obj): """ @@ -320,29 +320,29 @@ def force_unicode(obj): return cast_path(obj) -try: - import builtins # module name in python 3 -except ImportError: +try: + import builtins # module name in python 3 +except ImportError: import __builtin__ as builtins # noqa: F401 - - + + import ast # noqa: F401 - - -def literal_eval(string): - return ast.literal_eval(string) - - -try: - from itertools import zip_longest -except ImportError: + + +def literal_eval(string): + return ast.literal_eval(string) + + +try: + from itertools import zip_longest +except ImportError: from itertools import izip_longest as zip_longest # Python 2 # noqa: F401 - + try: FileNotFoundError = FileNotFoundError except NameError: FileNotFoundError = IOError - + try: NotADirectoryError = NotADirectoryError except NameError: @@ -354,18 +354,18 @@ except NameError: PermissionError = IOError -def no_unicode_pprint(dct): - """ - Python 2/3 dict __repr__ may be different, because of unicode differens - (with or without a `u` prefix). Normally in doctests we could use `pprint` - to sort dicts and check for equality, but here we have to write a separate - function to do that. - """ - import pprint - s = pprint.pformat(dct) - print(re.sub("u'", "'", s)) - - +def no_unicode_pprint(dct): + """ + Python 2/3 dict __repr__ may be different, because of unicode differens + (with or without a `u` prefix). Normally in doctests we could use `pprint` + to sort dicts and check for equality, but here we have to write a separate + function to do that. + """ + import pprint + s = pprint.pformat(dct) + print(re.sub("u'", "'", s)) + + def print_to_stderr(*args): if is_py3: eval("print(*args, file=sys.stderr)") @@ -374,22 +374,22 @@ def print_to_stderr(*args): sys.stderr.flush() -def utf8_repr(func): - """ - ``__repr__`` methods in Python 2 don't allow unicode objects to be - returned. Therefore cast them to utf-8 bytes in this decorator. - """ - def wrapper(self): - result = func(self) - if isinstance(result, unicode): - return result.encode('utf-8') - else: - return result - - if is_py3: - return func - else: - return wrapper +def utf8_repr(func): + """ + ``__repr__`` methods in Python 2 don't allow unicode objects to be + returned. Therefore cast them to utf-8 bytes in this decorator. + """ + def wrapper(self): + result = func(self) + if isinstance(result, unicode): + return result.encode('utf-8') + else: + return result + + if is_py3: + return func + else: + return wrapper if is_py3: diff --git a/contrib/python/jedi/jedi/api/__init__.py b/contrib/python/jedi/jedi/api/__init__.py index 9e568cf8f8..69d34ab64f 100644 --- a/contrib/python/jedi/jedi/api/__init__.py +++ b/contrib/python/jedi/jedi/api/__init__.py @@ -1,34 +1,34 @@ -""" -The API basically only provides one class. You can create a :class:`Script` and -use its methods. - -Additionally you can add a debug function with :func:`set_debug_function`. +""" +The API basically only provides one class. You can create a :class:`Script` and +use its methods. + +Additionally you can add a debug function with :func:`set_debug_function`. Alternatively, if you don't need a custom function and are happy with printing debug messages to stdout, simply call :func:`set_debug_function` without arguments. - -.. warning:: Please, note that Jedi is **not thread safe**. -""" -import os + +.. warning:: Please, note that Jedi is **not thread safe**. 
+""" +import os import sys -import warnings - +import warnings + import parso from parso.python import tree from jedi._compatibility import force_unicode, is_py3 from jedi.parser_utils import get_executable_nodes -from jedi import debug -from jedi import settings -from jedi import cache -from jedi.api import classes -from jedi.api import interpreter -from jedi.api import helpers +from jedi import debug +from jedi import settings +from jedi import cache +from jedi.api import classes +from jedi.api import interpreter +from jedi.api import helpers from jedi.api.completion import Completion from jedi.api.environment import InterpreterEnvironment from jedi.api.project import get_default_project -from jedi.evaluate import Evaluator -from jedi.evaluate import imports +from jedi.evaluate import Evaluator +from jedi.evaluate import imports from jedi.evaluate import usages from jedi.evaluate.arguments import try_iter_content from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf @@ -37,20 +37,20 @@ from jedi.evaluate.filters import TreeNameDefinition, ParamName from jedi.evaluate.syntax_tree import tree_name_to_contexts from jedi.evaluate.context import ModuleContext from jedi.evaluate.context.iterable import unpack_tuple_to_dict - -# Jedi uses lots and lots of recursion. By setting this a little bit higher, we -# can remove some "maximum recursion depth" errors. + +# Jedi uses lots and lots of recursion. By setting this a little bit higher, we +# can remove some "maximum recursion depth" errors. sys.setrecursionlimit(3000) - - -class Script(object): - """ - A Script is the base for completions, goto or whatever you want to do with - |jedi|. - - You can either use the ``source`` parameter or ``path`` to read a file. - Usually you're going to want to use both of them (in an editor). - + + +class Script(object): + """ + A Script is the base for completions, goto or whatever you want to do with + |jedi|. + + You can either use the ``source`` parameter or ``path`` to read a file. + Usually you're going to want to use both of them (in an editor). + The script might be analyzed in a different ``sys.path`` than |jedi|: - if `sys_path` parameter is not ``None``, it will be used as ``sys.path`` @@ -63,34 +63,34 @@ class Script(object): - otherwise ``sys.path`` will match that of |jedi|. - :param source: The source code of the current file, separated by newlines. - :type source: str - :param line: The line to perform actions on (starting with 1). - :type line: int + :param source: The source code of the current file, separated by newlines. + :type source: str + :param line: The line to perform actions on (starting with 1). + :type line: int :param column: The column of the cursor (starting with 0). :type column: int - :param path: The path of the file in the file system, or ``''`` if - it hasn't been saved yet. - :type path: str or None - :param encoding: The encoding of ``source``, if it is not a - ``unicode`` object (default ``'utf-8'``). - :type encoding: str + :param path: The path of the file in the file system, or ``''`` if + it hasn't been saved yet. + :type path: str or None + :param encoding: The encoding of ``source``, if it is not a + ``unicode`` object (default ``'utf-8'``). 
+ :type encoding: str :param sys_path: ``sys.path`` to use during analysis of the script :type sys_path: list :param environment: TODO :type sys_path: Environment - """ - def __init__(self, source=None, line=None, column=None, path=None, + """ + def __init__(self, source=None, line=None, column=None, path=None, encoding='utf-8', sys_path=None, environment=None): - self._orig_path = path + self._orig_path = path # An empty path (also empty string) should always result in no path. self.path = os.path.abspath(path) if path else None - - if source is None: + + if source is None: # TODO add a better warning than the traceback! with open(path, 'rb') as f: - source = f.read() - + source = f.read() + # Load the Python grammar of the current interpreter. self._grammar = parso.load_grammar() @@ -122,8 +122,8 @@ class Script(object): self._code = source line = max(len(self._code_lines), 1) if line is None else line if not (0 < line <= len(self._code_lines)): - raise ValueError('`line` parameter is not in a valid range.') - + raise ValueError('`line` parameter is not in a valid range.') + line_string = self._code_lines[line - 1] line_len = len(line_string) if line_string.endswith('\r\n'): @@ -131,53 +131,53 @@ class Script(object): if line_string.endswith('\n'): line_len -= 1 - column = line_len if column is None else column - if not (0 <= column <= line_len): + column = line_len if column is None else column + if not (0 <= column <= line_len): raise ValueError('`column` parameter (%d) is not in a valid range ' '(0-%d) for line %d (%r).' % ( column, line_len, line, line_string)) - self._pos = line, column + self._pos = line, column self._path = path - - cache.clear_time_caches() - debug.reset_time() - + + cache.clear_time_caches() + debug.reset_time() + def _get_module(self): name = '__main__' if self.path is not None: import_names = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path) if import_names is not None: name = '.'.join(import_names) - + module = ModuleContext( self._evaluator, self._module_node, self.path, code_lines=self._code_lines ) imports.add_module_to_cache(self._evaluator, name, module) return module - - def __repr__(self): + + def __repr__(self): return '<%s: %s %r>' % ( self.__class__.__name__, repr(self._orig_path), self._evaluator.environment, ) - - def completions(self): - """ - Return :class:`classes.Completion` objects. Those objects contain - information about the completions, more than just names. - - :return: Completion objects, sorted by name and __ comes last. - :rtype: list of :class:`classes.Completion` - """ - debug.speed('completions start') + + def completions(self): + """ + Return :class:`classes.Completion` objects. Those objects contain + information about the completions, more than just names. + + :return: Completion objects, sorted by name and __ comes last. + :rtype: list of :class:`classes.Completion` + """ + debug.speed('completions start') completion = Completion( self._evaluator, self._get_module(), self._code_lines, self._pos, self.call_signatures ) completions = completion.completions() - + def iter_import_completions(): for c in completions: tree_name = c._name.tree_name @@ -187,57 +187,57 @@ class Script(object): if definition is not None \ and definition.type in ('import_name', 'import_from'): yield c - + if len(list(iter_import_completions())) > 10: # For now disable completions if there's a lot of imports that # might potentially be resolved. This is the case for tensorflow # and has been fixed for it. 
This is obviously temporary until we # have a better solution. self._evaluator.infer_enabled = False - - debug.speed('completions end') + + debug.speed('completions end') return completions - - def goto_definitions(self): - """ - Return the definitions of a the path under the cursor. goto function! - This follows complicated paths and returns the end, not the first - definition. The big difference between :meth:`goto_assignments` and - :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't - follow imports and statements. Multiple objects may be returned, - because Python itself is a dynamic language, which means depending on - an option you can have two different versions of a function. - - :rtype: list of :class:`classes.Definition` - """ + + def goto_definitions(self): + """ + Return the definitions of a the path under the cursor. goto function! + This follows complicated paths and returns the end, not the first + definition. The big difference between :meth:`goto_assignments` and + :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't + follow imports and statements. Multiple objects may be returned, + because Python itself is a dynamic language, which means depending on + an option you can have two different versions of a function. + + :rtype: list of :class:`classes.Definition` + """ leaf = self._module_node.get_name_of_position(self._pos) if leaf is None: leaf = self._module_node.get_leaf_for_position(self._pos) if leaf is None: return [] - + context = self._evaluator.create_context(self._get_module(), leaf) definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf) - - names = [s.name for s in definitions] - defs = [classes.Definition(self._evaluator, name) for name in names] + + names = [s.name for s in definitions] + defs = [classes.Definition(self._evaluator, name) for name in names] # The additional set here allows the definitions to become unique in an # API sense. In the internals we want to separate more things than in # the API. - return helpers.sorted_definitions(set(defs)) - + return helpers.sorted_definitions(set(defs)) + def goto_assignments(self, follow_imports=False, follow_builtin_imports=False): - """ + """ Return the first definition found, while optionally following imports. Multiple objects may be returned, because Python itself is a - dynamic language, which means depending on an option you can have two - different versions of a function. - + dynamic language, which means depending on an option you can have two + different versions of a function. + :param follow_imports: The goto call will follow imports. :param follow_builtin_imports: If follow_imports is True will decide if it follow builtin imports. 
- :rtype: list of :class:`classes.Definition` - """ + :rtype: list of :class:`classes.Definition` + """ def filter_follow_imports(names, check): for name in names: if check(name): @@ -247,7 +247,7 @@ class Script(object): for new_name in new_names: if new_name.start_pos is None: found_builtin = True - + if found_builtin and not isinstance(name, imports.SubModuleName): yield name else: @@ -255,81 +255,81 @@ class Script(object): yield new_name else: yield name - + tree_name = self._module_node.get_name_of_position(self._pos) if tree_name is None: - return [] + return [] context = self._evaluator.create_context(self._get_module(), tree_name) names = list(self._evaluator.goto(context, tree_name)) - + if follow_imports: def check(name): return name.is_import() - else: + else: def check(name): return isinstance(name, imports.SubModuleName) - + names = filter_follow_imports(names, check) - + defs = [classes.Definition(self._evaluator, d) for d in set(names)] return helpers.sorted_definitions(defs) - + def usages(self, additional_module_paths=(), **kwargs): - """ - Return :class:`classes.Definition` objects, which contain all - names that point to the definition of the name under the cursor. This - is very useful for refactoring (renaming), or to show all usages of a - variable. - - .. todo:: Implement additional_module_paths - + """ + Return :class:`classes.Definition` objects, which contain all + names that point to the definition of the name under the cursor. This + is very useful for refactoring (renaming), or to show all usages of a + variable. + + .. todo:: Implement additional_module_paths + :param additional_module_paths: Deprecated, never ever worked. :param include_builtins: Default True, checks if a usage is a builtin (e.g. ``sys``) and in that case does not return it. - :rtype: list of :class:`classes.Definition` - """ + :rtype: list of :class:`classes.Definition` + """ if additional_module_paths: warnings.warn( "Deprecated since version 0.12.0. This never even worked, just ignore it.", DeprecationWarning, stacklevel=2 ) - + def _usages(include_builtins=True): tree_name = self._module_node.get_name_of_position(self._pos) if tree_name is None: # Must be syntax - return [] - + return [] + names = usages.usages(self._get_module(), tree_name) - + definitions = [classes.Definition(self._evaluator, n) for n in names] if not include_builtins: definitions = [d for d in definitions if not d.in_builtin_module()] return helpers.sorted_definitions(definitions) return _usages(**kwargs) - - def call_signatures(self): - """ - Return the function object of the call you're currently in. - - E.g. if the cursor is here:: - - abs(# <-- cursor is here - - This would return the ``abs`` function. On the other hand:: - - abs()# <-- cursor is here - + + def call_signatures(self): + """ + Return the function object of the call you're currently in. + + E.g. if the cursor is here:: + + abs(# <-- cursor is here + + This would return the ``abs`` function. On the other hand:: + + abs()# <-- cursor is here + This would return an empty list.. 
- - :rtype: list of :class:`classes.CallSignature` - """ + + :rtype: list of :class:`classes.CallSignature` + """ call_signature_details = \ helpers.get_call_signature_details(self._module_node, self._pos) if call_signature_details is None: - return [] - + return [] + context = self._evaluator.create_context( self._get_module(), call_signature_details.bracket_leaf @@ -341,15 +341,15 @@ class Script(object): self._code_lines, self._pos ) - debug.speed('func_call followed') - + debug.speed('func_call followed') + return [classes.CallSignature(self._evaluator, d.name, call_signature_details.bracket_leaf.start_pos, call_signature_details.call_index, call_signature_details.keyword_name_str) for d in definitions if hasattr(d, 'py__call__')] - - def _analysis(self): + + def _analysis(self): self._evaluator.is_analysis = True self._evaluator.analysis_modules = [self._module_node] module = self._get_module() @@ -370,65 +370,65 @@ class Script(object): for testlist in node.children[:-1:2]: # Iterate tuples. unpack_tuple_to_dict(context, types, testlist) - else: + else: if node.type == 'name': defs = self._evaluator.goto_definitions(context, node) else: defs = evaluate_call_of_leaf(context, node) try_iter_content(defs) self._evaluator.reset_recursion_limitations() - + ana = [a for a in self._evaluator.analysis if self.path == a.path] return sorted(set(ana), key=lambda x: x.line) finally: self._evaluator.is_analysis = False - - -class Interpreter(Script): - """ - Jedi API for Python REPLs. - - In addition to completion of simple attribute access, Jedi - supports code completion based on static code analysis. - Jedi can complete attributes of object which is not initialized - yet. - - >>> from os.path import join - >>> namespace = locals() + + +class Interpreter(Script): + """ + Jedi API for Python REPLs. + + In addition to completion of simple attribute access, Jedi + supports code completion based on static code analysis. + Jedi can complete attributes of object which is not initialized + yet. + + >>> from os.path import join + >>> namespace = locals() >>> script = Interpreter('join("").up', [namespace]) - >>> print(script.completions()[0].name) - upper - """ - - def __init__(self, source, namespaces, **kwds): - """ - Parse `source` and mixin interpreted Python objects from `namespaces`. - - :type source: str - :arg source: Code to parse. - :type namespaces: list of dict - :arg namespaces: a list of namespace dictionaries such as the one - returned by :func:`locals`. - - Other optional arguments are same as the ones for :class:`Script`. - If `line` and `column` are None, they are assumed be at the end of - `source`. - """ + >>> print(script.completions()[0].name) + upper + """ + + def __init__(self, source, namespaces, **kwds): + """ + Parse `source` and mixin interpreted Python objects from `namespaces`. + + :type source: str + :arg source: Code to parse. + :type namespaces: list of dict + :arg namespaces: a list of namespace dictionaries such as the one + returned by :func:`locals`. + + Other optional arguments are same as the ones for :class:`Script`. + If `line` and `column` are None, they are assumed be at the end of + `source`. 
+ """ try: namespaces = [dict(n) for n in namespaces] except Exception: raise TypeError("namespaces must be a non-empty list of dicts.") - + environment = kwds.get('environment', None) if environment is None: environment = InterpreterEnvironment() - else: + else: if not isinstance(environment, InterpreterEnvironment): raise TypeError("The environment needs to be an InterpreterEnvironment subclass.") - + super(Interpreter, self).__init__(source, environment=environment, **kwds) self.namespaces = namespaces - + def _get_module(self): return interpreter.MixedModuleContext( self._evaluator, @@ -437,28 +437,28 @@ class Interpreter(Script): path=self.path, code_lines=self._code_lines, ) - - -def names(source=None, path=None, encoding='utf-8', all_scopes=False, + + +def names(source=None, path=None, encoding='utf-8', all_scopes=False, definitions=True, references=False, environment=None): - """ - Returns a list of `Definition` objects, containing name parts. - This means you can call ``Definition.goto_assignments()`` and get the - reference of a name. - The parameters are the same as in :py:class:`Script`, except or the - following ones: - - :param all_scopes: If True lists the names of all scopes instead of only - the module namespace. - :param definitions: If True lists the names that have been defined by a - class, function or a statement (``a = b`` returns ``a``). - :param references: If True lists all the names that are not listed by - ``definitions=True``. E.g. ``a = b`` returns ``b``. - """ - def def_ref_filter(_def): + """ + Returns a list of `Definition` objects, containing name parts. + This means you can call ``Definition.goto_assignments()`` and get the + reference of a name. + The parameters are the same as in :py:class:`Script`, except or the + following ones: + + :param all_scopes: If True lists the names of all scopes instead of only + the module namespace. + :param definitions: If True lists the names that have been defined by a + class, function or a statement (``a = b`` returns ``a``). + :param references: If True lists all the names that are not listed by + ``definitions=True``. E.g. ``a = b`` returns ``b``. + """ + def def_ref_filter(_def): is_def = _def._name.tree_name.is_definition() - return definitions and is_def or references and not is_def - + return definitions and is_def or references and not is_def + def create_name(name): if name.parent.type == 'param': cls = ParamName @@ -470,7 +470,7 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False, name ) - # Set line/column to a random position, because they don't matter. + # Set line/column to a random position, because they don't matter. script = Script(source, line=1, column=0, path=path, encoding=encoding, environment=environment) module_context = script._get_module() defs = [ @@ -479,31 +479,31 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False, create_name(name) ) for name in get_module_names(script._module_node, all_scopes) ] - return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column)) - - -def preload_module(*modules): - """ - Preloading modules tells Jedi to load a module now, instead of lazy parsing - of modules. Usful for IDEs, to control which modules to load on startup. - - :param modules: different module names, list of string. - """ - for m in modules: - s = "import %s as x; x." 
% m - Script(s, 1, len(s), None).completions() - - -def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, - notices=True, speed=True): - """ - Define a callback debug function to get all the debug messages. - + return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column)) + + +def preload_module(*modules): + """ + Preloading modules tells Jedi to load a module now, instead of lazy parsing + of modules. Usful for IDEs, to control which modules to load on startup. + + :param modules: different module names, list of string. + """ + for m in modules: + s = "import %s as x; x." % m + Script(s, 1, len(s), None).completions() + + +def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, + notices=True, speed=True): + """ + Define a callback debug function to get all the debug messages. + If you don't specify any arguments, debug messages will be printed to stdout. - :param func_cb: The callback function for debug messages, with n params. - """ - debug.debug_function = func_cb - debug.enable_warning = warnings - debug.enable_notice = notices - debug.enable_speed = speed + :param func_cb: The callback function for debug messages, with n params. + """ + debug.debug_function = func_cb + debug.enable_warning = warnings + debug.enable_notice = notices + debug.enable_speed = speed diff --git a/contrib/python/jedi/jedi/api/classes.py b/contrib/python/jedi/jedi/api/classes.py index 9602e04a8b..bba278adc8 100644 --- a/contrib/python/jedi/jedi/api/classes.py +++ b/contrib/python/jedi/jedi/api/classes.py @@ -1,138 +1,138 @@ -""" -The :mod:`jedi.api.classes` module contains the return classes of the API. -These classes are the much bigger part of the whole API, because they contain -the interesting information about completion and goto operations. -""" -import re - +""" +The :mod:`jedi.api.classes` module contains the return classes of the API. +These classes are the much bigger part of the whole API, because they contain +the interesting information about completion and goto operations. +""" +import re + from parso.python.tree import search_ancestor -from jedi import settings +from jedi import settings from jedi.evaluate.utils import ignored, unite from jedi.cache import memoize_method -from jedi.evaluate import imports -from jedi.evaluate import compiled +from jedi.evaluate import imports +from jedi.evaluate import compiled from jedi.evaluate.imports import ImportName from jedi.evaluate.context import instance from jedi.evaluate.context import ClassContext, FunctionExecutionContext from jedi.api.keywords import KeywordName - - + + def _sort_names_by_start_pos(names): return sorted(names, key=lambda s: s.start_pos or (0, 0)) def defined_names(evaluator, context): - """ - List sub-definitions (e.g., methods in class). - - :type scope: Scope - :rtype: list of Definition - """ + """ + List sub-definitions (e.g., methods in class). 
+ + :type scope: Scope + :rtype: list of Definition + """ filter = next(context.get_filters(search_global=True)) names = [name for name in filter.values()] return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)] - - -class BaseDefinition(object): - _mapping = { - 'posixpath': 'os.path', - 'riscospath': 'os.path', - 'ntpath': 'os.path', - 'os2emxpath': 'os.path', - 'macpath': 'os.path', - 'genericpath': 'os.path', - 'posix': 'os', - '_io': 'io', - '_functools': 'functools', - '_sqlite3': 'sqlite3', - '__builtin__': '', - 'builtins': '', - } - - _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { - 'argparse._ActionsContainer': 'argparse.ArgumentParser', - }.items()) - - def __init__(self, evaluator, name): - self._evaluator = evaluator - self._name = name - """ + + +class BaseDefinition(object): + _mapping = { + 'posixpath': 'os.path', + 'riscospath': 'os.path', + 'ntpath': 'os.path', + 'os2emxpath': 'os.path', + 'macpath': 'os.path', + 'genericpath': 'os.path', + 'posix': 'os', + '_io': 'io', + '_functools': 'functools', + '_sqlite3': 'sqlite3', + '__builtin__': '', + 'builtins': '', + } + + _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { + 'argparse._ActionsContainer': 'argparse.ArgumentParser', + }.items()) + + def __init__(self, evaluator, name): + self._evaluator = evaluator + self._name = name + """ An instance of :class:`parso.reprsentation.Name` subclass. - """ + """ self.is_keyword = isinstance(self._name, KeywordName) - - # generate a path to the definition + + # generate a path to the definition self._module = name.get_root_context() - if self.in_builtin_module(): - self.module_path = None - else: + if self.in_builtin_module(): + self.module_path = None + else: self.module_path = self._module.py__file__() - """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" - - @property - def name(self): - """ - Name of variable/function/class/module. - - For example, for ``x = None`` it returns ``'x'``. - - :rtype: str or None - """ + """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" + + @property + def name(self): + """ + Name of variable/function/class/module. + + For example, for ``x = None`` it returns ``'x'``. + + :rtype: str or None + """ return self._name.string_name - - @property - def type(self): - """ - The type of the definition. - - Here is an example of the value of this attribute. Let's consider - the following source. As what is in ``variable`` is unambiguous - to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of - definition for ``sys``, ``f``, ``C`` and ``x``. - - >>> from jedi import Script - >>> source = ''' - ... import keyword - ... - ... class C: - ... pass - ... - ... class D: - ... pass - ... - ... x = D() - ... - ... def f(): - ... pass - ... - ... for variable in [keyword, f, C, x]: - ... variable''' - - >>> script = Script(source) - >>> defs = script.goto_definitions() - - Before showing what is in ``defs``, let's sort it by :attr:`line` - so that it is easy to relate the result to the source code. - - >>> defs = sorted(defs, key=lambda d: d.line) - >>> defs # doctest: +NORMALIZE_WHITESPACE - [<Definition module keyword>, <Definition class C>, + + @property + def type(self): + """ + The type of the definition. + + Here is an example of the value of this attribute. Let's consider + the following source. As what is in ``variable`` is unambiguous + to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of + definition for ``sys``, ``f``, ``C`` and ``x``. 
+ + >>> from jedi import Script + >>> source = ''' + ... import keyword + ... + ... class C: + ... pass + ... + ... class D: + ... pass + ... + ... x = D() + ... + ... def f(): + ... pass + ... + ... for variable in [keyword, f, C, x]: + ... variable''' + + >>> script = Script(source) + >>> defs = script.goto_definitions() + + Before showing what is in ``defs``, let's sort it by :attr:`line` + so that it is easy to relate the result to the source code. + + >>> defs = sorted(defs, key=lambda d: d.line) + >>> defs # doctest: +NORMALIZE_WHITESPACE + [<Definition module keyword>, <Definition class C>, <Definition instance D>, <Definition def f>] - - Finally, here is what you can get from :attr:`type`: - + + Finally, here is what you can get from :attr:`type`: + >>> defs = [str(d.type) for d in defs] # It's unicode and in Py2 has u before it. >>> defs[0] - 'module' + 'module' >>> defs[1] - 'class' + 'class' >>> defs[2] - 'instance' + 'instance' >>> defs[3] - 'function' - - """ + 'function' + + """ tree_name = self._name.tree_name resolve = False if tree_name is not None: @@ -141,14 +141,14 @@ class BaseDefinition(object): if definition is not None and definition.type == 'import_from' and \ tree_name.is_definition(): resolve = True - + if isinstance(self._name, imports.SubModuleName) or resolve: for context in self._name.infer(): return context.api_type return self._name.api_type - - def _path(self): - """The path to a module/class/function definition.""" + + def _path(self): + """The path to a module/class/function definition.""" def to_reverse(): name = self._name if name.api_type == 'module': @@ -169,7 +169,7 @@ class BaseDefinition(object): # ImportError. So simply return the name. yield name.string_name return - else: + else: yield name.string_name parent_context = name.parent_context @@ -181,139 +181,139 @@ class BaseDefinition(object): yield parent_context.name.string_name except AttributeError: pass - else: + else: for name in reversed(method().split('.')): yield name parent_context = parent_context.parent_context return reversed(list(to_reverse())) - - @property - def module_name(self): - """ - The module name. - - >>> from jedi import Script - >>> source = 'import json' - >>> script = Script(source, path='example.py') - >>> d = script.goto_definitions()[0] - >>> print(d.module_name) # doctest: +ELLIPSIS - json - """ + + @property + def module_name(self): + """ + The module name. 
+ + >>> from jedi import Script + >>> source = 'import json' + >>> script = Script(source, path='example.py') + >>> d = script.goto_definitions()[0] + >>> print(d.module_name) # doctest: +ELLIPSIS + json + """ return self._module.name.string_name - - def in_builtin_module(self): - """Whether this is a builtin module.""" - return isinstance(self._module, compiled.CompiledObject) - - @property - def line(self): - """The line where the definition occurs (starting with 1).""" + + def in_builtin_module(self): + """Whether this is a builtin module.""" + return isinstance(self._module, compiled.CompiledObject) + + @property + def line(self): + """The line where the definition occurs (starting with 1).""" start_pos = self._name.start_pos if start_pos is None: - return None + return None return start_pos[0] - - @property - def column(self): - """The column where the definition occurs (starting with 0).""" + + @property + def column(self): + """The column where the definition occurs (starting with 0).""" start_pos = self._name.start_pos if start_pos is None: - return None + return None return start_pos[1] - + def docstring(self, raw=False, fast=True): - r""" - Return a document string for this completion object. - - Example: - - >>> from jedi import Script - >>> source = '''\ - ... def f(a, b=1): - ... "Document for function f." - ... ''' - >>> script = Script(source, 1, len('def f'), 'example.py') - >>> doc = script.goto_definitions()[0].docstring() - >>> print(doc) - f(a, b=1) - <BLANKLINE> - Document for function f. - - Notice that useful extra information is added to the actual - docstring. For function, it is call signature. If you need - actual docstring, use ``raw=True`` instead. - - >>> print(script.goto_definitions()[0].docstring(raw=True)) - Document for function f. - + r""" + Return a document string for this completion object. + + Example: + + >>> from jedi import Script + >>> source = '''\ + ... def f(a, b=1): + ... "Document for function f." + ... ''' + >>> script = Script(source, 1, len('def f'), 'example.py') + >>> doc = script.goto_definitions()[0].docstring() + >>> print(doc) + f(a, b=1) + <BLANKLINE> + Document for function f. + + Notice that useful extra information is added to the actual + docstring. For function, it is call signature. If you need + actual docstring, use ``raw=True`` instead. + + >>> print(script.goto_definitions()[0].docstring(raw=True)) + Document for function f. + :param fast: Don't follow imports that are only one level deep like ``import foo``, but follow ``from foo import bar``. This makes sense for speed reasons. Completing `import a` is slow if you use the ``foo.docstring(fast=False)`` on every object, because it parses all libraries starting with ``a``. - """ + """ return _Help(self._name).docstring(fast=fast, raw=raw) - - @property - def description(self): - """A textual description of the object.""" + + @property + def description(self): + """A textual description of the object.""" return self._name.string_name - - @property - def full_name(self): - """ - Dot-separated path of this object. - - It is in the form of ``<module>[.<submodule>[...]][.<object>]``. - It is useful when you want to look up Python manual of the - object at hand. - - Example: - - >>> from jedi import Script - >>> source = ''' - ... import os - ... os.path.join''' - >>> script = Script(source, 3, len('os.path.join'), 'example.py') - >>> print(script.goto_definitions()[0].full_name) - os.path.join - + + @property + def full_name(self): + """ + Dot-separated path of this object. 
+ + It is in the form of ``<module>[.<submodule>[...]][.<object>]``. + It is useful when you want to look up Python manual of the + object at hand. + + Example: + + >>> from jedi import Script + >>> source = ''' + ... import os + ... os.path.join''' + >>> script = Script(source, 3, len('os.path.join'), 'example.py') + >>> print(script.goto_definitions()[0].full_name) + os.path.join + Notice that it returns ``'os.path.join'`` instead of (for example) ``'posixpath.join'``. This is not correct, since the modules name would be ``<module 'posixpath' ...>```. However most users find the latter more practical. - """ + """ path = list(self._path()) - # TODO add further checks, the mapping should only occur on stdlib. - if not path: - return None # for keywords the path is empty - + # TODO add further checks, the mapping should only occur on stdlib. + if not path: + return None # for keywords the path is empty + with ignored(KeyError): - path[0] = self._mapping[path[0]] - for key, repl in self._tuple_mapping.items(): - if tuple(path[:len(key)]) == key: - path = [repl] + path[len(key):] - - return '.'.join(path if path[0] else path[1:]) - - def goto_assignments(self): + path[0] = self._mapping[path[0]] + for key, repl in self._tuple_mapping.items(): + if tuple(path[:len(key)]) == key: + path = [repl] + path[len(key):] + + return '.'.join(path if path[0] else path[1:]) + + def goto_assignments(self): if self._name.tree_name is None: return self - + names = self._evaluator.goto(self._name.parent_context, self._name.tree_name) return [Definition(self._evaluator, n) for n in names] - + def _goto_definitions(self): # TODO make this function public. return [Definition(self._evaluator, d.name) for d in self._name.infer()] - @property + @property @memoize_method - def params(self): - """ - Raises an ``AttributeError``if the definition is not callable. - Otherwise returns a list of `Definition` that represents the params. - """ + def params(self): + """ + Raises an ``AttributeError``if the definition is not callable. + Otherwise returns a list of `Definition` that represents the params. + """ def get_param_names(context): param_names = [] if context.api_type == 'function': @@ -341,28 +341,28 @@ class BaseDefinition(object): return param_names followed = list(self._name.infer()) - if not followed or not hasattr(followed[0], 'py__call__'): + if not followed or not hasattr(followed[0], 'py__call__'): raise AttributeError('There are no params defined on this.') context = followed[0] # only check the first one. - + return [Definition(self._evaluator, n) for n in get_param_names(context)] - - def parent(self): + + def parent(self): context = self._name.parent_context if context is None: return None - + if isinstance(context, FunctionExecutionContext): context = context.function_context return Definition(self._evaluator, context.name) - def __repr__(self): - return "<%s %s>" % (type(self).__name__, self.description) - + def __repr__(self): + return "<%s %s>" % (type(self).__name__, self.description) + def get_line_code(self, before=0, after=0): """ Returns the line of code where this object was defined. - + :param before: Add n lines before the current line to the output. :param after: Add n lines after the current line to the output. @@ -379,47 +379,47 @@ class BaseDefinition(object): return ''.join(lines[start_index:index + after + 1]) -class Completion(BaseDefinition): - """ - `Completion` objects are returned from :meth:`api.Script.completions`. They - provide additional information about a completion. 
- """ +class Completion(BaseDefinition): + """ + `Completion` objects are returned from :meth:`api.Script.completions`. They + provide additional information about a completion. + """ def __init__(self, evaluator, name, stack, like_name_length): - super(Completion, self).__init__(evaluator, name) - - self._like_name_length = like_name_length + super(Completion, self).__init__(evaluator, name) + + self._like_name_length = like_name_length self._stack = stack - - # Completion objects with the same Completion name (which means - # duplicate items in the completion) - self._same_name_completions = [] - - def _complete(self, like_name): - append = '' - if settings.add_bracket_after_function \ - and self.type == 'Function': - append = '(' - + + # Completion objects with the same Completion name (which means + # duplicate items in the completion) + self._same_name_completions = [] + + def _complete(self, like_name): + append = '' + if settings.add_bracket_after_function \ + and self.type == 'Function': + append = '(' + if self._name.api_type == 'param' and self._stack is not None: nonterminals = [stack_node.nonterminal for stack_node in self._stack] if 'trailer' in nonterminals and 'argument' not in nonterminals: # TODO this doesn't work for nested calls. append += '=' - + name = self._name.string_name - if like_name: - name = name[self._like_name_length:] + if like_name: + name = name[self._like_name_length:] return name + append - - @property - def complete(self): - """ - Return the rest of the word, e.g. completing ``isinstance``:: - - isinstan# <-- Cursor is here - - would return the string 'ce'. It also adds additional stuff, depending - on your `settings.py`. + + @property + def complete(self): + """ + Return the rest of the word, e.g. completing ``isinstance``:: + + isinstan# <-- Cursor is here + + would return the string 'ce'. It also adds additional stuff, depending + on your `settings.py`. Assuming the following function definition:: @@ -430,24 +430,24 @@ class Completion(BaseDefinition): would be `am=` - """ - return self._complete(True) - - @property - def name_with_symbols(self): - """ + """ + return self._complete(True) + + @property + def name_with_symbols(self): + """ Similar to :attr:`name`, but like :attr:`name` returns also the symbols, for example assuming the following function definition:: - + def foo(param=0): pass - + completing ``foo(`` would give a ``Completion`` which ``name_with_symbols`` would be "param=". - """ - return self._complete(False) - + """ + return self._complete(False) + def docstring(self, raw=False, fast=True): if self._like_name_length >= 3: # In this case we can just resolve the like name, because we @@ -455,65 +455,65 @@ class Completion(BaseDefinition): fast = False return super(Completion, self).docstring(raw=raw, fast=fast) - @property - def description(self): - """Provide a description of the completion object.""" + @property + def description(self): + """Provide a description of the completion object.""" # TODO improve the class structure. return Definition.description.__get__(self) - - def __repr__(self): + + def __repr__(self): return '<%s: %s>' % (type(self).__name__, self._name.string_name) - + @memoize_method - def follow_definition(self): - """ - Return the original definitions. I strongly recommend not using it for - your completions, because it might slow down |jedi|. If you want to - read only a few objects (<=20), it might be useful, especially to get - the original docstrings. 
The basic problem of this function is that it - follows all results. This means with 1000 completions (e.g. numpy), - it's just PITA-slow. - """ + def follow_definition(self): + """ + Return the original definitions. I strongly recommend not using it for + your completions, because it might slow down |jedi|. If you want to + read only a few objects (<=20), it might be useful, especially to get + the original docstrings. The basic problem of this function is that it + follows all results. This means with 1000 completions (e.g. numpy), + it's just PITA-slow. + """ defs = self._name.infer() - return [Definition(self._evaluator, d.name) for d in defs] - - + return [Definition(self._evaluator, d.name) for d in defs] + + class Definition(BaseDefinition): - """ - *Definition* objects are returned from :meth:`api.Script.goto_assignments` - or :meth:`api.Script.goto_definitions`. - """ - def __init__(self, evaluator, definition): - super(Definition, self).__init__(evaluator, definition) - - @property - def description(self): - """ - A description of the :class:`.Definition` object, which is heavily used - in testing. e.g. for ``isinstance`` it returns ``def isinstance``. - - Example: - - >>> from jedi import Script - >>> source = ''' - ... def f(): - ... pass - ... - ... class C: - ... pass - ... - ... variable = f if random.choice([0,1]) else C''' - >>> script = Script(source, column=3) # line is maximum by default - >>> defs = script.goto_definitions() - >>> defs = sorted(defs, key=lambda d: d.line) - >>> defs - [<Definition def f>, <Definition class C>] - >>> str(defs[0].description) # strip literals in python2 - 'def f' - >>> str(defs[1].description) - 'class C' - - """ + """ + *Definition* objects are returned from :meth:`api.Script.goto_assignments` + or :meth:`api.Script.goto_definitions`. + """ + def __init__(self, evaluator, definition): + super(Definition, self).__init__(evaluator, definition) + + @property + def description(self): + """ + A description of the :class:`.Definition` object, which is heavily used + in testing. e.g. for ``isinstance`` it returns ``def isinstance``. + + Example: + + >>> from jedi import Script + >>> source = ''' + ... def f(): + ... pass + ... + ... class C: + ... pass + ... + ... variable = f if random.choice([0,1]) else C''' + >>> script = Script(source, column=3) # line is maximum by default + >>> defs = script.goto_definitions() + >>> defs = sorted(defs, key=lambda d: d.line) + >>> defs + [<Definition def f>, <Definition class C>] + >>> str(defs[0].description) # strip literals in python2 + 'def f' + >>> str(defs[1].description) + 'class C' + + """ typ = self.type tree_name = self._name.tree_name if typ in ('function', 'class', 'module', 'instance') or tree_name is None: @@ -527,152 +527,152 @@ class Definition(BaseDefinition): include_comma=False ) return typ + ' ' + code - + definition = tree_name.get_definition() or tree_name # Remove the prefix, because that's not what we want for get_code # here. txt = definition.get_code(include_prefix=False) - # Delete comments: + # Delete comments: txt = re.sub(r'#[^\n]+\n', ' ', txt) - # Delete multi spaces/newlines + # Delete multi spaces/newlines txt = re.sub(r'\s+', ' ', txt).strip() return txt - - @property - def desc_with_module(self): - """ - In addition to the definition, also return the module. - - .. warning:: Don't use this function yet, its behaviour may change. If - you really need it, talk to me. - - .. todo:: Add full path. This function is should return a - `module.class.function` path. 
- """ + + @property + def desc_with_module(self): + """ + In addition to the definition, also return the module. + + .. warning:: Don't use this function yet, its behaviour may change. If + you really need it, talk to me. + + .. todo:: Add full path. This function is should return a + `module.class.function` path. + """ position = '' if self.in_builtin_module else '@%s' % self.line - return "%s:%s%s" % (self.module_name, self.description, position) - + return "%s:%s%s" % (self.module_name, self.description, position) + @memoize_method - def defined_names(self): - """ - List sub-definitions (e.g., methods in class). - - :rtype: list of Definition - """ + def defined_names(self): + """ + List sub-definitions (e.g., methods in class). + + :rtype: list of Definition + """ defs = self._name.infer() return sorted( unite(defined_names(self._evaluator, d) for d in defs), key=lambda s: s._name.start_pos or (0, 0) ) - - def is_definition(self): - """ - Returns True, if defined as a name in a statement, function or class. - Returns False, if it's a reference to such a definition. - """ + + def is_definition(self): + """ + Returns True, if defined as a name in a statement, function or class. + Returns False, if it's a reference to such a definition. + """ if self._name.tree_name is None: return True else: return self._name.tree_name.is_definition() - - def __eq__(self, other): - return self._name.start_pos == other._name.start_pos \ - and self.module_path == other.module_path \ - and self.name == other.name \ - and self._evaluator == other._evaluator - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash((self._name.start_pos, self.module_path, self.name, self._evaluator)) - - -class CallSignature(Definition): - """ - `CallSignature` objects is the return value of `Script.function_definition`. - It knows what functions you are currently in. e.g. `isinstance(` would - return the `isinstance` function. without `(` it would return nothing. - """ + + def __eq__(self, other): + return self._name.start_pos == other._name.start_pos \ + and self.module_path == other.module_path \ + and self.name == other.name \ + and self._evaluator == other._evaluator + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self._name.start_pos, self.module_path, self.name, self._evaluator)) + + +class CallSignature(Definition): + """ + `CallSignature` objects is the return value of `Script.function_definition`. + It knows what functions you are currently in. e.g. `isinstance(` would + return the `isinstance` function. without `(` it would return nothing. + """ def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str): - super(CallSignature, self).__init__(evaluator, executable_name) - self._index = index + super(CallSignature, self).__init__(evaluator, executable_name) + self._index = index self._key_name_str = key_name_str self._bracket_start_pos = bracket_start_pos - - @property - def index(self): - """ - The Param index of the current call. - Returns None if the index cannot be found in the curent call. - """ + + @property + def index(self): + """ + The Param index of the current call. + Returns None if the index cannot be found in the curent call. 
+ """ if self._key_name_str is not None: - for i, param in enumerate(self.params): + for i, param in enumerate(self.params): if self._key_name_str == param.name: - return i + return i if self.params: param_name = self.params[-1]._name if param_name.tree_name is not None: if param_name.tree_name.get_definition().star_count == 2: return i return None - - if self._index >= len(self.params): - for i, param in enumerate(self.params): + + if self._index >= len(self.params): + for i, param in enumerate(self.params): tree_name = param._name.tree_name if tree_name is not None: # *args case if tree_name.get_definition().star_count == 1: return i - return None - return self._index - - @property - def bracket_start(self): - """ - The indent of the bracket that is responsible for the last function - call. - """ + return None + return self._index + + @property + def bracket_start(self): + """ + The indent of the bracket that is responsible for the last function + call. + """ return self._bracket_start_pos - - @property + + @property def _params_str(self): return ', '.join([p.description[6:] for p in self.params]) - - def __repr__(self): + + def __repr__(self): return '<%s: %s index=%r params=[%s]>' % ( type(self).__name__, self._name.string_name, self._index, self._params_str, ) - - -class _Help(object): - """ - Temporary implementation, will be used as `Script.help() or something in - the future. - """ - def __init__(self, definition): - self._name = definition - + + +class _Help(object): + """ + Temporary implementation, will be used as `Script.help() or something in + the future. + """ + def __init__(self, definition): + self._name = definition + @memoize_method def _get_contexts(self, fast): if isinstance(self._name, ImportName) and fast: return {} - + if self._name.api_type == 'statement': return {} return self._name.infer() def docstring(self, fast=True, raw=True): - """ + """ The docstring ``__doc__`` for any object. - - See :attr:`doc` for example. - """ + + See :attr:`doc` for example. + """ # TODO: Use all of the followed objects as output. Possibly divinding # them by a few dashes. 
for context in self._get_contexts(fast=fast): diff --git a/contrib/python/jedi/jedi/api/helpers.py b/contrib/python/jedi/jedi/api/helpers.py index 7cf4bc6fc4..1cf24b6a32 100644 --- a/contrib/python/jedi/jedi/api/helpers.py +++ b/contrib/python/jedi/jedi/api/helpers.py @@ -1,28 +1,28 @@ -""" -Helpers for the API -""" -import re +""" +Helpers for the API +""" +import re from collections import namedtuple from textwrap import dedent - + from parso.python.parser import Parser from parso.python import tree - + from jedi._compatibility import u from jedi.evaluate.syntax_tree import eval_atom from jedi.evaluate.helpers import evaluate_call_of_leaf from jedi.evaluate.compiled import get_string_context_set from jedi.cache import call_signature_time_cache - - + + CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) + - -def sorted_definitions(defs): - # Note: `or ''` below is required because `module_path` could be - return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) - - +def sorted_definitions(defs): + # Note: `or ''` below is required because `module_path` could be + return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) + + def get_on_completion_name(module_node, lines, position): leaf = module_node.get_leaf_for_position(position) if leaf is None or leaf.type in ('string', 'error_leaf'): @@ -95,12 +95,12 @@ def _get_code_for_stack(code_lines, module_node, position): def get_stack_at_position(grammar, code_lines, module_node, pos): - """ + """ Returns the possible node names (e.g. import_from, xor_test or yield_stmt). - """ + """ class EndMarkerReached(Exception): pass - + def tokenize_without_endmarker(code): # TODO This is for now not an official parso API that exists purely # for Jedi. @@ -116,7 +116,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos): raise EndMarkerReached() else: yield token - + # The code might be indedented, just remove it. code = dedent(_get_code_for_stack(code_lines, module_node, pos)) # We use a word to tell Jedi when we have reached the start of the @@ -124,7 +124,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos): # Use Z as a prefix because it's not part of a number suffix. safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' code = code + ' ' + safeword - + p = Parser(grammar._pgen_grammar, error_recovery=True) try: p.parse(tokens=tokenize_without_endmarker(code)) @@ -134,14 +134,14 @@ def get_stack_at_position(grammar, code_lines, module_node, pos): "This really shouldn't happen. There's a bug in Jedi:\n%s" % list(tokenize_without_endmarker(code)) ) - - + + def evaluate_goto_definition(evaluator, context, leaf): if leaf.type == 'name': # In case of a name we can just use goto_definition which does all the # magic itself. return evaluator.goto_definitions(context, leaf) - + parent = leaf.parent if parent.type == 'atom': return context.eval_node(leaf.parent) @@ -152,7 +152,7 @@ def evaluate_goto_definition(evaluator, context, leaf): elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'): return get_string_context_set(evaluator) return [] - + CallSignatureDetails = namedtuple( 'CallSignatureDetails', diff --git a/contrib/python/jedi/jedi/api/interpreter.py b/contrib/python/jedi/jedi/api/interpreter.py index c9b7bd69bb..3395142964 100644 --- a/contrib/python/jedi/jedi/api/interpreter.py +++ b/contrib/python/jedi/jedi/api/interpreter.py @@ -1,32 +1,32 @@ -""" -TODO Some parts of this module are still not well documented. 
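The ``or ''`` / ``or 0`` fallbacks in ``sorted_definitions`` above matter because ``module_path`` (for builtin modules) and the positions can be ``None``, and ``None`` does not compare with ``str`` or ``int`` on Python 3; a standalone illustration of the sort key::

    rows = [('b.py', 3, 0), (None, 1, 0), ('a.py', None, 2)]
    print(sorted(rows, key=lambda x: (x[0] or '', x[1] or 0, x[2] or 0)))
    # [(None, 1, 0), ('a.py', None, 2), ('b.py', 3, 0)]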
-""" - +""" +TODO Some parts of this module are still not well documented. +""" + from jedi.evaluate.context import ModuleContext -from jedi.evaluate import compiled +from jedi.evaluate import compiled from jedi.evaluate.compiled import mixed from jedi.evaluate.compiled.access import create_access_path from jedi.evaluate.base_context import Context - - + + def _create(evaluator, obj): return compiled.create_from_access_path( evaluator, create_access_path(evaluator, obj) ) - - + + class NamespaceObject(object): def __init__(self, dct): self.__dict__ = dct - - + + class MixedModuleContext(Context): type = 'mixed_module' - + def __init__(self, evaluator, tree_module, namespaces, path, code_lines): self.evaluator = evaluator self._namespaces = namespaces - + self._namespace_objects = [NamespaceObject(n) for n in namespaces] self._module_context = ModuleContext( evaluator, tree_module, @@ -34,14 +34,14 @@ class MixedModuleContext(Context): code_lines=code_lines ) self.tree_node = tree_module - + def get_node(self): return self.tree_node - + def get_filters(self, *args, **kwargs): for filter in self._module_context.get_filters(*args, **kwargs): yield filter - + for namespace_obj in self._namespace_objects: compiled_object = _create(self.evaluator, namespace_obj) mixed_object = mixed.MixedObject( @@ -52,7 +52,7 @@ class MixedModuleContext(Context): ) for filter in mixed_object.get_filters(*args, **kwargs): yield filter - + @property def code_lines(self): return self._module_context.code_lines diff --git a/contrib/python/jedi/jedi/api/keywords.py b/contrib/python/jedi/jedi/api/keywords.py index 2991a0f81a..81e3d1c21c 100644 --- a/contrib/python/jedi/jedi/api/keywords.py +++ b/contrib/python/jedi/jedi/api/keywords.py @@ -1,11 +1,11 @@ -import pydoc - +import pydoc + from jedi.evaluate.utils import ignored from jedi.evaluate.filters import AbstractNameDefinition - -try: - from pydoc_data import topics as pydoc_topics -except ImportError: + +try: + from pydoc_data import topics as pydoc_topics +except ImportError: # Python 2 try: import pydoc_topics @@ -13,72 +13,72 @@ except ImportError: # This is for Python 3 embeddable version, which dont have # pydoc_data module in its file python3x.zip. pydoc_topics = None - - + + def get_operator(evaluator, string, pos): return Keyword(evaluator, string, pos) - - + + class KeywordName(AbstractNameDefinition): api_type = u'keyword' - + def __init__(self, evaluator, name): self.evaluator = evaluator self.string_name = name self.parent_context = evaluator.builtins_module - + def infer(self): return [Keyword(self.evaluator, self.string_name, (0, 0))] - - + + class Keyword(object): api_type = u'keyword' - + def __init__(self, evaluator, name, pos): self.name = KeywordName(evaluator, name) - self.start_pos = pos + self.start_pos = pos self.parent = evaluator.builtins_module - - @property - def names(self): - """ For a `parsing.Name` like comparision """ - return [self.name] - + + @property + def names(self): + """ For a `parsing.Name` like comparision """ + return [self.name] + def py__doc__(self, include_call_signature=False): return imitate_pydoc(self.name.string_name) - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.name) - - -def imitate_pydoc(string): - """ - It's not possible to get the pydoc's without starting the annoying pager - stuff. 
- """ + + def __repr__(self): + return '<%s: %s>' % (type(self).__name__, self.name) + + +def imitate_pydoc(string): + """ + It's not possible to get the pydoc's without starting the annoying pager + stuff. + """ if pydoc_topics is None: return '' - # str needed because of possible unicode stuff in py2k (pydoc doesn't work - # with unicode strings) - string = str(string) - h = pydoc.help + # str needed because of possible unicode stuff in py2k (pydoc doesn't work + # with unicode strings) + string = str(string) + h = pydoc.help with ignored(KeyError): - # try to access symbols - string = h.symbols[string] - string, _, related = string.partition(' ') - - get_target = lambda s: h.topics.get(s, h.keywords.get(s)) - while isinstance(string, str): - string = get_target(string) - - try: - # is a tuple now - label, related = string - except TypeError: - return '' - - try: + # try to access symbols + string = h.symbols[string] + string, _, related = string.partition(' ') + + get_target = lambda s: h.topics.get(s, h.keywords.get(s)) + while isinstance(string, str): + string = get_target(string) + + try: + # is a tuple now + label, related = string + except TypeError: + return '' + + try: return pydoc_topics.topics[label].strip() if pydoc_topics else '' - except KeyError: - return '' + except KeyError: + return '' diff --git a/contrib/python/jedi/jedi/api/replstartup.py b/contrib/python/jedi/jedi/api/replstartup.py index 3ac8470877..98734bf703 100644 --- a/contrib/python/jedi/jedi/api/replstartup.py +++ b/contrib/python/jedi/jedi/api/replstartup.py @@ -1,29 +1,29 @@ -""" -To use Jedi completion in Python interpreter, add the following in your shell +""" +To use Jedi completion in Python interpreter, add the following in your shell setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is not available on Windows. If you still want Jedi autocompletion in your REPL, just use IPython instead:: - - export PYTHONSTARTUP="$(python -m jedi repl)" - -Then you will be able to use Jedi completer in your Python interpreter:: - - $ python - Python 2.7.2+ (default, Jul 20 2012, 22:15:08) - [GCC 4.6.1] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import os + + export PYTHONSTARTUP="$(python -m jedi repl)" + +Then you will be able to use Jedi completer in your Python interpreter:: + + $ python + Python 2.7.2+ (default, Jul 20 2012, 22:15:08) + [GCC 4.6.1] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + >>> import os >>> os.path.join('a', 'b').split().in<TAB> # doctest: +SKIP ..dex ..sert - -""" -import jedi.utils -from jedi import __version__ as __jedi_version__ - -print('REPL completion using Jedi %s' % __jedi_version__) -jedi.utils.setup_readline() - -del jedi - -# Note: try not to do many things here, as it will contaminate global -# namespace of the interpreter. + +""" +import jedi.utils +from jedi import __version__ as __jedi_version__ + +print('REPL completion using Jedi %s' % __jedi_version__) +jedi.utils.setup_readline() + +del jedi + +# Note: try not to do many things here, as it will contaminate global +# namespace of the interpreter. diff --git a/contrib/python/jedi/jedi/cache.py b/contrib/python/jedi/jedi/cache.py index 93e2bd7fcf..b20ea1e779 100644 --- a/contrib/python/jedi/jedi/cache.py +++ b/contrib/python/jedi/jedi/cache.py @@ -1,37 +1,37 @@ -""" -This caching is very important for speed and memory optimizations. There's -nothing really spectacular, just some decorators. 
The following cache types are -available: - -- ``time_cache`` can be used to cache something for just a limited time span, - which can be useful if there's user interaction and the user cannot react - faster than a certain time. - -This module is one of the reasons why |jedi| is not thread-safe. As you can see -there are global variables, which are holding the cache information. Some of -these variables are being cleaned after every API usage. -""" -import time +""" +This caching is very important for speed and memory optimizations. There's +nothing really spectacular, just some decorators. The following cache types are +available: + +- ``time_cache`` can be used to cache something for just a limited time span, + which can be useful if there's user interaction and the user cannot react + faster than a certain time. + +This module is one of the reasons why |jedi| is not thread-safe. As you can see +there are global variables, which are holding the cache information. Some of +these variables are being cleaned after every API usage. +""" +import time from functools import wraps - -from jedi import settings + +from jedi import settings from parso.cache import parser_cache - -_time_caches = {} - - + +_time_caches = {} + + def underscore_memoization(func): """ Decorator for methods:: - + class A(object): def x(self): if self._x: self._x = 10 return self._x - + Becomes:: - + class A(object): @underscore_memoization def x(self): @@ -52,65 +52,65 @@ def underscore_memoization(func): return wrapper -def clear_time_caches(delete_all=False): - """ Jedi caches many things, that should be completed after each completion - finishes. - - :param delete_all: Deletes also the cache that is normally not deleted, - like parser cache, which is important for faster parsing. - """ - global _time_caches - - if delete_all: - for cache in _time_caches.values(): - cache.clear() - parser_cache.clear() - else: - # normally just kill the expired entries, not all - for tc in _time_caches.values(): - # check time_cache for expired entries - for key, (t, value) in list(tc.items()): - if t < time.time(): - # delete expired entries - del tc[key] - - +def clear_time_caches(delete_all=False): + """ Jedi caches many things, that should be completed after each completion + finishes. + + :param delete_all: Deletes also the cache that is normally not deleted, + like parser cache, which is important for faster parsing. + """ + global _time_caches + + if delete_all: + for cache in _time_caches.values(): + cache.clear() + parser_cache.clear() + else: + # normally just kill the expired entries, not all + for tc in _time_caches.values(): + # check time_cache for expired entries + for key, (t, value) in list(tc.items()): + if t < time.time(): + # delete expired entries + del tc[key] + + def call_signature_time_cache(time_add_setting): - """ - This decorator works as follows: Call it with a setting and after that - use the function with a callable that returns the key. - But: This function is only called if the key is not available. After a - certain amount of time (`time_add_setting`) the cache is invalid. + """ + This decorator works as follows: Call it with a setting and after that + use the function with a callable that returns the key. + But: This function is only called if the key is not available. After a + certain amount of time (`time_add_setting`) the cache is invalid. If the given key is None, the function will not be cached. 
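A usage sketch for ``underscore_memoization`` as its docstring above describes it; the exact attribute used for the cache is an implementation detail of the decorator::

    from jedi.cache import underscore_memoization

    class Thing(object):
        @underscore_memoization
        def answer(self):
            print('computed once')
            return 42

    t = Thing()
    print(t.answer())   # prints 'computed once', then 42
    print(t.answer())   # served from the cached attribute, only 42 this time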
- """ - def _temp(key_func): - dct = {} - _time_caches[time_add_setting] = dct - - def wrapper(*args, **kwargs): - generator = key_func(*args, **kwargs) - key = next(generator) - try: - expiry, value = dct[key] - if expiry > time.time(): - return value - except KeyError: - pass - - value = next(generator) - time_add = getattr(settings, time_add_setting) - if key is not None: - dct[key] = time.time() + time_add, value - return value - return wrapper - return _temp - - + """ + def _temp(key_func): + dct = {} + _time_caches[time_add_setting] = dct + + def wrapper(*args, **kwargs): + generator = key_func(*args, **kwargs) + key = next(generator) + try: + expiry, value = dct[key] + if expiry > time.time(): + return value + except KeyError: + pass + + value = next(generator) + time_add = getattr(settings, time_add_setting) + if key is not None: + dct[key] = time.time() + time_add, value + return value + return wrapper + return _temp + + def time_cache(seconds): def decorator(func): cache = {} - + @wraps(func) def wrapper(*args, **kwargs): key = (args, frozenset(kwargs.items())) @@ -123,24 +123,24 @@ def time_cache(seconds): result = func(*args, **kwargs) cache[key] = time.time(), result return result - + wrapper.clear_cache = lambda: cache.clear() return wrapper - + return decorator - - -def memoize_method(method): - """A normal memoize function.""" + + +def memoize_method(method): + """A normal memoize function.""" @wraps(method) - def wrapper(self, *args, **kwargs): + def wrapper(self, *args, **kwargs): cache_dict = self.__dict__.setdefault('_memoize_method_dct', {}) dct = cache_dict.setdefault(method, {}) - key = (args, frozenset(kwargs.items())) - try: - return dct[key] - except KeyError: - result = method(self, *args, **kwargs) - dct[key] = result - return result - return wrapper + key = (args, frozenset(kwargs.items())) + try: + return dct[key] + except KeyError: + result = method(self, *args, **kwargs) + dct[key] = result + return result + return wrapper diff --git a/contrib/python/jedi/jedi/debug.py b/contrib/python/jedi/jedi/debug.py index 49fda4bcac..69205d921d 100644 --- a/contrib/python/jedi/jedi/debug.py +++ b/contrib/python/jedi/jedi/debug.py @@ -1,7 +1,7 @@ -from jedi._compatibility import encoding, is_py3, u -import os -import time - +from jedi._compatibility import encoding, is_py3, u +import os +import time + _inited = False @@ -14,14 +14,14 @@ def _lazy_colorama_init(): """ -try: - if os.name == 'nt': +try: + if os.name == 'nt': # Does not work on Windows, as pyreadline and colorama interfere - raise ImportError - else: - # Use colorama for nicer console output. - from colorama import Fore, init - from colorama import initialise + raise ImportError + else: + # Use colorama for nicer console output. 
+ from colorama import Fore, init + from colorama import initialise def _lazy_colorama_init(): # noqa: F811 """ @@ -44,76 +44,76 @@ try: pass _inited = True -except ImportError: - class Fore(object): - RED = '' - GREEN = '' - YELLOW = '' +except ImportError: + class Fore(object): + RED = '' + GREEN = '' + YELLOW = '' MAGENTA = '' - RESET = '' - -NOTICE = object() -WARNING = object() -SPEED = object() - -enable_speed = False -enable_warning = False -enable_notice = False - -# callback, interface: level, str -debug_function = None + RESET = '' + +NOTICE = object() +WARNING = object() +SPEED = object() + +enable_speed = False +enable_warning = False +enable_notice = False + +# callback, interface: level, str +debug_function = None _debug_indent = 0 -_start_time = time.time() - - -def reset_time(): - global _start_time, _debug_indent - _start_time = time.time() +_start_time = time.time() + + +def reset_time(): + global _start_time, _debug_indent + _start_time = time.time() _debug_indent = 0 - - -def increase_indent(func): - """Decorator for makin """ - def wrapper(*args, **kwargs): - global _debug_indent - _debug_indent += 1 - try: + + +def increase_indent(func): + """Decorator for makin """ + def wrapper(*args, **kwargs): + global _debug_indent + _debug_indent += 1 + try: return func(*args, **kwargs) - finally: - _debug_indent -= 1 - return wrapper - - + finally: + _debug_indent -= 1 + return wrapper + + def dbg(message, *args, **kwargs): - """ Looks at the stack, to see if a debug message should be printed. """ + """ Looks at the stack, to see if a debug message should be printed. """ # Python 2 compatibility, because it doesn't understand default args color = kwargs.pop('color', 'GREEN') assert color - if debug_function and enable_notice: + if debug_function and enable_notice: i = ' ' * _debug_indent _lazy_colorama_init() debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) - - + + def warning(message, *args, **kwargs): format = kwargs.pop('format', True) assert not kwargs - if debug_function and enable_warning: - i = ' ' * _debug_indent + if debug_function and enable_warning: + i = ' ' * _debug_indent if format: message = message % tuple(u(repr(a)) for a in args) debug_function('RED', i + 'warning: ' + message) - - -def speed(name): - if debug_function and enable_speed: - now = time.time() - i = ' ' * _debug_indent + + +def speed(name): + if debug_function and enable_speed: + now = time.time() + i = ' ' * _debug_indent debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time)) - - + + def print_to_stdout(color, str_out): """ The default debug function that prints to standard out. @@ -122,9 +122,9 @@ def print_to_stdout(color, str_out): """ col = getattr(Fore, color) _lazy_colorama_init() - if not is_py3: - str_out = str_out.encode(encoding, 'replace') - print(col + str_out + Fore.RESET) - - -# debug_function = print_to_stdout + if not is_py3: + str_out = str_out.encode(encoding, 'replace') + print(col + str_out + Fore.RESET) + + +# debug_function = print_to_stdout diff --git a/contrib/python/jedi/jedi/evaluate/__init__.py b/contrib/python/jedi/jedi/evaluate/__init__.py index 48339439ee..00104c870e 100644 --- a/contrib/python/jedi/jedi/evaluate/__init__.py +++ b/contrib/python/jedi/jedi/evaluate/__init__.py @@ -1,80 +1,80 @@ -""" -Evaluation of Python code in |jedi| is based on three assumptions: - -* The code uses as least side effects as possible. 
Jedi understands certain - list/tuple/set modifications, but there's no guarantee that Jedi detects - everything (list.append in different modules for example). -* No magic is being used: - - - metaclasses - - ``setattr()`` / ``__import__()`` - - writing to ``globals()``, ``locals()``, ``object.__dict__`` -* The programmer is not a total dick, e.g. like `this - <https://github.com/davidhalter/jedi/issues/24>`_ :-) - +""" +Evaluation of Python code in |jedi| is based on three assumptions: + +* The code uses as least side effects as possible. Jedi understands certain + list/tuple/set modifications, but there's no guarantee that Jedi detects + everything (list.append in different modules for example). +* No magic is being used: + + - metaclasses + - ``setattr()`` / ``__import__()`` + - writing to ``globals()``, ``locals()``, ``object.__dict__`` +* The programmer is not a total dick, e.g. like `this + <https://github.com/davidhalter/jedi/issues/24>`_ :-) + The actual algorithm is based on a principle called lazy evaluation. That said, the typical entry point for static analysis is calling ``eval_expr_stmt``. There's separate logic for autocompletion in the API, the evaluator is all about evaluating an expression. - + TODO this paragraph is not what jedi does anymore, it's similar, but not the same. Now you need to understand what follows after ``eval_expr_stmt``. Let's -make an example:: - - import datetime - datetime.date.toda# <-- cursor here - -First of all, this module doesn't care about completion. It really just cares +make an example:: + + import datetime + datetime.date.toda# <-- cursor here + +First of all, this module doesn't care about completion. It really just cares about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will -return the ``date`` class. - -To *visualize* this (simplified): - +return the ``date`` class. + +To *visualize* this (simplified): + - ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment. - ``Context.eval_node`` cares for resolving the dotted path -- ``Evaluator.find_types`` searches for global definitions of datetime, which - it finds in the definition of an import, by scanning the syntax tree. -- Using the import logic, the datetime module is found. +- ``Evaluator.find_types`` searches for global definitions of datetime, which + it finds in the definition of an import, by scanning the syntax tree. +- Using the import logic, the datetime module is found. - Now ``find_types`` is called again by ``eval_node`` to find ``date`` - inside the datetime module. - -Now what would happen if we wanted ``datetime.date.foo.bar``? Two more -calls to ``find_types``. However the second call would be ignored, because the -first one would return nothing (there's no foo attribute in ``date``). - -What if the import would contain another ``ExprStmt`` like this:: - - from foo import bar - Date = bar.baz - + inside the datetime module. + +Now what would happen if we wanted ``datetime.date.foo.bar``? Two more +calls to ``find_types``. However the second call would be ignored, because the +first one would return nothing (there's no foo attribute in ``date``). + +What if the import would contain another ``ExprStmt`` like this:: + + from foo import bar + Date = bar.baz + Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really -easy. Python can obviously get way more complicated then this. To understand -tuple assignments, list comprehensions and everything else, a lot more code had -to be written. 
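The walkthrough above is easier to follow next to a concrete call; a sketch against the public API of this jedi version, with the expected results noted as assumptions::

    from jedi import Script

    completions = Script('import datetime\ndatetime.date.toda', 2,
                         len('datetime.date.toda'), 'example.py').completions()
    print([c.name for c in completions])   # expected to include 'today'

    defs = Script('import datetime\ndatetime.date', 2,
                  len('datetime.date'), 'example.py').goto_definitions()
    print(defs[0].name, defs[0].type)      # expected: 'date' 'class'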
- -Jedi has been tested very well, so you can just start modifying code. It's best -to write your own test first for your "new" feature. Don't be scared of -breaking stuff. As long as the tests pass, you're most likely to be fine. - -I need to mention now that lazy evaluation is really good because it -only *evaluates* what needs to be *evaluated*. All the statements and modules -that are not used are just being ignored. -""" - +easy. Python can obviously get way more complicated then this. To understand +tuple assignments, list comprehensions and everything else, a lot more code had +to be written. + +Jedi has been tested very well, so you can just start modifying code. It's best +to write your own test first for your "new" feature. Don't be scared of +breaking stuff. As long as the tests pass, you're most likely to be fine. + +I need to mention now that lazy evaluation is really good because it +only *evaluates* what needs to be *evaluated*. All the statements and modules +that are not used are just being ignored. +""" + from parso.python import tree import parso from parso import python_bytes_to_unicode - -from jedi import debug + +from jedi import debug from jedi import parser_utils from jedi.evaluate.utils import unite -from jedi.evaluate import imports -from jedi.evaluate import recursion +from jedi.evaluate import imports +from jedi.evaluate import recursion from jedi.evaluate.cache import evaluator_function_cache -from jedi.evaluate import compiled -from jedi.evaluate import helpers +from jedi.evaluate import compiled +from jedi.evaluate import helpers from jedi.evaluate.filters import TreeNameDefinition, ParamName from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \ ContextSet, NO_CONTEXTS, iterate_contexts @@ -83,9 +83,9 @@ from jedi.evaluate.context import ClassContext, FunctionContext, \ from jedi.evaluate.context.iterable import CompForContext from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \ eval_node, check_tuple_assignments - - -class Evaluator(object): + + +class Evaluator(object): def __init__(self, project, environment=None, script_path=None): if environment is None: environment = project.get_environment() @@ -95,12 +95,12 @@ class Evaluator(object): self.grammar = environment.get_grammar() self.latest_grammar = parso.load_grammar(version='3.6') - self.memoize_cache = {} # for memoize decorators + self.memoize_cache = {} # for memoize decorators self.module_cache = imports.ModuleCache() # does the job of `sys.modules`. self.compiled_cache = {} # see `evaluate.compiled.create()` self.inferred_element_counts = {} self.mixed_cache = {} # see `evaluate.compiled.mixed._create()` - self.analysis = [] + self.analysis = [] self.dynamic_params_depth = 0 self.is_analysis = False self.project = project @@ -108,30 +108,30 @@ class Evaluator(object): # This setting is only temporary to limit the work we have to do with # tensorflow and others. 
self.infer_enabled = True - + self.reset_recursion_limitations() self.allow_different_encoding = True - + @property @evaluator_function_cache() def builtins_module(self): return compiled.get_special_object(self, u'BUILTINS') - + def reset_recursion_limitations(self): self.recursion_detector = recursion.RecursionDetector() self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) - + def get_sys_path(self): """Convenience function""" return self.project._get_sys_path(self, environment=self.environment) - + def eval_element(self, context, element): if not self.infer_enabled: return NO_CONTEXTS - + if isinstance(context, CompForContext): return eval_node(context, element) - + if_stmt = element while if_stmt is not None: if_stmt = if_stmt.parent @@ -172,7 +172,7 @@ class Evaluator(object): # long. name_dicts = [{}] break - + original_name_dicts = list(name_dicts) name_dicts = [] for definition in definitions: @@ -180,7 +180,7 @@ class Evaluator(object): for i, name_dict in enumerate(new_name_dicts): new_name_dicts[i] = name_dict.copy() new_name_dicts[i][if_name.value] = ContextSet(definition) - + name_dicts += new_name_dicts else: for name_dict in name_dicts: @@ -191,18 +191,18 @@ class Evaluator(object): with helpers.predefine_names(context, if_stmt, name_dict): result |= eval_node(context, element) return result - else: + else: return self._eval_element_if_evaluated(context, element) - else: + else: if predefined_if_name_dict: return eval_node(context, element) - else: + else: return self._eval_element_if_evaluated(context, element) - + def _eval_element_if_evaluated(self, context, element): - """ + """ TODO This function is temporary: Merge with eval_element. - """ + """ parent = element while parent is not None: parent = parent.parent @@ -210,11 +210,11 @@ class Evaluator(object): if predefined_if_name_dict is not None: return eval_node(context, element) return self._eval_element_cached(context, element) - + @evaluator_function_cache(default=NO_CONTEXTS) def _eval_element_cached(self, context, element): return eval_node(context, element) - + def goto_definitions(self, context, name): def_ = name.get_definition(import_name_always=True) if def_ is not None: @@ -223,7 +223,7 @@ class Evaluator(object): return [ClassContext(self, context, name.parent)] elif type_ == 'funcdef': return [FunctionContext.from_context(context, name.parent)] - + if type_ == 'expr_stmt': is_simple_name = name.parent.type not in ('power', 'trailer') if is_simple_name: @@ -236,9 +236,9 @@ class Evaluator(object): return check_tuple_assignments(self, c_node, for_types) if type_ in ('import_from', 'import_name'): return imports.infer_import(context, name) - + return helpers.evaluate_call_of_leaf(context, name) - + def goto(self, context, name): definition = name.get_definition(import_name_always=True) if definition is not None: @@ -256,59 +256,59 @@ class Evaluator(object): elif type_ in ('import_from', 'import_name'): module_names = imports.infer_import(context, name, is_goto=True) return module_names - - par = name.parent + + par = name.parent node_type = par.type if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name: - # Named param goto. - trailer = par.parent - if trailer.type == 'arglist': - trailer = trailer.parent - if trailer.type != 'classdef': - if trailer.type == 'decorator': + # Named param goto. 
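The branch marked ``# Named param goto.`` handles goto on the name of a keyword argument, which should resolve to the parameter definition of the called function; a hedged sketch of that behaviour through the public API::

    from jedi import Script

    source = 'def greet(name):\n    return name\n\ngreet(name="world")'
    # Cursor inside the 'name' keyword of the call on line 4.
    results = Script(source, 4, len('greet(na'), 'example.py').goto_assignments()
    print([(r.line, r.name) for r in results])   # expected: [(1, 'name')]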
+ trailer = par.parent + if trailer.type == 'arglist': + trailer = trailer.parent + if trailer.type != 'classdef': + if trailer.type == 'decorator': context_set = context.eval_node(trailer.children[1]) - else: - i = trailer.parent.children.index(trailer) - to_evaluate = trailer.parent.children[:i] + else: + i = trailer.parent.children.index(trailer) + to_evaluate = trailer.parent.children[:i] if to_evaluate[0] == 'await': to_evaluate.pop(0) context_set = context.eval_node(to_evaluate[0]) - for trailer in to_evaluate[1:]: + for trailer in to_evaluate[1:]: context_set = eval_trailer(context, context_set, trailer) - param_names = [] + param_names = [] for context in context_set: - try: + try: get_param_names = context.get_param_names - except AttributeError: - pass - else: + except AttributeError: + pass + else: for param_name in get_param_names(): if param_name.string_name == name.value: param_names.append(param_name) - return param_names + return param_names elif node_type == 'dotted_name': # Is a decorator. - index = par.children.index(name) - if index > 0: - new_dotted = helpers.deep_ast_copy(par) - new_dotted.children[index - 1:] = [] + index = par.children.index(name) + if index > 0: + new_dotted = helpers.deep_ast_copy(par) + new_dotted.children[index - 1:] = [] values = context.eval_node(new_dotted) return unite( value.py__getattribute__(name, name_context=context, is_goto=True) for value in values ) - + if node_type == 'trailer' and par.children[0] == '.': values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True) return unite( value.py__getattribute__(name, name_context=context, is_goto=True) for value in values ) - else: + else: stmt = tree.search_ancestor( name, 'expr_stmt', 'lambdef' ) or name if stmt.type == 'lambdef': - stmt = name + stmt = name return context.py__getattribute__( name, position=stmt.start_pos, diff --git a/contrib/python/jedi/jedi/evaluate/analysis.py b/contrib/python/jedi/jedi/evaluate/analysis.py index ded4e9f208..f942a268f2 100644 --- a/contrib/python/jedi/jedi/evaluate/analysis.py +++ b/contrib/python/jedi/jedi/evaluate/analysis.py @@ -1,18 +1,18 @@ -""" -Module for statical analysis. -""" +""" +Module for statical analysis. 
+""" from parso.python import tree from jedi._compatibility import force_unicode -from jedi import debug -from jedi.evaluate.compiled import CompiledObject +from jedi import debug +from jedi.evaluate.compiled import CompiledObject from jedi.evaluate.helpers import is_string - - -CODES = { - 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), - 'name-error': (2, NameError, 'Potential NameError.'), - 'import-error': (3, ImportError, 'Potential ImportError.'), + + +CODES = { + 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), + 'name-error': (2, NameError, 'Potential NameError.'), + 'import-error': (3, ImportError, 'Potential ImportError.'), 'type-error-too-many-arguments': (4, TypeError, None), 'type-error-too-few-arguments': (5, TypeError, None), 'type-error-keyword-argument': (6, TypeError, None), @@ -25,96 +25,96 @@ CODES = { 'type-error-not-subscriptable': (13, TypeError, None), 'value-error-too-many-values': (14, ValueError, None), 'value-error-too-few-values': (15, ValueError, None), -} - - -class Error(object): - def __init__(self, name, module_path, start_pos, message=None): - self.path = module_path - self._start_pos = start_pos - self.name = name - if message is None: - message = CODES[self.name][2] - self.message = message - - @property - def line(self): - return self._start_pos[0] - - @property - def column(self): - return self._start_pos[1] - - @property - def code(self): - # The class name start - first = self.__class__.__name__[0] - return first + str(CODES[self.name][0]) - - def __unicode__(self): - return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, - self.code, self.message) - - def __str__(self): - return self.__unicode__() - - def __eq__(self, other): +} + + +class Error(object): + def __init__(self, name, module_path, start_pos, message=None): + self.path = module_path + self._start_pos = start_pos + self.name = name + if message is None: + message = CODES[self.name][2] + self.message = message + + @property + def line(self): + return self._start_pos[0] + + @property + def column(self): + return self._start_pos[1] + + @property + def code(self): + # The class name start + first = self.__class__.__name__[0] + return first + str(CODES[self.name][0]) + + def __unicode__(self): + return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, + self.code, self.message) + + def __str__(self): + return self.__unicode__() + + def __eq__(self, other): return (self.path == other.path and self.name == other.name and self._start_pos == other._start_pos) - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash((self.path, self._start_pos, self.name)) - - def __repr__(self): - return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, - self.name, self.path, - self._start_pos[0], self._start_pos[1]) - - -class Warning(Error): - pass - - + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.path, self._start_pos, self.name)) + + def __repr__(self): + return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, + self.name, self.path, + self._start_pos[0], self._start_pos[1]) + + +class Warning(Error): + pass + + def add(node_context, error_name, node, message=None, typ=Error, payload=None): exception = CODES[error_name][1] if _check_for_exception_catch(node_context, node, exception, payload): - return - + return + # TODO this path is probably not right module_context = node_context.get_root_context() module_path = module_context.py__file__() instance = 
typ(error_name, module_path, node.start_pos, message) debug.warning(str(instance), format=False) node_context.evaluator.analysis.append(instance) - - -def _check_for_setattr(instance): - """ - Check if there's any setattr method inside an instance. If so, return True. - """ + + +def _check_for_setattr(instance): + """ + Check if there's any setattr method inside an instance. If so, return True. + """ from jedi.evaluate.context import ModuleContext module = instance.get_root_context() if not isinstance(module, ModuleContext): return False node = module.tree_node - try: + try: stmts = node.get_used_names()['setattr'] - except KeyError: - return False - + except KeyError: + return False + return any(node.start_pos < stmt.start_pos < node.end_pos - for stmt in stmts) - - + for stmt in stmts) + + def add_attribute_error(name_context, lookup_context, name): message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name)) from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName - # Check for __getattr__/__getattribute__ existance and issue a warning - # instead of an error, if that happens. + # Check for __getattr__/__getattribute__ existance and issue a warning + # instead of an error, if that happens. typ = Error if isinstance(lookup_context, AbstractInstanceContext): slot_names = lookup_context.get_function_slot_names(u'__getattr__') + \ @@ -125,94 +125,94 @@ def add_attribute_error(name_context, lookup_context, name): n.parent_context.obj == object: typ = Warning break - + if _check_for_setattr(lookup_context): typ = Warning - + payload = lookup_context, name add(name_context, 'attribute-error', name, message, typ, payload) - + def _check_for_exception_catch(node_context, jedi_name, exception, payload=None): - """ - Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and - doesn't count as an error (if equal to `exception`). - Also checks `hasattr` for AttributeErrors and uses the `payload` to compare - it. - Returns True if the exception was catched. - """ - def check_match(cls, exception): - try: + """ + Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and + doesn't count as an error (if equal to `exception`). + Also checks `hasattr` for AttributeErrors and uses the `payload` to compare + it. + Returns True if the exception was catched. + """ + def check_match(cls, exception): + try: return isinstance(cls, CompiledObject) and cls.is_super_class(exception) - except TypeError: - return False - - def check_try_for_except(obj, exception): - # Only nodes in try - iterator = iter(obj.children) - for branch_type in iterator: - colon = next(iterator) - suite = next(iterator) - if branch_type == 'try' \ + except TypeError: + return False + + def check_try_for_except(obj, exception): + # Only nodes in try + iterator = iter(obj.children) + for branch_type in iterator: + colon = next(iterator) + suite = next(iterator) + if branch_type == 'try' \ and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos): - return False - + return False + for node in obj.get_except_clause_tests(): - if node is None: - return True # An exception block that catches everything. - else: + if node is None: + return True # An exception block that catches everything. 
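The ``Error`` and ``Warning`` objects this analysis module produces (see the ``CODES`` table further up) can be constructed directly, which shows how the code and message are put together::

    from jedi.evaluate.analysis import Error, Warning

    err = Error('attribute-error', '/tmp/example.py', (3, 4))
    print(err.code)     # 'E1', the class initial plus the numeric id from CODES
    print(str(err))     # '/tmp/example.py:3:4: E1 Potential AttributeError.'
    print(Warning('name-error', '/tmp/example.py', (1, 0)).code)   # 'W2'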
+ else: except_classes = node_context.eval_node(node) - for cls in except_classes: + for cls in except_classes: from jedi.evaluate.context import iterable if isinstance(cls, iterable.Sequence) and \ cls.array_type == 'tuple': - # multiple exceptions + # multiple exceptions for lazy_context in cls.py__iter__(): for typ in lazy_context.infer(): if check_match(typ, exception): return True - else: - if check_match(cls, exception): - return True - - def check_hasattr(node, suite): - try: + else: + if check_match(cls, exception): + return True + + def check_hasattr(node, suite): + try: assert suite.start_pos <= jedi_name.start_pos < suite.end_pos assert node.type in ('power', 'atom_expr') - base = node.children[0] - assert base.type == 'name' and base.value == 'hasattr' - trailer = node.children[1] - assert trailer.type == 'trailer' - arglist = trailer.children[1] - assert arglist.type == 'arglist' + base = node.children[0] + assert base.type == 'name' and base.value == 'hasattr' + trailer = node.children[1] + assert trailer.type == 'trailer' + arglist = trailer.children[1] + assert arglist.type == 'arglist' from jedi.evaluate.arguments import TreeArguments args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack()) - # Arguments should be very simple - assert len(args) == 2 - - # Check name + # Arguments should be very simple + assert len(args) == 2 + + # Check name key, lazy_context = args[1] names = list(lazy_context.infer()) assert len(names) == 1 and is_string(names[0]) assert force_unicode(names[0].get_safe_value()) == payload[1].value - - # Check objects + + # Check objects key, lazy_context = args[0] objects = lazy_context.infer() - return payload[0] in objects - except AssertionError: - return False - + return payload[0] in objects + except AssertionError: + return False + obj = jedi_name while obj is not None and not isinstance(obj, (tree.Function, tree.Class)): if isinstance(obj, tree.Flow): - # try/except catch check + # try/except catch check if obj.type == 'try_stmt' and check_try_for_except(obj, exception): - return True - # hasattr check + return True + # hasattr check if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'): - if check_hasattr(obj.children[1], obj.children[3]): - return True - obj = obj.parent - - return False + if check_hasattr(obj.children[1], obj.children[3]): + return True + obj = obj.parent + + return False diff --git a/contrib/python/jedi/jedi/evaluate/cache.py b/contrib/python/jedi/jedi/evaluate/cache.py index c619e698a3..d61a053b42 100644 --- a/contrib/python/jedi/jedi/evaluate/cache.py +++ b/contrib/python/jedi/jedi/evaluate/cache.py @@ -1,50 +1,50 @@ -""" +""" - the popular ``_memoize_default`` works like a typical memoize and returns the - default otherwise. + default otherwise. - ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes. -""" - +""" + _NO_DEFAULT = object() - - + + def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): - """ This is a typical memoization decorator, BUT there is one difference: - To prevent recursion it sets defaults. - - Preventing recursion is in this case the much bigger use than speed. I - don't think, that there is a big speed difference, but there are many cases - where recursion could happen (think about a = b; b = a). - """ - def func(function): - def wrapper(obj, *args, **kwargs): + """ This is a typical memoization decorator, BUT there is one difference: + To prevent recursion it sets defaults. 
+ + Preventing recursion is in this case the much bigger use than speed. I + don't think, that there is a big speed difference, but there are many cases + where recursion could happen (think about a = b; b = a). + """ + def func(function): + def wrapper(obj, *args, **kwargs): # TODO These checks are kind of ugly and slow. - if evaluator_is_first_arg: - cache = obj.memoize_cache + if evaluator_is_first_arg: + cache = obj.memoize_cache elif second_arg_is_evaluator: cache = args[0].memoize_cache # needed for meta classes - else: + else: cache = obj.evaluator.memoize_cache - - try: - memo = cache[function] - except KeyError: - memo = {} - cache[function] = memo - - key = (obj, args, frozenset(kwargs.items())) - if key in memo: - return memo[key] - else: + + try: + memo = cache[function] + except KeyError: + memo = {} + cache[function] = memo + + key = (obj, args, frozenset(kwargs.items())) + if key in memo: + return memo[key] + else: if default is not _NO_DEFAULT: - memo[key] = default - rv = function(obj, *args, **kwargs) - memo[key] = rv - return rv - return wrapper - - return func - - + memo[key] = default + rv = function(obj, *args, **kwargs) + memo[key] = rv + return rv + return wrapper + + return func + + def evaluator_function_cache(default=_NO_DEFAULT): def decorator(func): return _memoize_default(default=default, evaluator_is_first_arg=True)(func) @@ -66,12 +66,12 @@ def evaluator_as_method_param_cache(): return decorator -class CachedMetaClass(type): - """ - This is basically almost the same than the decorator above, it just caches - class initializations. Either you do it this way or with decorators, but - with decorators you lose class access (isinstance, etc). - """ +class CachedMetaClass(type): + """ + This is basically almost the same than the decorator above, it just caches + class initializations. Either you do it this way or with decorators, but + with decorators you lose class access (isinstance, etc). + """ @evaluator_as_method_param_cache() - def __call__(self, *args, **kwargs): - return super(CachedMetaClass, self).__call__(*args, **kwargs) + def __call__(self, *args, **kwargs): + return super(CachedMetaClass, self).__call__(*args, **kwargs) diff --git a/contrib/python/jedi/jedi/evaluate/compiled/__init__.py b/contrib/python/jedi/jedi/evaluate/compiled/__init__.py index 757aec5234..386eef81ea 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/__init__.py +++ b/contrib/python/jedi/jedi/evaluate/compiled/__init__.py @@ -2,41 +2,41 @@ from jedi._compatibility import unicode from jedi.evaluate.compiled.context import CompiledObject, CompiledName, \ CompiledObjectFilter, CompiledContextName, create_from_access_path, \ create_from_name - - + + def builtin_from_name(evaluator, string): builtins = evaluator.builtins_module return create_from_name(evaluator, builtins, string) - - + + def create_simple_object(evaluator, obj): - """ + """ Only allows creations of objects that are easily picklable across Python versions. 
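The important property of ``_memoize_default`` above is that it stores the default in the cache before the wrapped call runs, so self-referencing evaluations (``a = b; b = a``) terminate instead of recursing forever; a standalone toy version of that idea, not jedi's actual decorator::

    def memoize_default(default):
        def decorator(func):
            memo = {}
            def wrapper(*args):
                if args in memo:
                    return memo[args]
                memo[args] = default             # pre-seed to break recursion cycles
                memo[args] = func(*args)
                return memo[args]
            return wrapper
        return decorator

    DEFINITIONS = {'a': ['b'], 'b': ['a']}       # the "a = b; b = a" situation

    @memoize_default(frozenset())
    def infer(name):
        return frozenset().union(*(infer(other) for other in DEFINITIONS.get(name, [])))

    print(infer('a'))   # frozenset(), instead of a RecursionError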
- """ + """ assert isinstance(obj, (int, float, str, bytes, unicode, slice, complex)) return create_from_access_path( evaluator, evaluator.compiled_subprocess.create_simple_object(obj) ) - - + + def get_special_object(evaluator, identifier): return create_from_access_path( evaluator, evaluator.compiled_subprocess.get_special_object(identifier) ) - - + + def get_string_context_set(evaluator): return builtin_from_name(evaluator, u'str').execute_evaluated() - - + + def load_module(evaluator, dotted_name, **kwargs): # Temporary, some tensorflow builtins cannot be loaded, so it's tried again # and again and it's really slow. if dotted_name.startswith('tensorflow.'): - return None + return None access_path = evaluator.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs) if access_path is None: return None diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake.py b/contrib/python/jedi/jedi/evaluate/compiled/fake.py index a38ff34938..98f126d144 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake.py +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake.py @@ -1,19 +1,19 @@ -""" -Loads functions that are mixed in to the standard library. E.g. builtins are -written in C (binaries), but my autocompletion only understands Python code. By -mixing in Python code, the autocompletion should work much better for builtins. -""" +""" +Loads functions that are mixed in to the standard library. E.g. builtins are +written in C (binaries), but my autocompletion only understands Python code. By +mixing in Python code, the autocompletion should work much better for builtins. +""" import sys -import os +import os from itertools import chain - + import __res - + from jedi._compatibility import unicode - + fake_modules = {} - - + + def _get_path_dict(): path = os.path.dirname(__file__) base_path = os.path.join(path, 'fake') @@ -24,37 +24,37 @@ def _get_path_dict(): if file_name.startswith(base_path) and file_name.endswith('.pym'): dct[file_name[len(base_path) + 1:-4]] = file_name return dct - - + + _path_dict = _get_path_dict() - - + + class FakeDoesNotExist(Exception): pass - + def _load_faked_module(evaluator, module_name): - try: + try: return fake_modules[module_name] except KeyError: - pass - + pass + check_module_name = module_name if module_name == '__builtin__' and evaluator.environment.version_info.major == 2: check_module_name = 'builtins' - try: + try: path = _path_dict[check_module_name] except KeyError: fake_modules[module_name] = None return - + if sys.version_info[0] == 3: path = bytes(path, 'ascii') source = __res.resfs_read(path) - + fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source)) - + if check_module_name != module_name: # There are two implementations of `open` for either python 2/3. # -> Rename the python2 version (`look at fake/builtins.pym`). @@ -63,14 +63,14 @@ def _load_faked_module(evaluator, module_name): open_func = _search_scope(m, 'open_python2') open_func.children[1].value = 'open' return m - - + + def _search_scope(scope, obj_name): for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()): if s.name.value == obj_name: return s - - + + def get_faked_with_parent_context(parent_context, name): if parent_context.tree_node is not None: # Try to search in already clearly defined stuff. 
@@ -78,7 +78,7 @@ def get_faked_with_parent_context(parent_context, name): if found is not None: return found raise FakeDoesNotExist - + def get_faked_module(evaluator, string_name): module = _load_faked_module(evaluator, string_name) diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym index 909ef03fc3..e56a1f4fa4 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym @@ -1,9 +1,9 @@ -class partial(): - def __init__(self, func, *args, **keywords): - self.__func = func - self.__args = args - self.__keywords = keywords - - def __call__(self, *args, **kwargs): - # TODO should be **dict(self.__keywords, **kwargs) - return self.__func(*(self.__args + args), **self.__keywords) +class partial(): + def __init__(self, func, *args, **keywords): + self.__func = func + self.__args = args + self.__keywords = keywords + + def __call__(self, *args, **kwargs): + # TODO should be **dict(self.__keywords, **kwargs) + return self.__func(*(self.__args + args), **self.__keywords) diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym index 2151e652b4..0df417a0bf 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym @@ -1,26 +1,26 @@ -def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): - return Connection() - - -class Connection(): - def cursor(self): - return Cursor() - - -class Cursor(): - def cursor(self): - return Cursor() - - def fetchone(self): - return Row() - - def fetchmany(self, size=cursor.arraysize): - return [self.fetchone()] - - def fetchall(self): - return [self.fetchone()] - - -class Row(): - def keys(self): - return [''] +def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): + return Connection() + + +class Connection(): + def cursor(self): + return Cursor() + + +class Cursor(): + def cursor(self): + return Cursor() + + def fetchone(self): + return Row() + + def fetchmany(self, size=cursor.arraysize): + return [self.fetchone()] + + def fetchall(self): + return [self.fetchone()] + + +class Row(): + def keys(self): + return [''] diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym index 217be56339..4d271c0cb5 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym @@ -1,99 +1,99 @@ -def compile(): - class SRE_Match(): - endpos = int() - lastgroup = int() - lastindex = int() - pos = int() - string = str() - regs = ((int(), int()),) - - def __init__(self, pattern): - self.re = pattern - - def start(self): - return int() - - def end(self): - return int() - - def span(self): - return int(), int() - - def expand(self): - return str() - - def group(self, nr): - return str() - - def groupdict(self): - return {str(): str()} - - def groups(self): - return (str(),) - - class SRE_Pattern(): - flags = int() - groupindex = {} - groups = int() - pattern = str() - - def findall(self, string, pos=None, endpos=None): - """ - findall(string[, pos[, endpos]]) --> list. - Return a list of all non-overlapping matches of pattern in string. 
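The ``partial`` stub in ``_functools.pym`` above carries a TODO about merging the stored keywords with call-time keywords. A hedged sketch of what that merge could look like; this only illustrates the TODO and is not a replacement for ``functools.partial``.

class partial(object):
    def __init__(self, func, *args, **keywords):
        self.__func = func
        self.__args = args
        self.__keywords = keywords

    def __call__(self, *args, **kwargs):
        # Later keyword arguments override the stored ones, which is what the
        # TODO in the stub above asks for.
        merged = dict(self.__keywords, **kwargs)
        return self.__func(*(self.__args + args), **merged)


add = partial(lambda a, b, c=0: a + b + c, 1, c=10)
print(add(2))          # -> 13
print(add(2, c=100))   # -> 103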
- """ - return [str()] - - def finditer(self, string, pos=None, endpos=None): - """ - finditer(string[, pos[, endpos]]) --> iterator. - Return an iterator over all non-overlapping matches for the - RE pattern in string. For each match, the iterator returns a - match object. - """ - yield SRE_Match(self) - - def match(self, string, pos=None, endpos=None): - """ - match(string[, pos[, endpos]]) --> match object or None. - Matches zero or more characters at the beginning of the string - pattern - """ - return SRE_Match(self) - - def scanner(self, string, pos=None, endpos=None): - pass - - def search(self, string, pos=None, endpos=None): - """ - search(string[, pos[, endpos]]) --> match object or None. - Scan through string looking for a match, and return a corresponding - MatchObject instance. Return None if no position in the string matches. - """ - return SRE_Match(self) - - def split(self, string, maxsplit=0]): - """ - split(string[, maxsplit = 0]) --> list. - Split string by the occurrences of pattern. - """ - return [str()] - - def sub(self, repl, string, count=0): - """ - sub(repl, string[, count = 0]) --> newstring - Return the string obtained by replacing the leftmost non-overlapping - occurrences of pattern in string by the replacement repl. - """ - return str() - - def subn(self, repl, string, count=0): - """ - subn(repl, string[, count = 0]) --> (newstring, number of subs) - Return the tuple (new_string, number_of_subs_made) found by replacing - the leftmost non-overlapping occurrences of pattern with the - replacement repl. - """ - return (str(), int()) - - return SRE_Pattern() +def compile(): + class SRE_Match(): + endpos = int() + lastgroup = int() + lastindex = int() + pos = int() + string = str() + regs = ((int(), int()),) + + def __init__(self, pattern): + self.re = pattern + + def start(self): + return int() + + def end(self): + return int() + + def span(self): + return int(), int() + + def expand(self): + return str() + + def group(self, nr): + return str() + + def groupdict(self): + return {str(): str()} + + def groups(self): + return (str(),) + + class SRE_Pattern(): + flags = int() + groupindex = {} + groups = int() + pattern = str() + + def findall(self, string, pos=None, endpos=None): + """ + findall(string[, pos[, endpos]]) --> list. + Return a list of all non-overlapping matches of pattern in string. + """ + return [str()] + + def finditer(self, string, pos=None, endpos=None): + """ + finditer(string[, pos[, endpos]]) --> iterator. + Return an iterator over all non-overlapping matches for the + RE pattern in string. For each match, the iterator returns a + match object. + """ + yield SRE_Match(self) + + def match(self, string, pos=None, endpos=None): + """ + match(string[, pos[, endpos]]) --> match object or None. + Matches zero or more characters at the beginning of the string + pattern + """ + return SRE_Match(self) + + def scanner(self, string, pos=None, endpos=None): + pass + + def search(self, string, pos=None, endpos=None): + """ + search(string[, pos[, endpos]]) --> match object or None. + Scan through string looking for a match, and return a corresponding + MatchObject instance. Return None if no position in the string matches. + """ + return SRE_Match(self) + + def split(self, string, maxsplit=0]): + """ + split(string[, maxsplit = 0]) --> list. + Split string by the occurrences of pattern. 
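As a small aside, both copies of the ``split`` stub above read ``def split(self, string, maxsplit=0]):``, which contains a stray ``]`` and is not valid Python syntax. A syntactically valid form of the same stub would presumably be:

class SRE_Pattern(object):
    def split(self, string, maxsplit=0):
        """
        split(string[, maxsplit=0]) --> list.
        Split string by the occurrences of pattern.
        """
        return [str()]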
+ """ + return [str()] + + def sub(self, repl, string, count=0): + """ + sub(repl, string[, count = 0]) --> newstring + Return the string obtained by replacing the leftmost non-overlapping + occurrences of pattern in string by the replacement repl. + """ + return str() + + def subn(self, repl, string, count=0): + """ + subn(repl, string[, count = 0]) --> (newstring, number of subs) + Return the tuple (new_string, number_of_subs_made) found by replacing + the leftmost non-overlapping occurrences of pattern with the + replacement repl. + """ + return (str(), int()) + + return SRE_Pattern() diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym index 298d0b0dba..26148b7df4 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym @@ -1,9 +1,9 @@ -def proxy(object, callback=None): - return object - +def proxy(object, callback=None): + return object + class ref(): - def __init__(self, object, callback=None): - self.__object = object + def __init__(self, object, callback=None): + self.__object = object - def __call__(self): - return self.__object + def __call__(self): + return self.__object diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym index 46ec619fb4..4f737d4e8a 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym @@ -1,225 +1,225 @@ -""" -Pure Python implementation of some builtins. -This code is not going to be executed anywhere. -These implementations are not always correct, but should work as good as -possible for the auto completion. -""" - - -def next(iterator, default=None): - if random.choice([0, 1]): - if hasattr("next"): - return iterator.next() - else: - return iterator.__next__() - else: - if default is not None: - return default - - -def iter(collection, sentinel=None): - if sentinel: - yield collection() - else: - for c in collection: - yield c - - -def range(start, stop=None, step=1): - return [0] - - -class file(): - def __iter__(self): - yield '' - - def next(self): - return '' - +""" +Pure Python implementation of some builtins. +This code is not going to be executed anywhere. +These implementations are not always correct, but should work as good as +possible for the auto completion. +""" + + +def next(iterator, default=None): + if random.choice([0, 1]): + if hasattr("next"): + return iterator.next() + else: + return iterator.__next__() + else: + if default is not None: + return default + + +def iter(collection, sentinel=None): + if sentinel: + yield collection() + else: + for c in collection: + yield c + + +def range(start, stop=None, step=1): + return [0] + + +class file(): + def __iter__(self): + yield '' + + def next(self): + return '' + def readlines(self): return [''] - + def __enter__(self): return self -class xrange(): - # Attention: this function doesn't exist in Py3k (there it is range). 
- def __iter__(self): - yield 1 - - def count(self): - return 1 - - def index(self): - return 1 - - -def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True): - import io - return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd) - - -def open_python2(name, mode=None, buffering=None): - return file(name, mode, buffering) - - -#-------------------------------------------------------- -# descriptors -#-------------------------------------------------------- -class property(): - def __init__(self, fget, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - self.__doc__ = doc - - def __get__(self, obj, cls): - return self.fget(obj) - - def __set__(self, obj, value): - self.fset(obj, value) - - def __delete__(self, obj): - self.fdel(obj) - - def setter(self, func): - self.fset = func - return self - - def getter(self, func): - self.fget = func - return self - - def deleter(self, func): - self.fdel = func - return self - - -class staticmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - return self.__func - - -class classmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - def _method(*args, **kwargs): - return self.__func(cls, *args, **kwargs) - return _method - - -#-------------------------------------------------------- -# array stuff -#-------------------------------------------------------- -class list(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def pop(self): +class xrange(): + # Attention: this function doesn't exist in Py3k (there it is range). 
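The descriptor stubs above (``property``, ``staticmethod``, ``classmethod``) only model ``__get__``, ``__set__`` and ``__delete__``, because those are the hooks the descriptor protocol actually calls. A short plain-CPython demonstration of that protocol, with invented names and no connection to jedi:

class Celsius(object):
    def __init__(self, value):
        self._value = value

    @property
    def fahrenheit(self):
        # Reading ``obj.fahrenheit`` goes through property.__get__, which
        # calls this getter.
        return self._value * 9.0 / 5.0 + 32

    @fahrenheit.setter
    def fahrenheit(self, value):
        # Assigning goes through property.__set__, which calls this setter.
        self._value = (value - 32) * 5.0 / 9.0


c = Celsius(100)
print(c.fahrenheit)    # -> 212.0
c.fahrenheit = 32
print(c._value)        # -> 0.0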
+ def __iter__(self): + yield 1 + + def count(self): + return 1 + + def index(self): + return 1 + + +def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True): + import io + return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd) + + +def open_python2(name, mode=None, buffering=None): + return file(name, mode, buffering) + + +#-------------------------------------------------------- +# descriptors +#-------------------------------------------------------- +class property(): + def __init__(self, fget, fset=None, fdel=None, doc=None): + self.fget = fget + self.fset = fset + self.fdel = fdel + self.__doc__ = doc + + def __get__(self, obj, cls): + return self.fget(obj) + + def __set__(self, obj, value): + self.fset(obj, value) + + def __delete__(self, obj): + self.fdel(obj) + + def setter(self, func): + self.fset = func + return self + + def getter(self, func): + self.fget = func + return self + + def deleter(self, func): + self.fdel = func + return self + + +class staticmethod(): + def __init__(self, func): + self.__func = func + + def __get__(self, obj, cls): + return self.__func + + +class classmethod(): + def __init__(self, func): + self.__func = func + + def __get__(self, obj, cls): + def _method(*args, **kwargs): + return self.__func(cls, *args, **kwargs) + return _method + + +#-------------------------------------------------------- +# array stuff +#-------------------------------------------------------- +class list(): + def __init__(self, iterable=[]): + self.__iterable = [] + for i in iterable: + self.__iterable += [i] + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + + def pop(self): return self.__iterable[int()] - - -class tuple(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def index(self): - return 1 - - def count(self): - return 1 - - -class set(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def pop(self): - return list(self.__iterable)[-1] - - def copy(self): - return self - - def difference(self, other): - return self - other - - def intersection(self, other): - return self & other - - def symmetric_difference(self, other): - return self ^ other - - def union(self, other): - return self | other - - -class frozenset(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def copy(self): - return self - - -class dict(): - def __init__(self, **elements): - self.__elements = elements - - def clear(self): - # has a strange docstr - pass - + + +class tuple(): + def __init__(self, iterable=[]): + self.__iterable = [] + for i in iterable: + self.__iterable += [i] + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + + def index(self): + return 1 + + def count(self): + return 1 + + +class set(): + def __init__(self, iterable=[]): + self.__iterable = iterable + + def __iter__(self): + for i in self.__iterable: + yield i + + def pop(self): + return list(self.__iterable)[-1] + + def copy(self): + return self + + def difference(self, other): + return self - other + + def intersection(self, other): + return self & other + + def 
symmetric_difference(self, other): + return self ^ other + + def union(self, other): + return self | other + + +class frozenset(): + def __init__(self, iterable=[]): + self.__iterable = iterable + + def __iter__(self): + for i in self.__iterable: + yield i + + def copy(self): + return self + + +class dict(): + def __init__(self, **elements): + self.__elements = elements + + def clear(self): + # has a strange docstr + pass + def __getitem__(self, obj): return self.__elements[obj] - def get(self, k, d=None): - # TODO implement - try: + def get(self, k, d=None): + # TODO implement + try: return self.__elements[k] - pass - except KeyError: - return d - + pass + except KeyError: + return d + def values(self): return self.__elements.values() - def setdefault(self, k, d): - # TODO maybe also return the content - return d - - + def setdefault(self, k, d): + # TODO maybe also return the content + return d + + class enumerate(): def __init__(self, sequence, start=0): self.__sequence = sequence @@ -235,43 +235,43 @@ class enumerate(): return next(self.__iter__()) -class reversed(): - def __init__(self, sequence): - self.__sequence = sequence - - def __iter__(self): - for i in self.__sequence: - yield i - - def __next__(self): - return next(self.__iter__()) - - def next(self): - return next(self.__iter__()) - - -def sorted(iterable, cmp=None, key=None, reverse=False): - return iterable - - -#-------------------------------------------------------- -# basic types -#-------------------------------------------------------- -class int(): - def __init__(self, x, base=None): - pass - - -class str(): - def __init__(self, obj): - pass - +class reversed(): + def __init__(self, sequence): + self.__sequence = sequence + + def __iter__(self): + for i in self.__sequence: + yield i + + def __next__(self): + return next(self.__iter__()) + + def next(self): + return next(self.__iter__()) + + +def sorted(iterable, cmp=None, key=None, reverse=False): + return iterable + + +#-------------------------------------------------------- +# basic types +#-------------------------------------------------------- +class int(): + def __init__(self, x, base=None): + pass + + +class str(): + def __init__(self, obj): + pass + def strip(self): return str() - + def split(self): return [str()] -class type(): - def mro(): - return [object] +class type(): + def mro(): + return [object] diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym index 823ac5b7fd..d981e60051 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym @@ -1,4 +1,4 @@ -class datetime(): - @staticmethod - def now(): - return datetime() +class datetime(): + @staticmethod + def now(): + return datetime() diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym index c1f4fc0116..e97310e742 100644 --- a/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym @@ -1,9 +1,9 @@ -class TextIOWrapper(): - def __next__(self): - return str() - - def __iter__(self): - yield str() +class TextIOWrapper(): + def __next__(self): + return str() + + def __iter__(self): + yield str() def readlines(self): return [''] diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym index 4417f7cb04..2c8f6a56e3 100644 --- 
a/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym +++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym @@ -1,5 +1,5 @@ -def getcwd(): - return '' - -def getcwdu(): - return '' +def getcwd(): + return '' + +def getcwdu(): + return '' diff --git a/contrib/python/jedi/jedi/evaluate/docstrings.py b/contrib/python/jedi/jedi/evaluate/docstrings.py index aaef8ea853..1983a7635b 100644 --- a/contrib/python/jedi/jedi/evaluate/docstrings.py +++ b/contrib/python/jedi/jedi/evaluate/docstrings.py @@ -1,49 +1,49 @@ -""" -Docstrings are another source of information for functions and classes. -:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while +""" +Docstrings are another source of information for functions and classes. +:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while the docstring parsing is much easier. There are three different types of -docstrings that |jedi| understands: - -- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_ -- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_ +docstrings that |jedi| understands: + +- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_ +- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_ - `Numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_ - -For example, the sphinx annotation ``:type foo: str`` clearly states that the -type of ``foo`` is ``str``. - -As an addition to parameter searching, this module also provides return -annotations. -""" - -import re -from textwrap import dedent - + +For example, the sphinx annotation ``:type foo: str`` clearly states that the +type of ``foo`` is ``str``. + +As an addition to parameter searching, this module also provides return +annotations. +""" + +import re +from textwrap import dedent + from parso import parse, ParserSyntaxError - + from jedi._compatibility import u from jedi.evaluate.utils import indent_block from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \ NO_CONTEXTS from jedi.evaluate.lazy_context import LazyKnownContexts + - -DOCSTRING_PARAM_PATTERNS = [ - r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx +DOCSTRING_PARAM_PATTERNS = [ + r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type - r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc -] - -DOCSTRING_RETURN_PATTERNS = [ - re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx - re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc -] - -REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') - - + r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc +] + +DOCSTRING_RETURN_PATTERNS = [ + re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx + re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc +] + +REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') + + _numpy_doc_string_cache = None - + def _get_numpy_doc_string_cls(): global _numpy_doc_string_cache @@ -65,7 +65,7 @@ def _search_param_in_numpydocstr(docstr, param_str): # prepared and return gracefully. 
params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters'] except (KeyError, AttributeError, ImportError): - return [] + return [] for p_name, p_type, p_descr in params: if p_name == param_str: m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) @@ -73,8 +73,8 @@ def _search_param_in_numpydocstr(docstr, param_str): p_type = m.group(1) return list(_expand_typestr(p_type)) return [] - - + + def _search_return_in_numpydocstr(docstr): """ Search `docstr` (in numpydoc format) for type(-s) of function returns. @@ -131,59 +131,59 @@ def _expand_typestr(type_str): yield type_str -def _search_param_in_docstr(docstr, param_str): - """ - Search `docstr` for type(-s) of `param_str`. - - >>> _search_param_in_docstr(':type param: int', 'param') - ['int'] - >>> _search_param_in_docstr('@type param: int', 'param') - ['int'] - >>> _search_param_in_docstr( - ... ':type param: :class:`threading.Thread`', 'param') - ['threading.Thread'] - >>> bool(_search_param_in_docstr('no document', 'param')) - False - >>> _search_param_in_docstr(':param int param: some description', 'param') - ['int'] - - """ - # look at #40 to see definitions of those params - patterns = [re.compile(p % re.escape(param_str)) - for p in DOCSTRING_PARAM_PATTERNS] - for pattern in patterns: - match = pattern.search(docstr) - if match: - return [_strip_rst_role(match.group(1))] - +def _search_param_in_docstr(docstr, param_str): + """ + Search `docstr` for type(-s) of `param_str`. + + >>> _search_param_in_docstr(':type param: int', 'param') + ['int'] + >>> _search_param_in_docstr('@type param: int', 'param') + ['int'] + >>> _search_param_in_docstr( + ... ':type param: :class:`threading.Thread`', 'param') + ['threading.Thread'] + >>> bool(_search_param_in_docstr('no document', 'param')) + False + >>> _search_param_in_docstr(':param int param: some description', 'param') + ['int'] + + """ + # look at #40 to see definitions of those params + patterns = [re.compile(p % re.escape(param_str)) + for p in DOCSTRING_PARAM_PATTERNS] + for pattern in patterns: + match = pattern.search(docstr) + if match: + return [_strip_rst_role(match.group(1))] + return _search_param_in_numpydocstr(docstr, param_str) - - -def _strip_rst_role(type_str): - """ - Strip off the part looks like a ReST role in `type_str`. - - >>> _strip_rst_role(':class:`ClassName`') # strip off :class: - 'ClassName' - >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain - 'module.Object' - >>> _strip_rst_role('ClassName') # do nothing when not ReST role - 'ClassName' - - See also: - http://sphinx-doc.org/domains.html#cross-referencing-python-objects - - """ - match = REST_ROLE_PATTERN.match(type_str) - if match: - return match.group(1) - else: - return type_str - - + + +def _strip_rst_role(type_str): + """ + Strip off the part looks like a ReST role in `type_str`. + + >>> _strip_rst_role(':class:`ClassName`') # strip off :class: + 'ClassName' + >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain + 'module.Object' + >>> _strip_rst_role('ClassName') # do nothing when not ReST role + 'ClassName' + + See also: + http://sphinx-doc.org/domains.html#cross-referencing-python-objects + + """ + match = REST_ROLE_PATTERN.match(type_str) + if match: + return match.group(1) + else: + return type_str + + def _evaluate_for_statement_string(module_context, string): code = dedent(u(""" - def pseudo_docstring_stuff(): + def pseudo_docstring_stuff(): ''' Create a pseudo function for docstring statements. 
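``_search_param_in_docstr`` and ``_strip_rst_role`` above drive the docstring-based parameter hints. A condensed standalone sketch of the same two steps, using simplified copies of the Sphinx ``:type:`` pattern and the ReST-role pattern from the diff:

import re

TYPE_PATTERN = r'\s*:type\s+%s:\s*([^\n]+)'          # Sphinx style
REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')


def search_param_type(docstr, param_name):
    # Find ``:type <param>: <type>`` and strip a surrounding ReST role such
    # as ``:class:`threading.Thread```.
    match = re.search(TYPE_PATTERN % re.escape(param_name), docstr)
    if not match:
        return None
    type_str = match.group(1)
    role = REST_ROLE_PATTERN.match(type_str)
    return role.group(1) if role else type_str


doc = """Frobnicate something.

:type thread: :class:`threading.Thread`
"""
print(search_param_type(doc, 'thread'))   # -> threading.Thread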
Need this docstring so that if the below part is not valid Python this @@ -191,30 +191,30 @@ def _evaluate_for_statement_string(module_context, string): ''' {} """)) - if string is None: - return [] - + if string is None: + return [] + for element in re.findall(r'((?:\w+\.)*\w+)\.', string): - # Try to import module part in dotted name. - # (e.g., 'threading' in 'threading.Thread'). - string = 'import %s\n' % element + string - - # Take the default grammar here, if we load the Python 2.7 grammar here, it - # will be impossible to use `...` (Ellipsis) as a token. Docstring types - # don't need to conform with the current grammar. + # Try to import module part in dotted name. + # (e.g., 'threading' in 'threading.Thread'). + string = 'import %s\n' % element + string + + # Take the default grammar here, if we load the Python 2.7 grammar here, it + # will be impossible to use `...` (Ellipsis) as a token. Docstring types + # don't need to conform with the current grammar. grammar = module_context.evaluator.latest_grammar - try: + try: module = grammar.parse(code.format(indent_block(string)), error_recovery=False) except ParserSyntaxError: return [] try: funcdef = next(module.iter_funcdefs()) # First pick suite, then simple_stmt and then the node, - # which is also not the last item, because there's a newline. + # which is also not the last item, because there's a newline. stmt = funcdef.children[-1].children[-1].children[-2] - except (AttributeError, IndexError): - return [] - + except (AttributeError, IndexError): + return [] + if stmt.type not in ('name', 'atom', 'atom_expr'): return [] @@ -225,34 +225,34 @@ def _evaluate_for_statement_string(module_context, string): funcdef ) func_execution_context = function_context.get_function_execution() - # Use the module of the param. - # TODO this module is not the module of the param in case of a function - # call. In that case it's the module of the function call. - # stuffed with content from a function call. + # Use the module of the param. + # TODO this module is not the module of the param in case of a function + # call. In that case it's the module of the function call. + # stuffed with content from a function call. return list(_execute_types_in_stmt(func_execution_context, stmt)) - - + + def _execute_types_in_stmt(module_context, stmt): - """ - Executing all types or general elements that we find in a statement. This - doesn't include tuple, list and dict literals, because the stuff they - contain is executed. (Used as type information). - """ + """ + Executing all types or general elements that we find in a statement. This + doesn't include tuple, list and dict literals, because the stuff they + contain is executed. (Used as type information). + """ definitions = module_context.eval_node(stmt) return ContextSet.from_sets( _execute_array_values(module_context.evaluator, d) for d in definitions ) - - -def _execute_array_values(evaluator, array): - """ - Tuples indicate that there's not just one return value, but the listed - ones. `(str, int)` means that it returns a tuple with both types. - """ + + +def _execute_array_values(evaluator, array): + """ + Tuples indicate that there's not just one return value, but the listed + ones. `(str, int)` means that it returns a tuple with both types. 
+ """ from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence if isinstance(array, SequenceLiteralContext): - values = [] + values = [] for lazy_context in array.py__iter__(): objects = ContextSet.from_sets( _execute_array_values(evaluator, typ) @@ -260,15 +260,15 @@ def _execute_array_values(evaluator, array): ) values.append(LazyKnownContexts(objects)) return {FakeSequence(evaluator, array.array_type, values)} - else: + else: return array.execute_evaluated() - - + + @evaluator_method_cache() def infer_param(execution_context, param): from jedi.evaluate.context.instance import InstanceArguments from jedi.evaluate.context import FunctionExecutionContext - + def eval_docstring(docstring): return ContextSet.from_iterable( p @@ -279,29 +279,29 @@ def infer_param(execution_context, param): func = param.get_parent_function() if func.type == 'lambdef': return NO_CONTEXTS - + types = eval_docstring(execution_context.py__doc__()) if isinstance(execution_context, FunctionExecutionContext) \ and isinstance(execution_context.var_args, InstanceArguments) \ and execution_context.function_context.py__name__() == '__init__': class_context = execution_context.var_args.instance.class_context types |= eval_docstring(class_context.py__doc__()) - + return types @evaluator_method_cache() @iterator_to_context_set def infer_return_types(function_context): - def search_return_in_docstr(code): - for p in DOCSTRING_RETURN_PATTERNS: - match = p.search(code) - if match: + def search_return_in_docstr(code): + for p in DOCSTRING_RETURN_PATTERNS: + match = p.search(code) + if match: yield _strip_rst_role(match.group(1)) # Check for numpy style return hint for type_ in _search_return_in_numpydocstr(code): yield type_ - + for type_str in search_return_in_docstr(function_context.py__doc__()): for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str): yield type_eval diff --git a/contrib/python/jedi/jedi/evaluate/dynamic.py b/contrib/python/jedi/jedi/evaluate/dynamic.py index 7f7b0d87bd..dcf7796d3b 100644 --- a/contrib/python/jedi/jedi/evaluate/dynamic.py +++ b/contrib/python/jedi/jedi/evaluate/dynamic.py @@ -1,27 +1,27 @@ -""" -One of the really important features of |jedi| is to have an option to -understand code like this:: - - def foo(bar): - bar. # completion here - foo(1) - -There's no doubt wheter bar is an ``int`` or not, but if there's also a call -like ``foo('str')``, what would happen? Well, we'll just show both. Because -that's what a human would expect. - -It works as follows: - -- |Jedi| sees a param -- search for function calls named ``foo`` +""" +One of the really important features of |jedi| is to have an option to +understand code like this:: + + def foo(bar): + bar. # completion here + foo(1) + +There's no doubt wheter bar is an ``int`` or not, but if there's also a call +like ``foo('str')``, what would happen? Well, we'll just show both. Because +that's what a human would expect. + +It works as follows: + +- |Jedi| sees a param +- search for function calls named ``foo`` - execute these calls and check the input. 
-""" - +""" + from parso.python import tree -from jedi import settings -from jedi import debug +from jedi import settings +from jedi import debug from jedi.evaluate.cache import evaluator_function_cache -from jedi.evaluate import imports +from jedi.evaluate import imports from jedi.evaluate.arguments import TreeArguments from jedi.evaluate.param import create_default_params from jedi.evaluate.helpers import is_stdlib_path @@ -30,20 +30,20 @@ from jedi.parser_utils import get_parent_scope from jedi.evaluate.context import ModuleContext, instance from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS from jedi.evaluate import recursion - - + + MAX_PARAM_SEARCHES = 20 class DynamicExecutedParams(object): - """ + """ Simulates being a parameter while actually just being multiple params. - """ - + """ + def __init__(self, evaluator, executed_params): self.evaluator = evaluator self._executed_params = executed_params - + def infer(self): with recursion.execution_allowed(self.evaluator, self) as allowed: # We need to catch recursions that may occur, because an @@ -52,25 +52,25 @@ class DynamicExecutedParams(object): if allowed: return ContextSet.from_sets(p.infer() for p in self._executed_params) return NO_CONTEXTS + - -@debug.increase_indent +@debug.increase_indent def search_params(evaluator, execution_context, funcdef): - """ - A dynamic search for param values. If you try to complete a type: - - >>> def func(foo): - ... foo - >>> func(1) - >>> func("") - - It is not known what the type ``foo`` without analysing the whole code. You - have to look for all calls to ``func`` to find out what ``foo`` possibly - is. - """ - if not settings.dynamic_params: + """ + A dynamic search for param values. If you try to complete a type: + + >>> def func(foo): + ... foo + >>> func(1) + >>> func("") + + It is not known what the type ``foo`` without analysing the whole code. You + have to look for all calls to ``func`` to find out what ``foo`` possibly + is. + """ + if not settings.dynamic_params: return create_default_params(execution_context, funcdef) - + evaluator.dynamic_params_depth += 1 try: path = execution_context.get_root_context().py__file__() @@ -80,7 +80,7 @@ def search_params(evaluator, execution_context, funcdef): # This makes everything slower. Just disable it and run the tests, # you will see the slowdown, especially in 3.6. return create_default_params(execution_context, funcdef) - + if funcdef.type == 'lambdef': string_name = _get_lambda_name(funcdef) if string_name is None: @@ -88,7 +88,7 @@ def search_params(evaluator, execution_context, funcdef): else: string_name = funcdef.name.value debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA') - + try: module_context = execution_context.get_root_context() function_executions = _search_function_executions( @@ -116,16 +116,16 @@ def search_params(evaluator, execution_context, funcdef): @evaluator_function_cache(default=None) @to_list def _search_function_executions(evaluator, module_context, funcdef, string_name): - """ - Returns a list of param names. - """ + """ + Returns a list of param names. 
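``search_params`` above guesses parameter types by locating other call sites of the same function. A much reduced sketch of that call-site scan, assuming parso; it mirrors the ``get_used_names``/``get_next_leaf`` walk of ``_get_possible_nodes`` below, without any of the caching or recursion limits.

from parso import parse

source = '''
def foo(bar):
    bar
foo(1)
foo("str")
'''

module = parse(source)
for name in module.get_used_names()['foo']:
    bracket = name.get_next_leaf()
    trailer = bracket.parent
    # A call looks like ``foo(...)``: the name is followed by a trailer whose
    # first child is the opening bracket; the definition's name is not.
    if trailer.type == 'trailer' and trailer.children[0] == '(':
        print(trailer.children[1].get_code())   # prints 1, then "str"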
+ """ compare_node = funcdef if string_name == '__init__': cls = get_parent_scope(funcdef) if isinstance(cls, tree.Class): string_name = cls.name.value compare_node = cls - + found_executions = False i = 0 for for_mod_context in imports.get_modules_containing_name( @@ -134,25 +134,25 @@ def _search_function_executions(evaluator, module_context, funcdef, string_name) return for name, trailer in _get_possible_nodes(for_mod_context, string_name): i += 1 - + # This is a simple way to stop Jedi's dynamic param recursion # from going wild: The deeper Jedi's in the recursion, the less # code should be evaluated. if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES: return - + random_context = evaluator.create_context(for_mod_context, name) for function_execution in _check_name_for_execution( evaluator, random_context, compare_node, name, trailer): found_executions = True yield function_execution - + # If there are results after processing a module, we're probably # good to process. This is a speed optimization. if found_executions: return - - + + def _get_lambda_name(node): stmt = node.parent if stmt.type == 'expr_stmt': @@ -161,16 +161,16 @@ def _get_lambda_name(node): first = stmt.children[0] if first.type == 'name': return first.value - + return None - + def _get_possible_nodes(module_context, func_string_name): - try: + try: names = module_context.tree_node.get_used_names()[func_string_name] except KeyError: return - + for name in names: bracket = name.get_next_leaf() trailer = bracket.parent diff --git a/contrib/python/jedi/jedi/evaluate/finder.py b/contrib/python/jedi/jedi/evaluate/finder.py index 5e7043f796..1633e3ae65 100644 --- a/contrib/python/jedi/jedi/evaluate/finder.py +++ b/contrib/python/jedi/jedi/evaluate/finder.py @@ -1,41 +1,41 @@ -""" -Searching for names with given scope and name. This is very central in Jedi and -Python. The name resolution is quite complicated with descripter, -``__getattribute__``, ``__getattr__``, ``global``, etc. - +""" +Searching for names with given scope and name. This is very central in Jedi and +Python. The name resolution is quite complicated with descripter, +``__getattribute__``, ``__getattr__``, ``global``, etc. + If you want to understand name resolution, please read the first few chapters in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/. -Flow checks -+++++++++++ - -Flow checks are not really mature. There's only a check for ``isinstance``. It -would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``. -Unfortunately every other thing is being ignored (e.g. a == '' would be easy to -check for -> a is a string). There's big potential in these checks. -""" - +Flow checks ++++++++++++ + +Flow checks are not really mature. There's only a check for ``isinstance``. It +would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``. +Unfortunately every other thing is being ignored (e.g. a == '' would be easy to +check for -> a is a string). There's big potential in these checks. 
+""" + from parso.python import tree from parso.tree import search_ancestor -from jedi import debug -from jedi import settings +from jedi import debug +from jedi import settings from jedi.evaluate.context import AbstractInstanceContext -from jedi.evaluate import compiled -from jedi.evaluate import analysis -from jedi.evaluate import flow_analysis +from jedi.evaluate import compiled +from jedi.evaluate import analysis +from jedi.evaluate import flow_analysis from jedi.evaluate.arguments import TreeArguments -from jedi.evaluate import helpers +from jedi.evaluate import helpers from jedi.evaluate.context import iterable from jedi.evaluate.filters import get_global_filters, TreeNameDefinition from jedi.evaluate.base_context import ContextSet from jedi.parser_utils import is_scope, get_parent_scope - - -class NameFinder(object): + + +class NameFinder(object): def __init__(self, evaluator, context, name_context, name_or_str, position=None, analysis_errors=True): - self._evaluator = evaluator - # Make sure that it's not just a syntax tree node. + self._evaluator = evaluator + # Make sure that it's not just a syntax tree node. self._context = context self._name_context = name_context self._name = name_or_str @@ -46,8 +46,8 @@ class NameFinder(object): self._position = position self._found_predefined_types = None self._analysis_errors = analysis_errors - - @debug.increase_indent + + @debug.increase_indent def find(self, filters, attribute_lookup): """ :params bool attribute_lookup: Tell to logic if we're accessing the @@ -63,7 +63,7 @@ class NameFinder(object): if check is flow_analysis.UNREACHABLE: return ContextSet() return self._found_predefined_types - + types = self._names_to_types(names, attribute_lookup) if not names and self._analysis_errors and not types \ @@ -74,12 +74,12 @@ class NameFinder(object): analysis.add_attribute_error( self._name_context, self._context, self._name) else: - message = ("NameError: name '%s' is not defined." + message = ("NameError: name '%s' is not defined." % self._string_name) analysis.add(self._name_context, 'name-error', self._name, message) - - return types - + + return types + def _get_origin_scope(self): if isinstance(self._name, tree.Name): scope = self._name @@ -89,14 +89,14 @@ class NameFinder(object): break scope = scope.parent return scope - else: + else: return None - + def get_filters(self, search_global=False): origin_scope = self._get_origin_scope() if search_global: position = self._position - + # For functions and classes the defaults don't belong to the # function and get evaluated in the context before the function. So # make sure to exclude the function/class name. @@ -113,17 +113,17 @@ class NameFinder(object): if position < colon.start_pos: if lambdef is None or position < lambdef.children[-2].start_pos: position = ancestor.start_pos - + return get_global_filters(self._evaluator, self._context, position, origin_scope) else: return self._context.get_filters(search_global, self._position, origin_scope=origin_scope) - + def filter_name(self, filters): - """ - Searches names that are defined in a scope (the different + """ + Searches names that are defined in a scope (the different ``filters``), until a name fits. 
- """ - names = [] + """ + names = [] if self._context.predefined_names and isinstance(self._name, tree.Name): node = self._name while node is not None and not is_scope(node): @@ -140,7 +140,7 @@ class NameFinder(object): for filter in filters: names = filter.get(self._string_name) - if names: + if names: if len(names) == 1: n, = names if isinstance(n, TreeNameDefinition): @@ -152,17 +152,17 @@ class NameFinder(object): if n.tree_name == self._name: if self._name.get_definition().type == 'import_from': continue - break - + break + debug.dbg('finder.filter_name %s in (%s): %s@%s', self._string_name, self._context, names, self._position) return list(names) - - def _check_getattr(self, inst): - """Checks for both __getattr__ and __getattribute__ methods""" - # str is important, because it shouldn't be `Name`! + + def _check_getattr(self, inst): + """Checks for both __getattr__ and __getattribute__ methods""" + # str is important, because it shouldn't be `Name`! name = compiled.create_simple_object(self._evaluator, self._string_name) - + # This is a little bit special. `__getattribute__` is in Python # executed before `__getattr__`. But: I know no use case, where # this could be practical and where Jedi would return wrong types. @@ -173,15 +173,15 @@ class NameFinder(object): names = (inst.get_function_slot_names(u'__getattr__') or inst.get_function_slot_names(u'__getattribute__')) return inst.execute_function_slots(names, name) - + def _names_to_types(self, names, attribute_lookup): contexts = ContextSet.from_sets(name.infer() for name in names) - + debug.dbg('finder._names_to_types: %s -> %s', names, contexts) if not names and isinstance(self._context, AbstractInstanceContext): - # handling __getattr__ / __getattribute__ + # handling __getattr__ / __getattribute__ return self._check_getattr(self._context) - + # Add isinstance and other if/assert knowledge. if not contexts and isinstance(self._name, tree.Name) and \ not isinstance(self._name_context, AbstractInstanceContext): @@ -198,25 +198,25 @@ class NameFinder(object): if flow_scope == base_node: break return contexts - - + + def _check_flow_information(context, flow, search_name, pos): - """ Try to find out the type of a variable just with the information that - is given by the flows: e.g. It is also responsible for assert checks.:: - - if isinstance(k, str): - k. # <- completion here - - ensures that `k` is a string. - """ - if not settings.dynamic_flow_information: - return None - + """ Try to find out the type of a variable just with the information that + is given by the flows: e.g. It is also responsible for assert checks.:: + + if isinstance(k, str): + k. # <- completion here + + ensures that `k` is a string. + """ + if not settings.dynamic_flow_information: + return None + result = None if is_scope(flow): - # Check for asserts. + # Check for asserts. 
module_node = flow.get_root_node() - try: + try: names = module_node.get_used_names()[search_name.value] except KeyError: return None @@ -224,49 +224,49 @@ def _check_flow_information(context, flow, search_name, pos): n for n in names if flow.start_pos <= n.start_pos < (pos or flow.end_pos) ]) - - for name in names: + + for name in names: ass = search_ancestor(name, 'assert_stmt') if ass is not None: result = _check_isinstance_type(context, ass.assertion, search_name) if result is not None: return result - + if flow.type in ('if_stmt', 'while_stmt'): potential_ifs = [c for c in flow.children[1::4] if c != ':'] for if_test in reversed(potential_ifs): if search_name.start_pos > if_test.end_pos: return _check_isinstance_type(context, if_test, search_name) - return result - - + return result + + def _check_isinstance_type(context, element, search_name): - try: + try: assert element.type in ('power', 'atom_expr') - # this might be removed if we analyze and, etc - assert len(element.children) == 2 - first, trailer = element.children + # this might be removed if we analyze and, etc + assert len(element.children) == 2 + first, trailer = element.children assert first.type == 'name' and first.value == 'isinstance' - assert trailer.type == 'trailer' and trailer.children[0] == '(' - assert len(trailer.children) == 3 - - # arglist stuff - arglist = trailer.children[1] + assert trailer.type == 'trailer' and trailer.children[0] == '(' + assert len(trailer.children) == 3 + + # arglist stuff + arglist = trailer.children[1] args = TreeArguments(context.evaluator, context, arglist, trailer) param_list = list(args.unpack()) - # Disallow keyword arguments + # Disallow keyword arguments assert len(param_list) == 2 (key1, lazy_context_object), (key2, lazy_context_cls) = param_list assert key1 is None and key2 is None call = helpers.call_of_leaf(search_name) is_instance_call = helpers.call_of_leaf(lazy_context_object.data) - # Do a simple get_code comparison. They should just have the same code, - # and everything will be all right. + # Do a simple get_code comparison. They should just have the same code, + # and everything will be all right. 
normalize = context.evaluator.grammar._normalize assert normalize(is_instance_call) == normalize(call) - except AssertionError: + except AssertionError: return None - + context_set = ContextSet() for cls_or_tup in lazy_context_cls.infer(): if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple': diff --git a/contrib/python/jedi/jedi/evaluate/flow_analysis.py b/contrib/python/jedi/jedi/evaluate/flow_analysis.py index 474071f14c..2f75cf2f87 100644 --- a/contrib/python/jedi/jedi/evaluate/flow_analysis.py +++ b/contrib/python/jedi/jedi/evaluate/flow_analysis.py @@ -1,52 +1,52 @@ from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope from jedi.evaluate.recursion import execution_allowed - - -class Status(object): - lookup_table = {} - - def __init__(self, value, name): - self._value = value - self._name = name - Status.lookup_table[value] = self - - def invert(self): - if self is REACHABLE: - return UNREACHABLE - elif self is UNREACHABLE: - return REACHABLE - else: - return UNSURE - - def __and__(self, other): - if UNSURE in (self, other): - return UNSURE - else: - return REACHABLE if self._value and other._value else UNREACHABLE - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self._name) - - -REACHABLE = Status(True, 'reachable') -UNREACHABLE = Status(False, 'unreachable') -UNSURE = Status(None, 'unsure') - - + + +class Status(object): + lookup_table = {} + + def __init__(self, value, name): + self._value = value + self._name = name + Status.lookup_table[value] = self + + def invert(self): + if self is REACHABLE: + return UNREACHABLE + elif self is UNREACHABLE: + return REACHABLE + else: + return UNSURE + + def __and__(self, other): + if UNSURE in (self, other): + return UNSURE + else: + return REACHABLE if self._value and other._value else UNREACHABLE + + def __repr__(self): + return '<%s: %s>' % (type(self).__name__, self._name) + + +REACHABLE = Status(True, 'reachable') +UNREACHABLE = Status(False, 'unreachable') +UNSURE = Status(None, 'unsure') + + def _get_flow_scopes(node): while True: node = get_parent_scope(node, include_flows=True) if node is None or is_scope(node): return yield node - - + + def reachability_check(context, context_scope, node, origin_scope=None): first_flow_scope = get_parent_scope(node, include_flows=True) if origin_scope is not None: origin_flow_scopes = list(_get_flow_scopes(origin_scope)) node_flow_scopes = list(_get_flow_scopes(node)) - + branch_matches = True for flow_scope in origin_flow_scopes: if flow_scope in node_flow_scopes: @@ -79,32 +79,32 @@ def reachability_check(context, context_scope, node, origin_scope=None): def _break_check(context, context_scope, flow_scope, node): - reachable = REACHABLE + reachable = REACHABLE if flow_scope.type == 'if_stmt': if flow_scope.is_node_after_else(node): for check_node in flow_scope.get_test_nodes(): reachable = _check_if(context, check_node) - if reachable in (REACHABLE, UNSURE): - break - reachable = reachable.invert() - else: + if reachable in (REACHABLE, UNSURE): + break + reachable = reachable.invert() + else: flow_node = flow_scope.get_corresponding_test_node(node) if flow_node is not None: reachable = _check_if(context, flow_node) elif flow_scope.type in ('try_stmt', 'while_stmt'): - return UNSURE - - # Only reachable branches need to be examined further. - if reachable in (UNREACHABLE, UNSURE): - return reachable - + return UNSURE + + # Only reachable branches need to be examined further. 
+ if reachable in (UNREACHABLE, UNSURE): + return reachable + if context_scope != flow_scope and context_scope != flow_scope.parent: flow_scope = get_parent_scope(flow_scope, include_flows=True) return reachable & _break_check(context, context_scope, flow_scope, node) else: return reachable - - + + def _check_if(context, node): with execution_allowed(context.evaluator, node) as allowed: if not allowed: diff --git a/contrib/python/jedi/jedi/evaluate/helpers.py b/contrib/python/jedi/jedi/evaluate/helpers.py index c94a1fbe55..989e3676bd 100644 --- a/contrib/python/jedi/jedi/evaluate/helpers.py +++ b/contrib/python/jedi/jedi/evaluate/helpers.py @@ -1,27 +1,27 @@ -import copy +import copy import sys import re import os -from itertools import chain +from itertools import chain from contextlib import contextmanager - + from parso.python import tree - + from jedi._compatibility import unicode from jedi.parser_utils import get_parent_scope - - + + def is_stdlib_path(path): # Python standard library paths look like this: # /usr/lib/python3.5/... # TODO The implementation below is probably incorrect and not complete. if 'dist-packages' in path or 'site-packages' in path: return False - + base_path = os.path.join(sys.prefix, 'lib', 'python') return bool(re.match(re.escape(base_path) + r'\d.\d', path)) - - + + def deep_ast_copy(obj): """ Much, much faster than copy.deepcopy, but just for parser tree nodes. @@ -35,25 +35,25 @@ def deep_ast_copy(obj): if isinstance(child, tree.Leaf): new_child = copy.copy(child) new_child.parent = new_obj - else: + else: new_child = deep_ast_copy(child) new_child.parent = new_obj new_children.append(new_child) new_obj.children = new_children - - return new_obj - - + + return new_obj + + def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): - """ - Creates a "call" node that consist of all ``trailer`` and ``power`` - objects. E.g. if you call it with ``append``:: - - list([]).append(3) or None - - You would get a node with the content ``list([]).append`` back. - - This generates a copy of the original ast node. + """ + Creates a "call" node that consist of all ``trailer`` and ``power`` + objects. E.g. if you call it with ``append``:: + + list([]).append(3) or None + + You would get a node with the content ``list([]).append`` back. + + This generates a copy of the original ast node. If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. @@ -62,12 +62,12 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): - infer the type of ``foo`` to offer completions after foo - infer the type of ``bar`` to be able to jump to the definition of foo The option ``cut_own_trailer`` must be set to true for the second purpose. - """ + """ trailer = leaf.parent if trailer.type == 'fstring': from jedi.evaluate import compiled return compiled.get_string_context_set(context.evaluator) - + # The leaf may not be the last or first child, because there exist three # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples # we should not match anything more than x. 
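``is_stdlib_path`` above, which its own TODO already flags as approximate, matches version directories with ``r'\d.\d'``; the unescaped dot also accepts paths like ``.../python3x5/``. A hedged variant of the same check with the dot escaped, shown only for illustration:

import os
import re
import sys


def is_stdlib_path(path):
    # Mirrors the helper above; only the version pattern differs.
    if 'dist-packages' in path or 'site-packages' in path:
        return False
    base_path = os.path.join(sys.prefix, 'lib', 'python')
    # \d\.\d matches a real version directory such as ``python3.5``,
    # but not ``python3x5``.
    return bool(re.match(re.escape(base_path) + r'\d\.\d', path))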
@@ -75,14 +75,14 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): if trailer.type == 'atom': return context.eval_node(trailer) return context.eval_node(leaf) - + power = trailer.parent index = power.children.index(trailer) if cut_own_trailer: cut = index else: cut = index + 1 - + if power.type == 'error_node': start = index while True: @@ -94,7 +94,7 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): else: base = power.children[0] trailers = power.children[1:cut] - + if base == 'await': base = trailers[0] trailers = trailers[1:] @@ -161,11 +161,11 @@ def get_names_of_node(node): return list(chain.from_iterable(get_names_of_node(c) for c in children)) -def get_module_names(module, all_scopes): - """ - Returns a dictionary with name parts as keys and their call paths as - values. - """ +def get_module_names(module, all_scopes): + """ + Returns a dictionary with name parts as keys and their call paths as + values. + """ names = chain.from_iterable(module.get_used_names().values()) if not all_scopes: # We have to filter all the names that don't have the module as a @@ -194,42 +194,42 @@ def is_compiled(context): def is_string(context): if context.evaluator.environment.version_info.major == 2: str_classes = (unicode, bytes) - else: + else: str_classes = (unicode,) return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes) - - + + def is_literal(context): return is_number(context) or is_string(context) - - + + def _get_safe_value_or_none(context, accept): if is_compiled(context): value = context.get_safe_value(default=None) if isinstance(value, accept): return value - - + + def get_int_or_none(context): return _get_safe_value_or_none(context, int) - - + + def is_number(context): return _get_safe_value_or_none(context, (int, float)) is not None - - + + class EvaluatorTypeError(Exception): pass - - + + class EvaluatorIndexError(Exception): pass - - + + class EvaluatorKeyError(Exception): pass - - + + @contextmanager def reraise_as_evaluator(*exception_classes): try: diff --git a/contrib/python/jedi/jedi/evaluate/imports.py b/contrib/python/jedi/jedi/evaluate/imports.py index 3b1df31a44..690d93cd1e 100644 --- a/contrib/python/jedi/jedi/evaluate/imports.py +++ b/contrib/python/jedi/jedi/evaluate/imports.py @@ -1,57 +1,57 @@ -""" -:mod:`jedi.evaluate.imports` is here to resolve import statements and return -the modules/classes/functions/whatever, which they stand for. However there's -not any actual importing done. This module is about finding modules in the -filesystem. This can be quite tricky sometimes, because Python imports are not -always that simple. - -This module uses imp for python up to 3.2 and importlib for python 3.3 on; the -correct implementation is delegated to _compatibility. - -This module also supports import autocompletion, which means to complete +""" +:mod:`jedi.evaluate.imports` is here to resolve import statements and return +the modules/classes/functions/whatever, which they stand for. However there's +not any actual importing done. This module is about finding modules in the +filesystem. This can be quite tricky sometimes, because Python imports are not +always that simple. + +This module uses imp for python up to 3.2 and importlib for python 3.3 on; the +correct implementation is delegated to _compatibility. + +This module also supports import autocompletion, which means to complete statements like ``from datetim`` (cursor at the end would return ``datetime``). 
-""" -import os - +""" +import os + from parso.python import tree from parso.tree import search_ancestor from parso import python_bytes_to_unicode from jedi._compatibility import (FileNotFoundError, ImplicitNSInfo, force_unicode, unicode) -from jedi import debug +from jedi import debug from jedi import settings from jedi.parser_utils import get_cached_code_lines -from jedi.evaluate import sys_path -from jedi.evaluate import helpers -from jedi.evaluate import compiled -from jedi.evaluate import analysis +from jedi.evaluate import sys_path +from jedi.evaluate import helpers +from jedi.evaluate import compiled +from jedi.evaluate import analysis from jedi.evaluate.utils import unite from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate.filters import AbstractNameDefinition from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS - - + + class ModuleCache(object): def __init__(self): self._path_cache = {} self._name_cache = {} - + def add(self, module, name): path = module.py__file__() self._path_cache[path] = module self._name_cache[name] = module - + def iterate_modules_with_names(self): return self._name_cache.items() - + def get(self, name): return self._name_cache[name] - + def get_from_path(self, path): return self._path_cache[path] - - + + # This memoization is needed, because otherwise we will infinitely loop on # certain imports. @evaluator_method_cache(default=NO_CONTEXTS) @@ -72,18 +72,18 @@ def infer_import(context, tree_name, is_goto=False): # if from_names exists in the modules. from_import_name = import_path[-1] import_path = from_names - + importer = Importer(evaluator, tuple(import_path), module_context, import_node.level) - + types = importer.follow() - + #if import_node.is_nested() and not self.nested_resolve: # scopes = [NestedImportModule(module, import_node)] - + if not types: return NO_CONTEXTS - + if from_import_name is not None: types = unite( t.py__getattribute__( @@ -96,7 +96,7 @@ def infer_import(context, tree_name, is_goto=False): ) if not is_goto: types = ContextSet.from_set(types) - + if not types: path = import_path + [from_import_name] importer = Importer(evaluator, tuple(path), @@ -109,62 +109,62 @@ def infer_import(context, tree_name, is_goto=False): # goto only accepts `Name` if is_goto: types = set(s.name for s in types) - + debug.dbg('after import: %s', types) return types - - -class NestedImportModule(tree.Module): - """ - TODO while there's no use case for nested import module right now, we might - be able to use them for static analysis checks later on. - """ - def __init__(self, module, nested_import): - self._module = module - self._nested_import = nested_import - - def _get_nested_import_name(self): - """ - Generates an Import statement, that can be used to fake nested imports. - """ - i = self._nested_import - # This is not an existing Import statement. Therefore, set position to - # 0 (0 is not a valid line number). 
- zero = (0, 0) - names = [unicode(name) for name in i.namespace_names[1:]] - name = helpers.FakeName(names, self._nested_import) - new = tree.Import(i._sub_module, zero, zero, name) - new.parent = self._module - debug.dbg('Generated a nested import: %s', new) - return helpers.FakeName(str(i.namespace_names[1]), new) - - def __getattr__(self, name): - return getattr(self._module, name) - - def __repr__(self): - return "<%s: %s of %s>" % (self.__class__.__name__, self._module, - self._nested_import) - - + + +class NestedImportModule(tree.Module): + """ + TODO while there's no use case for nested import module right now, we might + be able to use them for static analysis checks later on. + """ + def __init__(self, module, nested_import): + self._module = module + self._nested_import = nested_import + + def _get_nested_import_name(self): + """ + Generates an Import statement, that can be used to fake nested imports. + """ + i = self._nested_import + # This is not an existing Import statement. Therefore, set position to + # 0 (0 is not a valid line number). + zero = (0, 0) + names = [unicode(name) for name in i.namespace_names[1:]] + name = helpers.FakeName(names, self._nested_import) + new = tree.Import(i._sub_module, zero, zero, name) + new.parent = self._module + debug.dbg('Generated a nested import: %s', new) + return helpers.FakeName(str(i.namespace_names[1]), new) + + def __getattr__(self, name): + return getattr(self._module, name) + + def __repr__(self): + return "<%s: %s of %s>" % (self.__class__.__name__, self._module, + self._nested_import) + + def _add_error(context, name, message=None): # Should be a name, not a string! if message is None: name_str = str(name.value) if isinstance(name, tree.Name) else name message = 'No module named ' + name_str - if hasattr(name, 'parent'): + if hasattr(name, 'parent'): analysis.add(context, 'import-error', name, message) else: debug.warning('ImportError without origin: ' + message) - - + + class ImportName(AbstractNameDefinition): start_pos = (1, 0) _level = 0 - + def __init__(self, parent_context, string_name): self.parent_context = parent_context self.string_name = string_name - + def infer(self): return Importer( self.parent_context.evaluator, @@ -189,35 +189,35 @@ class SubModuleName(ImportName): _level = 1 -class Importer(object): +class Importer(object): def __init__(self, evaluator, import_path, module_context, level=0): - """ - An implementation similar to ``__import__``. Use `follow` - to actually follow the imports. - - *level* specifies whether to use absolute or relative imports. 0 (the - default) means only perform absolute imports. Positive values for level - indicate the number of parent directories to search relative to the - directory of the module calling ``__import__()`` (see PEP 328 for the - details). - - :param import_path: List of namespaces (strings or Names). - """ - debug.speed('import %s' % (import_path,)) - self._evaluator = evaluator - self.level = level + """ + An implementation similar to ``__import__``. Use `follow` + to actually follow the imports. + + *level* specifies whether to use absolute or relative imports. 0 (the + default) means only perform absolute imports. Positive values for level + indicate the number of parent directories to search relative to the + directory of the module calling ``__import__()`` (see PEP 328 for the + details). + + :param import_path: List of namespaces (strings or Names). 
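
The ``Importer`` docstring above summarises PEP 328 relative-import levels. A small standalone illustration of how a level maps onto the calling package's dotted path, mirroring what importlib does at runtime::

    def resolve_relative(name, package, level):
        # Level 1 means "the current package"; every additional level strips
        # one more trailing component from the package path.
        bits = package.rsplit('.', level - 1)
        if len(bits) < level:
            raise ImportError('attempted relative import beyond top-level package')
        base = bits[0]
        return '{0}.{1}'.format(base, name) if name else base

    # Inside pkg.sub.mod (__package__ == 'pkg.sub'):
    print(resolve_relative('x', 'pkg.sub', 1))  # 'pkg.sub.x'  (from . import x)
    print(resolve_relative('x', 'pkg.sub', 2))  # 'pkg.x'      (from .. import x)
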
+ """ + debug.speed('import %s' % (import_path,)) + self._evaluator = evaluator + self.level = level self.module_context = module_context - try: + try: self.file_path = module_context.py__file__() - except AttributeError: - # Can be None for certain compiled modules like 'builtins'. - self.file_path = None - - if level: + except AttributeError: + # Can be None for certain compiled modules like 'builtins'. + self.file_path = None + + if level: base = module_context.py__package__().split('.') if base == [''] or base == ['__main__']: - base = [] - if level > len(base): + base = [] + if level > len(base): path = module_context.py__file__() if path is not None: import_path = list(import_path) @@ -246,64 +246,64 @@ class Importer(object): # are in the file system. Therefore we cannot know what to do. # In this case we just let the path there and ignore that it's # a relative path. Not sure if that's a good idea. - else: - # Here we basically rewrite the level to 0. + else: + # Here we basically rewrite the level to 0. base = tuple(base) if level > 1: base = base[:-level + 1] import_path = base + tuple(import_path) - self.import_path = import_path - - @property - def str_import_path(self): - """Returns the import path as pure strings instead of `Name`.""" + self.import_path = import_path + + @property + def str_import_path(self): + """Returns the import path as pure strings instead of `Name`.""" return tuple( name.value if isinstance(name, tree.Name) else name for name in self.import_path ) - - def sys_path_with_modifications(self): - + + def sys_path_with_modifications(self): + sys_path_mod = ( self._evaluator.get_sys_path() + sys_path.check_sys_path_modifications(self.module_context) ) - + if self.import_path and self.file_path is not None \ and self._evaluator.environment.version_info.major == 2: # Python2 uses an old strange way of importing relative imports. sys_path_mod.append(force_unicode(os.path.dirname(self.file_path))) - + return sys_path_mod - def follow(self): + def follow(self): if not self.import_path or not self._evaluator.infer_enabled: return NO_CONTEXTS - return self._do_import(self.import_path, self.sys_path_with_modifications()) - - def _do_import(self, import_path, sys_path): - """ - This method is very similar to importlib's `_gcd_import`. - """ + return self._do_import(self.import_path, self.sys_path_with_modifications()) + + def _do_import(self, import_path, sys_path): + """ + This method is very similar to importlib's `_gcd_import`. + """ import_parts = [ force_unicode(i.value if isinstance(i, tree.Name) else i) for i in import_path ] - - # Handle "magic" Flask extension imports: - # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. - if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']: - # New style. - ipath = ('flask_' + str(import_parts[2]),) + import_path[3:] - modules = self._do_import(ipath, sys_path) - if modules: - return modules - else: - # Old style - return self._do_import(('flaskext',) + import_path[2:], sys_path) - + + # Handle "magic" Flask extension imports: + # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. + if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']: + # New style. 
+ ipath = ('flask_' + str(import_parts[2]),) + import_path[3:] + modules = self._do_import(ipath, sys_path) + if modules: + return modules + else: + # Old style + return self._do_import(('flaskext',) + import_path[2:], sys_path) + if import_parts[0] in settings.auto_import_modules: module = _load_module( self._evaluator, @@ -312,42 +312,42 @@ class Importer(object): ) return ContextSet(module) - module_name = '.'.join(import_parts) - try: + module_name = '.'.join(import_parts) + try: return ContextSet(self._evaluator.module_cache.get(module_name)) - except KeyError: - pass - - if len(import_path) > 1: - # This is a recursive way of importing that works great with - # the module cache. - bases = self._do_import(import_path[:-1], sys_path) - if not bases: + except KeyError: + pass + + if len(import_path) > 1: + # This is a recursive way of importing that works great with + # the module cache. + bases = self._do_import(import_path[:-1], sys_path) + if not bases: return NO_CONTEXTS - # We can take the first element, because only the os special - # case yields multiple modules, which is not important for - # further imports. + # We can take the first element, because only the os special + # case yields multiple modules, which is not important for + # further imports. parent_module = list(bases)[0] - - # This is a huge exception, we follow a nested import - # ``os.path``, because it's a very important one in Python - # that is being achieved by messing with ``sys.modules`` in - # ``os``. + + # This is a huge exception, we follow a nested import + # ``os.path``, because it's a very important one in Python + # that is being achieved by messing with ``sys.modules`` in + # ``os``. if import_parts == ['os', 'path']: return parent_module.py__getattribute__('path') - - try: + + try: method = parent_module.py__path__ - except AttributeError: - # The module is not a package. + except AttributeError: + # The module is not a package. _add_error(self.module_context, import_path[-1]) return NO_CONTEXTS - else: + else: paths = method() - debug.dbg('search_module %s in paths %s', module_name, paths) - for path in paths: - # At the moment we are only using one path. So this is - # not important to be correct. + debug.dbg('search_module %s in paths %s', module_name, paths) + for path in paths: + # At the moment we are only using one path. So this is + # not important to be correct. if not isinstance(path, list): path = [path] code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info( @@ -357,11 +357,11 @@ class Importer(object): is_global_search=False, ) if module_path is not None: - break + break else: _add_error(self.module_context, import_path[-1]) return NO_CONTEXTS - else: + else: debug.dbg('global search_module %s in %s', import_parts[-1], self.file_path) # Override the sys.path. It works only good that way. # Injecting the path directly into `find_module` did not work. @@ -372,111 +372,111 @@ class Importer(object): is_global_search=True, ) if module_path is None: - # The module is not a package. + # The module is not a package. _add_error(self.module_context, import_path[-1]) return NO_CONTEXTS - + module = _load_module( self._evaluator, module_path, code, sys_path, import_names=import_parts, safe_module_name=True, ) - + if module is None: # The file might raise an ImportError e.g. and therefore not be # importable. return NO_CONTEXTS - + return ContextSet(module) - + def _generate_name(self, name, in_module=None): # Create a pseudo import to be able to follow them. 
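
The Flask-extension special case above rewrites the import path before anything is looked up on disk. A standalone sketch of just that rewrite, operating on plain tuples of name parts::

    def flask_ext_candidates(import_parts):
        # ('flask', 'ext', 'foo') is really 'flask_foo' (new style) or
        # 'flaskext.foo' (old style); anything else is left untouched.
        if len(import_parts) > 2 and import_parts[:2] == ('flask', 'ext'):
            new_style = ('flask_' + import_parts[2],) + import_parts[3:]
            old_style = ('flaskext',) + import_parts[2:]
            return [new_style, old_style]
        return [import_parts]

    assert flask_ext_candidates(('flask', 'ext', 'sqlalchemy')) == \
        [('flask_sqlalchemy',), ('flaskext', 'sqlalchemy')]
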
if in_module is None: return ImportName(self.module_context, name) return SubModuleName(in_module, name) - + def _get_module_names(self, search_path=None, in_module=None): - """ - Get the names of all modules in the search_path. This means file names - and not names defined in the files. - """ + """ + Get the names of all modules in the search_path. This means file names + and not names defined in the files. + """ sub = self._evaluator.compiled_subprocess - - names = [] - # add builtin module names + + names = [] + # add builtin module names if search_path is None and in_module is None: names += [self._generate_name(name) for name in sub.get_builtin_module_names()] - - if search_path is None: - search_path = self.sys_path_with_modifications() + + if search_path is None: + search_path = self.sys_path_with_modifications() for name in sub.list_module_names(search_path): names.append(self._generate_name(name, in_module=in_module)) - return names - - def completion_names(self, evaluator, only_modules=False): - """ - :param only_modules: Indicates wheter it's possible to import a - definition that is not defined in a module. - """ + return names + + def completion_names(self, evaluator, only_modules=False): + """ + :param only_modules: Indicates wheter it's possible to import a + definition that is not defined in a module. + """ from jedi.evaluate.context import ModuleContext from jedi.evaluate.context.namespace import ImplicitNamespaceContext - names = [] - if self.import_path: - # flask - if self.str_import_path == ('flask', 'ext'): - # List Flask extensions like ``flask_foo`` - for mod in self._get_module_names(): + names = [] + if self.import_path: + # flask + if self.str_import_path == ('flask', 'ext'): + # List Flask extensions like ``flask_foo`` + for mod in self._get_module_names(): modname = mod.string_name - if modname.startswith('flask_'): - extname = modname[len('flask_'):] - names.append(self._generate_name(extname)) - # Now the old style: ``flaskext.foo`` - for dir in self.sys_path_with_modifications(): - flaskext = os.path.join(dir, 'flaskext') - if os.path.isdir(flaskext): - names += self._get_module_names([flaskext]) - + if modname.startswith('flask_'): + extname = modname[len('flask_'):] + names.append(self._generate_name(extname)) + # Now the old style: ``flaskext.foo`` + for dir in self.sys_path_with_modifications(): + flaskext = os.path.join(dir, 'flaskext') + if os.path.isdir(flaskext): + names += self._get_module_names([flaskext]) + for context in self.follow(): - # Non-modules are not completable. + # Non-modules are not completable. if context.api_type != 'module': # not a module - continue - # namespace packages + continue + # namespace packages if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'): paths = context.py__path__() names += self._get_module_names(paths, in_module=context) - + # implicit namespace packages elif isinstance(context, ImplicitNamespaceContext): paths = context.paths names += self._get_module_names(paths, in_module=context) - if only_modules: - # In the case of an import like `from x.` we don't need to - # add all the variables. - if ('os',) == self.str_import_path and not self.level: - # os.path is a hardcoded exception, because it's a - # ``sys.modules`` modification. + if only_modules: + # In the case of an import like `from x.` we don't need to + # add all the variables. 
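
``_get_module_names`` above collects module names from the filesystem (file names, not names defined inside the files) plus the interpreter's builtin module names. A rough standalone equivalent built on ``pkgutil``, assuming Python 3.6+ for the ``ModuleInfo.name`` attribute::

    import pkgutil
    import sys

    def module_names_on(search_path=None):
        # Builtin module names when no explicit search path is given, then
        # whatever importable modules/packages live on the path entries.
        names = []
        if search_path is None:
            names += list(sys.builtin_module_names)
            search_path = sys.path
        names += [info.name for info in pkgutil.iter_modules(search_path)]
        return sorted(set(names))

    print(module_names_on()[:5])
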
+ if ('os',) == self.str_import_path and not self.level: + # os.path is a hardcoded exception, because it's a + # ``sys.modules`` modification. names.append(self._generate_name('path', context)) - - continue - + + continue + for filter in context.get_filters(search_global=False): names += filter.values() - else: - # Empty import path=completion after import - if not self.level: - names += self._get_module_names() - - if self.file_path is not None: - path = os.path.abspath(self.file_path) - for i in range(self.level - 1): - path = os.path.dirname(path) - names += self._get_module_names([path]) - - return names - - + else: + # Empty import path=completion after import + if not self.level: + names += self._get_module_names() + + if self.file_path is not None: + path = os.path.abspath(self.file_path) + for i in range(self.level - 1): + path = os.path.dirname(path) + names += self._get_module_names([path]) + + return names + + def _load_module(evaluator, path=None, code=None, sys_path=None, import_names=None, safe_module_name=False): if import_names is None: @@ -515,30 +515,30 @@ def _load_module(evaluator, path=None, code=None, sys_path=None, path=path, code_lines=get_cached_code_lines(evaluator.grammar, path), ) - else: + else: assert dotted_name is not None module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path) - + if module is not None and dotted_name is not None: add_module_to_cache(evaluator, dotted_name, module, safe=safe_module_name) - return module - - + return module + + def add_module_to_cache(evaluator, module_name, module, safe=False): if not safe and '.' not in module_name: - # We cannot add paths with dots, because that would collide with - # the sepatator dots for nested packages. Therefore we return - # `__main__` in ModuleWrapper.py__name__(), which is similar to - # Python behavior. + # We cannot add paths with dots, because that would collide with + # the sepatator dots for nested packages. Therefore we return + # `__main__` in ModuleWrapper.py__name__(), which is similar to + # Python behavior. return evaluator.module_cache.add(module, module_name) - - + + def get_modules_containing_name(evaluator, modules, name): - """ - Search a name in the directories of modules. - """ + """ + Search a name in the directories of modules. + """ def check_directories(paths): for p in paths: if p is not None: @@ -549,8 +549,8 @@ def get_modules_containing_name(evaluator, modules, name): path = os.path.join(d, file_name) if file_name.endswith('.py'): yield path - - def check_fs(path): + + def check_fs(path): try: f = open(path, 'rb') except FileNotFoundError: @@ -565,9 +565,9 @@ def get_modules_containing_name(evaluator, modules, name): sys_path=e_sys_path, import_names=import_names, ) - return module - - # skip non python modules + return module + + # skip non python modules used_mod_paths = set() for m in modules: try: @@ -576,11 +576,11 @@ def get_modules_containing_name(evaluator, modules, name): pass else: used_mod_paths.add(path) - yield m - + yield m + if not settings.dynamic_params_for_other_modules: return - + additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules) # Check the directories of used modules. 
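
``get_modules_containing_name`` above is a cheap textual pre-filter: it scans the directories of already-known modules for ``.py`` files whose raw source mentions the name at all, and only those candidates get parsed later. A standalone sketch of that filter::

    import os

    def files_possibly_defining(name, directories):
        # Yield .py files whose bytes mention `name`; callers parse only these.
        needle = name.encode('utf-8')
        for directory in directories:
            if not os.path.isdir(directory):
                continue
            for file_name in os.listdir(directory):
                if not file_name.endswith('.py'):
                    continue
                path = os.path.join(directory, file_name)
                with open(path, 'rb') as f:
                    if needle in f.read():
                        yield path
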
paths = (additional | set(check_directories(used_mod_paths))) \ diff --git a/contrib/python/jedi/jedi/evaluate/param.py b/contrib/python/jedi/jedi/evaluate/param.py index 84f281e532..c6bd8376f5 100644 --- a/contrib/python/jedi/jedi/evaluate/param.py +++ b/contrib/python/jedi/jedi/evaluate/param.py @@ -1,74 +1,74 @@ -from collections import defaultdict - +from collections import defaultdict + from jedi.evaluate.utils import PushBackIterator -from jedi.evaluate import analysis +from jedi.evaluate import analysis from jedi.evaluate.lazy_context import LazyKnownContext, \ LazyTreeContext, LazyUnknownContext from jedi.evaluate import docstrings from jedi.evaluate import pep0484 from jedi.evaluate.context import iterable - - + + def _add_argument_issue(parent_context, error_name, lazy_context, message): if isinstance(lazy_context, LazyTreeContext): node = lazy_context.data if node.parent.type == 'argument': node = node.parent analysis.add(parent_context, error_name, node, message) - - + + class ExecutedParam(object): - """Fake a param and give it values.""" + """Fake a param and give it values.""" def __init__(self, execution_context, param_node, lazy_context): self._execution_context = execution_context self._param_node = param_node self._lazy_context = lazy_context self.string_name = param_node.name.value - + def infer(self): pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node) doc_params = docstrings.infer_param(self._execution_context, self._param_node) if pep0484_hints or doc_params: return pep0484_hints | doc_params - + return self._lazy_context.infer() - - @property + + @property def var_args(self): return self._execution_context.var_args - + def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.string_name) - - + + def get_executed_params(execution_context, var_args): result_params = [] param_dict = {} funcdef = execution_context.tree_node parent_context = execution_context.parent_context - + for param in funcdef.get_params(): param_dict[param.name.value] = param unpacked_va = list(var_args.unpack(funcdef)) var_arg_iterator = PushBackIterator(iter(unpacked_va)) - - non_matching_keys = defaultdict(lambda: []) - keys_used = {} - keys_only = False - had_multiple_value_error = False + + non_matching_keys = defaultdict(lambda: []) + keys_used = {} + keys_only = False + had_multiple_value_error = False for param in funcdef.get_params(): - # The value and key can both be null. There, the defaults apply. - # args / kwargs will just be empty arrays / dicts, respectively. - # Wrong value count is just ignored. If you try to test cases that are - # not allowed in Python, Jedi will maybe not show any completions. + # The value and key can both be null. There, the defaults apply. + # args / kwargs will just be empty arrays / dicts, respectively. + # Wrong value count is just ignored. If you try to test cases that are + # not allowed in Python, Jedi will maybe not show any completions. key, argument = next(var_arg_iterator, (None, None)) - while key is not None: - keys_only = True - try: + while key is not None: + keys_only = True + try: key_param = param_dict[key] - except KeyError: + except KeyError: non_matching_keys[key] = argument - else: + else: if key in keys_used: had_multiple_value_error = True m = ("TypeError: %s() got multiple values for keyword argument '%s'." 
@@ -79,15 +79,15 @@ def get_executed_params(execution_context, var_args): else: keys_used[key] = ExecutedParam(execution_context, key_param, argument) key, argument = next(var_arg_iterator, (None, None)) - + try: result_params.append(keys_used[param.name.value]) continue except KeyError: pass - + if param.star_count == 1: - # *args param + # *args param lazy_context_list = [] if argument is not None: lazy_context_list.append(argument) @@ -100,12 +100,12 @@ def get_executed_params(execution_context, var_args): seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list) result_arg = LazyKnownContext(seq) elif param.star_count == 2: - # **kwargs param + # **kwargs param dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys)) result_arg = LazyKnownContext(dct) - non_matching_keys = {} - else: - # normal param + non_matching_keys = {} + else: + # normal param if argument is None: # No value: Return an empty container if param.default is None: @@ -117,30 +117,30 @@ def get_executed_params(execution_context, var_args): node, message=m) else: result_arg = LazyTreeContext(parent_context, param.default) - else: + else: result_arg = argument - + result_params.append(ExecutedParam(execution_context, param, result_arg)) if not isinstance(result_arg, LazyUnknownContext): keys_used[param.name.value] = result_params[-1] - - if keys_only: - # All arguments should be handed over to the next function. It's not - # about the values inside, it's about the names. Jedi needs to now that - # there's nothing to find for certain names. - for k in set(param_dict) - set(keys_used): - param = param_dict[k] - + + if keys_only: + # All arguments should be handed over to the next function. It's not + # about the values inside, it's about the names. Jedi needs to now that + # there's nothing to find for certain names. + for k in set(param_dict) - set(keys_used): + param = param_dict[k] + if not (non_matching_keys or had_multiple_value_error or param.star_count or param.default): - # add a warning only if there's not another one. + # add a warning only if there's not another one. for node in var_args.get_calling_nodes(): m = _error_argument_count(funcdef, len(unpacked_va)) analysis.add(parent_context, 'type-error-too-few-arguments', node, message=m) - + for key, lazy_context in non_matching_keys.items(): - m = "TypeError: %s() got an unexpected keyword argument '%s'." \ + m = "TypeError: %s() got an unexpected keyword argument '%s'." \ % (funcdef.name, key) _add_argument_issue( parent_context, @@ -148,31 +148,31 @@ def get_executed_params(execution_context, var_args): lazy_context, message=m ) - + remaining_arguments = list(var_arg_iterator) if remaining_arguments: m = _error_argument_count(funcdef, len(unpacked_va)) - # Just report an error for the first param that is not needed (like - # cPython). + # Just report an error for the first param that is not needed (like + # cPython). first_key, lazy_context = remaining_arguments[0] if var_args.get_calling_nodes(): # There might not be a valid calling node so check for that first. _add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m) return result_params - - + + def _error_argument_count(funcdef, actual_count): params = funcdef.get_params() default_arguments = sum(1 for p in params if p.default or p.star_count) if default_arguments == 0: before = 'exactly ' - else: + else: before = 'from %s to ' % (len(params) - default_arguments) return ('TypeError: %s() takes %s%s arguments (%s given).' 
% (funcdef.name, before, len(params), actual_count)) - - + + def _create_default_param(execution_context, param): if param.star_count == 1: result_arg = LazyKnownContext( @@ -184,11 +184,11 @@ def _create_default_param(execution_context, param): ) elif param.default is None: result_arg = LazyUnknownContext() - else: + else: result_arg = LazyTreeContext(execution_context.parent_context, param.default) return ExecutedParam(execution_context, param, result_arg) - - + + def create_default_params(execution_context, funcdef): return [_create_default_param(execution_context, p) for p in funcdef.get_params()] diff --git a/contrib/python/jedi/jedi/evaluate/recursion.py b/contrib/python/jedi/jedi/evaluate/recursion.py index 1f4f6384e9..0223c47291 100644 --- a/contrib/python/jedi/jedi/evaluate/recursion.py +++ b/contrib/python/jedi/jedi/evaluate/recursion.py @@ -1,37 +1,37 @@ -""" -Recursions are the recipe of |jedi| to conquer Python code. However, someone -must stop recursions going mad. Some settings are here to make |jedi| stop at -the right time. You can read more about them :ref:`here <settings-recursion>`. - -Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not -thread-safe. Why? ``execution_recursion_decorator`` uses class variables to -count the function calls. - +""" +Recursions are the recipe of |jedi| to conquer Python code. However, someone +must stop recursions going mad. Some settings are here to make |jedi| stop at +the right time. You can read more about them :ref:`here <settings-recursion>`. + +Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not +thread-safe. Why? ``execution_recursion_decorator`` uses class variables to +count the function calls. + .. _settings-recursion: - + Settings ~~~~~~~~~~ - + Recursion settings are important if you don't want extremly recursive python code to go absolutely crazy. - + The default values are based on experiments while completing the |jedi| library itself (inception!). But I don't think there's any other Python library that uses recursion in a similarly extreme way. Completion should also be fast and therefore the quality might not always be maximal. - + .. autodata:: recursion_limit .. autodata:: total_function_execution_limit .. autodata:: per_function_execution_limit .. autodata:: per_function_recursion_limit """ - + from contextlib import contextmanager - + from jedi import debug from jedi.evaluate.base_context import NO_CONTEXTS - - + + recursion_limit = 15 """ Like ``sys.getrecursionlimit()``, just for |jedi|. @@ -48,13 +48,13 @@ per_function_recursion_limit = 2 """ A function may not be executed more than this number of times recursively. """ - - + + class RecursionDetector(object): def __init__(self): self.pushed_nodes = [] - - + + @contextmanager def execution_allowed(evaluator, node): """ @@ -62,7 +62,7 @@ def execution_allowed(evaluator, node): at the same place, in the same module may not be executed two times. """ pushed_nodes = evaluator.recursion_detector.pushed_nodes - + if node in pushed_nodes: debug.warning('catched stmt recursion: %s @%s', node, getattr(node, 'start_pos', None)) @@ -73,8 +73,8 @@ def execution_allowed(evaluator, node): yield True finally: pushed_nodes.pop() - - + + def execution_recursion_decorator(default=NO_CONTEXTS): def decorator(func): def wrapper(self, **kwargs): @@ -90,43 +90,43 @@ def execution_recursion_decorator(default=NO_CONTEXTS): return result return wrapper return decorator - - -class ExecutionRecursionDetector(object): - """ - Catches recursions of executions. 
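
``execution_allowed`` above is a re-entrancy guard: a statement that is already being evaluated somewhere up the stack is not evaluated again. A self-contained sketch of the same push/pop pattern with a module-level stack::

    from contextlib import contextmanager

    _pushed_nodes = []

    @contextmanager
    def allowed(node):
        # Yield False if `node` is already on the stack, otherwise push it
        # for the duration of the evaluation and pop it afterwards.
        if node in _pushed_nodes:
            yield False
            return
        _pushed_nodes.append(node)
        try:
            yield True
        finally:
            _pushed_nodes.pop()

    def evaluate(node):
        with allowed(node) as ok:
            if not ok:
                return 'recursion stopped'
            return evaluate(node)  # a self-referential node recurses here

    print(evaluate('stmt@3,4'))  # -> 'recursion stopped'
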
- """ + + +class ExecutionRecursionDetector(object): + """ + Catches recursions of executions. + """ def __init__(self, evaluator): self._evaluator = evaluator - + self._recursion_level = 0 self._parent_execution_funcs = [] self._funcdef_execution_counts = {} self._execution_count = 0 - + def pop_execution(self): self._parent_execution_funcs.pop() self._recursion_level -= 1 - + def push_execution(self, execution): funcdef = execution.tree_node - + # These two will be undone in pop_execution. self._recursion_level += 1 self._parent_execution_funcs.append(funcdef) - + module = execution.get_root_context() if module == self._evaluator.builtins_module: # We have control over builtins so we know they are not recursing # like crazy. Therefore we just let them execute always, because # they usually just help a lot with getting good results. - return False - + return False + if self._recursion_level > recursion_limit: - return True + return True if self._execution_count >= total_function_execution_limit: - return True + return True self._execution_count += 1 if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit: @@ -135,4 +135,4 @@ class ExecutionRecursionDetector(object): if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit: return True - return False + return False diff --git a/contrib/python/jedi/jedi/evaluate/stdlib.py b/contrib/python/jedi/jedi/evaluate/stdlib.py index 52c223838b..5303318235 100644 --- a/contrib/python/jedi/jedi/evaluate/stdlib.py +++ b/contrib/python/jedi/jedi/evaluate/stdlib.py @@ -1,21 +1,21 @@ -""" -Implementations of standard library functions, because it's not possible to -understand them with Jedi. - -To add a new implementation, create a function and add it to the -``_implemented`` dict at the bottom of this module. - +""" +Implementations of standard library functions, because it's not possible to +understand them with Jedi. + +To add a new implementation, create a function and add it to the +``_implemented`` dict at the bottom of this module. + Note that this module exists only to implement very specific functionality in the standard library. The usual way to understand the standard library is the compiled module that returns the types for C-builtins. -""" +""" import parso - + from jedi._compatibility import force_unicode from jedi import debug from jedi.evaluate.arguments import ValuesArguments, repack_with_argument_clinic from jedi.evaluate import analysis -from jedi.evaluate import compiled +from jedi.evaluate import compiled from jedi.evaluate.context.instance import \ AbstractInstanceContext, CompiledInstance, BoundMethod, InstanceArguments from jedi.evaluate.base_context import ContextualizedNode, \ @@ -24,7 +24,7 @@ from jedi.evaluate.context import ClassContext, ModuleContext, FunctionExecution from jedi.evaluate.context import iterable from jedi.evaluate.lazy_context import LazyTreeContext from jedi.evaluate.syntax_tree import is_string - + # Now this is all part of fake tuples in Jedi. However super doesn't work on # __init__ and __new__ doesn't work at all. So adding this to nametuples is # just the easiest way. @@ -32,7 +32,7 @@ _NAMEDTUPLE_INIT = """ def __init__(_cls, {arg_list}): 'A helper function for namedtuple.' 
self.__iterable = ({arg_list}) - + def __iter__(self): for i in self.__iterable: yield i @@ -43,51 +43,51 @@ _NAMEDTUPLE_INIT = """ """ -class NotInStdLib(LookupError): - pass - - +class NotInStdLib(LookupError): + pass + + def execute(evaluator, obj, arguments): if isinstance(obj, BoundMethod): raise NotInStdLib() - try: + try: obj_name = obj.name.string_name - except AttributeError: - pass - else: + except AttributeError: + pass + else: if obj.parent_context == evaluator.builtins_module: - module_name = 'builtins' + module_name = 'builtins' elif isinstance(obj.parent_context, ModuleContext): module_name = obj.parent_context.name.string_name - else: - module_name = '' - - # for now we just support builtin functions. - try: + else: + module_name = '' + + # for now we just support builtin functions. + try: func = _implemented[module_name][obj_name] - except KeyError: - pass + except KeyError: + pass else: return func(evaluator, obj, arguments=arguments) - raise NotInStdLib() - - + raise NotInStdLib() + + def _follow_param(evaluator, arguments, index): - try: + try: key, lazy_context = list(arguments.unpack())[index] - except IndexError: + except IndexError: return NO_CONTEXTS - else: + else: return lazy_context.infer() - - + + def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False): - """ - Works like Argument Clinic (PEP 436), to validate function params. - """ - - def f(func): + """ + Works like Argument Clinic (PEP 436), to validate function params. + """ + + def f(func): @repack_with_argument_clinic(string, keep_arguments_param=True) def wrapper(evaluator, obj, *args, **kwargs): arguments = kwargs.pop('arguments') @@ -103,11 +103,11 @@ def argument_clinic(string, want_obj=False, want_context=False, want_arguments=F result = func(evaluator, *args, **kwargs) debug.dbg('builtin end: %s', result, color='MAGENTA') return result - - return wrapper - return f - - + + return wrapper + return f + + @argument_clinic('iterator[, default], /') def builtins_next(evaluator, iterators, defaults): """ @@ -132,45 +132,45 @@ def builtins_next(evaluator, iterators, defaults): return defaults -@argument_clinic('object, name[, default], /') -def builtins_getattr(evaluator, objects, names, defaults=None): - # follow the first param - for obj in objects: - for name in names: +@argument_clinic('object, name[, default], /') +def builtins_getattr(evaluator, objects, names, defaults=None): + # follow the first param + for obj in objects: + for name in names: if is_string(name): return obj.py__getattribute__(force_unicode(name.get_safe_value())) - else: - debug.warning('getattr called without str') - continue + else: + debug.warning('getattr called without str') + continue return NO_CONTEXTS - - -@argument_clinic('object[, bases, dict], /') -def builtins_type(evaluator, objects, bases, dicts): - if bases or dicts: + + +@argument_clinic('object[, bases, dict], /') +def builtins_type(evaluator, objects, bases, dicts): + if bases or dicts: # It's a type creation... maybe someday... 
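
``execute`` above is a dispatch table: handlers are looked up by module name and function name, and a ``KeyError`` means "no special case, fall back to normal evaluation" (signalled by ``NotInStdLib``). A stripped-down sketch of that shape with made-up handlers::

    class NotInStdLib(LookupError):
        pass

    def _return_first_param(args):
        return args[0]

    _handlers = {
        'copy': {'copy': _return_first_param, 'deepcopy': _return_first_param},
        'json': {'load': lambda args: None, 'loads': lambda args: None},
    }

    def execute(module_name, func_name, args):
        try:
            func = _handlers[module_name][func_name]
        except KeyError:
            raise NotInStdLib()
        return func(args)

    print(execute('copy', 'deepcopy', (['a', 'b'],)))  # -> ['a', 'b']
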
return NO_CONTEXTS - else: + else: return objects.py__class__() - - + + class SuperInstance(AbstractInstanceContext): - """To be used like the object ``super`` returns.""" - def __init__(self, evaluator, cls): - su = cls.py_mro()[1] - super().__init__(evaluator, su and su[0] or self) - - + """To be used like the object ``super`` returns.""" + def __init__(self, evaluator, cls): + su = cls.py_mro()[1] + super().__init__(evaluator, su and su[0] or self) + + @argument_clinic('[type[, obj]], /', want_context=True) def builtins_super(evaluator, types, objects, context): - # TODO make this able to detect multiple inheritance super + # TODO make this able to detect multiple inheritance super if isinstance(context, FunctionExecutionContext): if isinstance(context.var_args, InstanceArguments): su = context.var_args.instance.py__class__().py__bases__() return su[0].infer().execute_evaluated() - + return NO_CONTEXTS - + @argument_clinic('sequence, /', want_obj=True, want_arguments=True) def builtins_reversed(evaluator, sequences, obj, arguments): @@ -185,42 +185,42 @@ def builtins_reversed(evaluator, sequences, obj, arguments): ordered = list(sequences.iterate(cn)) rev = list(reversed(ordered)) - # Repack iterator values and then run it the normal way. This is - # necessary, because `reversed` is a function and autocompletion - # would fail in certain cases like `reversed(x).__iter__` if we - # just returned the result directly. + # Repack iterator values and then run it the normal way. This is + # necessary, because `reversed` is a function and autocompletion + # would fail in certain cases like `reversed(x).__iter__` if we + # just returned the result directly. seq = iterable.FakeSequence(evaluator, u'list', rev) arguments = ValuesArguments([ContextSet(seq)]) return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments)) - - + + @argument_clinic('obj, type, /', want_arguments=True) def builtins_isinstance(evaluator, objects, types, arguments): bool_results = set() - for o in objects: + for o in objects: cls = o.py__class__() - try: + try: mro_func = cls.py__mro__ - except AttributeError: - # This is temporary. Everything should have a class attribute in - # Python?! Maybe we'll leave it here, because some numpy objects or - # whatever might not. + except AttributeError: + # This is temporary. Everything should have a class attribute in + # Python?! Maybe we'll leave it here, because some numpy objects or + # whatever might not. bool_results = set([True, False]) break - + mro = mro_func() - - for cls_or_tup in types: - if cls_or_tup.is_class(): - bool_results.add(cls_or_tup in mro) + + for cls_or_tup in types: + if cls_or_tup.is_class(): + bool_results.add(cls_or_tup in mro) elif cls_or_tup.name.string_name == 'tuple' \ and cls_or_tup.get_root_context() == evaluator.builtins_module: - # Check for tuples. + # Check for tuples. classes = ContextSet.from_sets( lazy_context.infer() for lazy_context in cls_or_tup.iterate() ) - bool_results.add(any(cls in mro for cls in classes)) + bool_results.add(any(cls in mro for cls in classes)) else: _, lazy_context = list(arguments.unpack())[1] if isinstance(lazy_context, LazyTreeContext): @@ -229,34 +229,34 @@ def builtins_isinstance(evaluator, objects, types, arguments): 'class, type, or tuple of classes and types, ' \ 'not %s.' 
% cls_or_tup analysis.add(lazy_context._context, 'type-error-isinstance', node, message) - + return ContextSet.from_iterable( compiled.builtin_from_name(evaluator, force_unicode(str(b))) for b in bool_results ) - - + + def collections_namedtuple(evaluator, obj, arguments): - """ - Implementation of the namedtuple function. - - This has to be done by processing the namedtuple class template and - evaluating the result. - - """ + """ + Implementation of the namedtuple function. + + This has to be done by processing the namedtuple class template and + evaluating the result. + + """ collections_context = obj.parent_context _class_template_set = collections_context.py__getattribute__(u'_class_template') if not _class_template_set: # Namedtuples are not supported on Python 2.6, early 2.7, because the # _class_template variable is not defined, there. return NO_CONTEXTS - - # Process arguments + + # Process arguments # TODO here we only use one of the types, we should use all. # TODO this is buggy, doesn't need to be a string name = list(_follow_param(evaluator, arguments, 0))[0].get_safe_value() _fields = list(_follow_param(evaluator, arguments, 1))[0] - if isinstance(_fields, compiled.CompiledObject): + if isinstance(_fields, compiled.CompiledObject): fields = _fields.get_safe_value().replace(',', ' ').split() elif isinstance(_fields, iterable.Sequence): fields = [ @@ -264,9 +264,9 @@ def collections_namedtuple(evaluator, obj, arguments): for lazy_context in _fields.py__iter__() for v in lazy_context.infer() if is_string(v) ] - else: + else: return NO_CONTEXTS - + def get_var(name): x, = collections_context.py__getattribute__(name) return x.get_safe_value() @@ -275,15 +275,15 @@ def collections_namedtuple(evaluator, obj, arguments): base += _NAMEDTUPLE_INIT # Build source code code = base.format( - typename=name, + typename=name, field_names=tuple(fields), - num_fields=len(fields), + num_fields=len(fields), arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1], repr_fmt=', '.join(get_var(u'_repr_template').format(name=name) for name in fields), field_defs='\n'.join(get_var(u'_field_template').format(index=index, name=name) - for index, name in enumerate(fields)) - ) - + for index, name in enumerate(fields)) + ) + # Parse source code module = evaluator.grammar.parse(code) generated_class = next(module.iter_classdefs()) @@ -292,30 +292,30 @@ def collections_namedtuple(evaluator, obj, arguments): code_lines=parso.split_lines(code, keepends=True), ) return ContextSet(ClassContext(evaluator, parent_context, generated_class)) - - -@argument_clinic('first, /') -def _return_first_param(evaluator, firsts): - return firsts - - -_implemented = { - 'builtins': { - 'getattr': builtins_getattr, - 'type': builtins_type, - 'super': builtins_super, - 'reversed': builtins_reversed, - 'isinstance': builtins_isinstance, - }, - 'copy': { - 'copy': _return_first_param, - 'deepcopy': _return_first_param, - }, - 'json': { + + +@argument_clinic('first, /') +def _return_first_param(evaluator, firsts): + return firsts + + +_implemented = { + 'builtins': { + 'getattr': builtins_getattr, + 'type': builtins_type, + 'super': builtins_super, + 'reversed': builtins_reversed, + 'isinstance': builtins_isinstance, + }, + 'copy': { + 'copy': _return_first_param, + 'deepcopy': _return_first_param, + }, + 'json': { 'load': lambda evaluator, obj, arguments: NO_CONTEXTS, 'loads': lambda evaluator, obj, arguments: NO_CONTEXTS, - }, - 'collections': { - 'namedtuple': collections_namedtuple, - }, -} + }, + 'collections': { 
+ 'namedtuple': collections_namedtuple, + }, +} diff --git a/contrib/python/jedi/jedi/evaluate/sys_path.py b/contrib/python/jedi/jedi/evaluate/sys_path.py index 8fb1843f05..a43d2ba177 100644 --- a/contrib/python/jedi/jedi/evaluate/sys_path.py +++ b/contrib/python/jedi/jedi/evaluate/sys_path.py @@ -1,5 +1,5 @@ -import os - +import os + from jedi._compatibility import unicode, force_unicode, all_suffixes from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate.base_context import ContextualizedNode @@ -7,58 +7,58 @@ from jedi.evaluate.helpers import is_string from jedi.common.utils import traverse_parents from jedi.parser_utils import get_cached_code_lines from jedi import settings -from jedi import debug - - +from jedi import debug + + def _abs_path(module_context, path): if os.path.isabs(path): return path - + module_path = module_context.py__file__() if module_path is None: # In this case we have no idea where we actually are in the file # system. return None - + base_dir = os.path.dirname(module_path) path = force_unicode(path) return os.path.abspath(os.path.join(base_dir, path)) - - + + def _paths_from_assignment(module_context, expr_stmt): - """ - Extracts the assigned strings from an assignment that looks as follows:: - + """ + Extracts the assigned strings from an assignment that looks as follows:: + sys.path[0:0] = ['module/path', 'another/module/path'] - - This function is in general pretty tolerant (and therefore 'buggy'). - However, it's not a big issue usually to add more paths to Jedi's sys_path, - because it will only affect Jedi in very random situations and by adding - more paths than necessary, it usually benefits the general user. - """ - for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): - try: - assert operator in ['=', '+='] + + This function is in general pretty tolerant (and therefore 'buggy'). + However, it's not a big issue usually to add more paths to Jedi's sys_path, + because it will only affect Jedi in very random situations and by adding + more paths than necessary, it usually benefits the general user. + """ + for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): + try: + assert operator in ['=', '+='] assert assignee.type in ('power', 'atom_expr') and \ len(assignee.children) > 1 - c = assignee.children - assert c[0].type == 'name' and c[0].value == 'sys' - trailer = c[1] - assert trailer.children[0] == '.' and trailer.children[1].value == 'path' - # TODO Essentially we're not checking details on sys.path - # manipulation. Both assigment of the sys.path and changing/adding + c = assignee.children + assert c[0].type == 'name' and c[0].value == 'sys' + trailer = c[1] + assert trailer.children[0] == '.' and trailer.children[1].value == 'path' + # TODO Essentially we're not checking details on sys.path + # manipulation. Both assigment of the sys.path and changing/adding # parts of the sys.path are the same: They get added to the end of # the current sys.path. 
- """ - execution = c[2] - assert execution.children[0] == '[' - subscript = execution.children[1] - assert subscript.type == 'subscript' - assert ':' in subscript.children - """ - except AssertionError: - continue - + """ + execution = c[2] + assert execution.children[0] == '[' + subscript = execution.children[1] + assert subscript.type == 'subscript' + assert ':' in subscript.children + """ + except AssertionError: + continue + cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt) for lazy_context in cn.infer().iterate(cn): for context in lazy_context.infer(): @@ -66,59 +66,59 @@ def _paths_from_assignment(module_context, expr_stmt): abs_path = _abs_path(module_context, context.get_safe_value()) if abs_path is not None: yield abs_path - - + + def _paths_from_list_modifications(module_context, trailer1, trailer2): - """ extract the path from either "sys.path.append" or "sys.path.insert" """ - # Guarantee that both are trailers, the first one a name and the second one - # a function execution with at least one param. + """ extract the path from either "sys.path.append" or "sys.path.insert" """ + # Guarantee that both are trailers, the first one a name and the second one + # a function execution with at least one param. if not (trailer1.type == 'trailer' and trailer1.children[0] == '.' and trailer2.type == 'trailer' and trailer2.children[0] == '(' - and len(trailer2.children) == 3): + and len(trailer2.children) == 3): return - - name = trailer1.children[1].value - if name not in ['insert', 'append']: + + name = trailer1.children[1].value + if name not in ['insert', 'append']: return - arg = trailer2.children[1] - if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. - arg = arg.children[2] - + arg = trailer2.children[1] + if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. + arg = arg.children[2] + for context in module_context.create_context(arg).eval_node(arg): if is_string(context): abs_path = _abs_path(module_context, context.get_safe_value()) if abs_path is not None: yield abs_path - + @evaluator_method_cache(default=[]) def check_sys_path_modifications(module_context): """ Detect sys.path modifications within module. 
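
The two helpers above sniff static ``sys.path`` manipulations out of the syntax tree: slice assignments like ``sys.path[0:0] = [...]`` and ``sys.path.append()``/``insert()`` calls. A rough standalone equivalent on the stdlib ``ast`` module (assumes Python 3.8+ for ``ast.Constant``), rather than on parso trees::

    import ast

    def sys_path_additions(source):
        def is_sys_path(node):
            return (isinstance(node, ast.Attribute) and node.attr == 'path'
                    and isinstance(node.value, ast.Name) and node.value.id == 'sys')

        def strings(node):
            for child in ast.walk(node):
                if isinstance(child, ast.Constant) and isinstance(child.value, str):
                    yield child.value

        found = []
        for node in ast.walk(ast.parse(source)):
            if (isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute)
                    and node.func.attr in ('append', 'insert')
                    and is_sys_path(node.func.value) and node.args):
                found.extend(strings(node.args[-1]))
            elif isinstance(node, ast.Assign):
                for target in node.targets:
                    if isinstance(target, ast.Subscript) and is_sys_path(target.value):
                        found.extend(strings(node.value))
        return found

    code = "import sys\nsys.path.insert(0, 'src')\nsys.path[0:0] = ['a/plugins', 'b/plugins']\n"
    print(sys_path_additions(code))  # -> ['src', 'a/plugins', 'b/plugins']
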
""" - def get_sys_path_powers(names): - for name in names: - power = name.parent.parent + def get_sys_path_powers(names): + for name in names: + power = name.parent.parent if power.type in ('power', 'atom_expr'): - c = power.children + c = power.children if c[0].type == 'name' and c[0].value == 'sys' \ and c[1].type == 'trailer': - n = c[1].children[1] + n = c[1].children[1] if n.type == 'name' and n.value == 'path': - yield name, power - + yield name, power + if module_context.tree_node is None: return [] added = [] - try: + try: possible_names = module_context.tree_node.get_used_names()['path'] - except KeyError: - pass - else: - for name, power in get_sys_path_powers(possible_names): + except KeyError: + pass + else: + for name, power in get_sys_path_powers(possible_names): expr_stmt = power.parent - if len(power.children) >= 4: + if len(power.children) >= 4: added.extend( _paths_from_list_modifications( module_context, *power.children[2:4] @@ -127,18 +127,18 @@ def check_sys_path_modifications(module_context): elif expr_stmt is not None and expr_stmt.type == 'expr_stmt': added.extend(_paths_from_assignment(module_context, expr_stmt)) return added - - + + def discover_buildout_paths(evaluator, script_path): - buildout_script_paths = set() - + buildout_script_paths = set() + for buildout_script_path in _get_buildout_script_paths(script_path): for path in _get_paths_from_buildout_script(evaluator, buildout_script_path): - buildout_script_paths.add(path) - + buildout_script_paths.add(path) + return buildout_script_paths - - + + def _get_paths_from_buildout_script(evaluator, buildout_script_path): try: module_node = evaluator.parse( @@ -148,52 +148,52 @@ def _get_paths_from_buildout_script(evaluator, buildout_script_path): ) except IOError: debug.warning('Error trying to read buildout_script: %s', buildout_script_path) - return - + return + from jedi.evaluate.context import ModuleContext module = ModuleContext( evaluator, module_node, buildout_script_path, code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path), ) for path in check_sys_path_modifications(module): - yield path - - -def _get_parent_dir_with_file(path, filename): - for parent in traverse_parents(path): - if os.path.isfile(os.path.join(parent, filename)): - return parent - return None - - + yield path + + +def _get_parent_dir_with_file(path, filename): + for parent in traverse_parents(path): + if os.path.isfile(os.path.join(parent, filename)): + return parent + return None + + def _get_buildout_script_paths(search_path): - """ - if there is a 'buildout.cfg' file in one of the parent directories of the - given module it will return a list of all files in the buildout bin - directory that look like python files. - + """ + if there is a 'buildout.cfg' file in one of the parent directories of the + given module it will return a list of all files in the buildout bin + directory that look like python files. + :param search_path: absolute path to the module. 
:type search_path: str - """ + """ project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg') - if not project_root: + if not project_root: return - bin_path = os.path.join(project_root, 'bin') - if not os.path.exists(bin_path): + bin_path = os.path.join(project_root, 'bin') + if not os.path.exists(bin_path): return - for filename in os.listdir(bin_path): - try: - filepath = os.path.join(bin_path, filename) - with open(filepath, 'r') as f: - firstline = f.readline() - if firstline.startswith('#!') and 'python' in firstline: + for filename in os.listdir(bin_path): + try: + filepath = os.path.join(bin_path, filename) + with open(filepath, 'r') as f: + firstline = f.readline() + if firstline.startswith('#!') and 'python' in firstline: yield filepath except (UnicodeDecodeError, IOError) as e: # Probably a binary file; permission error or race cond. because # file got deleted. Ignore it. - debug.warning(unicode(e)) - continue + debug.warning(unicode(e)) + continue def dotted_path_in_sys_path(sys_path, module_path): diff --git a/contrib/python/jedi/jedi/refactoring.py b/contrib/python/jedi/jedi/refactoring.py index 6c1d74d1bd..d887a16d08 100644 --- a/contrib/python/jedi/jedi/refactoring.py +++ b/contrib/python/jedi/jedi/refactoring.py @@ -1,172 +1,172 @@ -""" +""" THIS is not in active development, please check https://github.com/davidhalter/jedi/issues/667 first before editing. -Introduce some basic refactoring functions to |jedi|. This module is still in a -very early development stage and needs much testing and improvement. - -.. warning:: I won't do too much here, but if anyone wants to step in, please - do. Refactoring is none of my priorities - +Introduce some basic refactoring functions to |jedi|. This module is still in a +very early development stage and needs much testing and improvement. + +.. warning:: I won't do too much here, but if anyone wants to step in, please + do. Refactoring is none of my priorities + It uses the |jedi| `API <api.html>`_ and supports currently the -following functions (sometimes bug-prone): - -- rename -- extract variable -- inline variable -""" -import difflib - +following functions (sometimes bug-prone): + +- rename +- extract variable +- inline variable +""" +import difflib + from parso import python_bytes_to_unicode, split_lines -from jedi.evaluate import helpers - - -class Refactoring(object): - def __init__(self, change_dct): - """ - :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) - """ - self.change_dct = change_dct - - def old_files(self): - dct = {} - for old_path, (new_path, old_l, new_l) in self.change_dct.items(): +from jedi.evaluate import helpers + + +class Refactoring(object): + def __init__(self, change_dct): + """ + :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) + """ + self.change_dct = change_dct + + def old_files(self): + dct = {} + for old_path, (new_path, old_l, new_l) in self.change_dct.items(): dct[old_path] = '\n'.join(old_l) - return dct - - def new_files(self): - dct = {} - for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - dct[new_path] = '\n'.join(new_l) - return dct - - def diff(self): - texts = [] - for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - if old_path: - udiff = difflib.unified_diff(old_l, new_l) - else: - udiff = difflib.unified_diff(old_l, new_l, old_path, new_path) - texts.append('\n'.join(udiff)) - return '\n'.join(texts) - - -def rename(script, new_name): - """ The `args` / `kwargs` params are the same as in `api.Script`. 
+ return dct + + def new_files(self): + dct = {} + for old_path, (new_path, old_l, new_l) in self.change_dct.items(): + dct[new_path] = '\n'.join(new_l) + return dct + + def diff(self): + texts = [] + for old_path, (new_path, old_l, new_l) in self.change_dct.items(): + if old_path: + udiff = difflib.unified_diff(old_l, new_l) + else: + udiff = difflib.unified_diff(old_l, new_l, old_path, new_path) + texts.append('\n'.join(udiff)) + return '\n'.join(texts) + + +def rename(script, new_name): + """ The `args` / `kwargs` params are the same as in `api.Script`. :param new_name: The new name of the script. :param script: The source Script object. - :return: list of changed lines/changed files - """ - return Refactoring(_rename(script.usages(), new_name)) - - -def _rename(names, replace_str): - """ For both rename and inline. """ - order = sorted(names, key=lambda x: (x.module_path, x.line, x.column), - reverse=True) - - def process(path, old_lines, new_lines): - if new_lines is not None: # goto next file, save last - dct[path] = path, old_lines, new_lines - - dct = {} - current_path = object() - new_lines = old_lines = None - for name in order: - if name.in_builtin_module(): - continue - if current_path != name.module_path: - current_path = name.module_path - - process(current_path, old_lines, new_lines) - if current_path is not None: - # None means take the source that is a normal param. - with open(current_path) as f: - source = f.read() - + :return: list of changed lines/changed files + """ + return Refactoring(_rename(script.usages(), new_name)) + + +def _rename(names, replace_str): + """ For both rename and inline. """ + order = sorted(names, key=lambda x: (x.module_path, x.line, x.column), + reverse=True) + + def process(path, old_lines, new_lines): + if new_lines is not None: # goto next file, save last + dct[path] = path, old_lines, new_lines + + dct = {} + current_path = object() + new_lines = old_lines = None + for name in order: + if name.in_builtin_module(): + continue + if current_path != name.module_path: + current_path = name.module_path + + process(current_path, old_lines, new_lines) + if current_path is not None: + # None means take the source that is a normal param. + with open(current_path) as f: + source = f.read() + new_lines = split_lines(python_bytes_to_unicode(source)) - old_lines = new_lines[:] - - nr, indent = name.line, name.column - line = new_lines[nr - 1] - new_lines[nr - 1] = line[:indent] + replace_str + \ - line[indent + len(name.name):] - process(current_path, old_lines, new_lines) - return dct - - -def extract(script, new_name): - """ The `args` / `kwargs` params are the same as in `api.Script`. - :param operation: The refactoring operation to execute. - :type operation: str - :type source: str - :return: list of changed lines/changed files - """ + old_lines = new_lines[:] + + nr, indent = name.line, name.column + line = new_lines[nr - 1] + new_lines[nr - 1] = line[:indent] + replace_str + \ + line[indent + len(name.name):] + process(current_path, old_lines, new_lines) + return dct + + +def extract(script, new_name): + """ The `args` / `kwargs` params are the same as in `api.Script`. + :param operation: The refactoring operation to execute. 
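
``_rename`` above sorts all usages in reverse (file, line, column) order before rewriting, so that edits near the end of a file cannot invalidate the positions of edits that are still pending. A standalone sketch of that ordering trick on a plain list of lines::

    def rename_occurrences(lines, occurrences, old, new):
        # Apply replacements bottom-up and right-to-left so earlier
        # (line, column) positions stay valid while we edit.
        for line, column in sorted(occurrences, reverse=True):
            text = lines[line - 1]
            lines[line - 1] = text[:column] + new + text[column + len(old):]
        return lines

    lines = ['answer = 42', 'print(answer + answer)']
    occurrences = [(1, 0), (2, 6), (2, 15)]
    print(rename_occurrences(lines, occurrences, 'answer', 'result'))
    # -> ['result = 42', 'print(result + result)']
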
+ :type operation: str + :type source: str + :return: list of changed lines/changed files + """ new_lines = split_lines(python_bytes_to_unicode(script.source)) - old_lines = new_lines[:] - - user_stmt = script._parser.user_stmt() - + old_lines = new_lines[:] + + user_stmt = script._parser.user_stmt() + # TODO care for multi-line extracts - dct = {} - if user_stmt: - pos = script._pos - line_index = pos[0] - 1 + dct = {} + if user_stmt: + pos = script._pos + line_index = pos[0] - 1 # Be careful here. 'array_for_pos' does not exist in 'helpers'. - arr, index = helpers.array_for_pos(user_stmt, pos) - if arr is not None: - start_pos = arr[index].start_pos - end_pos = arr[index].end_pos - - # take full line if the start line is different from end line - e = end_pos[1] if end_pos[0] == start_pos[0] else None - start_line = new_lines[start_pos[0] - 1] - text = start_line[start_pos[1]:e] - for l in range(start_pos[0], end_pos[0] - 1): + arr, index = helpers.array_for_pos(user_stmt, pos) + if arr is not None: + start_pos = arr[index].start_pos + end_pos = arr[index].end_pos + + # take full line if the start line is different from end line + e = end_pos[1] if end_pos[0] == start_pos[0] else None + start_line = new_lines[start_pos[0] - 1] + text = start_line[start_pos[1]:e] + for l in range(start_pos[0], end_pos[0] - 1): text += '\n' + str(l) - if e is None: - end_line = new_lines[end_pos[0] - 1] - text += '\n' + end_line[:end_pos[1]] - - # remove code from new lines - t = text.lstrip() - del_start = start_pos[1] + len(text) - len(t) - - text = t.rstrip() - del_end = len(t) - len(text) - if e is None: - new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:] - e = len(start_line) - else: - e = e - del_end - start_line = start_line[:del_start] + new_name + start_line[e:] - new_lines[start_pos[0] - 1] = start_line - new_lines[start_pos[0]:end_pos[0] - 1] = [] - + if e is None: + end_line = new_lines[end_pos[0] - 1] + text += '\n' + end_line[:end_pos[1]] + + # remove code from new lines + t = text.lstrip() + del_start = start_pos[1] + len(text) - len(t) + + text = t.rstrip() + del_end = len(t) - len(text) + if e is None: + new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:] + e = len(start_line) + else: + e = e - del_end + start_line = start_line[:del_start] + new_name + start_line[e:] + new_lines[start_pos[0] - 1] = start_line + new_lines[start_pos[0]:end_pos[0] - 1] = [] + # add parentheses in multi-line case - open_brackets = ['(', '[', '{'] - close_brackets = [')', ']', '}'] - if '\n' in text and not (text[0] in open_brackets and text[-1] == - close_brackets[open_brackets.index(text[0])]): - text = '(%s)' % text - - # add new line before statement - indent = user_stmt.start_pos[1] - new = "%s%s = %s" % (' ' * indent, new_name, text) - new_lines.insert(line_index, new) - dct[script.path] = script.path, old_lines, new_lines - return Refactoring(dct) - - -def inline(script): - """ - :type script: api.Script - """ + open_brackets = ['(', '[', '{'] + close_brackets = [')', ']', '}'] + if '\n' in text and not (text[0] in open_brackets and text[-1] == + close_brackets[open_brackets.index(text[0])]): + text = '(%s)' % text + + # add new line before statement + indent = user_stmt.start_pos[1] + new = "%s%s = %s" % (' ' * indent, new_name, text) + new_lines.insert(line_index, new) + dct[script.path] = script.path, old_lines, new_lines + return Refactoring(dct) + + +def inline(script): + """ + :type script: api.Script + """ new_lines = split_lines(python_bytes_to_unicode(script.source)) - - dct = 
{} - - definitions = script.goto_assignments() + + dct = {} + + definitions = script.goto_assignments() assert len(definitions) == 1 stmt = definitions[0]._definition usages = script.usages() @@ -178,7 +178,7 @@ def inline(script): # don't allow multi-line refactorings for now. assert stmt.start_pos[0] == stmt.end_pos[0] index = stmt.start_pos[0] - 1 - + line = new_lines[index] replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] replace_str = replace_str.strip() @@ -187,11 +187,11 @@ def inline(script): arr = expression_list[0] if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: replace_str = '(%s)' % replace_str - + # if it's the only assignment, remove the statement if len(stmt.get_defined_names()) == 1: line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] - + dct = _rename(inlines, replace_str) # remove the empty line new_lines = dct[script.path][2] @@ -199,5 +199,5 @@ def inline(script): new_lines[index] = line else: new_lines.pop(index) - - return Refactoring(dct) + + return Refactoring(dct) diff --git a/contrib/python/jedi/jedi/settings.py b/contrib/python/jedi/jedi/settings.py index 436fadfc0e..2ee60aaeb3 100644 --- a/contrib/python/jedi/jedi/settings.py +++ b/contrib/python/jedi/jedi/settings.py @@ -1,164 +1,164 @@ -""" -This module contains variables with global |jedi| settings. To change the -behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. - -Plugins should expose an interface so that the user can adjust the -configuration. - - -Example usage:: - - from jedi import settings - settings.case_insensitive_completion = True - - -Completion output -~~~~~~~~~~~~~~~~~ - -.. autodata:: case_insensitive_completion -.. autodata:: add_bracket_after_function -.. autodata:: no_completion_duplicates - - -Filesystem cache -~~~~~~~~~~~~~~~~ - -.. autodata:: cache_directory -.. autodata:: use_filesystem_cache - - -Parser -~~~~~~ - -.. autodata:: fast_parser - - -Dynamic stuff -~~~~~~~~~~~~~ - -.. autodata:: dynamic_array_additions -.. autodata:: dynamic_params -.. autodata:: dynamic_params_for_other_modules -.. autodata:: additional_dynamic_modules -.. autodata:: auto_import_modules - - -Caching -~~~~~~~ - -.. autodata:: call_signatures_validity - - -""" -import os -import platform - -# ---------------- -# completion output settings -# ---------------- - -case_insensitive_completion = True -""" -The completion is by default case insensitive. -""" - -add_bracket_after_function = False -""" -Adds an opening bracket after a function, because that's normal behaviour. -Removed it again, because in VIM that is not very practical. -""" - -no_completion_duplicates = True -""" -If set, completions with the same name don't appear in the output anymore, -but are in the `same_name_completions` attribute. -""" - -# ---------------- -# Filesystem cache -# ---------------- - -use_filesystem_cache = True -""" -Use filesystem cache to save once parsed files with pickle. -""" - -if platform.system().lower() == 'windows': - _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi', - 'Jedi') -elif platform.system().lower() == 'darwin': - _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') -else: - _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', - 'jedi') -cache_directory = os.path.expanduser(_cache_directory) -""" +""" +This module contains variables with global |jedi| settings. To change the +behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. 
+ +Plugins should expose an interface so that the user can adjust the +configuration. + + +Example usage:: + + from jedi import settings + settings.case_insensitive_completion = True + + +Completion output +~~~~~~~~~~~~~~~~~ + +.. autodata:: case_insensitive_completion +.. autodata:: add_bracket_after_function +.. autodata:: no_completion_duplicates + + +Filesystem cache +~~~~~~~~~~~~~~~~ + +.. autodata:: cache_directory +.. autodata:: use_filesystem_cache + + +Parser +~~~~~~ + +.. autodata:: fast_parser + + +Dynamic stuff +~~~~~~~~~~~~~ + +.. autodata:: dynamic_array_additions +.. autodata:: dynamic_params +.. autodata:: dynamic_params_for_other_modules +.. autodata:: additional_dynamic_modules +.. autodata:: auto_import_modules + + +Caching +~~~~~~~ + +.. autodata:: call_signatures_validity + + +""" +import os +import platform + +# ---------------- +# completion output settings +# ---------------- + +case_insensitive_completion = True +""" +The completion is by default case insensitive. +""" + +add_bracket_after_function = False +""" +Adds an opening bracket after a function, because that's normal behaviour. +Removed it again, because in VIM that is not very practical. +""" + +no_completion_duplicates = True +""" +If set, completions with the same name don't appear in the output anymore, +but are in the `same_name_completions` attribute. +""" + +# ---------------- +# Filesystem cache +# ---------------- + +use_filesystem_cache = True +""" +Use filesystem cache to save once parsed files with pickle. +""" + +if platform.system().lower() == 'windows': + _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi', + 'Jedi') +elif platform.system().lower() == 'darwin': + _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') +else: + _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', + 'jedi') +cache_directory = os.path.expanduser(_cache_directory) +""" The path where the cache is stored. - -On Linux, this defaults to ``~/.cache/jedi/``, on OS X to -``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. -On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, -``$XDG_CACHE_HOME/jedi`` is used instead of the default one. -""" - -# ---------------- -# parser -# ---------------- - -fast_parser = True -""" -Use the fast parser. This means that reparsing is only being done if -something has been changed e.g. to a function. If this happens, only the -function is being reparsed. -""" - -# ---------------- -# dynamic stuff -# ---------------- - -dynamic_array_additions = True -""" -check for `append`, etc. on arrays: [], {}, () as well as list/set calls. -""" - -dynamic_params = True -""" -A dynamic param completion, finds the callees of the function, which define -the params of a function. -""" - -dynamic_params_for_other_modules = True -""" -Do the same for other modules. -""" - -additional_dynamic_modules = [] -""" -Additional modules in which |jedi| checks if statements are to be found. This -is practical for IDEs, that want to administrate their modules themselves. -""" - -dynamic_flow_information = True -""" -Check for `isinstance` and other information to infer a type. -""" - -auto_import_modules = [ + +On Linux, this defaults to ``~/.cache/jedi/``, on OS X to +``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. +On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, +``$XDG_CACHE_HOME/jedi`` is used instead of the default one. 
+""" + +# ---------------- +# parser +# ---------------- + +fast_parser = True +""" +Use the fast parser. This means that reparsing is only being done if +something has been changed e.g. to a function. If this happens, only the +function is being reparsed. +""" + +# ---------------- +# dynamic stuff +# ---------------- + +dynamic_array_additions = True +""" +check for `append`, etc. on arrays: [], {}, () as well as list/set calls. +""" + +dynamic_params = True +""" +A dynamic param completion, finds the callees of the function, which define +the params of a function. +""" + +dynamic_params_for_other_modules = True +""" +Do the same for other modules. +""" + +additional_dynamic_modules = [] +""" +Additional modules in which |jedi| checks if statements are to be found. This +is practical for IDEs, that want to administrate their modules themselves. +""" + +dynamic_flow_information = True +""" +Check for `isinstance` and other information to infer a type. +""" + +auto_import_modules = [ 'hashlib', # hashlib is mostly using setattr, which jedi doesn't understand 'gi', # This third-party repository (GTK stuff) doesn't really work with jedi -] -""" -Modules that are not analyzed but imported, although they contain Python code. -This improves autocompletion for libraries that use ``setattr`` or -``globals()`` modifications a lot. -""" - -# ---------------- -# caching validity (time) -# ---------------- - -call_signatures_validity = 3.0 -""" -Finding function calls might be slow (0.1-0.5s). This is not acceptible for -normal writing. Therefore cache it for a short time. -""" +] +""" +Modules that are not analyzed but imported, although they contain Python code. +This improves autocompletion for libraries that use ``setattr`` or +``globals()`` modifications a lot. +""" + +# ---------------- +# caching validity (time) +# ---------------- + +call_signatures_validity = 3.0 +""" +Finding function calls might be slow (0.1-0.5s). This is not acceptible for +normal writing. Therefore cache it for a short time. +""" diff --git a/contrib/python/jedi/jedi/utils.py b/contrib/python/jedi/jedi/utils.py index 04e87c301c..0a75afe984 100644 --- a/contrib/python/jedi/jedi/utils.py +++ b/contrib/python/jedi/jedi/utils.py @@ -1,66 +1,66 @@ -""" -Utilities for end-users. -""" - -from __future__ import absolute_import -import __main__ -from collections import namedtuple +""" +Utilities for end-users. +""" + +from __future__ import absolute_import +import __main__ +from collections import namedtuple import logging import traceback -import re -import os -import sys - +import re +import os +import sys + from parso import split_lines -from jedi import Interpreter +from jedi import Interpreter from jedi.api.helpers import get_on_completion_name - - + + READLINE_DEBUG = False -def setup_readline(namespace_module=__main__): - """ - Install Jedi completer to :mod:`readline`. - - This function setups :mod:`readline` to use Jedi in Python interactive - shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically - ``$HOME/.pythonrc.py``), you can add this piece of code:: - - try: - from jedi.utils import setup_readline - setup_readline() - except ImportError: - # Fallback to the stdlib readline completer if it is installed. - # Taken from http://docs.python.org/2/library/rlcompleter.html - print("Jedi is not installed, falling back to readline") - try: - import readline - import rlcompleter - readline.parse_and_bind("tab: complete") - except ImportError: - print("Readline is not installed either. 
No tab completion is enabled.") - - This will fallback to the readline completer if Jedi is not installed. - The readline completer will only complete names in the global namespace, - so for example:: - - ran<TAB> - - will complete to ``range`` - - with both Jedi and readline, but:: - - range(10).cou<TAB> - - will show complete to ``range(10).count`` only with Jedi. - - You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to - your shell profile (usually ``.bash_profile`` or ``.profile`` if you use - bash). - - """ +def setup_readline(namespace_module=__main__): + """ + Install Jedi completer to :mod:`readline`. + + This function setups :mod:`readline` to use Jedi in Python interactive + shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically + ``$HOME/.pythonrc.py``), you can add this piece of code:: + + try: + from jedi.utils import setup_readline + setup_readline() + except ImportError: + # Fallback to the stdlib readline completer if it is installed. + # Taken from http://docs.python.org/2/library/rlcompleter.html + print("Jedi is not installed, falling back to readline") + try: + import readline + import rlcompleter + readline.parse_and_bind("tab: complete") + except ImportError: + print("Readline is not installed either. No tab completion is enabled.") + + This will fallback to the readline completer if Jedi is not installed. + The readline completer will only complete names in the global namespace, + so for example:: + + ran<TAB> + + will complete to ``range`` + + with both Jedi and readline, but:: + + range(10).cou<TAB> + + will show complete to ``range(10).count`` only with Jedi. + + You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to + your shell profile (usually ``.bash_profile`` or ``.profile`` if you use + bash). + + """ if READLINE_DEBUG: logging.basicConfig( filename='/tmp/jedi.log', @@ -68,24 +68,24 @@ def setup_readline(namespace_module=__main__): level=logging.DEBUG ) - class JediRL(object): - def complete(self, text, state): - """ - This complete stuff is pretty weird, a generator would make - a lot more sense, but probably due to backwards compatibility - this is still the way how it works. - - The only important part is stuff in the ``state == 0`` flow, - everything else has been copied from the ``rlcompleter`` std. - library module. - """ - if state == 0: - sys.path.insert(0, os.getcwd()) - # Calling python doesn't have a path, so add to sys.path. - try: + class JediRL(object): + def complete(self, text, state): + """ + This complete stuff is pretty weird, a generator would make + a lot more sense, but probably due to backwards compatibility + this is still the way how it works. + + The only important part is stuff in the ``state == 0`` flow, + everything else has been copied from the ``rlcompleter`` std. + library module. + """ + if state == 0: + sys.path.insert(0, os.getcwd()) + # Calling python doesn't have a path, so add to sys.path. 
+ try: logging.debug("Start REPL completion: " + repr(text)) - interpreter = Interpreter(text, [namespace_module.__dict__]) - + interpreter = Interpreter(text, [namespace_module.__dict__]) + lines = split_lines(text) position = (len(lines), len(lines[-1])) name = get_on_completion_name( @@ -94,49 +94,49 @@ def setup_readline(namespace_module=__main__): position ) before = text[:len(text) - len(name)] - completions = interpreter.completions() + completions = interpreter.completions() logging.debug("REPL completions: %s", completions) except: logging.error("REPL Completion error:\n" + traceback.format_exc()) raise - finally: - sys.path.pop(0) - - self.matches = [before + c.name_with_symbols for c in completions] - try: - return self.matches[state] - except IndexError: - return None - - try: + finally: + sys.path.pop(0) + + self.matches = [before + c.name_with_symbols for c in completions] + try: + return self.matches[state] + except IndexError: + return None + + try: # Need to import this one as well to make sure it's executed before # this code. This didn't use to be an issue until 3.3. Starting with # 3.4 this is different, it always overwrites the completer if it's not # already imported here. import rlcompleter # noqa: F401 - import readline - except ImportError: + import readline + except ImportError: print("Jedi: Module readline not available.") - else: - readline.set_completer(JediRL().complete) - readline.parse_and_bind("tab: complete") - # jedi itself does the case matching - readline.parse_and_bind("set completion-ignore-case on") - # because it's easier to hit the tab just once - readline.parse_and_bind("set show-all-if-unmodified") - readline.parse_and_bind("set show-all-if-ambiguous on") - # don't repeat all the things written in the readline all the time - readline.parse_and_bind("set completion-prefix-display-length 2") - # No delimiters, Jedi handles that. - readline.set_completer_delims('') - - -def version_info(): - """ - Returns a namedtuple of Jedi's version, similar to Python's - ``sys.version_info``. - """ - Version = namedtuple('Version', 'major, minor, micro') - from jedi import __version__ + else: + readline.set_completer(JediRL().complete) + readline.parse_and_bind("tab: complete") + # jedi itself does the case matching + readline.parse_and_bind("set completion-ignore-case on") + # because it's easier to hit the tab just once + readline.parse_and_bind("set show-all-if-unmodified") + readline.parse_and_bind("set show-all-if-ambiguous on") + # don't repeat all the things written in the readline all the time + readline.parse_and_bind("set completion-prefix-display-length 2") + # No delimiters, Jedi handles that. + readline.set_completer_delims('') + + +def version_info(): + """ + Returns a namedtuple of Jedi's version, similar to Python's + ``sys.version_info``. 
+ """ + Version = namedtuple('Version', 'major, minor, micro') + from jedi import __version__ tupl = re.findall(r'[a-z]+|\d+', __version__) - return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) + return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) diff --git a/contrib/python/jedi/ya.make b/contrib/python/jedi/ya.make index eff2fef2a7..dfc0c7ecb8 100644 --- a/contrib/python/jedi/ya.make +++ b/contrib/python/jedi/ya.make @@ -1,45 +1,45 @@ PY23_LIBRARY(jedi) - + LICENSE(MIT) OWNER(g:python-contrib borman) - + VERSION(0.13.3) -PEERDIR( +PEERDIR( contrib/python/parso contrib/python/setuptools -) - -PY_SRCS( - TOP_LEVEL - jedi/__init__.py +) + +PY_SRCS( + TOP_LEVEL + jedi/__init__.py jedi/__main__.py - jedi/_compatibility.py - jedi/api/__init__.py - jedi/api/classes.py + jedi/_compatibility.py + jedi/api/__init__.py + jedi/api/classes.py jedi/api/completion.py jedi/api/environment.py jedi/api/exceptions.py - jedi/api/helpers.py - jedi/api/interpreter.py - jedi/api/keywords.py + jedi/api/helpers.py + jedi/api/interpreter.py + jedi/api/keywords.py jedi/api/project.py - jedi/api/replstartup.py - jedi/cache.py + jedi/api/replstartup.py + jedi/cache.py jedi/common/__init__.py jedi/common/context.py jedi/common/utils.py - jedi/debug.py - jedi/evaluate/__init__.py - jedi/evaluate/analysis.py + jedi/debug.py + jedi/evaluate/__init__.py + jedi/evaluate/analysis.py jedi/evaluate/arguments.py jedi/evaluate/base_context.py - jedi/evaluate/cache.py - jedi/evaluate/compiled/__init__.py + jedi/evaluate/cache.py + jedi/evaluate/compiled/__init__.py jedi/evaluate/compiled/access.py jedi/evaluate/compiled/context.py - jedi/evaluate/compiled/fake.py + jedi/evaluate/compiled/fake.py jedi/evaluate/compiled/getattr_static.py jedi/evaluate/compiled/mixed.py jedi/evaluate/compiled/subprocess/__init__.py @@ -53,30 +53,30 @@ PY_SRCS( jedi/evaluate/context/klass.py jedi/evaluate/context/module.py jedi/evaluate/context/namespace.py - jedi/evaluate/docstrings.py - jedi/evaluate/dynamic.py + jedi/evaluate/docstrings.py + jedi/evaluate/dynamic.py jedi/evaluate/filters.py - jedi/evaluate/finder.py - jedi/evaluate/flow_analysis.py - jedi/evaluate/helpers.py - jedi/evaluate/imports.py + jedi/evaluate/finder.py + jedi/evaluate/flow_analysis.py + jedi/evaluate/helpers.py + jedi/evaluate/imports.py jedi/evaluate/jedi_typing.py jedi/evaluate/lazy_context.py - jedi/evaluate/param.py + jedi/evaluate/param.py jedi/evaluate/parser_cache.py jedi/evaluate/pep0484.py - jedi/evaluate/recursion.py - jedi/evaluate/stdlib.py + jedi/evaluate/recursion.py + jedi/evaluate/stdlib.py jedi/evaluate/syntax_tree.py - jedi/evaluate/sys_path.py + jedi/evaluate/sys_path.py jedi/evaluate/usages.py jedi/evaluate/utils.py jedi/parser_utils.py - jedi/refactoring.py - jedi/settings.py - jedi/utils.py -) - + jedi/refactoring.py + jedi/settings.py + jedi/utils.py +) + RESOURCE_FILES( PREFIX contrib/python/jedi/ .dist-info/METADATA @@ -94,4 +94,4 @@ RESOURCE_FILES( NO_LINT() -END() +END() |