aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/python/jedi
diff options
context:
space:
mode:
authormonster <monster@ydb.tech>2022-07-07 14:41:37 +0300
committermonster <monster@ydb.tech>2022-07-07 14:41:37 +0300
commit06e5c21a835c0e923506c4ff27929f34e00761c2 (patch)
tree75efcbc6854ef9bd476eb8bf00cc5c900da436a2 /contrib/python/jedi
parent03f024c4412e3aa613bb543cf1660176320ba8f4 (diff)
downloadydb-06e5c21a835c0e923506c4ff27929f34e00761c2.tar.gz
fix ya.make
Diffstat (limited to 'contrib/python/jedi')
-rw-r--r--contrib/python/jedi/.dist-info/METADATA410
-rw-r--r--contrib/python/jedi/.dist-info/top_level.txt1
-rw-r--r--contrib/python/jedi/.yandex_meta/yamaker.yaml3
-rw-r--r--contrib/python/jedi/AUTHORS.txt55
-rw-r--r--contrib/python/jedi/LICENSE.txt24
-rw-r--r--contrib/python/jedi/README.rst231
-rw-r--r--contrib/python/jedi/jedi/__init__.py47
-rw-r--r--contrib/python/jedi/jedi/__main__.py48
-rw-r--r--contrib/python/jedi/jedi/_compatibility.py594
-rw-r--r--contrib/python/jedi/jedi/api/__init__.py509
-rw-r--r--contrib/python/jedi/jedi/api/classes.py681
-rw-r--r--contrib/python/jedi/jedi/api/completion.py292
-rw-r--r--contrib/python/jedi/jedi/api/environment.py445
-rw-r--r--contrib/python/jedi/jedi/api/exceptions.py10
-rw-r--r--contrib/python/jedi/jedi/api/helpers.py260
-rw-r--r--contrib/python/jedi/jedi/api/interpreter.py61
-rw-r--r--contrib/python/jedi/jedi/api/keywords.py84
-rw-r--r--contrib/python/jedi/jedi/api/project.py195
-rw-r--r--contrib/python/jedi/jedi/api/replstartup.py29
-rw-r--r--contrib/python/jedi/jedi/cache.py146
-rw-r--r--contrib/python/jedi/jedi/common/__init__.py1
-rw-r--r--contrib/python/jedi/jedi/common/context.py67
-rw-r--r--contrib/python/jedi/jedi/common/utils.py26
-rw-r--r--contrib/python/jedi/jedi/debug.py130
-rw-r--r--contrib/python/jedi/jedi/evaluate/__init__.py391
-rw-r--r--contrib/python/jedi/jedi/evaluate/analysis.py218
-rw-r--r--contrib/python/jedi/jedi/evaluate/arguments.py305
-rw-r--r--contrib/python/jedi/jedi/evaluate/base_context.py279
-rw-r--r--contrib/python/jedi/jedi/evaluate/cache.py77
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/__init__.py43
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/access.py483
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/context.py483
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake.py87
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym9
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym26
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym99
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym9
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym277
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym4
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym12
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/operator.pym33
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym5
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/getattr_static.py176
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/mixed.py238
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/subprocess/__init__.py397
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/subprocess/__main__.py55
-rw-r--r--contrib/python/jedi/jedi/evaluate/compiled/subprocess/functions.py113
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/__init__.py5
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/asynchronous.py38
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/function.py253
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/instance.py483
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/iterable.py732
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/klass.py221
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/module.py219
-rw-r--r--contrib/python/jedi/jedi/evaluate/context/namespace.py72
-rw-r--r--contrib/python/jedi/jedi/evaluate/docstrings.py307
-rw-r--r--contrib/python/jedi/jedi/evaluate/dynamic.py231
-rw-r--r--contrib/python/jedi/jedi/evaluate/filters.py492
-rw-r--r--contrib/python/jedi/jedi/evaluate/finder.py278
-rw-r--r--contrib/python/jedi/jedi/evaluate/flow_analysis.py118
-rw-r--r--contrib/python/jedi/jedi/evaluate/helpers.py239
-rw-r--r--contrib/python/jedi/jedi/evaluate/imports.py594
-rw-r--r--contrib/python/jedi/jedi/evaluate/jedi_typing.py104
-rw-r--r--contrib/python/jedi/jedi/evaluate/lazy_context.py59
-rw-r--r--contrib/python/jedi/jedi/evaluate/param.py194
-rw-r--r--contrib/python/jedi/jedi/evaluate/parser_cache.py6
-rw-r--r--contrib/python/jedi/jedi/evaluate/pep0484.py330
-rw-r--r--contrib/python/jedi/jedi/evaluate/recursion.py138
-rw-r--r--contrib/python/jedi/jedi/evaluate/stdlib.py321
-rw-r--r--contrib/python/jedi/jedi/evaluate/syntax_tree.py662
-rw-r--r--contrib/python/jedi/jedi/evaluate/sys_path.py226
-rw-r--r--contrib/python/jedi/jedi/evaluate/usages.py62
-rw-r--r--contrib/python/jedi/jedi/evaluate/utils.py119
-rw-r--r--contrib/python/jedi/jedi/parser_utils.py260
-rw-r--r--contrib/python/jedi/jedi/refactoring.py203
-rw-r--r--contrib/python/jedi/jedi/settings.py164
-rw-r--r--contrib/python/jedi/jedi/utils.py142
-rw-r--r--contrib/python/jedi/patches/01-arcadia.patch46
78 files changed, 0 insertions, 15486 deletions
diff --git a/contrib/python/jedi/.dist-info/METADATA b/contrib/python/jedi/.dist-info/METADATA
deleted file mode 100644
index 7fc8ce1fef..0000000000
--- a/contrib/python/jedi/.dist-info/METADATA
+++ /dev/null
@@ -1,410 +0,0 @@
-Metadata-Version: 2.0
-Name: jedi
-Version: 0.13.3
-Summary: An autocompletion tool for Python that can be used for text editors.
-Home-page: https://github.com/davidhalter/jedi
-Author: David Halter
-Author-email: davidhalter88@gmail.com
-Maintainer: David Halter
-Maintainer-email: davidhalter88@gmail.com
-License: MIT
-Keywords: python completion refactoring vim
-Platform: any
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Plugins
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Editors :: Integrated Development Environments (IDE)
-Classifier: Topic :: Utilities
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
-Provides-Extra: testing
-Requires-Dist: parso (>=0.3.0)
-Provides-Extra: testing
-Requires-Dist: colorama; extra == 'testing'
-Requires-Dist: docopt; extra == 'testing'
-Requires-Dist: pytest (>=3.1.0); extra == 'testing'
-
-###################################################################
-Jedi - an awesome autocompletion/static analysis library for Python
-###################################################################
-
-.. image:: https://img.shields.io/pypi/v/jedi.svg?style=flat
- :target: https://pypi.python.org/pypi/jedi
- :alt: PyPI version
-
-.. image:: https://img.shields.io/pypi/pyversions/jedi.svg
- :target: https://pypi.python.org/pypi/jedi
- :alt: Supported Python versions
-
-.. image:: https://travis-ci.org/davidhalter/jedi.svg?branch=master
- :target: https://travis-ci.org/davidhalter/jedi
- :alt: Linux Tests
-
-.. image:: https://ci.appveyor.com/api/projects/status/mgva3bbawyma1new/branch/master?svg=true
- :target: https://ci.appveyor.com/project/davidhalter/jedi/branch/master
- :alt: Windows Tests
-
-.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.svg?branch=master
- :target: https://coveralls.io/r/davidhalter/jedi
- :alt: Coverage status
-
-
-*If you have specific questions, please add an issue or ask on* `Stack Overflow
-<https://stackoverflow.com/questions/tagged/python-jedi>`_ *with the label* ``python-jedi``.
-
-
-Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
-historic focus is autocompletion, but does static analysis for now as well.
-Jedi is fast and is very well tested. It understands Python on a deeper level
-than all other static analysis frameworks for Python.
-
-Jedi has support for two different goto functions. It's possible to search for
-related names and to list all names in a Python file and infer them. Jedi
-understands docstrings and you can use Jedi autocompletion in your REPL as
-well.
-
-Jedi uses a very simple API to connect with IDEs. There's a reference
-implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
-which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
-It's really easy.
-
-Jedi can currently be used with the following editors/projects:
-
-- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_)
-- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_)
-- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
-- TextMate_ (Not sure if it's actually working)
-- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof
- <https://projects.kde.org/projects/kde/applications/kate/repository/show?rev=KDE%2F4.13>`_]
-- Atom_ (autocomplete-python-jedi_)
-- `GNOME Builder`_ (with support for GObject Introspection)
-- `Visual Studio Code`_ (via `Python Extension <https://marketplace.visualstudio.com/items?itemName=ms-python.python>`_)
-- Gedit (gedi_)
-- wdb_ - Web Debugger
-- `Eric IDE`_ (Available as a plugin)
-- `IPython 6.0.0+ <https://ipython.readthedocs.io/en/stable/whatsnew/version6.html>`_
-
-and many more!
-
-
-Here are some pictures taken from jedi-vim_:
-
-.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png
-
-Completion for almost anything (Ctrl+Space).
-
-.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png
-
-Display of function/class bodies, docstrings.
-
-.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png
-
-Pydoc support (Shift+k).
-
-There is also support for goto and renaming.
-
-Get the latest version from `github <https://github.com/davidhalter/jedi>`_
-(master branch should always be kind of stable/working).
-
-Docs are available at `https://jedi.readthedocs.org/en/latest/
-<https://jedi.readthedocs.org/en/latest/>`_. Pull requests with documentation
-enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic
-versioning <https://semver.org/>`_.
-
-If you want to stay up-to-date (News / RFCs), please subscribe to this `github
-thread <https://github.com/davidhalter/jedi/issues/1063>`_.
-
-
-
-Installation
-============
-
- pip install jedi
-
-Note: This just installs the Jedi library, not the editor plugins. For
-information about how to make it work with your editor, refer to the
-corresponding documentation.
-
-You don't want to use ``pip``? Please refer to the `manual
-<https://jedi.readthedocs.org/en/latest/docs/installation.html>`_.
-
-
-Feature Support and Caveats
-===========================
-
-Jedi really understands your Python code. For a comprehensive list what Jedi
-understands, see: `Features
-<https://jedi.readthedocs.org/en/latest/docs/features.html>`_. A list of
-caveats can be found on the same page.
-
-You can run Jedi on CPython 2.7 or 3.4+ but it should also
-understand/parse code older than those versions. Additionally you should be able
-to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
-very well.
-
-Tips on how to use Jedi efficiently can be found `here
-<https://jedi.readthedocs.org/en/latest/docs/features.html#recipes>`_.
-
-API
----
-
-You can find the documentation for the `API here <https://jedi.readthedocs.org/en/latest/docs/api.html>`_.
-
-
-Autocompletion / Goto / Pydoc
------------------------------
-
-Please check the API for a good explanation. There are the following commands:
-
-- ``jedi.Script.goto_assignments``
-- ``jedi.Script.completions``
-- ``jedi.Script.usages``
-
-The returned objects are very powerful and really all you might need.
-
-
-Autocompletion in your REPL (IPython, etc.)
--------------------------------------------
-
-Starting with IPython `6.0.0` Jedi is a dependency of IPython. Autocompletion
-in IPython is therefore possible without additional configuration.
-
-It's possible to have Jedi autocompletion in REPL modes - `example video <https://vimeo.com/122332037>`_.
-This means that in Python you can enable tab completion in a `REPL
-<https://jedi.readthedocs.org/en/latest/docs/usage.html#tab-completion-in-the-python-shell>`_.
-
-
-Static Analysis / Linter
-------------------------
-
-To do all forms of static analysis, please try to use ``jedi.names``. It will
-return a list of names that you can use to infer types and so on.
-
-Linting is another thing that is going to be part of Jedi. For now you can try
-an alpha version ``python -m jedi linter``. The API might change though and
-it's still buggy. It's Jedi's goal to be smarter than classic linter and
-understand ``AttributeError`` and other code issues.
-
-
-Refactoring
------------
-
-Jedi's parser would support refactoring, but there's no API to use it right
-now. If you're interested in helping out here, let me know. With the latest
-parser changes, it should be very easy to actually make it work.
-
-
-Development
-===========
-
-There's a pretty good and extensive `development documentation
-<https://jedi.readthedocs.org/en/latest/docs/development.html>`_.
-
-
-Testing
-=======
-
-The test suite depends on ``tox`` and ``pytest``::
-
- pip install tox pytest
-
-To run the tests for all supported Python versions::
-
- tox
-
-If you want to test only a specific Python version (e.g. Python 2.7), it's as
-easy as ::
-
- tox -e py27
-
-Tests are also run automatically on `Travis CI
-<https://travis-ci.org/davidhalter/jedi/>`_.
-
-For more detailed information visit the `testing documentation
-<https://jedi.readthedocs.org/en/latest/docs/testing.html>`_.
-
-
-Acknowledgements
-================
-
-- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of
- other things.
-- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :).
-- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
- (originally used in lib2to3).
-
-
-
-.. _jedi-vim: https://github.com/davidhalter/jedi-vim
-.. _youcompleteme: https://valloric.github.io/YouCompleteMe/
-.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
-.. _completor.vim: https://github.com/maralla/completor.vim
-.. _Jedi.el: https://github.com/tkf/emacs-jedi
-.. _company-mode: https://github.com/syohex/emacs-company-jedi
-.. _elpy: https://github.com/jorgenschaefer/elpy
-.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
-.. _ycmd: https://github.com/abingham/emacs-ycmd
-.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
-.. _anaconda: https://github.com/DamnWidget/anaconda
-.. _wdb: https://github.com/Kozea/wdb
-.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
-.. _Kate: https://kate-editor.org
-.. _Atom: https://atom.io/
-.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi
-.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder
-.. _Visual Studio Code: https://code.visualstudio.com/
-.. _gedi: https://github.com/isamert/gedi
-.. _Eric IDE: https://eric-ide.python-projects.org
-
-
-.. :changelog:
-
-Changelog
----------
-
-0.13.3 (2019-02-24)
-+++++++++++++++++++
-
-- Fixed an issue with embedded Python, see https://github.com/davidhalter/jedi-vim/issues/870
-
-0.13.2 (2018-12-15)
-+++++++++++++++++++
-
-- Fixed a bug that led to Jedi spawning a lot of subprocesses.
-
-0.13.1 (2018-10-02)
-+++++++++++++++++++
-
-- Bugfixes, because tensorflow completions were still slow.
-
-0.13.0 (2018-10-02)
-+++++++++++++++++++
-
-- A small release. Some bug fixes.
-- Remove Python 3.3 support. Python 3.3 support has been dropped by the Python
- foundation.
-- Default environments are now using the same Python version as the Python
- process. In 0.12.x, we used to load the latest Python version on the system.
-- Added ``include_builtins`` as a parameter to usages.
-- ``goto_assignments`` has a new ``follow_builtin_imports`` parameter that
- changes the previous behavior slightly.
-
-0.12.1 (2018-06-30)
-+++++++++++++++++++
-
-- This release forces you to upgrade parso. If you don't, nothing will work
- anymore. Otherwise changes should be limited to bug fixes. Unfortunately Jedi
- still uses a few internals of parso that make it hard to keep compatibility
- over multiple releases. Parso >=0.3.0 is going to be needed.
-
-0.12.0 (2018-04-15)
-+++++++++++++++++++
-
-- Virtualenv/Environment support
-- F-String Completion/Goto Support
-- Cannot crash with segfaults anymore
-- Cleaned up import logic
-- Understand async/await and autocomplete it (including async generators)
-- Better namespace completions
-- Passing tests for Windows (including CI for Windows)
-- Remove Python 2.6 support
-
-0.11.1 (2017-12-14)
-+++++++++++++++++++
-
-- Parso update - the caching layer was broken
-- Better usages - a lot of internal code was ripped out and improved.
-
-0.11.0 (2017-09-20)
-+++++++++++++++++++
-
-- Split Jedi's parser into a separate project called ``parso``.
-- Avoiding side effects in REPL completion.
-- Numpy docstring support should be much better.
-- Moved the `settings.*recursion*` away, they are no longer usable.
-
-0.10.2 (2017-04-05)
-+++++++++++++++++++
-
-- Python Packaging sucks. Some files were not included in 0.10.1.
-
-0.10.1 (2017-04-05)
-+++++++++++++++++++
-
-- Fixed a few very annoying bugs.
-- Prepared the parser to be factored out of Jedi.
-
-0.10.0 (2017-02-03)
-+++++++++++++++++++
-
-- Actual semantic completions for the complete Python syntax.
-- Basic type inference for ``yield from`` PEP 380.
-- PEP 484 support (most of the important features of it). Thanks Claude! (@reinhrst)
-- Added ``get_line_code`` to ``Definition`` and ``Completion`` objects.
-- Completely rewritten the type inference engine.
-- A new and better parser for (fast) parsing diffs of Python code.
-
-0.9.0 (2015-04-10)
-++++++++++++++++++
-
-- The import logic has been rewritten to look more like Python's. There is now
- an ``Evaluator.modules`` import cache, which resembles ``sys.modules``.
-- Integrated the parser of 2to3. This will make refactoring possible. It will
- also be possible to check for error messages (like compiling an AST would give)
- in the future.
-- With the new parser, the evaluation also completely changed. It's now simpler
- and more readable.
-- Completely rewritten REPL completion.
-- Added ``jedi.names``, a command to do static analysis. Thanks to that
- sourcegraph guys for sponsoring this!
-- Alpha version of the linter.
-
-
-0.8.1 (2014-07-23)
-+++++++++++++++++++
-
-- Bugfix release, the last release forgot to include files that improve
- autocompletion for builtin libraries. Fixed.
-
-0.8.0 (2014-05-05)
-+++++++++++++++++++
-
-- Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced
- drastically. Loading times are down as well (it takes basically as long as an
- import).
-- REPL completion is starting to become usable.
-- Various small API changes. Generally this release focuses on stability and
- refactoring of internal APIs.
-- Introducing operator precedence, which makes calculating correct Array
- indices and ``__getattr__`` strings possible.
-
-0.7.0 (2013-08-09)
-++++++++++++++++++
-
-- Switched from LGPL to MIT license.
-- Added an Interpreter class to the API to make autocompletion in REPL
- possible.
-- Added autocompletion support for namespace packages.
-- Add sith.py, a new random testing method.
-
-0.6.0 (2013-05-14)
-++++++++++++++++++
-
-- Much faster parser with builtin part caching.
-- A test suite, thanks @tkf.
-
-0.5 versions (2012)
-+++++++++++++++++++
-
-- Initial development.
-
-
diff --git a/contrib/python/jedi/.dist-info/top_level.txt b/contrib/python/jedi/.dist-info/top_level.txt
deleted file mode 100644
index 86c1cb19e2..0000000000
--- a/contrib/python/jedi/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jedi
diff --git a/contrib/python/jedi/.yandex_meta/yamaker.yaml b/contrib/python/jedi/.yandex_meta/yamaker.yaml
deleted file mode 100644
index 9f790bfdd7..0000000000
--- a/contrib/python/jedi/.yandex_meta/yamaker.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-additional_requirements:
-- setuptools
-
diff --git a/contrib/python/jedi/AUTHORS.txt b/contrib/python/jedi/AUTHORS.txt
deleted file mode 100644
index 0d4ac10e92..0000000000
--- a/contrib/python/jedi/AUTHORS.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Main Authors
-============
-
-David Halter (@davidhalter) <davidhalter88@gmail.com>
-Takafumi Arakaki (@tkf) <aka.tkf@gmail.com>
-
-Code Contributors
-=================
-
-Danilo Bargen (@dbrgn) <mail@dbrgn.ch>
-Laurens Van Houtven (@lvh) <_@lvh.cc>
-Aldo Stracquadanio (@Astrac) <aldo.strac@gmail.com>
-Jean-Louis Fuchs (@ganwell) <ganwell@fangorn.ch>
-tek (@tek)
-Yasha Borevich (@jjay) <j.borevich@gmail.com>
-Aaron Griffin <aaronmgriffin@gmail.com>
-andviro (@andviro)
-Mike Gilbert (@floppym) <floppym@gentoo.org>
-Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
-Lubos Trilety <ltrilety@redhat.com>
-Akinori Hattori (@hattya) <hattya@gmail.com>
-srusskih (@srusskih)
-Steven Silvester (@blink1073)
-Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
-Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
-Fredrik Bergroth (@fbergroth)
-Mathias Fußenegger (@mfussenegger)
-Syohei Yoshida (@syohex) <syohex@gmail.com>
-ppalucky (@ppalucky)
-immerrr (@immerrr) immerrr@gmail.com
-Albertas Agejevas (@alga)
-Savor d'Isavano (@KenetJervet) <newelevenken@163.com>
-Phillip Berndt (@phillipberndt) <phillip.berndt@gmail.com>
-Ian Lee (@IanLee1521) <IanLee1521@gmail.com>
-Farkhad Khatamov (@hatamov) <comsgn@gmail.com>
-Kevin Kelley (@kelleyk) <kelleyk@kelleyk.net>
-Sid Shanker (@squidarth) <sid.p.shanker@gmail.com>
-Reinoud Elhorst (@reinhrst)
-Guido van Rossum (@gvanrossum) <guido@python.org>
-Dmytro Sadovnychyi (@sadovnychyi) <jedi@dmit.ro>
-Cristi Burcă (@scribu)
-bstaint (@bstaint)
-Mathias Rav (@Mortal) <rav@cs.au.dk>
-Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
-Simon Ruggier (@sruggier)
-Élie Gouzien (@ElieGouzien)
-Robin Roth (@robinro)
-Malte Plath (@langsamer)
-Anton Zub (@zabulazza)
-Maksim Novikov (@m-novikov) <mnovikov.work@gmail.com>
-Tobias Rzepka (@TobiasRzepka)
-micbou (@micbou)
-Dima Gerasimov (@karlicoss) <karlicoss@gmail.com>
-
-Note: (@user) means a github user name.
diff --git a/contrib/python/jedi/LICENSE.txt b/contrib/python/jedi/LICENSE.txt
deleted file mode 100644
index 94f954567b..0000000000
--- a/contrib/python/jedi/LICENSE.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-All contributions towards Jedi are MIT licensed.
-
--------------------------------------------------------------------------------
-The MIT License (MIT)
-
-Copyright (c) <2013> <David Halter and others, see AUTHORS.txt>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/contrib/python/jedi/README.rst b/contrib/python/jedi/README.rst
deleted file mode 100644
index 64c7f90339..0000000000
--- a/contrib/python/jedi/README.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-###################################################################
-Jedi - an awesome autocompletion/static analysis library for Python
-###################################################################
-
-.. image:: https://img.shields.io/pypi/v/jedi.svg?style=flat
- :target: https://pypi.python.org/pypi/jedi
- :alt: PyPI version
-
-.. image:: https://img.shields.io/pypi/pyversions/jedi.svg
- :target: https://pypi.python.org/pypi/jedi
- :alt: Supported Python versions
-
-.. image:: https://travis-ci.org/davidhalter/jedi.svg?branch=master
- :target: https://travis-ci.org/davidhalter/jedi
- :alt: Linux Tests
-
-.. image:: https://ci.appveyor.com/api/projects/status/mgva3bbawyma1new/branch/master?svg=true
- :target: https://ci.appveyor.com/project/davidhalter/jedi/branch/master
- :alt: Windows Tests
-
-.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.svg?branch=master
- :target: https://coveralls.io/r/davidhalter/jedi
- :alt: Coverage status
-
-
-*If you have specific questions, please add an issue or ask on* `Stack Overflow
-<https://stackoverflow.com/questions/tagged/python-jedi>`_ *with the label* ``python-jedi``.
-
-
-Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
-historic focus is autocompletion, but does static analysis for now as well.
-Jedi is fast and is very well tested. It understands Python on a deeper level
-than all other static analysis frameworks for Python.
-
-Jedi has support for two different goto functions. It's possible to search for
-related names and to list all names in a Python file and infer them. Jedi
-understands docstrings and you can use Jedi autocompletion in your REPL as
-well.
-
-Jedi uses a very simple API to connect with IDEs. There's a reference
-implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
-which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
-It's really easy.
-
-Jedi can currently be used with the following editors/projects:
-
-- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_)
-- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_)
-- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
-- TextMate_ (Not sure if it's actually working)
-- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof
- <https://projects.kde.org/projects/kde/applications/kate/repository/show?rev=KDE%2F4.13>`_]
-- Atom_ (autocomplete-python-jedi_)
-- `GNOME Builder`_ (with support for GObject Introspection)
-- `Visual Studio Code`_ (via `Python Extension <https://marketplace.visualstudio.com/items?itemName=ms-python.python>`_)
-- Gedit (gedi_)
-- wdb_ - Web Debugger
-- `Eric IDE`_ (Available as a plugin)
-- `IPython 6.0.0+ <https://ipython.readthedocs.io/en/stable/whatsnew/version6.html>`_
-
-and many more!
-
-
-Here are some pictures taken from jedi-vim_:
-
-.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png
-
-Completion for almost anything (Ctrl+Space).
-
-.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png
-
-Display of function/class bodies, docstrings.
-
-.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png
-
-Pydoc support (Shift+k).
-
-There is also support for goto and renaming.
-
-Get the latest version from `github <https://github.com/davidhalter/jedi>`_
-(master branch should always be kind of stable/working).
-
-Docs are available at `https://jedi.readthedocs.org/en/latest/
-<https://jedi.readthedocs.org/en/latest/>`_. Pull requests with documentation
-enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic
-versioning <https://semver.org/>`_.
-
-If you want to stay up-to-date (News / RFCs), please subscribe to this `github
-thread <https://github.com/davidhalter/jedi/issues/1063>`_.
-
-
-
-Installation
-============
-
- pip install jedi
-
-Note: This just installs the Jedi library, not the editor plugins. For
-information about how to make it work with your editor, refer to the
-corresponding documentation.
-
-You don't want to use ``pip``? Please refer to the `manual
-<https://jedi.readthedocs.org/en/latest/docs/installation.html>`_.
-
-
-Feature Support and Caveats
-===========================
-
-Jedi really understands your Python code. For a comprehensive list what Jedi
-understands, see: `Features
-<https://jedi.readthedocs.org/en/latest/docs/features.html>`_. A list of
-caveats can be found on the same page.
-
-You can run Jedi on CPython 2.7 or 3.4+ but it should also
-understand/parse code older than those versions. Additionally you should be able
-to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
-very well.
-
-Tips on how to use Jedi efficiently can be found `here
-<https://jedi.readthedocs.org/en/latest/docs/features.html#recipes>`_.
-
-API
----
-
-You can find the documentation for the `API here <https://jedi.readthedocs.org/en/latest/docs/api.html>`_.
-
-
-Autocompletion / Goto / Pydoc
------------------------------
-
-Please check the API for a good explanation. There are the following commands:
-
-- ``jedi.Script.goto_assignments``
-- ``jedi.Script.completions``
-- ``jedi.Script.usages``
-
-The returned objects are very powerful and really all you might need.
-
-
-Autocompletion in your REPL (IPython, etc.)
--------------------------------------------
-
-Starting with IPython `6.0.0` Jedi is a dependency of IPython. Autocompletion
-in IPython is therefore possible without additional configuration.
-
-It's possible to have Jedi autocompletion in REPL modes - `example video <https://vimeo.com/122332037>`_.
-This means that in Python you can enable tab completion in a `REPL
-<https://jedi.readthedocs.org/en/latest/docs/usage.html#tab-completion-in-the-python-shell>`_.
-
-
-Static Analysis / Linter
-------------------------
-
-To do all forms of static analysis, please try to use ``jedi.names``. It will
-return a list of names that you can use to infer types and so on.
-
-Linting is another thing that is going to be part of Jedi. For now you can try
-an alpha version ``python -m jedi linter``. The API might change though and
-it's still buggy. It's Jedi's goal to be smarter than classic linter and
-understand ``AttributeError`` and other code issues.
-
-
-Refactoring
------------
-
-Jedi's parser would support refactoring, but there's no API to use it right
-now. If you're interested in helping out here, let me know. With the latest
-parser changes, it should be very easy to actually make it work.
-
-
-Development
-===========
-
-There's a pretty good and extensive `development documentation
-<https://jedi.readthedocs.org/en/latest/docs/development.html>`_.
-
-
-Testing
-=======
-
-The test suite depends on ``tox`` and ``pytest``::
-
- pip install tox pytest
-
-To run the tests for all supported Python versions::
-
- tox
-
-If you want to test only a specific Python version (e.g. Python 2.7), it's as
-easy as ::
-
- tox -e py27
-
-Tests are also run automatically on `Travis CI
-<https://travis-ci.org/davidhalter/jedi/>`_.
-
-For more detailed information visit the `testing documentation
-<https://jedi.readthedocs.org/en/latest/docs/testing.html>`_.
-
-
-Acknowledgements
-================
-
-- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of
- other things.
-- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :).
-- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
- (originally used in lib2to3).
-
-
-
-.. _jedi-vim: https://github.com/davidhalter/jedi-vim
-.. _youcompleteme: https://valloric.github.io/YouCompleteMe/
-.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
-.. _completor.vim: https://github.com/maralla/completor.vim
-.. _Jedi.el: https://github.com/tkf/emacs-jedi
-.. _company-mode: https://github.com/syohex/emacs-company-jedi
-.. _elpy: https://github.com/jorgenschaefer/elpy
-.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
-.. _ycmd: https://github.com/abingham/emacs-ycmd
-.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
-.. _anaconda: https://github.com/DamnWidget/anaconda
-.. _wdb: https://github.com/Kozea/wdb
-.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
-.. _Kate: https://kate-editor.org
-.. _Atom: https://atom.io/
-.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi
-.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder
-.. _Visual Studio Code: https://code.visualstudio.com/
-.. _gedi: https://github.com/isamert/gedi
-.. _Eric IDE: https://eric-ide.python-projects.org
diff --git a/contrib/python/jedi/jedi/__init__.py b/contrib/python/jedi/jedi/__init__.py
deleted file mode 100644
index d23739bee3..0000000000
--- a/contrib/python/jedi/jedi/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
-historic focus is autocompletion, but does static analysis for now as well.
-Jedi is fast and is very well tested. It understands Python on a deeper level
-than all other static analysis frameworks for Python.
-
-Jedi has support for two different goto functions. It's possible to search for
-related names and to list all names in a Python file and infer them. Jedi
-understands docstrings and you can use Jedi autocompletion in your REPL as
-well.
-
-Jedi uses a very simple API to connect with IDE's. There's a reference
-implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
-which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
-It's really easy.
-
-To give you a simple example how you can use the Jedi library, here is an
-example for the autocompletion feature:
-
->>> import jedi
->>> source = '''
-... import datetime
-... datetime.da'''
->>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
->>> script
-<Script: 'example.py' ...>
->>> completions = script.completions()
->>> completions #doctest: +ELLIPSIS
-[<Completion: date>, <Completion: datetime>, ...]
->>> print(completions[0].complete)
-te
->>> print(completions[0].name)
-date
-
-As you see Jedi is pretty simple and allows you to concentrate on writing a
-good text editor, while still having very good IDE features for Python.
-"""
-
-__version__ = '0.13.3'
-
-from jedi.api import Script, Interpreter, set_debug_function, \
- preload_module, names
-from jedi import settings
-from jedi.api.environment import find_virtualenvs, find_system_environments, \
- get_default_environment, InvalidPythonEnvironment, create_environment, \
- get_system_environment
-from jedi.api.exceptions import InternalError
diff --git a/contrib/python/jedi/jedi/__main__.py b/contrib/python/jedi/jedi/__main__.py
deleted file mode 100644
index f2ee047769..0000000000
--- a/contrib/python/jedi/jedi/__main__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import sys
-from os.path import join, dirname, abspath, isdir
-
-
-def _start_linter():
- """
- This is a pre-alpha API. You're not supposed to use it at all, except for
- testing. It will very likely change.
- """
- import jedi
-
- if '--debug' in sys.argv:
- jedi.set_debug_function()
-
- for path in sys.argv[2:]:
- if path.startswith('--'):
- continue
- if isdir(path):
- import fnmatch
- import os
-
- paths = []
- for root, dirnames, filenames in os.walk(path):
- for filename in fnmatch.filter(filenames, '*.py'):
- paths.append(os.path.join(root, filename))
- else:
- paths = [path]
-
- try:
- for path in paths:
- for error in jedi.Script(path=path)._analysis():
- print(error)
- except Exception:
- if '--pdb' in sys.argv:
- import traceback
- traceback.print_exc()
- import pdb
- pdb.post_mortem()
- else:
- raise
-
-
-if len(sys.argv) == 2 and sys.argv[1] == 'repl':
- # don't want to use __main__ only for repl yet, maybe we want to use it for
- # something else. So just use the keyword ``repl`` for now.
- print(join(dirname(abspath(__file__)), 'api', 'replstartup.py'))
-elif len(sys.argv) > 1 and sys.argv[1] == 'linter':
- _start_linter()
diff --git a/contrib/python/jedi/jedi/_compatibility.py b/contrib/python/jedi/jedi/_compatibility.py
deleted file mode 100644
index f8f93c1063..0000000000
--- a/contrib/python/jedi/jedi/_compatibility.py
+++ /dev/null
@@ -1,594 +0,0 @@
-"""
-To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been
-created. Clearly there is huge need to use conforming syntax.
-"""
-import errno
-import sys
-import os
-import re
-import pkgutil
-import warnings
-import inspect
-import subprocess
-try:
- import importlib
-except ImportError:
- pass
-
-is_py3 = sys.version_info[0] >= 3
-is_py35 = is_py3 and sys.version_info[1] >= 5
-py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
-
-
-class DummyFile(object):
- def __init__(self, loader, string):
- self.loader = loader
- self.string = string
-
- def read(self):
- return self.loader.get_source(self.string)
-
- def close(self):
- del self.loader
-
-
-def find_module_py34(string, path=None, full_name=None, is_global_search=True):
- spec = None
- loader = None
-
- for finder in sys.meta_path:
- if is_global_search and finder != importlib.machinery.PathFinder:
- p = None
- else:
- p = path
- try:
- find_spec = finder.find_spec
- except AttributeError:
- # These are old-school clases that still have a different API, just
- # ignore those.
- continue
-
- spec = find_spec(string, p)
- if spec is not None:
- loader = spec.loader
- if loader is None and not spec.has_location:
- # This is a namespace package.
- full_name = string if not path else full_name
- implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
- return None, implicit_ns_info, False
- break
-
- return find_module_py33(string, path, loader)
-
-
-def find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
- loader = loader or importlib.machinery.PathFinder.find_module(string, path)
-
- if loader is None and path is None: # Fallback to find builtins
- try:
- with warnings.catch_warnings(record=True):
- # Mute "DeprecationWarning: Use importlib.util.find_spec()
- # instead." While we should replace that in the future, it's
- # probably good to wait until we deprecate Python 3.3, since
- # it was added in Python 3.4 and find_loader hasn't been
- # removed in 3.6.
- loader = importlib.find_loader(string)
- except ValueError as e:
- # See #491. Importlib might raise a ValueError, to avoid this, we
- # just raise an ImportError to fix the issue.
- raise ImportError("Originally " + repr(e))
-
- if loader is None:
- raise ImportError("Couldn't find a loader for {}".format(string))
-
- try:
- is_package = loader.is_package(string)
- if is_package:
- if hasattr(loader, 'path'):
- module_path = os.path.dirname(loader.path)
- else:
- # At least zipimporter does not have path attribute
- module_path = os.path.dirname(loader.get_filename(string))
- if hasattr(loader, 'archive'):
- module_file = DummyFile(loader, string)
- else:
- module_file = None
- else:
- module_path = loader.get_filename(string)
- module_file = DummyFile(loader, string)
- except AttributeError:
- # ExtensionLoader has not attribute get_filename, instead it has a
- # path attribute that we can use to retrieve the module path
- try:
- module_path = loader.path
- module_file = DummyFile(loader, string)
- except AttributeError:
- module_path = string
- module_file = None
- finally:
- is_package = False
-
- if hasattr(loader, 'archive'):
- module_path = loader.archive
-
- return module_file, module_path, is_package
-
-
-def find_module_pre_py34(string, path=None, full_name=None, is_global_search=True):
- # This import is here, because in other places it will raise a
- # DeprecationWarning.
- import imp
- try:
- module_file, module_path, description = imp.find_module(string, path)
- module_type = description[2]
- return module_file, module_path, module_type is imp.PKG_DIRECTORY
- except ImportError:
- pass
-
- if path is None:
- path = sys.path
- for item in path:
- loader = pkgutil.get_importer(item)
- if loader:
- try:
- loader = loader.find_module(string)
- if loader:
- is_package = loader.is_package(string)
- is_archive = hasattr(loader, 'archive')
- module_path = loader.get_filename(string)
- if is_package:
- module_path = os.path.dirname(module_path)
- if is_archive:
- module_path = loader.archive
- file = None
- if not is_package or is_archive:
- file = DummyFile(loader, string)
- return file, module_path, is_package
- except ImportError:
- pass
- raise ImportError("No module named {}".format(string))
-
-
-find_module = find_module_py34 if is_py3 else find_module_pre_py34
-find_module.__doc__ = """
-Provides information about a module.
-
-This function isolates the differences in importing libraries introduced with
-python 3.3 on; it gets a module name and optionally a path. It will return a
-tuple containin an open file for the module (if not builtin), the filename
-or the name of the module if it is a builtin one and a boolean indicating
-if the module is contained in a package.
-"""
-
-
-def _iter_modules(paths, prefix=''):
- # Copy of pkgutil.iter_modules adapted to work with namespaces
-
- for path in paths:
- importer = pkgutil.get_importer(path)
-
- if not isinstance(importer, importlib.machinery.FileFinder):
- # We're only modifying the case for FileFinder. All the other cases
- # still need to be checked (like zip-importing). Do this by just
- # calling the pkgutil version.
- for mod_info in pkgutil.iter_modules([path], prefix):
- yield mod_info
- continue
-
- # START COPY OF pkutils._iter_file_finder_modules.
- if importer.path is None or not os.path.isdir(importer.path):
- return
-
- yielded = {}
-
- try:
- filenames = os.listdir(importer.path)
- except OSError:
- # ignore unreadable directories like import does
- filenames = []
- filenames.sort() # handle packages before same-named modules
-
- for fn in filenames:
- modname = inspect.getmodulename(fn)
- if modname == '__init__' or modname in yielded:
- continue
-
- # jedi addition: Avoid traversing special directories
- if fn.startswith('.') or fn == '__pycache__':
- continue
-
- path = os.path.join(importer.path, fn)
- ispkg = False
-
- if not modname and os.path.isdir(path) and '.' not in fn:
- modname = fn
- # A few jedi modifications: Don't check if there's an
- # __init__.py
- try:
- os.listdir(path)
- except OSError:
- # ignore unreadable directories like import does
- continue
- ispkg = True
-
- if modname and '.' not in modname:
- yielded[modname] = 1
- yield importer, prefix + modname, ispkg
- # END COPY
-
-
-iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules
-
-
-class ImplicitNSInfo(object):
- """Stores information returned from an implicit namespace spec"""
- def __init__(self, name, paths):
- self.name = name
- self.paths = paths
-
-
-if is_py3:
- all_suffixes = importlib.machinery.all_suffixes
-else:
- def all_suffixes():
- # Is deprecated and raises a warning in Python 3.6.
- import imp
- return [suffix for suffix, _, _ in imp.get_suffixes()]
-
-
-# unicode function
-try:
- unicode = unicode
-except NameError:
- unicode = str
-
-
-# re-raise function
-if is_py3:
- def reraise(exception, traceback):
- raise exception.with_traceback(traceback)
-else:
- eval(compile("""
-def reraise(exception, traceback):
- raise exception, None, traceback
-""", 'blub', 'exec'))
-
-reraise.__doc__ = """
-Re-raise `exception` with a `traceback` object.
-
-Usage::
-
- reraise(Exception, sys.exc_info()[2])
-
-"""
-
-
-class Python3Method(object):
- def __init__(self, func):
- self.func = func
-
- def __get__(self, obj, objtype):
- if obj is None:
- return lambda *args, **kwargs: self.func(*args, **kwargs)
- else:
- return lambda *args, **kwargs: self.func(obj, *args, **kwargs)
-
-
-def use_metaclass(meta, *bases):
- """ Create a class with a metaclass. """
- if not bases:
- bases = (object,)
- return meta("Py2CompatibilityMetaClass", bases, {})
-
-
-try:
- encoding = sys.stdout.encoding
- if encoding is None:
- encoding = 'utf-8'
-except AttributeError:
- encoding = 'ascii'
-
-
-def u(string, errors='strict'):
- """Cast to unicode DAMMIT!
- Written because Python2 repr always implicitly casts to a string, so we
- have to cast back to a unicode (and we now that we always deal with valid
- unicode, because we check that in the beginning).
- """
- if isinstance(string, bytes):
- return unicode(string, encoding='UTF-8', errors=errors)
- return string
-
-
-def cast_path(obj):
- """
- Take a bytes or str path and cast it to unicode.
-
- Apparently it is perfectly fine to pass both byte and unicode objects into
- the sys.path. This probably means that byte paths are normal at other
- places as well.
-
- Since this just really complicates everything and Python 2.7 will be EOL
- soon anyway, just go with always strings.
- """
- return u(obj, errors='replace')
-
-
-def force_unicode(obj):
- # Intentionally don't mix those two up, because those two code paths might
- # be different in the future (maybe windows?).
- return cast_path(obj)
-
-
-try:
- import builtins # module name in python 3
-except ImportError:
- import __builtin__ as builtins # noqa: F401
-
-
-import ast # noqa: F401
-
-
-def literal_eval(string):
- return ast.literal_eval(string)
-
-
-try:
- from itertools import zip_longest
-except ImportError:
- from itertools import izip_longest as zip_longest # Python 2 # noqa: F401
-
-try:
- FileNotFoundError = FileNotFoundError
-except NameError:
- FileNotFoundError = IOError
-
-try:
- NotADirectoryError = NotADirectoryError
-except NameError:
- NotADirectoryError = IOError
-
-try:
- PermissionError = PermissionError
-except NameError:
- PermissionError = IOError
-
-
-def no_unicode_pprint(dct):
- """
- Python 2/3 dict __repr__ may be different, because of unicode differens
- (with or without a `u` prefix). Normally in doctests we could use `pprint`
- to sort dicts and check for equality, but here we have to write a separate
- function to do that.
- """
- import pprint
- s = pprint.pformat(dct)
- print(re.sub("u'", "'", s))
-
-
-def print_to_stderr(*args):
- if is_py3:
- eval("print(*args, file=sys.stderr)")
- else:
- print >> sys.stderr, args
- sys.stderr.flush()
-
-
-def utf8_repr(func):
- """
- ``__repr__`` methods in Python 2 don't allow unicode objects to be
- returned. Therefore cast them to utf-8 bytes in this decorator.
- """
- def wrapper(self):
- result = func(self)
- if isinstance(result, unicode):
- return result.encode('utf-8')
- else:
- return result
-
- if is_py3:
- return func
- else:
- return wrapper
-
-
-if is_py3:
- import queue
-else:
- import Queue as queue # noqa: F401
-
-try:
- # Attempt to load the C implementation of pickle on Python 2 as it is way
- # faster.
- import cPickle as pickle
-except ImportError:
- import pickle
-if sys.version_info[:2] == (3, 3):
- """
- Monkeypatch the unpickler in Python 3.3. This is needed, because the
- argument `encoding='bytes'` is not supported in 3.3, but badly needed to
- communicate with Python 2.
- """
-
- class NewUnpickler(pickle._Unpickler):
- dispatch = dict(pickle._Unpickler.dispatch)
-
- def _decode_string(self, value):
- # Used to allow strings from Python 2 to be decoded either as
- # bytes or Unicode strings. This should be used only with the
- # STRING, BINSTRING and SHORT_BINSTRING opcodes.
- if self.encoding == "bytes":
- return value
- else:
- return value.decode(self.encoding, self.errors)
-
- def load_string(self):
- data = self.readline()[:-1]
- # Strip outermost quotes
- if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
- data = data[1:-1]
- else:
- raise pickle.UnpicklingError("the STRING opcode argument must be quoted")
- self.append(self._decode_string(pickle.codecs.escape_decode(data)[0]))
- dispatch[pickle.STRING[0]] = load_string
-
- def load_binstring(self):
- # Deprecated BINSTRING uses signed 32-bit length
- len, = pickle.struct.unpack('<i', self.read(4))
- if len < 0:
- raise pickle.UnpicklingError("BINSTRING pickle has negative byte count")
- data = self.read(len)
- self.append(self._decode_string(data))
- dispatch[pickle.BINSTRING[0]] = load_binstring
-
- def load_short_binstring(self):
- len = self.read(1)[0]
- data = self.read(len)
- self.append(self._decode_string(data))
- dispatch[pickle.SHORT_BINSTRING[0]] = load_short_binstring
-
- def load(file, fix_imports=True, encoding="ASCII", errors="strict"):
- return NewUnpickler(file, fix_imports=fix_imports,
- encoding=encoding, errors=errors).load()
-
- def loads(s, fix_imports=True, encoding="ASCII", errors="strict"):
- if isinstance(s, str):
- raise TypeError("Can't load pickle from unicode string")
- file = pickle.io.BytesIO(s)
- return NewUnpickler(file, fix_imports=fix_imports,
- encoding=encoding, errors=errors).load()
-
- pickle.Unpickler = NewUnpickler
- pickle.load = load
- pickle.loads = loads
-
-
-def pickle_load(file):
- try:
- if is_py3:
- return pickle.load(file, encoding='bytes')
- return pickle.load(file)
- # Python on Windows don't throw EOF errors for pipes. So reraise them with
- # the correct type, which is caught upwards.
- except OSError:
- if sys.platform == 'win32':
- raise EOFError()
- raise
-
-
-def pickle_dump(data, file, protocol):
- try:
- pickle.dump(data, file, protocol)
- # On Python 3.3 flush throws sometimes an error even though the writing
- # operation should be completed.
- file.flush()
- # Python on Windows don't throw EPIPE errors for pipes. So reraise them with
- # the correct type and error number.
- except OSError:
- if sys.platform == 'win32':
- raise IOError(errno.EPIPE, "Broken pipe")
- raise
-
-
-# Determine the highest protocol version compatible for a given list of Python
-# versions.
-def highest_pickle_protocol(python_versions):
- protocol = 4
- for version in python_versions:
- if version[0] == 2:
- # The minimum protocol version for the versions of Python that we
- # support (2.7 and 3.3+) is 2.
- return 2
- if version[1] < 4:
- protocol = 3
- return protocol
-
-
-try:
- from inspect import Parameter
-except ImportError:
- class Parameter(object):
- POSITIONAL_ONLY = object()
- POSITIONAL_OR_KEYWORD = object()
- VAR_POSITIONAL = object()
- KEYWORD_ONLY = object()
- VAR_KEYWORD = object()
-
-
-class GeneralizedPopen(subprocess.Popen):
- def __init__(self, *args, **kwargs):
- if os.name == 'nt':
- try:
- # Was introduced in Python 3.7.
- CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW
- except AttributeError:
- CREATE_NO_WINDOW = 0x08000000
- kwargs['creationflags'] = CREATE_NO_WINDOW
- # The child process doesn't need file descriptors except 0, 1, 2.
- # This is unix only.
- kwargs['close_fds'] = 'posix' in sys.builtin_module_names
- super(GeneralizedPopen, self).__init__(*args, **kwargs)
-
-
-# shutil.which is not available on Python 2.7.
-def which(cmd, mode=os.F_OK | os.X_OK, path=None):
- """Given a command, mode, and a PATH string, return the path which
- conforms to the given mode on the PATH, or None if there is no such
- file.
-
- `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
- of os.environ.get("PATH"), or can be overridden with a custom search
- path.
-
- """
- # Check that a given file can be accessed with the correct mode.
- # Additionally check that `file` is not a directory, as on Windows
- # directories pass the os.access check.
- def _access_check(fn, mode):
- return (os.path.exists(fn) and os.access(fn, mode)
- and not os.path.isdir(fn))
-
- # If we're given a path with a directory part, look it up directly rather
- # than referring to PATH directories. This includes checking relative to the
- # current directory, e.g. ./script
- if os.path.dirname(cmd):
- if _access_check(cmd, mode):
- return cmd
- return None
-
- if path is None:
- path = os.environ.get("PATH", os.defpath)
- if not path:
- return None
- path = path.split(os.pathsep)
-
- if sys.platform == "win32":
- # The current directory takes precedence on Windows.
- if os.curdir not in path:
- path.insert(0, os.curdir)
-
- # PATHEXT is necessary to check on Windows.
- pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
- # See if the given file matches any of the expected path extensions.
- # This will allow us to short circuit when given "python.exe".
- # If it does match, only test that one, otherwise we have to try
- # others.
- if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
- files = [cmd]
- else:
- files = [cmd + ext for ext in pathext]
- else:
- # On other platforms you don't have things like PATHEXT to tell you
- # what file suffixes are executable, so just pass on cmd as-is.
- files = [cmd]
-
- seen = set()
- for dir in path:
- normdir = os.path.normcase(dir)
- if normdir not in seen:
- seen.add(normdir)
- for thefile in files:
- name = os.path.join(dir, thefile)
- if _access_check(name, mode):
- return name
- return None
diff --git a/contrib/python/jedi/jedi/api/__init__.py b/contrib/python/jedi/jedi/api/__init__.py
deleted file mode 100644
index 9e568cf8f8..0000000000
--- a/contrib/python/jedi/jedi/api/__init__.py
+++ /dev/null
@@ -1,509 +0,0 @@
-"""
-The API basically only provides one class. You can create a :class:`Script` and
-use its methods.
-
-Additionally you can add a debug function with :func:`set_debug_function`.
-Alternatively, if you don't need a custom function and are happy with printing
-debug messages to stdout, simply call :func:`set_debug_function` without
-arguments.
-
-.. warning:: Please, note that Jedi is **not thread safe**.
-"""
-import os
-import sys
-import warnings
-
-import parso
-from parso.python import tree
-
-from jedi._compatibility import force_unicode, is_py3
-from jedi.parser_utils import get_executable_nodes
-from jedi import debug
-from jedi import settings
-from jedi import cache
-from jedi.api import classes
-from jedi.api import interpreter
-from jedi.api import helpers
-from jedi.api.completion import Completion
-from jedi.api.environment import InterpreterEnvironment
-from jedi.api.project import get_default_project
-from jedi.evaluate import Evaluator
-from jedi.evaluate import imports
-from jedi.evaluate import usages
-from jedi.evaluate.arguments import try_iter_content
-from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf
-from jedi.evaluate.sys_path import dotted_path_in_sys_path
-from jedi.evaluate.filters import TreeNameDefinition, ParamName
-from jedi.evaluate.syntax_tree import tree_name_to_contexts
-from jedi.evaluate.context import ModuleContext
-from jedi.evaluate.context.iterable import unpack_tuple_to_dict
-
-# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
-# can remove some "maximum recursion depth" errors.
-sys.setrecursionlimit(3000)
-
-
-class Script(object):
- """
- A Script is the base for completions, goto or whatever you want to do with
- |jedi|.
-
- You can either use the ``source`` parameter or ``path`` to read a file.
- Usually you're going to want to use both of them (in an editor).
-
- The script might be analyzed in a different ``sys.path`` than |jedi|:
-
- - if `sys_path` parameter is not ``None``, it will be used as ``sys.path``
- for the script;
-
- - if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment
- variable is defined, ``sys.path`` for the specified environment will be
- guessed (see :func:`jedi.evaluate.sys_path.get_venv_path`) and used for
- the script;
-
- - otherwise ``sys.path`` will match that of |jedi|.
-
- :param source: The source code of the current file, separated by newlines.
- :type source: str
- :param line: The line to perform actions on (starting with 1).
- :type line: int
- :param column: The column of the cursor (starting with 0).
- :type column: int
- :param path: The path of the file in the file system, or ``''`` if
- it hasn't been saved yet.
- :type path: str or None
- :param encoding: The encoding of ``source``, if it is not a
- ``unicode`` object (default ``'utf-8'``).
- :type encoding: str
- :param sys_path: ``sys.path`` to use during analysis of the script
- :type sys_path: list
- :param environment: TODO
- :type sys_path: Environment
- """
- def __init__(self, source=None, line=None, column=None, path=None,
- encoding='utf-8', sys_path=None, environment=None):
- self._orig_path = path
- # An empty path (also empty string) should always result in no path.
- self.path = os.path.abspath(path) if path else None
-
- if source is None:
- # TODO add a better warning than the traceback!
- with open(path, 'rb') as f:
- source = f.read()
-
- # Load the Python grammar of the current interpreter.
- self._grammar = parso.load_grammar()
-
- if sys_path is not None and not is_py3:
- sys_path = list(map(force_unicode, sys_path))
-
- # Load the Python grammar of the current interpreter.
- project = get_default_project(
- os.path.dirname(self.path)if path else os.getcwd()
- )
- # TODO deprecate and remove sys_path from the Script API.
- if sys_path is not None:
- project._sys_path = sys_path
- self._evaluator = Evaluator(
- project, environment=environment, script_path=self.path
- )
- self._project = project
- debug.speed('init')
- self._module_node, source = self._evaluator.parse_and_get_code(
- code=source,
- path=self.path,
- encoding=encoding,
- cache=False, # No disk cache, because the current script often changes.
- diff_cache=settings.fast_parser,
- cache_path=settings.cache_directory,
- )
- debug.speed('parsed')
- self._code_lines = parso.split_lines(source, keepends=True)
- self._code = source
- line = max(len(self._code_lines), 1) if line is None else line
- if not (0 < line <= len(self._code_lines)):
- raise ValueError('`line` parameter is not in a valid range.')
-
- line_string = self._code_lines[line - 1]
- line_len = len(line_string)
- if line_string.endswith('\r\n'):
- line_len -= 1
- if line_string.endswith('\n'):
- line_len -= 1
-
- column = line_len if column is None else column
- if not (0 <= column <= line_len):
- raise ValueError('`column` parameter (%d) is not in a valid range '
- '(0-%d) for line %d (%r).' % (
- column, line_len, line, line_string))
- self._pos = line, column
- self._path = path
-
- cache.clear_time_caches()
- debug.reset_time()
-
- def _get_module(self):
- name = '__main__'
- if self.path is not None:
- import_names = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path)
- if import_names is not None:
- name = '.'.join(import_names)
-
- module = ModuleContext(
- self._evaluator, self._module_node, self.path,
- code_lines=self._code_lines
- )
- imports.add_module_to_cache(self._evaluator, name, module)
- return module
-
- def __repr__(self):
- return '<%s: %s %r>' % (
- self.__class__.__name__,
- repr(self._orig_path),
- self._evaluator.environment,
- )
-
- def completions(self):
- """
- Return :class:`classes.Completion` objects. Those objects contain
- information about the completions, more than just names.
-
- :return: Completion objects, sorted by name and __ comes last.
- :rtype: list of :class:`classes.Completion`
- """
- debug.speed('completions start')
- completion = Completion(
- self._evaluator, self._get_module(), self._code_lines,
- self._pos, self.call_signatures
- )
- completions = completion.completions()
-
- def iter_import_completions():
- for c in completions:
- tree_name = c._name.tree_name
- if tree_name is None:
- continue
- definition = tree_name.get_definition()
- if definition is not None \
- and definition.type in ('import_name', 'import_from'):
- yield c
-
- if len(list(iter_import_completions())) > 10:
- # For now disable completions if there's a lot of imports that
- # might potentially be resolved. This is the case for tensorflow
- # and has been fixed for it. This is obviously temporary until we
- # have a better solution.
- self._evaluator.infer_enabled = False
-
- debug.speed('completions end')
- return completions
-
- def goto_definitions(self):
- """
- Return the definitions of a the path under the cursor. goto function!
- This follows complicated paths and returns the end, not the first
- definition. The big difference between :meth:`goto_assignments` and
- :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't
- follow imports and statements. Multiple objects may be returned,
- because Python itself is a dynamic language, which means depending on
- an option you can have two different versions of a function.
-
- :rtype: list of :class:`classes.Definition`
- """
- leaf = self._module_node.get_name_of_position(self._pos)
- if leaf is None:
- leaf = self._module_node.get_leaf_for_position(self._pos)
- if leaf is None:
- return []
-
- context = self._evaluator.create_context(self._get_module(), leaf)
- definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf)
-
- names = [s.name for s in definitions]
- defs = [classes.Definition(self._evaluator, name) for name in names]
- # The additional set here allows the definitions to become unique in an
- # API sense. In the internals we want to separate more things than in
- # the API.
- return helpers.sorted_definitions(set(defs))
-
- def goto_assignments(self, follow_imports=False, follow_builtin_imports=False):
- """
- Return the first definition found, while optionally following imports.
- Multiple objects may be returned, because Python itself is a
- dynamic language, which means depending on an option you can have two
- different versions of a function.
-
- :param follow_imports: The goto call will follow imports.
- :param follow_builtin_imports: If follow_imports is True will decide if
- it follow builtin imports.
- :rtype: list of :class:`classes.Definition`
- """
- def filter_follow_imports(names, check):
- for name in names:
- if check(name):
- new_names = list(filter_follow_imports(name.goto(), check))
- found_builtin = False
- if follow_builtin_imports:
- for new_name in new_names:
- if new_name.start_pos is None:
- found_builtin = True
-
- if found_builtin and not isinstance(name, imports.SubModuleName):
- yield name
- else:
- for new_name in new_names:
- yield new_name
- else:
- yield name
-
- tree_name = self._module_node.get_name_of_position(self._pos)
- if tree_name is None:
- return []
- context = self._evaluator.create_context(self._get_module(), tree_name)
- names = list(self._evaluator.goto(context, tree_name))
-
- if follow_imports:
- def check(name):
- return name.is_import()
- else:
- def check(name):
- return isinstance(name, imports.SubModuleName)
-
- names = filter_follow_imports(names, check)
-
- defs = [classes.Definition(self._evaluator, d) for d in set(names)]
- return helpers.sorted_definitions(defs)
-
- def usages(self, additional_module_paths=(), **kwargs):
- """
- Return :class:`classes.Definition` objects, which contain all
- names that point to the definition of the name under the cursor. This
- is very useful for refactoring (renaming), or to show all usages of a
- variable.
-
- .. todo:: Implement additional_module_paths
-
- :param additional_module_paths: Deprecated, never ever worked.
- :param include_builtins: Default True, checks if a usage is a builtin
- (e.g. ``sys``) and in that case does not return it.
- :rtype: list of :class:`classes.Definition`
- """
- if additional_module_paths:
- warnings.warn(
- "Deprecated since version 0.12.0. This never even worked, just ignore it.",
- DeprecationWarning,
- stacklevel=2
- )
-
- def _usages(include_builtins=True):
- tree_name = self._module_node.get_name_of_position(self._pos)
- if tree_name is None:
- # Must be syntax
- return []
-
- names = usages.usages(self._get_module(), tree_name)
-
- definitions = [classes.Definition(self._evaluator, n) for n in names]
- if not include_builtins:
- definitions = [d for d in definitions if not d.in_builtin_module()]
- return helpers.sorted_definitions(definitions)
- return _usages(**kwargs)
-
- def call_signatures(self):
- """
- Return the function object of the call you're currently in.
-
- E.g. if the cursor is here::
-
- abs(# <-- cursor is here
-
- This would return the ``abs`` function. On the other hand::
-
- abs()# <-- cursor is here
-
- This would return an empty list..
-
- :rtype: list of :class:`classes.CallSignature`
- """
- call_signature_details = \
- helpers.get_call_signature_details(self._module_node, self._pos)
- if call_signature_details is None:
- return []
-
- context = self._evaluator.create_context(
- self._get_module(),
- call_signature_details.bracket_leaf
- )
- definitions = helpers.cache_call_signatures(
- self._evaluator,
- context,
- call_signature_details.bracket_leaf,
- self._code_lines,
- self._pos
- )
- debug.speed('func_call followed')
-
- return [classes.CallSignature(self._evaluator, d.name,
- call_signature_details.bracket_leaf.start_pos,
- call_signature_details.call_index,
- call_signature_details.keyword_name_str)
- for d in definitions if hasattr(d, 'py__call__')]
-
- def _analysis(self):
- self._evaluator.is_analysis = True
- self._evaluator.analysis_modules = [self._module_node]
- module = self._get_module()
- try:
- for node in get_executable_nodes(self._module_node):
- context = module.create_context(node)
- if node.type in ('funcdef', 'classdef'):
- # Resolve the decorators.
- tree_name_to_contexts(self._evaluator, context, node.children[1])
- elif isinstance(node, tree.Import):
- import_names = set(node.get_defined_names())
- if node.is_nested():
- import_names |= set(path[-1] for path in node.get_paths())
- for n in import_names:
- imports.infer_import(context, n)
- elif node.type == 'expr_stmt':
- types = context.eval_node(node)
- for testlist in node.children[:-1:2]:
- # Iterate tuples.
- unpack_tuple_to_dict(context, types, testlist)
- else:
- if node.type == 'name':
- defs = self._evaluator.goto_definitions(context, node)
- else:
- defs = evaluate_call_of_leaf(context, node)
- try_iter_content(defs)
- self._evaluator.reset_recursion_limitations()
-
- ana = [a for a in self._evaluator.analysis if self.path == a.path]
- return sorted(set(ana), key=lambda x: x.line)
- finally:
- self._evaluator.is_analysis = False
-
-
-class Interpreter(Script):
- """
- Jedi API for Python REPLs.
-
- In addition to completion of simple attribute access, Jedi
- supports code completion based on static code analysis.
- Jedi can complete attributes of object which is not initialized
- yet.
-
- >>> from os.path import join
- >>> namespace = locals()
- >>> script = Interpreter('join("").up', [namespace])
- >>> print(script.completions()[0].name)
- upper
- """
-
- def __init__(self, source, namespaces, **kwds):
- """
- Parse `source` and mixin interpreted Python objects from `namespaces`.
-
- :type source: str
- :arg source: Code to parse.
- :type namespaces: list of dict
- :arg namespaces: a list of namespace dictionaries such as the one
- returned by :func:`locals`.
-
- Other optional arguments are same as the ones for :class:`Script`.
- If `line` and `column` are None, they are assumed be at the end of
- `source`.
- """
- try:
- namespaces = [dict(n) for n in namespaces]
- except Exception:
- raise TypeError("namespaces must be a non-empty list of dicts.")
-
- environment = kwds.get('environment', None)
- if environment is None:
- environment = InterpreterEnvironment()
- else:
- if not isinstance(environment, InterpreterEnvironment):
- raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
-
- super(Interpreter, self).__init__(source, environment=environment, **kwds)
- self.namespaces = namespaces
-
- def _get_module(self):
- return interpreter.MixedModuleContext(
- self._evaluator,
- self._module_node,
- self.namespaces,
- path=self.path,
- code_lines=self._code_lines,
- )
-
-
-def names(source=None, path=None, encoding='utf-8', all_scopes=False,
- definitions=True, references=False, environment=None):
- """
- Returns a list of `Definition` objects, containing name parts.
- This means you can call ``Definition.goto_assignments()`` and get the
- reference of a name.
- The parameters are the same as in :py:class:`Script`, except or the
- following ones:
-
- :param all_scopes: If True lists the names of all scopes instead of only
- the module namespace.
- :param definitions: If True lists the names that have been defined by a
- class, function or a statement (``a = b`` returns ``a``).
- :param references: If True lists all the names that are not listed by
- ``definitions=True``. E.g. ``a = b`` returns ``b``.
- """
- def def_ref_filter(_def):
- is_def = _def._name.tree_name.is_definition()
- return definitions and is_def or references and not is_def
-
- def create_name(name):
- if name.parent.type == 'param':
- cls = ParamName
- else:
- cls = TreeNameDefinition
- is_module = name.parent.type == 'file_input'
- return cls(
- module_context.create_context(name if is_module else name.parent),
- name
- )
-
- # Set line/column to a random position, because they don't matter.
- script = Script(source, line=1, column=0, path=path, encoding=encoding, environment=environment)
- module_context = script._get_module()
- defs = [
- classes.Definition(
- script._evaluator,
- create_name(name)
- ) for name in get_module_names(script._module_node, all_scopes)
- ]
- return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column))
-
-
-def preload_module(*modules):
- """
- Preloading modules tells Jedi to load a module now, instead of lazy parsing
- of modules. Usful for IDEs, to control which modules to load on startup.
-
- :param modules: different module names, list of string.
- """
- for m in modules:
- s = "import %s as x; x." % m
- Script(s, 1, len(s), None).completions()
-
-
-def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
- notices=True, speed=True):
- """
- Define a callback debug function to get all the debug messages.
-
- If you don't specify any arguments, debug messages will be printed to stdout.
-
- :param func_cb: The callback function for debug messages, with n params.
- """
- debug.debug_function = func_cb
- debug.enable_warning = warnings
- debug.enable_notice = notices
- debug.enable_speed = speed
diff --git a/contrib/python/jedi/jedi/api/classes.py b/contrib/python/jedi/jedi/api/classes.py
deleted file mode 100644
index 9602e04a8b..0000000000
--- a/contrib/python/jedi/jedi/api/classes.py
+++ /dev/null
@@ -1,681 +0,0 @@
-"""
-The :mod:`jedi.api.classes` module contains the return classes of the API.
-These classes are the much bigger part of the whole API, because they contain
-the interesting information about completion and goto operations.
-"""
-import re
-
-from parso.python.tree import search_ancestor
-
-from jedi import settings
-from jedi.evaluate.utils import ignored, unite
-from jedi.cache import memoize_method
-from jedi.evaluate import imports
-from jedi.evaluate import compiled
-from jedi.evaluate.imports import ImportName
-from jedi.evaluate.context import instance
-from jedi.evaluate.context import ClassContext, FunctionExecutionContext
-from jedi.api.keywords import KeywordName
-
-
-def _sort_names_by_start_pos(names):
- return sorted(names, key=lambda s: s.start_pos or (0, 0))
-
-
-def defined_names(evaluator, context):
- """
- List sub-definitions (e.g., methods in class).
-
- :type scope: Scope
- :rtype: list of Definition
- """
- filter = next(context.get_filters(search_global=True))
- names = [name for name in filter.values()]
- return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
-
-
-class BaseDefinition(object):
- _mapping = {
- 'posixpath': 'os.path',
- 'riscospath': 'os.path',
- 'ntpath': 'os.path',
- 'os2emxpath': 'os.path',
- 'macpath': 'os.path',
- 'genericpath': 'os.path',
- 'posix': 'os',
- '_io': 'io',
- '_functools': 'functools',
- '_sqlite3': 'sqlite3',
- '__builtin__': '',
- 'builtins': '',
- }
-
- _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
- 'argparse._ActionsContainer': 'argparse.ArgumentParser',
- }.items())
-
- def __init__(self, evaluator, name):
- self._evaluator = evaluator
- self._name = name
- """
- An instance of :class:`parso.reprsentation.Name` subclass.
- """
- self.is_keyword = isinstance(self._name, KeywordName)
-
- # generate a path to the definition
- self._module = name.get_root_context()
- if self.in_builtin_module():
- self.module_path = None
- else:
- self.module_path = self._module.py__file__()
- """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
-
- @property
- def name(self):
- """
- Name of variable/function/class/module.
-
- For example, for ``x = None`` it returns ``'x'``.
-
- :rtype: str or None
- """
- return self._name.string_name
-
- @property
- def type(self):
- """
- The type of the definition.
-
- Here is an example of the value of this attribute. Let's consider
- the following source. As what is in ``variable`` is unambiguous
- to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
- definition for ``sys``, ``f``, ``C`` and ``x``.
-
- >>> from jedi import Script
- >>> source = '''
- ... import keyword
- ...
- ... class C:
- ... pass
- ...
- ... class D:
- ... pass
- ...
- ... x = D()
- ...
- ... def f():
- ... pass
- ...
- ... for variable in [keyword, f, C, x]:
- ... variable'''
-
- >>> script = Script(source)
- >>> defs = script.goto_definitions()
-
- Before showing what is in ``defs``, let's sort it by :attr:`line`
- so that it is easy to relate the result to the source code.
-
- >>> defs = sorted(defs, key=lambda d: d.line)
- >>> defs # doctest: +NORMALIZE_WHITESPACE
- [<Definition module keyword>, <Definition class C>,
- <Definition instance D>, <Definition def f>]
-
- Finally, here is what you can get from :attr:`type`:
-
- >>> defs = [str(d.type) for d in defs] # It's unicode and in Py2 has u before it.
- >>> defs[0]
- 'module'
- >>> defs[1]
- 'class'
- >>> defs[2]
- 'instance'
- >>> defs[3]
- 'function'
-
- """
- tree_name = self._name.tree_name
- resolve = False
- if tree_name is not None:
- # TODO move this to their respective names.
- definition = tree_name.get_definition()
- if definition is not None and definition.type == 'import_from' and \
- tree_name.is_definition():
- resolve = True
-
- if isinstance(self._name, imports.SubModuleName) or resolve:
- for context in self._name.infer():
- return context.api_type
- return self._name.api_type
-
- def _path(self):
- """The path to a module/class/function definition."""
- def to_reverse():
- name = self._name
- if name.api_type == 'module':
- try:
- name = list(name.infer())[0].name
- except IndexError:
- pass
-
- if name.api_type in 'module':
- module_contexts = name.infer()
- if module_contexts:
- module_context, = module_contexts
- for n in reversed(module_context.py__name__().split('.')):
- yield n
- else:
- # We don't really know anything about the path here. This
- # module is just an import that would lead in an
- # ImportError. So simply return the name.
- yield name.string_name
- return
- else:
- yield name.string_name
-
- parent_context = name.parent_context
- while parent_context is not None:
- try:
- method = parent_context.py__name__
- except AttributeError:
- try:
- yield parent_context.name.string_name
- except AttributeError:
- pass
- else:
- for name in reversed(method().split('.')):
- yield name
- parent_context = parent_context.parent_context
- return reversed(list(to_reverse()))
-
- @property
- def module_name(self):
- """
- The module name.
-
- >>> from jedi import Script
- >>> source = 'import json'
- >>> script = Script(source, path='example.py')
- >>> d = script.goto_definitions()[0]
- >>> print(d.module_name) # doctest: +ELLIPSIS
- json
- """
- return self._module.name.string_name
-
- def in_builtin_module(self):
- """Whether this is a builtin module."""
- return isinstance(self._module, compiled.CompiledObject)
-
- @property
- def line(self):
- """The line where the definition occurs (starting with 1)."""
- start_pos = self._name.start_pos
- if start_pos is None:
- return None
- return start_pos[0]
-
- @property
- def column(self):
- """The column where the definition occurs (starting with 0)."""
- start_pos = self._name.start_pos
- if start_pos is None:
- return None
- return start_pos[1]
-
- def docstring(self, raw=False, fast=True):
- r"""
- Return a document string for this completion object.
-
- Example:
-
- >>> from jedi import Script
- >>> source = '''\
- ... def f(a, b=1):
- ... "Document for function f."
- ... '''
- >>> script = Script(source, 1, len('def f'), 'example.py')
- >>> doc = script.goto_definitions()[0].docstring()
- >>> print(doc)
- f(a, b=1)
- <BLANKLINE>
- Document for function f.
-
- Notice that useful extra information is added to the actual
- docstring. For function, it is call signature. If you need
- actual docstring, use ``raw=True`` instead.
-
- >>> print(script.goto_definitions()[0].docstring(raw=True))
- Document for function f.
-
- :param fast: Don't follow imports that are only one level deep like
- ``import foo``, but follow ``from foo import bar``. This makes
- sense for speed reasons. Completing `import a` is slow if you use
- the ``foo.docstring(fast=False)`` on every object, because it
- parses all libraries starting with ``a``.
- """
- return _Help(self._name).docstring(fast=fast, raw=raw)
-
- @property
- def description(self):
- """A textual description of the object."""
- return self._name.string_name
-
- @property
- def full_name(self):
- """
- Dot-separated path of this object.
-
- It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
- It is useful when you want to look up Python manual of the
- object at hand.
-
- Example:
-
- >>> from jedi import Script
- >>> source = '''
- ... import os
- ... os.path.join'''
- >>> script = Script(source, 3, len('os.path.join'), 'example.py')
- >>> print(script.goto_definitions()[0].full_name)
- os.path.join
-
- Notice that it returns ``'os.path.join'`` instead of (for example)
- ``'posixpath.join'``. This is not correct, since the modules name would
- be ``<module 'posixpath' ...>```. However most users find the latter
- more practical.
- """
- path = list(self._path())
- # TODO add further checks, the mapping should only occur on stdlib.
- if not path:
- return None # for keywords the path is empty
-
- with ignored(KeyError):
- path[0] = self._mapping[path[0]]
- for key, repl in self._tuple_mapping.items():
- if tuple(path[:len(key)]) == key:
- path = [repl] + path[len(key):]
-
- return '.'.join(path if path[0] else path[1:])
-
- def goto_assignments(self):
- if self._name.tree_name is None:
- return self
-
- names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
- return [Definition(self._evaluator, n) for n in names]
-
- def _goto_definitions(self):
- # TODO make this function public.
- return [Definition(self._evaluator, d.name) for d in self._name.infer()]
-
- @property
- @memoize_method
- def params(self):
- """
- Raises an ``AttributeError``if the definition is not callable.
- Otherwise returns a list of `Definition` that represents the params.
- """
- def get_param_names(context):
- param_names = []
- if context.api_type == 'function':
- param_names = list(context.get_param_names())
- if isinstance(context, instance.BoundMethod):
- param_names = param_names[1:]
- elif isinstance(context, (instance.AbstractInstanceContext, ClassContext)):
- if isinstance(context, ClassContext):
- search = u'__init__'
- else:
- search = u'__call__'
- names = context.get_function_slot_names(search)
- if not names:
- return []
-
- # Just take the first one here, not optimal, but currently
- # there's no better solution.
- inferred = names[0].infer()
- param_names = get_param_names(next(iter(inferred)))
- if isinstance(context, ClassContext):
- param_names = param_names[1:]
- return param_names
- elif isinstance(context, compiled.CompiledObject):
- return list(context.get_param_names())
- return param_names
-
- followed = list(self._name.infer())
- if not followed or not hasattr(followed[0], 'py__call__'):
- raise AttributeError('There are no params defined on this.')
- context = followed[0] # only check the first one.
-
- return [Definition(self._evaluator, n) for n in get_param_names(context)]
-
- def parent(self):
- context = self._name.parent_context
- if context is None:
- return None
-
- if isinstance(context, FunctionExecutionContext):
- context = context.function_context
- return Definition(self._evaluator, context.name)
-
- def __repr__(self):
- return "<%s %s>" % (type(self).__name__, self.description)
-
- def get_line_code(self, before=0, after=0):
- """
- Returns the line of code where this object was defined.
-
- :param before: Add n lines before the current line to the output.
- :param after: Add n lines after the current line to the output.
-
- :return str: Returns the line(s) of code or an empty string if it's a
- builtin.
- """
- if self.in_builtin_module():
- return ''
-
- lines = self._name.get_root_context().code_lines
-
- index = self._name.start_pos[0] - 1
- start_index = max(index - before, 0)
- return ''.join(lines[start_index:index + after + 1])
-
-
-class Completion(BaseDefinition):
- """
- `Completion` objects are returned from :meth:`api.Script.completions`. They
- provide additional information about a completion.
- """
- def __init__(self, evaluator, name, stack, like_name_length):
- super(Completion, self).__init__(evaluator, name)
-
- self._like_name_length = like_name_length
- self._stack = stack
-
- # Completion objects with the same Completion name (which means
- # duplicate items in the completion)
- self._same_name_completions = []
-
- def _complete(self, like_name):
- append = ''
- if settings.add_bracket_after_function \
- and self.type == 'Function':
- append = '('
-
- if self._name.api_type == 'param' and self._stack is not None:
- nonterminals = [stack_node.nonterminal for stack_node in self._stack]
- if 'trailer' in nonterminals and 'argument' not in nonterminals:
- # TODO this doesn't work for nested calls.
- append += '='
-
- name = self._name.string_name
- if like_name:
- name = name[self._like_name_length:]
- return name + append
-
- @property
- def complete(self):
- """
- Return the rest of the word, e.g. completing ``isinstance``::
-
- isinstan# <-- Cursor is here
-
- would return the string 'ce'. It also adds additional stuff, depending
- on your `settings.py`.
-
- Assuming the following function definition::
-
- def foo(param=0):
- pass
-
- completing ``foo(par`` would give a ``Completion`` which `complete`
- would be `am=`
-
-
- """
- return self._complete(True)
-
- @property
- def name_with_symbols(self):
- """
- Similar to :attr:`name`, but like :attr:`name` returns also the
- symbols, for example assuming the following function definition::
-
- def foo(param=0):
- pass
-
- completing ``foo(`` would give a ``Completion`` which
- ``name_with_symbols`` would be "param=".
-
- """
- return self._complete(False)
-
- def docstring(self, raw=False, fast=True):
- if self._like_name_length >= 3:
- # In this case we can just resolve the like name, because we
- # wouldn't load like > 100 Python modules anymore.
- fast = False
- return super(Completion, self).docstring(raw=raw, fast=fast)
-
- @property
- def description(self):
- """Provide a description of the completion object."""
- # TODO improve the class structure.
- return Definition.description.__get__(self)
-
- def __repr__(self):
- return '<%s: %s>' % (type(self).__name__, self._name.string_name)
-
- @memoize_method
- def follow_definition(self):
- """
- Return the original definitions. I strongly recommend not using it for
- your completions, because it might slow down |jedi|. If you want to
- read only a few objects (<=20), it might be useful, especially to get
- the original docstrings. The basic problem of this function is that it
- follows all results. This means with 1000 completions (e.g. numpy),
- it's just PITA-slow.
- """
- defs = self._name.infer()
- return [Definition(self._evaluator, d.name) for d in defs]
-
-
-class Definition(BaseDefinition):
- """
- *Definition* objects are returned from :meth:`api.Script.goto_assignments`
- or :meth:`api.Script.goto_definitions`.
- """
- def __init__(self, evaluator, definition):
- super(Definition, self).__init__(evaluator, definition)
-
- @property
- def description(self):
- """
- A description of the :class:`.Definition` object, which is heavily used
- in testing. e.g. for ``isinstance`` it returns ``def isinstance``.
-
- Example:
-
- >>> from jedi import Script
- >>> source = '''
- ... def f():
- ... pass
- ...
- ... class C:
- ... pass
- ...
- ... variable = f if random.choice([0,1]) else C'''
- >>> script = Script(source, column=3) # line is maximum by default
- >>> defs = script.goto_definitions()
- >>> defs = sorted(defs, key=lambda d: d.line)
- >>> defs
- [<Definition def f>, <Definition class C>]
- >>> str(defs[0].description) # strip literals in python2
- 'def f'
- >>> str(defs[1].description)
- 'class C'
-
- """
- typ = self.type
- tree_name = self._name.tree_name
- if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
- if typ == 'function':
- # For the description we want a short and a pythonic way.
- typ = 'def'
- return typ + ' ' + self._name.string_name
- elif typ == 'param':
- code = search_ancestor(tree_name, 'param').get_code(
- include_prefix=False,
- include_comma=False
- )
- return typ + ' ' + code
-
- definition = tree_name.get_definition() or tree_name
- # Remove the prefix, because that's not what we want for get_code
- # here.
- txt = definition.get_code(include_prefix=False)
- # Delete comments:
- txt = re.sub(r'#[^\n]+\n', ' ', txt)
- # Delete multi spaces/newlines
- txt = re.sub(r'\s+', ' ', txt).strip()
- return txt
-
- @property
- def desc_with_module(self):
- """
- In addition to the definition, also return the module.
-
- .. warning:: Don't use this function yet, its behaviour may change. If
- you really need it, talk to me.
-
- .. todo:: Add full path. This function is should return a
- `module.class.function` path.
- """
- position = '' if self.in_builtin_module else '@%s' % self.line
- return "%s:%s%s" % (self.module_name, self.description, position)
-
- @memoize_method
- def defined_names(self):
- """
- List sub-definitions (e.g., methods in class).
-
- :rtype: list of Definition
- """
- defs = self._name.infer()
- return sorted(
- unite(defined_names(self._evaluator, d) for d in defs),
- key=lambda s: s._name.start_pos or (0, 0)
- )
-
- def is_definition(self):
- """
- Returns True, if defined as a name in a statement, function or class.
- Returns False, if it's a reference to such a definition.
- """
- if self._name.tree_name is None:
- return True
- else:
- return self._name.tree_name.is_definition()
-
- def __eq__(self, other):
- return self._name.start_pos == other._name.start_pos \
- and self.module_path == other.module_path \
- and self.name == other.name \
- and self._evaluator == other._evaluator
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
-
-
-class CallSignature(Definition):
- """
- `CallSignature` objects is the return value of `Script.function_definition`.
- It knows what functions you are currently in. e.g. `isinstance(` would
- return the `isinstance` function. without `(` it would return nothing.
- """
- def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
- super(CallSignature, self).__init__(evaluator, executable_name)
- self._index = index
- self._key_name_str = key_name_str
- self._bracket_start_pos = bracket_start_pos
-
- @property
- def index(self):
- """
- The Param index of the current call.
- Returns None if the index cannot be found in the curent call.
- """
- if self._key_name_str is not None:
- for i, param in enumerate(self.params):
- if self._key_name_str == param.name:
- return i
- if self.params:
- param_name = self.params[-1]._name
- if param_name.tree_name is not None:
- if param_name.tree_name.get_definition().star_count == 2:
- return i
- return None
-
- if self._index >= len(self.params):
- for i, param in enumerate(self.params):
- tree_name = param._name.tree_name
- if tree_name is not None:
- # *args case
- if tree_name.get_definition().star_count == 1:
- return i
- return None
- return self._index
-
- @property
- def bracket_start(self):
- """
- The indent of the bracket that is responsible for the last function
- call.
- """
- return self._bracket_start_pos
-
- @property
- def _params_str(self):
- return ', '.join([p.description[6:]
- for p in self.params])
-
- def __repr__(self):
- return '<%s: %s index=%r params=[%s]>' % (
- type(self).__name__,
- self._name.string_name,
- self._index,
- self._params_str,
- )
-
-
-class _Help(object):
- """
- Temporary implementation, will be used as `Script.help() or something in
- the future.
- """
- def __init__(self, definition):
- self._name = definition
-
- @memoize_method
- def _get_contexts(self, fast):
- if isinstance(self._name, ImportName) and fast:
- return {}
-
- if self._name.api_type == 'statement':
- return {}
-
- return self._name.infer()
-
- def docstring(self, fast=True, raw=True):
- """
- The docstring ``__doc__`` for any object.
-
- See :attr:`doc` for example.
- """
- # TODO: Use all of the followed objects as output. Possibly divinding
- # them by a few dashes.
- for context in self._get_contexts(fast=fast):
- return context.py__doc__(include_call_signature=not raw)
-
- return ''
diff --git a/contrib/python/jedi/jedi/api/completion.py b/contrib/python/jedi/jedi/api/completion.py
deleted file mode 100644
index 358d726bea..0000000000
--- a/contrib/python/jedi/jedi/api/completion.py
+++ /dev/null
@@ -1,292 +0,0 @@
-from parso.python.token import PythonTokenTypes
-from parso.python import tree
-from parso.tree import search_ancestor, Leaf
-
-from jedi._compatibility import Parameter
-from jedi import debug
-from jedi import settings
-from jedi.api import classes
-from jedi.api import helpers
-from jedi.evaluate import imports
-from jedi.api import keywords
-from jedi.evaluate.helpers import evaluate_call_of_leaf
-from jedi.evaluate.filters import get_global_filters
-from jedi.parser_utils import get_statement_of_position
-
-
-def get_call_signature_param_names(call_signatures):
- # add named params
- for call_sig in call_signatures:
- for p in call_sig.params:
- # Allow protected access, because it's a public API.
- if p._name.get_kind() in (Parameter.POSITIONAL_OR_KEYWORD,
- Parameter.KEYWORD_ONLY):
- yield p._name
-
-
-def filter_names(evaluator, completion_names, stack, like_name):
- comp_dct = {}
- if settings.case_insensitive_completion:
- like_name = like_name.lower()
- for name in completion_names:
- string = name.string_name
- if settings.case_insensitive_completion:
- string = string.lower()
-
- if string.startswith(like_name):
- new = classes.Completion(
- evaluator,
- name,
- stack,
- len(like_name)
- )
- k = (new.name, new.complete) # key
- if k in comp_dct and settings.no_completion_duplicates:
- comp_dct[k]._same_name_completions.append(new)
- else:
- comp_dct[k] = new
- yield new
-
-
-def get_user_scope(module_context, position):
- """
- Returns the scope in which the user resides. This includes flows.
- """
- user_stmt = get_statement_of_position(module_context.tree_node, position)
- if user_stmt is None:
- def scan(scope):
- for s in scope.children:
- if s.start_pos <= position <= s.end_pos:
- if isinstance(s, (tree.Scope, tree.Flow)) \
- or s.type in ('async_stmt', 'async_funcdef'):
- return scan(s) or s
- elif s.type in ('suite', 'decorated'):
- return scan(s)
- return None
-
- scanned_node = scan(module_context.tree_node)
- if scanned_node:
- return module_context.create_context(scanned_node, node_is_context=True)
- return module_context
- else:
- return module_context.create_context(user_stmt)
-
-
-def get_flow_scope_node(module_node, position):
- node = module_node.get_leaf_for_position(position, include_prefixes=True)
- while not isinstance(node, (tree.Scope, tree.Flow)):
- node = node.parent
-
- return node
-
-
-class Completion:
- def __init__(self, evaluator, module, code_lines, position, call_signatures_method):
- self._evaluator = evaluator
- self._module_context = module
- self._module_node = module.tree_node
- self._code_lines = code_lines
-
- # The first step of completions is to get the name
- self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
- # The actual cursor position is not what we need to calculate
- # everything. We want the start of the name we're on.
- self._position = position[0], position[1] - len(self._like_name)
- self._call_signatures_method = call_signatures_method
-
- def completions(self):
- completion_names = self._get_context_completions()
-
- completions = filter_names(self._evaluator, completion_names,
- self.stack, self._like_name)
-
- return sorted(completions, key=lambda x: (x.name.startswith('__'),
- x.name.startswith('_'),
- x.name.lower()))
-
- def _get_context_completions(self):
- """
- Analyzes the context that a completion is made in and decides what to
- return.
-
- Technically this works by generating a parser stack and analysing the
- current stack for possible grammar nodes.
-
- Possible enhancements:
- - global/nonlocal search global
- - yield from / raise from <- could be only exceptions/generators
- - In args: */**: no completion
- - In params (also lambda): no completion before =
- """
-
- grammar = self._evaluator.grammar
-
- try:
- self.stack = stack = helpers.get_stack_at_position(
- grammar, self._code_lines, self._module_node, self._position
- )
- except helpers.OnErrorLeaf as e:
- self.stack = stack = None
- if e.error_leaf.value == '.':
- # After ErrorLeaf's that are dots, we will not do any
- # completions since this probably just confuses the user.
- return []
- # If we don't have a context, just use global completion.
-
- return self._global_completions()
-
- allowed_transitions = \
- list(stack._allowed_transition_names_and_token_types())
-
- if 'if' in allowed_transitions:
- leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
- previous_leaf = leaf.get_previous_leaf()
-
- indent = self._position[1]
- if not (leaf.start_pos <= self._position <= leaf.end_pos):
- indent = leaf.start_pos[1]
-
- if previous_leaf is not None:
- stmt = previous_leaf
- while True:
- stmt = search_ancestor(
- stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
- 'error_node',
- )
- if stmt is None:
- break
-
- type_ = stmt.type
- if type_ == 'error_node':
- first = stmt.children[0]
- if isinstance(first, Leaf):
- type_ = first.value + '_stmt'
- # Compare indents
- if stmt.start_pos[1] == indent:
- if type_ == 'if_stmt':
- allowed_transitions += ['elif', 'else']
- elif type_ == 'try_stmt':
- allowed_transitions += ['except', 'finally', 'else']
- elif type_ == 'for_stmt':
- allowed_transitions.append('else')
-
- completion_names = list(self._get_keyword_completion_names(allowed_transitions))
-
- if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
- PythonTokenTypes.INDENT)):
- # This means that we actually have to do type inference.
-
- nonterminals = [stack_node.nonterminal for stack_node in stack]
-
- nodes = [node for stack_node in stack for node in stack_node.nodes]
-
- if nodes and nodes[-1] in ('as', 'def', 'class'):
- # No completions for ``with x as foo`` and ``import x as foo``.
- # Also true for defining names as a class or function.
- return list(self._get_class_context_completions(is_function=True))
- elif "import_stmt" in nonterminals:
- level, names = self._parse_dotted_names(nodes, "import_from" in nonterminals)
-
- only_modules = not ("import_from" in nonterminals and 'import' in nodes)
- completion_names += self._get_importer_names(
- names,
- level,
- only_modules=only_modules,
- )
- elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
- dot = self._module_node.get_leaf_for_position(self._position)
- completion_names += self._trailer_completions(dot.get_previous_leaf())
- else:
- completion_names += self._global_completions()
- completion_names += self._get_class_context_completions(is_function=False)
-
- if 'trailer' in nonterminals:
- call_signatures = self._call_signatures_method()
- completion_names += get_call_signature_param_names(call_signatures)
-
- return completion_names
-
- def _get_keyword_completion_names(self, allowed_transitions):
- for k in allowed_transitions:
- if isinstance(k, str) and k.isalpha():
- yield keywords.KeywordName(self._evaluator, k)
-
- def _global_completions(self):
- context = get_user_scope(self._module_context, self._position)
- debug.dbg('global completion scope: %s', context)
- flow_scope_node = get_flow_scope_node(self._module_node, self._position)
- filters = get_global_filters(
- self._evaluator,
- context,
- self._position,
- origin_scope=flow_scope_node
- )
- completion_names = []
- for filter in filters:
- completion_names += filter.values()
- return completion_names
-
- def _trailer_completions(self, previous_leaf):
- user_context = get_user_scope(self._module_context, self._position)
- evaluation_context = self._evaluator.create_context(
- self._module_context, previous_leaf
- )
- contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf)
- completion_names = []
- debug.dbg('trailer completion contexts: %s', contexts)
- for context in contexts:
- for filter in context.get_filters(
- search_global=False, origin_scope=user_context.tree_node):
- completion_names += filter.values()
- return completion_names
-
- def _parse_dotted_names(self, nodes, is_import_from):
- level = 0
- names = []
- for node in nodes[1:]:
- if node in ('.', '...'):
- if not names:
- level += len(node.value)
- elif node.type == 'dotted_name':
- names += node.children[::2]
- elif node.type == 'name':
- names.append(node)
- elif node == ',':
- if not is_import_from:
- names = []
- else:
- # Here if the keyword `import` comes along it stops checking
- # for names.
- break
- return level, names
-
- def _get_importer_names(self, names, level=0, only_modules=True):
- names = [n.value for n in names]
- i = imports.Importer(self._evaluator, names, self._module_context, level)
- return i.completion_names(self._evaluator, only_modules=only_modules)
-
- def _get_class_context_completions(self, is_function=True):
- """
- Autocomplete inherited methods when overriding in child class.
- """
- leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
- cls = tree.search_ancestor(leaf, 'classdef')
- if isinstance(cls, (tree.Class, tree.Function)):
- # Complete the methods that are defined in the super classes.
- random_context = self._module_context.create_context(
- cls,
- node_is_context=True
- )
- else:
- return
-
- if cls.start_pos[1] >= leaf.start_pos[1]:
- return
-
- filters = random_context.get_filters(search_global=False, is_instance=True)
- # The first dict is the dictionary of class itself.
- next(filters)
- for filter in filters:
- for name in filter.values():
- if (name.api_type == 'function') == is_function:
- yield name
diff --git a/contrib/python/jedi/jedi/api/environment.py b/contrib/python/jedi/jedi/api/environment.py
deleted file mode 100644
index a6616c5a1d..0000000000
--- a/contrib/python/jedi/jedi/api/environment.py
+++ /dev/null
@@ -1,445 +0,0 @@
-"""
-Environments are a way to activate different Python versions or Virtualenvs for
-static analysis. The Python binary in that environment is going to be executed.
-"""
-import os
-import sys
-import hashlib
-import filecmp
-from collections import namedtuple
-
-from jedi._compatibility import highest_pickle_protocol, which
-from jedi.cache import memoize_method, time_cache
-from jedi.evaluate.compiled.subprocess import CompiledSubprocess, \
- EvaluatorSameProcess, EvaluatorSubprocess
-
-import parso
-
-_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
-
-_SUPPORTED_PYTHONS = ['3.7', '3.6', '3.5', '3.4', '3.3', '2.7']
-_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
-_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
-
-
-class InvalidPythonEnvironment(Exception):
- """
- If you see this exception, the Python executable or Virtualenv you have
- been trying to use is probably not a correct Python version.
- """
-
-
-class _BaseEnvironment(object):
- @memoize_method
- def get_grammar(self):
- version_string = '%s.%s' % (self.version_info.major, self.version_info.minor)
- return parso.load_grammar(version=version_string)
-
- @property
- def _sha256(self):
- try:
- return self._hash
- except AttributeError:
- self._hash = _calculate_sha256_for_file(self.executable)
- return self._hash
-
-
-def _get_info():
- return (
- sys.executable,
- sys.prefix,
- sys.version_info[:3],
- )
-
-
-class Environment(_BaseEnvironment):
- """
- This class is supposed to be created by internal Jedi architecture. You
- should not create it directly. Please use create_environment or the other
- functions instead. It is then returned by that function.
- """
- _subprocess = None
-
- def __init__(self, executable):
- self._start_executable = executable
- # Initialize the environment
- self._get_subprocess()
-
- def _get_subprocess(self):
- if self._subprocess is not None and not self._subprocess.is_crashed:
- return self._subprocess
-
- try:
- self._subprocess = CompiledSubprocess(self._start_executable)
- info = self._subprocess._send(None, _get_info)
- except Exception as exc:
- raise InvalidPythonEnvironment(
- "Could not get version information for %r: %r" % (
- self._start_executable,
- exc))
-
- # Since it could change and might not be the same(?) as the one given,
- # set it here.
- self.executable = info[0]
- """
- The Python executable, matches ``sys.executable``.
- """
- self.path = info[1]
- """
- The path to an environment, matches ``sys.prefix``.
- """
- self.version_info = _VersionInfo(*info[2])
- """
- Like ``sys.version_info``. A tuple to show the current Environment's
- Python version.
- """
-
- # py2 sends bytes via pickle apparently?!
- if self.version_info.major == 2:
- self.executable = self.executable.decode()
- self.path = self.path.decode()
-
- # Adjust pickle protocol according to host and client version.
- self._subprocess._pickle_protocol = highest_pickle_protocol([
- sys.version_info, self.version_info])
-
- return self._subprocess
-
- def __repr__(self):
- version = '.'.join(str(i) for i in self.version_info)
- return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)
-
- def get_evaluator_subprocess(self, evaluator):
- return EvaluatorSubprocess(evaluator, self._get_subprocess())
-
- @memoize_method
- def get_sys_path(self):
- """
- The sys path for this environment. Does not include potential
- modifications like ``sys.path.append``.
-
- :returns: list of str
- """
- # It's pretty much impossible to generate the sys path without actually
- # executing Python. The sys path (when starting with -S) itself depends
- # on how the Python version was compiled (ENV variables).
- # If you omit -S when starting Python (normal case), additionally
- # site.py gets executed.
- return self._get_subprocess().get_sys_path()
-
-
-class SameEnvironment(Environment):
- def __init__(self):
- self._start_executable = self.executable = sys.executable
- self.path = sys.prefix
- self.version_info = _VersionInfo(*sys.version_info[:3])
-
-
-class InterpreterEnvironment(_BaseEnvironment):
- def __init__(self):
- self.version_info = _VersionInfo(*sys.version_info[:3])
-
- def get_evaluator_subprocess(self, evaluator):
- return EvaluatorSameProcess(evaluator)
-
- def get_sys_path(self):
- return sys.path
-
-
-def _get_virtual_env_from_var():
- """Get virtualenv environment from VIRTUAL_ENV environment variable.
-
- It uses `safe=False` with ``create_environment``, because the environment
- variable is considered to be safe / controlled by the user solely.
- """
- var = os.environ.get('VIRTUAL_ENV')
- if var:
- if var == sys.prefix:
- return _try_get_same_env()
-
- try:
- return create_environment(var, safe=False)
- except InvalidPythonEnvironment:
- pass
-
-
-def _calculate_sha256_for_file(path):
- sha256 = hashlib.sha256()
- with open(path, 'rb') as f:
- for block in iter(lambda: f.read(filecmp.BUFSIZE), b''):
- sha256.update(block)
- return sha256.hexdigest()
-
-
-def get_default_environment():
- """
- Tries to return an active Virtualenv. If there is no VIRTUAL_ENV variable
- set it will return the latest Python version installed on the system. This
- makes it possible to use as many new Python features as possible when using
- autocompletion and other functionality.
-
- :returns: :class:`Environment`
- """
- virtual_env = _get_virtual_env_from_var()
- if virtual_env is not None:
- return virtual_env
-
- return _try_get_same_env()
-
-
-def _try_get_same_env():
- env = SameEnvironment()
- if not os.path.basename(env.executable).lower().startswith('python'):
- # This tries to counter issues with embedding. In some cases (e.g.
- # VIM's Python Mac/Windows, sys.executable is /foo/bar/vim. This
- # happens, because for Mac a function called `_NSGetExecutablePath` is
- # used and for Windows `GetModuleFileNameW`. These are both platform
- # specific functions. For all other systems sys.executable should be
- # alright. However here we try to generalize:
- #
- # 1. Check if the executable looks like python (heuristic)
- # 2. In case it's not try to find the executable
- # 3. In case we don't find it use an interpreter environment.
- #
- # The last option will always work, but leads to potential crashes of
- # Jedi - which is ok, because it happens very rarely and even less,
- # because the code below should work for most cases.
- if os.name == 'nt':
- # The first case would be a virtualenv and the second a normal
- # Python installation.
- checks = (r'Scripts\python.exe', 'python.exe')
- else:
- # For unix it looks like Python is always in a bin folder.
- checks = (
- 'bin/python%s.%s' % (sys.version_info[0], sys.version[1]),
- 'bin/python%s' % (sys.version_info[0]),
- 'bin/python',
- )
- for check in checks:
- guess = os.path.join(sys.exec_prefix, check)
- if os.path.isfile(guess):
- # Bingo - We think we have our Python.
- return Environment(guess)
- # It looks like there is no reasonable Python to be found.
- return InterpreterEnvironment()
- # If no virtualenv is found, use the environment we're already
- # using.
- return env
-
-
-def get_cached_default_environment():
- var = os.environ.get('VIRTUAL_ENV')
- environment = _get_cached_default_environment()
- if var and var != environment.path:
- _get_cached_default_environment.clear_cache()
- return _get_cached_default_environment()
- return environment
-
-
-@time_cache(seconds=10 * 60) # 10 Minutes
-def _get_cached_default_environment():
- return get_default_environment()
-
-
-def find_virtualenvs(paths=None, **kwargs):
- """
- :param paths: A list of paths in your file system to be scanned for
- Virtualenvs. It will search in these paths and potentially execute the
- Python binaries. Also the VIRTUAL_ENV variable will be checked if it
- contains a valid Virtualenv.
- :param safe: Default True. In case this is False, it will allow this
- function to execute potential `python` environments. An attacker might
- be able to drop an executable in a path this function is searching by
- default. If the executable has not been installed by root, it will not
- be executed.
-
- :yields: :class:`Environment`
- """
- def py27_comp(paths=None, safe=True):
- if paths is None:
- paths = []
-
- _used_paths = set()
-
- # Using this variable should be safe, because attackers might be able
- # to drop files (via git) but not environment variables.
- virtual_env = _get_virtual_env_from_var()
- if virtual_env is not None:
- yield virtual_env
- _used_paths.add(virtual_env.path)
-
- for directory in paths:
- if not os.path.isdir(directory):
- continue
-
- directory = os.path.abspath(directory)
- for path in os.listdir(directory):
- path = os.path.join(directory, path)
- if path in _used_paths:
- # A path shouldn't be evaluated twice.
- continue
- _used_paths.add(path)
-
- try:
- executable = _get_executable_path(path, safe=safe)
- yield Environment(executable)
- except InvalidPythonEnvironment:
- pass
-
- return py27_comp(paths, **kwargs)
-
-
-def find_system_environments():
- """
- Ignores virtualenvs and returns the Python versions that were installed on
- your system. This might return nothing, if you're running Python e.g. from
- a portable version.
-
- The environments are sorted from latest to oldest Python version.
-
- :yields: :class:`Environment`
- """
- for version_string in _SUPPORTED_PYTHONS:
- try:
- yield get_system_environment(version_string)
- except InvalidPythonEnvironment:
- pass
-
-
-# TODO: this function should probably return a list of environments since
-# multiple Python installations can be found on a system for the same version.
-def get_system_environment(version):
- """
- Return the first Python environment found for a string of the form 'X.Y'
- where X and Y are the major and minor versions of Python.
-
- :raises: :exc:`.InvalidPythonEnvironment`
- :returns: :class:`Environment`
- """
- exe = which('python' + version)
- if exe:
- if exe == sys.executable:
- return SameEnvironment()
- return Environment(exe)
-
- if os.name == 'nt':
- for exe in _get_executables_from_windows_registry(version):
- return Environment(exe)
- raise InvalidPythonEnvironment("Cannot find executable python%s." % version)
-
-
-def create_environment(path, safe=True):
- """
- Make it possible to manually create an Environment object by specifying a
- Virtualenv path or an executable path.
-
- :raises: :exc:`.InvalidPythonEnvironment`
- :returns: :class:`Environment`
- """
- if os.path.isfile(path):
- _assert_safe(path, safe)
- return Environment(path)
- return Environment(_get_executable_path(path, safe=safe))
-
-
-def _get_executable_path(path, safe=True):
- """
- Returns None if it's not actually a virtual env.
- """
-
- if os.name == 'nt':
- python = os.path.join(path, 'Scripts', 'python.exe')
- else:
- python = os.path.join(path, 'bin', 'python')
- if not os.path.exists(python):
- raise InvalidPythonEnvironment("%s seems to be missing." % python)
-
- _assert_safe(python, safe)
- return python
-
-
-def _get_executables_from_windows_registry(version):
- # The winreg module is named _winreg on Python 2.
- try:
- import winreg
- except ImportError:
- import _winreg as winreg
-
- # TODO: support Python Anaconda.
- sub_keys = [
- r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
- r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
- r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
- r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
- ]
- for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
- for sub_key in sub_keys:
- sub_key = sub_key.format(version=version)
- try:
- with winreg.OpenKey(root_key, sub_key) as key:
- prefix = winreg.QueryValueEx(key, '')[0]
- exe = os.path.join(prefix, 'python.exe')
- if os.path.isfile(exe):
- yield exe
- except WindowsError:
- pass
-
-
-def _assert_safe(executable_path, safe):
- if safe and not _is_safe(executable_path):
- raise InvalidPythonEnvironment(
- "The python binary is potentially unsafe.")
-
-
-def _is_safe(executable_path):
- # Resolve sym links. A venv typically is a symlink to a known Python
- # binary. Only virtualenvs copy symlinks around.
- real_path = os.path.realpath(executable_path)
-
- if _is_unix_safe_simple(real_path):
- return True
-
- # Just check the list of known Python versions. If it's not in there,
- # it's likely an attacker or some Python that was not properly
- # installed in the system.
- for environment in find_system_environments():
- if environment.executable == real_path:
- return True
-
- # If the versions don't match, just compare the binary files. If we
- # don't do that, only venvs will be working and not virtualenvs.
- # venvs are symlinks while virtualenvs are actual copies of the
- # Python files.
- # This still means that if the system Python is updated and the
- # virtualenv's Python is not (which is probably never going to get
- # upgraded), it will not work with Jedi. IMO that's fine, because
- # people should just be using venv. ~ dave
- if environment._sha256 == _calculate_sha256_for_file(real_path):
- return True
- return False
-
-
-def _is_unix_safe_simple(real_path):
- if _is_unix_admin():
- # In case we are root, just be conservative and
- # only execute known paths.
- return any(real_path.startswith(p) for p in _SAFE_PATHS)
-
- uid = os.stat(real_path).st_uid
- # The interpreter needs to be owned by root. This means that it wasn't
- # written by a user and therefore attacking Jedi is not as simple.
- # The attack could look like the following:
- # 1. A user clones a repository.
- # 2. The repository has an innocent looking folder called foobar. jedi
- # searches for the folder and executes foobar/bin/python --version if
- # there's also a foobar/bin/activate.
- # 3. The bin/python is obviously not a python script but a bash script or
- # whatever the attacker wants.
- return uid == 0
-
-
-def _is_unix_admin():
- try:
- return os.getuid() == 0
- except AttributeError:
- return False # Windows
diff --git a/contrib/python/jedi/jedi/api/exceptions.py b/contrib/python/jedi/jedi/api/exceptions.py
deleted file mode 100644
index 99cebdb7dd..0000000000
--- a/contrib/python/jedi/jedi/api/exceptions.py
+++ /dev/null
@@ -1,10 +0,0 @@
-class _JediError(Exception):
- pass
-
-
-class InternalError(_JediError):
- pass
-
-
-class WrongVersion(_JediError):
- pass
diff --git a/contrib/python/jedi/jedi/api/helpers.py b/contrib/python/jedi/jedi/api/helpers.py
deleted file mode 100644
index 7cf4bc6fc4..0000000000
--- a/contrib/python/jedi/jedi/api/helpers.py
+++ /dev/null
@@ -1,260 +0,0 @@
-"""
-Helpers for the API
-"""
-import re
-from collections import namedtuple
-from textwrap import dedent
-
-from parso.python.parser import Parser
-from parso.python import tree
-
-from jedi._compatibility import u
-from jedi.evaluate.syntax_tree import eval_atom
-from jedi.evaluate.helpers import evaluate_call_of_leaf
-from jedi.evaluate.compiled import get_string_context_set
-from jedi.cache import call_signature_time_cache
-
-
-CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
-
-
-def sorted_definitions(defs):
- # Note: `or ''` below is required because `module_path` could be
- return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0))
-
-
-def get_on_completion_name(module_node, lines, position):
- leaf = module_node.get_leaf_for_position(position)
- if leaf is None or leaf.type in ('string', 'error_leaf'):
- # Completions inside strings are a bit special, we need to parse the
- # string. The same is true for comments and error_leafs.
- line = lines[position[0] - 1]
- # The first step of completions is to get the name
- return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
- elif leaf.type not in ('name', 'keyword'):
- return ''
-
- return leaf.value[:position[1] - leaf.start_pos[1]]
-
-
-def _get_code(code_lines, start_pos, end_pos):
- # Get relevant lines.
- lines = code_lines[start_pos[0] - 1:end_pos[0]]
- # Remove the parts at the end of the line.
- lines[-1] = lines[-1][:end_pos[1]]
- # Remove first line indentation.
- lines[0] = lines[0][start_pos[1]:]
- return ''.join(lines)
-
-
-class OnErrorLeaf(Exception):
- @property
- def error_leaf(self):
- return self.args[0]
-
-
-def _get_code_for_stack(code_lines, module_node, position):
- leaf = module_node.get_leaf_for_position(position, include_prefixes=True)
- # It might happen that we're on whitespace or on a comment. This means
- # that we would not get the right leaf.
- if leaf.start_pos >= position:
- # If we're not on a comment simply get the previous leaf and proceed.
- leaf = leaf.get_previous_leaf()
- if leaf is None:
- return u('') # At the beginning of the file.
-
- is_after_newline = leaf.type == 'newline'
- while leaf.type == 'newline':
- leaf = leaf.get_previous_leaf()
- if leaf is None:
- return u('')
-
- if leaf.type == 'error_leaf' or leaf.type == 'string':
- if leaf.start_pos[0] < position[0]:
- # On a different line, we just begin anew.
- return u('')
-
- # Error leafs cannot be parsed, completion in strings is also
- # impossible.
- raise OnErrorLeaf(leaf)
- else:
- user_stmt = leaf
- while True:
- if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
- break
- user_stmt = user_stmt.parent
-
- if is_after_newline:
- if user_stmt.start_pos[1] > position[1]:
- # This means that it's actually a dedent and that means that we
- # start without context (part of a suite).
- return u('')
-
- # This is basically getting the relevant lines.
- return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
-
-
-def get_stack_at_position(grammar, code_lines, module_node, pos):
- """
- Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
- """
- class EndMarkerReached(Exception):
- pass
-
- def tokenize_without_endmarker(code):
- # TODO This is for now not an official parso API that exists purely
- # for Jedi.
- tokens = grammar._tokenize(code)
- for token in tokens:
- if token.string == safeword:
- raise EndMarkerReached()
- elif token.prefix.endswith(safeword):
- # This happens with comments.
- raise EndMarkerReached()
- elif token.string.endswith(safeword):
- yield token # Probably an f-string literal that was not finished.
- raise EndMarkerReached()
- else:
- yield token
-
- # The code might be indedented, just remove it.
- code = dedent(_get_code_for_stack(code_lines, module_node, pos))
- # We use a word to tell Jedi when we have reached the start of the
- # completion.
- # Use Z as a prefix because it's not part of a number suffix.
- safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
- code = code + ' ' + safeword
-
- p = Parser(grammar._pgen_grammar, error_recovery=True)
- try:
- p.parse(tokens=tokenize_without_endmarker(code))
- except EndMarkerReached:
- return p.stack
- raise SystemError(
- "This really shouldn't happen. There's a bug in Jedi:\n%s"
- % list(tokenize_without_endmarker(code))
- )
-
-
-def evaluate_goto_definition(evaluator, context, leaf):
- if leaf.type == 'name':
- # In case of a name we can just use goto_definition which does all the
- # magic itself.
- return evaluator.goto_definitions(context, leaf)
-
- parent = leaf.parent
- if parent.type == 'atom':
- return context.eval_node(leaf.parent)
- elif parent.type == 'trailer':
- return evaluate_call_of_leaf(context, leaf)
- elif isinstance(leaf, tree.Literal):
- return eval_atom(context, leaf)
- elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
- return get_string_context_set(evaluator)
- return []
-
-
-CallSignatureDetails = namedtuple(
- 'CallSignatureDetails',
- ['bracket_leaf', 'call_index', 'keyword_name_str']
-)
-
-
-def _get_index_and_key(nodes, position):
- """
- Returns the amount of commas and the keyword argument string.
- """
- nodes_before = [c for c in nodes if c.start_pos < position]
- if nodes_before[-1].type == 'arglist':
- nodes_before = [c for c in nodes_before[-1].children if c.start_pos < position]
-
- key_str = None
-
- if nodes_before:
- last = nodes_before[-1]
- if last.type == 'argument' and last.children[1] == '=' \
- and last.children[1].end_pos <= position:
- # Checked if the argument
- key_str = last.children[0].value
- elif last == '=':
- key_str = nodes_before[-2].value
-
- return nodes_before.count(','), key_str
-
-
-def _get_call_signature_details_from_error_node(node, position):
- for index, element in reversed(list(enumerate(node.children))):
- # `index > 0` means that it's a trailer and not an atom.
- if element == '(' and element.end_pos <= position and index > 0:
- # It's an error node, we don't want to match too much, just
- # until the parentheses is enough.
- children = node.children[index:]
- name = element.get_previous_leaf()
- if name is None:
- continue
- if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
- return CallSignatureDetails(
- element,
- *_get_index_and_key(children, position)
- )
-
-
-def get_call_signature_details(module, position):
- leaf = module.get_leaf_for_position(position, include_prefixes=True)
- if leaf.start_pos >= position:
- # Whitespace / comments after the leaf count towards the previous leaf.
- leaf = leaf.get_previous_leaf()
- if leaf is None:
- return None
-
- if leaf == ')':
- if leaf.end_pos == position:
- leaf = leaf.get_next_leaf()
-
- # Now that we know where we are in the syntax tree, we start to look at
- # parents for possible function definitions.
- node = leaf.parent
- while node is not None:
- if node.type in ('funcdef', 'classdef'):
- # Don't show call signatures if there's stuff before it that just
- # makes it feel strange to have a call signature.
- return None
-
- for n in node.children[::-1]:
- if n.start_pos < position and n.type == 'error_node':
- result = _get_call_signature_details_from_error_node(n, position)
- if result is not None:
- return result
-
- if node.type == 'trailer' and node.children[0] == '(':
- leaf = node.get_previous_leaf()
- if leaf is None:
- return None
- return CallSignatureDetails(
- node.children[0], *_get_index_and_key(node.children, position))
-
- node = node.parent
-
- return None
-
-
-@call_signature_time_cache("call_signatures_validity")
-def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos):
- """This function calculates the cache key."""
- line_index = user_pos[0] - 1
-
- before_cursor = code_lines[line_index][:user_pos[1]]
- other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
- whole = ''.join(other_lines + [before_cursor])
- before_bracket = re.match(r'.*\(', whole, re.DOTALL)
-
- module_path = context.get_root_context().py__file__()
- if module_path is None:
- yield None # Don't cache!
- else:
- yield (module_path, before_bracket, bracket_leaf.start_pos)
- yield evaluate_goto_definition(
- evaluator,
- context,
- bracket_leaf.get_previous_leaf()
- )
diff --git a/contrib/python/jedi/jedi/api/interpreter.py b/contrib/python/jedi/jedi/api/interpreter.py
deleted file mode 100644
index c9b7bd69bb..0000000000
--- a/contrib/python/jedi/jedi/api/interpreter.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
-TODO Some parts of this module are still not well documented.
-"""
-
-from jedi.evaluate.context import ModuleContext
-from jedi.evaluate import compiled
-from jedi.evaluate.compiled import mixed
-from jedi.evaluate.compiled.access import create_access_path
-from jedi.evaluate.base_context import Context
-
-
-def _create(evaluator, obj):
- return compiled.create_from_access_path(
- evaluator, create_access_path(evaluator, obj)
- )
-
-
-class NamespaceObject(object):
- def __init__(self, dct):
- self.__dict__ = dct
-
-
-class MixedModuleContext(Context):
- type = 'mixed_module'
-
- def __init__(self, evaluator, tree_module, namespaces, path, code_lines):
- self.evaluator = evaluator
- self._namespaces = namespaces
-
- self._namespace_objects = [NamespaceObject(n) for n in namespaces]
- self._module_context = ModuleContext(
- evaluator, tree_module,
- path=path,
- code_lines=code_lines
- )
- self.tree_node = tree_module
-
- def get_node(self):
- return self.tree_node
-
- def get_filters(self, *args, **kwargs):
- for filter in self._module_context.get_filters(*args, **kwargs):
- yield filter
-
- for namespace_obj in self._namespace_objects:
- compiled_object = _create(self.evaluator, namespace_obj)
- mixed_object = mixed.MixedObject(
- self.evaluator,
- parent_context=self,
- compiled_object=compiled_object,
- tree_context=self._module_context
- )
- for filter in mixed_object.get_filters(*args, **kwargs):
- yield filter
-
- @property
- def code_lines(self):
- return self._module_context.code_lines
-
- def __getattr__(self, name):
- return getattr(self._module_context, name)
diff --git a/contrib/python/jedi/jedi/api/keywords.py b/contrib/python/jedi/jedi/api/keywords.py
deleted file mode 100644
index 2991a0f81a..0000000000
--- a/contrib/python/jedi/jedi/api/keywords.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import pydoc
-
-from jedi.evaluate.utils import ignored
-from jedi.evaluate.filters import AbstractNameDefinition
-
-try:
- from pydoc_data import topics as pydoc_topics
-except ImportError:
- # Python 2
- try:
- import pydoc_topics
- except ImportError:
- # This is for Python 3 embeddable version, which dont have
- # pydoc_data module in its file python3x.zip.
- pydoc_topics = None
-
-
-def get_operator(evaluator, string, pos):
- return Keyword(evaluator, string, pos)
-
-
-class KeywordName(AbstractNameDefinition):
- api_type = u'keyword'
-
- def __init__(self, evaluator, name):
- self.evaluator = evaluator
- self.string_name = name
- self.parent_context = evaluator.builtins_module
-
- def infer(self):
- return [Keyword(self.evaluator, self.string_name, (0, 0))]
-
-
-class Keyword(object):
- api_type = u'keyword'
-
- def __init__(self, evaluator, name, pos):
- self.name = KeywordName(evaluator, name)
- self.start_pos = pos
- self.parent = evaluator.builtins_module
-
- @property
- def names(self):
- """ For a `parsing.Name` like comparision """
- return [self.name]
-
- def py__doc__(self, include_call_signature=False):
- return imitate_pydoc(self.name.string_name)
-
- def __repr__(self):
- return '<%s: %s>' % (type(self).__name__, self.name)
-
-
-def imitate_pydoc(string):
- """
- It's not possible to get the pydoc's without starting the annoying pager
- stuff.
- """
- if pydoc_topics is None:
- return ''
-
- # str needed because of possible unicode stuff in py2k (pydoc doesn't work
- # with unicode strings)
- string = str(string)
- h = pydoc.help
- with ignored(KeyError):
- # try to access symbols
- string = h.symbols[string]
- string, _, related = string.partition(' ')
-
- get_target = lambda s: h.topics.get(s, h.keywords.get(s))
- while isinstance(string, str):
- string = get_target(string)
-
- try:
- # is a tuple now
- label, related = string
- except TypeError:
- return ''
-
- try:
- return pydoc_topics.topics[label].strip() if pydoc_topics else ''
- except KeyError:
- return ''
diff --git a/contrib/python/jedi/jedi/api/project.py b/contrib/python/jedi/jedi/api/project.py
deleted file mode 100644
index 1e4bc08cf8..0000000000
--- a/contrib/python/jedi/jedi/api/project.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import os
-import json
-
-from jedi._compatibility import FileNotFoundError, NotADirectoryError, PermissionError
-from jedi.api.environment import SameEnvironment, \
- get_cached_default_environment
-from jedi.api.exceptions import WrongVersion
-from jedi._compatibility import force_unicode
-from jedi.evaluate.sys_path import discover_buildout_paths
-from jedi.evaluate.cache import evaluator_as_method_param_cache
-from jedi.common.utils import traverse_parents
-
-_CONFIG_FOLDER = '.jedi'
-_CONTAINS_POTENTIAL_PROJECT = 'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in'
-
-_SERIALIZER_VERSION = 1
-
-
-def _remove_duplicates_from_path(path):
- used = set()
- for p in path:
- if p in used:
- continue
- used.add(p)
- yield p
-
-
-def _force_unicode_list(lst):
- return list(map(force_unicode, lst))
-
-
-class Project(object):
- # TODO serialize environment
- _serializer_ignore_attributes = ('_environment',)
- _environment = None
-
- @staticmethod
- def _get_json_path(base_path):
- return os.path.join(base_path, _CONFIG_FOLDER, 'project.json')
-
- @classmethod
- def load(cls, path):
- """
- :param path: The path of the directory you want to use as a project.
- """
- with open(cls._get_json_path(path)) as f:
- version, data = json.load(f)
-
- if version == 1:
- self = cls.__new__()
- self.__dict__.update(data)
- return self
- else:
- raise WrongVersion(
- "The Jedi version of this project seems newer than what we can handle."
- )
-
- def __init__(self, path, **kwargs):
- """
- :param path: The base path for this project.
- :param sys_path: list of str. You can override the sys path if you
- want. By default the ``sys.path.`` is generated from the
- environment (virtualenvs, etc).
- :param smart_sys_path: If this is enabled (default), adds paths from
- local directories. Otherwise you will have to rely on your packages
- being properly configured on the ``sys.path``.
- """
- def py2_comp(path, environment=None, sys_path=None,
- smart_sys_path=True, _django=False):
- self._path = path
- if isinstance(environment, SameEnvironment):
- self._environment = environment
-
- self._sys_path = sys_path
- self._smart_sys_path = smart_sys_path
- self._django = _django
-
- py2_comp(path, **kwargs)
-
- def _get_base_sys_path(self, environment=None):
- if self._sys_path is not None:
- return self._sys_path
-
- # The sys path has not been set explicitly.
- if environment is None:
- environment = self.get_environment()
-
- sys_path = list(environment.get_sys_path())
- try:
- sys_path.remove('')
- except ValueError:
- pass
- return sys_path
-
- @evaluator_as_method_param_cache()
- def _get_sys_path(self, evaluator, environment=None):
- """
- Keep this method private for all users of jedi. However internally this
- one is used like a public method.
- """
- suffixed = []
- prefixed = []
-
- sys_path = list(self._get_base_sys_path(environment))
- if self._smart_sys_path:
- prefixed.append(self._path)
-
- if evaluator.script_path is not None:
- suffixed += discover_buildout_paths(evaluator, evaluator.script_path)
-
- traversed = list(traverse_parents(evaluator.script_path))
-
- # AFAIK some libraries have imports like `foo.foo.bar`, which
- # leads to the conclusion to by default prefer longer paths
- # rather than shorter ones by default.
- suffixed += reversed(traversed)
-
- if self._django:
- prefixed.append(self._path)
-
- path = prefixed + sys_path + suffixed
- return list(_force_unicode_list(_remove_duplicates_from_path(path)))
-
- def save(self):
- data = dict(self.__dict__)
- for attribute in self._serializer_ignore_attributes:
- data.pop(attribute, None)
-
- with open(self._get_json_path(self._path), 'wb') as f:
- return json.dump((_SERIALIZER_VERSION, data), f)
-
- def get_environment(self):
- if self._environment is None:
- return get_cached_default_environment()
-
- return self._environment
-
- def __repr__(self):
- return '<%s: %s>' % (self.__class__.__name__, self._path)
-
-
-def _is_potential_project(path):
- for name in _CONTAINS_POTENTIAL_PROJECT:
- if os.path.exists(os.path.join(path, name)):
- return True
- return False
-
-
-def _is_django_path(directory):
- """ Detects the path of the very well known Django library (if used) """
- try:
- with open(os.path.join(directory, 'manage.py'), 'rb') as f:
- return b"DJANGO_SETTINGS_MODULE" in f.read()
- except (FileNotFoundError, NotADirectoryError, PermissionError):
- return False
-
- return False
-
-
-def get_default_project(path=None):
- if path is None:
- path = os.getcwd()
-
- check = os.path.realpath(path)
- probable_path = None
- first_no_init_file = None
- for dir in traverse_parents(check, include_current=True):
- try:
- return Project.load(dir)
- except (FileNotFoundError, NotADirectoryError, PermissionError):
- pass
-
- if first_no_init_file is None:
- if os.path.exists(os.path.join(dir, '__init__.py')):
- # In the case that a __init__.py exists, it's in 99% just a
- # Python package and the project sits at least one level above.
- continue
- else:
- first_no_init_file = dir
-
- if _is_django_path(dir):
- return Project(dir, _django=True)
-
- if probable_path is None and _is_potential_project(dir):
- probable_path = dir
-
- if probable_path is not None:
- # TODO search for setup.py etc
- return Project(probable_path)
-
- if first_no_init_file is not None:
- return Project(first_no_init_file)
-
- curdir = path if os.path.isdir(path) else os.path.dirname(path)
- return Project(curdir)
diff --git a/contrib/python/jedi/jedi/api/replstartup.py b/contrib/python/jedi/jedi/api/replstartup.py
deleted file mode 100644
index 3ac8470877..0000000000
--- a/contrib/python/jedi/jedi/api/replstartup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-To use Jedi completion in Python interpreter, add the following in your shell
-setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is
-not available on Windows. If you still want Jedi autocompletion in your REPL,
-just use IPython instead::
-
- export PYTHONSTARTUP="$(python -m jedi repl)"
-
-Then you will be able to use Jedi completer in your Python interpreter::
-
- $ python
- Python 2.7.2+ (default, Jul 20 2012, 22:15:08)
- [GCC 4.6.1] on linux2
- Type "help", "copyright", "credits" or "license" for more information.
- >>> import os
- >>> os.path.join('a', 'b').split().in<TAB> # doctest: +SKIP
- ..dex ..sert
-
-"""
-import jedi.utils
-from jedi import __version__ as __jedi_version__
-
-print('REPL completion using Jedi %s' % __jedi_version__)
-jedi.utils.setup_readline()
-
-del jedi
-
-# Note: try not to do many things here, as it will contaminate global
-# namespace of the interpreter.
diff --git a/contrib/python/jedi/jedi/cache.py b/contrib/python/jedi/jedi/cache.py
deleted file mode 100644
index 93e2bd7fcf..0000000000
--- a/contrib/python/jedi/jedi/cache.py
+++ /dev/null
@@ -1,146 +0,0 @@
-"""
-This caching is very important for speed and memory optimizations. There's
-nothing really spectacular, just some decorators. The following cache types are
-available:
-
-- ``time_cache`` can be used to cache something for just a limited time span,
- which can be useful if there's user interaction and the user cannot react
- faster than a certain time.
-
-This module is one of the reasons why |jedi| is not thread-safe. As you can see
-there are global variables, which are holding the cache information. Some of
-these variables are being cleaned after every API usage.
-"""
-import time
-from functools import wraps
-
-from jedi import settings
-from parso.cache import parser_cache
-
-_time_caches = {}
-
-
-def underscore_memoization(func):
- """
- Decorator for methods::
-
- class A(object):
- def x(self):
- if self._x:
- self._x = 10
- return self._x
-
- Becomes::
-
- class A(object):
- @underscore_memoization
- def x(self):
- return 10
-
- A now has an attribute ``_x`` written by this decorator.
- """
- name = '_' + func.__name__
-
- def wrapper(self):
- try:
- return getattr(self, name)
- except AttributeError:
- result = func(self)
- setattr(self, name, result)
- return result
-
- return wrapper
-
-
-def clear_time_caches(delete_all=False):
- """ Jedi caches many things, that should be completed after each completion
- finishes.
-
- :param delete_all: Deletes also the cache that is normally not deleted,
- like parser cache, which is important for faster parsing.
- """
- global _time_caches
-
- if delete_all:
- for cache in _time_caches.values():
- cache.clear()
- parser_cache.clear()
- else:
- # normally just kill the expired entries, not all
- for tc in _time_caches.values():
- # check time_cache for expired entries
- for key, (t, value) in list(tc.items()):
- if t < time.time():
- # delete expired entries
- del tc[key]
-
-
-def call_signature_time_cache(time_add_setting):
- """
- This decorator works as follows: Call it with a setting and after that
- use the function with a callable that returns the key.
- But: This function is only called if the key is not available. After a
- certain amount of time (`time_add_setting`) the cache is invalid.
-
- If the given key is None, the function will not be cached.
- """
- def _temp(key_func):
- dct = {}
- _time_caches[time_add_setting] = dct
-
- def wrapper(*args, **kwargs):
- generator = key_func(*args, **kwargs)
- key = next(generator)
- try:
- expiry, value = dct[key]
- if expiry > time.time():
- return value
- except KeyError:
- pass
-
- value = next(generator)
- time_add = getattr(settings, time_add_setting)
- if key is not None:
- dct[key] = time.time() + time_add, value
- return value
- return wrapper
- return _temp
-
-
-def time_cache(seconds):
- def decorator(func):
- cache = {}
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- key = (args, frozenset(kwargs.items()))
- try:
- created, result = cache[key]
- if time.time() < created + seconds:
- return result
- except KeyError:
- pass
- result = func(*args, **kwargs)
- cache[key] = time.time(), result
- return result
-
- wrapper.clear_cache = lambda: cache.clear()
- return wrapper
-
- return decorator
-
-
-def memoize_method(method):
- """A normal memoize function."""
- @wraps(method)
- def wrapper(self, *args, **kwargs):
- cache_dict = self.__dict__.setdefault('_memoize_method_dct', {})
- dct = cache_dict.setdefault(method, {})
- key = (args, frozenset(kwargs.items()))
- try:
- return dct[key]
- except KeyError:
- result = method(self, *args, **kwargs)
- dct[key] = result
- return result
- return wrapper
diff --git a/contrib/python/jedi/jedi/common/__init__.py b/contrib/python/jedi/jedi/common/__init__.py
deleted file mode 100644
index 702a5e6099..0000000000
--- a/contrib/python/jedi/jedi/common/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from jedi.common.context import BaseContextSet, BaseContext
diff --git a/contrib/python/jedi/jedi/common/context.py b/contrib/python/jedi/jedi/common/context.py
deleted file mode 100644
index dfcf5a9e17..0000000000
--- a/contrib/python/jedi/jedi/common/context.py
+++ /dev/null
@@ -1,67 +0,0 @@
-class BaseContext(object):
- def __init__(self, evaluator, parent_context=None):
- self.evaluator = evaluator
- self.parent_context = parent_context
-
- def get_root_context(self):
- context = self
- while True:
- if context.parent_context is None:
- return context
- context = context.parent_context
-
-
-class BaseContextSet(object):
- def __init__(self, *args):
- self._set = set(args)
-
- @classmethod
- def from_iterable(cls, iterable):
- return cls.from_set(set(iterable))
-
- @classmethod
- def from_set(cls, set_):
- self = cls()
- self._set = set_
- return self
-
- @classmethod
- def from_sets(cls, sets):
- """
- Used to work with an iterable of set.
- """
- aggregated = set()
- sets = list(sets)
- for set_ in sets:
- if isinstance(set_, BaseContextSet):
- aggregated |= set_._set
- else:
- aggregated |= set_
- return cls.from_set(aggregated)
-
- def __or__(self, other):
- return type(self).from_set(self._set | other._set)
-
- def __iter__(self):
- for element in self._set:
- yield element
-
- def __bool__(self):
- return bool(self._set)
-
- def __len__(self):
- return len(self._set)
-
- def __repr__(self):
- return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set))
-
- def filter(self, filter_func):
- return type(self).from_iterable(filter(filter_func, self._set))
-
- def __getattr__(self, name):
- def mapper(*args, **kwargs):
- return type(self).from_sets(
- getattr(context, name)(*args, **kwargs)
- for context in self._set
- )
- return mapper
diff --git a/contrib/python/jedi/jedi/common/utils.py b/contrib/python/jedi/jedi/common/utils.py
deleted file mode 100644
index 92ff52e25e..0000000000
--- a/contrib/python/jedi/jedi/common/utils.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-from contextlib import contextmanager
-
-
-def traverse_parents(path, include_current=False):
- if not include_current:
- path = os.path.dirname(path)
-
- previous = None
- while previous != path:
- yield path
- previous = path
- path = os.path.dirname(path)
-
-
-@contextmanager
-def monkeypatch(obj, attribute_name, new_value):
- """
- Like pytest's monkeypatch, but as a context manager.
- """
- old_value = getattr(obj, attribute_name)
- try:
- setattr(obj, attribute_name, new_value)
- yield
- finally:
- setattr(obj, attribute_name, old_value)
diff --git a/contrib/python/jedi/jedi/debug.py b/contrib/python/jedi/jedi/debug.py
deleted file mode 100644
index 49fda4bcac..0000000000
--- a/contrib/python/jedi/jedi/debug.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from jedi._compatibility import encoding, is_py3, u
-import os
-import time
-
-_inited = False
-
-
-def _lazy_colorama_init():
- """
- Lazily init colorama if necessary, not to screw up stdout if debugging is
- not enabled.
-
- This version of the function does nothing.
- """
-
-
-try:
- if os.name == 'nt':
- # Does not work on Windows, as pyreadline and colorama interfere
- raise ImportError
- else:
- # Use colorama for nicer console output.
- from colorama import Fore, init
- from colorama import initialise
-
- def _lazy_colorama_init(): # noqa: F811
- """
- Lazily init colorama if necessary, not to screw up stdout is
- debug not enabled.
-
- This version of the function does init colorama.
- """
- global _inited
- if not _inited:
- # pytest resets the stream at the end - causes troubles. Since
- # after every output the stream is reset automatically we don't
- # need this.
- initialise.atexit_done = True
- try:
- init(strip=False)
- except Exception:
- # Colorama fails with initializing under vim and is buggy in
- # version 0.3.6.
- pass
- _inited = True
-
-except ImportError:
- class Fore(object):
- RED = ''
- GREEN = ''
- YELLOW = ''
- MAGENTA = ''
- RESET = ''
-
-NOTICE = object()
-WARNING = object()
-SPEED = object()
-
-enable_speed = False
-enable_warning = False
-enable_notice = False
-
-# callback, interface: level, str
-debug_function = None
-_debug_indent = 0
-_start_time = time.time()
-
-
-def reset_time():
- global _start_time, _debug_indent
- _start_time = time.time()
- _debug_indent = 0
-
-
-def increase_indent(func):
- """Decorator for makin """
- def wrapper(*args, **kwargs):
- global _debug_indent
- _debug_indent += 1
- try:
- return func(*args, **kwargs)
- finally:
- _debug_indent -= 1
- return wrapper
-
-
-def dbg(message, *args, **kwargs):
- """ Looks at the stack, to see if a debug message should be printed. """
- # Python 2 compatibility, because it doesn't understand default args
- color = kwargs.pop('color', 'GREEN')
- assert color
-
- if debug_function and enable_notice:
- i = ' ' * _debug_indent
- _lazy_colorama_init()
- debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
-
-
-def warning(message, *args, **kwargs):
- format = kwargs.pop('format', True)
- assert not kwargs
-
- if debug_function and enable_warning:
- i = ' ' * _debug_indent
- if format:
- message = message % tuple(u(repr(a)) for a in args)
- debug_function('RED', i + 'warning: ' + message)
-
-
-def speed(name):
- if debug_function and enable_speed:
- now = time.time()
- i = ' ' * _debug_indent
- debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time))
-
-
-def print_to_stdout(color, str_out):
- """
- The default debug function that prints to standard out.
-
- :param str color: A string that is an attribute of ``colorama.Fore``.
- """
- col = getattr(Fore, color)
- _lazy_colorama_init()
- if not is_py3:
- str_out = str_out.encode(encoding, 'replace')
- print(col + str_out + Fore.RESET)
-
-
-# debug_function = print_to_stdout
diff --git a/contrib/python/jedi/jedi/evaluate/__init__.py b/contrib/python/jedi/jedi/evaluate/__init__.py
deleted file mode 100644
index 48339439ee..0000000000
--- a/contrib/python/jedi/jedi/evaluate/__init__.py
+++ /dev/null
@@ -1,391 +0,0 @@
-"""
-Evaluation of Python code in |jedi| is based on three assumptions:
-
-* The code uses as least side effects as possible. Jedi understands certain
- list/tuple/set modifications, but there's no guarantee that Jedi detects
- everything (list.append in different modules for example).
-* No magic is being used:
-
- - metaclasses
- - ``setattr()`` / ``__import__()``
- - writing to ``globals()``, ``locals()``, ``object.__dict__``
-* The programmer is not a total dick, e.g. like `this
- <https://github.com/davidhalter/jedi/issues/24>`_ :-)
-
-The actual algorithm is based on a principle called lazy evaluation. That
-said, the typical entry point for static analysis is calling
-``eval_expr_stmt``. There's separate logic for autocompletion in the API, the
-evaluator is all about evaluating an expression.
-
-TODO this paragraph is not what jedi does anymore, it's similar, but not the
-same.
-
-Now you need to understand what follows after ``eval_expr_stmt``. Let's
-make an example::
-
- import datetime
- datetime.date.toda# <-- cursor here
-
-First of all, this module doesn't care about completion. It really just cares
-about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will
-return the ``date`` class.
-
-To *visualize* this (simplified):
-
-- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment.
-- ``Context.eval_node`` cares for resolving the dotted path
-- ``Evaluator.find_types`` searches for global definitions of datetime, which
- it finds in the definition of an import, by scanning the syntax tree.
-- Using the import logic, the datetime module is found.
-- Now ``find_types`` is called again by ``eval_node`` to find ``date``
- inside the datetime module.
-
-Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
-calls to ``find_types``. However the second call would be ignored, because the
-first one would return nothing (there's no foo attribute in ``date``).
-
-What if the import would contain another ``ExprStmt`` like this::
-
- from foo import bar
- Date = bar.baz
-
-Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really
-easy. Python can obviously get way more complicated then this. To understand
-tuple assignments, list comprehensions and everything else, a lot more code had
-to be written.
-
-Jedi has been tested very well, so you can just start modifying code. It's best
-to write your own test first for your "new" feature. Don't be scared of
-breaking stuff. As long as the tests pass, you're most likely to be fine.
-
-I need to mention now that lazy evaluation is really good because it
-only *evaluates* what needs to be *evaluated*. All the statements and modules
-that are not used are just being ignored.
-"""
-
-from parso.python import tree
-import parso
-from parso import python_bytes_to_unicode
-
-from jedi import debug
-from jedi import parser_utils
-from jedi.evaluate.utils import unite
-from jedi.evaluate import imports
-from jedi.evaluate import recursion
-from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate import compiled
-from jedi.evaluate import helpers
-from jedi.evaluate.filters import TreeNameDefinition, ParamName
-from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \
- ContextSet, NO_CONTEXTS, iterate_contexts
-from jedi.evaluate.context import ClassContext, FunctionContext, \
- AnonymousInstance, BoundMethod
-from jedi.evaluate.context.iterable import CompForContext
-from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \
- eval_node, check_tuple_assignments
-
-
-class Evaluator(object):
- def __init__(self, project, environment=None, script_path=None):
- if environment is None:
- environment = project.get_environment()
- self.environment = environment
- self.script_path = script_path
- self.compiled_subprocess = environment.get_evaluator_subprocess(self)
- self.grammar = environment.get_grammar()
-
- self.latest_grammar = parso.load_grammar(version='3.6')
- self.memoize_cache = {} # for memoize decorators
- self.module_cache = imports.ModuleCache() # does the job of `sys.modules`.
- self.compiled_cache = {} # see `evaluate.compiled.create()`
- self.inferred_element_counts = {}
- self.mixed_cache = {} # see `evaluate.compiled.mixed._create()`
- self.analysis = []
- self.dynamic_params_depth = 0
- self.is_analysis = False
- self.project = project
- self.access_cache = {}
- # This setting is only temporary to limit the work we have to do with
- # tensorflow and others.
- self.infer_enabled = True
-
- self.reset_recursion_limitations()
- self.allow_different_encoding = True
-
- @property
- @evaluator_function_cache()
- def builtins_module(self):
- return compiled.get_special_object(self, u'BUILTINS')
-
- def reset_recursion_limitations(self):
- self.recursion_detector = recursion.RecursionDetector()
- self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
-
- def get_sys_path(self):
- """Convenience function"""
- return self.project._get_sys_path(self, environment=self.environment)
-
- def eval_element(self, context, element):
- if not self.infer_enabled:
- return NO_CONTEXTS
-
- if isinstance(context, CompForContext):
- return eval_node(context, element)
-
- if_stmt = element
- while if_stmt is not None:
- if_stmt = if_stmt.parent
- if if_stmt.type in ('if_stmt', 'for_stmt'):
- break
- if parser_utils.is_scope(if_stmt):
- if_stmt = None
- break
- predefined_if_name_dict = context.predefined_names.get(if_stmt)
- # TODO there's a lot of issues with this one. We actually should do
- # this in a different way. Caching should only be active in certain
- # cases and this all sucks.
- if predefined_if_name_dict is None and if_stmt \
- and if_stmt.type == 'if_stmt' and self.is_analysis:
- if_stmt_test = if_stmt.children[1]
- name_dicts = [{}]
- # If we already did a check, we don't want to do it again -> If
- # context.predefined_names is filled, we stop.
- # We don't want to check the if stmt itself, it's just about
- # the content.
- if element.start_pos > if_stmt_test.end_pos:
- # Now we need to check if the names in the if_stmt match the
- # names in the suite.
- if_names = helpers.get_names_of_node(if_stmt_test)
- element_names = helpers.get_names_of_node(element)
- str_element_names = [e.value for e in element_names]
- if any(i.value in str_element_names for i in if_names):
- for if_name in if_names:
- definitions = self.goto_definitions(context, if_name)
- # Every name that has multiple different definitions
- # causes the complexity to rise. The complexity should
- # never fall below 1.
- if len(definitions) > 1:
- if len(name_dicts) * len(definitions) > 16:
- debug.dbg('Too many options for if branch evaluation %s.', if_stmt)
- # There's only a certain amount of branches
- # Jedi can evaluate, otherwise it will take to
- # long.
- name_dicts = [{}]
- break
-
- original_name_dicts = list(name_dicts)
- name_dicts = []
- for definition in definitions:
- new_name_dicts = list(original_name_dicts)
- for i, name_dict in enumerate(new_name_dicts):
- new_name_dicts[i] = name_dict.copy()
- new_name_dicts[i][if_name.value] = ContextSet(definition)
-
- name_dicts += new_name_dicts
- else:
- for name_dict in name_dicts:
- name_dict[if_name.value] = definitions
- if len(name_dicts) > 1:
- result = ContextSet()
- for name_dict in name_dicts:
- with helpers.predefine_names(context, if_stmt, name_dict):
- result |= eval_node(context, element)
- return result
- else:
- return self._eval_element_if_evaluated(context, element)
- else:
- if predefined_if_name_dict:
- return eval_node(context, element)
- else:
- return self._eval_element_if_evaluated(context, element)
-
- def _eval_element_if_evaluated(self, context, element):
- """
- TODO This function is temporary: Merge with eval_element.
- """
- parent = element
- while parent is not None:
- parent = parent.parent
- predefined_if_name_dict = context.predefined_names.get(parent)
- if predefined_if_name_dict is not None:
- return eval_node(context, element)
- return self._eval_element_cached(context, element)
-
- @evaluator_function_cache(default=NO_CONTEXTS)
- def _eval_element_cached(self, context, element):
- return eval_node(context, element)
-
- def goto_definitions(self, context, name):
- def_ = name.get_definition(import_name_always=True)
- if def_ is not None:
- type_ = def_.type
- if type_ == 'classdef':
- return [ClassContext(self, context, name.parent)]
- elif type_ == 'funcdef':
- return [FunctionContext.from_context(context, name.parent)]
-
- if type_ == 'expr_stmt':
- is_simple_name = name.parent.type not in ('power', 'trailer')
- if is_simple_name:
- return eval_expr_stmt(context, def_, name)
- if type_ == 'for_stmt':
- container_types = context.eval_node(def_.children[3])
- cn = ContextualizedNode(context, def_.children[3])
- for_types = iterate_contexts(container_types, cn)
- c_node = ContextualizedName(context, name)
- return check_tuple_assignments(self, c_node, for_types)
- if type_ in ('import_from', 'import_name'):
- return imports.infer_import(context, name)
-
- return helpers.evaluate_call_of_leaf(context, name)
-
- def goto(self, context, name):
- definition = name.get_definition(import_name_always=True)
- if definition is not None:
- type_ = definition.type
- if type_ == 'expr_stmt':
- # Only take the parent, because if it's more complicated than just
- # a name it's something you can "goto" again.
- is_simple_name = name.parent.type not in ('power', 'trailer')
- if is_simple_name:
- return [TreeNameDefinition(context, name)]
- elif type_ == 'param':
- return [ParamName(context, name)]
- elif type_ in ('funcdef', 'classdef'):
- return [TreeNameDefinition(context, name)]
- elif type_ in ('import_from', 'import_name'):
- module_names = imports.infer_import(context, name, is_goto=True)
- return module_names
-
- par = name.parent
- node_type = par.type
- if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
- # Named param goto.
- trailer = par.parent
- if trailer.type == 'arglist':
- trailer = trailer.parent
- if trailer.type != 'classdef':
- if trailer.type == 'decorator':
- context_set = context.eval_node(trailer.children[1])
- else:
- i = trailer.parent.children.index(trailer)
- to_evaluate = trailer.parent.children[:i]
- if to_evaluate[0] == 'await':
- to_evaluate.pop(0)
- context_set = context.eval_node(to_evaluate[0])
- for trailer in to_evaluate[1:]:
- context_set = eval_trailer(context, context_set, trailer)
- param_names = []
- for context in context_set:
- try:
- get_param_names = context.get_param_names
- except AttributeError:
- pass
- else:
- for param_name in get_param_names():
- if param_name.string_name == name.value:
- param_names.append(param_name)
- return param_names
- elif node_type == 'dotted_name': # Is a decorator.
- index = par.children.index(name)
- if index > 0:
- new_dotted = helpers.deep_ast_copy(par)
- new_dotted.children[index - 1:] = []
- values = context.eval_node(new_dotted)
- return unite(
- value.py__getattribute__(name, name_context=context, is_goto=True)
- for value in values
- )
-
- if node_type == 'trailer' and par.children[0] == '.':
- values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
- return unite(
- value.py__getattribute__(name, name_context=context, is_goto=True)
- for value in values
- )
- else:
- stmt = tree.search_ancestor(
- name, 'expr_stmt', 'lambdef'
- ) or name
- if stmt.type == 'lambdef':
- stmt = name
- return context.py__getattribute__(
- name,
- position=stmt.start_pos,
- search_global=True, is_goto=True
- )
-
- def create_context(self, base_context, node, node_is_context=False, node_is_object=False):
- def parent_scope(node):
- while True:
- node = node.parent
-
- if parser_utils.is_scope(node):
- return node
- elif node.type in ('argument', 'testlist_comp'):
- if node.children[1].type == 'comp_for':
- return node.children[1]
- elif node.type == 'dictorsetmaker':
- for n in node.children[1:4]:
- # In dictionaries it can be pretty much anything.
- if n.type == 'comp_for':
- return n
-
- def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False):
- if scope_node == base_node:
- return base_context
-
- is_funcdef = scope_node.type in ('funcdef', 'lambdef')
- parent_scope = parser_utils.get_parent_scope(scope_node)
- parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)
-
- if is_funcdef:
- func = FunctionContext.from_context(
- parent_context,
- scope_node
- )
- if isinstance(parent_context, AnonymousInstance):
- func = BoundMethod(
- instance=parent_context,
- klass=parent_context.class_context,
- function=func
- )
- if is_nested and not node_is_object:
- return func.get_function_execution()
- return func
- elif scope_node.type == 'classdef':
- class_context = ClassContext(self, parent_context, scope_node)
- if child_is_funcdef:
- # anonymous instance
- return AnonymousInstance(self, parent_context, class_context)
- else:
- return class_context
- elif scope_node.type == 'comp_for':
- if node.start_pos >= scope_node.children[-1].start_pos:
- return parent_context
- return CompForContext.from_comp_for(parent_context, scope_node)
- raise Exception("There's a scope that was not managed.")
-
- base_node = base_context.tree_node
-
- if node_is_context and parser_utils.is_scope(node):
- scope_node = node
- else:
- if node.parent.type in ('funcdef', 'classdef') and node.parent.name == node:
- # When we're on class/function names/leafs that define the
- # object itself and not its contents.
- node = node.parent
- scope_node = parent_scope(node)
- return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)
-
- def parse_and_get_code(self, code=None, path=None, encoding='utf-8', **kwargs):
- if self.allow_different_encoding:
- if code is None:
- with open(path, 'rb') as f:
- code = f.read()
- code = python_bytes_to_unicode(code, encoding=encoding, errors='replace')
-
- return self.grammar.parse(code=code, path=path, **kwargs), code
-
- def parse(self, *args, **kwargs):
- return self.parse_and_get_code(*args, **kwargs)[0]
diff --git a/contrib/python/jedi/jedi/evaluate/analysis.py b/contrib/python/jedi/jedi/evaluate/analysis.py
deleted file mode 100644
index ded4e9f208..0000000000
--- a/contrib/python/jedi/jedi/evaluate/analysis.py
+++ /dev/null
@@ -1,218 +0,0 @@
-"""
-Module for statical analysis.
-"""
-from parso.python import tree
-
-from jedi._compatibility import force_unicode
-from jedi import debug
-from jedi.evaluate.compiled import CompiledObject
-from jedi.evaluate.helpers import is_string
-
-
-CODES = {
- 'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
- 'name-error': (2, NameError, 'Potential NameError.'),
- 'import-error': (3, ImportError, 'Potential ImportError.'),
- 'type-error-too-many-arguments': (4, TypeError, None),
- 'type-error-too-few-arguments': (5, TypeError, None),
- 'type-error-keyword-argument': (6, TypeError, None),
- 'type-error-multiple-values': (7, TypeError, None),
- 'type-error-star-star': (8, TypeError, None),
- 'type-error-star': (9, TypeError, None),
- 'type-error-operation': (10, TypeError, None),
- 'type-error-not-iterable': (11, TypeError, None),
- 'type-error-isinstance': (12, TypeError, None),
- 'type-error-not-subscriptable': (13, TypeError, None),
- 'value-error-too-many-values': (14, ValueError, None),
- 'value-error-too-few-values': (15, ValueError, None),
-}
-
-
-class Error(object):
- def __init__(self, name, module_path, start_pos, message=None):
- self.path = module_path
- self._start_pos = start_pos
- self.name = name
- if message is None:
- message = CODES[self.name][2]
- self.message = message
-
- @property
- def line(self):
- return self._start_pos[0]
-
- @property
- def column(self):
- return self._start_pos[1]
-
- @property
- def code(self):
- # The class name start
- first = self.__class__.__name__[0]
- return first + str(CODES[self.name][0])
-
- def __unicode__(self):
- return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
- self.code, self.message)
-
- def __str__(self):
- return self.__unicode__()
-
- def __eq__(self, other):
- return (self.path == other.path and self.name == other.name and
- self._start_pos == other._start_pos)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return hash((self.path, self._start_pos, self.name))
-
- def __repr__(self):
- return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
- self.name, self.path,
- self._start_pos[0], self._start_pos[1])
-
-
-class Warning(Error):
- pass
-
-
-def add(node_context, error_name, node, message=None, typ=Error, payload=None):
- exception = CODES[error_name][1]
- if _check_for_exception_catch(node_context, node, exception, payload):
- return
-
- # TODO this path is probably not right
- module_context = node_context.get_root_context()
- module_path = module_context.py__file__()
- instance = typ(error_name, module_path, node.start_pos, message)
- debug.warning(str(instance), format=False)
- node_context.evaluator.analysis.append(instance)
-
-
-def _check_for_setattr(instance):
- """
- Check if there's any setattr method inside an instance. If so, return True.
- """
- from jedi.evaluate.context import ModuleContext
- module = instance.get_root_context()
- if not isinstance(module, ModuleContext):
- return False
-
- node = module.tree_node
- try:
- stmts = node.get_used_names()['setattr']
- except KeyError:
- return False
-
- return any(node.start_pos < stmt.start_pos < node.end_pos
- for stmt in stmts)
-
-
-def add_attribute_error(name_context, lookup_context, name):
- message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name))
- from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName
- # Check for __getattr__/__getattribute__ existance and issue a warning
- # instead of an error, if that happens.
- typ = Error
- if isinstance(lookup_context, AbstractInstanceContext):
- slot_names = lookup_context.get_function_slot_names(u'__getattr__') + \
- lookup_context.get_function_slot_names(u'__getattribute__')
- for n in slot_names:
- # TODO do we even get here?
- if isinstance(name, CompiledInstanceName) and \
- n.parent_context.obj == object:
- typ = Warning
- break
-
- if _check_for_setattr(lookup_context):
- typ = Warning
-
- payload = lookup_context, name
- add(name_context, 'attribute-error', name, message, typ, payload)
-
-
-def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
- """
- Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
- doesn't count as an error (if equal to `exception`).
- Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
- it.
- Returns True if the exception was catched.
- """
- def check_match(cls, exception):
- try:
- return isinstance(cls, CompiledObject) and cls.is_super_class(exception)
- except TypeError:
- return False
-
- def check_try_for_except(obj, exception):
- # Only nodes in try
- iterator = iter(obj.children)
- for branch_type in iterator:
- colon = next(iterator)
- suite = next(iterator)
- if branch_type == 'try' \
- and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
- return False
-
- for node in obj.get_except_clause_tests():
- if node is None:
- return True # An exception block that catches everything.
- else:
- except_classes = node_context.eval_node(node)
- for cls in except_classes:
- from jedi.evaluate.context import iterable
- if isinstance(cls, iterable.Sequence) and \
- cls.array_type == 'tuple':
- # multiple exceptions
- for lazy_context in cls.py__iter__():
- for typ in lazy_context.infer():
- if check_match(typ, exception):
- return True
- else:
- if check_match(cls, exception):
- return True
-
- def check_hasattr(node, suite):
- try:
- assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
- assert node.type in ('power', 'atom_expr')
- base = node.children[0]
- assert base.type == 'name' and base.value == 'hasattr'
- trailer = node.children[1]
- assert trailer.type == 'trailer'
- arglist = trailer.children[1]
- assert arglist.type == 'arglist'
- from jedi.evaluate.arguments import TreeArguments
- args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack())
- # Arguments should be very simple
- assert len(args) == 2
-
- # Check name
- key, lazy_context = args[1]
- names = list(lazy_context.infer())
- assert len(names) == 1 and is_string(names[0])
- assert force_unicode(names[0].get_safe_value()) == payload[1].value
-
- # Check objects
- key, lazy_context = args[0]
- objects = lazy_context.infer()
- return payload[0] in objects
- except AssertionError:
- return False
-
- obj = jedi_name
- while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
- if isinstance(obj, tree.Flow):
- # try/except catch check
- if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
- return True
- # hasattr check
- if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
- if check_hasattr(obj.children[1], obj.children[3]):
- return True
- obj = obj.parent
-
- return False
diff --git a/contrib/python/jedi/jedi/evaluate/arguments.py b/contrib/python/jedi/jedi/evaluate/arguments.py
deleted file mode 100644
index a1320fbb54..0000000000
--- a/contrib/python/jedi/jedi/evaluate/arguments.py
+++ /dev/null
@@ -1,305 +0,0 @@
-import re
-
-from parso.python import tree
-
-from jedi._compatibility import zip_longest
-from jedi import debug
-from jedi.evaluate import analysis
-from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
- LazyTreeContext, get_merged_lazy_context
-from jedi.evaluate.filters import ParamName
-from jedi.evaluate.base_context import NO_CONTEXTS
-from jedi.evaluate.context import iterable
-from jedi.evaluate.param import get_executed_params, ExecutedParam
-
-
def try_iter_content(types, depth=0):
    """Recursively force evaluation of the iterated contents of ``types``.

    Helper for static analysis: touching the lazily inferred values makes
    sure analysis warnings inside them are generated.
    """
    if depth > 10:
        # It's possible that a loop has references on itself (especially with
        # CompiledObject). Therefore don't loop infinitely.
        return

    for context in types:
        iter_method = getattr(context, 'py__iter__', None)
        if iter_method is None:
            # Not iterable; nothing to descend into.
            continue
        for lazy in iter_method():
            try_iter_content(lazy.infer(), depth + 1)
-
-
def repack_with_argument_clinic(string, keep_arguments_param=False):
    """
    Transforms a function or method with arguments to the signature that is
    given as an argument clinic notation.

    Argument clinic is part of CPython and used for all the functions that are
    implemented in C (Python 3.7):

        str.split.__text_signature__
        # Results in: '($self, /, sep=None, maxsplit=-1)'
    """
    clinic_args = list(_parse_argument_clinic(string))

    def decorator(func):
        def wrapper(*args, **kwargs):
            # ``arguments`` is always supplied as a keyword; either keep
            # forwarding it to ``func`` or consume it here.
            if keep_arguments_param:
                arguments = kwargs['arguments']
            else:
                arguments = kwargs.pop('arguments')
            try:
                clinic_values = tuple(_iterate_argument_clinic(arguments, clinic_args))
            except ValueError:
                # The actual call does not fit the clinic signature.
                return NO_CONTEXTS
            return func(*(args + clinic_values), **kwargs)

        return wrapper
    return decorator
-
-
def _iterate_argument_clinic(arguments, parameters):
    """Uses a list with argument clinic information (see PEP 436).

    Yields one context set per clinic parameter and raises ValueError as
    soon as the actual arguments cannot be matched to the signature.
    """
    unpacked = arguments.unpack()
    for index, (name, optional, allow_kwargs) in enumerate(parameters):
        key, argument = next(unpacked, (None, None))
        if key is not None:
            # Only positional matching is implemented here.
            debug.warning('Keyword arguments in argument clinic are currently not supported.')
            raise ValueError
        if argument is None and not optional:
            debug.warning('TypeError: %s expected at least %s arguments, got %s',
                          name, len(parameters), index)
            raise ValueError

        context_set = NO_CONTEXTS if argument is None else argument.infer()

        if not context_set and not optional:
            # For the stdlib we always want values. If we don't get them,
            # that's ok, maybe something is too hard to resolve, however,
            # we will not proceed with the evaluation of that function.
            debug.warning('argument_clinic "%s" not resolvable.', name)
            raise ValueError
        yield context_set
-
-
-def _parse_argument_clinic(string):
- allow_kwargs = False
- optional = False
- while string:
- # Optional arguments have to begin with a bracket. And should always be
- # at the end of the arguments. This is therefore not a proper argument
- # clinic implementation. `range()` for exmple allows an optional start
- # value at the beginning.
- match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string)
- string = string[len(match.group(0)):]
- if not match.group(2): # A slash -> allow named arguments
- allow_kwargs = True
- continue
- optional = optional or bool(match.group(1))
- word = match.group(2)
- yield (word, optional, allow_kwargs)
-
-
class AbstractArguments(object):
    """Common interface for the different ways arguments reach a call."""

    # Filled in by subclasses where applicable.
    context = None
    argument_node = None
    trailer = None

    def eval_all(self, funcdef=None):
        """
        Evaluates all arguments as a support for static analysis
        (normally Jedi).
        """
        for _, lazy_context in self.unpack():
            try_iter_content(lazy_context.infer())

    def get_calling_nodes(self):
        # By default no tree nodes are known for the call.
        return []

    def unpack(self, funcdef=None):
        # Subclasses yield (keyword_name_or_None, lazy_context) pairs.
        raise NotImplementedError

    def get_executed_params(self, execution_context):
        return get_executed_params(execution_context, self)
-
-
class AnonymousArguments(AbstractArguments):
    """Arguments that are unknown statically; the executed params are found
    by dynamically searching actual call sites."""

    def get_executed_params(self, execution_context):
        from jedi.evaluate.dynamic import search_params
        evaluator = execution_context.evaluator
        return search_params(evaluator, execution_context,
                             execution_context.tree_node)

    def __repr__(self):
        return '%s()' % self.__class__.__name__
-
-
class TreeArguments(AbstractArguments):
    """Arguments taken directly from a parso syntax-tree node."""

    def __init__(self, evaluator, context, argument_node, trailer=None):
        """
        The argument_node is either a parser node or a list of evaluated
        objects. Those evaluated objects may be lists of evaluated objects
        themselves (one list for the first argument, one for the second, etc).

        :param argument_node: May be an argument_node or a list of nodes.
        """
        self.argument_node = argument_node
        self.context = context
        self._evaluator = evaluator
        self.trailer = trailer  # Can be None, e.g. in a class definition.

    def _split(self):
        # Yields (star_count, node) per argument: star_count is 0 for plain
        # arguments, 1 for *args and 2 for **kwargs.
        if self.argument_node is None:
            return

        # Allow testlist here as well for Python2's class inheritance
        # definitions.
        if not (self.argument_node.type in ('arglist', 'testlist') or (
                # in python 3.5 **arg is an argument, not arglist
                (self.argument_node.type == 'argument') and
                self.argument_node.children[0] in ('*', '**'))):
            # A single plain argument node.
            yield 0, self.argument_node
            return

        iterator = iter(self.argument_node.children)
        for child in iterator:
            if child == ',':
                continue
            elif child in ('*', '**'):
                # Bare star operator; the value is the following child.
                yield len(child.value), next(iterator)
            elif child.type == 'argument' and \
                    child.children[0] in ('*', '**'):
                assert len(child.children) == 2
                yield len(child.children[0].value), child.children[1]
            else:
                yield 0, child

    def unpack(self, funcdef=None):
        # Yields (keyword_name_or_None, lazy_context) pairs; named args are
        # collected and emitted last (see comment at the bottom).
        named_args = []
        for star_count, el in self._split():
            if star_count == 1:
                # *args: iterate every evaluated array and merge positionally.
                arrays = self.context.eval_node(el)
                iterators = [_iterate_star_args(self.context, a, el, funcdef)
                             for a in arrays]
                for values in list(zip_longest(*iterators)):
                    # TODO zip_longest yields None, that means this would raise
                    # an exception?
                    yield None, get_merged_lazy_context(
                        [v for v in values if v is not None]
                    )
            elif star_count == 2:
                # **kwargs: expand the dict key/value pairs.
                arrays = self.context.eval_node(el)
                for dct in arrays:
                    for key, values in _star_star_dict(self.context, dct, el, funcdef):
                        yield key, values
            else:
                if el.type == 'argument':
                    c = el.children
                    if len(c) == 3:  # Keyword argument.
                        named_args.append((c[0].value, LazyTreeContext(self.context, c[2]),))
                    else:  # Generator comprehension.
                        # Include the brackets with the parent.
                        comp = iterable.GeneratorComprehension(
                            self._evaluator, self.context, self.argument_node.parent)
                        yield None, LazyKnownContext(comp)
                else:
                    yield None, LazyTreeContext(self.context, el)

        # Reordering var_args is necessary, because star args sometimes appear
        # after named argument, but in the actual order it's prepended.
        for named_arg in named_args:
            yield named_arg

    def as_tree_tuple_objects(self):
        # Yields (argument_node, default_node_or_None, star_count) triples.
        for star_count, argument in self._split():
            if argument.type == 'argument':
                argument, default = argument.children[::2]
            else:
                default = None
            yield argument, default, star_count

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.argument_node)

    def get_calling_nodes(self):
        # Follows *args forwarding chains backwards to locate the original
        # call node(s); stops on cycles via old_arguments_list.
        from jedi.evaluate.dynamic import DynamicExecutedParams
        old_arguments_list = []
        arguments = self

        while arguments not in old_arguments_list:
            if not isinstance(arguments, TreeArguments):
                break

            old_arguments_list.append(arguments)
            for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())):
                if not star_count or not isinstance(name, tree.Name):
                    continue

                names = self._evaluator.goto(arguments.context, name)
                if len(names) != 1:
                    break
                if not isinstance(names[0], ParamName):
                    break
                param = names[0].get_param()
                if isinstance(param, DynamicExecutedParams):
                    # For dynamic searches we don't even want to see errors.
                    return []
                if not isinstance(param, ExecutedParam):
                    break
                if param.var_args is None:
                    break
                # Keep following the chain through the forwarded var_args.
                arguments = param.var_args
                break

        if arguments.argument_node is not None:
            return [arguments.argument_node]
        if arguments.trailer is not None:
            return [arguments.trailer]
        return []
- return []
-
-
class ValuesArguments(AbstractArguments):
    """Arguments built from already-evaluated context sets."""

    def __init__(self, values_list):
        self._values_list = values_list

    def unpack(self, funcdef=None):
        # Every entry is positional; keyword arguments cannot occur here.
        for value_set in self._values_list:
            yield None, LazyKnownContexts(value_set)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._values_list)
-
-
-def _iterate_star_args(context, array, input_node, funcdef=None):
- try:
- iter_ = array.py__iter__
- except AttributeError:
- if funcdef is not None:
- # TODO this funcdef should not be needed.
- m = "TypeError: %s() argument after * must be a sequence, not %s" \
- % (funcdef.name.value, array)
- analysis.add(context, 'type-error-star', input_node, message=m)
- else:
- for lazy_context in iter_():
- yield lazy_context
-
-
def _star_star_dict(context, array, input_node, funcdef):
    """Return the ``(key, lazy_context)`` items of ``array`` used as ``**kwargs``."""
    from jedi.evaluate.context.instance import CompiledInstance
    if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
        # For now ignore this case. In the future add proper iterators and just
        # make one call without crazy isinstance checks.
        return {}
    if isinstance(array, iterable.Sequence) and array.array_type == 'dict':
        return array.exact_key_items()
    # Not a mapping at all: possibly report it, expand to nothing.
    if funcdef is not None:
        m = "TypeError: %s argument after ** must be a mapping, not %s" \
            % (funcdef.name.value, array)
        analysis.add(context, 'type-error-star-star', input_node, message=m)
    return {}
diff --git a/contrib/python/jedi/jedi/evaluate/base_context.py b/contrib/python/jedi/jedi/evaluate/base_context.py
deleted file mode 100644
index cffd8da2c9..0000000000
--- a/contrib/python/jedi/jedi/evaluate/base_context.py
+++ /dev/null
@@ -1,279 +0,0 @@
-"""
-Contexts are the "values" that Python would return. However Contexts are at the
-same time also the "contexts" that a user is currently sitting in.
-
-A ContextSet is typically used to specify the return of a function or any other
-static analysis operation. In jedi there are always multiple returns and not
-just one.
-"""
-from parso.python.tree import ExprStmt, CompFor
-
-from jedi import debug
-from jedi._compatibility import Python3Method, zip_longest, unicode
-from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature
-from jedi.common import BaseContextSet, BaseContext
-from jedi.evaluate.helpers import EvaluatorIndexError, EvaluatorTypeError, \
- EvaluatorKeyError
-
-
class Context(BaseContext):
    """
    Should be defined, otherwise the API returns empty types.
    """

    predefined_names = {}
    # To be defined by subclasses.
    tree_node = None

    @property
    def api_type(self):
        # By default just lower name of the class. Can and should be
        # overwritten.
        return self.__class__.__name__.lower()

    @debug.increase_indent
    def execute(self, arguments):
        """
        In contrast to py__call__ this function is always available.

        `hasattr(x, py__call__)` can also be checked to see if a context is
        executable.

        :param arguments: an AbstractArguments instance.
        :return: a ContextSet with the results of the call.
        """
        if self.evaluator.is_analysis:
            arguments.eval_all()

        debug.dbg('execute: %s %s', self, arguments)
        from jedi.evaluate import stdlib
        try:
            # Some stdlib functions like super(), namedtuple(), etc. have been
            # hard-coded in Jedi to support them.
            return stdlib.execute(self.evaluator, self, arguments)
        except stdlib.NotInStdLib:
            pass

        try:
            func = self.py__call__
        except AttributeError:
            debug.warning("no execution possible %s", self)
            return NO_CONTEXTS
        else:
            context_set = func(arguments)
            debug.dbg('execute result: %s in %s', context_set, self)
            return context_set
        # NOTE: an unreachable ``return self.evaluator.execute(self, arguments)``
        # used to follow here; removed because both branches above return.

    def execute_evaluated(self, *value_list):
        """
        Execute a function with already executed arguments.
        """
        from jedi.evaluate.arguments import ValuesArguments
        arguments = ValuesArguments([ContextSet(value) for value in value_list])
        return self.execute(arguments)

    def iterate(self, contextualized_node=None, is_async=False):
        """Iterate this context; reports a type error and yields nothing if
        the context has no (async) iteration method."""
        debug.dbg('iterate %s', self)
        try:
            if is_async:
                iter_method = self.py__aiter__
            else:
                iter_method = self.py__iter__
        except AttributeError:
            if contextualized_node is not None:
                from jedi.evaluate import analysis
                analysis.add(
                    contextualized_node.context,
                    'type-error-not-iterable',
                    contextualized_node.node,
                    message="TypeError: '%s' object is not iterable" % self)
            return iter([])
        else:
            return iter_method()

    def get_item(self, index_contexts, contextualized_node):
        """Simulate ``self[index]`` for every index context given."""
        from jedi.evaluate.compiled import CompiledObject
        from jedi.evaluate.context.iterable import Slice, Sequence
        result = ContextSet()

        for index in index_contexts:
            if isinstance(index, Slice):
                index = index.obj
            if isinstance(index, CompiledObject):
                try:
                    index = index.get_safe_value()
                except ValueError:
                    pass

            if type(index) not in (float, int, str, unicode, slice, bytes):
                # If the index is not clearly defined, we have to get all the
                # possibilities.
                if isinstance(self, Sequence) and self.array_type == 'dict':
                    result |= self.dict_values()
                else:
                    result |= iterate_contexts(ContextSet(self))
                continue

            # The actual getitem call.
            try:
                getitem = self.py__getitem__
            except AttributeError:
                from jedi.evaluate import analysis
                # TODO this context is probably not right.
                analysis.add(
                    contextualized_node.context,
                    'type-error-not-subscriptable',
                    contextualized_node.node,
                    message="TypeError: '%s' object is not subscriptable" % self
                )
            else:
                try:
                    result |= getitem(index)
                except EvaluatorIndexError:
                    result |= iterate_contexts(ContextSet(self))
                except EvaluatorKeyError:
                    # Must be a dict. Lists don't raise KeyErrors.
                    result |= self.dict_values()
                except EvaluatorTypeError:
                    # The type is wrong and therefore it makes no sense to do
                    # anything anymore.
                    result = NO_CONTEXTS
        return result

    def eval_node(self, node):
        """Evaluate an arbitrary tree node within this context."""
        return self.evaluator.eval_element(self, node)

    @Python3Method
    def py__getattribute__(self, name_or_str, name_context=None, position=None,
                           search_global=False, is_goto=False,
                           analysis_errors=True):
        """
        :param position: Position of the last statement -> tuple of line, column
        """
        if name_context is None:
            name_context = self
        from jedi.evaluate import finder
        f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
                              position, analysis_errors=analysis_errors)
        filters = f.get_filters(search_global)
        if is_goto:
            return f.filter_name(filters)
        return f.find(filters, attribute_lookup=not search_global)

    def create_context(self, node, node_is_context=False, node_is_object=False):
        return self.evaluator.create_context(self, node, node_is_context, node_is_object)

    def is_class(self):
        return False

    def py__bool__(self):
        """
        Since Wrapper is a super class for classes, functions and modules,
        the return value will always be true.
        """
        return True

    def py__doc__(self, include_call_signature=False):
        """Return the docstring of the underlying tree node (or '')."""
        try:
            self.tree_node.get_doc_node
        except AttributeError:
            return ''
        else:
            if include_call_signature:
                return get_doc_with_call_signature(self.tree_node)
            else:
                return clean_scope_docstring(self.tree_node)
        # NOTE: an unreachable ``return None`` used to follow here; removed.
-
-
def iterate_contexts(contexts, contextualized_node=None, is_async=False):
    """
    Calls `iterate`, on all contexts but ignores the ordering and just returns
    all contexts that the iterate functions yield.
    """
    lazy_contexts = contexts.iterate(contextualized_node, is_async=is_async)
    return ContextSet.from_sets(lazy.infer() for lazy in lazy_contexts)
-
-
class TreeContext(Context):
    """A context that is backed by a concrete syntax-tree node."""

    def __init__(self, evaluator, parent_context, tree_node):
        super(TreeContext, self).__init__(evaluator, parent_context)
        # Per-instance storage shadowing the class-level attributes.
        self.predefined_names = {}
        self.tree_node = tree_node

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
-
-
class ContextualizedNode(object):
    """A syntax-tree node paired with the context it appears in."""

    def __init__(self, context, node):
        self.context = context
        self.node = node

    def get_root_context(self):
        # The context knows its own module/root.
        return self.context.get_root_context()

    def infer(self):
        # Evaluate the wrapped node inside its context.
        return self.context.eval_node(self.node)
-
-
class ContextualizedName(ContextualizedNode):
    # TODO merge with TreeNameDefinition?!

    @property
    def name(self):
        return self.node

    def assignment_indexes(self):
        """
        Returns an array of tuple(int, node) of the indexes that are used in
        tuple assignments.

        For example if the name is ``y`` in the following code::

            x, (y, z) = 2, ''

        would result in ``[(1, xyz_node), (0, yz_node)]``.
        """
        found = []
        current = self.node
        parent = self.node.parent
        while parent is not None:
            if parent.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
                for position, child in enumerate(parent.children):
                    if child == current:
                        # Children alternate with comma tokens -> halve.
                        found.append((position // 2, parent))
                        break
                else:
                    raise LookupError("Couldn't find the assignment.")
            elif isinstance(parent, (ExprStmt, CompFor)):
                break

            current = parent
            parent = parent.parent
        # Discovery walked inside-out; callers expect outermost first.
        return list(reversed(found))
-
-
class ContextSet(BaseContextSet):
    """A set of contexts; the usual result type of an evaluation."""

    def py__class__(self):
        # The merged classes of all contained contexts.
        return ContextSet.from_iterable(context.py__class__() for context in self._set)

    def iterate(self, contextualized_node=None, is_async=False):
        from jedi.evaluate.lazy_context import get_merged_lazy_context
        per_context = [
            context.iterate(contextualized_node, is_async=is_async)
            for context in self._set
        ]
        for step in zip_longest(*per_context):
            yield get_merged_lazy_context(
                [lazy for lazy in step if lazy is not None]
            )
-
-
-NO_CONTEXTS = ContextSet()
-
-
def iterator_to_context_set(func):
    """Decorator collecting a generator function's results into a ContextSet."""
    def wrapper(*args, **kwargs):
        return ContextSet.from_iterable(func(*args, **kwargs))

    return wrapper
diff --git a/contrib/python/jedi/jedi/evaluate/cache.py b/contrib/python/jedi/jedi/evaluate/cache.py
deleted file mode 100644
index c619e698a3..0000000000
--- a/contrib/python/jedi/jedi/evaluate/cache.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-- the popular ``_memoize_default`` works like a typical memoize and returns the
- default otherwise.
-- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
-"""
-
-_NO_DEFAULT = object()
-
-
-def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False):
- """ This is a typical memoization decorator, BUT there is one difference:
- To prevent recursion it sets defaults.
-
- Preventing recursion is in this case the much bigger use than speed. I
- don't think, that there is a big speed difference, but there are many cases
- where recursion could happen (think about a = b; b = a).
- """
- def func(function):
- def wrapper(obj, *args, **kwargs):
- # TODO These checks are kind of ugly and slow.
- if evaluator_is_first_arg:
- cache = obj.memoize_cache
- elif second_arg_is_evaluator:
- cache = args[0].memoize_cache # needed for meta classes
- else:
- cache = obj.evaluator.memoize_cache
-
- try:
- memo = cache[function]
- except KeyError:
- memo = {}
- cache[function] = memo
-
- key = (obj, args, frozenset(kwargs.items()))
- if key in memo:
- return memo[key]
- else:
- if default is not _NO_DEFAULT:
- memo[key] = default
- rv = function(obj, *args, **kwargs)
- memo[key] = rv
- return rv
- return wrapper
-
- return func
-
-
def evaluator_function_cache(default=_NO_DEFAULT):
    """Memoize a function whose first argument is the evaluator itself."""
    def decorator(func):
        memoizer = _memoize_default(default=default, evaluator_is_first_arg=True)
        return memoizer(func)

    return decorator
-
-
def evaluator_method_cache(default=_NO_DEFAULT):
    """Memoize a method; the cache lives on ``self.evaluator``."""
    def decorator(func):
        memoizer = _memoize_default(default=default)
        return memoizer(func)

    return decorator
-
-
def evaluator_as_method_param_cache():
    """Memoize a callable whose second argument is the evaluator."""
    def decorator(call):
        memoizer = _memoize_default(second_arg_is_evaluator=True)
        return memoizer(call)

    return decorator
-
-
class CachedMetaClass(type):
    """
    Basically almost the same as the decorator above, except that it caches
    class initializations instead of plain calls. Either you do it this way
    or with decorators, but with decorators you lose class access
    (isinstance, etc).
    """
    @evaluator_as_method_param_cache()
    def __call__(self, *args, **kwargs):
        return super(CachedMetaClass, self).__call__(*args, **kwargs)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/__init__.py b/contrib/python/jedi/jedi/evaluate/compiled/__init__.py
deleted file mode 100644
index 757aec5234..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from jedi._compatibility import unicode
-from jedi.evaluate.compiled.context import CompiledObject, CompiledName, \
- CompiledObjectFilter, CompiledContextName, create_from_access_path, \
- create_from_name
-
-
def builtin_from_name(evaluator, string):
    """Create a compiled object for the builtin named ``string``."""
    builtins_module = evaluator.builtins_module
    return create_from_name(evaluator, builtins_module, string)
-
-
def create_simple_object(evaluator, obj):
    """
    Only allows creations of objects that are easily picklable across Python
    versions.
    """
    simple_types = (int, float, str, bytes, unicode, slice, complex)
    assert isinstance(obj, simple_types)
    access_path = evaluator.compiled_subprocess.create_simple_object(obj)
    return create_from_access_path(evaluator, access_path)
-
-
def get_special_object(evaluator, identifier):
    """Fetch one of the well-known singleton objects by its identifier."""
    access_path = evaluator.compiled_subprocess.get_special_object(identifier)
    return create_from_access_path(evaluator, access_path)
-
-
def get_string_context_set(evaluator):
    # An executed ``str``: the context set representing any string value.
    str_class = builtin_from_name(evaluator, u'str')
    return str_class.execute_evaluated()
-
-
def load_module(evaluator, dotted_name, **kwargs):
    """Load a compiled module via the subprocess; returns None on failure."""
    # Temporary, some tensorflow builtins cannot be loaded, so it's tried again
    # and again and it's really slow.
    if dotted_name.startswith('tensorflow.'):
        return None
    access_path = evaluator.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
    if access_path is None:
        return None
    return create_from_access_path(evaluator, access_path)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/access.py b/contrib/python/jedi/jedi/evaluate/compiled/access.py
deleted file mode 100644
index ab1d3545cf..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/access.py
+++ /dev/null
@@ -1,483 +0,0 @@
-import inspect
-import types
-import sys
-from textwrap import dedent
-import operator as op
-from collections import namedtuple
-
-from jedi._compatibility import unicode, is_py3, builtins, \
- py_version, force_unicode, print_to_stderr
-from jedi.evaluate.compiled.getattr_static import getattr_static
-
-
# ``str.replace`` is a C-level "method descriptor"; its type has no public
# name in the ``types`` module, so it is derived here.
MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
# a __class__ attribute.
NOT_CLASS_TYPES = (
    types.BuiltinFunctionType,
    types.CodeType,
    types.FrameType,
    types.FunctionType,
    types.GeneratorType,
    types.GetSetDescriptorType,
    types.LambdaType,
    types.MemberDescriptorType,
    types.MethodType,
    types.ModuleType,
    types.TracebackType,
    MethodDescriptorType
)

if is_py3:
    # These type objects only exist on Python 3.
    NOT_CLASS_TYPES += (
        types.MappingProxyType,
        types.SimpleNamespace,
        types.DynamicClassAttribute,
    )


# Those types don't exist in typing.
# NOTE(review): MethodDescriptorType is re-assigned here with the same value
# as above — redundant but harmless.
MethodDescriptorType = type(str.replace)
WrapperDescriptorType = type(set.__iter__)
# `object.__subclasshook__` is an already executed descriptor.
object_class_dict = type.__dict__["__dict__"].__get__(object)
ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
-
-def _a_generator(foo):
- """Used to have an object to return for generators."""
- yield 42
- yield foo
-
-
# Sentinel: distinguishes "no default provided" from an explicit None.
_sentinel = object()

# Maps Python syntax to the operator module.
COMPARISON_OPERATORS = {
    '==': op.eq,
    '!=': op.ne,
    'is': op.is_,
    'is not': op.is_not,
    '<': op.lt,
    '<=': op.le,
    '>': op.gt,
    '>=': op.ge,
}

# Operators that may be executed directly on live objects (comparisons plus
# the two arithmetic ones below).
_OPERATORS = {
    '+': op.add,
    '-': op.sub,
}
_OPERATORS.update(COMPARISON_OPERATORS)

# Descriptor types whose __get__ is considered side-effect free, so reading
# the attribute value through them is allowed.
ALLOWED_DESCRIPTOR_ACCESS = (
    types.FunctionType,
    types.GetSetDescriptorType,
    types.MemberDescriptorType,
    MethodDescriptorType,
    WrapperDescriptorType,
    ClassMethodDescriptorType,
    staticmethod,
    classmethod,
)
-
-
def safe_getattr(obj, name, default=_sentinel):
    """``getattr`` that avoids executing arbitrary descriptor getters.

    Only descriptors of explicitly whitelisted types are invoked; everything
    else is returned as found on the class, unevaluated.
    """
    try:
        attr, is_get_descriptor = getattr_static(obj, name)
    except AttributeError:
        if default is _sentinel:
            raise
        return default
    if type(attr) in ALLOWED_DESCRIPTOR_ACCESS:
        # These descriptor types are considered safe to execute.
        return getattr(obj, name)
    # In case of descriptors that have get methods we cannot return
    # its value, because that would mean code execution.
    return attr
-
-
# Plain-data description of one parameter of a compiled signature; default
# and annotation are already converted to access paths.
SignatureParam = namedtuple(
    'SignatureParam',
    'name has_default default has_annotation annotation kind_name'
)
-
-
def compiled_objects_cache(attribute_name):
    """Cache ``func(evaluator, obj[, parent_context])`` results by ``id(obj)``.

    This caches just the ids, opposed to caching the object itself.
    Caching the id has the advantage that an object doesn't need to be
    hashable.

    :param attribute_name: name of the dict attribute on the evaluator that
        holds the cache entries.
    """
    def decorator(func):
        def wrapper(evaluator, obj, parent_context=None):
            cache = getattr(evaluator, attribute_name)
            # Do a very cheap form of caching here.
            key = id(obj)
            try:
                # Single lookup (the original performed the lookup twice).
                return cache[key][0]
            except KeyError:
                # TODO wuaaaarrghhhhhhhh
                if attribute_name == 'mixed_cache':
                    result = func(evaluator, obj, parent_context)
                else:
                    result = func(evaluator, obj)
                # Need to cache obj itself as well, otherwise its id could be
                # reused once obj is garbage collected.
                cache[key] = result, obj, parent_context
                return result
        return wrapper

    return decorator
-
-
def create_access(evaluator, obj):
    """Obtain (or create) the subprocess access handle wrapping ``obj``."""
    subprocess = evaluator.compiled_subprocess
    return subprocess.get_or_create_access_handle(obj)
-
-
def load_module(evaluator, dotted_name, sys_path):
    """Import ``dotted_name`` using ``sys_path`` and wrap it in an access path.

    Returns None on any import failure; errors are printed to stderr rather
    than raised, because __import__ may execute arbitrary module code.
    """
    # Temporarily swap in the requested sys.path for the import.
    temp, sys.path = sys.path, sys_path
    try:
        __import__(dotted_name)
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
        print_to_stderr('Module %s not importable in path %s.' % (dotted_name, sys_path))
        return None
    except Exception:
        # Since __import__ pretty much makes code execution possible, just
        # catch any error here and print it.
        import traceback
        print_to_stderr("Cannot import:\n%s" % traceback.format_exc())
        return None
    finally:
        sys.path = temp

    # Just access the cache after import, because of #59 as well as the very
    # complicated import structure of Python.
    module = sys.modules[dotted_name]
    return create_access_path(evaluator, module)
-
-
class AccessPath(object):
    """A picklable chain of accesses leading to one object."""

    def __init__(self, accesses):
        self.accesses = accesses

    # Writing both of these methods here looks a bit ridiculous. However with
    # the differences of Python 2/3 it's actually necessary, because we will
    # otherwise have a accesses attribute that is bytes instead of unicode.
    def __getstate__(self):
        return self.accesses

    def __setstate__(self, value):
        self.accesses = value
-
-
def create_access_path(evaluator, obj):
    """Build the full AccessPath for ``obj``."""
    handle = create_access(evaluator, obj)
    return AccessPath(handle.get_access_path_tuples())
-
-
def _force_unicode_decorator(func):
    # Wrap ``func`` so that its return value is always coerced to unicode.
    def wrapper(*args, **kwargs):
        return force_unicode(func(*args, **kwargs))
    return wrapper
-
-
class DirectObjectAccess(object):
    """Reflection facade over a live Python object.

    All object inspection the evaluator needs is funnelled through this
    class, so that arbitrary user ``__getattr__``/property code is executed
    as rarely as possible.
    """

    def __init__(self, evaluator, obj):
        self._evaluator = evaluator
        self._obj = obj

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.get_repr())

    def _create_access(self, obj):
        return create_access(self._evaluator, obj)

    def _create_access_path(self, obj):
        return create_access_path(self._evaluator, obj)

    def py__bool__(self):
        return bool(self._obj)

    def py__file__(self):
        try:
            return self._obj.__file__
        except AttributeError:
            # E.g. builtin modules have no __file__.
            return None

    def py__doc__(self, include_call_signature=False):
        return force_unicode(inspect.getdoc(self._obj)) or u''

    def py__name__(self):
        # For class instances return the name of their class; for classes
        # themselves (and slot descriptors) the name of the object.
        if not _is_class_instance(self._obj) or \
                inspect.ismethoddescriptor(self._obj):  # slots
            cls = self._obj
        else:
            try:
                cls = self._obj.__class__
            except AttributeError:
                # happens with numpy.core.umath._UFUNC_API (you get it
                # automatically by doing `import numpy`.
                return None

        try:
            return force_unicode(cls.__name__)
        except AttributeError:
            return None

    def py__mro__accesses(self):
        # Access paths for all MRO bases, excluding the class itself.
        return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])

    def py__getitem__(self, index):
        if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return None

        return self._create_access_path(self._obj[index])

    def py__iter__list(self):
        if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return []

        lst = []
        for i, part in enumerate(self._obj):
            if i > 20:
                # Should not go crazy with large iterators
                break
            lst.append(self._create_access_path(part))
        return lst

    def py__class__(self):
        return self._create_access_path(self._obj.__class__)

    def py__bases__(self):
        return [self._create_access_path(base) for base in self._obj.__bases__]

    def py__path__(self):
        return self._obj.__path__

    @_force_unicode_decorator
    def get_repr(self):
        # Only call repr() where it is reasonably safe: modules, builtins
        # and plain types; everything else falls back to object.__repr__.
        builtins = 'builtins', '__builtin__'

        if inspect.ismodule(self._obj):
            return repr(self._obj)
        # Try to avoid execution of the property.
        if safe_getattr(self._obj, '__module__', default='') in builtins:
            return repr(self._obj)

        type_ = type(self._obj)
        if type_ == type:
            return type.__repr__(self._obj)

        if safe_getattr(type_, '__module__', default='') in builtins:
            # Allow direct execution of repr for builtins.
            return repr(self._obj)
        return object.__repr__(self._obj)

    def is_class(self):
        return inspect.isclass(self._obj)

    def ismethoddescriptor(self):
        return inspect.ismethoddescriptor(self._obj)

    def dir(self):
        return list(map(force_unicode, dir(self._obj)))

    def has_iter(self):
        try:
            iter(self._obj)
            return True
        except TypeError:
            return False

    def is_allowed_getattr(self, name):
        # TODO this API is ugly.
        # Returns (attribute_exists, needs_unsafe_descriptor_execution).
        try:
            attr, is_get_descriptor = getattr_static(self._obj, name)
        except AttributeError:
            return False, False
        else:
            if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
                # In case of descriptors that have get methods we cannot return
                # it's value, because that would mean code execution.
                return True, True
            return True, False

    def getattr(self, name, default=_sentinel):
        try:
            return self._create_access(getattr(self._obj, name))
        except AttributeError:
            # Happens e.g. in properties of
            # PyQt4.QtGui.QStyleOptionComboBox.currentText
            # -> just set it to None
            if default is _sentinel:
                raise
            return self._create_access(default)

    def get_safe_value(self):
        # Only yield the raw value for simple, side-effect-free types.
        if type(self._obj) in (bool, bytes, float, int, str, unicode, slice):
            return self._obj
        raise ValueError("Object is type %s and not simple" % type(self._obj))

    def get_api_type(self):
        obj = self._obj
        if self.is_class():
            return u'class'
        elif inspect.ismodule(obj):
            return u'module'
        elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
                or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
            return u'function'
        # Everything else...
        return u'instance'

    def get_access_path_tuples(self):
        accesses = [create_access(self._evaluator, o) for o in self._get_objects_path()]
        return [(access.py__name__(), access) for access in accesses]

    def _get_objects_path(self):
        # Builds the chain module -> (defining class) -> object,
        # returned outermost first.
        def get():
            obj = self._obj
            yield obj
            try:
                obj = obj.__objclass__
            except AttributeError:
                pass
            else:
                yield obj

            try:
                # Returns a dotted string path.
                imp_plz = obj.__module__
            except AttributeError:
                # Unfortunately in some cases like `int` there's no __module__
                if not inspect.ismodule(obj):
                    yield builtins
            else:
                if imp_plz is None:
                    # Happens for example in `(_ for _ in []).send.__module__`.
                    yield builtins
                else:
                    try:
                        # TODO use sys.modules, __module__ can be faked.
                        yield sys.modules[imp_plz]
                    except KeyError:
                        # __module__ can be something arbitrary that doesn't exist.
                        yield builtins

        return list(reversed(list(get())))

    def execute_operation(self, other_access_handle, operator):
        other_access = other_access_handle.access
        op = _OPERATORS[operator]
        return self._create_access_path(op(self._obj, other_access._obj))

    def needs_type_completions(self):
        return inspect.isclass(self._obj) and self._obj != type

    def get_signature_params(self):
        # Translate inspect.signature() into plain SignatureParam tuples.
        # Raises ValueError when no reliable signature can be determined.
        obj = self._obj
        if py_version < 33:
            raise ValueError("inspect.signature was introduced in 3.3")
        if py_version == 34:
            # In 3.4 inspect.signature are wrong for str and int. This has
            # been fixed in 3.5. The signature of object is returned,
            # because no signature was found for str. Here we imitate 3.5
            # logic and just ignore the signature if the magic methods
            # don't match object.
            # 3.3 doesn't even have the logic and returns nothing for str
            # and classes that inherit from object.
            user_def = inspect._signature_get_user_defined_method
            if (inspect.isclass(obj)
                    and not user_def(type(obj), '__init__')
                    and not user_def(type(obj), '__new__')
                    and (obj.__init__ != object.__init__
                         or obj.__new__ != object.__new__)):
                raise ValueError

        try:
            signature = inspect.signature(obj)
        except (RuntimeError, TypeError):
            # Reading the code of the function in Python 3.6 implies there are
            # at least these errors that might occur if something is wrong with
            # the signature. In that case we just want a simple escape for now.
            raise ValueError
        return [
            SignatureParam(
                name=p.name,
                has_default=p.default is not p.empty,
                default=self._create_access_path(p.default),
                has_annotation=p.annotation is not p.empty,
                annotation=self._create_access_path(p.annotation),
                kind_name=str(p.kind)
            ) for p in signature.parameters.values()
        ]

    def negate(self):
        return self._create_access_path(-self._obj)

    def dict_values(self):
        return [self._create_access_path(v) for v in self._obj.values()]

    def is_super_class(self, exception):
        return issubclass(exception, self._obj)

    def get_dir_infos(self):
        """
        Used to return a couple of infos that are needed when accessing the sub
        objects of an objects
        """
        # TODO is_allowed_getattr might raise an AttributeError
        tuples = dict(
            (force_unicode(name), self.is_allowed_getattr(name))
            for name in self.dir()
        )
        return self.needs_type_completions(), tuples
-
-
def _is_class_instance(obj):
    """Like inspect.* methods."""
    cls = getattr(obj, '__class__', None)
    if cls is None:
        # No __class__ at all -> certainly not a class instance.
        return False
    return cls != type and not issubclass(cls, NOT_CLASS_TYPES)
-
-
if py_version >= 35:
    # ``async def`` is a syntax error on older interpreters, so the coroutine
    # prototype is created via exec() to keep this module importable there.
    # The exec runs in module scope and therefore defines _coroutine and
    # CoroutineType as module globals.
    exec(compile(dedent("""
    async def _coroutine(): pass
    _coroutine = _coroutine()
    CoroutineType = type(_coroutine)
    _coroutine.close() # Prevent ResourceWarning
    """), 'blub', 'exec'))
    _coroutine_wrapper = _coroutine.__await__()
else:
    _coroutine = None
    _coroutine_wrapper = None

if py_version >= 36:
    # Same trick for async generators (3.6+ only syntax).
    exec(compile(dedent("""
    async def _async_generator():
        yield
    _async_generator = _async_generator()
    AsyncGeneratorType = type(_async_generator)
    """), 'blub', 'exec'))
else:
    _async_generator = None
-
class _SPECIAL_OBJECTS(object):
    # Registry of well-known singleton objects that can be requested by
    # identifier through get_special_object() below.
    FUNCTION_CLASS = types.FunctionType
    BOUND_METHOD_CLASS = type(DirectObjectAccess(None, None).py__bool__)
    MODULE_CLASS = types.ModuleType
    GENERATOR_OBJECT = _a_generator(1.0)
    BUILTINS = builtins
    COROUTINE = _coroutine
    COROUTINE_WRAPPER = _coroutine_wrapper
    ASYNC_GENERATOR = _async_generator
-
-
def get_special_object(evaluator, identifier):
    """Look up a special object by identifier and wrap it in an access path."""
    special = getattr(_SPECIAL_OBJECTS, identifier)
    return create_access_path(evaluator, special)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/context.py b/contrib/python/jedi/jedi/evaluate/compiled/context.py
deleted file mode 100644
index 631c1a74ea..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/context.py
+++ /dev/null
@@ -1,483 +0,0 @@
-"""
-Imitate the parser representation.
-"""
-import re
-from functools import partial
-
-from jedi import debug
-from jedi._compatibility import force_unicode, Parameter
-from jedi.cache import underscore_memoization, memoize_method
-from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
- ContextNameMixin
-from jedi.evaluate.base_context import Context, ContextSet
-from jedi.evaluate.lazy_context import LazyKnownContext
-from jedi.evaluate.compiled.access import _sentinel
-from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate.helpers import reraise_as_evaluator
-from . import fake
-
-
-class CheckAttribute(object):
- """Raises an AttributeError if the attribute X isn't available."""
- def __init__(self, func):
- self.func = func
- # Remove the py in front of e.g. py__call__.
- self.check_name = force_unicode(func.__name__[2:])
-
- def __get__(self, instance, owner):
- if instance is None:
- return self
-
- # This might raise an AttributeError. That's wanted.
- if self.check_name == '__iter__':
- # Python iterators are a bit strange, because there's no need for
- # the __iter__ function as long as __getitem__ is defined (it will
- # just start with __getitem__(0). This is especially true for
- # Python 2 strings, where `str.__iter__` is not even defined.
- if not instance.access_handle.has_iter():
- raise AttributeError
- else:
- instance.access_handle.getattr(self.check_name)
- return partial(self.func, instance)
-
-
-class CompiledObject(Context):
- def __init__(self, evaluator, access_handle, parent_context=None, faked_class=None):
- super(CompiledObject, self).__init__(evaluator, parent_context)
- self.access_handle = access_handle
- # This attribute will not be set for most classes, except for fakes.
- self.tree_node = faked_class
-
- @CheckAttribute
- def py__call__(self, params):
- if self.tree_node is not None and self.tree_node.type == 'funcdef':
- from jedi.evaluate.context.function import FunctionContext
- return FunctionContext(
- self.evaluator,
- parent_context=self.parent_context,
- tree_node=self.tree_node
- ).py__call__(params)
- if self.access_handle.is_class():
- from jedi.evaluate.context import CompiledInstance
- return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params))
- else:
- return ContextSet.from_iterable(self._execute_function(params))
-
- @CheckAttribute
- def py__class__(self):
- return create_from_access_path(self.evaluator, self.access_handle.py__class__())
-
- @CheckAttribute
- def py__mro__(self):
- return (self,) + tuple(
- create_from_access_path(self.evaluator, access)
- for access in self.access_handle.py__mro__accesses()
- )
-
- @CheckAttribute
- def py__bases__(self):
- return tuple(
- create_from_access_path(self.evaluator, access)
- for access in self.access_handle.py__bases__()
- )
-
- @CheckAttribute
- def py__path__(self):
- return self.access_handle.py__path__()
-
- def py__bool__(self):
- return self.access_handle.py__bool__()
-
- def py__file__(self):
- return self.access_handle.py__file__()
-
- def is_class(self):
- return self.access_handle.is_class()
-
- def py__doc__(self, include_call_signature=False):
- return self.access_handle.py__doc__()
-
- def get_param_names(self):
- try:
- signature_params = self.access_handle.get_signature_params()
- except ValueError: # Has no signature
- params_str, ret = self._parse_function_doc()
- tokens = params_str.split(',')
- if self.access_handle.ismethoddescriptor():
- tokens.insert(0, 'self')
- for p in tokens:
- parts = p.strip().split('=')
- yield UnresolvableParamName(self, parts[0])
- else:
- for signature_param in signature_params:
- yield SignatureParamName(self, signature_param)
-
- def __repr__(self):
- return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())
-
- @underscore_memoization
- def _parse_function_doc(self):
- doc = self.py__doc__()
- if doc is None:
- return '', ''
-
- return _parse_function_doc(doc)
-
- @property
- def api_type(self):
- return self.access_handle.get_api_type()
-
- @underscore_memoization
- def _cls(self):
- """
- We used to limit the lookups for instantiated objects like list(), but
- this is not the case anymore. Python itself
- """
- # Ensures that a CompiledObject is returned that is not an instance (like list)
- return self
-
- def get_filters(self, search_global=False, is_instance=False,
- until_position=None, origin_scope=None):
- yield self._ensure_one_filter(is_instance)
-
- @memoize_method
- def _ensure_one_filter(self, is_instance):
- """
- search_global shouldn't change the fact that there's one dict, this way
- there's only one `object`.
- """
- return CompiledObjectFilter(self.evaluator, self, is_instance)
-
- @CheckAttribute
- def py__getitem__(self, index):
- with reraise_as_evaluator(IndexError, KeyError, TypeError):
- access = self.access_handle.py__getitem__(index)
- if access is None:
- return ContextSet()
-
- return ContextSet(create_from_access_path(self.evaluator, access))
-
- @CheckAttribute
- def py__iter__(self):
- for access in self.access_handle.py__iter__list():
- yield LazyKnownContext(create_from_access_path(self.evaluator, access))
-
- def py__name__(self):
- return self.access_handle.py__name__()
-
- @property
- def name(self):
- name = self.py__name__()
- if name is None:
- name = self.access_handle.get_repr()
- return CompiledContextName(self, name)
-
- def _execute_function(self, params):
- from jedi.evaluate import docstrings
- from jedi.evaluate.compiled import builtin_from_name
- if self.api_type != 'function':
- return
-
- for name in self._parse_function_doc()[1].split():
- try:
- # TODO wtf is this? this is exactly the same as the thing
- # below. It uses getattr as well.
- self.evaluator.builtins_module.access_handle.getattr(name)
- except AttributeError:
- continue
- else:
- bltn_obj = builtin_from_name(self.evaluator, name)
- for result in bltn_obj.execute(params):
- yield result
- for type_ in docstrings.infer_return_types(self):
- yield type_
-
- def dict_values(self):
- return ContextSet.from_iterable(
- create_from_access_path(self.evaluator, access)
- for access in self.access_handle.dict_values()
- )
-
- def get_safe_value(self, default=_sentinel):
- try:
- return self.access_handle.get_safe_value()
- except ValueError:
- if default == _sentinel:
- raise
- return default
-
- def execute_operation(self, other, operator):
- return create_from_access_path(
- self.evaluator,
- self.access_handle.execute_operation(other.access_handle, operator)
- )
-
- def negate(self):
- return create_from_access_path(self.evaluator, self.access_handle.negate())
-
- def is_super_class(self, exception):
- return self.access_handle.is_super_class(exception)
-
-
-class CompiledName(AbstractNameDefinition):
- def __init__(self, evaluator, parent_context, name):
- self._evaluator = evaluator
- self.parent_context = parent_context
- self.string_name = name
-
- def __repr__(self):
- try:
- name = self.parent_context.name # __name__ is not defined all the time
- except AttributeError:
- name = None
- return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name)
-
- @property
- def api_type(self):
- return next(iter(self.infer())).api_type
-
- @underscore_memoization
- def infer(self):
- return ContextSet(create_from_name(
- self._evaluator, self.parent_context, self.string_name
- ))
-
-
-class SignatureParamName(AbstractNameDefinition):
- api_type = u'param'
-
- def __init__(self, compiled_obj, signature_param):
- self.parent_context = compiled_obj.parent_context
- self._signature_param = signature_param
-
- @property
- def string_name(self):
- return self._signature_param.name
-
- def get_kind(self):
- return getattr(Parameter, self._signature_param.kind_name)
-
- def is_keyword_param(self):
- return self._signature_param
-
- def infer(self):
- p = self._signature_param
- evaluator = self.parent_context.evaluator
- contexts = ContextSet()
- if p.has_default:
- contexts = ContextSet(create_from_access_path(evaluator, p.default))
- if p.has_annotation:
- annotation = create_from_access_path(evaluator, p.annotation)
- contexts |= annotation.execute_evaluated()
- return contexts
-
-
-class UnresolvableParamName(AbstractNameDefinition):
- api_type = u'param'
-
- def __init__(self, compiled_obj, name):
- self.parent_context = compiled_obj.parent_context
- self.string_name = name
-
- def get_kind(self):
- return Parameter.POSITIONAL_ONLY
-
- def infer(self):
- return ContextSet()
-
-
-class CompiledContextName(ContextNameMixin, AbstractNameDefinition):
- def __init__(self, context, name):
- self.string_name = name
- self._context = context
- self.parent_context = context.parent_context
-
-
-class EmptyCompiledName(AbstractNameDefinition):
- """
- Accessing some names will raise an exception. To avoid not having any
- completions, just give Jedi the option to return this object. It infers to
- nothing.
- """
- def __init__(self, evaluator, name):
- self.parent_context = evaluator.builtins_module
- self.string_name = name
-
- def infer(self):
- return ContextSet()
-
-
-class CompiledObjectFilter(AbstractFilter):
- name_class = CompiledName
-
- def __init__(self, evaluator, compiled_object, is_instance=False):
- self._evaluator = evaluator
- self._compiled_object = compiled_object
- self._is_instance = is_instance
-
- def get(self, name):
- return self._get(
- name,
- lambda: self._compiled_object.access_handle.is_allowed_getattr(name),
- lambda: self._compiled_object.access_handle.dir(),
- check_has_attribute=True
- )
-
- def _get(self, name, allowed_getattr_callback, dir_callback, check_has_attribute=False):
- """
- To remove quite a few access calls we introduced the callback here.
- """
- has_attribute, is_descriptor = allowed_getattr_callback()
- if check_has_attribute and not has_attribute:
- return []
-
- # Always use unicode objects in Python 2 from here.
- name = force_unicode(name)
-
- if is_descriptor or not has_attribute:
- return [self._get_cached_name(name, is_empty=True)]
-
- if self._is_instance and name not in dir_callback():
- return []
- return [self._get_cached_name(name)]
-
- @memoize_method
- def _get_cached_name(self, name, is_empty=False):
- if is_empty:
- return EmptyCompiledName(self._evaluator, name)
- else:
- return self._create_name(name)
-
- def values(self):
- from jedi.evaluate.compiled import builtin_from_name
- names = []
- needs_type_completions, dir_infos = self._compiled_object.access_handle.get_dir_infos()
- for name in dir_infos:
- names += self._get(
- name,
- lambda: dir_infos[name],
- lambda: dir_infos.keys(),
- )
-
- # ``dir`` doesn't include the type names.
- if not self._is_instance and needs_type_completions:
- for filter in builtin_from_name(self._evaluator, u'type').get_filters():
- names += filter.values()
- return names
-
- def _create_name(self, name):
- return self.name_class(self._evaluator, self._compiled_object, name)
-
-
-docstr_defaults = {
- 'floating point number': u'float',
- 'character': u'str',
- 'integer': u'int',
- 'dictionary': u'dict',
- 'string': u'str',
-}
-
-
-def _parse_function_doc(doc):
- """
- Takes a function and returns the params and return value as a tuple.
- This is nothing more than a docstring parser.
-
- TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
- TODO docstrings like 'tuple of integers'
- """
- doc = force_unicode(doc)
- # parse round parentheses: def func(a, (b,c))
- try:
- count = 0
- start = doc.index('(')
- for i, s in enumerate(doc[start:]):
- if s == '(':
- count += 1
- elif s == ')':
- count -= 1
- if count == 0:
- end = start + i
- break
- param_str = doc[start + 1:end]
- except (ValueError, UnboundLocalError):
- # ValueError for doc.index
- # UnboundLocalError for undefined end in last line
- debug.dbg('no brackets found - no param')
- end = 0
- param_str = u''
- else:
- # remove square brackets, that show an optional param ( = None)
- def change_options(m):
- args = m.group(1).split(',')
- for i, a in enumerate(args):
- if a and '=' not in a:
- args[i] += '=None'
- return ','.join(args)
-
- while True:
- param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
- change_options, param_str)
- if changes == 0:
- break
- param_str = param_str.replace('-', '_') # see: isinstance.__doc__
-
- # parse return value
- r = re.search(u'-[>-]* ', doc[end:end + 7])
- if r is None:
- ret = u''
- else:
- index = end + r.end()
- # get result type, which can contain newlines
- pattern = re.compile(r'(,\n|[^\n-])+')
- ret_str = pattern.match(doc, index).group(0).strip()
- # New object -> object()
- ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
-
- ret = docstr_defaults.get(ret_str, ret_str)
-
- return param_str, ret
-
-
-def create_from_name(evaluator, compiled_object, name):
- faked = None
- try:
- faked = fake.get_faked_with_parent_context(compiled_object, name)
- except fake.FakeDoesNotExist:
- pass
-
- access = compiled_object.access_handle.getattr(name, default=None)
- parent_context = compiled_object
- if parent_context.is_class():
- parent_context = parent_context.parent_context
- return create_cached_compiled_object(
- evaluator, access, parent_context=parent_context, faked=faked
- )
-
-
-def _normalize_create_args(func):
- """The cache doesn't care about keyword vs. normal args."""
- def wrapper(evaluator, obj, parent_context=None, faked=None):
- return func(evaluator, obj, parent_context, faked)
- return wrapper
-
-
-def create_from_access_path(evaluator, access_path):
- parent_context = None
- for name, access in access_path.accesses:
- try:
- if parent_context is None:
- faked = fake.get_faked_module(evaluator, access_path.accesses[0][0])
- else:
- faked = fake.get_faked_with_parent_context(parent_context, name)
- except fake.FakeDoesNotExist:
- faked = None
-
- parent_context = create_cached_compiled_object(evaluator, access, parent_context, faked)
- return parent_context
-
-
-@_normalize_create_args
-@evaluator_function_cache()
-def create_cached_compiled_object(evaluator, access_handle, parent_context, faked):
- return CompiledObject(evaluator, access_handle, parent_context, faked)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake.py b/contrib/python/jedi/jedi/evaluate/compiled/fake.py
deleted file mode 100644
index a38ff34938..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""
-Loads functions that are mixed in to the standard library. E.g. builtins are
-written in C (binaries), but my autocompletion only understands Python code. By
-mixing in Python code, the autocompletion should work much better for builtins.
-"""
-import sys
-import os
-from itertools import chain
-
-import __res
-
-from jedi._compatibility import unicode
-
-fake_modules = {}
-
-
-def _get_path_dict():
- path = os.path.dirname(__file__)
- base_path = os.path.join(path, 'fake')
- dct = {}
- for file_name in __res.resfs_files():
- if sys.version_info[0] == 3:
- file_name = str(file_name, 'ascii')
- if file_name.startswith(base_path) and file_name.endswith('.pym'):
- dct[file_name[len(base_path) + 1:-4]] = file_name
- return dct
-
-
-_path_dict = _get_path_dict()
-
-
-class FakeDoesNotExist(Exception):
- pass
-
-
-def _load_faked_module(evaluator, module_name):
- try:
- return fake_modules[module_name]
- except KeyError:
- pass
-
- check_module_name = module_name
- if module_name == '__builtin__' and evaluator.environment.version_info.major == 2:
- check_module_name = 'builtins'
-
- try:
- path = _path_dict[check_module_name]
- except KeyError:
- fake_modules[module_name] = None
- return
-
- if sys.version_info[0] == 3:
- path = bytes(path, 'ascii')
- source = __res.resfs_read(path)
-
- fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source))
-
- if check_module_name != module_name:
- # There are two implementations of `open` for either python 2/3.
- # -> Rename the python2 version (`look at fake/builtins.pym`).
- open_func = _search_scope(m, 'open')
- open_func.children[1].value = 'open_python3'
- open_func = _search_scope(m, 'open_python2')
- open_func.children[1].value = 'open'
- return m
-
-
-def _search_scope(scope, obj_name):
- for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
- if s.name.value == obj_name:
- return s
-
-
-def get_faked_with_parent_context(parent_context, name):
- if parent_context.tree_node is not None:
- # Try to search in already clearly defined stuff.
- found = _search_scope(parent_context.tree_node, name)
- if found is not None:
- return found
- raise FakeDoesNotExist
-
-
-def get_faked_module(evaluator, string_name):
- module = _load_faked_module(evaluator, string_name)
- if module is None:
- raise FakeDoesNotExist
- return module
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym
deleted file mode 100644
index 909ef03fc3..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym
+++ /dev/null
@@ -1,9 +0,0 @@
-class partial():
- def __init__(self, func, *args, **keywords):
- self.__func = func
- self.__args = args
- self.__keywords = keywords
-
- def __call__(self, *args, **kwargs):
- # TODO should be **dict(self.__keywords, **kwargs)
- return self.__func(*(self.__args + args), **self.__keywords)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym
deleted file mode 100644
index 2151e652b4..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym
+++ /dev/null
@@ -1,26 +0,0 @@
-def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None):
- return Connection()
-
-
-class Connection():
- def cursor(self):
- return Cursor()
-
-
-class Cursor():
- def cursor(self):
- return Cursor()
-
- def fetchone(self):
- return Row()
-
- def fetchmany(self, size=cursor.arraysize):
- return [self.fetchone()]
-
- def fetchall(self):
- return [self.fetchone()]
-
-
-class Row():
- def keys(self):
- return ['']
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym
deleted file mode 100644
index 217be56339..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym
+++ /dev/null
@@ -1,99 +0,0 @@
-def compile():
- class SRE_Match():
- endpos = int()
- lastgroup = int()
- lastindex = int()
- pos = int()
- string = str()
- regs = ((int(), int()),)
-
- def __init__(self, pattern):
- self.re = pattern
-
- def start(self):
- return int()
-
- def end(self):
- return int()
-
- def span(self):
- return int(), int()
-
- def expand(self):
- return str()
-
- def group(self, nr):
- return str()
-
- def groupdict(self):
- return {str(): str()}
-
- def groups(self):
- return (str(),)
-
- class SRE_Pattern():
- flags = int()
- groupindex = {}
- groups = int()
- pattern = str()
-
- def findall(self, string, pos=None, endpos=None):
- """
- findall(string[, pos[, endpos]]) --> list.
- Return a list of all non-overlapping matches of pattern in string.
- """
- return [str()]
-
- def finditer(self, string, pos=None, endpos=None):
- """
- finditer(string[, pos[, endpos]]) --> iterator.
- Return an iterator over all non-overlapping matches for the
- RE pattern in string. For each match, the iterator returns a
- match object.
- """
- yield SRE_Match(self)
-
- def match(self, string, pos=None, endpos=None):
- """
- match(string[, pos[, endpos]]) --> match object or None.
- Matches zero or more characters at the beginning of the string
- pattern
- """
- return SRE_Match(self)
-
- def scanner(self, string, pos=None, endpos=None):
- pass
-
- def search(self, string, pos=None, endpos=None):
- """
- search(string[, pos[, endpos]]) --> match object or None.
- Scan through string looking for a match, and return a corresponding
- MatchObject instance. Return None if no position in the string matches.
- """
- return SRE_Match(self)
-
- def split(self, string, maxsplit=0]):
- """
- split(string[, maxsplit = 0]) --> list.
- Split string by the occurrences of pattern.
- """
- return [str()]
-
- def sub(self, repl, string, count=0):
- """
- sub(repl, string[, count = 0]) --> newstring
- Return the string obtained by replacing the leftmost non-overlapping
- occurrences of pattern in string by the replacement repl.
- """
- return str()
-
- def subn(self, repl, string, count=0):
- """
- subn(repl, string[, count = 0]) --> (newstring, number of subs)
- Return the tuple (new_string, number_of_subs_made) found by replacing
- the leftmost non-overlapping occurrences of pattern with the
- replacement repl.
- """
- return (str(), int())
-
- return SRE_Pattern()
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym
deleted file mode 100644
index 298d0b0dba..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym
+++ /dev/null
@@ -1,9 +0,0 @@
-def proxy(object, callback=None):
- return object
-
-class ref():
- def __init__(self, object, callback=None):
- self.__object = object
-
- def __call__(self):
- return self.__object
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym
deleted file mode 100644
index 46ec619fb4..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym
+++ /dev/null
@@ -1,277 +0,0 @@
-"""
-Pure Python implementation of some builtins.
-This code is not going to be executed anywhere.
-These implementations are not always correct, but should work as good as
-possible for the auto completion.
-"""
-
-
-def next(iterator, default=None):
- if random.choice([0, 1]):
- if hasattr("next"):
- return iterator.next()
- else:
- return iterator.__next__()
- else:
- if default is not None:
- return default
-
-
-def iter(collection, sentinel=None):
- if sentinel:
- yield collection()
- else:
- for c in collection:
- yield c
-
-
-def range(start, stop=None, step=1):
- return [0]
-
-
-class file():
- def __iter__(self):
- yield ''
-
- def next(self):
- return ''
-
- def readlines(self):
- return ['']
-
- def __enter__(self):
- return self
-
-
-class xrange():
- # Attention: this function doesn't exist in Py3k (there it is range).
- def __iter__(self):
- yield 1
-
- def count(self):
- return 1
-
- def index(self):
- return 1
-
-
-def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
- import io
- return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd)
-
-
-def open_python2(name, mode=None, buffering=None):
- return file(name, mode, buffering)
-
-
-#--------------------------------------------------------
-# descriptors
-#--------------------------------------------------------
-class property():
- def __init__(self, fget, fset=None, fdel=None, doc=None):
- self.fget = fget
- self.fset = fset
- self.fdel = fdel
- self.__doc__ = doc
-
- def __get__(self, obj, cls):
- return self.fget(obj)
-
- def __set__(self, obj, value):
- self.fset(obj, value)
-
- def __delete__(self, obj):
- self.fdel(obj)
-
- def setter(self, func):
- self.fset = func
- return self
-
- def getter(self, func):
- self.fget = func
- return self
-
- def deleter(self, func):
- self.fdel = func
- return self
-
-
-class staticmethod():
- def __init__(self, func):
- self.__func = func
-
- def __get__(self, obj, cls):
- return self.__func
-
-
-class classmethod():
- def __init__(self, func):
- self.__func = func
-
- def __get__(self, obj, cls):
- def _method(*args, **kwargs):
- return self.__func(cls, *args, **kwargs)
- return _method
-
-
-#--------------------------------------------------------
-# array stuff
-#--------------------------------------------------------
-class list():
- def __init__(self, iterable=[]):
- self.__iterable = []
- for i in iterable:
- self.__iterable += [i]
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def __getitem__(self, y):
- return self.__iterable[y]
-
- def pop(self):
- return self.__iterable[int()]
-
-
-class tuple():
- def __init__(self, iterable=[]):
- self.__iterable = []
- for i in iterable:
- self.__iterable += [i]
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def __getitem__(self, y):
- return self.__iterable[y]
-
- def index(self):
- return 1
-
- def count(self):
- return 1
-
-
-class set():
- def __init__(self, iterable=[]):
- self.__iterable = iterable
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def pop(self):
- return list(self.__iterable)[-1]
-
- def copy(self):
- return self
-
- def difference(self, other):
- return self - other
-
- def intersection(self, other):
- return self & other
-
- def symmetric_difference(self, other):
- return self ^ other
-
- def union(self, other):
- return self | other
-
-
-class frozenset():
- def __init__(self, iterable=[]):
- self.__iterable = iterable
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def copy(self):
- return self
-
-
-class dict():
- def __init__(self, **elements):
- self.__elements = elements
-
- def clear(self):
- # has a strange docstr
- pass
-
- def __getitem__(self, obj):
- return self.__elements[obj]
-
- def get(self, k, d=None):
- # TODO implement
- try:
- return self.__elements[k]
- pass
- except KeyError:
- return d
-
- def values(self):
- return self.__elements.values()
-
- def setdefault(self, k, d):
- # TODO maybe also return the content
- return d
-
-
-class enumerate():
- def __init__(self, sequence, start=0):
- self.__sequence = sequence
-
- def __iter__(self):
- for i in self.__sequence:
- yield 1, i
-
- def __next__(self):
- return next(self.__iter__())
-
- def next(self):
- return next(self.__iter__())
-
-
-class reversed():
- def __init__(self, sequence):
- self.__sequence = sequence
-
- def __iter__(self):
- for i in self.__sequence:
- yield i
-
- def __next__(self):
- return next(self.__iter__())
-
- def next(self):
- return next(self.__iter__())
-
-
-def sorted(iterable, cmp=None, key=None, reverse=False):
- return iterable
-
-
-#--------------------------------------------------------
-# basic types
-#--------------------------------------------------------
-class int():
- def __init__(self, x, base=None):
- pass
-
-
-class str():
- def __init__(self, obj):
- pass
-
- def strip(self):
- return str()
-
- def split(self):
- return [str()]
-
-class type():
- def mro():
- return [object]
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym
deleted file mode 100644
index 823ac5b7fd..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym
+++ /dev/null
@@ -1,4 +0,0 @@
-class datetime():
- @staticmethod
- def now():
- return datetime()
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym
deleted file mode 100644
index c1f4fc0116..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym
+++ /dev/null
@@ -1,12 +0,0 @@
-class TextIOWrapper():
- def __next__(self):
- return str()
-
- def __iter__(self):
- yield str()
-
- def readlines(self):
- return ['']
-
- def __enter__(self):
- return self
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/operator.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/operator.pym
deleted file mode 100644
index d40d468179..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/operator.pym
+++ /dev/null
@@ -1,33 +0,0 @@
-# Just copied this code from Python 3.6.
-
-class itemgetter:
- """
- Return a callable object that fetches the given item(s) from its operand.
- After f = itemgetter(2), the call f(r) returns r[2].
- After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])
- """
- __slots__ = ('_items', '_call')
-
- def __init__(self, item, *items):
- if not items:
- self._items = (item,)
- def func(obj):
- return obj[item]
- self._call = func
- else:
- self._items = items = (item,) + items
- def func(obj):
- return tuple(obj[i] for i in items)
- self._call = func
-
- def __call__(self, obj):
- return self._call(obj)
-
- def __repr__(self):
- return '%s.%s(%s)' % (self.__class__.__module__,
- self.__class__.__name__,
- ', '.join(map(repr, self._items)))
-
- def __reduce__(self):
- return self.__class__, self._items
-
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym
deleted file mode 100644
index 4417f7cb04..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym
+++ /dev/null
@@ -1,5 +0,0 @@
-def getcwd():
- return ''
-
-def getcwdu():
- return ''
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/getattr_static.py b/contrib/python/jedi/jedi/evaluate/compiled/getattr_static.py
deleted file mode 100644
index 946ac09b6b..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/getattr_static.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-A static version of getattr.
-This is a backport of the Python 3 code with a little bit of additional
-information returned to enable Jedi to make decisions.
-"""
-
-import types
-
-from jedi._compatibility import py_version
-
-_sentinel = object()
-
-
-def _check_instance(obj, attr):
- instance_dict = {}
- try:
- instance_dict = object.__getattribute__(obj, "__dict__")
- except AttributeError:
- pass
- return dict.get(instance_dict, attr, _sentinel)
-
-
-def _check_class(klass, attr):
- for entry in _static_getmro(klass):
- if _shadowed_dict(type(entry)) is _sentinel:
- try:
- return entry.__dict__[attr]
- except KeyError:
- pass
- return _sentinel
-
-
-def _is_type(obj):
- try:
- _static_getmro(obj)
- except TypeError:
- return False
- return True
-
-
-def _shadowed_dict_newstyle(klass):
- dict_attr = type.__dict__["__dict__"]
- for entry in _static_getmro(klass):
- try:
- class_dict = dict_attr.__get__(entry)["__dict__"]
- except KeyError:
- pass
- else:
- if not (type(class_dict) is types.GetSetDescriptorType and
- class_dict.__name__ == "__dict__" and
- class_dict.__objclass__ is entry):
- return class_dict
- return _sentinel
-
-
-def _static_getmro_newstyle(klass):
- return type.__dict__['__mro__'].__get__(klass)
-
-
-if py_version >= 30:
- _shadowed_dict = _shadowed_dict_newstyle
- _get_type = type
- _static_getmro = _static_getmro_newstyle
-else:
- def _shadowed_dict(klass):
- """
- In Python 2 __dict__ is not overwritable:
-
- class Foo(object): pass
- setattr(Foo, '__dict__', 4)
-
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: __dict__ must be a dictionary object
-
- It applies to both newstyle and oldstyle classes:
-
- class Foo(object): pass
- setattr(Foo, '__dict__', 4)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- AttributeError: attribute '__dict__' of 'type' objects is not writable
-
- It also applies to instances of those objects. However to keep things
- straight forward, newstyle classes always use the complicated way of
- accessing it while oldstyle classes just use getattr.
- """
- if type(klass) is _oldstyle_class_type:
- return getattr(klass, '__dict__', _sentinel)
- return _shadowed_dict_newstyle(klass)
-
- class _OldStyleClass:
- pass
-
- _oldstyle_instance_type = type(_OldStyleClass())
- _oldstyle_class_type = type(_OldStyleClass)
-
- def _get_type(obj):
- type_ = object.__getattribute__(obj, '__class__')
- if type_ is _oldstyle_instance_type:
- # Somehow for old style classes we need to access it directly.
- return obj.__class__
- return type_
-
- def _static_getmro(klass):
- if type(klass) is _oldstyle_class_type:
- def oldstyle_mro(klass):
- """
- Oldstyle mro is a really simplistic way of look up mro:
- https://stackoverflow.com/questions/54867/what-is-the-difference-between-old-style-and-new-style-classes-in-python
- """
- yield klass
- for base in klass.__bases__:
- for yield_from in oldstyle_mro(base):
- yield yield_from
-
- return oldstyle_mro(klass)
-
- return _static_getmro_newstyle(klass)
-
-
-def _safe_hasattr(obj, name):
- return _check_class(_get_type(obj), name) is not _sentinel
-
-
-def _safe_is_data_descriptor(obj):
- return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')
-
-
-def getattr_static(obj, attr, default=_sentinel):
- """Retrieve attributes without triggering dynamic lookup via the
- descriptor protocol, __getattr__ or __getattribute__.
-
- Note: this function may not be able to retrieve all attributes
- that getattr can fetch (like dynamically created attributes)
- and may find attributes that getattr can't (like descriptors
- that raise AttributeError). It can also return descriptor objects
- instead of instance members in some cases. See the
- documentation for details.
-
- Returns a tuple `(attr, is_get_descriptor)`. is_get_descripter means that
- the attribute is a descriptor that has a `__get__` attribute.
- """
- instance_result = _sentinel
- if not _is_type(obj):
- klass = _get_type(obj)
- dict_attr = _shadowed_dict(klass)
- if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):
- instance_result = _check_instance(obj, attr)
- else:
- klass = obj
-
- klass_result = _check_class(klass, attr)
-
- if instance_result is not _sentinel and klass_result is not _sentinel:
- if _safe_hasattr(klass_result, '__get__') \
- and _safe_is_data_descriptor(klass_result):
- # A get/set descriptor has priority over everything.
- return klass_result, True
-
- if instance_result is not _sentinel:
- return instance_result, False
- if klass_result is not _sentinel:
- return klass_result, _safe_hasattr(klass_result, '__get__')
-
- if obj is klass:
- # for types we check the metaclass too
- for entry in _static_getmro(type(klass)):
- if _shadowed_dict(type(entry)) is _sentinel:
- try:
- return entry.__dict__[attr], False
- except KeyError:
- pass
- if default is not _sentinel:
- return default, False
- raise AttributeError(attr)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/mixed.py b/contrib/python/jedi/jedi/evaluate/compiled/mixed.py
deleted file mode 100644
index f2cb21528c..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/mixed.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""
-Used only for REPL Completion.
-"""
-
-import inspect
-import os
-
-from jedi.parser_utils import get_cached_code_lines
-
-from jedi import settings
-from jedi.evaluate import compiled
-from jedi.cache import underscore_memoization
-from jedi.evaluate import imports
-from jedi.evaluate.base_context import Context, ContextSet
-from jedi.evaluate.context import ModuleContext
-from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate.compiled.getattr_static import getattr_static
-from jedi.evaluate.compiled.access import compiled_objects_cache
-from jedi.evaluate.compiled.context import create_cached_compiled_object
-
-
-class MixedObject(object):
- """
- A ``MixedObject`` is used in two ways:
-
- 1. It uses the default logic of ``parser.python.tree`` objects,
- 2. except for getattr calls. The names dicts are generated in a fashion
- like ``CompiledObject``.
-
- This combined logic makes it possible to provide more powerful REPL
- completion. It allows side effects that are not noticable with the default
- parser structure to still be completeable.
-
- The biggest difference from CompiledObject to MixedObject is that we are
- generally dealing with Python code and not with C code. This will generate
- fewer special cases, because we in Python you don't have the same freedoms
- to modify the runtime.
- """
- def __init__(self, evaluator, parent_context, compiled_object, tree_context):
- self.evaluator = evaluator
- self.parent_context = parent_context
- self.compiled_object = compiled_object
- self._context = tree_context
- self.access_handle = compiled_object.access_handle
-
- # We have to overwrite everything that has to do with trailers, name
- # lookups and filters to make it possible to route name lookups towards
- # compiled objects and the rest towards tree node contexts.
- def py__getattribute__(*args, **kwargs):
- return Context.py__getattribute__(*args, **kwargs)
-
- def get_filters(self, *args, **kwargs):
- yield MixedObjectFilter(self.evaluator, self)
-
- def __repr__(self):
- return '<%s: %s>' % (type(self).__name__, self.access_handle.get_repr())
-
- def __getattr__(self, name):
- return getattr(self._context, name)
-
-
-class MixedName(compiled.CompiledName):
- """
- The ``CompiledName._compiled_object`` is our MixedObject.
- """
- @property
- def start_pos(self):
- contexts = list(self.infer())
- if not contexts:
- # This means a start_pos that doesn't exist (compiled objects).
- return 0, 0
- return contexts[0].name.start_pos
-
- @start_pos.setter
- def start_pos(self, value):
- # Ignore the __init__'s start_pos setter call.
- pass
-
- @underscore_memoization
- def infer(self):
- access_handle = self.parent_context.access_handle
- # TODO use logic from compiled.CompiledObjectFilter
- access_handle = access_handle.getattr(self.string_name, default=None)
- return ContextSet(
- _create(self._evaluator, access_handle, parent_context=self.parent_context)
- )
-
- @property
- def api_type(self):
- return next(iter(self.infer())).api_type
-
-
-class MixedObjectFilter(compiled.CompiledObjectFilter):
- name_class = MixedName
-
- def __init__(self, evaluator, mixed_object, is_instance=False):
- super(MixedObjectFilter, self).__init__(
- evaluator, mixed_object, is_instance)
- self._mixed_object = mixed_object
-
- #def _create(self, name):
- #return MixedName(self._evaluator, self._compiled_object, name)
-
-
-@evaluator_function_cache()
-def _load_module(evaluator, path):
- module_node = evaluator.grammar.parse(
- path=path,
- cache=True,
- diff_cache=settings.fast_parser,
- cache_path=settings.cache_directory
- ).get_root_node()
- # python_module = inspect.getmodule(python_object)
- # TODO we should actually make something like this possible.
- #evaluator.modules[python_module.__name__] = module_node
- return module_node
-
-
-def _get_object_to_check(python_object):
- """Check if inspect.getfile has a chance to find the source."""
- if (inspect.ismodule(python_object) or
- inspect.isclass(python_object) or
- inspect.ismethod(python_object) or
- inspect.isfunction(python_object) or
- inspect.istraceback(python_object) or
- inspect.isframe(python_object) or
- inspect.iscode(python_object)):
- return python_object
-
- try:
- return python_object.__class__
- except AttributeError:
- raise TypeError # Prevents computation of `repr` within inspect.
-
-
-def _find_syntax_node_name(evaluator, access_handle):
- # TODO accessing this is bad, but it probably doesn't matter that much,
- # because we're working with interpreteters only here.
- python_object = access_handle.access._obj
- try:
- python_object = _get_object_to_check(python_object)
- path = inspect.getsourcefile(python_object)
- except TypeError:
- # The type might not be known (e.g. class_with_dict.__weakref__)
- return None
- if path is None or not os.path.exists(path):
- # The path might not exist or be e.g. <stdin>.
- return None
-
- module_node = _load_module(evaluator, path)
-
- if inspect.ismodule(python_object):
- # We don't need to check names for modules, because there's not really
- # a way to write a module in a module in Python (and also __name__ can
- # be something like ``email.utils``).
- code_lines = get_cached_code_lines(evaluator.grammar, path)
- return module_node, module_node, path, code_lines
-
- try:
- name_str = python_object.__name__
- except AttributeError:
- # Stuff like python_function.__code__.
- return None
-
- if name_str == '<lambda>':
- return None # It's too hard to find lambdas.
-
- # Doesn't always work (e.g. os.stat_result)
- names = module_node.get_used_names().get(name_str, [])
- names = [n for n in names if n.is_definition()]
- if not names:
- return None
-
- try:
- code = python_object.__code__
- # By using the line number of a code object we make the lookup in a
- # file pretty easy. There's still a possibility of people defining
- # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
- # do so we just don't care.
- line_nr = code.co_firstlineno
- except AttributeError:
- pass
- else:
- line_names = [name for name in names if name.start_pos[0] == line_nr]
- # There's a chance that the object is not available anymore, because
- # the code has changed in the background.
- if line_names:
- names = line_names
-
- code_lines = get_cached_code_lines(evaluator.grammar, path)
- # It's really hard to actually get the right definition, here as a last
- # resort we just return the last one. This chance might lead to odd
- # completions at some points but will lead to mostly correct type
- # inference, because people tend to define a public name in a module only
- # once.
- return module_node, names[-1].parent, path, code_lines
-
-
-@compiled_objects_cache('mixed_cache')
-def _create(evaluator, access_handle, parent_context, *args):
- compiled_object = create_cached_compiled_object(
- evaluator, access_handle, parent_context=parent_context.compiled_object)
-
- result = _find_syntax_node_name(evaluator, access_handle)
- if result is None:
- return compiled_object
-
- module_node, tree_node, path, code_lines = result
-
- if parent_context.tree_node.get_root_node() == module_node:
- module_context = parent_context.get_root_context()
- else:
- module_context = ModuleContext(
- evaluator, module_node,
- path=path,
- code_lines=code_lines,
- )
- # TODO this __name__ is probably wrong.
- name = compiled_object.get_root_context().py__name__()
- if name is not None:
- imports.add_module_to_cache(evaluator, name, module_context)
-
- tree_context = module_context.create_context(
- tree_node,
- node_is_context=True,
- node_is_object=True
- )
- if tree_node.type == 'classdef':
- if not access_handle.is_class():
- # Is an instance, not a class.
- tree_context, = tree_context.execute_evaluated()
-
- return MixedObject(
- evaluator,
- parent_context,
- compiled_object,
- tree_context=tree_context
- )
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/subprocess/__init__.py b/contrib/python/jedi/jedi/evaluate/compiled/subprocess/__init__.py
deleted file mode 100644
index cf21b51b53..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/subprocess/__init__.py
+++ /dev/null
@@ -1,397 +0,0 @@
-"""
-Makes it possible to do the compiled analysis in a subprocess. This has two
-goals:
-
-1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can
- be ignored and dealt with.
-2. Make it possible to handle different Python versions as well as virtualenvs.
-"""
-
-import os
-import sys
-import subprocess
-import socket
-import errno
-import weakref
-import traceback
-from functools import partial
-from threading import Thread
-try:
- from queue import Queue, Empty
-except ImportError:
- from Queue import Queue, Empty # python 2.7
-
-from jedi._compatibility import queue, is_py3, force_unicode, \
- pickle_dump, pickle_load, GeneralizedPopen, print_to_stderr
-from jedi import debug
-from jedi.cache import memoize_method
-from jedi.evaluate.compiled.subprocess import functions
-from jedi.evaluate.compiled.access import DirectObjectAccess, AccessPath, \
- SignatureParam
-from jedi.api.exceptions import InternalError
-
-
-_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')
-
-
-def _enqueue_output(out, queue):
- for line in iter(out.readline, b''):
- queue.put(line)
- out.close()
-
-
-def _add_stderr_to_debug(stderr_queue):
- while True:
- # Try to do some error reporting from the subprocess and print its
- # stderr contents.
- try:
- line = stderr_queue.get_nowait()
- line = line.decode('utf-8', 'replace')
- debug.warning('stderr output: %s' % line.rstrip('\n'))
- except Empty:
- break
-
-
-def _get_function(name):
- return getattr(functions, name)
-
-
-class _EvaluatorProcess(object):
- def __init__(self, evaluator):
- self._evaluator_weakref = weakref.ref(evaluator)
- self._evaluator_id = id(evaluator)
- self._handles = {}
-
- def get_or_create_access_handle(self, obj):
- id_ = id(obj)
- try:
- return self.get_access_handle(id_)
- except KeyError:
- access = DirectObjectAccess(self._evaluator_weakref(), obj)
- handle = AccessHandle(self, access, id_)
- self.set_access_handle(handle)
- return handle
-
- def get_access_handle(self, id_):
- return self._handles[id_]
-
- def set_access_handle(self, handle):
- self._handles[handle.id] = handle
-
-
-class EvaluatorSameProcess(_EvaluatorProcess):
- """
- Basically just an easy access to functions.py. It has the same API
- as EvaluatorSubprocess and does the same thing without using a subprocess.
- This is necessary for the Interpreter process.
- """
- def __getattr__(self, name):
- return partial(_get_function(name), self._evaluator_weakref())
-
-
-class EvaluatorSubprocess(_EvaluatorProcess):
- def __init__(self, evaluator, compiled_subprocess):
- super(EvaluatorSubprocess, self).__init__(evaluator)
- self._used = False
- self._compiled_subprocess = compiled_subprocess
-
- def __getattr__(self, name):
- func = _get_function(name)
-
- def wrapper(*args, **kwargs):
- self._used = True
-
- result = self._compiled_subprocess.run(
- self._evaluator_weakref(),
- func,
- args=args,
- kwargs=kwargs,
- )
- # IMO it should be possible to create a hook in pickle.load to
- # mess with the loaded objects. However it's extremely complicated
- # to work around this so just do it with this call. ~ dave
- return self._convert_access_handles(result)
-
- return wrapper
-
- def _convert_access_handles(self, obj):
- if isinstance(obj, SignatureParam):
- return SignatureParam(*self._convert_access_handles(tuple(obj)))
- elif isinstance(obj, tuple):
- return tuple(self._convert_access_handles(o) for o in obj)
- elif isinstance(obj, list):
- return [self._convert_access_handles(o) for o in obj]
- elif isinstance(obj, AccessHandle):
- try:
- # Rewrite the access handle to one we're already having.
- obj = self.get_access_handle(obj.id)
- except KeyError:
- obj.add_subprocess(self)
- self.set_access_handle(obj)
- elif isinstance(obj, AccessPath):
- return AccessPath(self._convert_access_handles(obj.accesses))
- return obj
-
- def __del__(self):
- if self._used and not self._compiled_subprocess.is_crashed:
- self._compiled_subprocess.delete_evaluator(self._evaluator_id)
-
-
-class CompiledSubprocess(object):
- is_crashed = False
- # Start with 2, gets set after _get_info.
- _pickle_protocol = 2
-
- def __init__(self, executable):
- self._executable = executable
- self._evaluator_deletion_queue = queue.deque()
-
- def __repr__(self):
- pid = os.getpid()
- return '<%s _executable=%r, _pickle_protocol=%r, is_crashed=%r, pid=%r>' % (
- self.__class__.__name__,
- self._executable,
- self._pickle_protocol,
- self.is_crashed,
- pid,
- )
-
- @property
- @memoize_method
- def _process(self):
- debug.dbg('Start environment subprocess %s', self._executable)
- parso_path = sys.modules['parso'].__file__
- args = (
- self._executable,
- _MAIN_PATH,
- os.path.dirname(os.path.dirname(parso_path)),
- '.'.join(str(x) for x in sys.version_info[:3]),
- )
- process = GeneralizedPopen(
- args,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- # Use system default buffering on Python 2 to improve performance
- # (this is already the case on Python 3).
- bufsize=-1
- )
- self._stderr_queue = Queue()
- self._stderr_thread = t = Thread(
- target=_enqueue_output,
- args=(process.stderr, self._stderr_queue)
- )
- t.daemon = True
- t.start()
- return process
-
- def run(self, evaluator, function, args=(), kwargs={}):
- # Delete old evaluators.
- while True:
- try:
- evaluator_id = self._evaluator_deletion_queue.pop()
- except IndexError:
- break
- else:
- self._send(evaluator_id, None)
-
- assert callable(function)
- return self._send(id(evaluator), function, args, kwargs)
-
- def get_sys_path(self):
- return self._send(None, functions.get_sys_path, (), {})
-
- def _kill(self):
- self.is_crashed = True
- try:
- self._process.kill()
- self._process.wait()
- except (AttributeError, TypeError):
- # If the Python process is terminating, it will remove some modules
- # earlier than others and in general it's unclear how to deal with
- # that so we just ignore the exceptions here.
- pass
-
- def __del__(self):
- if not self.is_crashed:
- self._kill()
-
- def _send(self, evaluator_id, function, args=(), kwargs={}):
- if self.is_crashed:
- raise InternalError("The subprocess %s has crashed." % self._executable)
-
- if not is_py3:
- # Python 2 compatibility
- kwargs = {force_unicode(key): value for key, value in kwargs.items()}
-
- data = evaluator_id, function, args, kwargs
- try:
- pickle_dump(data, self._process.stdin, self._pickle_protocol)
- except (socket.error, IOError) as e:
- # Once Python2 will be removed we can just use `BrokenPipeError`.
- # Also, somehow in windows it returns EINVAL instead of EPIPE if
- # the subprocess dies.
- if e.errno not in (errno.EPIPE, errno.EINVAL):
- # Not a broken pipe
- raise
- self._kill()
- raise InternalError("The subprocess %s was killed. Maybe out of memory?"
- % self._executable)
-
- try:
- is_exception, traceback, result = pickle_load(self._process.stdout)
- except EOFError as eof_error:
- try:
- stderr = self._process.stderr.read().decode('utf-8', 'replace')
- except Exception as exc:
- stderr = '<empty/not available (%r)>' % exc
- self._kill()
- _add_stderr_to_debug(self._stderr_queue)
- raise InternalError(
- "The subprocess %s has crashed (%r, stderr=%s)." % (
- self._executable,
- eof_error,
- stderr,
- ))
-
- _add_stderr_to_debug(self._stderr_queue)
-
- if is_exception:
- # Replace the attribute error message with a the traceback. It's
- # way more informative.
- result.args = (traceback,)
- raise result
- return result
-
- def delete_evaluator(self, evaluator_id):
- """
- Currently we are not deleting evalutors instantly. They only get
- deleted once the subprocess is used again. It would probably a better
- solution to move all of this into a thread. However, the memory usage
- of a single evaluator shouldn't be that high.
- """
- # With an argument - the evaluator gets deleted.
- self._evaluator_deletion_queue.append(evaluator_id)
-
-
-class Listener(object):
- def __init__(self, pickle_protocol):
- self._evaluators = {}
- # TODO refactor so we don't need to process anymore just handle
- # controlling.
- self._process = _EvaluatorProcess(Listener)
- self._pickle_protocol = pickle_protocol
-
- def _get_evaluator(self, function, evaluator_id):
- from jedi.evaluate import Evaluator
-
- try:
- evaluator = self._evaluators[evaluator_id]
- except KeyError:
- from jedi.api.environment import InterpreterEnvironment
- evaluator = Evaluator(
- # The project is not actually needed. Nothing should need to
- # access it.
- project=None,
- environment=InterpreterEnvironment()
- )
- self._evaluators[evaluator_id] = evaluator
- return evaluator
-
- def _run(self, evaluator_id, function, args, kwargs):
- if evaluator_id is None:
- return function(*args, **kwargs)
- elif function is None:
- del self._evaluators[evaluator_id]
- else:
- evaluator = self._get_evaluator(function, evaluator_id)
-
- # Exchange all handles
- args = list(args)
- for i, arg in enumerate(args):
- if isinstance(arg, AccessHandle):
- args[i] = evaluator.compiled_subprocess.get_access_handle(arg.id)
- for key, value in kwargs.items():
- if isinstance(value, AccessHandle):
- kwargs[key] = evaluator.compiled_subprocess.get_access_handle(value.id)
-
- return function(evaluator, *args, **kwargs)
-
- def listen(self):
- stdout = sys.stdout
- # Mute stdout. Nobody should actually be able to write to it,
- # because stdout is used for IPC.
- sys.stdout = open(os.devnull, 'w')
- stdin = sys.stdin
- if sys.version_info[0] > 2:
- stdout = stdout.buffer
- stdin = stdin.buffer
- # Python 2 opens streams in text mode on Windows. Set stdout and stdin
- # to binary mode.
- elif sys.platform == 'win32':
- import msvcrt
- msvcrt.setmode(stdout.fileno(), os.O_BINARY)
- msvcrt.setmode(stdin.fileno(), os.O_BINARY)
-
- while True:
- try:
- payload = pickle_load(stdin)
- except EOFError:
- # It looks like the parent process closed.
- # Don't make a big fuss here and just exit.
- exit(0)
- try:
- result = False, None, self._run(*payload)
- except Exception as e:
- result = True, traceback.format_exc(), e
-
- pickle_dump(result, stdout, self._pickle_protocol)
-
-
-class AccessHandle(object):
- def __init__(self, subprocess, access, id_):
- self.access = access
- self._subprocess = subprocess
- self.id = id_
-
- def add_subprocess(self, subprocess):
- self._subprocess = subprocess
-
- def __repr__(self):
- try:
- detail = self.access
- except AttributeError:
- detail = '#' + str(self.id)
- return '<%s of %s>' % (self.__class__.__name__, detail)
-
- def __getstate__(self):
- return self.id
-
- def __setstate__(self, state):
- self.id = state
-
- def __getattr__(self, name):
- if name in ('id', 'access') or name.startswith('_'):
- raise AttributeError("Something went wrong with unpickling")
-
- #if not is_py3: print >> sys.stderr, name
- #print('getattr', name, file=sys.stderr)
- return partial(self._workaround, force_unicode(name))
-
- def _workaround(self, name, *args, **kwargs):
- """
- TODO Currently we're passing slice objects around. This should not
- happen. They are also the only unhashable objects that we're passing
- around.
- """
- if args and isinstance(args[0], slice):
- return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
- return self._cached_results(name, *args, **kwargs)
-
- @memoize_method
- def _cached_results(self, name, *args, **kwargs):
- #if type(self._subprocess) == EvaluatorSubprocess:
- #print(name, args, kwargs,
- #self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
- #)
- return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/subprocess/__main__.py b/contrib/python/jedi/jedi/evaluate/compiled/subprocess/__main__.py
deleted file mode 100644
index 4be28204dc..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/subprocess/__main__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-import sys
-
-
-def _get_paths():
- # Get the path to jedi.
- _d = os.path.dirname
- _jedi_path = _d(_d(_d(_d(_d(__file__)))))
- _parso_path = sys.argv[1]
- # The paths are the directory that jedi and parso lie in.
- return {'jedi': _jedi_path, 'parso': _parso_path}
-
-
-# Remove the first entry, because it's simply a directory entry that equals
-# this directory.
-del sys.path[0]
-
-if sys.version_info > (3, 4):
- from importlib.machinery import PathFinder
-
- class _ExactImporter(object):
- def __init__(self, path_dct):
- self._path_dct = path_dct
-
- def find_module(self, fullname, path=None):
- if path is None and fullname in self._path_dct:
- p = self._path_dct[fullname]
- loader = PathFinder.find_module(fullname, path=[p])
- return loader
- return None
-
- # Try to import jedi/parso.
- sys.meta_path.insert(0, _ExactImporter(_get_paths()))
- from jedi.evaluate.compiled import subprocess # NOQA
- sys.meta_path.pop(0)
-else:
- import imp
-
- def load(name):
- paths = list(_get_paths().values())
- fp, pathname, description = imp.find_module(name, paths)
- return imp.load_module(name, fp, pathname, description)
-
- load('parso')
- load('jedi')
- from jedi.evaluate.compiled import subprocess # NOQA
-
-from jedi._compatibility import highest_pickle_protocol # noqa: E402
-
-
-# Retrieve the pickle protocol.
-host_sys_version = [int(x) for x in sys.argv[2].split('.')]
-pickle_protocol = highest_pickle_protocol([sys.version_info, host_sys_version])
-# And finally start the client.
-subprocess.Listener(pickle_protocol=pickle_protocol).listen()
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/subprocess/functions.py b/contrib/python/jedi/jedi/evaluate/compiled/subprocess/functions.py
deleted file mode 100644
index c0fc6d137a..0000000000
--- a/contrib/python/jedi/jedi/evaluate/compiled/subprocess/functions.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import sys
-import os
-
-from jedi._compatibility import find_module, cast_path, force_unicode, \
- iter_modules, all_suffixes, print_to_stderr
-from jedi.evaluate.compiled import access
-from jedi import parser_utils
-
-
-def get_sys_path():
- return list(map(cast_path, sys.path))
-
-
-def load_module(evaluator, **kwargs):
- return access.load_module(evaluator, **kwargs)
-
-
-def get_compiled_method_return(evaluator, id, attribute, *args, **kwargs):
- handle = evaluator.compiled_subprocess.get_access_handle(id)
- return getattr(handle.access, attribute)(*args, **kwargs)
-
-
-def get_special_object(evaluator, identifier):
- return access.get_special_object(evaluator, identifier)
-
-
-def create_simple_object(evaluator, obj):
- return access.create_access_path(evaluator, obj)
-
-
-def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs):
- if sys_path is not None:
- sys.path, temp = sys_path, sys.path
- try:
- module_file, module_path, is_pkg = find_module(full_name=full_name, **kwargs)
- except ImportError:
- return None, None, None
- finally:
- if sys_path is not None:
- sys.path = temp
-
- code = None
- if is_pkg:
- # In this case, we don't have a file yet. Search for the
- # __init__ file.
- if module_path.endswith(('.zip', '.egg')):
- code = module_file.loader.get_source(full_name)
- else:
- module_path = _get_init_path(module_path)
- elif module_file:
- if module_path.endswith(('.zip', '.egg')):
- # Unfortunately we are reading unicode here already, not byes.
- # It seems however hard to get bytes, because the zip importer
- # logic just unpacks the zip file and returns a file descriptor
- # that we cannot as easily access. Therefore we just read it as
- # a string.
- code = module_file.read()
- else:
- # Read the code with a binary file, because the binary file
- # might not be proper unicode. This is handled by the parser
- # wrapper.
- with open(module_path, 'rb') as f:
- code = f.read()
-
- module_file.close()
-
- return code, cast_path(module_path), is_pkg
-
-
-def list_module_names(evaluator, search_path):
- return [
- force_unicode(name)
- for module_loader, name, is_pkg in iter_modules(search_path)
- ]
-
-
-def get_builtin_module_names(evaluator):
- return list(map(force_unicode, sys.builtin_module_names))
-
-
-def _test_raise_error(evaluator, exception_type):
- """
- Raise an error to simulate certain problems for unit tests.
- """
- raise exception_type
-
-
-def _test_print(evaluator, stderr=None, stdout=None):
- """
- Force some prints in the subprocesses. This exists for unit tests.
- """
- if stderr is not None:
- print_to_stderr(stderr)
- sys.stderr.flush()
- if stdout is not None:
- print(stdout)
- sys.stdout.flush()
-
-
-def _get_init_path(directory_path):
- """
- The __init__ file can be searched in a directory. If found return it, else
- None.
- """
- for suffix in all_suffixes():
- path = os.path.join(directory_path, '__init__' + suffix)
- if os.path.exists(path):
- return path
- return None
-
-
-def safe_literal_eval(evaluator, value):
- return parser_utils.safe_literal_eval(value)
diff --git a/contrib/python/jedi/jedi/evaluate/context/__init__.py b/contrib/python/jedi/jedi/evaluate/context/__init__.py
deleted file mode 100644
index 4e7ce4d6c8..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from jedi.evaluate.context.module import ModuleContext
-from jedi.evaluate.context.klass import ClassContext
-from jedi.evaluate.context.function import FunctionContext, FunctionExecutionContext
-from jedi.evaluate.context.instance import AnonymousInstance, BoundMethod, \
- CompiledInstance, AbstractInstanceContext, TreeInstance
diff --git a/contrib/python/jedi/jedi/evaluate/context/asynchronous.py b/contrib/python/jedi/jedi/evaluate/context/asynchronous.py
deleted file mode 100644
index 51e59a4826..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/asynchronous.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from jedi.evaluate.filters import publish_method, BuiltinOverwrite
-from jedi.evaluate.base_context import ContextSet
-
-
-class AsyncBase(BuiltinOverwrite):
- def __init__(self, evaluator, func_execution_context):
- super(AsyncBase, self).__init__(evaluator)
- self.func_execution_context = func_execution_context
-
- @property
- def name(self):
- return self.get_object().name
-
- def __repr__(self):
- return "<%s of %s>" % (type(self).__name__, self.func_execution_context)
-
-
-class Coroutine(AsyncBase):
- special_object_identifier = u'COROUTINE'
-
- @publish_method('__await__')
- def _await(self):
- return ContextSet(CoroutineWrapper(self.evaluator, self.func_execution_context))
-
-
-class CoroutineWrapper(AsyncBase):
- special_object_identifier = u'COROUTINE_WRAPPER'
-
- def py__stop_iteration_returns(self):
- return self.func_execution_context.get_return_values()
-
-
-class AsyncGenerator(AsyncBase):
- """Handling of `yield` functions."""
- special_object_identifier = u'ASYNC_GENERATOR'
-
- def py__aiter__(self):
- return self.func_execution_context.get_yield_lazy_contexts(is_async=True)
diff --git a/contrib/python/jedi/jedi/evaluate/context/function.py b/contrib/python/jedi/jedi/evaluate/context/function.py
deleted file mode 100644
index cfd10ab44c..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/function.py
+++ /dev/null
@@ -1,253 +0,0 @@
-from parso.python import tree
-
-from jedi._compatibility import use_metaclass
-from jedi import debug
-from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
-from jedi.evaluate import compiled
-from jedi.evaluate import recursion
-from jedi.evaluate import docstrings
-from jedi.evaluate import pep0484
-from jedi.evaluate import flow_analysis
-from jedi.evaluate import helpers
-from jedi.evaluate.arguments import AnonymousArguments
-from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
- ContextName, AbstractNameDefinition, ParamName
-from jedi.evaluate.base_context import ContextualizedNode, NO_CONTEXTS, \
- ContextSet, TreeContext
-from jedi.evaluate.lazy_context import LazyKnownContexts, LazyKnownContext, \
- LazyTreeContext
-from jedi.evaluate.context import iterable
-from jedi.evaluate.context import asynchronous
-from jedi import parser_utils
-from jedi.evaluate.parser_cache import get_yield_exprs
-
-
-class LambdaName(AbstractNameDefinition):
- string_name = '<lambda>'
- api_type = u'function'
-
- def __init__(self, lambda_context):
- self._lambda_context = lambda_context
- self.parent_context = lambda_context.parent_context
-
- @property
- def start_pos(self):
- return self._lambda_context.tree_node.start_pos
-
- def infer(self):
- return ContextSet(self._lambda_context)
-
-
-class AbstractFunction(TreeContext):
- api_type = u'function'
-
- def get_filters(self, search_global, until_position=None, origin_scope=None):
- if search_global:
- yield ParserTreeFilter(
- self.evaluator,
- context=self,
- until_position=until_position,
- origin_scope=origin_scope
- )
- else:
- scope = self.py__class__()
- for filter in scope.get_filters(search_global=False, origin_scope=origin_scope):
- yield filter
-
- def get_param_names(self):
- function_execution = self.get_function_execution()
- return [ParamName(function_execution, param.name)
- for param in self.tree_node.get_params()]
-
- @property
- def name(self):
- if self.tree_node.type == 'lambdef':
- return LambdaName(self)
- return ContextName(self, self.tree_node.name)
-
- def get_function_execution(self, arguments=None):
- raise NotImplementedError
-
- def py__call__(self, arguments):
- function_execution = self.get_function_execution(arguments)
- return self.infer_function_execution(function_execution)
-
- def infer_function_execution(self, function_execution):
- """
- Created to be used by inheritance.
- """
- is_coroutine = self.tree_node.parent.type == 'async_stmt'
- is_generator = bool(get_yield_exprs(self.evaluator, self.tree_node))
-
- if is_coroutine:
- if is_generator:
- if self.evaluator.environment.version_info < (3, 6):
- return NO_CONTEXTS
- return ContextSet(asynchronous.AsyncGenerator(self.evaluator, function_execution))
- else:
- if self.evaluator.environment.version_info < (3, 5):
- return NO_CONTEXTS
- return ContextSet(asynchronous.Coroutine(self.evaluator, function_execution))
- else:
- if is_generator:
- return ContextSet(iterable.Generator(self.evaluator, function_execution))
- else:
- return function_execution.get_return_values()
-
- def py__name__(self):
- return self.name.string_name
-
-
-class FunctionContext(use_metaclass(CachedMetaClass, AbstractFunction)):
- """
- Needed because of decorators. Decorators are evaluated here.
- """
- @classmethod
- def from_context(cls, context, tree_node):
- from jedi.evaluate.context import AbstractInstanceContext
-
- while context.is_class() or isinstance(context, AbstractInstanceContext):
- context = context.parent_context
-
- return cls(context.evaluator, parent_context=context, tree_node=tree_node)
-
- def get_function_execution(self, arguments=None):
- if arguments is None:
- arguments = AnonymousArguments()
-
- return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments)
-
- def py__class__(self):
- return compiled.get_special_object(self.evaluator, u'FUNCTION_CLASS')
-
-
-class FunctionExecutionContext(TreeContext):
- """
- This class is used to evaluate functions and their returns.
-
- This is the most complicated class, because it contains the logic to
- transfer parameters. It is even more complicated, because there may be
- multiple calls to functions and recursion has to be avoided. But this is
- responsibility of the decorators.
- """
- function_execution_filter = FunctionExecutionFilter
-
- def __init__(self, evaluator, parent_context, function_context, var_args):
- super(FunctionExecutionContext, self).__init__(
- evaluator,
- parent_context,
- function_context.tree_node,
- )
- self.function_context = function_context
- self.var_args = var_args
-
- @evaluator_method_cache(default=NO_CONTEXTS)
- @recursion.execution_recursion_decorator()
- def get_return_values(self, check_yields=False):
- funcdef = self.tree_node
- if funcdef.type == 'lambdef':
- return self.eval_node(funcdef.children[-1])
-
- if check_yields:
- context_set = NO_CONTEXTS
- returns = get_yield_exprs(self.evaluator, funcdef)
- else:
- returns = funcdef.iter_return_stmts()
- context_set = docstrings.infer_return_types(self.function_context)
- context_set |= pep0484.infer_return_types(self.function_context)
-
- for r in returns:
- check = flow_analysis.reachability_check(self, funcdef, r)
- if check is flow_analysis.UNREACHABLE:
- debug.dbg('Return unreachable: %s', r)
- else:
- if check_yields:
- context_set |= ContextSet.from_sets(
- lazy_context.infer()
- for lazy_context in self._get_yield_lazy_context(r)
- )
- else:
- try:
- children = r.children
- except AttributeError:
- ctx = compiled.builtin_from_name(self.evaluator, u'None')
- context_set |= ContextSet(ctx)
- else:
- context_set |= self.eval_node(children[1])
- if check is flow_analysis.REACHABLE:
- debug.dbg('Return reachable: %s', r)
- break
- return context_set
-
- def _get_yield_lazy_context(self, yield_expr):
- if yield_expr.type == 'keyword':
- # `yield` just yields None.
- ctx = compiled.builtin_from_name(self.evaluator, u'None')
- yield LazyKnownContext(ctx)
- return
-
- node = yield_expr.children[1]
- if node.type == 'yield_arg': # It must be a yield from.
- cn = ContextualizedNode(self, node.children[1])
- for lazy_context in cn.infer().iterate(cn):
- yield lazy_context
- else:
- yield LazyTreeContext(self, node)
-
- @recursion.execution_recursion_decorator(default=iter([]))
- def get_yield_lazy_contexts(self, is_async=False):
- # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
- for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
- 'while_stmt', 'if_stmt'))
- for y in get_yield_exprs(self.evaluator, self.tree_node)]
-
- # Calculate if the yields are placed within the same for loop.
- yields_order = []
- last_for_stmt = None
- for yield_, for_stmt in for_parents:
- # For really simple for loops we can predict the order. Otherwise
- # we just ignore it.
- parent = for_stmt.parent
- if parent.type == 'suite':
- parent = parent.parent
- if for_stmt.type == 'for_stmt' and parent == self.tree_node \
- and parser_utils.for_stmt_defines_one_name(for_stmt): # Simplicity for now.
- if for_stmt == last_for_stmt:
- yields_order[-1][1].append(yield_)
- else:
- yields_order.append((for_stmt, [yield_]))
- elif for_stmt == self.tree_node:
- yields_order.append((None, [yield_]))
- else:
- types = self.get_return_values(check_yields=True)
- if types:
- yield LazyKnownContexts(types)
- return
- last_for_stmt = for_stmt
-
- for for_stmt, yields in yields_order:
- if for_stmt is None:
- # No for_stmt, just normal yields.
- for yield_ in yields:
- for result in self._get_yield_lazy_context(yield_):
- yield result
- else:
- input_node = for_stmt.get_testlist()
- cn = ContextualizedNode(self, input_node)
- ordered = cn.infer().iterate(cn)
- ordered = list(ordered)
- for lazy_context in ordered:
- dct = {str(for_stmt.children[1].value): lazy_context.infer()}
- with helpers.predefine_names(self, for_stmt, dct):
- for yield_in_same_for_stmt in yields:
- for result in self._get_yield_lazy_context(yield_in_same_for_stmt):
- yield result
-
- def get_filters(self, search_global, until_position=None, origin_scope=None):
- yield self.function_execution_filter(self.evaluator, self,
- until_position=until_position,
- origin_scope=origin_scope)
-
- @evaluator_method_cache()
- def get_executed_params(self):
- return self.var_args.get_executed_params(self)
diff --git a/contrib/python/jedi/jedi/evaluate/context/instance.py b/contrib/python/jedi/jedi/evaluate/context/instance.py
deleted file mode 100644
index db94c91a63..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/instance.py
+++ /dev/null
@@ -1,483 +0,0 @@
-from abc import abstractproperty
-
-from jedi import debug
-from jedi import settings
-from jedi.evaluate import compiled
-from jedi.evaluate import filters
-from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \
- iterator_to_context_set
-from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate.arguments import AbstractArguments, AnonymousArguments
-from jedi.evaluate.context.function import FunctionExecutionContext, \
- FunctionContext, AbstractFunction
-from jedi.evaluate.context.klass import ClassContext, apply_py__get__, ClassFilter
-from jedi.evaluate.context import iterable
-from jedi.parser_utils import get_parent_scope
-
-
-class InstanceExecutedParam(object):
- def __init__(self, instance):
- self._instance = instance
-
- def infer(self):
- return ContextSet(self._instance)
-
-
-class AnonymousInstanceArguments(AnonymousArguments):
- def __init__(self, instance):
- self._instance = instance
-
- def get_executed_params(self, execution_context):
- from jedi.evaluate.dynamic import search_params
- self_param = InstanceExecutedParam(self._instance)
- tree_params = execution_context.tree_node.get_params()
- if len(tree_params) == 1:
- # If the only param is self, we don't need to try to find
- # executions of this function, we have all the params already.
- return [self_param]
- executed_params = list(search_params(
- execution_context.evaluator,
- execution_context,
- execution_context.tree_node
- ))
- executed_params[0] = self_param
- return executed_params
-
-
-class AbstractInstanceContext(Context):
- """
- This class is used to evaluate instances.
- """
- api_type = u'instance'
-
- def __init__(self, evaluator, parent_context, class_context, var_args):
- super(AbstractInstanceContext, self).__init__(evaluator, parent_context)
- # Generated instances are classes that are just generated by self
- # (No var_args) used.
- self.class_context = class_context
- self.var_args = var_args
-
- def is_class(self):
- return False
-
- @property
- def py__call__(self):
- names = self.get_function_slot_names(u'__call__')
- if not names:
- # Means the Instance is not callable.
- raise AttributeError
-
- def execute(arguments):
- return ContextSet.from_sets(name.infer().execute(arguments) for name in names)
-
- return execute
-
- def py__class__(self):
- return self.class_context
-
- def py__bool__(self):
- # Signalize that we don't know about the bool type.
- return None
-
- def get_function_slot_names(self, name):
- # Python classes don't look at the dictionary of the instance when
- # looking up `__call__`. This is something that has to do with Python's
- # internal slot system (note: not __slots__, but C slots).
- for filter in self.get_filters(include_self_names=False):
- names = filter.get(name)
- if names:
- return names
- return []
-
- def execute_function_slots(self, names, *evaluated_args):
- return ContextSet.from_sets(
- name.infer().execute_evaluated(*evaluated_args)
- for name in names
- )
-
- def py__get__(self, obj):
- # Arguments in __get__ descriptors are obj, class.
- # `method` is the new parent of the array, don't know if that's good.
- names = self.get_function_slot_names(u'__get__')
- if names:
- if isinstance(obj, AbstractInstanceContext):
- return self.execute_function_slots(names, obj, obj.class_context)
- else:
- none_obj = compiled.builtin_from_name(self.evaluator, u'None')
- return self.execute_function_slots(names, none_obj, obj)
- else:
- return ContextSet(self)
-
- def get_filters(self, search_global=None, until_position=None,
- origin_scope=None, include_self_names=True):
- if include_self_names:
- for cls in self.class_context.py__mro__():
- if not isinstance(cls, compiled.CompiledObject) \
- or cls.tree_node is not None:
- # In this case we're excluding compiled objects that are
- # not fake objects. It doesn't make sense for normal
- # compiled objects to search for self variables.
- yield SelfAttributeFilter(self.evaluator, self, cls, origin_scope)
-
- for cls in self.class_context.py__mro__():
- if isinstance(cls, compiled.CompiledObject):
- yield CompiledInstanceClassFilter(self.evaluator, self, cls)
- else:
- yield InstanceClassFilter(self.evaluator, self, cls, origin_scope)
-
- def py__getitem__(self, index):
- try:
- names = self.get_function_slot_names(u'__getitem__')
- except KeyError:
- debug.warning('No __getitem__, cannot access the array.')
- return NO_CONTEXTS
- else:
- index_obj = compiled.create_simple_object(self.evaluator, index)
- return self.execute_function_slots(names, index_obj)
-
- def py__iter__(self):
- iter_slot_names = self.get_function_slot_names(u'__iter__')
- if not iter_slot_names:
- debug.warning('No __iter__ on %s.' % self)
- return
-
- for generator in self.execute_function_slots(iter_slot_names):
- if isinstance(generator, AbstractInstanceContext):
- # `__next__` logic.
- if self.evaluator.environment.version_info.major == 2:
- name = u'next'
- else:
- name = u'__next__'
- iter_slot_names = generator.get_function_slot_names(name)
- if iter_slot_names:
- yield LazyKnownContexts(
- generator.execute_function_slots(iter_slot_names)
- )
- else:
- debug.warning('Instance has no __next__ function in %s.', generator)
- else:
- for lazy_context in generator.py__iter__():
- yield lazy_context
-
- @abstractproperty
- def name(self):
- pass
-
- def _create_init_execution(self, class_context, bound_method):
- return bound_method.get_function_execution(self.var_args)
-
- def create_init_executions(self):
- for name in self.get_function_slot_names(u'__init__'):
- if isinstance(name, LazyInstanceClassName):
- function = FunctionContext.from_context(
- self.parent_context,
- name.tree_name.parent
- )
- bound_method = BoundMethod(self, name.class_context, function)
- yield self._create_init_execution(name.class_context, bound_method)
-
- @evaluator_method_cache()
- def create_instance_context(self, class_context, node):
- if node.parent.type in ('funcdef', 'classdef'):
- node = node.parent
- scope = get_parent_scope(node)
- if scope == class_context.tree_node:
- return class_context
- else:
- parent_context = self.create_instance_context(class_context, scope)
- if scope.type == 'funcdef':
- func = FunctionContext.from_context(
- parent_context,
- scope,
- )
- bound_method = BoundMethod(self, class_context, func)
- if scope.name.value == '__init__' and parent_context == class_context:
- return self._create_init_execution(class_context, bound_method)
- else:
- return bound_method.get_function_execution()
- elif scope.type == 'classdef':
- class_context = ClassContext(self.evaluator, parent_context, scope)
- return class_context
- elif scope.type == 'comp_for':
- # Comprehensions currently don't have a special scope in Jedi.
- return self.create_instance_context(class_context, scope)
- else:
- raise NotImplementedError
- return class_context
-
- def __repr__(self):
- return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_context,
- self.var_args)
-
-
-class CompiledInstance(AbstractInstanceContext):
- def __init__(self, evaluator, parent_context, class_context, var_args):
- self._original_var_args = var_args
-
- # I don't think that dynamic append lookups should happen here. That
- # sounds more like something that should go to py__iter__.
- if class_context.py__name__() in ['list', 'set'] \
- and parent_context.get_root_context() == evaluator.builtins_module:
- # compare the module path with the builtin name.
- if settings.dynamic_array_additions:
- var_args = iterable.get_dynamic_array_instance(self, var_args)
-
- super(CompiledInstance, self).__init__(evaluator, parent_context, class_context, var_args)
-
- @property
- def name(self):
- return compiled.CompiledContextName(self, self.class_context.name.string_name)
-
- def create_instance_context(self, class_context, node):
- if get_parent_scope(node).type == 'classdef':
- return class_context
- else:
- return super(CompiledInstance, self).create_instance_context(class_context, node)
-
- def get_first_non_keyword_argument_contexts(self):
- key, lazy_context = next(self._original_var_args.unpack(), ('', None))
- if key is not None:
- return NO_CONTEXTS
-
- return lazy_context.infer()
-
-
-class TreeInstance(AbstractInstanceContext):
- def __init__(self, evaluator, parent_context, class_context, var_args):
- super(TreeInstance, self).__init__(evaluator, parent_context,
- class_context, var_args)
- self.tree_node = class_context.tree_node
-
- @property
- def name(self):
- return filters.ContextName(self, self.class_context.name.tree_name)
-
-
-class AnonymousInstance(TreeInstance):
- def __init__(self, evaluator, parent_context, class_context):
- super(AnonymousInstance, self).__init__(
- evaluator,
- parent_context,
- class_context,
- var_args=AnonymousInstanceArguments(self),
- )
-
-
-class CompiledInstanceName(compiled.CompiledName):
-
- def __init__(self, evaluator, instance, klass, name):
- super(CompiledInstanceName, self).__init__(
- evaluator,
- klass.parent_context,
- name.string_name
- )
- self._instance = instance
- self._class = klass
- self._class_member_name = name
-
- @iterator_to_context_set
- def infer(self):
- for result_context in self._class_member_name.infer():
- is_function = result_context.api_type == 'function'
- if result_context.tree_node is not None and is_function:
- yield BoundMethod(self._instance, self._class, result_context)
- else:
- if is_function:
- yield CompiledBoundMethod(result_context)
- else:
- yield result_context
-
-
-class CompiledInstanceClassFilter(filters.AbstractFilter):
- name_class = CompiledInstanceName
-
- def __init__(self, evaluator, instance, klass):
- self._evaluator = evaluator
- self._instance = instance
- self._class = klass
- self._class_filter = next(klass.get_filters(is_instance=True))
-
- def get(self, name):
- return self._convert(self._class_filter.get(name))
-
- def values(self):
- return self._convert(self._class_filter.values())
-
- def _convert(self, names):
- return [
- CompiledInstanceName(self._evaluator, self._instance, self._class, n)
- for n in names
- ]
-
-
-class BoundMethod(AbstractFunction):
- def __init__(self, instance, klass, function):
- super(BoundMethod, self).__init__(
- function.evaluator,
- function.parent_context,
- function.tree_node,
- )
- self._instance = instance
- self._class = klass
- self._function = function
-
- def py__class__(self):
- return compiled.get_special_object(self.evaluator, u'BOUND_METHOD_CLASS')
-
- def get_function_execution(self, arguments=None):
- if arguments is None:
- arguments = AnonymousInstanceArguments(self._instance)
-
- arguments = InstanceArguments(self._instance, arguments)
-
- if isinstance(self._function, compiled.CompiledObject):
- # This is kind of weird, because it's coming from a compiled object
- # and we're not sure if we want that in the future.
- return FunctionExecutionContext(
- self.evaluator, self.parent_context, self, arguments
- )
-
- return self._function.get_function_execution(arguments)
-
- def __repr__(self):
- return '<%s: %s>' % (self.__class__.__name__, self._function)
-
-
-class CompiledBoundMethod(compiled.CompiledObject):
- def __init__(self, func):
- super(CompiledBoundMethod, self).__init__(
- func.evaluator, func.access_handle, func.parent_context, func.tree_node)
-
- def get_param_names(self):
- return list(super(CompiledBoundMethod, self).get_param_names())[1:]
-
-
-class SelfName(filters.TreeNameDefinition):
- """
- This name calculates the parent_context lazily.
- """
- def __init__(self, instance, class_context, tree_name):
- self._instance = instance
- self.class_context = class_context
- self.tree_name = tree_name
-
- @property
- def parent_context(self):
- return self._instance.create_instance_context(self.class_context, self.tree_name)
-
-
-class LazyInstanceClassName(object):
- def __init__(self, instance, class_context, class_member_name):
- self._instance = instance
- self.class_context = class_context
- self._class_member_name = class_member_name
-
- @iterator_to_context_set
- def infer(self):
- for result_context in self._class_member_name.infer():
- if isinstance(result_context, FunctionContext):
- # Classes are never used to resolve anything within the
- # functions. Only other functions and modules will resolve
- # those things.
- yield BoundMethod(self._instance, self.class_context, result_context)
- else:
- for c in apply_py__get__(result_context, self._instance):
- yield c
-
- def __getattr__(self, name):
- return getattr(self._class_member_name, name)
-
-
-class InstanceClassFilter(filters.AbstractFilter):
- """
- This filter is special in that it uses the class filter and wraps the
- resulting names in LazyINstanceClassName. The idea is that the class name
- filtering can be very flexible and always be reflected in instances.
- """
- def __init__(self, evaluator, context, class_context, origin_scope):
- self._instance = context
- self._class_context = class_context
- self._class_filter = next(class_context.get_filters(
- search_global=False,
- origin_scope=origin_scope,
- is_instance=True,
- ))
-
- def get(self, name):
- return self._convert(self._class_filter.get(name))
-
- def values(self):
- return self._convert(self._class_filter.values())
-
- def _convert(self, names):
- return [LazyInstanceClassName(self._instance, self._class_context, n) for n in names]
-
-
-class SelfAttributeFilter(ClassFilter):
- """
- This class basically filters all the use cases where `self.*` was assigned.
- """
- name_class = SelfName
-
- def __init__(self, evaluator, context, class_context, origin_scope):
- super(SelfAttributeFilter, self).__init__(
- evaluator=evaluator,
- context=context,
- node_context=class_context,
- origin_scope=origin_scope,
- is_instance=True,
- )
- self._class_context = class_context
-
- def _filter(self, names):
- names = self._filter_self_names(names)
- if isinstance(self._parser_scope, compiled.CompiledObject) and False:
- # This would be for builtin skeletons, which are not yet supported.
- return list(names)
- else:
- start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
- return [n for n in names if start < n.start_pos < end]
-
- def _filter_self_names(self, names):
- for name in names:
- trailer = name.parent
- if trailer.type == 'trailer' \
- and len(trailer.children) == 2 \
- and trailer.children[0] == '.':
- if name.is_definition() and self._access_possible(name):
- yield name
-
- def _convert_names(self, names):
- return [self.name_class(self.context, self._class_context, name) for name in names]
-
- def _check_flows(self, names):
- return names
-
-
-class InstanceArguments(AbstractArguments):
- def __init__(self, instance, var_args):
- self.instance = instance
- self._var_args = var_args
-
- @property
- def argument_node(self):
- return self._var_args.argument_node
-
- @property
- def trailer(self):
- return self._var_args.trailer
-
- def unpack(self, func=None):
- yield None, LazyKnownContext(self.instance)
- for values in self._var_args.unpack(func):
- yield values
-
- def get_calling_nodes(self):
- return self._var_args.get_calling_nodes()
-
- def get_executed_params(self, execution_context):
- if isinstance(self._var_args, AnonymousInstanceArguments):
- return self._var_args.get_executed_params(execution_context)
-
- return super(InstanceArguments, self).get_executed_params(execution_context)
diff --git a/contrib/python/jedi/jedi/evaluate/context/iterable.py b/contrib/python/jedi/jedi/evaluate/context/iterable.py
deleted file mode 100644
index 9958de28e3..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/iterable.py
+++ /dev/null
@@ -1,732 +0,0 @@
-"""
-Contains all classes and functions to deal with lists, dicts, generators and
-iterators in general.
-
-Array modifications
-*******************
-
-If the content of an array (``set``/``list``) is requested somewhere, the
-current module will be checked for appearances of ``arr.append``,
-``arr.insert``, etc. If the ``arr`` name points to an actual array, the
-content will be added
-
-This can be really cpu intensive, as you can imagine. Because |jedi| has to
-follow **every** ``append`` and check wheter it's the right array. However this
-works pretty good, because in *slow* cases, the recursion detector and other
-settings will stop this process.
-
-It is important to note that:
-
-1. Array modfications work only in the current module.
-2. Jedi only checks Array additions; ``list.pop``, etc are ignored.
-"""
-from jedi import debug
-from jedi import settings
-from jedi._compatibility import force_unicode, is_py3
-from jedi.cache import memoize_method
-from jedi.evaluate import compiled
-from jedi.evaluate import analysis
-from jedi.evaluate import recursion
-from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
- LazyTreeContext
-from jedi.evaluate.helpers import get_int_or_none, is_string, \
- predefine_names, evaluate_call_of_leaf, reraise_as_evaluator, \
- EvaluatorKeyError
-from jedi.evaluate.utils import safe_property
-from jedi.evaluate.utils import to_list
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate.filters import ParserTreeFilter, BuiltinOverwrite, \
- publish_method
-from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \
- TreeContext, ContextualizedNode
-from jedi.parser_utils import get_comp_fors
-
-
-class IterableMixin(object):
- def py__stop_iteration_returns(self):
- return ContextSet(compiled.builtin_from_name(self.evaluator, u'None'))
-
-
-class GeneratorBase(BuiltinOverwrite, IterableMixin):
- array_type = None
- special_object_identifier = u'GENERATOR_OBJECT'
-
- @publish_method('send')
- @publish_method('next', python_version_match=2)
- @publish_method('__next__', python_version_match=3)
- def py__next__(self):
- return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
-
- @property
- def name(self):
- return compiled.CompiledContextName(self, 'generator')
-
-
-class Generator(GeneratorBase):
- """Handling of `yield` functions."""
- def __init__(self, evaluator, func_execution_context):
- super(Generator, self).__init__(evaluator)
- self._func_execution_context = func_execution_context
-
- def py__iter__(self):
- return self._func_execution_context.get_yield_lazy_contexts()
-
- def py__stop_iteration_returns(self):
- return self._func_execution_context.get_return_values()
-
- def __repr__(self):
- return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
-
-
-class CompForContext(TreeContext):
- @classmethod
- def from_comp_for(cls, parent_context, comp_for):
- return cls(parent_context.evaluator, parent_context, comp_for)
-
- def get_node(self):
- return self.tree_node
-
- def get_filters(self, search_global, until_position=None, origin_scope=None):
- yield ParserTreeFilter(self.evaluator, self)
-
-
-def comprehension_from_atom(evaluator, context, atom):
- bracket = atom.children[0]
- if bracket == '{':
- if atom.children[1].children[1] == ':':
- cls = DictComprehension
- else:
- cls = SetComprehension
- elif bracket == '(':
- cls = GeneratorComprehension
- elif bracket == '[':
- cls = ListComprehension
- return cls(evaluator, context, atom)
-
-
-class ComprehensionMixin(object):
- def __init__(self, evaluator, defining_context, atom):
- super(ComprehensionMixin, self).__init__(evaluator)
- self._defining_context = defining_context
- self._atom = atom
-
- def _get_comprehension(self):
- "return 'a for a in b'"
- # The atom contains a testlist_comp
- return self._atom.children[1]
-
- def _get_comp_for(self):
- "return CompFor('for a in b')"
- return self._get_comprehension().children[1]
-
- def _eval_node(self, index=0):
- """
- The first part `x + 1` of the list comprehension:
-
- [x + 1 for x in foo]
- """
- return self._get_comprehension().children[index]
-
- @evaluator_method_cache()
- def _get_comp_for_context(self, parent_context, comp_for):
- # TODO shouldn't this be part of create_context?
- return CompForContext.from_comp_for(parent_context, comp_for)
-
- def _nested(self, comp_fors, parent_context=None):
- comp_for = comp_fors[0]
-
- is_async = 'async' == comp_for.children[comp_for.children.index('for') - 1]
-
- input_node = comp_for.children[comp_for.children.index('in') + 1]
- parent_context = parent_context or self._defining_context
- input_types = parent_context.eval_node(input_node)
- # TODO: simulate await if self.is_async
-
- cn = ContextualizedNode(parent_context, input_node)
- iterated = input_types.iterate(cn, is_async=is_async)
- exprlist = comp_for.children[comp_for.children.index('for') + 1]
- for i, lazy_context in enumerate(iterated):
- types = lazy_context.infer()
- dct = unpack_tuple_to_dict(parent_context, types, exprlist)
- context_ = self._get_comp_for_context(
- parent_context,
- comp_for,
- )
- with predefine_names(context_, comp_for, dct):
- try:
- for result in self._nested(comp_fors[1:], context_):
- yield result
- except IndexError:
- iterated = context_.eval_node(self._eval_node())
- if self.array_type == 'dict':
- yield iterated, context_.eval_node(self._eval_node(2))
- else:
- yield iterated
-
- @evaluator_method_cache(default=[])
- @to_list
- def _iterate(self):
- comp_fors = tuple(get_comp_fors(self._get_comp_for()))
- for result in self._nested(comp_fors):
- yield result
-
- def py__iter__(self):
- for set_ in self._iterate():
- yield LazyKnownContexts(set_)
-
- def __repr__(self):
- return "<%s of %s>" % (type(self).__name__, self._atom)
-
-
-class Sequence(BuiltinOverwrite, IterableMixin):
- api_type = u'instance'
-
- @property
- def name(self):
- return compiled.CompiledContextName(self, self.array_type)
-
- @memoize_method
- def get_object(self):
- compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type)
- only_obj, = compiled_obj.execute_evaluated(self)
- return only_obj
-
- def py__bool__(self):
- return None # We don't know the length, because of appends.
-
- def py__class__(self):
- return compiled.builtin_from_name(self.evaluator, self.array_type)
-
- @safe_property
- def parent(self):
- return self.evaluator.builtins_module
-
- def dict_values(self):
- return ContextSet.from_sets(
- self._defining_context.eval_node(v)
- for k, v in self._items()
- )
-
-
-class ListComprehension(ComprehensionMixin, Sequence):
- array_type = u'list'
-
- def py__getitem__(self, index):
- if isinstance(index, slice):
- return ContextSet(self)
-
- all_types = list(self.py__iter__())
- with reraise_as_evaluator(IndexError, TypeError):
- lazy_context = all_types[index]
- return lazy_context.infer()
-
-
-class SetComprehension(ComprehensionMixin, Sequence):
- array_type = u'set'
-
-
-class DictComprehension(ComprehensionMixin, Sequence):
- array_type = u'dict'
-
- def _get_comp_for(self):
- return self._get_comprehension().children[3]
-
- def py__iter__(self):
- for keys, values in self._iterate():
- yield LazyKnownContexts(keys)
-
- def py__getitem__(self, index):
- for keys, values in self._iterate():
- for k in keys:
- if isinstance(k, compiled.CompiledObject):
- if k.get_safe_value(default=object()) == index:
- return values
- return self.dict_values()
-
- def dict_values(self):
- return ContextSet.from_sets(values for keys, values in self._iterate())
-
- @publish_method('values')
- def _imitate_values(self):
- lazy_context = LazyKnownContexts(self.dict_values())
- return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context]))
-
- @publish_method('items')
- def _imitate_items(self):
- lazy_contexts = [
- LazyKnownContext(
- FakeSequence(
- self.evaluator,
- u'tuple',
- [LazyKnownContexts(key),
- LazyKnownContexts(value)]
- )
- )
- for key, value in self._iterate()
- ]
-
- return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts))
-
- def exact_key_items(self):
- # NOTE: A smarter thing can probably done here to achieve better
- # completions, but at least like this jedi doesn't crash
- return []
-
-
-class GeneratorComprehension(ComprehensionMixin, GeneratorBase):
- pass
-
-
-class SequenceLiteralContext(Sequence):
- mapping = {'(': u'tuple',
- '[': u'list',
- '{': u'set'}
-
- def __init__(self, evaluator, defining_context, atom):
- super(SequenceLiteralContext, self).__init__(evaluator)
- self.atom = atom
- self._defining_context = defining_context
-
- if self.atom.type in ('testlist_star_expr', 'testlist'):
- self.array_type = u'tuple'
- else:
- self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
- """The builtin name of the array (list, set, tuple or dict)."""
-
- def py__getitem__(self, index):
- """Here the index is an int/str. Raises IndexError/KeyError."""
- if self.array_type == u'dict':
- compiled_obj_index = compiled.create_simple_object(self.evaluator, index)
- for key, value in self._items():
- for k in self._defining_context.eval_node(key):
- if isinstance(k, compiled.CompiledObject) \
- and k.execute_operation(compiled_obj_index, u'==').get_safe_value():
- return self._defining_context.eval_node(value)
- raise EvaluatorKeyError('No key found in dictionary %s.' % self)
-
- # Can raise an IndexError
- if isinstance(index, slice):
- return ContextSet(self)
- else:
- with reraise_as_evaluator(TypeError, KeyError, IndexError):
- node = self._items()[index]
- return self._defining_context.eval_node(node)
-
- def py__iter__(self):
- """
- While values returns the possible values for any array field, this
- function returns the value for a certain index.
- """
- if self.array_type == u'dict':
- # Get keys.
- types = ContextSet()
- for k, _ in self._items():
- types |= self._defining_context.eval_node(k)
- # We don't know which dict index comes first, therefore always
- # yield all the types.
- for _ in types:
- yield LazyKnownContexts(types)
- else:
- for node in self._items():
- yield LazyTreeContext(self._defining_context, node)
-
- for addition in check_array_additions(self._defining_context, self):
- yield addition
-
- def _values(self):
- """Returns a list of a list of node."""
- if self.array_type == u'dict':
- return ContextSet.from_sets(v for k, v in self._items())
- else:
- return self._items()
-
- def _items(self):
- c = self.atom.children
-
- if self.atom.type in ('testlist_star_expr', 'testlist'):
- return c[::2]
-
- array_node = c[1]
- if array_node in (']', '}', ')'):
- return [] # Direct closing bracket, doesn't contain items.
-
- if array_node.type == 'testlist_comp':
- # filter out (for now) pep 448 single-star unpacking
- return [value for value in array_node.children[::2]
- if value.type != "star_expr"]
- elif array_node.type == 'dictorsetmaker':
- kv = []
- iterator = iter(array_node.children)
- for key in iterator:
- if key == "**":
- # dict with pep 448 double-star unpacking
- # for now ignoring the values imported by **
- next(iterator)
- next(iterator, None) # Possible comma.
- else:
- op = next(iterator, None)
- if op is None or op == ',':
- if key.type == "star_expr":
- # pep 448 single-star unpacking
- # for now ignoring values imported by *
- pass
- else:
- kv.append(key) # A set.
- else:
- assert op == ':' # A dict.
- kv.append((key, next(iterator)))
- next(iterator, None) # Possible comma.
- return kv
- else:
- if array_node.type == "star_expr":
- # pep 448 single-star unpacking
- # for now ignoring values imported by *
- return []
- else:
- return [array_node]
-
- def exact_key_items(self):
- """
- Returns a generator of tuples like dict.items(), where the key is
- resolved (as a string) and the values are still lazy contexts.
- """
- for key_node, value in self._items():
- for key in self._defining_context.eval_node(key_node):
- if is_string(key):
- yield key.get_safe_value(), LazyTreeContext(self._defining_context, value)
-
- def __repr__(self):
- return "<%s of %s>" % (self.__class__.__name__, self.atom)
-
-
-class DictLiteralContext(SequenceLiteralContext):
- array_type = u'dict'
-
- def __init__(self, evaluator, defining_context, atom):
- super(SequenceLiteralContext, self).__init__(evaluator)
- self._defining_context = defining_context
- self.atom = atom
-
- @publish_method('values')
- def _imitate_values(self):
- lazy_context = LazyKnownContexts(self.dict_values())
- return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context]))
-
- @publish_method('items')
- def _imitate_items(self):
- lazy_contexts = [
- LazyKnownContext(FakeSequence(
- self.evaluator, u'tuple',
- (LazyTreeContext(self._defining_context, key_node),
- LazyTreeContext(self._defining_context, value_node))
- )) for key_node, value_node in self._items()
- ]
-
- return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts))
-
-
-class _FakeArray(SequenceLiteralContext):
- def __init__(self, evaluator, container, type):
- super(SequenceLiteralContext, self).__init__(evaluator)
- self.array_type = type
- self.atom = container
- # TODO is this class really needed?
-
-
-class FakeSequence(_FakeArray):
- def __init__(self, evaluator, array_type, lazy_context_list):
- """
- type should be one of "tuple", "list"
- """
- super(FakeSequence, self).__init__(evaluator, None, array_type)
- self._lazy_context_list = lazy_context_list
-
- def py__getitem__(self, index):
- with reraise_as_evaluator(IndexError, TypeError):
- lazy_context = self._lazy_context_list[index]
- return lazy_context.infer()
-
- def py__iter__(self):
- return self._lazy_context_list
-
- def py__bool__(self):
- return bool(len(self._lazy_context_list))
-
- def __repr__(self):
- return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)
-
-
-class FakeDict(_FakeArray):
- def __init__(self, evaluator, dct):
- super(FakeDict, self).__init__(evaluator, dct, u'dict')
- self._dct = dct
-
- def py__iter__(self):
- for key in self._dct:
- yield LazyKnownContext(compiled.create_simple_object(self.evaluator, key))
-
- def py__getitem__(self, index):
- if is_py3 and self.evaluator.environment.version_info.major == 2:
- # In Python 2 bytes and unicode compare.
- if isinstance(index, bytes):
- index_unicode = force_unicode(index)
- try:
- return self._dct[index_unicode].infer()
- except KeyError:
- pass
- elif isinstance(index, str):
- index_bytes = index.encode('utf-8')
- try:
- return self._dct[index_bytes].infer()
- except KeyError:
- pass
-
- with reraise_as_evaluator(KeyError):
- lazy_context = self._dct[index]
- return lazy_context.infer()
-
- @publish_method('values')
- def _values(self):
- return ContextSet(FakeSequence(
- self.evaluator, u'tuple',
- [LazyKnownContexts(self.dict_values())]
- ))
-
- def dict_values(self):
- return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values())
-
- def exact_key_items(self):
- return self._dct.items()
-
-
-class MergedArray(_FakeArray):
- def __init__(self, evaluator, arrays):
- super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type)
- self._arrays = arrays
-
- def py__iter__(self):
- for array in self._arrays:
- for lazy_context in array.py__iter__():
- yield lazy_context
-
- def py__getitem__(self, index):
- return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
-
- def _items(self):
- for array in self._arrays:
- for a in array._items():
- yield a
-
- def __len__(self):
- return sum(len(a) for a in self._arrays)
-
-
-def unpack_tuple_to_dict(context, types, exprlist):
- """
- Unpacking tuple assignments in for statements and expr_stmts.
- """
- if exprlist.type == 'name':
- return {exprlist.value: types}
- elif exprlist.type == 'atom' and exprlist.children[0] in '([':
- return unpack_tuple_to_dict(context, types, exprlist.children[1])
- elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
- 'testlist_star_expr'):
- dct = {}
- parts = iter(exprlist.children[::2])
- n = 0
- for lazy_context in types.iterate(exprlist):
- n += 1
- try:
- part = next(parts)
- except StopIteration:
- # TODO this context is probably not right.
- analysis.add(context, 'value-error-too-many-values', part,
- message="ValueError: too many values to unpack (expected %s)" % n)
- else:
- dct.update(unpack_tuple_to_dict(context, lazy_context.infer(), part))
- has_parts = next(parts, None)
- if types and has_parts is not None:
- # TODO this context is probably not right.
- analysis.add(context, 'value-error-too-few-values', has_parts,
- message="ValueError: need more than %s values to unpack" % n)
- return dct
- elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
- # Something like ``arr[x], var = ...``.
- # This is something that is not yet supported, would also be difficult
- # to write into a dict.
- return {}
- elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings
- # Currently we're not supporting them.
- return {}
- raise NotImplementedError
-
-
-def check_array_additions(context, sequence):
- """ Just a mapper function for the internal _check_array_additions """
- if sequence.array_type not in ('list', 'set'):
- # TODO also check for dict updates
- return NO_CONTEXTS
-
- return _check_array_additions(context, sequence)
-
-
-@evaluator_method_cache(default=NO_CONTEXTS)
-@debug.increase_indent
-def _check_array_additions(context, sequence):
- """
- Checks if a `Array` has "add" (append, insert, extend) statements:
-
- >>> a = [""]
- >>> a.append(1)
- """
- from jedi.evaluate import arguments
-
- debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
- module_context = context.get_root_context()
- if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
- debug.dbg('Dynamic array search aborted.', color='MAGENTA')
- return ContextSet()
-
- def find_additions(context, arglist, add_name):
- params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack())
- result = set()
- if add_name in ['insert']:
- params = params[1:]
- if add_name in ['append', 'add', 'insert']:
- for key, whatever in params:
- result.add(whatever)
- elif add_name in ['extend', 'update']:
- for key, lazy_context in params:
- result |= set(lazy_context.infer().iterate())
- return result
-
- temp_param_add, settings.dynamic_params_for_other_modules = \
- settings.dynamic_params_for_other_modules, False
-
- is_list = sequence.name.string_name == 'list'
- search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])
-
- added_types = set()
- for add_name in search_names:
- try:
- possible_names = module_context.tree_node.get_used_names()[add_name]
- except KeyError:
- continue
- else:
- for name in possible_names:
- context_node = context.tree_node
- if not (context_node.start_pos < name.start_pos < context_node.end_pos):
- continue
- trailer = name.parent
- power = trailer.parent
- trailer_pos = power.children.index(trailer)
- try:
- execution_trailer = power.children[trailer_pos + 1]
- except IndexError:
- continue
- else:
- if execution_trailer.type != 'trailer' \
- or execution_trailer.children[0] != '(' \
- or execution_trailer.children[1] == ')':
- continue
-
- random_context = context.create_context(name)
-
- with recursion.execution_allowed(context.evaluator, power) as allowed:
- if allowed:
- found = evaluate_call_of_leaf(
- random_context,
- name,
- cut_own_trailer=True
- )
- if sequence in found:
- # The arrays match. Now add the results
- added_types |= find_additions(
- random_context,
- execution_trailer.children[1],
- add_name
- )
-
- # reset settings
- settings.dynamic_params_for_other_modules = temp_param_add
- debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA')
- return added_types
-
-
-def get_dynamic_array_instance(instance, arguments):
- """Used for set() and list() instances."""
- ai = _ArrayInstance(instance, arguments)
- from jedi.evaluate import arguments
- return arguments.ValuesArguments([ContextSet(ai)])
-
-
-class _ArrayInstance(object):
- """
- Used for the usage of set() and list().
- This is definitely a hack, but a good one :-)
- It makes it possible to use set/list conversions.
-
- In contrast to Array, ListComprehension and all other iterable types, this
- is something that is only used inside `evaluate/compiled/fake/builtins.py`
- and therefore doesn't need filters, `py__bool__` and so on, because
- we don't use these operations in `builtins.py`.
- """
- def __init__(self, instance, var_args):
- self.instance = instance
- self.var_args = var_args
-
- def py__iter__(self):
- var_args = self.var_args
- try:
- _, lazy_context = next(var_args.unpack())
- except StopIteration:
- pass
- else:
- for lazy in lazy_context.infer().iterate():
- yield lazy
-
- from jedi.evaluate import arguments
- if isinstance(var_args, arguments.TreeArguments):
- additions = _check_array_additions(var_args.context, self.instance)
- for addition in additions:
- yield addition
-
- def iterate(self, contextualized_node=None, is_async=False):
- return self.py__iter__()
-
-
-class Slice(Context):
- def __init__(self, context, start, stop, step):
- super(Slice, self).__init__(
- context.evaluator,
- parent_context=context.evaluator.builtins_module
- )
- self._context = context
- # all of them are either a Precedence or None.
- self._start = start
- self._stop = stop
- self._step = step
-
- @property
- def obj(self):
- """
- Imitate CompiledObject.obj behavior and return a ``builtin.slice()``
- object.
- """
- def get(element):
- if element is None:
- return None
-
- result = self._context.eval_node(element)
- if len(result) != 1:
- # For simplicity, we want slices to be clear defined with just
- # one type. Otherwise we will return an empty slice object.
- raise IndexError
-
- context, = result
- return get_int_or_none(context)
-
- try:
- return slice(get(self._start), get(self._stop), get(self._step))
- except IndexError:
- return slice(None, None, None)
diff --git a/contrib/python/jedi/jedi/evaluate/context/klass.py b/contrib/python/jedi/jedi/evaluate/context/klass.py
deleted file mode 100644
index b676ffdbfb..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/klass.py
+++ /dev/null
@@ -1,221 +0,0 @@
-"""
-Like described in the :mod:`parso.python.tree` module,
-there's a need for an ast like module to represent the states of parsed
-modules.
-
-But now there are also structures in Python that need a little bit more than
-that. An ``Instance`` for example is only a ``Class`` before it is
-instantiated. This class represents these cases.
-
-So, why is there also a ``Class`` class here? Well, there are decorators and
-they change classes in Python 3.
-
-Representation modules also define "magic methods". Those methods look like
-``py__foo__`` and are typically mappable to the Python equivalents ``__call__``
-and others. Here's a list:
-
-====================================== ========================================
-**Method** **Description**
--------------------------------------- ----------------------------------------
-py__call__(params: Array) On callable objects, returns types.
-py__bool__() Returns True/False/None; None means that
- there's no certainty.
-py__bases__() Returns a list of base classes.
-py__mro__() Returns a list of classes (the mro).
-py__iter__() Returns a generator of a set of types.
-py__class__() Returns the class of an instance.
-py__getitem__(index: int/str) Returns a a set of types of the index.
- Can raise an IndexError/KeyError.
-py__file__() Only on modules. Returns None if does
- not exist.
-py__package__() Only on modules. For the import system.
-py__path__() Only on modules. For the import system.
-py__get__(call_object) Only on instances. Simulates
- descriptors.
-py__doc__(include_call_signature: Returns the docstring for a context.
- bool)
-====================================== ========================================
-
-"""
-from jedi._compatibility import use_metaclass
-from jedi.parser_utils import get_parent_scope
-from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
-from jedi.evaluate import compiled
-from jedi.evaluate.lazy_context import LazyKnownContext
-from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \
- ContextName
-from jedi.evaluate.base_context import ContextSet, iterator_to_context_set, \
- TreeContext
-
-
-def apply_py__get__(context, base_context):
- try:
- method = context.py__get__
- except AttributeError:
- yield context
- else:
- for descriptor_context in method(base_context):
- yield descriptor_context
-
-
-class ClassName(TreeNameDefinition):
- def __init__(self, parent_context, tree_name, name_context, apply_decorators):
- super(ClassName, self).__init__(parent_context, tree_name)
- self._name_context = name_context
- self._apply_decorators = apply_decorators
-
- @iterator_to_context_set
- def infer(self):
- # TODO this _name_to_types might get refactored and be a part of the
- # parent class. Once it is, we can probably just overwrite method to
- # achieve this.
- from jedi.evaluate.syntax_tree import tree_name_to_contexts
- inferred = tree_name_to_contexts(
- self.parent_context.evaluator, self._name_context, self.tree_name)
-
- for result_context in inferred:
- if self._apply_decorators:
- for c in apply_py__get__(result_context, self.parent_context):
- yield c
- else:
- yield result_context
-
-
-class ClassFilter(ParserTreeFilter):
- name_class = ClassName
-
- def __init__(self, *args, **kwargs):
- self._is_instance = kwargs.pop('is_instance') # Python 2 :/
- super(ClassFilter, self).__init__(*args, **kwargs)
-
- def _convert_names(self, names):
- return [
- self.name_class(
- parent_context=self.context,
- tree_name=name,
- name_context=self._node_context,
- apply_decorators=not self._is_instance,
- ) for name in names
- ]
-
- def _equals_origin_scope(self):
- node = self._origin_scope
- while node is not None:
- if node == self._parser_scope or node == self.context:
- return True
- node = get_parent_scope(node)
- return False
-
- def _access_possible(self, name):
- return not name.value.startswith('__') or name.value.endswith('__') \
- or self._equals_origin_scope()
-
- def _filter(self, names):
- names = super(ClassFilter, self)._filter(names)
- return [name for name in names if self._access_possible(name)]
-
-
-class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
- """
- This class is not only important to extend `tree.Class`, it is also a
- important for descriptors (if the descriptor methods are evaluated or not).
- """
- api_type = u'class'
-
- @evaluator_method_cache(default=())
- def py__mro__(self):
- def add(cls):
- if cls not in mro:
- mro.append(cls)
-
- mro = [self]
- # TODO Do a proper mro resolution. Currently we are just listing
- # classes. However, it's a complicated algorithm.
- for lazy_cls in self.py__bases__():
- # TODO there's multiple different mro paths possible if this yields
- # multiple possibilities. Could be changed to be more correct.
- for cls in lazy_cls.infer():
- # TODO detect for TypeError: duplicate base class str,
- # e.g. `class X(str, str): pass`
- try:
- mro_method = cls.py__mro__
- except AttributeError:
- # TODO add a TypeError like:
- """
- >>> class Y(lambda: test): pass
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: function() argument 1 must be code, not str
- >>> class Y(1): pass
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: int() takes at most 2 arguments (3 given)
- """
- pass
- else:
- add(cls)
- for cls_new in mro_method():
- add(cls_new)
- return tuple(mro)
-
- @evaluator_method_cache(default=())
- def py__bases__(self):
- arglist = self.tree_node.get_super_arglist()
- if arglist:
- from jedi.evaluate import arguments
- args = arguments.TreeArguments(self.evaluator, self.parent_context, arglist)
- return [value for key, value in args.unpack() if key is None]
- else:
- return [LazyKnownContext(compiled.builtin_from_name(self.evaluator, u'object'))]
-
- def py__call__(self, params):
- from jedi.evaluate.context import TreeInstance
- return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params))
-
- def py__class__(self):
- return compiled.builtin_from_name(self.evaluator, u'type')
-
- def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False):
- if search_global:
- yield ParserTreeFilter(
- self.evaluator,
- context=self,
- until_position=until_position,
- origin_scope=origin_scope
- )
- else:
- for cls in self.py__mro__():
- if isinstance(cls, compiled.CompiledObject):
- for filter in cls.get_filters(is_instance=is_instance):
- yield filter
- else:
- yield ClassFilter(
- self.evaluator, self, node_context=cls,
- origin_scope=origin_scope,
- is_instance=is_instance
- )
-
- def is_class(self):
- return True
-
- def get_function_slot_names(self, name):
- for filter in self.get_filters(search_global=False):
- names = filter.get(name)
- if names:
- return names
- return []
-
- def get_param_names(self):
- for name in self.get_function_slot_names(u'__init__'):
- for context_ in name.infer():
- try:
- method = context_.get_param_names
- except AttributeError:
- pass
- else:
- return list(method())[1:]
- return []
-
- @property
- def name(self):
- return ContextName(self, self.tree_node.name)
diff --git a/contrib/python/jedi/jedi/evaluate/context/module.py b/contrib/python/jedi/jedi/evaluate/context/module.py
deleted file mode 100644
index 5cb4506966..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/module.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import re
-import os
-
-from parso import python_bytes_to_unicode
-
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi._compatibility import iter_modules, all_suffixes
-from jedi.evaluate.filters import GlobalNameFilter, ContextNameMixin, \
- AbstractNameDefinition, ParserTreeFilter, DictFilter, MergedFilter
-from jedi.evaluate import compiled
-from jedi.evaluate.base_context import TreeContext
-from jedi.evaluate.imports import SubModuleName, infer_import
-
-
-class _ModuleAttributeName(AbstractNameDefinition):
- """
- For module attributes like __file__, __str__ and so on.
- """
- api_type = u'instance'
-
- def __init__(self, parent_module, string_name):
- self.parent_context = parent_module
- self.string_name = string_name
-
- def infer(self):
- return compiled.get_string_context_set(self.parent_context.evaluator)
-
-
-class ModuleName(ContextNameMixin, AbstractNameDefinition):
- start_pos = 1, 0
-
- def __init__(self, context, name):
- self._context = context
- self._name = name
-
- @property
- def string_name(self):
- return self._name
-
-
-class ModuleContext(TreeContext):
- api_type = u'module'
- parent_context = None
-
- def __init__(self, evaluator, module_node, path, code_lines):
- super(ModuleContext, self).__init__(
- evaluator,
- parent_context=None,
- tree_node=module_node
- )
- self._path = path
- self.code_lines = code_lines
-
- def get_filters(self, search_global, until_position=None, origin_scope=None):
- yield MergedFilter(
- ParserTreeFilter(
- self.evaluator,
- context=self,
- until_position=until_position,
- origin_scope=origin_scope
- ),
- GlobalNameFilter(self, self.tree_node),
- )
- yield DictFilter(self._sub_modules_dict())
- yield DictFilter(self._module_attributes_dict())
- for star_module in self.star_imports():
- yield next(star_module.get_filters(search_global))
-
- # I'm not sure if the star import cache is really that effective anymore
- # with all the other really fast import caches. Recheck. Also we would need
- # to push the star imports into Evaluator.module_cache, if we reenable this.
- @evaluator_method_cache([])
- def star_imports(self):
- modules = []
- for i in self.tree_node.iter_imports():
- if i.is_star_import():
- name = i.get_paths()[-1][-1]
- new = infer_import(self, name)
- for module in new:
- if isinstance(module, ModuleContext):
- modules += module.star_imports()
- modules += new
- return modules
-
- @evaluator_method_cache()
- def _module_attributes_dict(self):
- names = ['__file__', '__package__', '__doc__', '__name__']
- # All the additional module attributes are strings.
- return dict((n, _ModuleAttributeName(self, n)) for n in names)
-
- @property
- def _string_name(self):
- """ This is used for the goto functions. """
- if self._path is None:
- return '' # no path -> empty name
- else:
- sep = (re.escape(os.path.sep),) * 2
- r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path)
- # Remove PEP 3149 names
- return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
-
- @property
- @evaluator_method_cache()
- def name(self):
- return ModuleName(self, self._string_name)
-
- def _get_init_directory(self):
- """
- :return: The path to the directory of a package. None in case it's not
- a package.
- """
- for suffix in all_suffixes():
- ending = '__init__' + suffix
- py__file__ = self.py__file__()
- if py__file__ is not None and py__file__.endswith(ending):
- # Remove the ending, including the separator.
- return self.py__file__()[:-len(ending) - 1]
- return None
-
- def py__name__(self):
- for name, module in self.evaluator.module_cache.iterate_modules_with_names():
- if module == self and name != '':
- return name
-
- return '__main__'
-
- def py__file__(self):
- """
- In contrast to Python's __file__ can be None.
- """
- if self._path is None:
- return None
-
- return os.path.abspath(self._path)
-
- def py__package__(self):
- if self._get_init_directory() is None:
- return re.sub(r'\.?[^.]+$', '', self.py__name__())
- else:
- return self.py__name__()
-
- def _py__path__(self):
- search_path = self.evaluator.get_sys_path()
- init_path = self.py__file__()
- if os.path.basename(init_path) == '__init__.py':
- with open(init_path, 'rb') as f:
- content = python_bytes_to_unicode(f.read(), errors='replace')
- # these are strings that need to be used for namespace packages,
- # the first one is ``pkgutil``, the second ``pkg_resources``.
- options = ('declare_namespace(__name__)', 'extend_path(__path__')
- if options[0] in content or options[1] in content:
- # It is a namespace, now try to find the rest of the
- # modules on sys_path or whatever the search_path is.
- paths = set()
- for s in search_path:
- other = os.path.join(s, self.name.string_name)
- if os.path.isdir(other):
- paths.add(other)
- if paths:
- return list(paths)
- # TODO I'm not sure if this is how nested namespace
- # packages work. The tests are not really good enough to
- # show that.
- # Default to this.
- return [self._get_init_directory()]
-
- @property
- def py__path__(self):
- """
- Not seen here, since it's a property. The callback actually uses a
- variable, so use it like::
-
- foo.py__path__(sys_path)
-
- In case of a package, this returns Python's __path__ attribute, which
- is a list of paths (strings).
- Raises an AttributeError if the module is not a package.
- """
- path = self._get_init_directory()
-
- if path is None:
- raise AttributeError('Only packages have __path__ attributes.')
- else:
- return self._py__path__
-
- @evaluator_method_cache()
- def _sub_modules_dict(self):
- """
- Lists modules in the directory of this module (if this module is a
- package).
- """
- names = {}
- try:
- method = self.py__path__
- except AttributeError:
- pass
- else:
- for path in method():
- mods = iter_modules([path])
- for module_loader, name, is_pkg in mods:
- # It's obviously a relative import to the current module.
- names[name] = SubModuleName(self, name)
-
- # TODO add something like this in the future, its cleaner than the
- # import hacks.
- # ``os.path`` is a hardcoded exception, because it's a
- # ``sys.modules`` modification.
- # if str(self.name) == 'os':
- # names.append(Name('path', parent_context=self))
-
- return names
-
- def py__class__(self):
- return compiled.get_special_object(self.evaluator, u'MODULE_CLASS')
-
- def __repr__(self):
- return "<%s: %s@%s-%s>" % (
- self.__class__.__name__, self._string_name,
- self.tree_node.start_pos[0], self.tree_node.end_pos[0])
diff --git a/contrib/python/jedi/jedi/evaluate/context/namespace.py b/contrib/python/jedi/jedi/evaluate/context/namespace.py
deleted file mode 100644
index f41898c7eb..0000000000
--- a/contrib/python/jedi/jedi/evaluate/context/namespace.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import os
-from itertools import chain
-
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate import imports
-from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, ContextNameMixin
-from jedi.evaluate.base_context import Context
-
-
-class ImplicitNSName(ContextNameMixin, AbstractNameDefinition):
- """
- Accessing names for implicit namespace packages should infer to nothing.
- This object will prevent Jedi from raising exceptions
- """
- def __init__(self, implicit_ns_context, string_name):
- self._context = implicit_ns_context
- self.string_name = string_name
-
-
-class ImplicitNamespaceContext(Context):
- """
- Provides support for implicit namespace packages
- """
- # Is a module like every other module, because if you import an empty
- # folder foobar it will be available as an object:
- # <module 'foobar' (namespace)>.
- api_type = u'module'
- parent_context = None
-
- def __init__(self, evaluator, fullname, paths):
- super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None)
- self.evaluator = evaluator
- self._fullname = fullname
- self.paths = paths
-
- def get_filters(self, search_global, until_position=None, origin_scope=None):
- yield DictFilter(self._sub_modules_dict())
-
- @property
- @evaluator_method_cache()
- def name(self):
- string_name = self.py__package__().rpartition('.')[-1]
- return ImplicitNSName(self, string_name)
-
- def py__file__(self):
- return None
-
- def py__package__(self):
- """Return the fullname
- """
- return self._fullname
-
- def py__path__(self):
- return [self.paths]
-
- def py__name__(self):
- return self._fullname
-
- @evaluator_method_cache()
- def _sub_modules_dict(self):
- names = {}
-
- file_names = chain.from_iterable(os.listdir(path) for path in self.paths)
- mods = [
- file_name.rpartition('.')[0] if '.' in file_name else file_name
- for file_name in file_names
- if file_name != '__pycache__'
- ]
-
- for name in mods:
- names[name] = imports.SubModuleName(self, name)
- return names
diff --git a/contrib/python/jedi/jedi/evaluate/docstrings.py b/contrib/python/jedi/jedi/evaluate/docstrings.py
deleted file mode 100644
index aaef8ea853..0000000000
--- a/contrib/python/jedi/jedi/evaluate/docstrings.py
+++ /dev/null
@@ -1,307 +0,0 @@
-"""
-Docstrings are another source of information for functions and classes.
-:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while
-the docstring parsing is much easier. There are three different types of
-docstrings that |jedi| understands:
-
-- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
-- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_
-- `Numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
-
-For example, the sphinx annotation ``:type foo: str`` clearly states that the
-type of ``foo`` is ``str``.
-
-As an addition to parameter searching, this module also provides return
-annotations.
-"""
-
-import re
-from textwrap import dedent
-
-from parso import parse, ParserSyntaxError
-
-from jedi._compatibility import u
-from jedi.evaluate.utils import indent_block
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \
- NO_CONTEXTS
-from jedi.evaluate.lazy_context import LazyKnownContexts
-
-
-DOCSTRING_PARAM_PATTERNS = [
- r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
- r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type
- r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
-]
-
-DOCSTRING_RETURN_PATTERNS = [
- re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
- re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
-]
-
-REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
-
-
-_numpy_doc_string_cache = None
-
-
-def _get_numpy_doc_string_cls():
- global _numpy_doc_string_cache
- if isinstance(_numpy_doc_string_cache, ImportError):
- raise _numpy_doc_string_cache
- try:
- from numpydoc.docscrape import NumpyDocString
- _numpy_doc_string_cache = NumpyDocString
- except ImportError as e:
- _numpy_doc_string_cache = e
- raise
- return _numpy_doc_string_cache
-
-
-def _search_param_in_numpydocstr(docstr, param_str):
- """Search `docstr` (in numpydoc format) for type(-s) of `param_str`."""
- try:
- # This is a non-public API. If it ever changes we should be
- # prepared and return gracefully.
- params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
- except (KeyError, AttributeError, ImportError):
- return []
- for p_name, p_type, p_descr in params:
- if p_name == param_str:
- m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
- if m:
- p_type = m.group(1)
- return list(_expand_typestr(p_type))
- return []
-
-
-def _search_return_in_numpydocstr(docstr):
- """
- Search `docstr` (in numpydoc format) for type(-s) of function returns.
- """
- try:
- doc = _get_numpy_doc_string_cls()(docstr)
- except ImportError:
- return
- try:
- # This is a non-public API. If it ever changes we should be
- # prepared and return gracefully.
- returns = doc._parsed_data['Returns']
- returns += doc._parsed_data['Yields']
- except (KeyError, AttributeError):
- return
- for r_name, r_type, r_descr in returns:
- # Return names are optional and if so the type is in the name
- if not r_type:
- r_type = r_name
- for type_ in _expand_typestr(r_type):
- yield type_
-
-
-def _expand_typestr(type_str):
- """
- Attempts to interpret the possible types in `type_str`
- """
- # Check if alternative types are specified with 'or'
- if re.search(r'\bor\b', type_str):
- for t in type_str.split('or'):
- yield t.split('of')[0].strip()
- # Check if like "list of `type`" and set type to list
- elif re.search(r'\bof\b', type_str):
- yield type_str.split('of')[0]
- # Check if type has is a set of valid literal values eg: {'C', 'F', 'A'}
- elif type_str.startswith('{'):
- node = parse(type_str, version='3.6').children[0]
- if node.type == 'atom':
- for leaf in node.children[1].children:
- if leaf.type == 'number':
- if '.' in leaf.value:
- yield 'float'
- else:
- yield 'int'
- elif leaf.type == 'string':
- if 'b' in leaf.string_prefix.lower():
- yield 'bytes'
- else:
- yield 'str'
- # Ignore everything else.
-
- # Otherwise just work with what we have.
- else:
- yield type_str
-
-
-def _search_param_in_docstr(docstr, param_str):
- """
- Search `docstr` for type(-s) of `param_str`.
-
- >>> _search_param_in_docstr(':type param: int', 'param')
- ['int']
- >>> _search_param_in_docstr('@type param: int', 'param')
- ['int']
- >>> _search_param_in_docstr(
- ... ':type param: :class:`threading.Thread`', 'param')
- ['threading.Thread']
- >>> bool(_search_param_in_docstr('no document', 'param'))
- False
- >>> _search_param_in_docstr(':param int param: some description', 'param')
- ['int']
-
- """
- # look at #40 to see definitions of those params
- patterns = [re.compile(p % re.escape(param_str))
- for p in DOCSTRING_PARAM_PATTERNS]
- for pattern in patterns:
- match = pattern.search(docstr)
- if match:
- return [_strip_rst_role(match.group(1))]
-
- return _search_param_in_numpydocstr(docstr, param_str)
-
-
-def _strip_rst_role(type_str):
- """
- Strip off the part looks like a ReST role in `type_str`.
-
- >>> _strip_rst_role(':class:`ClassName`') # strip off :class:
- 'ClassName'
- >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain
- 'module.Object'
- >>> _strip_rst_role('ClassName') # do nothing when not ReST role
- 'ClassName'
-
- See also:
- http://sphinx-doc.org/domains.html#cross-referencing-python-objects
-
- """
- match = REST_ROLE_PATTERN.match(type_str)
- if match:
- return match.group(1)
- else:
- return type_str
-
-
-def _evaluate_for_statement_string(module_context, string):
- code = dedent(u("""
- def pseudo_docstring_stuff():
- '''
- Create a pseudo function for docstring statements.
- Need this docstring so that if the below part is not valid Python this
- is still a function.
- '''
- {}
- """))
- if string is None:
- return []
-
- for element in re.findall(r'((?:\w+\.)*\w+)\.', string):
- # Try to import module part in dotted name.
- # (e.g., 'threading' in 'threading.Thread').
- string = 'import %s\n' % element + string
-
- # Take the default grammar here, if we load the Python 2.7 grammar here, it
- # will be impossible to use `...` (Ellipsis) as a token. Docstring types
- # don't need to conform with the current grammar.
- grammar = module_context.evaluator.latest_grammar
- try:
- module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
- except ParserSyntaxError:
- return []
- try:
- funcdef = next(module.iter_funcdefs())
- # First pick suite, then simple_stmt and then the node,
- # which is also not the last item, because there's a newline.
- stmt = funcdef.children[-1].children[-1].children[-2]
- except (AttributeError, IndexError):
- return []
-
- if stmt.type not in ('name', 'atom', 'atom_expr'):
- return []
-
- from jedi.evaluate.context import FunctionContext
- function_context = FunctionContext(
- module_context.evaluator,
- module_context,
- funcdef
- )
- func_execution_context = function_context.get_function_execution()
- # Use the module of the param.
- # TODO this module is not the module of the param in case of a function
- # call. In that case it's the module of the function call.
- # stuffed with content from a function call.
- return list(_execute_types_in_stmt(func_execution_context, stmt))
-
-
-def _execute_types_in_stmt(module_context, stmt):
- """
- Executing all types or general elements that we find in a statement. This
- doesn't include tuple, list and dict literals, because the stuff they
- contain is executed. (Used as type information).
- """
- definitions = module_context.eval_node(stmt)
- return ContextSet.from_sets(
- _execute_array_values(module_context.evaluator, d)
- for d in definitions
- )
-
-
-def _execute_array_values(evaluator, array):
- """
- Tuples indicate that there's not just one return value, but the listed
- ones. `(str, int)` means that it returns a tuple with both types.
- """
- from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
- if isinstance(array, SequenceLiteralContext):
- values = []
- for lazy_context in array.py__iter__():
- objects = ContextSet.from_sets(
- _execute_array_values(evaluator, typ)
- for typ in lazy_context.infer()
- )
- values.append(LazyKnownContexts(objects))
- return {FakeSequence(evaluator, array.array_type, values)}
- else:
- return array.execute_evaluated()
-
-
-@evaluator_method_cache()
-def infer_param(execution_context, param):
- from jedi.evaluate.context.instance import InstanceArguments
- from jedi.evaluate.context import FunctionExecutionContext
-
- def eval_docstring(docstring):
- return ContextSet.from_iterable(
- p
- for param_str in _search_param_in_docstr(docstring, param.name.value)
- for p in _evaluate_for_statement_string(module_context, param_str)
- )
- module_context = execution_context.get_root_context()
- func = param.get_parent_function()
- if func.type == 'lambdef':
- return NO_CONTEXTS
-
- types = eval_docstring(execution_context.py__doc__())
- if isinstance(execution_context, FunctionExecutionContext) \
- and isinstance(execution_context.var_args, InstanceArguments) \
- and execution_context.function_context.py__name__() == '__init__':
- class_context = execution_context.var_args.instance.class_context
- types |= eval_docstring(class_context.py__doc__())
-
- return types
-
-
-@evaluator_method_cache()
-@iterator_to_context_set
-def infer_return_types(function_context):
- def search_return_in_docstr(code):
- for p in DOCSTRING_RETURN_PATTERNS:
- match = p.search(code)
- if match:
- yield _strip_rst_role(match.group(1))
- # Check for numpy style return hint
- for type_ in _search_return_in_numpydocstr(code):
- yield type_
-
- for type_str in search_return_in_docstr(function_context.py__doc__()):
- for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str):
- yield type_eval
diff --git a/contrib/python/jedi/jedi/evaluate/dynamic.py b/contrib/python/jedi/jedi/evaluate/dynamic.py
deleted file mode 100644
index 7f7b0d87bd..0000000000
--- a/contrib/python/jedi/jedi/evaluate/dynamic.py
+++ /dev/null
@@ -1,231 +0,0 @@
-"""
-One of the really important features of |jedi| is to have an option to
-understand code like this::
-
- def foo(bar):
- bar. # completion here
- foo(1)
-
-There's no doubt wheter bar is an ``int`` or not, but if there's also a call
-like ``foo('str')``, what would happen? Well, we'll just show both. Because
-that's what a human would expect.
-
-It works as follows:
-
-- |Jedi| sees a param
-- search for function calls named ``foo``
-- execute these calls and check the input.
-"""
-
-from parso.python import tree
-from jedi import settings
-from jedi import debug
-from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate import imports
-from jedi.evaluate.arguments import TreeArguments
-from jedi.evaluate.param import create_default_params
-from jedi.evaluate.helpers import is_stdlib_path
-from jedi.evaluate.utils import to_list
-from jedi.parser_utils import get_parent_scope
-from jedi.evaluate.context import ModuleContext, instance
-from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
-from jedi.evaluate import recursion
-
-
-MAX_PARAM_SEARCHES = 20
-
-
-class DynamicExecutedParams(object):
- """
- Simulates being a parameter while actually just being multiple params.
- """
-
- def __init__(self, evaluator, executed_params):
- self.evaluator = evaluator
- self._executed_params = executed_params
-
- def infer(self):
- with recursion.execution_allowed(self.evaluator, self) as allowed:
- # We need to catch recursions that may occur, because an
- # anonymous functions can create an anonymous parameter that is
- # more or less self referencing.
- if allowed:
- return ContextSet.from_sets(p.infer() for p in self._executed_params)
- return NO_CONTEXTS
-
-
-@debug.increase_indent
-def search_params(evaluator, execution_context, funcdef):
- """
- A dynamic search for param values. If you try to complete a type:
-
- >>> def func(foo):
- ... foo
- >>> func(1)
- >>> func("")
-
- It is not known what the type ``foo`` without analysing the whole code. You
- have to look for all calls to ``func`` to find out what ``foo`` possibly
- is.
- """
- if not settings.dynamic_params:
- return create_default_params(execution_context, funcdef)
-
- evaluator.dynamic_params_depth += 1
- try:
- path = execution_context.get_root_context().py__file__()
- if path is not None and is_stdlib_path(path):
- # We don't want to search for usages in the stdlib. Usually people
- # don't work with it (except if you are a core maintainer, sorry).
- # This makes everything slower. Just disable it and run the tests,
- # you will see the slowdown, especially in 3.6.
- return create_default_params(execution_context, funcdef)
-
- if funcdef.type == 'lambdef':
- string_name = _get_lambda_name(funcdef)
- if string_name is None:
- return create_default_params(execution_context, funcdef)
- else:
- string_name = funcdef.name.value
- debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')
-
- try:
- module_context = execution_context.get_root_context()
- function_executions = _search_function_executions(
- evaluator,
- module_context,
- funcdef,
- string_name=string_name,
- )
- if function_executions:
- zipped_params = zip(*list(
- function_execution.get_executed_params()
- for function_execution in function_executions
- ))
- params = [DynamicExecutedParams(evaluator, executed_params) for executed_params in zipped_params]
- # Evaluate the ExecutedParams to types.
- else:
- return create_default_params(execution_context, funcdef)
- finally:
- debug.dbg('Dynamic param result finished', color='MAGENTA')
- return params
- finally:
- evaluator.dynamic_params_depth -= 1
-
-
-@evaluator_function_cache(default=None)
-@to_list
-def _search_function_executions(evaluator, module_context, funcdef, string_name):
- """
- Returns a list of param names.
- """
- compare_node = funcdef
- if string_name == '__init__':
- cls = get_parent_scope(funcdef)
- if isinstance(cls, tree.Class):
- string_name = cls.name.value
- compare_node = cls
-
- found_executions = False
- i = 0
- for for_mod_context in imports.get_modules_containing_name(
- evaluator, [module_context], string_name):
- if not isinstance(module_context, ModuleContext):
- return
- for name, trailer in _get_possible_nodes(for_mod_context, string_name):
- i += 1
-
- # This is a simple way to stop Jedi's dynamic param recursion
- # from going wild: The deeper Jedi's in the recursion, the less
- # code should be evaluated.
- if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES:
- return
-
- random_context = evaluator.create_context(for_mod_context, name)
- for function_execution in _check_name_for_execution(
- evaluator, random_context, compare_node, name, trailer):
- found_executions = True
- yield function_execution
-
- # If there are results after processing a module, we're probably
- # good to process. This is a speed optimization.
- if found_executions:
- return
-
-
-def _get_lambda_name(node):
- stmt = node.parent
- if stmt.type == 'expr_stmt':
- first_operator = next(stmt.yield_operators(), None)
- if first_operator == '=':
- first = stmt.children[0]
- if first.type == 'name':
- return first.value
-
- return None
-
-
-def _get_possible_nodes(module_context, func_string_name):
- try:
- names = module_context.tree_node.get_used_names()[func_string_name]
- except KeyError:
- return
-
- for name in names:
- bracket = name.get_next_leaf()
- trailer = bracket.parent
- if trailer.type == 'trailer' and bracket == '(':
- yield name, trailer
-
-
-def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
- from jedi.evaluate.context.function import FunctionExecutionContext
-
- def create_func_excs():
- arglist = trailer.children[1]
- if arglist == ')':
- arglist = None
- args = TreeArguments(evaluator, context, arglist, trailer)
- if value_node.type == 'classdef':
- created_instance = instance.TreeInstance(
- evaluator,
- value.parent_context,
- value,
- args
- )
- for execution in created_instance.create_init_executions():
- yield execution
- else:
- yield value.get_function_execution(args)
-
- for value in evaluator.goto_definitions(context, name):
- value_node = value.tree_node
- if compare_node == value_node:
- for func_execution in create_func_excs():
- yield func_execution
- elif isinstance(value.parent_context, FunctionExecutionContext) and \
- compare_node.type == 'funcdef':
- # Here we're trying to find decorators by checking the first
- # parameter. It's not very generic though. Should find a better
- # solution that also applies to nested decorators.
- params = value.parent_context.get_executed_params()
- if len(params) != 1:
- continue
- values = params[0].infer()
- nodes = [v.tree_node for v in values]
- if nodes == [compare_node]:
- # Found a decorator.
- module_context = context.get_root_context()
- execution_context = next(create_func_excs())
- for name, trailer in _get_possible_nodes(module_context, params[0].string_name):
- if value_node.start_pos < name.start_pos < value_node.end_pos:
- random_context = evaluator.create_context(execution_context, name)
- iterator = _check_name_for_execution(
- evaluator,
- random_context,
- compare_node,
- name,
- trailer
- )
- for function_execution in iterator:
- yield function_execution
diff --git a/contrib/python/jedi/jedi/evaluate/filters.py b/contrib/python/jedi/jedi/evaluate/filters.py
deleted file mode 100644
index 77474e2dc8..0000000000
--- a/contrib/python/jedi/jedi/evaluate/filters.py
+++ /dev/null
@@ -1,492 +0,0 @@
-"""
-Filters are objects that you can use to filter names in different scopes. They
-are needed for name resolution.
-"""
-from abc import abstractmethod
-
-from parso.tree import search_ancestor
-
-from jedi._compatibility import use_metaclass, Parameter
-from jedi.cache import memoize_method
-from jedi.evaluate import flow_analysis
-from jedi.evaluate.base_context import ContextSet, Context
-from jedi.parser_utils import get_parent_scope
-from jedi.evaluate.utils import to_list
-
-
-class AbstractNameDefinition(object):
- start_pos = None
- string_name = None
- parent_context = None
- tree_name = None
-
- @abstractmethod
- def infer(self):
- raise NotImplementedError
-
- @abstractmethod
- def goto(self):
- # Typically names are already definitions and therefore a goto on that
- # name will always result on itself.
- return {self}
-
- def get_root_context(self):
- return self.parent_context.get_root_context()
-
- def __repr__(self):
- if self.start_pos is None:
- return '<%s: %s>' % (self.__class__.__name__, self.string_name)
- return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos)
-
- def is_import(self):
- return False
-
- @property
- def api_type(self):
- return self.parent_context.api_type
-
-
-class AbstractTreeName(AbstractNameDefinition):
- def __init__(self, parent_context, tree_name):
- self.parent_context = parent_context
- self.tree_name = tree_name
-
- def goto(self):
- return self.parent_context.evaluator.goto(self.parent_context, self.tree_name)
-
- def is_import(self):
- imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
- return imp is not None
-
- @property
- def string_name(self):
- return self.tree_name.value
-
- @property
- def start_pos(self):
- return self.tree_name.start_pos
-
-
-class ContextNameMixin(object):
- def infer(self):
- return ContextSet(self._context)
-
- def get_root_context(self):
- if self.parent_context is None:
- return self._context
- return super(ContextNameMixin, self).get_root_context()
-
- @property
- def api_type(self):
- return self._context.api_type
-
-
-class ContextName(ContextNameMixin, AbstractTreeName):
- def __init__(self, context, tree_name):
- super(ContextName, self).__init__(context.parent_context, tree_name)
- self._context = context
-
-
-class TreeNameDefinition(AbstractTreeName):
- _API_TYPES = dict(
- import_name='module',
- import_from='module',
- funcdef='function',
- param='param',
- classdef='class',
- )
-
- def infer(self):
- # Refactor this, should probably be here.
- from jedi.evaluate.syntax_tree import tree_name_to_contexts
- return tree_name_to_contexts(self.parent_context.evaluator, self.parent_context, self.tree_name)
-
- @property
- def api_type(self):
- definition = self.tree_name.get_definition(import_name_always=True)
- if definition is None:
- return 'statement'
- return self._API_TYPES.get(definition.type, 'statement')
-
-
-class ParamName(AbstractTreeName):
- api_type = u'param'
-
- def __init__(self, parent_context, tree_name):
- self.parent_context = parent_context
- self.tree_name = tree_name
-
- def get_kind(self):
- tree_param = search_ancestor(self.tree_name, 'param')
- if tree_param.star_count == 1: # *args
- return Parameter.VAR_POSITIONAL
- if tree_param.star_count == 2: # **kwargs
- return Parameter.VAR_KEYWORD
-
- parent = tree_param.parent
- for p in parent.children:
- if p.type == 'param':
- if p.star_count:
- return Parameter.KEYWORD_ONLY
- if p == tree_param:
- break
- return Parameter.POSITIONAL_OR_KEYWORD
-
- def infer(self):
- return self.get_param().infer()
-
- def get_param(self):
- params = self.parent_context.get_executed_params()
- param_node = search_ancestor(self.tree_name, 'param')
- return params[param_node.position_index]
-
-
-class AbstractFilter(object):
- _until_position = None
-
- def _filter(self, names):
- if self._until_position is not None:
- return [n for n in names if n.start_pos < self._until_position]
- return names
-
- @abstractmethod
- def get(self, name):
- raise NotImplementedError
-
- @abstractmethod
- def values(self):
- raise NotImplementedError
-
-
-class AbstractUsedNamesFilter(AbstractFilter):
- name_class = TreeNameDefinition
-
- def __init__(self, context, parser_scope):
- self._parser_scope = parser_scope
- self._used_names = self._parser_scope.get_root_node().get_used_names()
- self.context = context
-
- def get(self, name):
- try:
- names = self._used_names[name]
- except KeyError:
- return []
-
- return self._convert_names(self._filter(names))
-
- def _convert_names(self, names):
- return [self.name_class(self.context, name) for name in names]
-
- def values(self):
- return self._convert_names(name for name_list in self._used_names.values()
- for name in self._filter(name_list))
-
- def __repr__(self):
- return '<%s: %s>' % (self.__class__.__name__, self.context)
-
-
-class ParserTreeFilter(AbstractUsedNamesFilter):
- def __init__(self, evaluator, context, node_context=None, until_position=None,
- origin_scope=None):
- """
- node_context is an option to specify a second context for use cases
- like the class mro where the parent class of a new name would be the
- context, but for some type inference it's important to have a local
- context of the other classes.
- """
- if node_context is None:
- node_context = context
- super(ParserTreeFilter, self).__init__(context, node_context.tree_node)
- self._node_context = node_context
- self._origin_scope = origin_scope
- self._until_position = until_position
-
- def _filter(self, names):
- names = super(ParserTreeFilter, self)._filter(names)
- names = [n for n in names if self._is_name_reachable(n)]
- return list(self._check_flows(names))
-
- def _is_name_reachable(self, name):
- if not name.is_definition():
- return False
- parent = name.parent
- if parent.type == 'trailer':
- return False
- base_node = parent if parent.type in ('classdef', 'funcdef') else name
- return get_parent_scope(base_node) == self._parser_scope
-
- def _check_flows(self, names):
- for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
- check = flow_analysis.reachability_check(
- context=self._node_context,
- context_scope=self._parser_scope,
- node=name,
- origin_scope=self._origin_scope
- )
- if check is not flow_analysis.UNREACHABLE:
- yield name
-
- if check is flow_analysis.REACHABLE:
- break
-
-
-class FunctionExecutionFilter(ParserTreeFilter):
- param_name = ParamName
-
- def __init__(self, evaluator, context, node_context=None,
- until_position=None, origin_scope=None):
- super(FunctionExecutionFilter, self).__init__(
- evaluator,
- context,
- node_context,
- until_position,
- origin_scope
- )
-
- @to_list
- def _convert_names(self, names):
- for name in names:
- param = search_ancestor(name, 'param')
- if param:
- yield self.param_name(self.context, name)
- else:
- yield TreeNameDefinition(self.context, name)
-
-
-class GlobalNameFilter(AbstractUsedNamesFilter):
- def __init__(self, context, parser_scope):
- super(GlobalNameFilter, self).__init__(context, parser_scope)
-
- @to_list
- def _filter(self, names):
- for name in names:
- if name.parent.type == 'global_stmt':
- yield name
-
-
-class DictFilter(AbstractFilter):
- def __init__(self, dct):
- self._dct = dct
-
- def get(self, name):
- try:
- value = self._convert(name, self._dct[name])
- except KeyError:
- return []
- else:
- return list(self._filter([value]))
-
- def values(self):
- def yielder():
- for item in self._dct.items():
- try:
- yield self._convert(*item)
- except KeyError:
- pass
- return self._filter(yielder())
-
- def _convert(self, name, value):
- return value
-
-
-class MergedFilter(object):
- def __init__(self, *filters):
- self._filters = filters
-
- def get(self, name):
- return [n for filter in self._filters for n in filter.get(name)]
-
- def values(self):
- return [n for filter in self._filters for n in filter.values()]
-
- def __repr__(self):
- return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in self._filters))
-
-
-class _BuiltinMappedMethod(Context):
- """``Generator.__next__`` ``dict.values`` methods and so on."""
- api_type = u'function'
-
- def __init__(self, builtin_context, method, builtin_func):
- super(_BuiltinMappedMethod, self).__init__(
- builtin_context.evaluator,
- parent_context=builtin_context
- )
- self._method = method
- self._builtin_func = builtin_func
-
- def py__call__(self, params):
- # TODO add TypeError if params are given/or not correct.
- return self._method(self.parent_context)
-
- def __getattr__(self, name):
- return getattr(self._builtin_func, name)
-
-
-class SpecialMethodFilter(DictFilter):
- """
- A filter for methods that are defined in this module on the corresponding
- classes like Generator (for __next__, etc).
- """
- class SpecialMethodName(AbstractNameDefinition):
- api_type = u'function'
-
- def __init__(self, parent_context, string_name, value, builtin_context):
- callable_, python_version = value
- if python_version is not None and \
- python_version != parent_context.evaluator.environment.version_info.major:
- raise KeyError
-
- self.parent_context = parent_context
- self.string_name = string_name
- self._callable = callable_
- self._builtin_context = builtin_context
-
- def infer(self):
- for filter in self._builtin_context.get_filters():
- # We can take the first index, because on builtin methods there's
- # always only going to be one name. The same is true for the
- # inferred values.
- for name in filter.get(self.string_name):
- builtin_func = next(iter(name.infer()))
- break
- else:
- continue
- break
- return ContextSet(
- _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
- )
-
- def __init__(self, context, dct, builtin_context):
- super(SpecialMethodFilter, self).__init__(dct)
- self.context = context
- self._builtin_context = builtin_context
- """
- This context is what will be used to introspect the name, where as the
- other context will be used to execute the function.
-
- We distinguish, because we have to.
- """
-
- def _convert(self, name, value):
- return self.SpecialMethodName(self.context, name, value, self._builtin_context)
-
-
-class _OverwriteMeta(type):
- def __init__(cls, name, bases, dct):
- super(_OverwriteMeta, cls).__init__(name, bases, dct)
-
- base_dct = {}
- for base_cls in reversed(cls.__bases__):
- try:
- base_dct.update(base_cls.overwritten_methods)
- except AttributeError:
- pass
-
- for func in cls.__dict__.values():
- try:
- base_dct.update(func.registered_overwritten_methods)
- except AttributeError:
- pass
- cls.overwritten_methods = base_dct
-
-
-class AbstractObjectOverwrite(use_metaclass(_OverwriteMeta, object)):
- def get_object(self):
- raise NotImplementedError
-
- def get_filters(self, search_global, *args, **kwargs):
- yield SpecialMethodFilter(self, self.overwritten_methods, self.get_object())
-
- for filter in self.get_object().get_filters(search_global):
- yield filter
-
-
-class BuiltinOverwrite(Context, AbstractObjectOverwrite):
- special_object_identifier = None
-
- def __init__(self, evaluator):
- super(BuiltinOverwrite, self).__init__(evaluator, evaluator.builtins_module)
-
- @memoize_method
- def get_object(self):
- from jedi.evaluate import compiled
- assert self.special_object_identifier
- return compiled.get_special_object(self.evaluator, self.special_object_identifier)
-
- def py__class__(self):
- return self.get_object().py__class__()
-
-
-def publish_method(method_name, python_version_match=None):
- def decorator(func):
- dct = func.__dict__.setdefault('registered_overwritten_methods', {})
- dct[method_name] = func, python_version_match
- return func
- return decorator
-
-
-def get_global_filters(evaluator, context, until_position, origin_scope):
- """
- Returns all filters in order of priority for name resolution.
-
- For global name lookups. The filters will handle name resolution
- themselves, but here we gather possible filters downwards.
-
- >>> from jedi._compatibility import u, no_unicode_pprint
- >>> from jedi import Script
- >>> script = Script(u('''
- ... x = ['a', 'b', 'c']
- ... def func():
- ... y = None
- ... '''))
- >>> module_node = script._module_node
- >>> scope = next(module_node.iter_funcdefs())
- >>> scope
- <Function: func@3-5>
- >>> context = script._get_module().create_context(scope)
- >>> filters = list(get_global_filters(context.evaluator, context, (4, 0), None))
-
- First we get the names from the function scope.
-
- >>> no_unicode_pprint(filters[0]) #doctest: +ELLIPSIS
- MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>)
- >>> sorted(str(n) for n in filters[0].values())
- ['<TreeNameDefinition: func@(3, 4)>', '<TreeNameDefinition: x@(2, 0)>']
- >>> filters[0]._filters[0]._until_position
- (4, 0)
- >>> filters[0]._filters[1]._until_position
-
- Then it yields the names from one level "lower". In this example, this is
- the module scope (including globals).
- As a side note, you can see, that the position in the filter is None on the
- globals filter, because there the whole module is searched.
-
- >>> list(filters[1].values()) # package modules -> Also empty.
- []
- >>> sorted(name.string_name for name in filters[2].values()) # Module attributes
- ['__doc__', '__file__', '__name__', '__package__']
-
- Finally, it yields the builtin filter, if `include_builtin` is
- true (default).
-
- >>> filters[3].values() #doctest: +ELLIPSIS
- [<CompiledName: ...>, ...]
- """
- from jedi.evaluate.context.function import FunctionExecutionContext
- while context is not None:
- # Names in methods cannot be resolved within the class.
- for filter in context.get_filters(
- search_global=True,
- until_position=until_position,
- origin_scope=origin_scope):
- yield filter
- if isinstance(context, FunctionExecutionContext):
- # The position should be reset if the current scope is a function.
- until_position = None
-
- context = context.parent_context
-
- # Add builtins to the global scope.
- for filter in evaluator.builtins_module.get_filters(search_global=True):
- yield filter
diff --git a/contrib/python/jedi/jedi/evaluate/finder.py b/contrib/python/jedi/jedi/evaluate/finder.py
deleted file mode 100644
index 5e7043f796..0000000000
--- a/contrib/python/jedi/jedi/evaluate/finder.py
+++ /dev/null
@@ -1,278 +0,0 @@
-"""
-Searching for names with given scope and name. This is very central in Jedi and
-Python. The name resolution is quite complicated with descripter,
-``__getattribute__``, ``__getattr__``, ``global``, etc.
-
-If you want to understand name resolution, please read the first few chapters
-in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
-
-Flow checks
-+++++++++++
-
-Flow checks are not really mature. There's only a check for ``isinstance``. It
-would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
-Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
-check for -> a is a string). There's big potential in these checks.
-"""
-
-from parso.python import tree
-from parso.tree import search_ancestor
-from jedi import debug
-from jedi import settings
-from jedi.evaluate.context import AbstractInstanceContext
-from jedi.evaluate import compiled
-from jedi.evaluate import analysis
-from jedi.evaluate import flow_analysis
-from jedi.evaluate.arguments import TreeArguments
-from jedi.evaluate import helpers
-from jedi.evaluate.context import iterable
-from jedi.evaluate.filters import get_global_filters, TreeNameDefinition
-from jedi.evaluate.base_context import ContextSet
-from jedi.parser_utils import is_scope, get_parent_scope
-
-
-class NameFinder(object):
- def __init__(self, evaluator, context, name_context, name_or_str,
- position=None, analysis_errors=True):
- self._evaluator = evaluator
- # Make sure that it's not just a syntax tree node.
- self._context = context
- self._name_context = name_context
- self._name = name_or_str
- if isinstance(name_or_str, tree.Name):
- self._string_name = name_or_str.value
- else:
- self._string_name = name_or_str
- self._position = position
- self._found_predefined_types = None
- self._analysis_errors = analysis_errors
-
- @debug.increase_indent
- def find(self, filters, attribute_lookup):
- """
- :params bool attribute_lookup: Tell to logic if we're accessing the
- attribute or the contents of e.g. a function.
- """
- names = self.filter_name(filters)
- if self._found_predefined_types is not None and names:
- check = flow_analysis.reachability_check(
- context=self._context,
- context_scope=self._context.tree_node,
- node=self._name,
- )
- if check is flow_analysis.UNREACHABLE:
- return ContextSet()
- return self._found_predefined_types
-
- types = self._names_to_types(names, attribute_lookup)
-
- if not names and self._analysis_errors and not types \
- and not (isinstance(self._name, tree.Name) and
- isinstance(self._name.parent.parent, tree.Param)):
- if isinstance(self._name, tree.Name):
- if attribute_lookup:
- analysis.add_attribute_error(
- self._name_context, self._context, self._name)
- else:
- message = ("NameError: name '%s' is not defined."
- % self._string_name)
- analysis.add(self._name_context, 'name-error', self._name, message)
-
- return types
-
- def _get_origin_scope(self):
- if isinstance(self._name, tree.Name):
- scope = self._name
- while scope.parent is not None:
- # TODO why if classes?
- if not isinstance(scope, tree.Scope):
- break
- scope = scope.parent
- return scope
- else:
- return None
-
- def get_filters(self, search_global=False):
- origin_scope = self._get_origin_scope()
- if search_global:
- position = self._position
-
- # For functions and classes the defaults don't belong to the
- # function and get evaluated in the context before the function. So
- # make sure to exclude the function/class name.
- if origin_scope is not None:
- ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef', 'lambdef')
- lambdef = None
- if ancestor == 'lambdef':
- # For lambdas it's even more complicated since parts will
- # be evaluated later.
- lambdef = ancestor
- ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef')
- if ancestor is not None:
- colon = ancestor.children[-2]
- if position < colon.start_pos:
- if lambdef is None or position < lambdef.children[-2].start_pos:
- position = ancestor.start_pos
-
- return get_global_filters(self._evaluator, self._context, position, origin_scope)
- else:
- return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)
-
- def filter_name(self, filters):
- """
- Searches names that are defined in a scope (the different
- ``filters``), until a name fits.
- """
- names = []
- if self._context.predefined_names and isinstance(self._name, tree.Name):
- node = self._name
- while node is not None and not is_scope(node):
- node = node.parent
- if node.type in ("if_stmt", "for_stmt", "comp_for"):
- try:
- name_dict = self._context.predefined_names[node]
- types = name_dict[self._string_name]
- except KeyError:
- continue
- else:
- self._found_predefined_types = types
- break
-
- for filter in filters:
- names = filter.get(self._string_name)
- if names:
- if len(names) == 1:
- n, = names
- if isinstance(n, TreeNameDefinition):
- # Something somewhere went terribly wrong. This
- # typically happens when using goto on an import in an
- # __init__ file. I think we need a better solution, but
- # it's kind of hard, because for Jedi it's not clear
- # that that name has not been defined, yet.
- if n.tree_name == self._name:
- if self._name.get_definition().type == 'import_from':
- continue
- break
-
- debug.dbg('finder.filter_name %s in (%s): %s@%s',
- self._string_name, self._context, names, self._position)
- return list(names)
-
- def _check_getattr(self, inst):
- """Checks for both __getattr__ and __getattribute__ methods"""
- # str is important, because it shouldn't be `Name`!
- name = compiled.create_simple_object(self._evaluator, self._string_name)
-
- # This is a little bit special. `__getattribute__` is in Python
- # executed before `__getattr__`. But: I know no use case, where
- # this could be practical and where Jedi would return wrong types.
- # If you ever find something, let me know!
- # We are inversing this, because a hand-crafted `__getattribute__`
- # could still call another hand-crafted `__getattr__`, but not the
- # other way around.
- names = (inst.get_function_slot_names(u'__getattr__') or
- inst.get_function_slot_names(u'__getattribute__'))
- return inst.execute_function_slots(names, name)
-
- def _names_to_types(self, names, attribute_lookup):
- contexts = ContextSet.from_sets(name.infer() for name in names)
-
- debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
- if not names and isinstance(self._context, AbstractInstanceContext):
- # handling __getattr__ / __getattribute__
- return self._check_getattr(self._context)
-
- # Add isinstance and other if/assert knowledge.
- if not contexts and isinstance(self._name, tree.Name) and \
- not isinstance(self._name_context, AbstractInstanceContext):
- flow_scope = self._name
- base_node = self._name_context.tree_node
- if base_node.type == 'comp_for':
- return contexts
- while True:
- flow_scope = get_parent_scope(flow_scope, include_flows=True)
- n = _check_flow_information(self._name_context, flow_scope,
- self._name, self._position)
- if n is not None:
- return n
- if flow_scope == base_node:
- break
- return contexts
-
-
-def _check_flow_information(context, flow, search_name, pos):
- """ Try to find out the type of a variable just with the information that
- is given by the flows: e.g. It is also responsible for assert checks.::
-
- if isinstance(k, str):
- k. # <- completion here
-
- ensures that `k` is a string.
- """
- if not settings.dynamic_flow_information:
- return None
-
- result = None
- if is_scope(flow):
- # Check for asserts.
- module_node = flow.get_root_node()
- try:
- names = module_node.get_used_names()[search_name.value]
- except KeyError:
- return None
- names = reversed([
- n for n in names
- if flow.start_pos <= n.start_pos < (pos or flow.end_pos)
- ])
-
- for name in names:
- ass = search_ancestor(name, 'assert_stmt')
- if ass is not None:
- result = _check_isinstance_type(context, ass.assertion, search_name)
- if result is not None:
- return result
-
- if flow.type in ('if_stmt', 'while_stmt'):
- potential_ifs = [c for c in flow.children[1::4] if c != ':']
- for if_test in reversed(potential_ifs):
- if search_name.start_pos > if_test.end_pos:
- return _check_isinstance_type(context, if_test, search_name)
- return result
-
-
-def _check_isinstance_type(context, element, search_name):
- try:
- assert element.type in ('power', 'atom_expr')
- # this might be removed if we analyze and, etc
- assert len(element.children) == 2
- first, trailer = element.children
- assert first.type == 'name' and first.value == 'isinstance'
- assert trailer.type == 'trailer' and trailer.children[0] == '('
- assert len(trailer.children) == 3
-
- # arglist stuff
- arglist = trailer.children[1]
- args = TreeArguments(context.evaluator, context, arglist, trailer)
- param_list = list(args.unpack())
- # Disallow keyword arguments
- assert len(param_list) == 2
- (key1, lazy_context_object), (key2, lazy_context_cls) = param_list
- assert key1 is None and key2 is None
- call = helpers.call_of_leaf(search_name)
- is_instance_call = helpers.call_of_leaf(lazy_context_object.data)
- # Do a simple get_code comparison. They should just have the same code,
- # and everything will be all right.
- normalize = context.evaluator.grammar._normalize
- assert normalize(is_instance_call) == normalize(call)
- except AssertionError:
- return None
-
- context_set = ContextSet()
- for cls_or_tup in lazy_context_cls.infer():
- if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
- for lazy_context in cls_or_tup.py__iter__():
- for context in lazy_context.infer():
- context_set |= context.execute_evaluated()
- else:
- context_set |= cls_or_tup.execute_evaluated()
- return context_set
diff --git a/contrib/python/jedi/jedi/evaluate/flow_analysis.py b/contrib/python/jedi/jedi/evaluate/flow_analysis.py
deleted file mode 100644
index 474071f14c..0000000000
--- a/contrib/python/jedi/jedi/evaluate/flow_analysis.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
-from jedi.evaluate.recursion import execution_allowed
-
-
-class Status(object):
- lookup_table = {}
-
- def __init__(self, value, name):
- self._value = value
- self._name = name
- Status.lookup_table[value] = self
-
- def invert(self):
- if self is REACHABLE:
- return UNREACHABLE
- elif self is UNREACHABLE:
- return REACHABLE
- else:
- return UNSURE
-
- def __and__(self, other):
- if UNSURE in (self, other):
- return UNSURE
- else:
- return REACHABLE if self._value and other._value else UNREACHABLE
-
- def __repr__(self):
- return '<%s: %s>' % (type(self).__name__, self._name)
-
-
-REACHABLE = Status(True, 'reachable')
-UNREACHABLE = Status(False, 'unreachable')
-UNSURE = Status(None, 'unsure')
-
-
-def _get_flow_scopes(node):
- while True:
- node = get_parent_scope(node, include_flows=True)
- if node is None or is_scope(node):
- return
- yield node
-
-
-def reachability_check(context, context_scope, node, origin_scope=None):
- first_flow_scope = get_parent_scope(node, include_flows=True)
- if origin_scope is not None:
- origin_flow_scopes = list(_get_flow_scopes(origin_scope))
- node_flow_scopes = list(_get_flow_scopes(node))
-
- branch_matches = True
- for flow_scope in origin_flow_scopes:
- if flow_scope in node_flow_scopes:
- node_keyword = get_flow_branch_keyword(flow_scope, node)
- origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope)
- branch_matches = node_keyword == origin_keyword
- if flow_scope.type == 'if_stmt':
- if not branch_matches:
- return UNREACHABLE
- elif flow_scope.type == 'try_stmt':
- if not branch_matches and origin_keyword == 'else' \
- and node_keyword == 'except':
- return UNREACHABLE
- if branch_matches:
- break
-
- # Direct parents get resolved, we filter scopes that are separate
- # branches. This makes sense for autocompletion and static analysis.
- # For actual Python it doesn't matter, because we're talking about
- # potentially unreachable code.
- # e.g. `if 0:` would cause all name lookup within the flow make
- # unaccessible. This is not a "problem" in Python, because the code is
- # never called. In Jedi though, we still want to infer types.
- while origin_scope is not None:
- if first_flow_scope == origin_scope and branch_matches:
- return REACHABLE
- origin_scope = origin_scope.parent
-
- return _break_check(context, context_scope, first_flow_scope, node)
-
-
-def _break_check(context, context_scope, flow_scope, node):
- reachable = REACHABLE
- if flow_scope.type == 'if_stmt':
- if flow_scope.is_node_after_else(node):
- for check_node in flow_scope.get_test_nodes():
- reachable = _check_if(context, check_node)
- if reachable in (REACHABLE, UNSURE):
- break
- reachable = reachable.invert()
- else:
- flow_node = flow_scope.get_corresponding_test_node(node)
- if flow_node is not None:
- reachable = _check_if(context, flow_node)
- elif flow_scope.type in ('try_stmt', 'while_stmt'):
- return UNSURE
-
- # Only reachable branches need to be examined further.
- if reachable in (UNREACHABLE, UNSURE):
- return reachable
-
- if context_scope != flow_scope and context_scope != flow_scope.parent:
- flow_scope = get_parent_scope(flow_scope, include_flows=True)
- return reachable & _break_check(context, context_scope, flow_scope, node)
- else:
- return reachable
-
-
-def _check_if(context, node):
- with execution_allowed(context.evaluator, node) as allowed:
- if not allowed:
- return UNSURE
-
- types = context.eval_node(node)
- values = set(x.py__bool__() for x in types)
- if len(values) == 1:
- return Status.lookup_table[values.pop()]
- else:
- return UNSURE
diff --git a/contrib/python/jedi/jedi/evaluate/helpers.py b/contrib/python/jedi/jedi/evaluate/helpers.py
deleted file mode 100644
index c94a1fbe55..0000000000
--- a/contrib/python/jedi/jedi/evaluate/helpers.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import copy
-import sys
-import re
-import os
-from itertools import chain
-from contextlib import contextmanager
-
-from parso.python import tree
-
-from jedi._compatibility import unicode
-from jedi.parser_utils import get_parent_scope
-
-
-def is_stdlib_path(path):
- # Python standard library paths look like this:
- # /usr/lib/python3.5/...
- # TODO The implementation below is probably incorrect and not complete.
- if 'dist-packages' in path or 'site-packages' in path:
- return False
-
- base_path = os.path.join(sys.prefix, 'lib', 'python')
- return bool(re.match(re.escape(base_path) + r'\d.\d', path))
-
-
-def deep_ast_copy(obj):
- """
- Much, much faster than copy.deepcopy, but just for parser tree nodes.
- """
- # If it's already in the cache, just return it.
- new_obj = copy.copy(obj)
-
- # Copy children
- new_children = []
- for child in obj.children:
- if isinstance(child, tree.Leaf):
- new_child = copy.copy(child)
- new_child.parent = new_obj
- else:
- new_child = deep_ast_copy(child)
- new_child.parent = new_obj
- new_children.append(new_child)
- new_obj.children = new_children
-
- return new_obj
-
-
-def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
- """
- Creates a "call" node that consist of all ``trailer`` and ``power``
- objects. E.g. if you call it with ``append``::
-
- list([]).append(3) or None
-
- You would get a node with the content ``list([]).append`` back.
-
- This generates a copy of the original ast node.
-
- If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
-
- We use this function for two purposes. Given an expression ``bar.foo``,
- we may want to
- - infer the type of ``foo`` to offer completions after foo
- - infer the type of ``bar`` to be able to jump to the definition of foo
- The option ``cut_own_trailer`` must be set to true for the second purpose.
- """
- trailer = leaf.parent
- if trailer.type == 'fstring':
- from jedi.evaluate import compiled
- return compiled.get_string_context_set(context.evaluator)
-
- # The leaf may not be the last or first child, because there exist three
- # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
- # we should not match anything more than x.
- if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
- if trailer.type == 'atom':
- return context.eval_node(trailer)
- return context.eval_node(leaf)
-
- power = trailer.parent
- index = power.children.index(trailer)
- if cut_own_trailer:
- cut = index
- else:
- cut = index + 1
-
- if power.type == 'error_node':
- start = index
- while True:
- start -= 1
- base = power.children[start]
- if base.type != 'trailer':
- break
- trailers = power.children[start + 1: index + 1]
- else:
- base = power.children[0]
- trailers = power.children[1:cut]
-
- if base == 'await':
- base = trailers[0]
- trailers = trailers[1:]
-
- values = context.eval_node(base)
- from jedi.evaluate.syntax_tree import eval_trailer
- for trailer in trailers:
- values = eval_trailer(context, values, trailer)
- return values
-
-
-def call_of_leaf(leaf):
- """
- Creates a "call" node that consist of all ``trailer`` and ``power``
- objects. E.g. if you call it with ``append``::
-
- list([]).append(3) or None
-
- You would get a node with the content ``list([]).append`` back.
-
- This generates a copy of the original ast node.
-
- If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
- """
- # TODO this is the old version of this call. Try to remove it.
- trailer = leaf.parent
- # The leaf may not be the last or first child, because there exist three
- # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
- # we should not match anything more than x.
- if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
- if trailer.type == 'atom':
- return trailer
- return leaf
-
- power = trailer.parent
- index = power.children.index(trailer)
-
- new_power = copy.copy(power)
- new_power.children = list(new_power.children)
- new_power.children[index + 1:] = []
-
- if power.type == 'error_node':
- start = index
- while True:
- start -= 1
- if power.children[start].type != 'trailer':
- break
- transformed = tree.Node('power', power.children[start:])
- transformed.parent = power.parent
- return transformed
-
- return power
-
-
-def get_names_of_node(node):
- try:
- children = node.children
- except AttributeError:
- if node.type == 'name':
- return [node]
- else:
- return []
- else:
- return list(chain.from_iterable(get_names_of_node(c) for c in children))
-
-
-def get_module_names(module, all_scopes):
- """
- Returns a dictionary with name parts as keys and their call paths as
- values.
- """
- names = chain.from_iterable(module.get_used_names().values())
- if not all_scopes:
- # We have to filter all the names that don't have the module as a
- # parent_scope. There's None as a parent, because nodes in the module
- # node have the parent module and not suite as all the others.
- # Therefore it's important to catch that case.
- names = [n for n in names if get_parent_scope(n).parent in (module, None)]
- return names
-
-
-@contextmanager
-def predefine_names(context, flow_scope, dct):
- predefined = context.predefined_names
- predefined[flow_scope] = dct
- try:
- yield
- finally:
- del predefined[flow_scope]
-
-
-def is_compiled(context):
- from jedi.evaluate.compiled import CompiledObject
- return isinstance(context, CompiledObject)
-
-
-def is_string(context):
- if context.evaluator.environment.version_info.major == 2:
- str_classes = (unicode, bytes)
- else:
- str_classes = (unicode,)
- return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes)
-
-
-def is_literal(context):
- return is_number(context) or is_string(context)
-
-
-def _get_safe_value_or_none(context, accept):
- if is_compiled(context):
- value = context.get_safe_value(default=None)
- if isinstance(value, accept):
- return value
-
-
-def get_int_or_none(context):
- return _get_safe_value_or_none(context, int)
-
-
-def is_number(context):
- return _get_safe_value_or_none(context, (int, float)) is not None
-
-
-class EvaluatorTypeError(Exception):
- pass
-
-
-class EvaluatorIndexError(Exception):
- pass
-
-
-class EvaluatorKeyError(Exception):
- pass
-
-
-@contextmanager
-def reraise_as_evaluator(*exception_classes):
- try:
- yield
- except exception_classes as e:
- new_exc_cls = globals()['Evaluator' + e.__class__.__name__]
- raise new_exc_cls(e)
diff --git a/contrib/python/jedi/jedi/evaluate/imports.py b/contrib/python/jedi/jedi/evaluate/imports.py
deleted file mode 100644
index 3b1df31a44..0000000000
--- a/contrib/python/jedi/jedi/evaluate/imports.py
+++ /dev/null
@@ -1,594 +0,0 @@
-"""
-:mod:`jedi.evaluate.imports` is here to resolve import statements and return
-the modules/classes/functions/whatever, which they stand for. However there's
-not any actual importing done. This module is about finding modules in the
-filesystem. This can be quite tricky sometimes, because Python imports are not
-always that simple.
-
-This module uses imp for python up to 3.2 and importlib for python 3.3 on; the
-correct implementation is delegated to _compatibility.
-
-This module also supports import autocompletion, which means to complete
-statements like ``from datetim`` (cursor at the end would return ``datetime``).
-"""
-import os
-
-from parso.python import tree
-from parso.tree import search_ancestor
-from parso import python_bytes_to_unicode
-
-from jedi._compatibility import (FileNotFoundError, ImplicitNSInfo,
- force_unicode, unicode)
-from jedi import debug
-from jedi import settings
-from jedi.parser_utils import get_cached_code_lines
-from jedi.evaluate import sys_path
-from jedi.evaluate import helpers
-from jedi.evaluate import compiled
-from jedi.evaluate import analysis
-from jedi.evaluate.utils import unite
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate.filters import AbstractNameDefinition
-from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
-
-
-class ModuleCache(object):
- def __init__(self):
- self._path_cache = {}
- self._name_cache = {}
-
- def add(self, module, name):
- path = module.py__file__()
- self._path_cache[path] = module
- self._name_cache[name] = module
-
- def iterate_modules_with_names(self):
- return self._name_cache.items()
-
- def get(self, name):
- return self._name_cache[name]
-
- def get_from_path(self, path):
- return self._path_cache[path]
-
-
-# This memoization is needed, because otherwise we will infinitely loop on
-# certain imports.
-@evaluator_method_cache(default=NO_CONTEXTS)
-def infer_import(context, tree_name, is_goto=False):
- module_context = context.get_root_context()
- import_node = search_ancestor(tree_name, 'import_name', 'import_from')
- import_path = import_node.get_path_for_name(tree_name)
- from_import_name = None
- evaluator = context.evaluator
- try:
- from_names = import_node.get_from_names()
- except AttributeError:
- # Is an import_name
- pass
- else:
- if len(from_names) + 1 == len(import_path):
- # We have to fetch the from_names part first and then check
- # if from_names exists in the modules.
- from_import_name = import_path[-1]
- import_path = from_names
-
- importer = Importer(evaluator, tuple(import_path),
- module_context, import_node.level)
-
- types = importer.follow()
-
- #if import_node.is_nested() and not self.nested_resolve:
- # scopes = [NestedImportModule(module, import_node)]
-
- if not types:
- return NO_CONTEXTS
-
- if from_import_name is not None:
- types = unite(
- t.py__getattribute__(
- from_import_name,
- name_context=context,
- is_goto=is_goto,
- analysis_errors=False
- )
- for t in types
- )
- if not is_goto:
- types = ContextSet.from_set(types)
-
- if not types:
- path = import_path + [from_import_name]
- importer = Importer(evaluator, tuple(path),
- module_context, import_node.level)
- types = importer.follow()
- # goto only accepts `Name`
- if is_goto:
- types = set(s.name for s in types)
- else:
- # goto only accepts `Name`
- if is_goto:
- types = set(s.name for s in types)
-
- debug.dbg('after import: %s', types)
- return types
-
-
-class NestedImportModule(tree.Module):
- """
- TODO while there's no use case for nested import module right now, we might
- be able to use them for static analysis checks later on.
- """
- def __init__(self, module, nested_import):
- self._module = module
- self._nested_import = nested_import
-
- def _get_nested_import_name(self):
- """
- Generates an Import statement, that can be used to fake nested imports.
- """
- i = self._nested_import
- # This is not an existing Import statement. Therefore, set position to
- # 0 (0 is not a valid line number).
- zero = (0, 0)
- names = [unicode(name) for name in i.namespace_names[1:]]
- name = helpers.FakeName(names, self._nested_import)
- new = tree.Import(i._sub_module, zero, zero, name)
- new.parent = self._module
- debug.dbg('Generated a nested import: %s', new)
- return helpers.FakeName(str(i.namespace_names[1]), new)
-
- def __getattr__(self, name):
- return getattr(self._module, name)
-
- def __repr__(self):
- return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
- self._nested_import)
-
-
-def _add_error(context, name, message=None):
- # Should be a name, not a string!
- if message is None:
- name_str = str(name.value) if isinstance(name, tree.Name) else name
- message = 'No module named ' + name_str
- if hasattr(name, 'parent'):
- analysis.add(context, 'import-error', name, message)
- else:
- debug.warning('ImportError without origin: ' + message)
-
-
-class ImportName(AbstractNameDefinition):
- start_pos = (1, 0)
- _level = 0
-
- def __init__(self, parent_context, string_name):
- self.parent_context = parent_context
- self.string_name = string_name
-
- def infer(self):
- return Importer(
- self.parent_context.evaluator,
- [self.string_name],
- self.parent_context,
- level=self._level,
- ).follow()
-
- def goto(self):
- return [m.name for m in self.infer()]
-
- def get_root_context(self):
- # Not sure if this is correct.
- return self.parent_context.get_root_context()
-
- @property
- def api_type(self):
- return 'module'
-
-
-class SubModuleName(ImportName):
- _level = 1
-
-
-class Importer(object):
- def __init__(self, evaluator, import_path, module_context, level=0):
- """
- An implementation similar to ``__import__``. Use `follow`
- to actually follow the imports.
-
- *level* specifies whether to use absolute or relative imports. 0 (the
- default) means only perform absolute imports. Positive values for level
- indicate the number of parent directories to search relative to the
- directory of the module calling ``__import__()`` (see PEP 328 for the
- details).
-
- :param import_path: List of namespaces (strings or Names).
- """
- debug.speed('import %s' % (import_path,))
- self._evaluator = evaluator
- self.level = level
- self.module_context = module_context
- try:
- self.file_path = module_context.py__file__()
- except AttributeError:
- # Can be None for certain compiled modules like 'builtins'.
- self.file_path = None
-
- if level:
- base = module_context.py__package__().split('.')
- if base == [''] or base == ['__main__']:
- base = []
- if level > len(base):
- path = module_context.py__file__()
- if path is not None:
- import_path = list(import_path)
- p = path
- for i in range(level):
- p = os.path.dirname(p)
- dir_name = os.path.basename(p)
- # This is not the proper way to do relative imports. However, since
- # Jedi cannot be sure about the entry point, we just calculate an
- # absolute path here.
- if dir_name:
- # TODO those sys.modules modifications are getting
- # really stupid. this is the 3rd time that we're using
- # this. We should probably refactor.
- if path.endswith(os.path.sep + 'os.py'):
- import_path.insert(0, 'os')
- else:
- import_path.insert(0, dir_name)
- else:
- _add_error(
- module_context, import_path[-1],
- message='Attempted relative import beyond top-level package.'
- )
- import_path = []
- # If no path is defined in the module we have no ideas where we
- # are in the file system. Therefore we cannot know what to do.
- # In this case we just let the path there and ignore that it's
- # a relative path. Not sure if that's a good idea.
- else:
- # Here we basically rewrite the level to 0.
- base = tuple(base)
- if level > 1:
- base = base[:-level + 1]
-
- import_path = base + tuple(import_path)
- self.import_path = import_path
-
- @property
- def str_import_path(self):
- """Returns the import path as pure strings instead of `Name`."""
- return tuple(
- name.value if isinstance(name, tree.Name) else name
- for name in self.import_path
- )
-
- def sys_path_with_modifications(self):
-
- sys_path_mod = (
- self._evaluator.get_sys_path()
- + sys_path.check_sys_path_modifications(self.module_context)
- )
-
- if self.import_path and self.file_path is not None \
- and self._evaluator.environment.version_info.major == 2:
- # Python2 uses an old strange way of importing relative imports.
- sys_path_mod.append(force_unicode(os.path.dirname(self.file_path)))
-
- return sys_path_mod
-
- def follow(self):
- if not self.import_path or not self._evaluator.infer_enabled:
- return NO_CONTEXTS
-
- return self._do_import(self.import_path, self.sys_path_with_modifications())
-
- def _do_import(self, import_path, sys_path):
- """
- This method is very similar to importlib's `_gcd_import`.
- """
- import_parts = [
- force_unicode(i.value if isinstance(i, tree.Name) else i)
- for i in import_path
- ]
-
- # Handle "magic" Flask extension imports:
- # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
- if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
- # New style.
- ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
- modules = self._do_import(ipath, sys_path)
- if modules:
- return modules
- else:
- # Old style
- return self._do_import(('flaskext',) + import_path[2:], sys_path)
-
- if import_parts[0] in settings.auto_import_modules:
- module = _load_module(
- self._evaluator,
- import_names=import_parts,
- sys_path=sys_path,
- )
- return ContextSet(module)
-
- module_name = '.'.join(import_parts)
- try:
- return ContextSet(self._evaluator.module_cache.get(module_name))
- except KeyError:
- pass
-
- if len(import_path) > 1:
- # This is a recursive way of importing that works great with
- # the module cache.
- bases = self._do_import(import_path[:-1], sys_path)
- if not bases:
- return NO_CONTEXTS
- # We can take the first element, because only the os special
- # case yields multiple modules, which is not important for
- # further imports.
- parent_module = list(bases)[0]
-
- # This is a huge exception, we follow a nested import
- # ``os.path``, because it's a very important one in Python
- # that is being achieved by messing with ``sys.modules`` in
- # ``os``.
- if import_parts == ['os', 'path']:
- return parent_module.py__getattribute__('path')
-
- try:
- method = parent_module.py__path__
- except AttributeError:
- # The module is not a package.
- _add_error(self.module_context, import_path[-1])
- return NO_CONTEXTS
- else:
- paths = method()
- debug.dbg('search_module %s in paths %s', module_name, paths)
- for path in paths:
- # At the moment we are only using one path. So this is
- # not important to be correct.
- if not isinstance(path, list):
- path = [path]
- code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
- string=import_parts[-1],
- path=path,
- full_name=module_name,
- is_global_search=False,
- )
- if module_path is not None:
- break
- else:
- _add_error(self.module_context, import_path[-1])
- return NO_CONTEXTS
- else:
- debug.dbg('global search_module %s in %s', import_parts[-1], self.file_path)
- # Override the sys.path. It works only good that way.
- # Injecting the path directly into `find_module` did not work.
- code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
- string=import_parts[-1],
- full_name=module_name,
- sys_path=sys_path,
- is_global_search=True,
- )
- if module_path is None:
- # The module is not a package.
- _add_error(self.module_context, import_path[-1])
- return NO_CONTEXTS
-
- module = _load_module(
- self._evaluator, module_path, code, sys_path,
- import_names=import_parts,
- safe_module_name=True,
- )
-
- if module is None:
- # The file might raise an ImportError e.g. and therefore not be
- # importable.
- return NO_CONTEXTS
-
- return ContextSet(module)
-
- def _generate_name(self, name, in_module=None):
- # Create a pseudo import to be able to follow them.
- if in_module is None:
- return ImportName(self.module_context, name)
- return SubModuleName(in_module, name)
-
- def _get_module_names(self, search_path=None, in_module=None):
- """
- Get the names of all modules in the search_path. This means file names
- and not names defined in the files.
- """
- sub = self._evaluator.compiled_subprocess
-
- names = []
- # add builtin module names
- if search_path is None and in_module is None:
- names += [self._generate_name(name) for name in sub.get_builtin_module_names()]
-
- if search_path is None:
- search_path = self.sys_path_with_modifications()
-
- for name in sub.list_module_names(search_path):
- names.append(self._generate_name(name, in_module=in_module))
- return names
-
- def completion_names(self, evaluator, only_modules=False):
- """
- :param only_modules: Indicates wheter it's possible to import a
- definition that is not defined in a module.
- """
- from jedi.evaluate.context import ModuleContext
- from jedi.evaluate.context.namespace import ImplicitNamespaceContext
- names = []
- if self.import_path:
- # flask
- if self.str_import_path == ('flask', 'ext'):
- # List Flask extensions like ``flask_foo``
- for mod in self._get_module_names():
- modname = mod.string_name
- if modname.startswith('flask_'):
- extname = modname[len('flask_'):]
- names.append(self._generate_name(extname))
- # Now the old style: ``flaskext.foo``
- for dir in self.sys_path_with_modifications():
- flaskext = os.path.join(dir, 'flaskext')
- if os.path.isdir(flaskext):
- names += self._get_module_names([flaskext])
-
- for context in self.follow():
- # Non-modules are not completable.
- if context.api_type != 'module': # not a module
- continue
- # namespace packages
- if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'):
- paths = context.py__path__()
- names += self._get_module_names(paths, in_module=context)
-
- # implicit namespace packages
- elif isinstance(context, ImplicitNamespaceContext):
- paths = context.paths
- names += self._get_module_names(paths, in_module=context)
-
- if only_modules:
- # In the case of an import like `from x.` we don't need to
- # add all the variables.
- if ('os',) == self.str_import_path and not self.level:
- # os.path is a hardcoded exception, because it's a
- # ``sys.modules`` modification.
- names.append(self._generate_name('path', context))
-
- continue
-
- for filter in context.get_filters(search_global=False):
- names += filter.values()
- else:
- # Empty import path=completion after import
- if not self.level:
- names += self._get_module_names()
-
- if self.file_path is not None:
- path = os.path.abspath(self.file_path)
- for i in range(self.level - 1):
- path = os.path.dirname(path)
- names += self._get_module_names([path])
-
- return names
-
-
-def _load_module(evaluator, path=None, code=None, sys_path=None,
- import_names=None, safe_module_name=False):
- if import_names is None:
- dotted_name = None
- else:
- dotted_name = '.'.join(import_names)
- try:
- return evaluator.module_cache.get(dotted_name)
- except KeyError:
- pass
- try:
- return evaluator.module_cache.get_from_path(path)
- except KeyError:
- pass
-
- if isinstance(path, ImplicitNSInfo):
- from jedi.evaluate.context.namespace import ImplicitNamespaceContext
- module = ImplicitNamespaceContext(
- evaluator,
- fullname=path.name,
- paths=path.paths,
- )
- else:
- if sys_path is None:
- sys_path = evaluator.get_sys_path()
-
- if path is not None and path.endswith(('.py', '.zip', '.egg')):
- module_node = evaluator.parse(
- code=code, path=path, cache=True,
- diff_cache=settings.fast_parser,
- cache_path=settings.cache_directory)
-
- from jedi.evaluate.context import ModuleContext
- module = ModuleContext(
- evaluator, module_node,
- path=path,
- code_lines=get_cached_code_lines(evaluator.grammar, path),
- )
- else:
- assert dotted_name is not None
- module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path)
-
- if module is not None and dotted_name is not None:
- add_module_to_cache(evaluator, dotted_name, module, safe=safe_module_name)
-
- return module
-
-
-def add_module_to_cache(evaluator, module_name, module, safe=False):
- if not safe and '.' not in module_name:
- # We cannot add paths with dots, because that would collide with
- # the sepatator dots for nested packages. Therefore we return
- # `__main__` in ModuleWrapper.py__name__(), which is similar to
- # Python behavior.
- return
- evaluator.module_cache.add(module, module_name)
-
-
-def get_modules_containing_name(evaluator, modules, name):
- """
- Search a name in the directories of modules.
- """
- def check_directories(paths):
- for p in paths:
- if p is not None:
- # We need abspath, because the seetings paths might not already
- # have been converted to absolute paths.
- d = os.path.dirname(os.path.abspath(p))
- for file_name in os.listdir(d):
- path = os.path.join(d, file_name)
- if file_name.endswith('.py'):
- yield path
-
- def check_fs(path):
- try:
- f = open(path, 'rb')
- except FileNotFoundError:
- return
- with f:
- code = python_bytes_to_unicode(f.read(), errors='replace')
- if name in code:
- e_sys_path = evaluator.get_sys_path()
- import_names = sys_path.dotted_path_in_sys_path(e_sys_path, path)
- module = _load_module(
- evaluator, path, code,
- sys_path=e_sys_path,
- import_names=import_names,
- )
- return module
-
- # skip non python modules
- used_mod_paths = set()
- for m in modules:
- try:
- path = m.py__file__()
- except AttributeError:
- pass
- else:
- used_mod_paths.add(path)
- yield m
-
- if not settings.dynamic_params_for_other_modules:
- return
-
- additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules)
- # Check the directories of used modules.
- paths = (additional | set(check_directories(used_mod_paths))) \
- - used_mod_paths
-
- # Sort here to make issues less random.
- for p in sorted(paths):
- # make testing easier, sort it - same results on every interpreter
- m = check_fs(p)
- if m is not None and not isinstance(m, compiled.CompiledObject):
- yield m
diff --git a/contrib/python/jedi/jedi/evaluate/jedi_typing.py b/contrib/python/jedi/jedi/evaluate/jedi_typing.py
deleted file mode 100644
index aeb63a8766..0000000000
--- a/contrib/python/jedi/jedi/evaluate/jedi_typing.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""
-This module is not intended to be used in jedi, rather it will be fed to the
-jedi-parser to replace classes in the typing module
-"""
-
-try:
- from collections import abc
-except ImportError:
- # python 2
- import collections as abc
-
-
-def factory(typing_name, indextypes):
- class Iterable(abc.Iterable):
- def __iter__(self):
- while True:
- yield indextypes[0]()
-
- class Iterator(Iterable, abc.Iterator):
- def next(self):
- """ needed for python 2 """
- return self.__next__()
-
- def __next__(self):
- return indextypes[0]()
-
- class Sequence(abc.Sequence):
- def __getitem__(self, index):
- return indextypes[0]()
-
- class MutableSequence(Sequence, abc.MutableSequence):
- pass
-
- class List(MutableSequence, list):
- pass
-
- class Tuple(Sequence, tuple):
- def __getitem__(self, index):
- if indextypes[1] == Ellipsis:
- # https://www.python.org/dev/peps/pep-0484/#the-typing-module
- # Tuple[int, ...] means a tuple of ints of indetermined length
- return indextypes[0]()
- else:
- return indextypes[index]()
-
- class AbstractSet(Iterable, abc.Set):
- pass
-
- class MutableSet(AbstractSet, abc.MutableSet):
- pass
-
- class KeysView(Iterable, abc.KeysView):
- pass
-
- class ValuesView(abc.ValuesView):
- def __iter__(self):
- while True:
- yield indextypes[1]()
-
- class ItemsView(abc.ItemsView):
- def __iter__(self):
- while True:
- yield (indextypes[0](), indextypes[1]())
-
- class Mapping(Iterable, abc.Mapping):
- def __getitem__(self, item):
- return indextypes[1]()
-
- def keys(self):
- return KeysView()
-
- def values(self):
- return ValuesView()
-
- def items(self):
- return ItemsView()
-
- class MutableMapping(Mapping, abc.MutableMapping):
- pass
-
- class Dict(MutableMapping, dict):
- pass
-
- class DefaultDict(MutableMapping, dict):
- pass
-
- dct = {
- "Sequence": Sequence,
- "MutableSequence": MutableSequence,
- "List": List,
- "Iterable": Iterable,
- "Iterator": Iterator,
- "AbstractSet": AbstractSet,
- "MutableSet": MutableSet,
- "Mapping": Mapping,
- "MutableMapping": MutableMapping,
- "Tuple": Tuple,
- "KeysView": KeysView,
- "ItemsView": ItemsView,
- "ValuesView": ValuesView,
- "Dict": Dict,
- "DefaultDict": DefaultDict,
- }
- return dct[typing_name]
diff --git a/contrib/python/jedi/jedi/evaluate/lazy_context.py b/contrib/python/jedi/jedi/evaluate/lazy_context.py
deleted file mode 100644
index 84b375c139..0000000000
--- a/contrib/python/jedi/jedi/evaluate/lazy_context.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
-from jedi.common.utils import monkeypatch
-
-
-class AbstractLazyContext(object):
- def __init__(self, data):
- self.data = data
-
- def __repr__(self):
- return '<%s: %s>' % (self.__class__.__name__, self.data)
-
- def infer(self):
- raise NotImplementedError
-
-
-class LazyKnownContext(AbstractLazyContext):
- """data is a context."""
- def infer(self):
- return ContextSet(self.data)
-
-
-class LazyKnownContexts(AbstractLazyContext):
- """data is a ContextSet."""
- def infer(self):
- return self.data
-
-
-class LazyUnknownContext(AbstractLazyContext):
- def __init__(self):
- super(LazyUnknownContext, self).__init__(None)
-
- def infer(self):
- return NO_CONTEXTS
-
-
-class LazyTreeContext(AbstractLazyContext):
- def __init__(self, context, node):
- super(LazyTreeContext, self).__init__(node)
- self._context = context
- # We need to save the predefined names. It's an unfortunate side effect
- # that needs to be tracked otherwise results will be wrong.
- self._predefined_names = dict(context.predefined_names)
-
- def infer(self):
- with monkeypatch(self._context, 'predefined_names', self._predefined_names):
- return self._context.eval_node(self.data)
-
-
-def get_merged_lazy_context(lazy_contexts):
- if len(lazy_contexts) > 1:
- return MergedLazyContexts(lazy_contexts)
- else:
- return lazy_contexts[0]
-
-
-class MergedLazyContexts(AbstractLazyContext):
- """data is a list of lazy contexts."""
- def infer(self):
- return ContextSet.from_sets(l.infer() for l in self.data)
diff --git a/contrib/python/jedi/jedi/evaluate/param.py b/contrib/python/jedi/jedi/evaluate/param.py
deleted file mode 100644
index 84f281e532..0000000000
--- a/contrib/python/jedi/jedi/evaluate/param.py
+++ /dev/null
@@ -1,194 +0,0 @@
-from collections import defaultdict
-
-from jedi.evaluate.utils import PushBackIterator
-from jedi.evaluate import analysis
-from jedi.evaluate.lazy_context import LazyKnownContext, \
- LazyTreeContext, LazyUnknownContext
-from jedi.evaluate import docstrings
-from jedi.evaluate import pep0484
-from jedi.evaluate.context import iterable
-
-
-def _add_argument_issue(parent_context, error_name, lazy_context, message):
- if isinstance(lazy_context, LazyTreeContext):
- node = lazy_context.data
- if node.parent.type == 'argument':
- node = node.parent
- analysis.add(parent_context, error_name, node, message)
-
-
-class ExecutedParam(object):
- """Fake a param and give it values."""
- def __init__(self, execution_context, param_node, lazy_context):
- self._execution_context = execution_context
- self._param_node = param_node
- self._lazy_context = lazy_context
- self.string_name = param_node.name.value
-
- def infer(self):
- pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node)
- doc_params = docstrings.infer_param(self._execution_context, self._param_node)
- if pep0484_hints or doc_params:
- return pep0484_hints | doc_params
-
- return self._lazy_context.infer()
-
- @property
- def var_args(self):
- return self._execution_context.var_args
-
- def __repr__(self):
- return '<%s: %s>' % (self.__class__.__name__, self.string_name)
-
-
-def get_executed_params(execution_context, var_args):
- result_params = []
- param_dict = {}
- funcdef = execution_context.tree_node
- parent_context = execution_context.parent_context
-
- for param in funcdef.get_params():
- param_dict[param.name.value] = param
- unpacked_va = list(var_args.unpack(funcdef))
- var_arg_iterator = PushBackIterator(iter(unpacked_va))
-
- non_matching_keys = defaultdict(lambda: [])
- keys_used = {}
- keys_only = False
- had_multiple_value_error = False
- for param in funcdef.get_params():
- # The value and key can both be null. There, the defaults apply.
- # args / kwargs will just be empty arrays / dicts, respectively.
- # Wrong value count is just ignored. If you try to test cases that are
- # not allowed in Python, Jedi will maybe not show any completions.
- key, argument = next(var_arg_iterator, (None, None))
- while key is not None:
- keys_only = True
- try:
- key_param = param_dict[key]
- except KeyError:
- non_matching_keys[key] = argument
- else:
- if key in keys_used:
- had_multiple_value_error = True
- m = ("TypeError: %s() got multiple values for keyword argument '%s'."
- % (funcdef.name, key))
- for node in var_args.get_calling_nodes():
- analysis.add(parent_context, 'type-error-multiple-values',
- node, message=m)
- else:
- keys_used[key] = ExecutedParam(execution_context, key_param, argument)
- key, argument = next(var_arg_iterator, (None, None))
-
- try:
- result_params.append(keys_used[param.name.value])
- continue
- except KeyError:
- pass
-
- if param.star_count == 1:
- # *args param
- lazy_context_list = []
- if argument is not None:
- lazy_context_list.append(argument)
- for key, argument in var_arg_iterator:
- # Iterate until a key argument is found.
- if key:
- var_arg_iterator.push_back((key, argument))
- break
- lazy_context_list.append(argument)
- seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list)
- result_arg = LazyKnownContext(seq)
- elif param.star_count == 2:
- # **kwargs param
- dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys))
- result_arg = LazyKnownContext(dct)
- non_matching_keys = {}
- else:
- # normal param
- if argument is None:
- # No value: Return an empty container
- if param.default is None:
- result_arg = LazyUnknownContext()
- if not keys_only:
- for node in var_args.get_calling_nodes():
- m = _error_argument_count(funcdef, len(unpacked_va))
- analysis.add(parent_context, 'type-error-too-few-arguments',
- node, message=m)
- else:
- result_arg = LazyTreeContext(parent_context, param.default)
- else:
- result_arg = argument
-
- result_params.append(ExecutedParam(execution_context, param, result_arg))
- if not isinstance(result_arg, LazyUnknownContext):
- keys_used[param.name.value] = result_params[-1]
-
- if keys_only:
- # All arguments should be handed over to the next function. It's not
- # about the values inside, it's about the names. Jedi needs to now that
- # there's nothing to find for certain names.
- for k in set(param_dict) - set(keys_used):
- param = param_dict[k]
-
- if not (non_matching_keys or had_multiple_value_error or
- param.star_count or param.default):
- # add a warning only if there's not another one.
- for node in var_args.get_calling_nodes():
- m = _error_argument_count(funcdef, len(unpacked_va))
- analysis.add(parent_context, 'type-error-too-few-arguments',
- node, message=m)
-
- for key, lazy_context in non_matching_keys.items():
- m = "TypeError: %s() got an unexpected keyword argument '%s'." \
- % (funcdef.name, key)
- _add_argument_issue(
- parent_context,
- 'type-error-keyword-argument',
- lazy_context,
- message=m
- )
-
- remaining_arguments = list(var_arg_iterator)
- if remaining_arguments:
- m = _error_argument_count(funcdef, len(unpacked_va))
- # Just report an error for the first param that is not needed (like
- # cPython).
- first_key, lazy_context = remaining_arguments[0]
- if var_args.get_calling_nodes():
- # There might not be a valid calling node so check for that first.
- _add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m)
- return result_params
-
-
-def _error_argument_count(funcdef, actual_count):
- params = funcdef.get_params()
- default_arguments = sum(1 for p in params if p.default or p.star_count)
-
- if default_arguments == 0:
- before = 'exactly '
- else:
- before = 'from %s to ' % (len(params) - default_arguments)
- return ('TypeError: %s() takes %s%s arguments (%s given).'
- % (funcdef.name, before, len(params), actual_count))
-
-
-def _create_default_param(execution_context, param):
- if param.star_count == 1:
- result_arg = LazyKnownContext(
- iterable.FakeSequence(execution_context.evaluator, u'tuple', [])
- )
- elif param.star_count == 2:
- result_arg = LazyKnownContext(
- iterable.FakeDict(execution_context.evaluator, {})
- )
- elif param.default is None:
- result_arg = LazyUnknownContext()
- else:
- result_arg = LazyTreeContext(execution_context.parent_context, param.default)
- return ExecutedParam(execution_context, param, result_arg)
-
-
-def create_default_params(execution_context, funcdef):
- return [_create_default_param(execution_context, p)
- for p in funcdef.get_params()]
diff --git a/contrib/python/jedi/jedi/evaluate/parser_cache.py b/contrib/python/jedi/jedi/evaluate/parser_cache.py
deleted file mode 100644
index 84fe52d977..0000000000
--- a/contrib/python/jedi/jedi/evaluate/parser_cache.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from jedi.evaluate.cache import evaluator_function_cache
-
-
-@evaluator_function_cache()
-def get_yield_exprs(evaluator, funcdef):
- return list(funcdef.iter_yield_exprs())
diff --git a/contrib/python/jedi/jedi/evaluate/pep0484.py b/contrib/python/jedi/jedi/evaluate/pep0484.py
deleted file mode 100644
index 0b96cbc325..0000000000
--- a/contrib/python/jedi/jedi/evaluate/pep0484.py
+++ /dev/null
@@ -1,330 +0,0 @@
-"""
-PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
-through function annotations. There is a strong suggestion in this document
-that only the type of type hinting defined in PEP0484 should be allowed
-as annotations in future python versions.
-
-The (initial / probably incomplete) implementation todo list for pep-0484:
-v Function parameter annotations with builtin/custom type classes
-v Function returntype annotations with builtin/custom type classes
-v Function parameter annotations with strings (forward reference)
-v Function return type annotations with strings (forward reference)
-v Local variable type hints
-v Assigned types: `Url = str\ndef get(url:Url) -> str:`
-v Type hints in `with` statements
-x Stub files support
-x support `@no_type_check` and `@no_type_check_decorator`
-x support for typing.cast() operator
-x support for type hint comments for functions, `# type: (int, str) -> int`.
- See comment from Guido https://github.com/davidhalter/jedi/issues/662
-"""
-
-import os
-import re
-
-from parso import ParserSyntaxError, parse, split_lines
-from parso.python import tree
-
-from jedi._compatibility import unicode, force_unicode
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate import compiled
-from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet
-from jedi.evaluate.lazy_context import LazyTreeContext
-from jedi.evaluate.context import ModuleContext
-from jedi.evaluate.helpers import is_string
-from jedi import debug
-from jedi import parser_utils
-
-
-def _evaluate_for_annotation(context, annotation, index=None):
- """
- Evaluates a string-node, looking for an annotation
- If index is not None, the annotation is expected to be a tuple
- and we're interested in that index
- """
- context_set = context.eval_node(_fix_forward_reference(context, annotation))
- return context_set.execute_evaluated()
-
-
-def _evaluate_annotation_string(context, string, index=None):
- node = _get_forward_reference_node(context, string)
- if node is None:
- return NO_CONTEXTS
-
- context_set = context.eval_node(node)
- if index is not None:
- context_set = context_set.filter(
- lambda context: context.array_type == u'tuple'
- and len(list(context.py__iter__())) >= index
- ).py__getitem__(index)
- return context_set.execute_evaluated()
-
-
-def _fix_forward_reference(context, node):
- evaled_nodes = context.eval_node(node)
- if len(evaled_nodes) != 1:
- debug.warning("Eval'ed typing index %s should lead to 1 object, "
- " not %s" % (node, evaled_nodes))
- return node
-
- evaled_context = list(evaled_nodes)[0]
- if is_string(evaled_context):
- result = _get_forward_reference_node(context, evaled_context.get_safe_value())
- if result is not None:
- return result
-
- return node
-
-
-def _get_forward_reference_node(context, string):
- try:
- new_node = context.evaluator.grammar.parse(
- force_unicode(string),
- start_symbol='eval_input',
- error_recovery=False
- )
- except ParserSyntaxError:
- debug.warning('Annotation not parsed: %s' % string)
- return None
- else:
- module = context.tree_node.get_root_node()
- parser_utils.move(new_node, module.end_pos[0])
- new_node.parent = context.tree_node
- return new_node
-
-
-def _split_comment_param_declaration(decl_text):
- """
- Split decl_text on commas, but group generic expressions
- together.
-
- For example, given "foo, Bar[baz, biz]" we return
- ['foo', 'Bar[baz, biz]'].
-
- """
- try:
- node = parse(decl_text, error_recovery=False).children[0]
- except ParserSyntaxError:
- debug.warning('Comment annotation is not valid Python: %s' % decl_text)
- return []
-
- if node.type == 'name':
- return [node.get_code().strip()]
-
- params = []
- try:
- children = node.children
- except AttributeError:
- return []
- else:
- for child in children:
- if child.type in ['name', 'atom_expr', 'power']:
- params.append(child.get_code().strip())
-
- return params
-
-
-@evaluator_method_cache()
-def infer_param(execution_context, param):
- """
- Infers the type of a function parameter, using type annotations.
- """
- annotation = param.annotation
- if annotation is None:
- # If no Python 3-style annotation, look for a Python 2-style comment
- # annotation.
- # Identify parameters to function in the same sequence as they would
- # appear in a type comment.
- all_params = [child for child in param.parent.children
- if child.type == 'param']
-
- node = param.parent.parent
- comment = parser_utils.get_following_comment_same_line(node)
- if comment is None:
- return NO_CONTEXTS
-
- match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
- if not match:
- return NO_CONTEXTS
- params_comments = _split_comment_param_declaration(match.group(1))
-
- # Find the specific param being investigated
- index = all_params.index(param)
- # If the number of parameters doesn't match length of type comment,
- # ignore first parameter (assume it's self).
- if len(params_comments) != len(all_params):
- debug.warning(
- "Comments length != Params length %s %s",
- params_comments, all_params
- )
- from jedi.evaluate.context.instance import InstanceArguments
- if isinstance(execution_context.var_args, InstanceArguments):
- if index == 0:
- # Assume it's self, which is already handled
- return NO_CONTEXTS
- index -= 1
- if index >= len(params_comments):
- return NO_CONTEXTS
-
- param_comment = params_comments[index]
- return _evaluate_annotation_string(
- execution_context.get_root_context(),
- param_comment
- )
- module_context = execution_context.get_root_context()
- return _evaluate_for_annotation(module_context, annotation)
-
-
-def py__annotations__(funcdef):
- return_annotation = funcdef.annotation
- if return_annotation:
- dct = {'return': return_annotation}
- else:
- dct = {}
- for function_param in funcdef.get_params():
- param_annotation = function_param.annotation
- if param_annotation is not None:
- dct[function_param.name.value] = param_annotation
- return dct
-
-
-@evaluator_method_cache()
-def infer_return_types(function_context):
- """
- Infers the type of a function's return value,
- according to type annotations.
- """
- annotation = py__annotations__(function_context.tree_node).get("return", None)
- if annotation is None:
- # If there is no Python 3-type annotation, look for a Python 2-type annotation
- node = function_context.tree_node
- comment = parser_utils.get_following_comment_same_line(node)
- if comment is None:
- return NO_CONTEXTS
-
- match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
- if not match:
- return NO_CONTEXTS
-
- return _evaluate_annotation_string(
- function_context.get_root_context(),
- match.group(1).strip()
- )
-
- module_context = function_context.get_root_context()
- return _evaluate_for_annotation(module_context, annotation)
-
-
-_typing_module = None
-_typing_module_code_lines = None
-
-
-def _get_typing_replacement_module(grammar):
- """
- The idea is to return our jedi replacement for the PEP-0484 typing module
- as discussed at https://github.com/davidhalter/jedi/issues/663
- """
- global _typing_module, _typing_module_code_lines
- if _typing_module is None:
- typing_path = \
- os.path.abspath(os.path.join(__file__, "../jedi_typing.py"))
- with open(typing_path) as f:
- code = unicode(f.read())
- _typing_module = grammar.parse(code)
- _typing_module_code_lines = split_lines(code, keepends=True)
- return _typing_module, _typing_module_code_lines
-
-
-def py__getitem__(context, typ, node):
- if not typ.get_root_context().name.string_name == "typing":
- return None
- # we assume that any class using [] in a module called
- # "typing" with a name for which we have a replacement
- # should be replaced by that class. This is not 100%
- # airtight but I don't have a better idea to check that it's
- # actually the PEP-0484 typing module and not some other
- if node.type == "subscriptlist":
- nodes = node.children[::2] # skip the commas
- else:
- nodes = [node]
- del node
-
- nodes = [_fix_forward_reference(context, node) for node in nodes]
- type_name = typ.name.string_name
-
- # hacked in Union and Optional, since it's hard to do nicely in parsed code
- if type_name in ("Union", '_Union'):
- # In Python 3.6 it's still called typing.Union but it's an instance
- # called _Union.
- return ContextSet.from_sets(context.eval_node(node) for node in nodes)
- if type_name in ("Optional", '_Optional'):
- # Here we have the same issue like in Union. Therefore we also need to
- # check for the instance typing._Optional (Python 3.6).
- return context.eval_node(nodes[0])
-
- module_node, code_lines = _get_typing_replacement_module(context.evaluator.latest_grammar)
- typing = ModuleContext(
- context.evaluator,
- module_node=module_node,
- path=None,
- code_lines=code_lines,
- )
- factories = typing.py__getattribute__("factory")
- assert len(factories) == 1
- factory = list(factories)[0]
- assert factory
- function_body_nodes = factory.tree_node.children[4].children
- valid_classnames = set(child.name.value
- for child in function_body_nodes
- if isinstance(child, tree.Class))
- if type_name not in valid_classnames:
- return None
- compiled_classname = compiled.create_simple_object(context.evaluator, type_name)
-
- from jedi.evaluate.context.iterable import FakeSequence
- args = FakeSequence(
- context.evaluator,
- u'tuple',
- [LazyTreeContext(context, n) for n in nodes]
- )
-
- result = factory.execute_evaluated(compiled_classname, args)
- return result
-
-
-def find_type_from_comment_hint_for(context, node, name):
- return _find_type_from_comment_hint(context, node, node.children[1], name)
-
-
-def find_type_from_comment_hint_with(context, node, name):
- assert len(node.children[1].children) == 3, \
- "Can only be here when children[1] is 'foo() as f'"
- varlist = node.children[1].children[2]
- return _find_type_from_comment_hint(context, node, varlist, name)
-
-
-def find_type_from_comment_hint_assign(context, node, name):
- return _find_type_from_comment_hint(context, node, node.children[0], name)
-
-
-def _find_type_from_comment_hint(context, node, varlist, name):
- index = None
- if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
- # something like "a, b = 1, 2"
- index = 0
- for child in varlist.children:
- if child == name:
- break
- if child.type == "operator":
- continue
- index += 1
- else:
- return []
-
- comment = parser_utils.get_following_comment_same_line(node)
- if comment is None:
- return []
- match = re.match(r"^#\s*type:\s*([^#]*)", comment)
- if match is None:
- return []
- return _evaluate_annotation_string(context, match.group(1).strip(), index)
diff --git a/contrib/python/jedi/jedi/evaluate/recursion.py b/contrib/python/jedi/jedi/evaluate/recursion.py
deleted file mode 100644
index 1f4f6384e9..0000000000
--- a/contrib/python/jedi/jedi/evaluate/recursion.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-Recursions are the recipe of |jedi| to conquer Python code. However, someone
-must stop recursions going mad. Some settings are here to make |jedi| stop at
-the right time. You can read more about them :ref:`here <settings-recursion>`.
-
-Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not
-thread-safe. Why? ``execution_recursion_decorator`` uses class variables to
-count the function calls.
-
-.. _settings-recursion:
-
-Settings
-~~~~~~~~~~
-
-Recursion settings are important if you don't want extremly
-recursive python code to go absolutely crazy.
-
-The default values are based on experiments while completing the |jedi| library
-itself (inception!). But I don't think there's any other Python library that
-uses recursion in a similarly extreme way. Completion should also be fast and
-therefore the quality might not always be maximal.
-
-.. autodata:: recursion_limit
-.. autodata:: total_function_execution_limit
-.. autodata:: per_function_execution_limit
-.. autodata:: per_function_recursion_limit
-"""
-
-from contextlib import contextmanager
-
-from jedi import debug
-from jedi.evaluate.base_context import NO_CONTEXTS
-
-
-recursion_limit = 15
-"""
-Like ``sys.getrecursionlimit()``, just for |jedi|.
-"""
-total_function_execution_limit = 200
-"""
-This is a hard limit of how many non-builtin functions can be executed.
-"""
-per_function_execution_limit = 6
-"""
-The maximal amount of times a specific function may be executed.
-"""
-per_function_recursion_limit = 2
-"""
-A function may not be executed more than this number of times recursively.
-"""
-
-
-class RecursionDetector(object):
- def __init__(self):
- self.pushed_nodes = []
-
-
-@contextmanager
-def execution_allowed(evaluator, node):
- """
- A decorator to detect recursions in statements. In a recursion a statement
- at the same place, in the same module may not be executed two times.
- """
- pushed_nodes = evaluator.recursion_detector.pushed_nodes
-
- if node in pushed_nodes:
- debug.warning('catched stmt recursion: %s @%s', node,
- getattr(node, 'start_pos', None))
- yield False
- else:
- try:
- pushed_nodes.append(node)
- yield True
- finally:
- pushed_nodes.pop()
-
-
-def execution_recursion_decorator(default=NO_CONTEXTS):
- def decorator(func):
- def wrapper(self, **kwargs):
- detector = self.evaluator.execution_recursion_detector
- allowed = detector.push_execution(self)
- try:
- if allowed:
- result = default
- else:
- result = func(self, **kwargs)
- finally:
- detector.pop_execution()
- return result
- return wrapper
- return decorator
-
-
-class ExecutionRecursionDetector(object):
- """
- Catches recursions of executions.
- """
- def __init__(self, evaluator):
- self._evaluator = evaluator
-
- self._recursion_level = 0
- self._parent_execution_funcs = []
- self._funcdef_execution_counts = {}
- self._execution_count = 0
-
- def pop_execution(self):
- self._parent_execution_funcs.pop()
- self._recursion_level -= 1
-
- def push_execution(self, execution):
- funcdef = execution.tree_node
-
- # These two will be undone in pop_execution.
- self._recursion_level += 1
- self._parent_execution_funcs.append(funcdef)
-
- module = execution.get_root_context()
- if module == self._evaluator.builtins_module:
- # We have control over builtins so we know they are not recursing
- # like crazy. Therefore we just let them execute always, because
- # they usually just help a lot with getting good results.
- return False
-
- if self._recursion_level > recursion_limit:
- return True
-
- if self._execution_count >= total_function_execution_limit:
- return True
- self._execution_count += 1
-
- if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:
- return True
- self._funcdef_execution_counts[funcdef] += 1
-
- if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:
- return True
- return False
diff --git a/contrib/python/jedi/jedi/evaluate/stdlib.py b/contrib/python/jedi/jedi/evaluate/stdlib.py
deleted file mode 100644
index 52c223838b..0000000000
--- a/contrib/python/jedi/jedi/evaluate/stdlib.py
+++ /dev/null
@@ -1,321 +0,0 @@
-"""
-Implementations of standard library functions, because it's not possible to
-understand them with Jedi.
-
-To add a new implementation, create a function and add it to the
-``_implemented`` dict at the bottom of this module.
-
-Note that this module exists only to implement very specific functionality in
-the standard library. The usual way to understand the standard library is the
-compiled module that returns the types for C-builtins.
-"""
-import parso
-
-from jedi._compatibility import force_unicode
-from jedi import debug
-from jedi.evaluate.arguments import ValuesArguments, repack_with_argument_clinic
-from jedi.evaluate import analysis
-from jedi.evaluate import compiled
-from jedi.evaluate.context.instance import \
- AbstractInstanceContext, CompiledInstance, BoundMethod, InstanceArguments
-from jedi.evaluate.base_context import ContextualizedNode, \
- NO_CONTEXTS, ContextSet
-from jedi.evaluate.context import ClassContext, ModuleContext, FunctionExecutionContext
-from jedi.evaluate.context import iterable
-from jedi.evaluate.lazy_context import LazyTreeContext
-from jedi.evaluate.syntax_tree import is_string
-
-# Now this is all part of fake tuples in Jedi. However super doesn't work on
-# __init__ and __new__ doesn't work at all. So adding this to nametuples is
-# just the easiest way.
-_NAMEDTUPLE_INIT = """
- def __init__(_cls, {arg_list}):
- 'A helper function for namedtuple.'
- self.__iterable = ({arg_list})
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def __getitem__(self, y):
- return self.__iterable[y]
-
-"""
-
-
-class NotInStdLib(LookupError):
- pass
-
-
-def execute(evaluator, obj, arguments):
- if isinstance(obj, BoundMethod):
- raise NotInStdLib()
-
- try:
- obj_name = obj.name.string_name
- except AttributeError:
- pass
- else:
- if obj.parent_context == evaluator.builtins_module:
- module_name = 'builtins'
- elif isinstance(obj.parent_context, ModuleContext):
- module_name = obj.parent_context.name.string_name
- else:
- module_name = ''
-
- # for now we just support builtin functions.
- try:
- func = _implemented[module_name][obj_name]
- except KeyError:
- pass
- else:
- return func(evaluator, obj, arguments=arguments)
- raise NotInStdLib()
-
-
-def _follow_param(evaluator, arguments, index):
- try:
- key, lazy_context = list(arguments.unpack())[index]
- except IndexError:
- return NO_CONTEXTS
- else:
- return lazy_context.infer()
-
-
-def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False):
- """
- Works like Argument Clinic (PEP 436), to validate function params.
- """
-
- def f(func):
- @repack_with_argument_clinic(string, keep_arguments_param=True)
- def wrapper(evaluator, obj, *args, **kwargs):
- arguments = kwargs.pop('arguments')
- assert not kwargs # Python 2...
- debug.dbg('builtin start %s' % obj, color='MAGENTA')
- result = NO_CONTEXTS
- if want_context:
- kwargs['context'] = arguments.context
- if want_obj:
- kwargs['obj'] = obj
- if want_arguments:
- kwargs['arguments'] = arguments
- result = func(evaluator, *args, **kwargs)
- debug.dbg('builtin end: %s', result, color='MAGENTA')
- return result
-
- return wrapper
- return f
-
-
-@argument_clinic('iterator[, default], /')
-def builtins_next(evaluator, iterators, defaults):
- """
- TODO this function is currently not used. It's a stab at implementing next
- in a different way than fake objects. This would be a bit more flexible.
- """
- if evaluator.environment.version_info.major == 2:
- name = 'next'
- else:
- name = '__next__'
-
- context_set = NO_CONTEXTS
- for iterator in iterators:
- if isinstance(iterator, AbstractInstanceContext):
- context_set = ContextSet.from_sets(
- n.infer()
- for filter in iterator.get_filters(include_self_names=True)
- for n in filter.get(name)
- ).execute_evaluated()
- if context_set:
- return context_set
- return defaults
-
-
-@argument_clinic('object, name[, default], /')
-def builtins_getattr(evaluator, objects, names, defaults=None):
- # follow the first param
- for obj in objects:
- for name in names:
- if is_string(name):
- return obj.py__getattribute__(force_unicode(name.get_safe_value()))
- else:
- debug.warning('getattr called without str')
- continue
- return NO_CONTEXTS
-
-
-@argument_clinic('object[, bases, dict], /')
-def builtins_type(evaluator, objects, bases, dicts):
- if bases or dicts:
- # It's a type creation... maybe someday...
- return NO_CONTEXTS
- else:
- return objects.py__class__()
-
-
-class SuperInstance(AbstractInstanceContext):
- """To be used like the object ``super`` returns."""
- def __init__(self, evaluator, cls):
- su = cls.py_mro()[1]
- super().__init__(evaluator, su and su[0] or self)
-
-
-@argument_clinic('[type[, obj]], /', want_context=True)
-def builtins_super(evaluator, types, objects, context):
- # TODO make this able to detect multiple inheritance super
- if isinstance(context, FunctionExecutionContext):
- if isinstance(context.var_args, InstanceArguments):
- su = context.var_args.instance.py__class__().py__bases__()
- return su[0].infer().execute_evaluated()
-
- return NO_CONTEXTS
-
-
-@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
-def builtins_reversed(evaluator, sequences, obj, arguments):
- # While we could do without this variable (just by using sequences), we
- # want static analysis to work well. Therefore we need to generated the
- # values again.
- key, lazy_context = next(arguments.unpack())
- cn = None
- if isinstance(lazy_context, LazyTreeContext):
- # TODO access private
- cn = ContextualizedNode(lazy_context._context, lazy_context.data)
- ordered = list(sequences.iterate(cn))
-
- rev = list(reversed(ordered))
- # Repack iterator values and then run it the normal way. This is
- # necessary, because `reversed` is a function and autocompletion
- # would fail in certain cases like `reversed(x).__iter__` if we
- # just returned the result directly.
- seq = iterable.FakeSequence(evaluator, u'list', rev)
- arguments = ValuesArguments([ContextSet(seq)])
- return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments))
-
-
-@argument_clinic('obj, type, /', want_arguments=True)
-def builtins_isinstance(evaluator, objects, types, arguments):
- bool_results = set()
- for o in objects:
- cls = o.py__class__()
- try:
- mro_func = cls.py__mro__
- except AttributeError:
- # This is temporary. Everything should have a class attribute in
- # Python?! Maybe we'll leave it here, because some numpy objects or
- # whatever might not.
- bool_results = set([True, False])
- break
-
- mro = mro_func()
-
- for cls_or_tup in types:
- if cls_or_tup.is_class():
- bool_results.add(cls_or_tup in mro)
- elif cls_or_tup.name.string_name == 'tuple' \
- and cls_or_tup.get_root_context() == evaluator.builtins_module:
- # Check for tuples.
- classes = ContextSet.from_sets(
- lazy_context.infer()
- for lazy_context in cls_or_tup.iterate()
- )
- bool_results.add(any(cls in mro for cls in classes))
- else:
- _, lazy_context = list(arguments.unpack())[1]
- if isinstance(lazy_context, LazyTreeContext):
- node = lazy_context.data
- message = 'TypeError: isinstance() arg 2 must be a ' \
- 'class, type, or tuple of classes and types, ' \
- 'not %s.' % cls_or_tup
- analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
-
- return ContextSet.from_iterable(
- compiled.builtin_from_name(evaluator, force_unicode(str(b)))
- for b in bool_results
- )
-
-
-def collections_namedtuple(evaluator, obj, arguments):
- """
- Implementation of the namedtuple function.
-
- This has to be done by processing the namedtuple class template and
- evaluating the result.
-
- """
- collections_context = obj.parent_context
- _class_template_set = collections_context.py__getattribute__(u'_class_template')
- if not _class_template_set:
- # Namedtuples are not supported on Python 2.6, early 2.7, because the
- # _class_template variable is not defined, there.
- return NO_CONTEXTS
-
- # Process arguments
- # TODO here we only use one of the types, we should use all.
- # TODO this is buggy, doesn't need to be a string
- name = list(_follow_param(evaluator, arguments, 0))[0].get_safe_value()
- _fields = list(_follow_param(evaluator, arguments, 1))[0]
- if isinstance(_fields, compiled.CompiledObject):
- fields = _fields.get_safe_value().replace(',', ' ').split()
- elif isinstance(_fields, iterable.Sequence):
- fields = [
- v.get_safe_value()
- for lazy_context in _fields.py__iter__()
- for v in lazy_context.infer() if is_string(v)
- ]
- else:
- return NO_CONTEXTS
-
- def get_var(name):
- x, = collections_context.py__getattribute__(name)
- return x.get_safe_value()
-
- base = next(iter(_class_template_set)).get_safe_value()
- base += _NAMEDTUPLE_INIT
- # Build source code
- code = base.format(
- typename=name,
- field_names=tuple(fields),
- num_fields=len(fields),
- arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1],
- repr_fmt=', '.join(get_var(u'_repr_template').format(name=name) for name in fields),
- field_defs='\n'.join(get_var(u'_field_template').format(index=index, name=name)
- for index, name in enumerate(fields))
- )
-
- # Parse source code
- module = evaluator.grammar.parse(code)
- generated_class = next(module.iter_classdefs())
- parent_context = ModuleContext(
- evaluator, module, None,
- code_lines=parso.split_lines(code, keepends=True),
- )
- return ContextSet(ClassContext(evaluator, parent_context, generated_class))
-
-
-@argument_clinic('first, /')
-def _return_first_param(evaluator, firsts):
- return firsts
-
-
-_implemented = {
- 'builtins': {
- 'getattr': builtins_getattr,
- 'type': builtins_type,
- 'super': builtins_super,
- 'reversed': builtins_reversed,
- 'isinstance': builtins_isinstance,
- },
- 'copy': {
- 'copy': _return_first_param,
- 'deepcopy': _return_first_param,
- },
- 'json': {
- 'load': lambda evaluator, obj, arguments: NO_CONTEXTS,
- 'loads': lambda evaluator, obj, arguments: NO_CONTEXTS,
- },
- 'collections': {
- 'namedtuple': collections_namedtuple,
- },
-}
diff --git a/contrib/python/jedi/jedi/evaluate/syntax_tree.py b/contrib/python/jedi/jedi/evaluate/syntax_tree.py
deleted file mode 100644
index 720ea349e7..0000000000
--- a/contrib/python/jedi/jedi/evaluate/syntax_tree.py
+++ /dev/null
@@ -1,662 +0,0 @@
-"""
-Functions evaluating the syntax tree.
-"""
-import copy
-
-from parso.python import tree
-
-from jedi._compatibility import force_unicode, unicode
-from jedi import debug
-from jedi import parser_utils
-from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, ContextualizedNode, \
- ContextualizedName, iterator_to_context_set, iterate_contexts
-from jedi.evaluate import compiled
-from jedi.evaluate import pep0484
-from jedi.evaluate import recursion
-from jedi.evaluate import helpers
-from jedi.evaluate import analysis
-from jedi.evaluate import imports
-from jedi.evaluate import arguments
-from jedi.evaluate.pep0484 import _evaluate_for_annotation
-from jedi.evaluate.context import ClassContext, FunctionContext
-from jedi.evaluate.context import iterable
-from jedi.evaluate.context import TreeInstance, CompiledInstance
-from jedi.evaluate.finder import NameFinder
-from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled
-from jedi.evaluate.compiled.access import COMPARISON_OPERATORS
-
-
-def _limit_context_infers(func):
- """
- This is for now the way how we limit type inference going wild. There are
- other ways to ensure recursion limits as well. This is mostly necessary
- because of instance (self) access that can be quite tricky to limit.
-
- I'm still not sure this is the way to go, but it looks okay for now and we
- can still go anther way in the future. Tests are there. ~ dave
- """
- def wrapper(context, *args, **kwargs):
- n = context.tree_node
- evaluator = context.evaluator
- try:
- evaluator.inferred_element_counts[n] += 1
- if evaluator.inferred_element_counts[n] > 300:
- debug.warning('In context %s there were too many inferences.', n)
- return NO_CONTEXTS
- except KeyError:
- evaluator.inferred_element_counts[n] = 1
- return func(context, *args, **kwargs)
-
- return wrapper
-
-
-def _py__stop_iteration_returns(generators):
- results = ContextSet()
- for generator in generators:
- try:
- method = generator.py__stop_iteration_returns
- except AttributeError:
- debug.warning('%s is not actually a generator', generator)
- else:
- results |= method()
- return results
-
-
-@debug.increase_indent
-@_limit_context_infers
-def eval_node(context, element):
- debug.dbg('eval_node %s@%s', element, element.start_pos)
- evaluator = context.evaluator
- typ = element.type
- if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'):
- return eval_atom(context, element)
- elif typ == 'lambdef':
- return ContextSet(FunctionContext.from_context(context, element))
- elif typ == 'expr_stmt':
- return eval_expr_stmt(context, element)
- elif typ in ('power', 'atom_expr'):
- first_child = element.children[0]
- children = element.children[1:]
- had_await = False
- if first_child.type == 'keyword' and first_child.value == 'await':
- had_await = True
- first_child = children.pop(0)
-
- context_set = eval_atom(context, first_child)
- for trailer in children:
- if trailer == '**': # has a power operation.
- right = context.eval_node(children[1])
- context_set = _eval_comparison(
- evaluator,
- context,
- context_set,
- trailer,
- right
- )
- break
- context_set = eval_trailer(context, context_set, trailer)
-
- if had_await:
- await_context_set = context_set.py__getattribute__(u"__await__")
- if not await_context_set:
- debug.warning('Tried to run py__await__ on context %s', context)
- context_set = ContextSet()
- return _py__stop_iteration_returns(await_context_set.execute_evaluated())
- return context_set
- elif typ in ('testlist_star_expr', 'testlist',):
- # The implicit tuple in statements.
- return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element))
- elif typ in ('not_test', 'factor'):
- context_set = context.eval_node(element.children[-1])
- for operator in element.children[:-1]:
- context_set = eval_factor(context_set, operator)
- return context_set
- elif typ == 'test':
- # `x if foo else y` case.
- return (context.eval_node(element.children[0]) |
- context.eval_node(element.children[-1]))
- elif typ == 'operator':
- # Must be an ellipsis, other operators are not evaluated.
- # In Python 2 ellipsis is coded as three single dot tokens, not
- # as one token 3 dot token.
- if element.value not in ('.', '...'):
- origin = element.parent
- raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
- return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis'))
- elif typ == 'dotted_name':
- context_set = eval_atom(context, element.children[0])
- for next_name in element.children[2::2]:
- # TODO add search_global=True?
- context_set = context_set.py__getattribute__(next_name, name_context=context)
- return context_set
- elif typ == 'eval_input':
- return eval_node(context, element.children[0])
- elif typ == 'annassign':
- return pep0484._evaluate_for_annotation(context, element.children[1])
- elif typ == 'yield_expr':
- if len(element.children) and element.children[1].type == 'yield_arg':
- # Implies that it's a yield from.
- element = element.children[1].children[1]
- generators = context.eval_node(element)
- return _py__stop_iteration_returns(generators)
-
- # Generator.send() is not implemented.
- return NO_CONTEXTS
- else:
- return eval_or_test(context, element)
-
-
-def eval_trailer(context, base_contexts, trailer):
- trailer_op, node = trailer.children[:2]
- if node == ')': # `arglist` is optional.
- node = None
-
- if trailer_op == '[':
- trailer_op, node, _ = trailer.children
-
- # TODO It's kind of stupid to cast this from a context set to a set.
- foo = set(base_contexts)
- # special case: PEP0484 typing module, see
- # https://github.com/davidhalter/jedi/issues/663
- result = ContextSet()
- for typ in list(foo):
- if isinstance(typ, (ClassContext, TreeInstance)):
- typing_module_types = pep0484.py__getitem__(context, typ, node)
- if typing_module_types is not None:
- foo.remove(typ)
- result |= typing_module_types
-
- return result | base_contexts.get_item(
- eval_subscript_list(context.evaluator, context, node),
- ContextualizedNode(context, trailer)
- )
- else:
- debug.dbg('eval_trailer: %s in %s', trailer, base_contexts)
- if trailer_op == '.':
- return base_contexts.py__getattribute__(
- name_context=context,
- name_or_str=node
- )
- else:
- assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
- args = arguments.TreeArguments(context.evaluator, context, node, trailer)
- return base_contexts.execute(args)
-
-
-def eval_atom(context, atom):
- """
- Basically to process ``atom`` nodes. The parser sometimes doesn't
- generate the node (because it has just one child). In that case an atom
- might be a name or a literal as well.
- """
- if atom.type == 'name':
- # This is the first global lookup.
- stmt = tree.search_ancestor(
- atom, 'expr_stmt', 'lambdef'
- ) or atom
- if stmt.type == 'lambdef':
- stmt = atom
- return context.py__getattribute__(
- name_or_str=atom,
- position=stmt.start_pos,
- search_global=True
- )
- elif atom.type == 'keyword':
- # For False/True/None
- if atom.value in ('False', 'True', 'None'):
- return ContextSet(compiled.builtin_from_name(context.evaluator, atom.value))
- elif atom.value == 'print':
- # print e.g. could be evaluated like this in Python 2.7
- return NO_CONTEXTS
- elif atom.value == 'yield':
- # Contrary to yield from, yield can just appear alone to return a
- # value when used with `.send()`.
- return NO_CONTEXTS
- assert False, 'Cannot evaluate the keyword %s' % atom
-
- elif isinstance(atom, tree.Literal):
- string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value)
- return ContextSet(compiled.create_simple_object(context.evaluator, string))
- elif atom.type == 'strings':
- # Will be multiple string.
- context_set = eval_atom(context, atom.children[0])
- for string in atom.children[1:]:
- right = eval_atom(context, string)
- context_set = _eval_comparison(context.evaluator, context, context_set, u'+', right)
- return context_set
- else:
- c = atom.children
- # Parentheses without commas are not tuples.
- if c[0] == '(' and not len(c) == 2 \
- and not(c[1].type == 'testlist_comp' and
- len(c[1].children) > 1):
- return context.eval_node(c[1])
-
- try:
- comp_for = c[1].children[1]
- except (IndexError, AttributeError):
- pass
- else:
- if comp_for == ':':
- # Dict comprehensions have a colon at the 3rd index.
- try:
- comp_for = c[1].children[3]
- except IndexError:
- pass
-
- if comp_for.type == 'comp_for':
- return ContextSet(iterable.comprehension_from_atom(
- context.evaluator, context, atom
- ))
-
- # It's a dict/list/tuple literal.
- array_node = c[1]
- try:
- array_node_c = array_node.children
- except AttributeError:
- array_node_c = []
- if c[0] == '{' and (array_node == '}' or ':' in array_node_c or
- '**' in array_node_c):
- context = iterable.DictLiteralContext(context.evaluator, context, atom)
- else:
- context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
- return ContextSet(context)
-
-
-@_limit_context_infers
-def eval_expr_stmt(context, stmt, seek_name=None):
- with recursion.execution_allowed(context.evaluator, stmt) as allowed:
- # Here we allow list/set to recurse under certain conditions. To make
- # it possible to resolve stuff like list(set(list(x))), this is
- # necessary.
- if not allowed and context.get_root_context() == context.evaluator.builtins_module:
- try:
- instance = context.var_args.instance
- except AttributeError:
- pass
- else:
- if instance.name.string_name in ('list', 'set'):
- c = instance.get_first_non_keyword_argument_contexts()
- if instance not in c:
- allowed = True
-
- if allowed:
- return _eval_expr_stmt(context, stmt, seek_name)
- return NO_CONTEXTS
-
-
-@debug.increase_indent
-def _eval_expr_stmt(context, stmt, seek_name=None):
- """
- The starting point of the completion. A statement always owns a call
- list, which are the calls, that a statement does. In case multiple
- names are defined in the statement, `seek_name` returns the result for
- this name.
-
- :param stmt: A `tree.ExprStmt`.
- """
- debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
- rhs = stmt.get_rhs()
- context_set = context.eval_node(rhs)
-
- if seek_name:
- c_node = ContextualizedName(context, seek_name)
- context_set = check_tuple_assignments(context.evaluator, c_node, context_set)
-
- first_operator = next(stmt.yield_operators(), None)
- if first_operator not in ('=', None) and first_operator.type == 'operator':
- # `=` is always the last character in aug assignments -> -1
- operator = copy.copy(first_operator)
- operator.value = operator.value[:-1]
- name = stmt.get_defined_names()[0].value
- left = context.py__getattribute__(
- name, position=stmt.start_pos, search_global=True)
-
- for_stmt = tree.search_ancestor(stmt, 'for_stmt')
- if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \
- and parser_utils.for_stmt_defines_one_name(for_stmt):
- # Iterate through result and add the values, that's possible
- # only in for loops without clutter, because they are
- # predictable. Also only do it, if the variable is not a tuple.
- node = for_stmt.get_testlist()
- cn = ContextualizedNode(context, node)
- ordered = list(cn.infer().iterate(cn))
-
- for lazy_context in ordered:
- dct = {for_stmt.children[1].value: lazy_context.infer()}
- with helpers.predefine_names(context, for_stmt, dct):
- t = context.eval_node(rhs)
- left = _eval_comparison(context.evaluator, context, left, operator, t)
- context_set = left
- else:
- context_set = _eval_comparison(context.evaluator, context, left, operator, context_set)
- debug.dbg('eval_expr_stmt result %s', context_set)
- return context_set
-
-
-def eval_or_test(context, or_test):
- iterator = iter(or_test.children)
- types = context.eval_node(next(iterator))
- for operator in iterator:
- right = next(iterator)
- if operator.type == 'comp_op': # not in / is not
- operator = ' '.join(c.value for c in operator.children)
-
- # handle lazy evaluation of and/or here.
- if operator in ('and', 'or'):
- left_bools = set(left.py__bool__() for left in types)
- if left_bools == {True}:
- if operator == 'and':
- types = context.eval_node(right)
- elif left_bools == {False}:
- if operator != 'and':
- types = context.eval_node(right)
- # Otherwise continue, because of uncertainty.
- else:
- types = _eval_comparison(context.evaluator, context, types, operator,
- context.eval_node(right))
- debug.dbg('eval_or_test types %s', types)
- return types
-
-
-@iterator_to_context_set
-def eval_factor(context_set, operator):
- """
- Calculates `+`, `-`, `~` and `not` prefixes.
- """
- for context in context_set:
- if operator == '-':
- if is_number(context):
- yield context.negate()
- elif operator == 'not':
- value = context.py__bool__()
- if value is None: # Uncertainty.
- return
- yield compiled.create_simple_object(context.evaluator, not value)
- else:
- yield context
-
-
-def _literals_to_types(evaluator, result):
- # Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
- # int(), float(), etc).
- new_result = NO_CONTEXTS
- for typ in result:
- if is_literal(typ):
- # Literals are only valid as long as the operations are
- # correct. Otherwise add a value-free instance.
- cls = compiled.builtin_from_name(evaluator, typ.name.string_name)
- new_result |= cls.execute_evaluated()
- else:
- new_result |= ContextSet(typ)
- return new_result
-
-
-def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts):
- if not left_contexts or not right_contexts:
- # illegal slices e.g. cause left/right_result to be None
- result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
- return _literals_to_types(evaluator, result)
- else:
- # I don't think there's a reasonable chance that a string
- # operation is still correct, once we pass something like six
- # objects.
- if len(left_contexts) * len(right_contexts) > 6:
- return _literals_to_types(evaluator, left_contexts | right_contexts)
- else:
- return ContextSet.from_sets(
- _eval_comparison_part(evaluator, context, left, operator, right)
- for left in left_contexts
- for right in right_contexts
- )
-
-
-def _is_tuple(context):
- return isinstance(context, iterable.Sequence) and context.array_type == 'tuple'
-
-
-def _is_list(context):
- return isinstance(context, iterable.Sequence) and context.array_type == 'list'
-
-
-def _bool_to_context(evaluator, bool_):
- return compiled.builtin_from_name(evaluator, force_unicode(str(bool_)))
-
-
-def _eval_comparison_part(evaluator, context, left, operator, right):
- l_is_num = is_number(left)
- r_is_num = is_number(right)
- if isinstance(operator, unicode):
- str_operator = operator
- else:
- str_operator = force_unicode(str(operator.value))
-
- if str_operator == '*':
- # for iterables, ignore * operations
- if isinstance(left, iterable.Sequence) or is_string(left):
- return ContextSet(left)
- elif isinstance(right, iterable.Sequence) or is_string(right):
- return ContextSet(right)
- elif str_operator == '+':
- if l_is_num and r_is_num or is_string(left) and is_string(right):
- return ContextSet(left.execute_operation(right, str_operator))
- elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
- return ContextSet(iterable.MergedArray(evaluator, (left, right)))
- elif str_operator == '-':
- if l_is_num and r_is_num:
- return ContextSet(left.execute_operation(right, str_operator))
- elif str_operator == '%':
- # With strings and numbers the left type typically remains. Except for
- # `int() % float()`.
- return ContextSet(left)
- elif str_operator in COMPARISON_OPERATORS:
- if is_compiled(left) and is_compiled(right):
- # Possible, because the return is not an option. Just compare.
- try:
- return ContextSet(left.execute_operation(right, str_operator))
- except TypeError:
- # Could be True or False.
- pass
- else:
- if str_operator in ('is', '!=', '==', 'is not'):
- operation = COMPARISON_OPERATORS[str_operator]
- bool_ = operation(left, right)
- return ContextSet(_bool_to_context(evaluator, bool_))
-
- return ContextSet(_bool_to_context(evaluator, True), _bool_to_context(evaluator, False))
- elif str_operator == 'in':
- return NO_CONTEXTS
-
- def check(obj):
- """Checks if a Jedi object is either a float or an int."""
- return isinstance(obj, CompiledInstance) and \
- obj.name.string_name in ('int', 'float')
-
- # Static analysis, one is a number, the other one is not.
- if str_operator in ('+', '-') and l_is_num != r_is_num \
- and not (check(left) or check(right)):
- message = "TypeError: unsupported operand type(s) for +: %s and %s"
- analysis.add(context, 'type-error-operation', operator,
- message % (left, right))
-
- return ContextSet(left, right)
-
-
-def _remove_statements(evaluator, context, stmt, name):
- """
- This is the part where statements are being stripped.
-
- Due to lazy evaluation, statements like a = func; b = a; b() have to be
- evaluated.
- """
- pep0484_contexts = \
- pep0484.find_type_from_comment_hint_assign(context, stmt, name)
- if pep0484_contexts:
- return pep0484_contexts
-
- return eval_expr_stmt(context, stmt, seek_name=name)
-
-
-def tree_name_to_contexts(evaluator, context, tree_name):
-
- context_set = ContextSet()
- module_node = context.get_root_context().tree_node
- if module_node is not None:
- names = module_node.get_used_names().get(tree_name.value, [])
- for name in names:
- expr_stmt = name.parent
-
- correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
-
- if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign" and correct_scope:
- context_set |= _evaluate_for_annotation(context, expr_stmt.children[1].children[1])
-
- if context_set:
- return context_set
-
- types = []
- node = tree_name.get_definition(import_name_always=True)
- if node is None:
- node = tree_name.parent
- if node.type == 'global_stmt':
- context = evaluator.create_context(context, tree_name)
- finder = NameFinder(evaluator, context, context, tree_name.value)
- filters = finder.get_filters(search_global=True)
- # For global_stmt lookups, we only need the first possible scope,
- # which means the function itself.
- filters = [next(filters)]
- return finder.find(filters, attribute_lookup=False)
- elif node.type not in ('import_from', 'import_name'):
- raise ValueError("Should not happen. type: %s", node.type)
-
- typ = node.type
- if typ == 'for_stmt':
- types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
- if types:
- return types
- if typ == 'with_stmt':
- types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
- if types:
- return types
-
- if typ in ('for_stmt', 'comp_for'):
- try:
- types = context.predefined_names[node][tree_name.value]
- except KeyError:
- cn = ContextualizedNode(context, node.children[3])
- for_types = iterate_contexts(
- cn.infer(),
- contextualized_node=cn,
- is_async=node.parent.type == 'async_stmt',
- )
- c_node = ContextualizedName(context, tree_name)
- types = check_tuple_assignments(evaluator, c_node, for_types)
- elif typ == 'expr_stmt':
- types = _remove_statements(evaluator, context, node, tree_name)
- elif typ == 'with_stmt':
- context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
- enter_methods = context_managers.py__getattribute__(u'__enter__')
- return enter_methods.execute_evaluated()
- elif typ in ('import_from', 'import_name'):
- types = imports.infer_import(context, tree_name)
- elif typ in ('funcdef', 'classdef'):
- types = _apply_decorators(context, node)
- elif typ == 'try_stmt':
- # TODO an exception can also be a tuple. Check for those.
- # TODO check for types that are not classes and add it to
- # the static analysis report.
- exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
- types = exceptions.execute_evaluated()
- else:
- raise ValueError("Should not happen. type: %s" % typ)
- return types
-
-
-def _apply_decorators(context, node):
- """
- Returns the function, that should to be executed in the end.
- This is also the places where the decorators are processed.
- """
- if node.type == 'classdef':
- decoratee_context = ClassContext(
- context.evaluator,
- parent_context=context,
- tree_node=node
- )
- else:
- decoratee_context = FunctionContext.from_context(context, node)
- initial = values = ContextSet(decoratee_context)
- for dec in reversed(node.get_decorators()):
- debug.dbg('decorator: %s %s', dec, values)
- dec_values = context.eval_node(dec.children[1])
- trailer_nodes = dec.children[2:-1]
- if trailer_nodes:
- # Create a trailer and evaluate it.
- trailer = tree.PythonNode('trailer', trailer_nodes)
- trailer.parent = dec
- dec_values = eval_trailer(context, dec_values, trailer)
-
- if not len(dec_values):
- debug.warning('decorator not found: %s on %s', dec, node)
- return initial
-
- values = dec_values.execute(arguments.ValuesArguments([values]))
- if not len(values):
- debug.warning('not possible to resolve wrappers found %s', node)
- return initial
-
- debug.dbg('decorator end %s', values)
- return values
-
-
-def check_tuple_assignments(evaluator, contextualized_name, context_set):
- """
- Checks if tuples are assigned.
- """
- lazy_context = None
- for index, node in contextualized_name.assignment_indexes():
- cn = ContextualizedNode(contextualized_name.context, node)
- iterated = context_set.iterate(cn)
- for _ in range(index + 1):
- try:
- lazy_context = next(iterated)
- except StopIteration:
- # We could do this with the default param in next. But this
- # would allow this loop to run for a very long time if the
- # index number is high. Therefore break if the loop is
- # finished.
- return ContextSet()
- context_set = lazy_context.infer()
- return context_set
-
-
-def eval_subscript_list(evaluator, context, index):
- """
- Handles slices in subscript nodes.
- """
- if index == ':':
- # Like array[:]
- return ContextSet(iterable.Slice(context, None, None, None))
-
- elif index.type == 'subscript' and not index.children[0] == '.':
- # subscript basically implies a slice operation, except for Python 2's
- # Ellipsis.
- # e.g. array[:3]
- result = []
- for el in index.children:
- if el == ':':
- if not result:
- result.append(None)
- elif el.type == 'sliceop':
- if len(el.children) == 2:
- result.append(el.children[1])
- else:
- result.append(el)
- result += [None] * (3 - len(result))
-
- return ContextSet(iterable.Slice(context, *result))
- elif index.type == 'subscriptlist':
- return NO_CONTEXTS
-
- # No slices
- return context.eval_node(index)
diff --git a/contrib/python/jedi/jedi/evaluate/sys_path.py b/contrib/python/jedi/jedi/evaluate/sys_path.py
deleted file mode 100644
index 8fb1843f05..0000000000
--- a/contrib/python/jedi/jedi/evaluate/sys_path.py
+++ /dev/null
@@ -1,226 +0,0 @@
-import os
-
-from jedi._compatibility import unicode, force_unicode, all_suffixes
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate.base_context import ContextualizedNode
-from jedi.evaluate.helpers import is_string
-from jedi.common.utils import traverse_parents
-from jedi.parser_utils import get_cached_code_lines
-from jedi import settings
-from jedi import debug
-
-
-def _abs_path(module_context, path):
- if os.path.isabs(path):
- return path
-
- module_path = module_context.py__file__()
- if module_path is None:
- # In this case we have no idea where we actually are in the file
- # system.
- return None
-
- base_dir = os.path.dirname(module_path)
- path = force_unicode(path)
- return os.path.abspath(os.path.join(base_dir, path))
-
-
-def _paths_from_assignment(module_context, expr_stmt):
- """
- Extracts the assigned strings from an assignment that looks as follows::
-
- sys.path[0:0] = ['module/path', 'another/module/path']
-
- This function is in general pretty tolerant (and therefore 'buggy').
- However, it's not a big issue usually to add more paths to Jedi's sys_path,
- because it will only affect Jedi in very random situations and by adding
- more paths than necessary, it usually benefits the general user.
- """
- for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
- try:
- assert operator in ['=', '+=']
- assert assignee.type in ('power', 'atom_expr') and \
- len(assignee.children) > 1
- c = assignee.children
- assert c[0].type == 'name' and c[0].value == 'sys'
- trailer = c[1]
- assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
- # TODO Essentially we're not checking details on sys.path
- # manipulation. Both assigment of the sys.path and changing/adding
- # parts of the sys.path are the same: They get added to the end of
- # the current sys.path.
- """
- execution = c[2]
- assert execution.children[0] == '['
- subscript = execution.children[1]
- assert subscript.type == 'subscript'
- assert ':' in subscript.children
- """
- except AssertionError:
- continue
-
- cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
- for lazy_context in cn.infer().iterate(cn):
- for context in lazy_context.infer():
- if is_string(context):
- abs_path = _abs_path(module_context, context.get_safe_value())
- if abs_path is not None:
- yield abs_path
-
-
-def _paths_from_list_modifications(module_context, trailer1, trailer2):
- """ extract the path from either "sys.path.append" or "sys.path.insert" """
- # Guarantee that both are trailers, the first one a name and the second one
- # a function execution with at least one param.
- if not (trailer1.type == 'trailer' and trailer1.children[0] == '.'
- and trailer2.type == 'trailer' and trailer2.children[0] == '('
- and len(trailer2.children) == 3):
- return
-
- name = trailer1.children[1].value
- if name not in ['insert', 'append']:
- return
- arg = trailer2.children[1]
- if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.
- arg = arg.children[2]
-
- for context in module_context.create_context(arg).eval_node(arg):
- if is_string(context):
- abs_path = _abs_path(module_context, context.get_safe_value())
- if abs_path is not None:
- yield abs_path
-
-
-@evaluator_method_cache(default=[])
-def check_sys_path_modifications(module_context):
- """
- Detect sys.path modifications within module.
- """
- def get_sys_path_powers(names):
- for name in names:
- power = name.parent.parent
- if power.type in ('power', 'atom_expr'):
- c = power.children
- if c[0].type == 'name' and c[0].value == 'sys' \
- and c[1].type == 'trailer':
- n = c[1].children[1]
- if n.type == 'name' and n.value == 'path':
- yield name, power
-
- if module_context.tree_node is None:
- return []
-
- added = []
- try:
- possible_names = module_context.tree_node.get_used_names()['path']
- except KeyError:
- pass
- else:
- for name, power in get_sys_path_powers(possible_names):
- expr_stmt = power.parent
- if len(power.children) >= 4:
- added.extend(
- _paths_from_list_modifications(
- module_context, *power.children[2:4]
- )
- )
- elif expr_stmt is not None and expr_stmt.type == 'expr_stmt':
- added.extend(_paths_from_assignment(module_context, expr_stmt))
- return added
-
-
-def discover_buildout_paths(evaluator, script_path):
- buildout_script_paths = set()
-
- for buildout_script_path in _get_buildout_script_paths(script_path):
- for path in _get_paths_from_buildout_script(evaluator, buildout_script_path):
- buildout_script_paths.add(path)
-
- return buildout_script_paths
-
-
-def _get_paths_from_buildout_script(evaluator, buildout_script_path):
- try:
- module_node = evaluator.parse(
- path=buildout_script_path,
- cache=True,
- cache_path=settings.cache_directory
- )
- except IOError:
- debug.warning('Error trying to read buildout_script: %s', buildout_script_path)
- return
-
- from jedi.evaluate.context import ModuleContext
- module = ModuleContext(
- evaluator, module_node, buildout_script_path,
- code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path),
- )
- for path in check_sys_path_modifications(module):
- yield path
-
-
-def _get_parent_dir_with_file(path, filename):
- for parent in traverse_parents(path):
- if os.path.isfile(os.path.join(parent, filename)):
- return parent
- return None
-
-
-def _get_buildout_script_paths(search_path):
- """
- if there is a 'buildout.cfg' file in one of the parent directories of the
- given module it will return a list of all files in the buildout bin
- directory that look like python files.
-
- :param search_path: absolute path to the module.
- :type search_path: str
- """
- project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
- if not project_root:
- return
- bin_path = os.path.join(project_root, 'bin')
- if not os.path.exists(bin_path):
- return
-
- for filename in os.listdir(bin_path):
- try:
- filepath = os.path.join(bin_path, filename)
- with open(filepath, 'r') as f:
- firstline = f.readline()
- if firstline.startswith('#!') and 'python' in firstline:
- yield filepath
- except (UnicodeDecodeError, IOError) as e:
- # Probably a binary file; permission error or race cond. because
- # file got deleted. Ignore it.
- debug.warning(unicode(e))
- continue
-
-
-def dotted_path_in_sys_path(sys_path, module_path):
- """
- Returns the dotted path inside a sys.path as a list of names.
- """
- # First remove the suffix.
- for suffix in all_suffixes():
- if module_path.endswith(suffix):
- module_path = module_path[:-len(suffix)]
- break
- else:
- # There should always be a suffix in a valid Python file on the path.
- return None
-
- if module_path.startswith(os.path.sep):
- # The paths in sys.path most of the times don't end with a slash.
- module_path = module_path[1:]
-
- for p in sys_path:
- if module_path.startswith(p):
- rest = module_path[len(p):]
- if rest:
- split = rest.split(os.path.sep)
- for string in split:
- if not string or '.' in string:
- return None
- return split
-
- return None
diff --git a/contrib/python/jedi/jedi/evaluate/usages.py b/contrib/python/jedi/jedi/evaluate/usages.py
deleted file mode 100644
index 290c4695b1..0000000000
--- a/contrib/python/jedi/jedi/evaluate/usages.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from jedi.evaluate import imports
-from jedi.evaluate.filters import TreeNameDefinition
-from jedi.evaluate.context import ModuleContext
-
-
-def _resolve_names(definition_names, avoid_names=()):
- for name in definition_names:
- if name in avoid_names:
- # Avoiding recursions here, because goto on a module name lands
- # on the same module.
- continue
-
- if not isinstance(name, imports.SubModuleName):
- # SubModuleNames are not actually existing names but created
- # names when importing something like `import foo.bar.baz`.
- yield name
-
- if name.api_type == 'module':
- for name in _resolve_names(name.goto(), definition_names):
- yield name
-
-
-def _dictionarize(names):
- return dict(
- (n if n.tree_name is None else n.tree_name, n)
- for n in names
- )
-
-
-def _find_names(module_context, tree_name):
- context = module_context.create_context(tree_name)
- name = TreeNameDefinition(context, tree_name)
- found_names = set(name.goto())
- found_names.add(name)
- return _dictionarize(_resolve_names(found_names))
-
-
-def usages(module_context, tree_name):
- search_name = tree_name.value
- found_names = _find_names(module_context, tree_name)
- modules = set(d.get_root_context() for d in found_names.values())
- modules = set(m for m in modules if isinstance(m, ModuleContext))
-
- non_matching_usage_maps = {}
- for m in imports.get_modules_containing_name(module_context.evaluator, modules, search_name):
- for name_leaf in m.tree_node.get_used_names().get(search_name, []):
- new = _find_names(m, name_leaf)
- if any(tree_name in found_names for tree_name in new):
- found_names.update(new)
- for tree_name in new:
- for dct in non_matching_usage_maps.get(tree_name, []):
- # A usage that was previously searched for matches with
- # a now found name. Merge.
- found_names.update(dct)
- try:
- del non_matching_usage_maps[tree_name]
- except KeyError:
- pass
- else:
- for name in new:
- non_matching_usage_maps.setdefault(name, []).append(new)
- return found_names.values()
diff --git a/contrib/python/jedi/jedi/evaluate/utils.py b/contrib/python/jedi/jedi/evaluate/utils.py
deleted file mode 100644
index 990a995e8a..0000000000
--- a/contrib/python/jedi/jedi/evaluate/utils.py
+++ /dev/null
@@ -1,119 +0,0 @@
-""" A universal module with functions / classes without dependencies. """
-import sys
-import contextlib
-import functools
-import re
-import os
-
-from jedi._compatibility import reraise
-
-
-_sep = os.path.sep
-if os.path.altsep is not None:
- _sep += os.path.altsep
-_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
-del _sep
-
-
-def to_list(func):
- def wrapper(*args, **kwargs):
- return list(func(*args, **kwargs))
- return wrapper
-
-
-def unite(iterable):
- """Turns a two dimensional array into a one dimensional."""
- return set(typ for types in iterable for typ in types)
-
-
-class UncaughtAttributeError(Exception):
- """
- Important, because `__getattr__` and `hasattr` catch AttributeErrors
- implicitly. This is really evil (mainly because of `__getattr__`).
- `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
- Therefore this class originally had to be derived from `BaseException`
- instead of `Exception`. But because I removed relevant `hasattr` from
- the code base, we can now switch back to `Exception`.
-
- :param base: return values of sys.exc_info().
- """
-
-
-def safe_property(func):
- return property(reraise_uncaught(func))
-
-
-def reraise_uncaught(func):
- """
- Re-throw uncaught `AttributeError`.
-
- Usage: Put ``@rethrow_uncaught`` in front of the function
- which does **not** suppose to raise `AttributeError`.
-
- AttributeError is easily get caught by `hasattr` and another
- ``except AttributeError`` clause. This becomes problem when you use
- a lot of "dynamic" attributes (e.g., using ``@property``) because you
- can't distinguish if the property does not exist for real or some code
- inside of the "dynamic" attribute through that error. In a well
- written code, such error should not exist but getting there is very
- difficult. This decorator is to help us getting there by changing
- `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch.
- This helps us noticing bugs earlier and facilitates debugging.
-
- .. note:: Treating StopIteration here is easy.
- Add that feature when needed.
- """
- @functools.wraps(func)
- def wrapper(*args, **kwds):
- try:
- return func(*args, **kwds)
- except AttributeError:
- exc_info = sys.exc_info()
- reraise(UncaughtAttributeError(exc_info[1]), exc_info[2])
- return wrapper
-
-
-class PushBackIterator(object):
- def __init__(self, iterator):
- self.pushes = []
- self.iterator = iterator
- self.current = None
-
- def push_back(self, value):
- self.pushes.append(value)
-
- def __iter__(self):
- return self
-
- def next(self):
- """ Python 2 Compatibility """
- return self.__next__()
-
- def __next__(self):
- if self.pushes:
- self.current = self.pushes.pop()
- else:
- self.current = next(self.iterator)
- return self.current
-
-
-@contextlib.contextmanager
-def ignored(*exceptions):
- """
- Context manager that ignores all of the specified exceptions. This will
- be in the standard library starting with Python 3.4.
- """
- try:
- yield
- except exceptions:
- pass
-
-
-def indent_block(text, indention=' '):
- """This function indents a text block with a default of four spaces."""
- temp = ''
- while text and text[-1] == '\n':
- temp += text[-1]
- text = text[:-1]
- lines = text.split('\n')
- return '\n'.join(map(lambda s: indention + s, lines)) + temp
diff --git a/contrib/python/jedi/jedi/parser_utils.py b/contrib/python/jedi/jedi/parser_utils.py
deleted file mode 100644
index 212d70f2b6..0000000000
--- a/contrib/python/jedi/jedi/parser_utils.py
+++ /dev/null
@@ -1,260 +0,0 @@
-import re
-import textwrap
-from inspect import cleandoc
-
-from parso.python import tree
-from parso.cache import parser_cache
-
-from jedi._compatibility import literal_eval, force_unicode
-
-_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test',
- 'or_test', 'and_test', 'not_test', 'comparison', 'expr',
- 'xor_expr', 'and_expr', 'shift_expr', 'arith_expr',
- 'atom_expr', 'term', 'factor', 'power', 'atom'}
-
-_FLOW_KEYWORDS = (
- 'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
-)
-
-
-def get_executable_nodes(node, last_added=False):
- """
- For static analysis.
- """
- result = []
- typ = node.type
- if typ == 'name':
- next_leaf = node.get_next_leaf()
- if last_added is False and node.parent.type != 'param' and next_leaf != '=':
- result.append(node)
- elif typ == 'expr_stmt':
- # I think evaluating the statement (and possibly returned arrays),
- # should be enough for static analysis.
- result.append(node)
- for child in node.children:
- result += get_executable_nodes(child, last_added=True)
- elif typ == 'decorator':
- # decorator
- if node.children[-2] == ')':
- node = node.children[-3]
- if node != '(':
- result += get_executable_nodes(node)
- else:
- try:
- children = node.children
- except AttributeError:
- pass
- else:
- if node.type in _EXECUTE_NODES and not last_added:
- result.append(node)
-
- for child in children:
- result += get_executable_nodes(child, last_added)
-
- return result
-
-
-def get_comp_fors(comp_for):
- yield comp_for
- last = comp_for.children[-1]
- while True:
- if last.type == 'comp_for':
- yield last
- elif not last.type == 'comp_if':
- break
- last = last.children[-1]
-
-
-def for_stmt_defines_one_name(for_stmt):
- """
- Returns True if only one name is returned: ``for x in y``.
- Returns False if the for loop is more complicated: ``for x, z in y``.
-
- :returns: bool
- """
- return for_stmt.children[1].type == 'name'
-
-
-def get_flow_branch_keyword(flow_node, node):
- start_pos = node.start_pos
- if not (flow_node.start_pos < start_pos <= flow_node.end_pos):
- raise ValueError('The node is not part of the flow.')
-
- keyword = None
- for i, child in enumerate(flow_node.children):
- if start_pos < child.start_pos:
- return keyword
- first_leaf = child.get_first_leaf()
- if first_leaf in _FLOW_KEYWORDS:
- keyword = first_leaf
- return 0
-
-
-def get_statement_of_position(node, pos):
- for c in node.children:
- if c.start_pos <= pos <= c.end_pos:
- if c.type not in ('decorated', 'simple_stmt', 'suite',
- 'async_stmt', 'async_funcdef') \
- and not isinstance(c, (tree.Flow, tree.ClassOrFunc)):
- return c
- else:
- try:
- return get_statement_of_position(c, pos)
- except AttributeError:
- pass # Must be a non-scope
- return None
-
-
-def clean_scope_docstring(scope_node):
- """ Returns a cleaned version of the docstring token. """
- node = scope_node.get_doc_node()
- if node is not None:
- # TODO We have to check next leaves until there are no new
- # leaves anymore that might be part of the docstring. A
- # docstring can also look like this: ``'foo' 'bar'
- # Returns a literal cleaned version of the ``Token``.
- cleaned = cleandoc(safe_literal_eval(node.value))
- # Since we want the docstr output to be always unicode, just
- # force it.
- return force_unicode(cleaned)
- return ''
-
-
-def safe_literal_eval(value):
- first_two = value[:2].lower()
- if first_two[0] == 'f' or first_two in ('fr', 'rf'):
- # literal_eval is not able to resovle f literals. We have to do that
- # manually, but that's right now not implemented.
- return ''
-
- try:
- return literal_eval(value)
- except SyntaxError:
- # It's possible to create syntax errors with literals like rb'' in
- # Python 2. This should not be possible and in that case just return an
- # empty string.
- # Before Python 3.3 there was a more strict definition in which order
- # you could define literals.
- return ''
-
-
-def get_call_signature(funcdef, width=72, call_string=None):
- """
- Generate call signature of this function.
-
- :param width: Fold lines if a line is longer than this value.
- :type width: int
- :arg func_name: Override function name when given.
- :type func_name: str
-
- :rtype: str
- """
- # Lambdas have no name.
- if call_string is None:
- if funcdef.type == 'lambdef':
- call_string = '<lambda>'
- else:
- call_string = funcdef.name.value
- if funcdef.type == 'lambdef':
- p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')'
- else:
- p = funcdef.children[2].get_code()
- p = re.sub(r'\s+', ' ', p)
- if funcdef.annotation:
- rtype = " ->" + funcdef.annotation.get_code()
- else:
- rtype = ""
- code = call_string + p + rtype
-
- return '\n'.join(textwrap.wrap(code, width))
-
-
-def get_doc_with_call_signature(scope_node):
- """
- Return a document string including call signature.
- """
- call_signature = None
- if scope_node.type == 'classdef':
- for funcdef in scope_node.iter_funcdefs():
- if funcdef.name.value == '__init__':
- call_signature = \
- get_call_signature(funcdef, call_string=scope_node.name.value)
- elif scope_node.type in ('funcdef', 'lambdef'):
- call_signature = get_call_signature(scope_node)
-
- doc = clean_scope_docstring(scope_node)
- if call_signature is None:
- return doc
- if not doc:
- return call_signature
- return '%s\n\n%s' % (call_signature, doc)
-
-
-def move(node, line_offset):
- """
- Move the `Node` start_pos.
- """
- try:
- children = node.children
- except AttributeError:
- node.line += line_offset
- else:
- for c in children:
- move(c, line_offset)
-
-
-def get_following_comment_same_line(node):
- """
- returns (as string) any comment that appears on the same line,
- after the node, including the #
- """
- try:
- if node.type == 'for_stmt':
- whitespace = node.children[5].get_first_leaf().prefix
- elif node.type == 'with_stmt':
- whitespace = node.children[3].get_first_leaf().prefix
- elif node.type == 'funcdef':
- # actually on the next line
- whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix
- else:
- whitespace = node.get_last_leaf().get_next_leaf().prefix
- except AttributeError:
- return None
- except ValueError:
- # TODO in some particular cases, the tree doesn't seem to be linked
- # correctly
- return None
- if "#" not in whitespace:
- return None
- comment = whitespace[whitespace.index("#"):]
- if "\r" in comment:
- comment = comment[:comment.index("\r")]
- if "\n" in comment:
- comment = comment[:comment.index("\n")]
- return comment
-
-
-def is_scope(node):
- return node.type in ('file_input', 'classdef', 'funcdef', 'lambdef', 'comp_for')
-
-
-def get_parent_scope(node, include_flows=False):
- """
- Returns the underlying scope.
- """
- scope = node.parent
- while scope is not None:
- if include_flows and isinstance(scope, tree.Flow):
- return scope
- if is_scope(scope):
- break
- scope = scope.parent
- return scope
-
-
-def get_cached_code_lines(grammar, path):
- """
- Basically access the cached code lines in parso. This is not the nicest way
- to do this, but we avoid splitting all the lines again.
- """
- return parser_cache[grammar._hashed][path].lines
diff --git a/contrib/python/jedi/jedi/refactoring.py b/contrib/python/jedi/jedi/refactoring.py
deleted file mode 100644
index 6c1d74d1bd..0000000000
--- a/contrib/python/jedi/jedi/refactoring.py
+++ /dev/null
@@ -1,203 +0,0 @@
-"""
-THIS is not in active development, please check
-https://github.com/davidhalter/jedi/issues/667 first before editing.
-
-Introduce some basic refactoring functions to |jedi|. This module is still in a
-very early development stage and needs much testing and improvement.
-
-.. warning:: I won't do too much here, but if anyone wants to step in, please
- do. Refactoring is none of my priorities
-
-It uses the |jedi| `API <api.html>`_ and supports currently the
-following functions (sometimes bug-prone):
-
-- rename
-- extract variable
-- inline variable
-"""
-import difflib
-
-from parso import python_bytes_to_unicode, split_lines
-from jedi.evaluate import helpers
-
-
-class Refactoring(object):
- def __init__(self, change_dct):
- """
- :param change_dct: dict(old_path=(new_path, old_lines, new_lines))
- """
- self.change_dct = change_dct
-
- def old_files(self):
- dct = {}
- for old_path, (new_path, old_l, new_l) in self.change_dct.items():
- dct[old_path] = '\n'.join(old_l)
- return dct
-
- def new_files(self):
- dct = {}
- for old_path, (new_path, old_l, new_l) in self.change_dct.items():
- dct[new_path] = '\n'.join(new_l)
- return dct
-
- def diff(self):
- texts = []
- for old_path, (new_path, old_l, new_l) in self.change_dct.items():
- if old_path:
- udiff = difflib.unified_diff(old_l, new_l)
- else:
- udiff = difflib.unified_diff(old_l, new_l, old_path, new_path)
- texts.append('\n'.join(udiff))
- return '\n'.join(texts)
-
-
-def rename(script, new_name):
- """ The `args` / `kwargs` params are the same as in `api.Script`.
- :param new_name: The new name of the script.
- :param script: The source Script object.
- :return: list of changed lines/changed files
- """
- return Refactoring(_rename(script.usages(), new_name))
-
-
-def _rename(names, replace_str):
- """ For both rename and inline. """
- order = sorted(names, key=lambda x: (x.module_path, x.line, x.column),
- reverse=True)
-
- def process(path, old_lines, new_lines):
- if new_lines is not None: # goto next file, save last
- dct[path] = path, old_lines, new_lines
-
- dct = {}
- current_path = object()
- new_lines = old_lines = None
- for name in order:
- if name.in_builtin_module():
- continue
- if current_path != name.module_path:
- current_path = name.module_path
-
- process(current_path, old_lines, new_lines)
- if current_path is not None:
- # None means take the source that is a normal param.
- with open(current_path) as f:
- source = f.read()
-
- new_lines = split_lines(python_bytes_to_unicode(source))
- old_lines = new_lines[:]
-
- nr, indent = name.line, name.column
- line = new_lines[nr - 1]
- new_lines[nr - 1] = line[:indent] + replace_str + \
- line[indent + len(name.name):]
- process(current_path, old_lines, new_lines)
- return dct
-
-
-def extract(script, new_name):
- """ The `args` / `kwargs` params are the same as in `api.Script`.
- :param operation: The refactoring operation to execute.
- :type operation: str
- :type source: str
- :return: list of changed lines/changed files
- """
- new_lines = split_lines(python_bytes_to_unicode(script.source))
- old_lines = new_lines[:]
-
- user_stmt = script._parser.user_stmt()
-
- # TODO care for multi-line extracts
- dct = {}
- if user_stmt:
- pos = script._pos
- line_index = pos[0] - 1
- # Be careful here. 'array_for_pos' does not exist in 'helpers'.
- arr, index = helpers.array_for_pos(user_stmt, pos)
- if arr is not None:
- start_pos = arr[index].start_pos
- end_pos = arr[index].end_pos
-
- # take full line if the start line is different from end line
- e = end_pos[1] if end_pos[0] == start_pos[0] else None
- start_line = new_lines[start_pos[0] - 1]
- text = start_line[start_pos[1]:e]
- for l in range(start_pos[0], end_pos[0] - 1):
- text += '\n' + str(l)
- if e is None:
- end_line = new_lines[end_pos[0] - 1]
- text += '\n' + end_line[:end_pos[1]]
-
- # remove code from new lines
- t = text.lstrip()
- del_start = start_pos[1] + len(text) - len(t)
-
- text = t.rstrip()
- del_end = len(t) - len(text)
- if e is None:
- new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:]
- e = len(start_line)
- else:
- e = e - del_end
- start_line = start_line[:del_start] + new_name + start_line[e:]
- new_lines[start_pos[0] - 1] = start_line
- new_lines[start_pos[0]:end_pos[0] - 1] = []
-
- # add parentheses in multi-line case
- open_brackets = ['(', '[', '{']
- close_brackets = [')', ']', '}']
- if '\n' in text and not (text[0] in open_brackets and text[-1] ==
- close_brackets[open_brackets.index(text[0])]):
- text = '(%s)' % text
-
- # add new line before statement
- indent = user_stmt.start_pos[1]
- new = "%s%s = %s" % (' ' * indent, new_name, text)
- new_lines.insert(line_index, new)
- dct[script.path] = script.path, old_lines, new_lines
- return Refactoring(dct)
-
-
-def inline(script):
- """
- :type script: api.Script
- """
- new_lines = split_lines(python_bytes_to_unicode(script.source))
-
- dct = {}
-
- definitions = script.goto_assignments()
- assert len(definitions) == 1
- stmt = definitions[0]._definition
- usages = script.usages()
- inlines = [r for r in usages
- if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos]
- inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column),
- reverse=True)
- expression_list = stmt.expression_list()
- # don't allow multi-line refactorings for now.
- assert stmt.start_pos[0] == stmt.end_pos[0]
- index = stmt.start_pos[0] - 1
-
- line = new_lines[index]
- replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1]
- replace_str = replace_str.strip()
- # tuples need parentheses
- if expression_list and isinstance(expression_list[0], pr.Array):
- arr = expression_list[0]
- if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1:
- replace_str = '(%s)' % replace_str
-
- # if it's the only assignment, remove the statement
- if len(stmt.get_defined_names()) == 1:
- line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]
-
- dct = _rename(inlines, replace_str)
- # remove the empty line
- new_lines = dct[script.path][2]
- if line.strip():
- new_lines[index] = line
- else:
- new_lines.pop(index)
-
- return Refactoring(dct)
diff --git a/contrib/python/jedi/jedi/settings.py b/contrib/python/jedi/jedi/settings.py
deleted file mode 100644
index 436fadfc0e..0000000000
--- a/contrib/python/jedi/jedi/settings.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""
-This module contains variables with global |jedi| settings. To change the
-behavior of |jedi|, change the variables defined in :mod:`jedi.settings`.
-
-Plugins should expose an interface so that the user can adjust the
-configuration.
-
-
-Example usage::
-
- from jedi import settings
- settings.case_insensitive_completion = True
-
-
-Completion output
-~~~~~~~~~~~~~~~~~
-
-.. autodata:: case_insensitive_completion
-.. autodata:: add_bracket_after_function
-.. autodata:: no_completion_duplicates
-
-
-Filesystem cache
-~~~~~~~~~~~~~~~~
-
-.. autodata:: cache_directory
-.. autodata:: use_filesystem_cache
-
-
-Parser
-~~~~~~
-
-.. autodata:: fast_parser
-
-
-Dynamic stuff
-~~~~~~~~~~~~~
-
-.. autodata:: dynamic_array_additions
-.. autodata:: dynamic_params
-.. autodata:: dynamic_params_for_other_modules
-.. autodata:: additional_dynamic_modules
-.. autodata:: auto_import_modules
-
-
-Caching
-~~~~~~~
-
-.. autodata:: call_signatures_validity
-
-
-"""
-import os
-import platform
-
-# ----------------
-# completion output settings
-# ----------------
-
-case_insensitive_completion = True
-"""
-The completion is by default case insensitive.
-"""
-
-add_bracket_after_function = False
-"""
-Adds an opening bracket after a function, because that's normal behaviour.
-Removed it again, because in VIM that is not very practical.
-"""
-
-no_completion_duplicates = True
-"""
-If set, completions with the same name don't appear in the output anymore,
-but are in the `same_name_completions` attribute.
-"""
-
-# ----------------
-# Filesystem cache
-# ----------------
-
-use_filesystem_cache = True
-"""
-Use filesystem cache to save once parsed files with pickle.
-"""
-
-if platform.system().lower() == 'windows':
- _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi',
- 'Jedi')
-elif platform.system().lower() == 'darwin':
- _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
-else:
- _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache',
- 'jedi')
-cache_directory = os.path.expanduser(_cache_directory)
-"""
-The path where the cache is stored.
-
-On Linux, this defaults to ``~/.cache/jedi/``, on OS X to
-``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``.
-On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
-``$XDG_CACHE_HOME/jedi`` is used instead of the default one.
-"""
-
-# ----------------
-# parser
-# ----------------
-
-fast_parser = True
-"""
-Use the fast parser. This means that reparsing is only being done if
-something has been changed e.g. to a function. If this happens, only the
-function is being reparsed.
-"""
-
-# ----------------
-# dynamic stuff
-# ----------------
-
-dynamic_array_additions = True
-"""
-check for `append`, etc. on arrays: [], {}, () as well as list/set calls.
-"""
-
-dynamic_params = True
-"""
-A dynamic param completion, finds the callees of the function, which define
-the params of a function.
-"""
-
-dynamic_params_for_other_modules = True
-"""
-Do the same for other modules.
-"""
-
-additional_dynamic_modules = []
-"""
-Additional modules in which |jedi| checks if statements are to be found. This
-is practical for IDEs, that want to administrate their modules themselves.
-"""
-
-dynamic_flow_information = True
-"""
-Check for `isinstance` and other information to infer a type.
-"""
-
-auto_import_modules = [
- 'hashlib', # hashlib is mostly using setattr, which jedi doesn't understand
- 'gi', # This third-party repository (GTK stuff) doesn't really work with jedi
-]
-"""
-Modules that are not analyzed but imported, although they contain Python code.
-This improves autocompletion for libraries that use ``setattr`` or
-``globals()`` modifications a lot.
-"""
-
-# ----------------
-# caching validity (time)
-# ----------------
-
-call_signatures_validity = 3.0
-"""
-Finding function calls might be slow (0.1-0.5s). This is not acceptible for
-normal writing. Therefore cache it for a short time.
-"""
diff --git a/contrib/python/jedi/jedi/utils.py b/contrib/python/jedi/jedi/utils.py
deleted file mode 100644
index 04e87c301c..0000000000
--- a/contrib/python/jedi/jedi/utils.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-Utilities for end-users.
-"""
-
-from __future__ import absolute_import
-import __main__
-from collections import namedtuple
-import logging
-import traceback
-import re
-import os
-import sys
-
-from parso import split_lines
-
-from jedi import Interpreter
-from jedi.api.helpers import get_on_completion_name
-
-
-READLINE_DEBUG = False
-
-
-def setup_readline(namespace_module=__main__):
- """
- Install Jedi completer to :mod:`readline`.
-
- This function setups :mod:`readline` to use Jedi in Python interactive
- shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
- ``$HOME/.pythonrc.py``), you can add this piece of code::
-
- try:
- from jedi.utils import setup_readline
- setup_readline()
- except ImportError:
- # Fallback to the stdlib readline completer if it is installed.
- # Taken from http://docs.python.org/2/library/rlcompleter.html
- print("Jedi is not installed, falling back to readline")
- try:
- import readline
- import rlcompleter
- readline.parse_and_bind("tab: complete")
- except ImportError:
- print("Readline is not installed either. No tab completion is enabled.")
-
- This will fallback to the readline completer if Jedi is not installed.
- The readline completer will only complete names in the global namespace,
- so for example::
-
- ran<TAB>
-
- will complete to ``range``
-
- with both Jedi and readline, but::
-
- range(10).cou<TAB>
-
- will show complete to ``range(10).count`` only with Jedi.
-
- You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
- your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
- bash).
-
- """
- if READLINE_DEBUG:
- logging.basicConfig(
- filename='/tmp/jedi.log',
- filemode='a',
- level=logging.DEBUG
- )
-
- class JediRL(object):
- def complete(self, text, state):
- """
- This complete stuff is pretty weird, a generator would make
- a lot more sense, but probably due to backwards compatibility
- this is still the way how it works.
-
- The only important part is stuff in the ``state == 0`` flow,
- everything else has been copied from the ``rlcompleter`` std.
- library module.
- """
- if state == 0:
- sys.path.insert(0, os.getcwd())
- # Calling python doesn't have a path, so add to sys.path.
- try:
- logging.debug("Start REPL completion: " + repr(text))
- interpreter = Interpreter(text, [namespace_module.__dict__])
-
- lines = split_lines(text)
- position = (len(lines), len(lines[-1]))
- name = get_on_completion_name(
- interpreter._module_node,
- lines,
- position
- )
- before = text[:len(text) - len(name)]
- completions = interpreter.completions()
- logging.debug("REPL completions: %s", completions)
- except:
- logging.error("REPL Completion error:\n" + traceback.format_exc())
- raise
- finally:
- sys.path.pop(0)
-
- self.matches = [before + c.name_with_symbols for c in completions]
- try:
- return self.matches[state]
- except IndexError:
- return None
-
- try:
- # Need to import this one as well to make sure it's executed before
- # this code. This didn't use to be an issue until 3.3. Starting with
- # 3.4 this is different, it always overwrites the completer if it's not
- # already imported here.
- import rlcompleter # noqa: F401
- import readline
- except ImportError:
- print("Jedi: Module readline not available.")
- else:
- readline.set_completer(JediRL().complete)
- readline.parse_and_bind("tab: complete")
- # jedi itself does the case matching
- readline.parse_and_bind("set completion-ignore-case on")
- # because it's easier to hit the tab just once
- readline.parse_and_bind("set show-all-if-unmodified")
- readline.parse_and_bind("set show-all-if-ambiguous on")
- # don't repeat all the things written in the readline all the time
- readline.parse_and_bind("set completion-prefix-display-length 2")
- # No delimiters, Jedi handles that.
- readline.set_completer_delims('')
-
-
-def version_info():
- """
- Returns a namedtuple of Jedi's version, similar to Python's
- ``sys.version_info``.
- """
- Version = namedtuple('Version', 'major, minor, micro')
- from jedi import __version__
- tupl = re.findall(r'[a-z]+|\d+', __version__)
- return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
diff --git a/contrib/python/jedi/patches/01-arcadia.patch b/contrib/python/jedi/patches/01-arcadia.patch
deleted file mode 100644
index d1e2311fd9..0000000000
--- a/contrib/python/jedi/patches/01-arcadia.patch
+++ /dev/null
@@ -1,46 +0,0 @@
---- contrib/python/jedi/jedi/evaluate/compiled/fake.py (index)
-+++ contrib/python/jedi/jedi/evaluate/compiled/fake.py (working tree)
-@@ -3,22 +3,26 @@ Loads functions that are mixed in to the standard library. E.g. builtins are
- written in C (binaries), but my autocompletion only understands Python code. By
- mixing in Python code, the autocompletion should work much better for builtins.
- """
--
-+import sys
- import os
- from itertools import chain
-
-+import __res
-+
- from jedi._compatibility import unicode
-
- fake_modules = {}
-
-
- def _get_path_dict():
-- path = os.path.dirname(os.path.abspath(__file__))
-+ path = os.path.dirname(__file__)
- base_path = os.path.join(path, 'fake')
- dct = {}
-- for file_name in os.listdir(base_path):
-- if file_name.endswith('.pym'):
-- dct[file_name[:-4]] = os.path.join(base_path, file_name)
-+ for file_name in __res.resfs_files():
-+ if sys.version_info[0] == 3:
-+ file_name = str(file_name, 'ascii')
-+ if file_name.startswith(base_path) and file_name.endswith('.pym'):
-+ dct[file_name[len(base_path) + 1:-4]] = file_name
- return dct
-
-
-@@ -45,8 +49,9 @@ def _load_faked_module(evaluator, module_name):
- fake_modules[module_name] = None
- return
-
-- with open(path) as f:
-- source = f.read()
-+ if sys.version_info[0] == 3:
-+ path = bytes(path, 'ascii')
-+ source = __res.resfs_read(path)
-
- fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source))
-