author    monster <[email protected]>  2022-07-07 14:41:37 +0300
committer monster <[email protected]>  2022-07-07 14:41:37 +0300
commit    06e5c21a835c0e923506c4ff27929f34e00761c2 (patch)
tree      75efcbc6854ef9bd476eb8bf00cc5c900da436a2 /contrib/python/setuptools/py3
parent    03f024c4412e3aa613bb543cf1660176320ba8f4 (diff)
fix ya.make
Diffstat (limited to 'contrib/python/setuptools/py3')
-rw-r--r--  contrib/python/setuptools/py3/.dist-info/METADATA | 124
-rw-r--r--  contrib/python/setuptools/py3/.dist-info/entry_points.txt | 56
-rw-r--r--  contrib/python/setuptools/py3/.dist-info/top_level.txt | 3
-rw-r--r--  contrib/python/setuptools/py3/.yandex_meta/yamaker.yaml | 4
-rw-r--r--  contrib/python/setuptools/py3/LICENSE | 19
-rw-r--r--  contrib/python/setuptools/py3/README.rst | 69
-rw-r--r--  contrib/python/setuptools/py3/_distutils_hack/__init__.py | 132
-rw-r--r--  contrib/python/setuptools/py3/_distutils_hack/override.py | 1
-rw-r--r--  contrib/python/setuptools/py3/patches/01-arcadia.patch | 93
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/__init__.py | 3387
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/__init__.py | 0
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/appdirs.py | 608
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__about__.py | 26
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__init__.py | 25
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_manylinux.py | 301
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_musllinux.py | 136
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_structures.py | 67
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/markers.py | 304
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/requirements.py | 146
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/specifiers.py | 828
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/tags.py | 484
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/utils.py | 136
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/version.py | 504
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py | 5742
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/extern/__init__.py | 73
-rw-r--r--  contrib/python/setuptools/py3/setuptools/__init__.py | 242
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_deprecation_warning.py | 7
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/__init__.py | 24
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/_msvccompiler.py | 561
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/archive_util.py | 256
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/bcppcompiler.py | 393
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/ccompiler.py | 1123
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/cmd.py | 403
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/__init__.py | 31
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/bdist.py | 143
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_dumb.py | 123
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_msi.py | 749
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_rpm.py | 579
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_wininst.py | 377
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/build.py | 157
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/build_clib.py | 209
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/build_ext.py | 755
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/build_py.py | 392
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/build_scripts.py | 152
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/check.py | 148
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/clean.py | 76
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/config.py | 344
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/install.py | 721
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/install_data.py | 79
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/install_egg_info.py | 84
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/install_headers.py | 47
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/install_lib.py | 217
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/install_scripts.py | 60
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/py37compat.py | 30
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/register.py | 304
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/sdist.py | 494
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/command/upload.py | 214
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/config.py | 130
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/core.py | 249
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/cygwinccompiler.py | 425
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/debug.py | 5
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/dep_util.py | 92
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/dir_util.py | 210
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/dist.py | 1257
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/errors.py | 97
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/extension.py | 240
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/fancy_getopt.py | 457
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/file_util.py | 238
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/filelist.py | 355
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/log.py | 77
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/msvc9compiler.py | 788
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/msvccompiler.py | 643
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/py35compat.py | 19
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/py38compat.py | 7
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/spawn.py | 106
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/sysconfig.py | 601
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/text_file.py | 286
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/unixccompiler.py | 325
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/util.py | 548
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/version.py | 363
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_distutils/versionpredicate.py | 169
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_imp.py | 82
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/__init__.py | 0
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/__init__.py | 4
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py | 3825
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/recipes.py | 620
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/ordered_set.py | 488
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/__about__.py | 26
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/__init__.py | 25
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/_manylinux.py | 301
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/_musllinux.py | 136
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/_structures.py | 67
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/markers.py | 304
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/requirements.py | 146
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/specifiers.py | 828
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/tags.py | 484
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/utils.py | 136
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/packaging/version.py | 504
-rw-r--r--  contrib/python/setuptools/py3/setuptools/_vendor/pyparsing.py | 5742
-rw-r--r--  contrib/python/setuptools/py3/setuptools/archive_util.py | 205
-rw-r--r--  contrib/python/setuptools/py3/setuptools/build_meta.py | 290
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/__init__.py | 8
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/alias.py | 78
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/bdist_egg.py | 456
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py | 40
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/build_clib.py | 101
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/build_ext.py | 328
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/build_py.py | 242
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/develop.py | 193
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/dist_info.py | 36
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/easy_install.py | 2299
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/egg_info.py | 755
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install.py | 132
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install_egg_info.py | 62
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install_lib.py | 122
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install_scripts.py | 69
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/launcher manifest.xml | 15
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/py36compat.py | 134
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/register.py | 18
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/rotate.py | 64
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/saveopts.py | 22
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/sdist.py | 196
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/setopt.py | 149
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/test.py | 252
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/upload.py | 17
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/upload_docs.py | 202
-rw-r--r--  contrib/python/setuptools/py3/setuptools/config.py | 751
-rw-r--r--  contrib/python/setuptools/py3/setuptools/dep_util.py | 25
-rw-r--r--  contrib/python/setuptools/py3/setuptools/depends.py | 176
-rw-r--r--  contrib/python/setuptools/py3/setuptools/dist.py | 1156
-rw-r--r--  contrib/python/setuptools/py3/setuptools/errors.py | 40
-rw-r--r--  contrib/python/setuptools/py3/setuptools/extension.py | 55
-rw-r--r--  contrib/python/setuptools/py3/setuptools/extern/__init__.py | 73
-rw-r--r--  contrib/python/setuptools/py3/setuptools/glob.py | 167
-rw-r--r--  contrib/python/setuptools/py3/setuptools/installer.py | 104
-rw-r--r--  contrib/python/setuptools/py3/setuptools/launch.py | 36
-rw-r--r--  contrib/python/setuptools/py3/setuptools/monkey.py | 177
-rw-r--r--  contrib/python/setuptools/py3/setuptools/msvc.py | 1805
-rw-r--r--  contrib/python/setuptools/py3/setuptools/namespaces.py | 107
-rw-r--r--  contrib/python/setuptools/py3/setuptools/package_index.py | 1127
-rw-r--r--  contrib/python/setuptools/py3/setuptools/py34compat.py | 13
-rw-r--r--  contrib/python/setuptools/py3/setuptools/sandbox.py | 530
-rw-r--r--  contrib/python/setuptools/py3/setuptools/script (dev).tmpl | 6
-rw-r--r--  contrib/python/setuptools/py3/setuptools/script.tmpl | 3
-rw-r--r--  contrib/python/setuptools/py3/setuptools/unicode_utils.py | 42
-rw-r--r--  contrib/python/setuptools/py3/setuptools/version.py | 6
-rw-r--r--  contrib/python/setuptools/py3/setuptools/wheel.py | 213
-rw-r--r--  contrib/python/setuptools/py3/setuptools/windows_support.py | 29
148 files changed, 0 insertions(+), 57293 deletions(-)
diff --git a/contrib/python/setuptools/py3/.dist-info/METADATA b/contrib/python/setuptools/py3/.dist-info/METADATA
deleted file mode 100644
index b2480296724..00000000000
--- a/contrib/python/setuptools/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,124 +0,0 @@
-Metadata-Version: 2.1
-Name: setuptools
-Version: 59.7.0
-Summary: Easily download, build, install, upgrade, and uninstall Python packages
-Home-page: https://github.com/pypa/setuptools
-Author: Python Packaging Authority
-Author-email: [email protected]
-License: UNKNOWN
-Project-URL: Documentation, https://setuptools.pypa.io/
-Keywords: CPAN PyPI distutils eggs package management
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: System :: Archiving :: Packaging
-Classifier: Topic :: System :: Systems Administration
-Classifier: Topic :: Utilities
-Requires-Python: >=3.7
-License-File: LICENSE
-Provides-Extra: certs
-Provides-Extra: docs
-Requires-Dist: sphinx ; extra == 'docs'
-Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs'
-Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
-Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
-Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs'
-Requires-Dist: sphinx-inline-tabs ; extra == 'docs'
-Requires-Dist: sphinxcontrib-towncrier ; extra == 'docs'
-Requires-Dist: furo ; extra == 'docs'
-Provides-Extra: ssl
-Provides-Extra: testing
-Requires-Dist: pytest (>=6) ; extra == 'testing'
-Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
-Requires-Dist: pytest-flake8 ; extra == 'testing'
-Requires-Dist: pytest-cov ; extra == 'testing'
-Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing'
-Requires-Dist: mock ; extra == 'testing'
-Requires-Dist: flake8-2020 ; extra == 'testing'
-Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing'
-Requires-Dist: pytest-virtualenv (>=1.2.7) ; extra == 'testing'
-Requires-Dist: wheel ; extra == 'testing'
-Requires-Dist: paver ; extra == 'testing'
-Requires-Dist: pip (>=19.1) ; extra == 'testing'
-Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing'
-Requires-Dist: pytest-xdist ; extra == 'testing'
-Requires-Dist: sphinx ; extra == 'testing'
-Requires-Dist: jaraco.path (>=3.2.0) ; extra == 'testing'
-Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
-Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing'
-
-.. image:: https://raw.githubusercontent.com/pypa/setuptools/main/docs/images/banner-640x320.svg
- :align: center
-
-|
-
-.. image:: https://img.shields.io/pypi/v/setuptools.svg
- :target: `PyPI link`_
-
-.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
- :target: `PyPI link`_
-
-.. _PyPI link: https://pypi.org/project/setuptools
-
-.. image:: https://github.com/pypa/setuptools/workflows/tests/badge.svg
- :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22
- :alt: tests
-
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
-.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
- :target: https://setuptools.pypa.io
-
-.. image:: https://img.shields.io/badge/skeleton-2021-informational
- :target: https://blog.jaraco.com/skeleton
-
-.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
- :target: https://codecov.io/gh/pypa/setuptools
-
-.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
- :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
-
-See the `Installation Instructions
-<https://packaging.python.org/installing/>`_ in the Python Packaging
-User's Guide for instructions on installing, upgrading, and uninstalling
-Setuptools.
-
-Questions and comments should be directed to the `distutils-sig
-mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
-Bug reports and especially tested patches may be
-submitted directly to the `bug tracker
-<https://github.com/pypa/setuptools/issues>`_.
-
-
-Code of Conduct
-===============
-
-Everyone interacting in the setuptools project's codebases, issue trackers,
-chat rooms, and mailing lists is expected to follow the
-`PSF Code of Conduct <https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md>`_.
-
-
-For Enterprise
-==============
-
-Available as part of the Tidelift Subscription.
-
-Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
-
-`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=referral&utm_campaign=github>`_.
-
-
-Security Contact
-================
-
-To report a security vulnerability, please use the
-`Tidelift security contact <https://tidelift.com/security>`_.
-Tidelift will coordinate the fix and disclosure.
-
-
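
The Requires-Dist lines in the METADATA above use PEP 508 syntax: an optional version specifier plus an environment marker that ties the dependency to an extra. A minimal sketch of parsing one such line, assuming the third-party `packaging` library is available:

    # Parse a PEP 508 requirement like the Requires-Dist entries above.
    from packaging.requirements import Requirement

    req = Requirement('pytest>=6; extra == "testing"')
    print(req.name)                                   # pytest
    print(req.specifier.contains("6.2.5"))            # True
    print(req.marker.evaluate({"extra": "testing"}))  # True
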
diff --git a/contrib/python/setuptools/py3/.dist-info/entry_points.txt b/contrib/python/setuptools/py3/.dist-info/entry_points.txt
deleted file mode 100644
index 9466bf63201..00000000000
--- a/contrib/python/setuptools/py3/.dist-info/entry_points.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-[distutils.commands]
-alias = setuptools.command.alias:alias
-bdist_egg = setuptools.command.bdist_egg:bdist_egg
-bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
-build_clib = setuptools.command.build_clib:build_clib
-build_ext = setuptools.command.build_ext:build_ext
-build_py = setuptools.command.build_py:build_py
-develop = setuptools.command.develop:develop
-dist_info = setuptools.command.dist_info:dist_info
-easy_install = setuptools.command.easy_install:easy_install
-egg_info = setuptools.command.egg_info:egg_info
-install = setuptools.command.install:install
-install_egg_info = setuptools.command.install_egg_info:install_egg_info
-install_lib = setuptools.command.install_lib:install_lib
-install_scripts = setuptools.command.install_scripts:install_scripts
-rotate = setuptools.command.rotate:rotate
-saveopts = setuptools.command.saveopts:saveopts
-sdist = setuptools.command.sdist:sdist
-setopt = setuptools.command.setopt:setopt
-test = setuptools.command.test:test
-upload_docs = setuptools.command.upload_docs:upload_docs
-
-[distutils.setup_keywords]
-dependency_links = setuptools.dist:assert_string_list
-eager_resources = setuptools.dist:assert_string_list
-entry_points = setuptools.dist:check_entry_points
-exclude_package_data = setuptools.dist:check_package_data
-extras_require = setuptools.dist:check_extras
-include_package_data = setuptools.dist:assert_bool
-install_requires = setuptools.dist:check_requirements
-namespace_packages = setuptools.dist:check_nsp
-package_data = setuptools.dist:check_package_data
-packages = setuptools.dist:check_packages
-python_requires = setuptools.dist:check_specifier
-setup_requires = setuptools.dist:check_requirements
-test_loader = setuptools.dist:check_importable
-test_runner = setuptools.dist:check_importable
-test_suite = setuptools.dist:check_test_suite
-tests_require = setuptools.dist:check_requirements
-use_2to3 = setuptools.dist:invalid_unless_false
-zip_safe = setuptools.dist:assert_bool
-
-[egg_info.writers]
-PKG-INFO = setuptools.command.egg_info:write_pkg_info
-dependency_links.txt = setuptools.command.egg_info:overwrite_arg
-depends.txt = setuptools.command.egg_info:warn_depends_obsolete
-eager_resources.txt = setuptools.command.egg_info:overwrite_arg
-entry_points.txt = setuptools.command.egg_info:write_entries
-namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
-requires.txt = setuptools.command.egg_info:write_requirements
-top_level.txt = setuptools.command.egg_info:write_toplevel_names
-
-[setuptools.finalize_distribution_options]
-keywords = setuptools.dist:Distribution._finalize_setup_keywords
-parent_finalize = setuptools.dist:_Distribution.finalize_options
-
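
Each entry point above maps a name to a `module:attr` reference that consumers resolve lazily at runtime. A minimal sketch of resolving one of them with the stdlib importlib.metadata, assuming setuptools is installed and Python 3.10+ (for the group/name filtering shown):

    # Resolve the `sdist` command entry point listed above.
    from importlib.metadata import entry_points

    (ep,) = entry_points(group="distutils.commands", name="sdist")
    sdist_cmd = ep.load()  # imports setuptools.command.sdist, returns `sdist`
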
diff --git a/contrib/python/setuptools/py3/.dist-info/top_level.txt b/contrib/python/setuptools/py3/.dist-info/top_level.txt
deleted file mode 100644
index b5ac1070294..00000000000
--- a/contrib/python/setuptools/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-_distutils_hack
-pkg_resources
-setuptools
diff --git a/contrib/python/setuptools/py3/.yandex_meta/yamaker.yaml b/contrib/python/setuptools/py3/.yandex_meta/yamaker.yaml
deleted file mode 100644
index 6c4ffe73dc5..00000000000
--- a/contrib/python/setuptools/py3/.yandex_meta/yamaker.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-additional_requirements:
-- library/python/resource
-mark_as_sources:
-- setuptools/command/test.py
diff --git a/contrib/python/setuptools/py3/LICENSE b/contrib/python/setuptools/py3/LICENSE
deleted file mode 100644
index 353924be0e5..00000000000
--- a/contrib/python/setuptools/py3/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright Jason R. Coombs
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
diff --git a/contrib/python/setuptools/py3/README.rst b/contrib/python/setuptools/py3/README.rst
deleted file mode 100644
index fab41118869..00000000000
--- a/contrib/python/setuptools/py3/README.rst
+++ /dev/null
@@ -1,69 +0,0 @@
-.. image:: https://raw.githubusercontent.com/pypa/setuptools/main/docs/images/banner-640x320.svg
- :align: center
-
-|
-
-.. image:: https://img.shields.io/pypi/v/setuptools.svg
- :target: `PyPI link`_
-
-.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
- :target: `PyPI link`_
-
-.. _PyPI link: https://pypi.org/project/setuptools
-
-.. image:: https://github.com/pypa/setuptools/workflows/tests/badge.svg
- :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22
- :alt: tests
-
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
-.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
- :target: https://setuptools.pypa.io
-
-.. image:: https://img.shields.io/badge/skeleton-2021-informational
- :target: https://blog.jaraco.com/skeleton
-
-.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
- :target: https://codecov.io/gh/pypa/setuptools
-
-.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
- :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
-
-See the `Installation Instructions
-<https://packaging.python.org/installing/>`_ in the Python Packaging
-User's Guide for instructions on installing, upgrading, and uninstalling
-Setuptools.
-
-Questions and comments should be directed to the `distutils-sig
-mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
-Bug reports and especially tested patches may be
-submitted directly to the `bug tracker
-<https://github.com/pypa/setuptools/issues>`_.
-
-
-Code of Conduct
-===============
-
-Everyone interacting in the setuptools project's codebases, issue trackers,
-chat rooms, and mailing lists is expected to follow the
-`PSF Code of Conduct <https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md>`_.
-
-
-For Enterprise
-==============
-
-Available as part of the Tidelift Subscription.
-
-Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
-
-`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=referral&utm_campaign=github>`_.
-
-
-Security Contact
-================
-
-To report a security vulnerability, please use the
-`Tidelift security contact <https://tidelift.com/security>`_.
-Tidelift will coordinate the fix and disclosure.
diff --git a/contrib/python/setuptools/py3/_distutils_hack/__init__.py b/contrib/python/setuptools/py3/_distutils_hack/__init__.py
deleted file mode 100644
index f707416286b..00000000000
--- a/contrib/python/setuptools/py3/_distutils_hack/__init__.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import sys
-import os
-import re
-import importlib
-import warnings
-
-
-is_pypy = '__pypy__' in sys.builtin_module_names
-
-
-warnings.filterwarnings('ignore',
- r'.+ distutils\b.+ deprecated',
- DeprecationWarning)
-
-
-def warn_distutils_present():
- if 'distutils' not in sys.modules:
- return
- if is_pypy and sys.version_info < (3, 7):
- # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
- # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
- return
- warnings.warn(
- "Distutils was imported before Setuptools, but importing Setuptools "
- "also replaces the `distutils` module in `sys.modules`. This may lead "
- "to undesirable behaviors or errors. To avoid these issues, avoid "
- "using distutils directly, ensure that setuptools is installed in the "
- "traditional way (e.g. not an editable install), and/or make sure "
- "that setuptools is always imported before distutils.")
-
-
-def clear_distutils():
- if 'distutils' not in sys.modules:
- return
- warnings.warn("Setuptools is replacing distutils.")
- mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
- for name in mods:
- del sys.modules[name]
-
-
-def enabled():
- """
- Allow selection of distutils by environment variable.
- """
- which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
- return which == 'local'
-
-
-def ensure_local_distutils():
- clear_distutils()
-
- # With the DistutilsMetaFinder in place,
- # perform an import to cause distutils to be
- # loaded from setuptools._distutils. Ref #2906.
- add_shim()
- importlib.import_module('distutils')
- remove_shim()
-
- # check that submodules load as expected
- core = importlib.import_module('distutils.core')
- assert '_distutils' in core.__file__, core.__file__
-
-
-def do_override():
- """
- Ensure that the local copy of distutils is preferred over stdlib.
-
- See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
- for more motivation.
- """
- if enabled():
- warn_distutils_present()
- ensure_local_distutils()
-
-
-class DistutilsMetaFinder:
- def find_spec(self, fullname, path, target=None):
- if path is not None:
- return
-
- method_name = 'spec_for_{fullname}'.format(**locals())
- method = getattr(self, method_name, lambda: None)
- return method()
-
- def spec_for_distutils(self):
- import importlib.abc
- import importlib.util
-
- class DistutilsLoader(importlib.abc.Loader):
-
- def create_module(self, spec):
- return importlib.import_module('setuptools._distutils')
-
- def exec_module(self, module):
- pass
-
- return importlib.util.spec_from_loader('distutils', DistutilsLoader())
-
- def spec_for_pip(self):
- """
- Ensure stdlib distutils when running under pip.
- See pypa/pip#8761 for rationale.
- """
- if self.pip_imported_during_build():
- return
- clear_distutils()
- self.spec_for_distutils = lambda: None
-
- @staticmethod
- def pip_imported_during_build():
- """
- Detect if pip is being imported in a build script. Ref #2355.
- """
- import traceback
- return any(
- frame.f_globals['__file__'].endswith('setup.py')
- for frame, line in traceback.walk_stack(None)
- )
-
-
-DISTUTILS_FINDER = DistutilsMetaFinder()
-
-
-def add_shim():
- sys.meta_path.insert(0, DISTUTILS_FINDER)
-
-
-def remove_shim():
- try:
- sys.meta_path.remove(DISTUTILS_FINDER)
- except ValueError:
-        pass
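
The finder above is how `import distutils` gets redirected to setuptools' bundled copy: a shim on sys.meta_path answers the top-level `distutils` import and hands back `setuptools._distutils`. A minimal sketch of exercising it, assuming a setuptools build that ships this hack; the behavior is gated by the SETUPTOOLS_USE_DISTUTILS variable read in enabled():

    import os
    os.environ["SETUPTOOLS_USE_DISTUTILS"] = "local"  # opt in before the check

    import _distutils_hack
    _distutils_hack.do_override()  # installs the finder, re-imports distutils

    import distutils.core
    assert "_distutils" in distutils.core.__file__  # local copy, not stdlib
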
diff --git a/contrib/python/setuptools/py3/_distutils_hack/override.py b/contrib/python/setuptools/py3/_distutils_hack/override.py
deleted file mode 100644
index 2cc433a4a55..00000000000
--- a/contrib/python/setuptools/py3/_distutils_hack/override.py
+++ /dev/null
@@ -1 +0,0 @@
-__import__('_distutils_hack').do_override()
diff --git a/contrib/python/setuptools/py3/patches/01-arcadia.patch b/contrib/python/setuptools/py3/patches/01-arcadia.patch
deleted file mode 100644
index 6f873d30e54..00000000000
--- a/contrib/python/setuptools/py3/patches/01-arcadia.patch
+++ /dev/null
@@ -1,93 +0,0 @@
---- contrib/python/setuptools/py3/pkg_resources/__init__.py (index)
-+++ contrib/python/setuptools/py3/pkg_resources/__init__.py (working tree)
-@@ -3229,6 +3229,90 @@ def _mkstemp(*args, **kw):
- os.open = old_open
-
-
-+# Yandex resource support
-+from __res import Y_PYTHON_SOURCE_ROOT, ResourceImporter, executable
-+from library.python import resource
-+
-+
-+class ResProvider(EmptyProvider):
-+ _resource_fs = {}
-+
-+ def __init__(self, prefix):
-+ if hasattr(prefix, '__file__'):
-+ key = prefix.__file__.rsplit('/', 1)[0]
-+ self.module_path = 'resfs/file/{}/'.format(key)
-+            # Metadata lives one level above the package itself
-+ key = key.rsplit('/', 1)[0]
-+ self.egg_info = 'resfs/file/{}/.dist-info/'.format(key)
-+ else:
-+            # We only get here from ResDistribution, which works with
-+            # metadata only, so self.module_path is not used
-+ self.egg_info = prefix
-+
-+ @staticmethod
-+ def from_module(module):
-+ if Y_PYTHON_SOURCE_ROOT:
-+ return DefaultProvider(module)
-+ else:
-+ return ResProvider(module)
-+
-+ def _fn(self, base, resource_name):
-+ return base + resource_name
-+
-+ def _has(self, path):
-+ return resource.find(path) is not None
-+
-+ def _get(self, path):
-+ result = resource.find(path)
-+ if result is None:
-+ raise IOError(path)
-+ return result
-+
-+ @classmethod
-+ def _init_resource_fs(cls):
-+ for path in resource.iterkeys(b'resfs/file/'):
-+ path_str = path.decode('utf-8')
-+ components = path_str.split('/')
-+ for l in range(len(components)):
-+ subpath = os.path.normpath('/'.join(components[:l]))
-+ cls._resource_fs.setdefault(subpath, set()).add(components[l])
-+
-+ def __lookup(self, path):
-+ if not self._resource_fs:
-+ self._init_resource_fs()
-+ path = os.path.normpath(path)
-+ return self._resource_fs.get(path)
-+
-+ def _listdir(self, path):
-+ result = self.__lookup(path)
-+ if result is None:
-+ return []
-+ return list(result)
-+
-+ def _isdir(self, path):
-+ return bool(self.__lookup(path))
-+
-+
-+class ResDistribution(DistInfoDistribution):
-+ def __init__(self, prefix):
-+ super(ResDistribution, self).__init__(
-+ location=executable,
-+ metadata=ResProvider(prefix),
-+ precedence=BINARY_DIST,
-+ )
-+ self.project_name = self._parsed_pkg_info.get('Name', self.project_name)
-+
-+
-+def find_in_res(importer, path_item, only=False):
-+ for key in resource.iterkeys():
-+ if key.endswith('.dist-info/METADATA') and not key.startswith('resfs/src/'):
-+ yield ResDistribution(key[:-8])
-+
-+
-+register_finder(ResourceImporter, find_in_res)
-+register_loader_type(ResourceImporter, ResProvider.from_module)
-+
-+
- # Silence the PEP440Warning by default, so that end users don't get hit by it
- # randomly just because they use pkg_resources. We want to append the rule
- # because we want earlier uses of filterwarnings to take precedence over this
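
The patch above hooks Arcadia's ResourceImporter into pkg_resources through its two public extension points: register_finder() teaches find_distributions() how to enumerate distributions behind a custom importer, and register_loader_type() maps modules loaded by that importer to a resource provider. A generic sketch of the same registration pattern; MyImporter, MyProvider, and find_in_my_fs are hypothetical stand-ins, not Arcadia's actual classes:

    import pkg_resources

    class MyImporter:
        """Hypothetical PEP 302 importer type."""

    class MyProvider(pkg_resources.EmptyProvider):
        """Hypothetical provider serving resources for MyImporter modules."""
        def __init__(self, module):
            super().__init__()
            self.module = module

    def find_in_my_fs(importer, path_item, only=False):
        # Yield pkg_resources.Distribution objects found under path_item.
        return iter(())

    pkg_resources.register_finder(MyImporter, find_in_my_fs)
    pkg_resources.register_loader_type(MyImporter, MyProvider)
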
diff --git a/contrib/python/setuptools/py3/pkg_resources/__init__.py b/contrib/python/setuptools/py3/pkg_resources/__init__.py
deleted file mode 100644
index 6b947a49a79..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/__init__.py
+++ /dev/null
@@ -1,3387 +0,0 @@
-"""
-Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof. The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is. Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files. It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-"""
-
-import sys
-import os
-import io
-import time
-import re
-import types
-import zipfile
-import zipimport
-import warnings
-import stat
-import functools
-import pkgutil
-import operator
-import platform
-import collections
-import plistlib
-import email.parser
-import errno
-import tempfile
-import textwrap
-import itertools
-import inspect
-import ntpath
-import posixpath
-import importlib
-from pkgutil import get_importer
-
-try:
- import _imp
-except ImportError:
- # Python 3.2 compatibility
- import imp as _imp
-
-try:
- FileExistsError
-except NameError:
- FileExistsError = OSError
-
-# capture these to bypass sandboxing
-from os import utime
-try:
- from os import mkdir, rename, unlink
- WRITE_SUPPORT = True
-except ImportError:
- # no write support, probably under GAE
- WRITE_SUPPORT = False
-
-from os import open as os_open
-from os.path import isdir, split
-
-try:
- import importlib.machinery as importlib_machinery
- # access attribute to force import under delayed import mechanisms.
- importlib_machinery.__name__
-except ImportError:
- importlib_machinery = None
-
-from pkg_resources.extern import appdirs
-from pkg_resources.extern import packaging
-__import__('pkg_resources.extern.packaging.version')
-__import__('pkg_resources.extern.packaging.specifiers')
-__import__('pkg_resources.extern.packaging.requirements')
-__import__('pkg_resources.extern.packaging.markers')
-
-if sys.version_info < (3, 5):
- raise RuntimeError("Python 3.5 or later is required")
-
-# declare some globals that will be defined later to
-# satisfy the linters.
-require = None
-working_set = None
-add_activation_listener = None
-resources_stream = None
-cleanup_resources = None
-resource_dir = None
-resource_stream = None
-set_extraction_path = None
-resource_isdir = None
-resource_string = None
-iter_entry_points = None
-resource_listdir = None
-resource_filename = None
-resource_exists = None
-_distribution_finders = None
-_namespace_handlers = None
-_namespace_packages = None
-
-
-class PEP440Warning(RuntimeWarning):
- """
- Used when there is an issue with a version or specifier not complying with
- PEP 440.
- """
-
-
-def parse_version(v):
- try:
- return packaging.version.Version(v)
- except packaging.version.InvalidVersion:
- warnings.warn(
- f"{v} is an invalid version and will not be supported in "
- "a future release",
- PkgResourcesDeprecationWarning,
- )
- return packaging.version.LegacyVersion(v)
-
-
-_state_vars = {}
-
-
-def _declare_state(vartype, **kw):
- globals().update(kw)
- _state_vars.update(dict.fromkeys(kw, vartype))
-
-
-def __getstate__():
- state = {}
- g = globals()
- for k, v in _state_vars.items():
- state[k] = g['_sget_' + v](g[k])
- return state
-
-
-def __setstate__(state):
- g = globals()
- for k, v in state.items():
- g['_sset_' + _state_vars[k]](k, g[k], v)
- return state
-
-
-def _sget_dict(val):
- return val.copy()
-
-
-def _sset_dict(key, ob, state):
- ob.clear()
- ob.update(state)
-
-
-def _sget_object(val):
- return val.__getstate__()
-
-
-def _sset_object(key, ob, state):
- ob.__setstate__(state)
-
-
-_sget_none = _sset_none = lambda *args: None
-
-
-def get_supported_platform():
- """Return this platform's maximum compatible version.
-
- distutils.util.get_platform() normally reports the minimum version
- of macOS that would be required to *use* extensions produced by
- distutils. But what we want when checking compatibility is to know the
- version of macOS that we are *running*. To allow usage of packages that
- explicitly require a newer version of macOS, we must also know the
- current version of the OS.
-
- If this condition occurs for any other platform with a version in its
- platform strings, this function should be extended accordingly.
- """
- plat = get_build_platform()
- m = macosVersionString.match(plat)
- if m is not None and sys.platform == "darwin":
- try:
- plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
- except ValueError:
- # not macOS
- pass
- return plat
-
-
-__all__ = [
- # Basic resource access and distribution/entry point discovery
- 'require', 'run_script', 'get_provider', 'get_distribution',
- 'load_entry_point', 'get_entry_map', 'get_entry_info',
- 'iter_entry_points',
- 'resource_string', 'resource_stream', 'resource_filename',
- 'resource_listdir', 'resource_exists', 'resource_isdir',
-
- # Environmental control
- 'declare_namespace', 'working_set', 'add_activation_listener',
- 'find_distributions', 'set_extraction_path', 'cleanup_resources',
- 'get_default_cache',
-
- # Primary implementation classes
- 'Environment', 'WorkingSet', 'ResourceManager',
- 'Distribution', 'Requirement', 'EntryPoint',
-
- # Exceptions
- 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
- 'UnknownExtra', 'ExtractionError',
-
- # Warnings
- 'PEP440Warning',
-
- # Parsing functions and string utilities
- 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
- 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
- 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
-
- # filesystem utilities
- 'ensure_directory', 'normalize_path',
-
- # Distribution "precedence" constants
- 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
-
- # "Provider" interfaces, implementations, and registration/lookup APIs
- 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
- 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
- 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
- 'register_finder', 'register_namespace_handler', 'register_loader_type',
- 'fixup_namespace_packages', 'get_importer',
-
- # Warnings
- 'PkgResourcesDeprecationWarning',
-
- # Deprecated/backward compatibility only
- 'run_main', 'AvailableDistributions',
-]
-
-
-class ResolutionError(Exception):
- """Abstract base for dependency resolution errors"""
-
- def __repr__(self):
- return self.__class__.__name__ + repr(self.args)
-
-
-class VersionConflict(ResolutionError):
- """
- An already-installed version conflicts with the requested version.
-
- Should be initialized with the installed Distribution and the requested
- Requirement.
- """
-
- _template = "{self.dist} is installed but {self.req} is required"
-
- @property
- def dist(self):
- return self.args[0]
-
- @property
- def req(self):
- return self.args[1]
-
- def report(self):
- return self._template.format(**locals())
-
- def with_context(self, required_by):
- """
- If required_by is non-empty, return a version of self that is a
- ContextualVersionConflict.
- """
- if not required_by:
- return self
- args = self.args + (required_by,)
- return ContextualVersionConflict(*args)
-
-
-class ContextualVersionConflict(VersionConflict):
- """
- A VersionConflict that accepts a third parameter, the set of the
- requirements that required the installed Distribution.
- """
-
- _template = VersionConflict._template + ' by {self.required_by}'
-
- @property
- def required_by(self):
- return self.args[2]
-
-
-class DistributionNotFound(ResolutionError):
- """A requested distribution was not found"""
-
- _template = ("The '{self.req}' distribution was not found "
- "and is required by {self.requirers_str}")
-
- @property
- def req(self):
- return self.args[0]
-
- @property
- def requirers(self):
- return self.args[1]
-
- @property
- def requirers_str(self):
- if not self.requirers:
- return 'the application'
- return ', '.join(self.requirers)
-
- def report(self):
- return self._template.format(**locals())
-
- def __str__(self):
- return self.report()
-
-
-class UnknownExtra(ResolutionError):
- """Distribution doesn't have an "extra feature" of the given name"""
-
-
-_provider_factories = {}
-
-PY_MAJOR = '{}.{}'.format(*sys.version_info)
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-
-def register_loader_type(loader_type, provider_factory):
- """Register `provider_factory` to make providers for `loader_type`
-
- `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
- and `provider_factory` is a function that, passed a *module* object,
- returns an ``IResourceProvider`` for that module.
- """
- _provider_factories[loader_type] = provider_factory
-
-
-def get_provider(moduleOrReq):
- """Return an IResourceProvider for the named module or requirement"""
- if isinstance(moduleOrReq, Requirement):
- return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
- try:
- module = sys.modules[moduleOrReq]
- except KeyError:
- __import__(moduleOrReq)
- module = sys.modules[moduleOrReq]
- loader = getattr(module, '__loader__', None)
- return _find_adapter(_provider_factories, loader)(module)
-
-
-def _macos_vers(_cache=[]):
- if not _cache:
- version = platform.mac_ver()[0]
- # fallback for MacPorts
- if version == '':
- plist = '/System/Library/CoreServices/SystemVersion.plist'
- if os.path.exists(plist):
- if hasattr(plistlib, 'readPlist'):
- plist_content = plistlib.readPlist(plist)
- if 'ProductVersion' in plist_content:
- version = plist_content['ProductVersion']
-
- _cache.append(version.split('.'))
- return _cache[0]
-
-
-def _macos_arch(machine):
- return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-
-
-def get_build_platform():
- """Return this platform's string for platform-specific distributions
-
- XXX Currently this is the same as ``distutils.util.get_platform()``, but it
- needs some hacks for Linux and macOS.
- """
- from sysconfig import get_platform
-
- plat = get_platform()
- if sys.platform == "darwin" and not plat.startswith('macosx-'):
- try:
- version = _macos_vers()
- machine = os.uname()[4].replace(" ", "_")
- return "macosx-%d.%d-%s" % (
- int(version[0]), int(version[1]),
- _macos_arch(machine),
- )
- except ValueError:
- # if someone is running a non-Mac darwin system, this will fall
- # through to the default implementation
- pass
- return plat
-
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-# XXX backward compat
-get_platform = get_build_platform
-
-
-def compatible_platforms(provided, required):
- """Can code for the `provided` platform run on the `required` platform?
-
- Returns true if either platform is ``None``, or the platforms are equal.
-
- XXX Needs compatibility checks for Linux and other unixy OSes.
- """
- if provided is None or required is None or provided == required:
- # easy case
- return True
-
- # macOS special cases
- reqMac = macosVersionString.match(required)
- if reqMac:
- provMac = macosVersionString.match(provided)
-
- # is this a Mac package?
- if not provMac:
- # this is backwards compatibility for packages built before
- # setuptools 0.6. All packages built after this point will
- # use the new macOS designation.
- provDarwin = darwinVersionString.match(provided)
- if provDarwin:
- dversion = int(provDarwin.group(1))
- macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
- if dversion == 7 and macosversion >= "10.3" or \
- dversion == 8 and macosversion >= "10.4":
- return True
- # egg isn't macOS or legacy darwin
- return False
-
- # are they the same major version and machine type?
- if provMac.group(1) != reqMac.group(1) or \
- provMac.group(3) != reqMac.group(3):
- return False
-
- # is the required OS major update >= the provided one?
- if int(provMac.group(2)) > int(reqMac.group(2)):
- return False
-
- return True
-
- # XXX Linux and other platforms' special cases should go here
- return False
-
-
-def run_script(dist_spec, script_name):
- """Locate distribution `dist_spec` and run its `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- require(dist_spec)[0].run_script(script_name, ns)
-
-
-# backward compatibility
-run_main = run_script
-
-
-def get_distribution(dist):
- """Return a current distribution object for a Requirement or string"""
- if isinstance(dist, str):
- dist = Requirement.parse(dist)
- if isinstance(dist, Requirement):
- dist = get_provider(dist)
- if not isinstance(dist, Distribution):
- raise TypeError("Expected string, Requirement, or Distribution", dist)
- return dist
-
-
-def load_entry_point(dist, group, name):
- """Return `name` entry point of `group` for `dist` or raise ImportError"""
- return get_distribution(dist).load_entry_point(group, name)
-
-
-def get_entry_map(dist, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- return get_distribution(dist).get_entry_map(group)
-
-
-def get_entry_info(dist, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return get_distribution(dist).get_entry_info(group, name)
-
-
-class IMetadataProvider:
- def has_metadata(name):
- """Does the package's distribution contain the named metadata?"""
-
- def get_metadata(name):
- """The named metadata resource as a string"""
-
- def get_metadata_lines(name):
- """Yield named metadata resource as list of non-blank non-comment lines
-
- Leading and trailing whitespace is stripped from each line, and lines
- with ``#`` as the first non-blank character are omitted."""
-
- def metadata_isdir(name):
- """Is the named metadata a directory? (like ``os.path.isdir()``)"""
-
- def metadata_listdir(name):
- """List of metadata names in the directory (like ``os.listdir()``)"""
-
- def run_script(script_name, namespace):
- """Execute the named script in the supplied namespace dictionary"""
-
-
-class IResourceProvider(IMetadataProvider):
- """An object that provides access to package resources"""
-
- def get_resource_filename(manager, resource_name):
- """Return a true filesystem path for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_stream(manager, resource_name):
- """Return a readable file-like object for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_string(manager, resource_name):
- """Return a string containing the contents of `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def has_resource(resource_name):
- """Does the package contain the named resource?"""
-
- def resource_isdir(resource_name):
- """Is the named resource a directory? (like ``os.path.isdir()``)"""
-
- def resource_listdir(resource_name):
- """List of resource names in the directory (like ``os.listdir()``)"""
-
-
-class WorkingSet:
- """A collection of active distributions on sys.path (or a similar list)"""
-
- def __init__(self, entries=None):
- """Create working set from list of path entries (default=sys.path)"""
- self.entries = []
- self.entry_keys = {}
- self.by_key = {}
- self.callbacks = []
-
- if entries is None:
- entries = sys.path
-
- for entry in entries:
- self.add_entry(entry)
-
- @classmethod
- def _build_master(cls):
- """
- Prepare the master working set.
- """
- ws = cls()
- try:
- from __main__ import __requires__
- except ImportError:
- # The main program does not list any requirements
- return ws
-
- # ensure the requirements are met
- try:
- ws.require(__requires__)
- except VersionConflict:
- return cls._build_from_requirements(__requires__)
-
- return ws
-
- @classmethod
- def _build_from_requirements(cls, req_spec):
- """
- Build a working set from a requirement spec. Rewrites sys.path.
- """
- # try it without defaults already on sys.path
- # by starting with an empty path
- ws = cls([])
- reqs = parse_requirements(req_spec)
- dists = ws.resolve(reqs, Environment())
- for dist in dists:
- ws.add(dist)
-
- # add any missing entries from sys.path
- for entry in sys.path:
- if entry not in ws.entries:
- ws.add_entry(entry)
-
- # then copy back to sys.path
- sys.path[:] = ws.entries
- return ws
-
- def add_entry(self, entry):
- """Add a path item to ``.entries``, finding any distributions on it
-
- ``find_distributions(entry, True)`` is used to find distributions
- corresponding to the path entry, and they are added. `entry` is
- always appended to ``.entries``, even if it is already present.
- (This is because ``sys.path`` can contain the same value more than
- once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
- equal ``sys.path``.)
- """
- self.entry_keys.setdefault(entry, [])
- self.entries.append(entry)
- for dist in find_distributions(entry, True):
- self.add(dist, entry, False)
-
- def __contains__(self, dist):
- """True if `dist` is the active distribution for its project"""
- return self.by_key.get(dist.key) == dist
-
- def find(self, req):
- """Find a distribution matching requirement `req`
-
- If there is an active distribution for the requested project, this
- returns it as long as it meets the version requirement specified by
- `req`. But, if there is an active distribution for the project and it
- does *not* meet the `req` requirement, ``VersionConflict`` is raised.
- If there is no active distribution for the requested project, ``None``
- is returned.
- """
- dist = self.by_key.get(req.key)
- if dist is not None and dist not in req:
- # XXX add more info
- raise VersionConflict(dist, req)
- return dist
-
- def iter_entry_points(self, group, name=None):
- """Yield entry point objects from `group` matching `name`
-
- If `name` is None, yields all entry points in `group` from all
- distributions in the working set, otherwise only ones matching
- both `group` and `name` are yielded (in distribution order).
- """
- return (
- entry
- for dist in self
- for entry in dist.get_entry_map(group).values()
- if name is None or name == entry.name
- )
-
- def run_script(self, requires, script_name):
- """Locate distribution for `requires` and run `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- self.require(requires)[0].run_script(script_name, ns)
-
- def __iter__(self):
- """Yield distributions for non-duplicate projects in the working set
-
- The yield order is the order in which the items' path entries were
- added to the working set.
- """
- seen = {}
- for item in self.entries:
- if item not in self.entry_keys:
- # workaround a cache issue
- continue
-
- for key in self.entry_keys[item]:
- if key not in seen:
- seen[key] = 1
- yield self.by_key[key]
-
- def add(self, dist, entry=None, insert=True, replace=False):
- """Add `dist` to working set, associated with `entry`
-
- If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
- On exit from this routine, `entry` is added to the end of the working
- set's ``.entries`` (if it wasn't already present).
-
- `dist` is only added to the working set if it's for a project that
- doesn't already have a distribution in the set, unless `replace=True`.
- If it's added, any callbacks registered with the ``subscribe()`` method
- will be called.
- """
- if insert:
- dist.insert_on(self.entries, entry, replace=replace)
-
- if entry is None:
- entry = dist.location
- keys = self.entry_keys.setdefault(entry, [])
- keys2 = self.entry_keys.setdefault(dist.location, [])
- if not replace and dist.key in self.by_key:
- # ignore hidden distros
- return
-
- self.by_key[dist.key] = dist
- if dist.key not in keys:
- keys.append(dist.key)
- if dist.key not in keys2:
- keys2.append(dist.key)
- self._added_new(dist)
-
- # FIXME: 'WorkingSet.resolve' is too complex (11)
- def resolve(self, requirements, env=None, installer=None, # noqa: C901
- replace_conflicting=False, extras=None):
- """List all distributions needed to (recursively) meet `requirements`
-
- `requirements` must be a sequence of ``Requirement`` objects. `env`,
- if supplied, should be an ``Environment`` instance. If
- not supplied, it defaults to all distributions available within any
- entry or distribution in the working set. `installer`, if supplied,
- will be invoked with each requirement that cannot be met by an
- already-installed distribution; it should return a ``Distribution`` or
- ``None``.
-
- Unless `replace_conflicting=True`, raises a VersionConflict exception
- if
- any requirements are found on the path that have the correct name but
- the wrong version. Otherwise, if an `installer` is supplied it will be
- invoked to obtain the correct version of the requirement and activate
- it.
-
- `extras` is a list of the extras to be used with these requirements.
- This is important because extra requirements may look like `my_req;
- extra = "my_extra"`, which would otherwise be interpreted as a purely
- optional requirement. Instead, we want to be able to assert that these
- requirements are truly required.
- """
-
- # set up the stack
- requirements = list(requirements)[::-1]
- # set of processed requirements
- processed = {}
- # key -> dist
- best = {}
- to_activate = []
-
- req_extras = _ReqExtras()
-
- # Mapping of requirement to set of distributions that required it;
- # useful for reporting info about conflicts.
- required_by = collections.defaultdict(set)
-
- while requirements:
- # process dependencies breadth-first
- req = requirements.pop(0)
- if req in processed:
- # Ignore cyclic or redundant dependencies
- continue
-
- if not req_extras.markers_pass(req, extras):
- continue
-
- dist = best.get(req.key)
- if dist is None:
- # Find the best distribution and add it to the map
- dist = self.by_key.get(req.key)
- if dist is None or (dist not in req and replace_conflicting):
- ws = self
- if env is None:
- if dist is None:
- env = Environment(self.entries)
- else:
- # Use an empty environment and workingset to avoid
- # any further conflicts with the conflicting
- # distribution
- env = Environment([])
- ws = WorkingSet([])
- dist = best[req.key] = env.best_match(
- req, ws, installer,
- replace_conflicting=replace_conflicting
- )
- if dist is None:
- requirers = required_by.get(req, None)
- raise DistributionNotFound(req, requirers)
- to_activate.append(dist)
- if dist not in req:
- # Oops, the "best" so far conflicts with a dependency
- dependent_req = required_by[req]
- raise VersionConflict(dist, req).with_context(dependent_req)
-
- # push the new requirements onto the stack
- new_requirements = dist.requires(req.extras)[::-1]
- requirements.extend(new_requirements)
-
- # Register the new requirements needed by req
- for new_requirement in new_requirements:
- required_by[new_requirement].add(req.project_name)
- req_extras[new_requirement] = req.extras
-
- processed[req] = True
-
- # return list of distros to activate
- return to_activate
-
- def find_plugins(
- self, plugin_env, full_env=None, installer=None, fallback=True):
- """Find all activatable distributions in `plugin_env`
-
- Example usage::
-
- distributions, errors = working_set.find_plugins(
- Environment(plugin_dirlist)
- )
- # add plugins+libs to sys.path
- map(working_set.add, distributions)
- # display errors
- print('Could not load', errors)
-
- The `plugin_env` should be an ``Environment`` instance that contains
- only distributions that are in the project's "plugin directory" or
- directories. The `full_env`, if supplied, should be an ``Environment``
- contains all currently-available distributions. If `full_env` is not
- supplied, one is created automatically from the ``WorkingSet`` this
- method is called on, which will typically mean that every directory on
- ``sys.path`` will be scanned for distributions.
-
- `installer` is a standard installer callback as used by the
- ``resolve()`` method. The `fallback` flag indicates whether we should
- attempt to resolve older versions of a plugin if the newest version
- cannot be resolved.
-
- This method returns a 2-tuple: (`distributions`, `error_info`), where
- `distributions` is a list of the distributions found in `plugin_env`
- that were loadable, along with any other distributions that are needed
- to resolve their dependencies. `error_info` is a dictionary mapping
- unloadable plugin distributions to an exception instance describing the
- error that occurred. Usually this will be a ``DistributionNotFound`` or
- ``VersionConflict`` instance.
- """
-
- plugin_projects = list(plugin_env)
- # scan project names in alphabetic order
- plugin_projects.sort()
-
- error_info = {}
- distributions = {}
-
- if full_env is None:
- env = Environment(self.entries)
- env += plugin_env
- else:
- env = full_env + plugin_env
-
- shadow_set = self.__class__([])
- # put all our entries in shadow_set
- list(map(shadow_set.add, self))
-
- for project_name in plugin_projects:
-
- for dist in plugin_env[project_name]:
-
- req = [dist.as_requirement()]
-
- try:
- resolvees = shadow_set.resolve(req, env, installer)
-
- except ResolutionError as v:
- # save error info
- error_info[dist] = v
- if fallback:
- # try the next older version of project
- continue
- else:
- # give up on this project, keep going
- break
-
- else:
- list(map(shadow_set.add, resolvees))
- distributions.update(dict.fromkeys(resolvees))
-
- # success, no need to try any more versions of this project
- break
-
- distributions = list(distributions)
- distributions.sort()
-
- return distributions, error_info
-
- def require(self, *requirements):
- """Ensure that distributions matching `requirements` are activated
-
- `requirements` must be a string or a (possibly-nested) sequence
- thereof, specifying the distributions and versions required. The
- return value is a sequence of the distributions that needed to be
- activated to fulfill the requirements; all relevant distributions are
- included, even if they were already activated in this working set.
- """
- needed = self.resolve(parse_requirements(requirements))
-
- for dist in needed:
- self.add(dist)
-
- return needed
-
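- # Illustrative sketch, not part of the original file: activating a
- # hypothetical project and its dependencies in one call.
- #
- #     from pkg_resources import working_set
- #
- #     for dist in working_set.require('mypkg>=1.0'):
- #         print(dist.project_name, dist.version)
-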
- def subscribe(self, callback, existing=True):
- """Invoke `callback` for all distributions
-
- If `existing=True` (default),
- call on all existing ones, as well.
- """
- if callback in self.callbacks:
- return
- self.callbacks.append(callback)
- if not existing:
- return
- for dist in self:
- callback(dist)
-
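- # Illustrative sketch, not part of the original file: a subscriber is
- # replayed for already-active distributions because existing=True.
- #
- #     from pkg_resources import working_set
- #
- #     working_set.subscribe(lambda dist: print('activated', dist))
-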
- def _added_new(self, dist):
- for callback in self.callbacks:
- callback(dist)
-
- def __getstate__(self):
- return (
- self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
- self.callbacks[:]
- )
-
- def __setstate__(self, e_k_b_c):
- entries, keys, by_key, callbacks = e_k_b_c
- self.entries = entries[:]
- self.entry_keys = keys.copy()
- self.by_key = by_key.copy()
- self.callbacks = callbacks[:]
-
-
-class _ReqExtras(dict):
- """
- Map each requirement to the extras that demanded it.
- """
-
- def markers_pass(self, req, extras=None):
- """
- Evaluate markers for req against each extra that
- demanded it.
-
- Return False if the req has a marker and fails
- evaluation. Otherwise, return True.
- """
- extra_evals = (
- req.marker.evaluate({'extra': extra})
- for extra in self.get(req, ()) + (extras or (None,))
- )
- return not req.marker or any(extra_evals)
-
-
-class Environment:
- """Searchable snapshot of distributions on a search path"""
-
- def __init__(
- self, search_path=None, platform=get_supported_platform(),
- python=PY_MAJOR):
- """Snapshot distributions available on a search path
-
- Any distributions found on `search_path` are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used.
-
- `platform` is an optional string specifying the name of the platform
- that platform-specific distributions must be compatible with. If
- unspecified, it defaults to the current platform. `python` is an
- optional string naming the desired version of Python (e.g. ``'3.6'``);
- it defaults to the current version.
-
- You may explicitly set `platform` (and/or `python`) to ``None`` if you
- wish to map *all* distributions, not just those compatible with the
- running platform or Python version.
- """
- self._distmap = {}
- self.platform = platform
- self.python = python
- self.scan(search_path)
-
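- # Illustrative sketch, not part of the original file: snapshotting a
- # hypothetical plugin directory and inspecting what it contains.
- #
- #     env = Environment(['/path/to/plugins'])
- #     for project in env:
- #         print(project, env[project][0].version)  # newest first
-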
- def can_add(self, dist):
- """Is distribution `dist` acceptable for this environment?
-
- The distribution must match the platform and python version
- requirements specified when this environment was created, or False
- is returned.
- """
- py_compat = (
- self.python is None
- or dist.py_version is None
- or dist.py_version == self.python
- )
- return py_compat and compatible_platforms(dist.platform, self.platform)
-
- def remove(self, dist):
- """Remove `dist` from the environment"""
- self._distmap[dist.key].remove(dist)
-
- def scan(self, search_path=None):
- """Scan `search_path` for distributions usable in this environment
-
- Any distributions found are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used. Only distributions conforming to
- the platform/python version defined at initialization are added.
- """
- if search_path is None:
- search_path = sys.path
-
- for item in search_path:
- for dist in find_distributions(item):
- self.add(dist)
-
- def __getitem__(self, project_name):
- """Return a newest-to-oldest list of distributions for `project_name`
-
- Uses case-insensitive `project_name` comparison, assuming all the
- project's distributions use their project's name converted to all
- lowercase as their key.
-
- """
- distribution_key = project_name.lower()
- return self._distmap.get(distribution_key, [])
-
- def add(self, dist):
- """Add `dist` if we ``can_add()`` it and it has not already been added
- """
- if self.can_add(dist) and dist.has_version():
- dists = self._distmap.setdefault(dist.key, [])
- if dist not in dists:
- dists.append(dist)
- dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
-
- def best_match(
- self, req, working_set, installer=None, replace_conflicting=False):
- """Find distribution best matching `req` and usable on `working_set`
-
- This calls the ``find(req)`` method of the `working_set` to see if a
- suitable distribution is already active. (This may raise
- ``VersionConflict`` if an unsuitable version of the project is already
- active in the specified `working_set`.) If a suitable distribution
- isn't active, this method returns the newest distribution in the
- environment that meets the ``Requirement`` in `req`. If no suitable
- distribution is found, and `installer` is supplied, then the result of
- calling the environment's ``obtain(req, installer)`` method will be
- returned.
- """
- try:
- dist = working_set.find(req)
- except VersionConflict:
- if not replace_conflicting:
- raise
- dist = None
- if dist is not None:
- return dist
- for dist in self[req.key]:
- if dist in req:
- return dist
- # try to download/install
- return self.obtain(req, installer)
-
- def obtain(self, requirement, installer=None):
- """Obtain a distribution matching `requirement` (e.g. via download)
-
- Obtain a distro that matches requirement (e.g. via download). In the
- base ``Environment`` class, this routine just returns
- ``installer(requirement)``, unless `installer` is None, in which case
- None is returned instead. This method is a hook that allows subclasses
- to attempt other ways of obtaining a distribution before falling back
- to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
-
- def __iter__(self):
- """Yield the unique project names of the available distributions"""
- for key in self._distmap.keys():
- if self[key]:
- yield key
-
- def __iadd__(self, other):
- """In-place addition of a distribution or environment"""
- if isinstance(other, Distribution):
- self.add(other)
- elif isinstance(other, Environment):
- for project in other:
- for dist in other[project]:
- self.add(dist)
- else:
- raise TypeError("Can't add %r to environment" % (other,))
- return self
-
- def __add__(self, other):
- """Add an environment or distribution to an environment"""
- new = self.__class__([], platform=None, python=None)
- for env in self, other:
- new += env
- return new
-
-
-# XXX backward compatibility
-AvailableDistributions = Environment
-
-
-class ExtractionError(RuntimeError):
- """An error occurred extracting a resource
-
- The following attributes are available from instances of this exception:
-
- manager
- The resource manager that raised this exception
-
- cache_path
- The base directory for resource extraction
-
- original_error
- The exception instance that caused extraction to fail
- """
-
-
-class ResourceManager:
- """Manage resource extraction and packages"""
- extraction_path = None
-
- def __init__(self):
- self.cached_files = {}
-
- def resource_exists(self, package_or_requirement, resource_name):
- """Does the named resource exist?"""
- return get_provider(package_or_requirement).has_resource(resource_name)
-
- def resource_isdir(self, package_or_requirement, resource_name):
- """Is the named resource an existing directory?"""
- return get_provider(package_or_requirement).resource_isdir(
- resource_name
- )
-
- def resource_filename(self, package_or_requirement, resource_name):
- """Return a true filesystem path for specified resource"""
- return get_provider(package_or_requirement).get_resource_filename(
- self, resource_name
- )
-
- def resource_stream(self, package_or_requirement, resource_name):
- """Return a readable file-like object for specified resource"""
- return get_provider(package_or_requirement).get_resource_stream(
- self, resource_name
- )
-
- def resource_string(self, package_or_requirement, resource_name):
- """Return specified resource as a string"""
- return get_provider(package_or_requirement).get_resource_string(
- self, resource_name
- )
-
- def resource_listdir(self, package_or_requirement, resource_name):
- """List the contents of the named resource directory"""
- return get_provider(package_or_requirement).resource_listdir(
- resource_name
- )
-
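- # Illustrative sketch, not part of the original file: these methods back
- # the module-level helpers of the same names ("mypkg" and the resource
- # path are hypothetical).
- #
- #     import pkg_resources
- #
- #     if pkg_resources.resource_exists('mypkg', 'data/template.txt'):
- #         # Despite the name, resource_string() returns bytes.
- #         raw = pkg_resources.resource_string('mypkg', 'data/template.txt')
-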
- def extraction_error(self):
- """Give an error message for problems extracting file(s)"""
-
- old_exc = sys.exc_info()[1]
- cache_path = self.extraction_path or get_default_cache()
-
- tmpl = textwrap.dedent("""
- Can't extract file(s) to egg cache
-
- The following error occurred while trying to extract file(s)
- to the Python egg cache:
-
- {old_exc}
-
- The Python egg cache directory is currently set to:
-
- {cache_path}
-
- Perhaps your account does not have write access to this directory?
- You can change the cache directory by setting the PYTHON_EGG_CACHE
- environment variable to point to an accessible directory.
- """).lstrip()
- err = ExtractionError(tmpl.format(**locals()))
- err.manager = self
- err.cache_path = cache_path
- err.original_error = old_exc
- raise err
-
- def get_cache_path(self, archive_name, names=()):
- """Return absolute location in cache for `archive_name` and `names`
-
- The parent directory of the resulting path will be created if it does
- not already exist. `archive_name` should be the base filename of the
- enclosing egg (which may not be the name of the enclosing zipfile!),
- including its ".egg" extension. `names`, if provided, should be a
- sequence of path name parts "under" the egg's extraction location.
-
- This method should only be called by resource providers that need to
- obtain an extraction location, and only for names they intend to
- extract, as it tracks the generated names for possible cleanup later.
- """
- extract_path = self.extraction_path or get_default_cache()
- target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
- try:
- _bypass_ensure_directory(target_path)
- except Exception:
- self.extraction_error()
-
- self._warn_unsafe_extraction_path(extract_path)
-
- self.cached_files[target_path] = 1
- return target_path
-
- @staticmethod
- def _warn_unsafe_extraction_path(path):
- """
- If the default extraction path is overridden and set to an insecure
- location, such as /tmp, it opens up an opportunity for an attacker to
- replace an extracted file with an unauthorized payload. Warn the user
- if a known insecure location is used.
-
- See Distribute #375 for more details.
- """
- if os.name == 'nt' and not path.startswith(os.environ['windir']):
- # On Windows, permissions are generally restrictive by default
- # and temp directories are not writable by other users, so
- # bypass the warning.
- return
- mode = os.stat(path).st_mode
- if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
- msg = (
- "Extraction path is writable by group/others "
- "and vulnerable to attack when "
- "used with get_resource_filename ({path}). "
- "Consider a more secure "
- "location (set with .set_extraction_path or the "
- "PYTHON_EGG_CACHE environment variable)."
- ).format(**locals())
- warnings.warn(msg, UserWarning)
-
- def postprocess(self, tempname, filename):
- """Perform any platform-specific postprocessing of `tempname`
-
- This is where Mac header rewrites should be done; other platforms don't
- have anything special they should do.
-
- Resource providers should call this method ONLY after successfully
- extracting a compressed resource. They must NOT call it on resources
- that are already in the filesystem.
-
- `tempname` is the current (temporary) name of the file, and `filename`
- is the name it will be renamed to by the caller after this routine
- returns.
- """
-
- if os.name == 'posix':
- # Make the resource executable
- mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
- os.chmod(tempname, mode)
-
- def set_extraction_path(self, path):
- """Set the base path where resources will be extracted to, if needed.
-
- If you do not call this routine before any extractions take place, the
- path defaults to the return value of ``get_default_cache()``. (Which
- is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
- platform-specific fallbacks. See that routine's documentation for more
- details.)
-
- Resources are extracted to subdirectories of this path based upon
- information given by the ``IResourceProvider``. You may set this to a
- temporary directory, but then you must call ``cleanup_resources()`` to
- delete the extracted files when done. There is no guarantee that
- ``cleanup_resources()`` will be able to remove all extracted files.
-
- (Note: you may not change the extraction path for a given resource
- manager once resources have been extracted, unless you first call
- ``cleanup_resources()``.)
- """
- if self.cached_files:
- raise ValueError(
- "Can't change extraction path, files already extracted"
- )
-
- self.extraction_path = path
-
- def cleanup_resources(self, force=False):
- """
- Delete all extracted resource files and directories, returning a list
- of the file and directory names that could not be successfully removed.
- This function does not have any concurrency protection, so it should
- generally only be called when the extraction path is a temporary
- directory exclusive to a single process. This method is not
- automatically called; you must call it explicitly or register it as an
- ``atexit`` function if you wish to ensure cleanup of a temporary
- directory used for extractions.
- """
- # XXX
-
-
-def get_default_cache():
- """
- Return the ``PYTHON_EGG_CACHE`` environment variable
- or a platform-relevant user cache dir for an app
- named "Python-Eggs".
- """
- return (
- os.environ.get('PYTHON_EGG_CACHE')
- or appdirs.user_cache_dir(appname='Python-Eggs')
- )
-
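- # Illustrative sketch, not part of the original file: the environment
- # variable takes precedence over the per-user cache directory.
- #
- #     import os
- #     os.environ['PYTHON_EGG_CACHE'] = '/tmp/egg-cache'  # hypothetical path
- #     assert get_default_cache() == '/tmp/egg-cache'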
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """
- Convert an arbitrary string to a standard version string
- """
- try:
- # normalize the version
- return str(packaging.version.Version(version))
- except packaging.version.InvalidVersion:
- version = version.replace(' ', '.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def safe_extra(extra):
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of characters other than alphanumerics, '.' and '-' are replaced
- with a single '_', and the result is always lowercased.
- """
- return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
- return name.replace('-', '_')
-
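- # Illustrative examples, not part of the original file, showing the naming
- # helpers above on arbitrary inputs:
- #
- #     safe_name('my package!')   # -> 'my-package-'
- #     safe_version('1.0 beta')   # -> '1.0.beta'
- #     safe_extra('Feature-X')    # -> 'feature-x'
- #     to_filename('my-package')  # -> 'my_package'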
-
-def invalid_marker(text):
- """
- Validate text as a PEP 508 environment marker; return a SyntaxError
- instance if it is invalid, or False otherwise.
- """
- try:
- evaluate_marker(text)
- except SyntaxError as e:
- e.filename = None
- e.lineno = None
- return e
- return False
-
-
-def evaluate_marker(text, extra=None):
- """
- Evaluate a PEP 508 environment marker.
- Return a boolean indicating the marker result in this environment.
- Raise SyntaxError if marker is invalid.
-
- This implementation uses the 'packaging' module.
- """
- try:
- marker = packaging.markers.Marker(text)
- return marker.evaluate()
- except packaging.markers.InvalidMarker as e:
- raise SyntaxError(e) from e
-
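- # Illustrative examples, not part of the original file:
- #
- #     evaluate_marker('python_version >= "3"')  # True on Python 3
- #     invalid_marker('python_version >=')       # returns a SyntaxError
- #     invalid_marker('os_name == "posix"')      # False (marker is valid)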
-
-class NullProvider:
- """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
- egg_name = None
- egg_info = None
- loader = None
-
- def __init__(self, module):
- self.loader = getattr(module, '__loader__', None)
- self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
- def get_resource_filename(self, manager, resource_name):
- return self._fn(self.module_path, resource_name)
-
- def get_resource_stream(self, manager, resource_name):
- return io.BytesIO(self.get_resource_string(manager, resource_name))
-
- def get_resource_string(self, manager, resource_name):
- return self._get(self._fn(self.module_path, resource_name))
-
- def has_resource(self, resource_name):
- return self._has(self._fn(self.module_path, resource_name))
-
- def _get_metadata_path(self, name):
- return self._fn(self.egg_info, name)
-
- def has_metadata(self, name):
- if not self.egg_info:
- return self.egg_info
-
- path = self._get_metadata_path(name)
- return self._has(path)
-
- def get_metadata(self, name):
- if not self.egg_info:
- return ""
- path = self._get_metadata_path(name)
- value = self._get(path)
- try:
- return value.decode('utf-8')
- except UnicodeDecodeError as exc:
- # Include the path in the error message to simplify
- # troubleshooting, and without changing the exception type.
- exc.reason += ' in {} file at path: {}'.format(name, path)
- raise
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
- def resource_isdir(self, resource_name):
- return self._isdir(self._fn(self.module_path, resource_name))
-
- def metadata_isdir(self, name):
- return self.egg_info and self._isdir(self._fn(self.egg_info, name))
-
- def resource_listdir(self, resource_name):
- return self._listdir(self._fn(self.module_path, resource_name))
-
- def metadata_listdir(self, name):
- if self.egg_info:
- return self._listdir(self._fn(self.egg_info, name))
- return []
-
- def run_script(self, script_name, namespace):
- script = 'scripts/' + script_name
- if not self.has_metadata(script):
- raise ResolutionError(
- "Script {script!r} not found in metadata at {self.egg_info!r}"
- .format(**locals()),
- )
- script_text = self.get_metadata(script).replace('\r\n', '\n')
- script_text = script_text.replace('\r', '\n')
- script_filename = self._fn(self.egg_info, script)
- namespace['__file__'] = script_filename
- if os.path.exists(script_filename):
- with open(script_filename) as fid:
- source = fid.read()
- code = compile(source, script_filename, 'exec')
- exec(code, namespace, namespace)
- else:
- from linecache import cache
- cache[script_filename] = (
- len(script_text), 0, script_text.split('\n'), script_filename
- )
- script_code = compile(script_text, script_filename, 'exec')
- exec(script_code, namespace, namespace)
-
- def _has(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _isdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _listdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _fn(self, base, resource_name):
- self._validate_resource_path(resource_name)
- if resource_name:
- return os.path.join(base, *resource_name.split('/'))
- return base
-
- @staticmethod
- def _validate_resource_path(path):
- """
- Validate the resource paths according to the docs.
- https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
-
- >>> warned = getfixture('recwarn')
- >>> warnings.simplefilter('always')
- >>> vrp = NullProvider._validate_resource_path
- >>> vrp('foo/bar.txt')
- >>> bool(warned)
- False
- >>> vrp('../foo/bar.txt')
- >>> bool(warned)
- True
- >>> warned.clear()
- >>> vrp('/foo/bar.txt')
- >>> bool(warned)
- True
- >>> vrp('foo/../../bar.txt')
- >>> bool(warned)
- True
- >>> warned.clear()
- >>> vrp('foo/f../bar.txt')
- >>> bool(warned)
- False
-
- Windows path separators are straight-up disallowed.
- >>> vrp(r'\\foo/bar.txt')
- Traceback (most recent call last):
- ...
- ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
- >>> vrp(r'C:\\foo/bar.txt')
- Traceback (most recent call last):
- ...
- ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
- Blank values are allowed
-
- >>> vrp('')
- >>> bool(warned)
- False
-
- Non-string values are not.
-
- >>> vrp(None)
- Traceback (most recent call last):
- ...
- AttributeError: ...
- """
- invalid = (
- os.path.pardir in path.split(posixpath.sep) or
- posixpath.isabs(path) or
- ntpath.isabs(path)
- )
- if not invalid:
- return
-
- msg = "Use of .. or absolute path in a resource path is not allowed."
-
- # Aggressively disallow Windows absolute paths
- if ntpath.isabs(path) and not posixpath.isabs(path):
- raise ValueError(msg)
-
- # for compatibility, warn; in future
- # raise ValueError(msg)
- warnings.warn(
- msg[:-1] + " and will raise exceptions in a future release.",
- DeprecationWarning,
- stacklevel=4,
- )
-
- def _get(self, path):
- if hasattr(self.loader, 'get_data'):
- return self.loader.get_data(path)
- raise NotImplementedError(
- "Can't perform this operation for loaders without 'get_data()'"
- )
-
-
-register_loader_type(object, NullProvider)
-
-
-def _parents(path):
- """
- yield all parents of path including path
- """
- last = None
- while path != last:
- yield path
- last = path
- path, _ = os.path.split(path)
-
-
-class EggProvider(NullProvider):
- """Provider based on a virtual filesystem"""
-
- def __init__(self, module):
- NullProvider.__init__(self, module)
- self._setup_prefix()
-
- def _setup_prefix(self):
- # Assume that metadata may be nested inside a "basket"
- # of multiple eggs and use module_path instead of .archive.
- eggs = filter(_is_egg_path, _parents(self.module_path))
- egg = next(eggs, None)
- egg and self._set_egg(egg)
-
- def _set_egg(self, path):
- self.egg_name = os.path.basename(path)
- self.egg_info = os.path.join(path, 'EGG-INFO')
- self.egg_root = path
-
-
-class DefaultProvider(EggProvider):
- """Provides access to package resources in the filesystem"""
-
- def _has(self, path):
- return os.path.exists(path)
-
- def _isdir(self, path):
- return os.path.isdir(path)
-
- def _listdir(self, path):
- return os.listdir(path)
-
- def get_resource_stream(self, manager, resource_name):
- return open(self._fn(self.module_path, resource_name), 'rb')
-
- def _get(self, path):
- with open(path, 'rb') as stream:
- return stream.read()
-
- @classmethod
- def _register(cls):
- loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
- for name in loader_names:
- loader_cls = getattr(importlib_machinery, name, type(None))
- register_loader_type(loader_cls, cls)
-
-
-DefaultProvider._register()
-
-
-class EmptyProvider(NullProvider):
- """Provider that returns nothing for all requests"""
-
- module_path = None
-
- _isdir = _has = lambda self, path: False
-
- def _get(self, path):
- return ''
-
- def _listdir(self, path):
- return []
-
- def __init__(self):
- pass
-
-
-empty_provider = EmptyProvider()
-
-
-class ZipManifests(dict):
- """
- zip manifest builder
- """
-
- @classmethod
- def build(cls, path):
- """
- Build a dictionary similar to the zipimport directory
- caches, except instead of tuples, store ZipInfo objects.
-
- Use a platform-specific path separator (os.sep) for the path keys
- for compatibility with pypy on Windows.
- """
- with zipfile.ZipFile(path) as zfile:
- items = (
- (
- name.replace('/', os.sep),
- zfile.getinfo(name),
- )
- for name in zfile.namelist()
- )
- return dict(items)
-
- load = build
-
-
-class MemoizedZipManifests(ZipManifests):
- """
- Memoized zipfile manifests.
- """
- manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
-
- def load(self, path):
- """
- Load a manifest at path or return a suitable manifest already loaded.
- """
- path = os.path.normpath(path)
- mtime = os.stat(path).st_mtime
-
- if path not in self or self[path].mtime != mtime:
- manifest = self.build(path)
- self[path] = self.manifest_mod(manifest, mtime)
-
- return self[path].manifest
-
-
-class ZipProvider(EggProvider):
- """Resource support for zips and eggs"""
-
- eagers = None
- _zip_manifests = MemoizedZipManifests()
-
- def __init__(self, module):
- EggProvider.__init__(self, module)
- self.zip_pre = self.loader.archive + os.sep
-
- def _zipinfo_name(self, fspath):
- # Convert a virtual filename (full path to file) into a zipfile subpath
- # usable with the zipimport directory cache for our target archive
- fspath = fspath.rstrip(os.sep)
- if fspath == self.loader.archive:
- return ''
- if fspath.startswith(self.zip_pre):
- return fspath[len(self.zip_pre):]
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.zip_pre)
- )
-
- def _parts(self, zip_path):
- # Convert a zipfile subpath into an egg-relative path part list.
- # pseudo-fs path
- fspath = self.zip_pre + zip_path
- if fspath.startswith(self.egg_root + os.sep):
- return fspath[len(self.egg_root) + 1:].split(os.sep)
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.egg_root)
- )
-
- @property
- def zipinfo(self):
- return self._zip_manifests.load(self.loader.archive)
-
- def get_resource_filename(self, manager, resource_name):
- if not self.egg_name:
- raise NotImplementedError(
- "resource_filename() only supported for .egg, not .zip"
- )
- # no need to lock for extraction, since we use temp names
- zip_path = self._resource_to_zip(resource_name)
- eagers = self._get_eager_resources()
- if '/'.join(self._parts(zip_path)) in eagers:
- for name in eagers:
- self._extract_resource(manager, self._eager_to_zip(name))
- return self._extract_resource(manager, zip_path)
-
- @staticmethod
- def _get_date_and_size(zip_stat):
- size = zip_stat.file_size
- # ymdhms+wday, yday, dst
- date_time = zip_stat.date_time + (0, 0, -1)
- # 1980 offset already done
- timestamp = time.mktime(date_time)
- return timestamp, size
-
- # FIXME: 'ZipProvider._extract_resource' is too complex (12)
- def _extract_resource(self, manager, zip_path): # noqa: C901
-
- if zip_path in self._index():
- for name in self._index()[zip_path]:
- last = self._extract_resource(
- manager, os.path.join(zip_path, name)
- )
- # return the extracted directory name
- return os.path.dirname(last)
-
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-
- if not WRITE_SUPPORT:
- raise IOError('"os.rename" and "os.unlink" are not supported '
- 'on this platform')
- try:
-
- real_path = manager.get_cache_path(
- self.egg_name, self._parts(zip_path)
- )
-
- if self._is_current(real_path, zip_path):
- return real_path
-
- outf, tmpnam = _mkstemp(
- ".$extract",
- dir=os.path.dirname(real_path),
- )
- os.write(outf, self.loader.get_data(zip_path))
- os.close(outf)
- utime(tmpnam, (timestamp, timestamp))
- manager.postprocess(tmpnam, real_path)
-
- try:
- rename(tmpnam, real_path)
-
- except os.error:
- if os.path.isfile(real_path):
- if self._is_current(real_path, zip_path):
- # the file became current since it was checked above,
- # so proceed.
- return real_path
- # Windows, del old file and retry
- elif os.name == 'nt':
- unlink(real_path)
- rename(tmpnam, real_path)
- return real_path
- raise
-
- except os.error:
- # report a user-friendly error
- manager.extraction_error()
-
- return real_path
-
- def _is_current(self, file_path, zip_path):
- """
- Return True if the file_path is current for this zip_path
- """
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
- if not os.path.isfile(file_path):
- return False
- stat = os.stat(file_path)
- if stat.st_size != size or stat.st_mtime != timestamp:
- return False
- # check that the contents match
- zip_contents = self.loader.get_data(zip_path)
- with open(file_path, 'rb') as f:
- file_contents = f.read()
- return zip_contents == file_contents
-
- def _get_eager_resources(self):
- if self.eagers is None:
- eagers = []
- for name in ('native_libs.txt', 'eager_resources.txt'):
- if self.has_metadata(name):
- eagers.extend(self.get_metadata_lines(name))
- self.eagers = eagers
- return self.eagers
-
- def _index(self):
- try:
- return self._dirindex
- except AttributeError:
- ind = {}
- for path in self.zipinfo:
- parts = path.split(os.sep)
- while parts:
- parent = os.sep.join(parts[:-1])
- if parent in ind:
- ind[parent].append(parts[-1])
- break
- else:
- ind[parent] = [parts.pop()]
- self._dirindex = ind
- return ind
-
- def _has(self, fspath):
- zip_path = self._zipinfo_name(fspath)
- return zip_path in self.zipinfo or zip_path in self._index()
-
- def _isdir(self, fspath):
- return self._zipinfo_name(fspath) in self._index()
-
- def _listdir(self, fspath):
- return list(self._index().get(self._zipinfo_name(fspath), ()))
-
- def _eager_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.egg_root, resource_name))
-
- def _resource_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.module_path, resource_name))
-
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-class FileMetadata(EmptyProvider):
- """Metadata handler for standalone PKG-INFO files
-
- Usage::
-
- metadata = FileMetadata("/path/to/PKG-INFO")
-
- This provider rejects all data and metadata requests except for PKG-INFO,
- which is treated as existing, and will be the contents of the file at
- the provided location.
- """
-
- def __init__(self, path):
- self.path = path
-
- def _get_metadata_path(self, name):
- return self.path
-
- def has_metadata(self, name):
- return name == 'PKG-INFO' and os.path.isfile(self.path)
-
- def get_metadata(self, name):
- if name != 'PKG-INFO':
- raise KeyError("No metadata except PKG-INFO is available")
-
- with io.open(self.path, encoding='utf-8', errors="replace") as f:
- metadata = f.read()
- self._warn_on_replacement(metadata)
- return metadata
-
- def _warn_on_replacement(self, metadata):
- replacement_char = '�'
- if replacement_char in metadata:
- tmpl = "{self.path} could not be properly decoded in UTF-8"
- msg = tmpl.format(**locals())
- warnings.warn(msg)
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
-
-class PathMetadata(DefaultProvider):
- """Metadata provider for egg directories
-
- Usage::
-
- # Development eggs:
-
- egg_info = "/path/to/PackageName.egg-info"
- base_dir = os.path.dirname(egg_info)
- metadata = PathMetadata(base_dir, egg_info)
- dist_name = os.path.splitext(os.path.basename(egg_info))[0]
- dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
-
- # Unpacked egg directories:
-
- egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
- metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
- dist = Distribution.from_filename(egg_path, metadata=metadata)
- """
-
- def __init__(self, path, egg_info):
- self.module_path = path
- self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
- """Metadata provider for .egg files"""
-
- def __init__(self, importer):
- """Create a metadata provider from a zipimporter"""
-
- self.zip_pre = importer.archive + os.sep
- self.loader = importer
- if importer.prefix:
- self.module_path = os.path.join(importer.archive, importer.prefix)
- else:
- self.module_path = importer.archive
- self._setup_prefix()
-
-
-_declare_state('dict', _distribution_finders={})
-
-
-def register_finder(importer_type, distribution_finder):
- """Register `distribution_finder` to find distributions in sys.path items
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `distribution_finder` is a callable that, passed a path
- item and the importer instance, yields ``Distribution`` instances found on
- that path item. See ``pkg_resources.find_on_path`` for an example."""
- _distribution_finders[importer_type] = distribution_finder
-
-
-def find_distributions(path_item, only=False):
- """Yield distributions accessible via `path_item`"""
- importer = get_importer(path_item)
- finder = _find_adapter(_distribution_finders, importer)
- return finder(importer, path_item, only)
-
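- # Illustrative sketch, not part of the original file (the path is
- # hypothetical):
- #
- #     for dist in find_distributions('/path/to/site-packages'):
- #         print(dist.project_name, dist.version)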
-
-def find_eggs_in_zip(importer, path_item, only=False):
- """
- Find eggs in zip files; possibly multiple nested eggs.
- """
- if importer.archive.endswith('.whl'):
- # wheels are not supported with this finder
- # they don't have PKG-INFO metadata, and won't ever contain eggs
- return
- metadata = EggMetadata(importer)
- if metadata.has_metadata('PKG-INFO'):
- yield Distribution.from_filename(path_item, metadata=metadata)
- if only:
- # don't yield nested distros
- return
- for subitem in metadata.resource_listdir(''):
- if _is_egg_path(subitem):
- subpath = os.path.join(path_item, subitem)
- dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
- for dist in dists:
- yield dist
- elif subitem.lower().endswith(('.dist-info', '.egg-info')):
- subpath = os.path.join(path_item, subitem)
- submeta = EggMetadata(zipimport.zipimporter(subpath))
- submeta.egg_info = subpath
- yield Distribution.from_location(path_item, subitem, submeta)
-
-
-register_finder(zipimport.zipimporter, find_eggs_in_zip)
-
-
-def find_nothing(importer, path_item, only=False):
- return ()
-
-
-register_finder(object, find_nothing)
-
-
-def _by_version_descending(names):
- """
- Given a list of filenames, return them in descending order
- by version number.
-
- >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
- >>> _by_version_descending(names)
- ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo']
- >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
- >>> _by_version_descending(names)
- ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
- >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
- >>> _by_version_descending(names)
- ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
- """
- def try_parse(name):
- """
- Attempt to parse as a version or return a null version.
- """
- try:
- return packaging.version.Version(name)
- except Exception:
- return packaging.version.Version('0')
-
- def _by_version(name):
- """
- Parse each component of the filename
- """
- name, ext = os.path.splitext(name)
- parts = itertools.chain(name.split('-'), [ext])
- return [try_parse(part) for part in parts]
-
- return sorted(names, key=_by_version, reverse=True)
-
-
-def find_on_path(importer, path_item, only=False):
- """Yield distributions accessible on a sys.path directory"""
- path_item = _normalize_cached(path_item)
-
- if _is_unpacked_egg(path_item):
- yield Distribution.from_filename(
- path_item, metadata=PathMetadata(
- path_item, os.path.join(path_item, 'EGG-INFO')
- )
- )
- return
-
- entries = (
- os.path.join(path_item, child)
- for child in safe_listdir(path_item)
- )
-
- # for performance, before sorting by version,
- # screen entries for only those that will yield
- # distributions
- filtered = (
- entry
- for entry in entries
- if dist_factory(path_item, entry, only)
- )
-
- # scan for .egg and .egg-info in directory
- path_item_entries = _by_version_descending(filtered)
- for entry in path_item_entries:
- fullpath = os.path.join(path_item, entry)
- factory = dist_factory(path_item, entry, only)
- for dist in factory(fullpath):
- yield dist
-
-
-def dist_factory(path_item, entry, only):
- """Return a dist_factory for the given entry."""
- lower = entry.lower()
- is_egg_info = lower.endswith('.egg-info')
- is_dist_info = (
- lower.endswith('.dist-info') and
- os.path.isdir(os.path.join(path_item, entry))
- )
- is_meta = is_egg_info or is_dist_info
- return (
- distributions_from_metadata
- if is_meta else
- find_distributions
- if not only and _is_egg_path(entry) else
- resolve_egg_link
- if not only and lower.endswith('.egg-link') else
- NoDists()
- )
-
-
-class NoDists:
- """
- >>> bool(NoDists())
- False
-
- >>> list(NoDists()('anything'))
- []
- """
- def __bool__(self):
- return False
-
- def __call__(self, fullpath):
- return iter(())
-
-
-def safe_listdir(path):
- """
- Attempt to list contents of path, but suppress some exceptions.
- """
- try:
- return os.listdir(path)
- except (PermissionError, NotADirectoryError):
- pass
- except OSError as e:
- # Ignore the directory if it does not exist, is not a directory,
- # or permission is denied
- if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
- raise
- return ()
-
-
-def distributions_from_metadata(path):
- root = os.path.dirname(path)
- if os.path.isdir(path):
- if len(os.listdir(path)) == 0:
- # empty metadata dir; skip
- return
- metadata = PathMetadata(root, path)
- else:
- metadata = FileMetadata(path)
- entry = os.path.basename(path)
- yield Distribution.from_location(
- root, entry, metadata, precedence=DEVELOP_DIST,
- )
-
-
-def non_empty_lines(path):
- """
- Yield non-empty lines from file at path
- """
- with open(path) as f:
- for line in f:
- line = line.strip()
- if line:
- yield line
-
-
-def resolve_egg_link(path):
- """
- Given a path to an .egg-link, resolve distributions
- present in the referenced path.
- """
- referenced_paths = non_empty_lines(path)
- resolved_paths = (
- os.path.join(os.path.dirname(path), ref)
- for ref in referenced_paths
- )
- dist_groups = map(find_distributions, resolved_paths)
- return next(dist_groups, ())
-
-
-register_finder(pkgutil.ImpImporter, find_on_path)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_finder(importlib_machinery.FileFinder, find_on_path)
-
-_declare_state('dict', _namespace_handlers={})
-_declare_state('dict', _namespace_packages={})
-
-
-def register_namespace_handler(importer_type, namespace_handler):
- """Register `namespace_handler` to declare namespace packages
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `namespace_handler` is a callable like this::
-
- def namespace_handler(importer, path_entry, moduleName, module):
- # return a path_entry to use for child packages
-
- Namespace handlers are only called if the importer object has already
- agreed that it can handle the relevant path item, and they should only
- return a subpath if the module __path__ does not already contain an
- equivalent subpath. For an example namespace handler, see
- ``pkg_resources.file_ns_handler``.
- """
- _namespace_handlers[importer_type] = namespace_handler
-
-
-def _handle_ns(packageName, path_item):
- """Ensure that named package includes a subpath of path_item (if needed)"""
-
- importer = get_importer(path_item)
- if importer is None:
- return None
-
- # use find_spec (PEP 451) and fall back to find_module (PEP 302)
- try:
- loader = importer.find_spec(packageName).loader
- except AttributeError:
- # capture warnings due to #1111
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- loader = importer.find_module(packageName)
-
- if loader is None:
- return None
- module = sys.modules.get(packageName)
- if module is None:
- module = sys.modules[packageName] = types.ModuleType(packageName)
- module.__path__ = []
- _set_parent_ns(packageName)
- elif not hasattr(module, '__path__'):
- raise TypeError("Not a package:", packageName)
- handler = _find_adapter(_namespace_handlers, importer)
- subpath = handler(importer, path_item, packageName, module)
- if subpath is not None:
- path = module.__path__
- path.append(subpath)
- importlib.import_module(packageName)
- _rebuild_mod_path(path, packageName, module)
- return subpath
-
-
-def _rebuild_mod_path(orig_path, package_name, module):
- """
- Rebuild module.__path__ ensuring that all entries are ordered
- corresponding to their sys.path order
- """
- sys_path = [_normalize_cached(p) for p in sys.path]
-
- def safe_sys_path_index(entry):
- """
- Workaround for #520 and #513.
- """
- try:
- return sys_path.index(entry)
- except ValueError:
- return float('inf')
-
- def position_in_sys_path(path):
- """
- Return the ordinal of the path based on its position in sys.path
- """
- path_parts = path.split(os.sep)
- module_parts = package_name.count('.') + 1
- parts = path_parts[:-module_parts]
- return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
-
- new_path = sorted(orig_path, key=position_in_sys_path)
- new_path = [_normalize_cached(p) for p in new_path]
-
- if isinstance(module.__path__, list):
- module.__path__[:] = new_path
- else:
- module.__path__ = new_path
-
-
-def declare_namespace(packageName):
- """Declare that package 'packageName' is a namespace package"""
-
- _imp.acquire_lock()
- try:
- if packageName in _namespace_packages:
- return
-
- path = sys.path
- parent, _, _ = packageName.rpartition('.')
-
- if parent:
- declare_namespace(parent)
- if parent not in _namespace_packages:
- __import__(parent)
- try:
- path = sys.modules[parent].__path__
- except AttributeError as e:
- raise TypeError("Not a package:", parent) from e
-
- # Track what packages are namespaces, so when new path items are added,
- # they can be updated
- _namespace_packages.setdefault(parent or None, []).append(packageName)
- _namespace_packages.setdefault(packageName, [])
-
- for path_item in path:
- # Ensure all the parent's path items are reflected in the child,
- # if they apply
- _handle_ns(packageName, path_item)
-
- finally:
- _imp.release_lock()
-
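- # Illustrative sketch, not part of the original file: the classic use is a
- # one-line __init__.py in each portion of a namespace package:
- #
- #     # mynamespace/__init__.py (hypothetical)
- #     __import__('pkg_resources').declare_namespace(__name__)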
-
-def fixup_namespace_packages(path_item, parent=None):
- """Ensure that previously-declared namespace packages include path_item"""
- _imp.acquire_lock()
- try:
- for package in _namespace_packages.get(parent, ()):
- subpath = _handle_ns(package, path_item)
- if subpath:
- fixup_namespace_packages(subpath, package)
- finally:
- _imp.release_lock()
-
-
-def file_ns_handler(importer, path_item, packageName, module):
- """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
- subpath = os.path.join(path_item, packageName.split('.')[-1])
- normalized = _normalize_cached(subpath)
- for item in module.__path__:
- if _normalize_cached(item) == normalized:
- break
- else:
- # Only return the path if it's not already there
- return subpath
-
-
-register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
- return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
- """Normalize a file/dir name for comparison purposes"""
- return os.path.normcase(os.path.realpath(os.path.normpath(
- _cygwin_patch(filename))))
-
-
-def _cygwin_patch(filename): # pragma: nocover
- """
- Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
- symlink components. Using
- os.path.abspath() works around this limitation. A fix in os.getcwd()
- would probably be better, in Cygwin even more so, except
- that this seems to be by design...
- """
- return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-
-
-def _normalize_cached(filename, _cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
-
-
-def _is_egg_path(path):
- """
- Determine if given path appears to be an egg.
- """
- return _is_zip_egg(path) or _is_unpacked_egg(path)
-
-
-def _is_zip_egg(path):
- return (
- path.lower().endswith('.egg') and
- os.path.isfile(path) and
- zipfile.is_zipfile(path)
- )
-
-
-def _is_unpacked_egg(path):
- """
- Determine if given path appears to be an unpacked egg.
- """
- return (
- path.lower().endswith('.egg') and
- os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
- )
-
-
-def _set_parent_ns(packageName):
- parts = packageName.split('.')
- name = parts.pop()
- if parts:
- parent = '.'.join(parts)
- setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-def _nonblank(str):
- return str and not str.startswith('#')
-
-
- @functools.singledispatch
- def yield_lines(iterable):
- """Yield valid lines of a string or iterable"""
- return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
- return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
- r"""
- (?P<name>[^-]+) (
- -(?P<ver>[^-]+) (
- -py(?P<pyver>[^-]+) (
- -(?P<plat>.+)
- )?
- )?
- )?
- """,
- re.VERBOSE | re.IGNORECASE,
-).match
-
-
-class EntryPoint:
- """Object representing an advertised importable object"""
-
- def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
- if not MODULE(module_name):
- raise ValueError("Invalid module name", module_name)
- self.name = name
- self.module_name = module_name
- self.attrs = tuple(attrs)
- self.extras = tuple(extras)
- self.dist = dist
-
- def __str__(self):
- s = "%s = %s" % (self.name, self.module_name)
- if self.attrs:
- s += ':' + '.'.join(self.attrs)
- if self.extras:
- s += ' [%s]' % ','.join(self.extras)
- return s
-
- def __repr__(self):
- return "EntryPoint.parse(%r)" % str(self)
-
- def load(self, require=True, *args, **kwargs):
- """
- Require packages for this EntryPoint, then resolve it.
- """
- if not require or args or kwargs:
- warnings.warn(
- "Parameters to load are deprecated. Call .resolve and "
- ".require separately.",
- PkgResourcesDeprecationWarning,
- stacklevel=2,
- )
- if require:
- self.require(*args, **kwargs)
- return self.resolve()
-
- def resolve(self):
- """
- Resolve the entry point from its module and attrs.
- """
- module = __import__(self.module_name, fromlist=['__name__'], level=0)
- try:
- return functools.reduce(getattr, self.attrs, module)
- except AttributeError as exc:
- raise ImportError(str(exc)) from exc
-
- def require(self, env=None, installer=None):
- if self.extras and not self.dist:
- raise UnknownExtra("Can't require() without a distribution", self)
-
- # Get the requirements for this entry point with all its extras and
- # then resolve them. We have to pass `extras` along when resolving so
- # that the working set knows what extras we want. Otherwise, for
- # dist-info distributions, the working set will assume that the
- # requirements for that extra are purely optional and skip over them.
- reqs = self.dist.requires(self.extras)
- items = working_set.resolve(reqs, env, installer, extras=self.extras)
- list(map(working_set.add, items))
-
- pattern = re.compile(
- r'\s*'
- r'(?P<name>.+?)\s*'
- r'=\s*'
- r'(?P<module>[\w.]+)\s*'
- r'(:\s*(?P<attr>[\w.]+))?\s*'
- r'(?P<extras>\[.*\])?\s*$'
- )
-
- @classmethod
- def parse(cls, src, dist=None):
- """Parse a single entry point from string `src`
-
- Entry point syntax follows the form::
-
- name = some.module:some.attr [extra1, extra2]
-
- The entry name and module name are required, but the ``:attrs`` and
- ``[extras]`` parts are optional.
- """
- m = cls.pattern.match(src)
- if not m:
- msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
- raise ValueError(msg, src)
- res = m.groupdict()
- extras = cls._parse_extras(res['extras'])
- attrs = res['attr'].split('.') if res['attr'] else ()
- return cls(res['name'], res['module'], attrs, extras, dist)
-
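- # Illustrative sketch, not part of the original file ("mypkg" is
- # hypothetical):
- #
- #     ep = EntryPoint.parse('console = mypkg.cli:main [extra1]')
- #     ep.name         # 'console'
- #     ep.module_name  # 'mypkg.cli'
- #     ep.attrs        # ('main',)
- #     ep.extras       # ('extra1',)
-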
- @classmethod
- def _parse_extras(cls, extras_spec):
- if not extras_spec:
- return ()
- req = Requirement.parse('x' + extras_spec)
- if req.specs:
- raise ValueError()
- return req.extras
-
- @classmethod
- def parse_group(cls, group, lines, dist=None):
- """Parse an entry point group"""
- if not MODULE(group):
- raise ValueError("Invalid group name", group)
- this = {}
- for line in yield_lines(lines):
- ep = cls.parse(line, dist)
- if ep.name in this:
- raise ValueError("Duplicate entry point", group, ep.name)
- this[ep.name] = ep
- return this
-
- @classmethod
- def parse_map(cls, data, dist=None):
- """Parse a map of entry point groups"""
- if isinstance(data, dict):
- data = data.items()
- else:
- data = split_sections(data)
- maps = {}
- for group, lines in data:
- if group is None:
- if not lines:
- continue
- raise ValueError("Entry points must be listed in groups")
- group = group.strip()
- if group in maps:
- raise ValueError("Duplicate group name", group)
- maps[group] = cls.parse_group(group, lines, dist)
- return maps
-
-
-def _version_from_file(lines):
- """
- Given an iterable of lines from a Metadata file, return
- the value of the Version field, if present, or None otherwise.
- """
- def is_version_line(line):
- return line.lower().startswith('version:')
- version_lines = filter(is_version_line, lines)
- line = next(iter(version_lines), '')
- _, _, value = line.partition(':')
- return safe_version(value.strip()) or None
-
-
-class Distribution:
- """Wrap an actual or potential sys.path entry w/metadata"""
- PKG_INFO = 'PKG-INFO'
-
- def __init__(
- self, location=None, metadata=None, project_name=None,
- version=None, py_version=PY_MAJOR, platform=None,
- precedence=EGG_DIST):
- self.project_name = safe_name(project_name or 'Unknown')
- if version is not None:
- self._version = safe_version(version)
- self.py_version = py_version
- self.platform = platform
- self.location = location
- self.precedence = precedence
- self._provider = metadata or empty_provider
-
- @classmethod
- def from_location(cls, location, basename, metadata=None, **kw):
- project_name, version, py_version, platform = [None] * 4
- basename, ext = os.path.splitext(basename)
- if ext.lower() in _distributionImpl:
- cls = _distributionImpl[ext.lower()]
-
- match = EGG_NAME(basename)
- if match:
- project_name, version, py_version, platform = match.group(
- 'name', 'ver', 'pyver', 'plat'
- )
- return cls(
- location, metadata, project_name=project_name, version=version,
- py_version=py_version, platform=platform, **kw
- )._reload_version()
-
- def _reload_version(self):
- return self
-
- @property
- def hashcmp(self):
- return (
- self.parsed_version,
- self.precedence,
- self.key,
- self.location,
- self.py_version or '',
- self.platform or '',
- )
-
- def __hash__(self):
- return hash(self.hashcmp)
-
- def __lt__(self, other):
- return self.hashcmp < other.hashcmp
-
- def __le__(self, other):
- return self.hashcmp <= other.hashcmp
-
- def __gt__(self, other):
- return self.hashcmp > other.hashcmp
-
- def __ge__(self, other):
- return self.hashcmp >= other.hashcmp
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- # It's not a Distribution, so they are not equal
- return False
- return self.hashcmp == other.hashcmp
-
- def __ne__(self, other):
- return not self == other
-
- # These properties have to be lazy so that we don't have to load any
- # metadata until/unless it's actually needed. (i.e., some distributions
- # may not know their name or version without loading PKG-INFO)
-
- @property
- def key(self):
- try:
- return self._key
- except AttributeError:
- self._key = key = self.project_name.lower()
- return key
-
- @property
- def parsed_version(self):
- if not hasattr(self, "_parsed_version"):
- self._parsed_version = parse_version(self.version)
-
- return self._parsed_version
-
- def _warn_legacy_version(self):
- LV = packaging.version.LegacyVersion
- is_legacy = isinstance(self._parsed_version, LV)
- if not is_legacy:
- return
-
- # While an empty version is technically a legacy version and
- # is not a valid PEP 440 version, it is unlikely to have been
- # supplied intentionally; more likely it comes from setuptools
- # attempting to parse a filename and including it in the list.
- # For that reason, gate this warning on whether the version is
- # non-empty.
- if not self.version:
- return
-
- tmpl = textwrap.dedent("""
- '{project_name} ({version})' is being parsed as a legacy,
- non PEP 440,
- version. You may find odd behavior and sort order.
- In particular it will be sorted as less than 0.0. It
- is recommended to migrate to PEP 440 compatible
- versions.
- """).strip().replace('\n', ' ')
-
- warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
-
- @property
- def version(self):
- try:
- return self._version
- except AttributeError as e:
- version = self._get_version()
- if version is None:
- path = self._get_metadata_path_for_display(self.PKG_INFO)
- msg = (
- "Missing 'Version:' header and/or {} file at path: {}"
- ).format(self.PKG_INFO, path)
- raise ValueError(msg, self) from e
-
- return version
-
- @property
- def _dep_map(self):
- """
- A map of extra to its list of (direct) requirements
- for this distribution, including the null extra.
- """
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._filter_extras(self._build_dep_map())
- return self.__dep_map
-
- @staticmethod
- def _filter_extras(dm):
- """
- Given a mapping of extras to dependencies, strip off
- environment markers and filter out any dependencies
- not matching the markers.
- """
- for extra in list(filter(None, dm)):
- new_extra = extra
- reqs = dm.pop(extra)
- new_extra, _, marker = extra.partition(':')
- fails_marker = marker and (
- invalid_marker(marker)
- or not evaluate_marker(marker)
- )
- if fails_marker:
- reqs = []
- new_extra = safe_extra(new_extra) or None
-
- dm.setdefault(new_extra, []).extend(reqs)
- return dm
-
- def _build_dep_map(self):
- dm = {}
- for name in 'requires.txt', 'depends.txt':
- for extra, reqs in split_sections(self._get_metadata(name)):
- dm.setdefault(extra, []).extend(parse_requirements(reqs))
- return dm
-
- def requires(self, extras=()):
- """List of Requirements needed for this distro if `extras` are used"""
- dm = self._dep_map
- deps = []
- deps.extend(dm.get(None, ()))
- for ext in extras:
- try:
- deps.extend(dm[safe_extra(ext)])
- except KeyError as e:
- raise UnknownExtra(
- "%s has no such extra feature %r" % (self, ext)
- ) from e
- return deps
-
- def _get_metadata_path_for_display(self, name):
- """
- Return the path to the given metadata file, if available.
- """
- try:
- # We need to access _get_metadata_path() on the provider object
- # directly rather than through this class's __getattr__()
- # since _get_metadata_path() is marked private.
- path = self._provider._get_metadata_path(name)
-
- # Handle exceptions e.g. in case the distribution's metadata
- # provider doesn't support _get_metadata_path().
- except Exception:
- return '[could not detect]'
-
- return path
-
- def _get_metadata(self, name):
- if self.has_metadata(name):
- for line in self.get_metadata_lines(name):
- yield line
-
- def _get_version(self):
- lines = self._get_metadata(self.PKG_INFO)
- version = _version_from_file(lines)
-
- return version
-
- def activate(self, path=None, replace=False):
- """Ensure distribution is importable on `path` (default=sys.path)"""
- if path is None:
- path = sys.path
- self.insert_on(path, replace=replace)
- if path is sys.path:
- fixup_namespace_packages(self.location)
- for pkg in self._get_metadata('namespace_packages.txt'):
- if pkg in sys.modules:
- declare_namespace(pkg)
-
- def egg_name(self):
- """Return what this distribution's standard .egg filename should be"""
- filename = "%s-%s-py%s" % (
- to_filename(self.project_name), to_filename(self.version),
- self.py_version or PY_MAJOR
- )
-
- if self.platform:
- filename += '-' + self.platform
- return filename
-
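- # Illustrative example, not part of the original file, with made-up values:
- #
- #     d = Distribution(project_name='MyPkg', version='1.0', py_version='3.8')
- #     d.egg_name()  # 'MyPkg-1.0-py3.8'
-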
- def __repr__(self):
- if self.location:
- return "%s (%s)" % (self, self.location)
- else:
- return str(self)
-
- def __str__(self):
- try:
- version = getattr(self, 'version', None)
- except ValueError:
- version = None
- version = version or "[unknown version]"
- return "%s %s" % (self.project_name, version)
-
- def __getattr__(self, attr):
- """Delegate all unrecognized public attributes to .metadata provider"""
- if attr.startswith('_'):
- raise AttributeError(attr)
- return getattr(self._provider, attr)
-
- def __dir__(self):
- return list(
- set(super(Distribution, self).__dir__())
- | set(
- attr for attr in self._provider.__dir__()
- if not attr.startswith('_')
- )
- )
-
- @classmethod
- def from_filename(cls, filename, metadata=None, **kw):
- return cls.from_location(
- _normalize_cached(filename), os.path.basename(filename), metadata,
- **kw
- )
-
- def as_requirement(self):
- """Return a ``Requirement`` that matches this distribution exactly"""
- if isinstance(self.parsed_version, packaging.version.Version):
- spec = "%s==%s" % (self.project_name, self.parsed_version)
- else:
- spec = "%s===%s" % (self.project_name, self.parsed_version)
-
- return Requirement.parse(spec)
-
- def load_entry_point(self, group, name):
- """Return the `name` entry point of `group` or raise ImportError"""
- ep = self.get_entry_info(group, name)
- if ep is None:
- raise ImportError("Entry point %r not found" % ((group, name),))
- return ep.load()
-
- def get_entry_map(self, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- try:
- ep_map = self._ep_map
- except AttributeError:
- ep_map = self._ep_map = EntryPoint.parse_map(
- self._get_metadata('entry_points.txt'), self
- )
- if group is not None:
- return ep_map.get(group, {})
- return ep_map
-
- def get_entry_info(self, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return self.get_entry_map(group).get(name)
-
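Typical use of the entry-point accessors above; 'setuptools' and its 'distutils.commands' group are merely convenient examples of a distribution that ships entry points:

    import pkg_resources

    dist = pkg_resources.get_distribution('setuptools')
    # Map for one group; the result is {name: EntryPoint}.
    for name, ep in sorted(dist.get_entry_map('distutils.commands').items()):
        print(name, '->', ep)

    # load_entry_point() is get_entry_info(...).load() in one step and
    # raises ImportError when the entry point is missing.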
- # FIXME: 'Distribution.insert_on' is too complex (13)
- def insert_on(self, path, loc=None, replace=False): # noqa: C901
- """Ensure self.location is on path
-
- If replace=False (default):
- - If location is already in path anywhere, do nothing.
- - Else:
- - If it's an egg and its parent directory is on path,
- insert just ahead of the parent.
- - Else: add to the end of path.
- If replace=True:
- - If location is already on path anywhere (not eggs)
- or higher priority than its parent (eggs)
- do nothing.
- - Else:
- - If it's an egg and its parent directory is on path,
- insert just ahead of the parent,
- removing any lower-priority entries.
- - Else: add it to the front of path.
- """
-
- loc = loc or self.location
- if not loc:
- return
-
- nloc = _normalize_cached(loc)
- bdir = os.path.dirname(nloc)
- npath = [(p and _normalize_cached(p) or p) for p in path]
-
- for p, item in enumerate(npath):
- if item == nloc:
- if replace:
- break
- else:
- # don't modify path (even removing duplicates) if
- # found and not replace
- return
- elif item == bdir and self.precedence == EGG_DIST:
- # if it's an .egg, give it precedence over its directory
- # UNLESS it's already been added to sys.path and replace=False
- if (not replace) and nloc in npath[p:]:
- return
- if path is sys.path:
- self.check_version_conflict()
- path.insert(p, loc)
- npath.insert(p, nloc)
- break
- else:
- if path is sys.path:
- self.check_version_conflict()
- if replace:
- path.insert(0, loc)
- else:
- path.append(loc)
- return
-
- # p is the spot where we found or inserted loc; now remove duplicates
- while True:
- try:
- np = npath.index(nloc, p + 1)
- except ValueError:
- break
- else:
- del npath[np], path[np]
- # continue the duplicate scan from the removed slot
- p = np
-
- return
-
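A worked example of the rules above, on a plain list standing in for sys.path (POSIX-style, hypothetical paths):

    import pkg_resources

    path = ['/apps', '/apps/foo-1.0.egg', '/site-packages']
    dist = pkg_resources.Distribution(
        location='/apps/bar-2.0.egg', project_name='bar', version='2.0',
        precedence=pkg_resources.EGG_DIST,
    )
    dist.insert_on(path, replace=False)
    # The egg lands just ahead of its parent directory '/apps':
    print(path)   # ['/apps/bar-2.0.egg', '/apps', '/apps/foo-1.0.egg', '/site-packages']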
- def check_version_conflict(self):
- if self.key == 'setuptools':
- # ignore the inevitable setuptools self-conflicts :(
- return
-
- nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
- loc = normalize_path(self.location)
- for modname in self._get_metadata('top_level.txt'):
- if (modname not in sys.modules or modname in nsp
- or modname in _namespace_packages):
- continue
- if modname in ('pkg_resources', 'setuptools', 'site'):
- continue
- fn = getattr(sys.modules[modname], '__file__', None)
- if fn and (normalize_path(fn).startswith(loc) or
- fn.startswith(self.location)):
- continue
- issue_warning(
- "Module %s was already imported from %s, but %s is being added"
- " to sys.path" % (modname, fn, self.location),
- )
-
- def has_version(self):
- try:
- self.version
- except ValueError:
- issue_warning("Unbuilt egg for " + repr(self))
- return False
- return True
-
- def clone(self, **kw):
- """Copy this distribution, substituting in any changed keyword args"""
- names = 'project_name version py_version platform location precedence'
- for attr in names.split():
- kw.setdefault(attr, getattr(self, attr, None))
- kw.setdefault('metadata', self._provider)
- return self.__class__(**kw)
-
- @property
- def extras(self):
- return [dep for dep in self._dep_map if dep]
-
-
-class EggInfoDistribution(Distribution):
- def _reload_version(self):
- """
- Packages installed by distutils (e.g. numpy or scipy)
- use an old safe_version, and so
- their version numbers can get mangled when
- converted to filenames (e.g., 1.11.0.dev0+2329eae to
- 1.11.0.dev0_2329eae). These distributions will not be
- parsed properly
- downstream by Distribution and safe_version, so
- take an extra step and try to get the version number from
- the metadata file itself instead of the filename.
- """
- md_version = self._get_version()
- if md_version:
- self._version = md_version
- return self
-
-
-class DistInfoDistribution(Distribution):
- """
- Wrap an actual or potential sys.path entry
- w/metadata, .dist-info style.
- """
- PKG_INFO = 'METADATA'
- EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
-
- @property
- def _parsed_pkg_info(self):
- """Parse and cache metadata"""
- try:
- return self._pkg_info
- except AttributeError:
- metadata = self.get_metadata(self.PKG_INFO)
- self._pkg_info = email.parser.Parser().parsestr(metadata)
- return self._pkg_info
-
- @property
- def _dep_map(self):
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._compute_dependencies()
- return self.__dep_map
-
- def _compute_dependencies(self):
- """Recompute this distribution's dependencies."""
- dm = self.__dep_map = {None: []}
-
- reqs = []
- # Requires-Dist entries, including any environment-marker conditions
- for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
- reqs.extend(parse_requirements(req))
-
- def reqs_for_extra(extra):
- for req in reqs:
- if not req.marker or req.marker.evaluate({'extra': extra}):
- yield req
-
- common = frozenset(reqs_for_extra(None))
- dm[None].extend(common)
-
- for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
- s_extra = safe_extra(extra.strip())
- dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
-
- return dm
-
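The same computation can be reproduced against a made-up METADATA fragment with packaging's Requirement class (a sketch, not the module's own code path):

    from packaging.requirements import Requirement

    requires_dist = ['idna>=2.0', 'cryptography>=1.3 ; extra == "secure"']
    reqs = [Requirement(r) for r in requires_dist]

    def reqs_for_extra(extra):
        for req in reqs:
            if not req.marker or req.marker.evaluate({'extra': extra}):
                yield req

    common = frozenset(reqs_for_extra(None))
    dep_map = {None: list(common)}
    dep_map['secure'] = list(frozenset(reqs_for_extra('secure')) - common)
    print([str(r) for r in dep_map[None]])      # ['idna>=2.0']
    print([str(r) for r in dep_map['secure']])  # ['cryptography>=1.3; extra == "secure"']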
-
-_distributionImpl = {
- '.egg': Distribution,
- '.egg-info': EggInfoDistribution,
- '.dist-info': DistInfoDistribution,
-}
-
-
-def issue_warning(*args, **kw):
- level = 1
- g = globals()
- try:
- # find the first stack frame that is *not* code in
- # the pkg_resources module, to use for the warning
- while sys._getframe(level).f_globals is g:
- level += 1
- except ValueError:
- pass
- warnings.warn(stacklevel=level + 1, *args, **kw)
-
-
-def parse_requirements(strs):
- """Yield ``Requirement`` objects for each specification in `strs`
-
- `strs` must be a string, or a (possibly-nested) iterable thereof.
- """
- # create a steppable iterator, so we can handle \-continuations
- lines = iter(yield_lines(strs))
-
- for line in lines:
- # Drop comments -- a hash without a space may be in a URL.
- if ' #' in line:
- line = line[:line.find(' #')]
- # If there is a line continuation, drop it, and append the next line.
- if line.endswith('\\'):
- line = line[:-2].strip()
- try:
- line += next(lines)
- except StopIteration:
- return
- yield Requirement(line)
-
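An input that exercises both the comment stripping and the backslash continuation handled above (package names are arbitrary):

    import pkg_resources

    text = """
    requests>=2.0  # a trailing comment is dropped
    flask[async] \\
    >=2.0
    """
    for req in pkg_resources.parse_requirements(text):
        print(req.project_name, req.specs, req.extras)
    # requests [('>=', '2.0')] ()
    # flask [('>=', '2.0')] ('async',)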
-
-class RequirementParseError(packaging.requirements.InvalidRequirement):
- "Compatibility wrapper for InvalidRequirement"
-
-
-class Requirement(packaging.requirements.Requirement):
- def __init__(self, requirement_string):
- """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
- super(Requirement, self).__init__(requirement_string)
- self.unsafe_name = self.name
- project_name = safe_name(self.name)
- self.project_name, self.key = project_name, project_name.lower()
- self.specs = [
- (spec.operator, spec.version) for spec in self.specifier]
- self.extras = tuple(map(safe_extra, self.extras))
- self.hashCmp = (
- self.key,
- self.url,
- self.specifier,
- frozenset(self.extras),
- str(self.marker) if self.marker else None,
- )
- self.__hash = hash(self.hashCmp)
-
- def __eq__(self, other):
- return (
- isinstance(other, Requirement) and
- self.hashCmp == other.hashCmp
- )
-
- def __ne__(self, other):
- return not self == other
-
- def __contains__(self, item):
- if isinstance(item, Distribution):
- if item.key != self.key:
- return False
-
- item = item.version
-
- # Allow prereleases always in order to match the previous behavior of
- # this method. In the future this should be smarter and follow PEP 440
- # more accurately.
- return self.specifier.contains(item, prereleases=True)
-
- def __hash__(self):
- return self.__hash
-
- def __repr__(self):
- return "Requirement.parse(%r)" % str(self)
-
- @staticmethod
- def parse(s):
- req, = parse_requirements(s)
- return req
-
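Requirement.parse() plus the __contains__ check defined above, with an invented project:

    import pkg_resources

    req = pkg_resources.Requirement.parse('Sample-Pkg[extra1]>=1.0,<2.0')
    print(req.key)      # sample-pkg
    print(req.extras)   # ('extra1',)

    dist = pkg_resources.Distribution(project_name='Sample-Pkg', version='1.4')
    print(dist in req)   # True: keys match and 1.4 satisfies the specifier
    print('1.4' in req)  # version strings can be tested directly too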
-
-def _always_object(classes):
- """
- Ensure object appears in the mro even
- for old-style classes.
- """
- if object not in classes:
- return classes + (object,)
- return classes
-
-
-def _find_adapter(registry, ob):
- """Return an adapter factory for `ob` from `registry`"""
- types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
- for t in types:
- if t in registry:
- return registry[t]
-
-
-def ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
- os.makedirs(dirname, exist_ok=True)
-
-
-def _bypass_ensure_directory(path):
- """Sandbox-bypassing version of ensure_directory()"""
- if not WRITE_SUPPORT:
- raise IOError('"os.mkdir" not supported on this platform.')
- dirname, filename = split(path)
- if dirname and filename and not isdir(dirname):
- _bypass_ensure_directory(dirname)
- try:
- mkdir(dirname, 0o755)
- except FileExistsError:
- pass
-
-
-def split_sections(s):
- """Split a string or iterable thereof into (section, content) pairs
-
- Each ``section`` is a stripped version of the section header ("[section]")
- and each ``content`` is a list of stripped lines excluding blank lines and
- comment-only lines. If there are any such lines before the first section
- header, they're returned in a first ``section`` of ``None``.
- """
- section = None
- content = []
- for line in yield_lines(s):
- if line.startswith("["):
- if line.endswith("]"):
- if section or content:
- yield section, content
- section = line[1:-1].strip()
- content = []
- else:
- raise ValueError("Invalid section heading", line)
- else:
- content.append(line)
-
- # wrap up last segment
- yield section, content
-
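split_sections() over a requires.txt-style payload, of the kind consumed by _build_dep_map() further up:

    import pkg_resources

    requires_txt = """
    docutils>=0.3

    [ssl]
    cert-lib
    """
    for section, content in pkg_resources.split_sections(requires_txt):
        print(section, content)
    # None ['docutils>=0.3']
    # ssl ['cert-lib']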
-
-def _mkstemp(*args, **kw):
- old_open = os.open
- try:
- # temporarily bypass sandboxing
- os.open = os_open
- return tempfile.mkstemp(*args, **kw)
- finally:
- # and then put it back
- os.open = old_open
-
-
-# Yandex resource support
-from __res import Y_PYTHON_SOURCE_ROOT, ResourceImporter, executable
-from library.python import resource
-
-
-class ResProvider(EmptyProvider):
- _resource_fs = {}
-
- def __init__(self, prefix):
- if hasattr(prefix, '__file__'):
- key = prefix.__file__.rsplit('/', 1)[0]
- self.module_path = 'resfs/file/{}/'.format(key)
- # The metadata lives one level above the package itself
- key = key.rsplit('/', 1)[0]
- self.egg_info = 'resfs/file/{}/.dist-info/'.format(key)
- else:
- # We only get here from ResDistribution, which works with the
- # metadata only, so self.module_path is not used
- self.egg_info = prefix
-
- @staticmethod
- def from_module(module):
- if Y_PYTHON_SOURCE_ROOT:
- return DefaultProvider(module)
- else:
- return ResProvider(module)
-
- def _fn(self, base, resource_name):
- return base + resource_name
-
- def _has(self, path):
- return resource.find(path) is not None
-
- def _get(self, path):
- result = resource.find(path)
- if result is None:
- raise IOError(path)
- return result
-
- @classmethod
- def _init_resource_fs(cls):
- for path in resource.iterkeys(b'resfs/file/'):
- path_str = path.decode('utf-8')
- components = path_str.split('/')
- for i in range(len(components)):
- subpath = os.path.normpath('/'.join(components[:i]))
- cls._resource_fs.setdefault(subpath, set()).add(components[i])
-
- def __lookup(self, path):
- if not self._resource_fs:
- self._init_resource_fs()
- path = os.path.normpath(path)
- return self._resource_fs.get(path)
-
- def _listdir(self, path):
- result = self.__lookup(path)
- if result is None:
- return []
- return list(result)
-
- def _isdir(self, path):
- return bool(self.__lookup(path))
-
-
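A standalone sketch of the _init_resource_fs() index: the flat resfs key space is folded into a {directory: entries} map so that _listdir()/_isdir() can answer filesystem-style questions (keys invented, POSIX paths assumed):

    import os

    keys = ['resfs/file/pkg/a.py', 'resfs/file/pkg/sub/b.py']
    fs = {}
    for path in keys:
        parts = path.split('/')
        for i in range(len(parts)):
            subpath = os.path.normpath('/'.join(parts[:i]))
            fs.setdefault(subpath, set()).add(parts[i])
    print(sorted(fs['resfs/file/pkg']))   # ['a.py', 'sub']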
-class ResDistribution(DistInfoDistribution):
- def __init__(self, prefix):
- super(ResDistribution, self).__init__(
- location=executable,
- metadata=ResProvider(prefix),
- precedence=BINARY_DIST,
- )
- self.project_name = self._parsed_pkg_info.get('Name', self.project_name)
-
-
-def find_in_res(importer, path_item, only=False):
- for key in resource.iterkeys():
- if key.endswith('.dist-info/METADATA') and not key.startswith('resfs/src/'):
- yield ResDistribution(key[:-8])
-
-
-register_finder(ResourceImporter, find_in_res)
-register_loader_type(ResourceImporter, ResProvider.from_module)
-
-
-# Silence the PEP440Warning by default, so that end users don't get hit by it
-# randomly just because they use pkg_resources. We want to append the rule
-# because we want earlier uses of filterwarnings to take precedence over this
-# one.
-warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
-
-
-# from jaraco.functools 1.3
-def _call_aside(f, *args, **kwargs):
- f(*args, **kwargs)
- return f
-
-
-@_call_aside
-def _initialize(g=globals()):
- "Set up global resource manager (deliberately not state-saved)"
- manager = ResourceManager()
- g['_manager'] = manager
- g.update(
- (name, getattr(manager, name))
- for name in dir(manager)
- if not name.startswith('_')
- )
-
-
-class PkgResourcesDeprecationWarning(Warning):
- """
- Base class for warning about deprecations in ``pkg_resources``
-
- This class is not derived from ``DeprecationWarning``, and as such is
- visible by default.
- """
-
-
-@_call_aside
-def _initialize_master_working_set():
- """
- Prepare the master working set and make the ``require()``
- API available.
-
- This function has explicit effects on the global state
- of pkg_resources. It is intended to be invoked once at
- the initialization of this module.
-
- Invocation by other packages is unsupported and done
- at their own risk.
- """
- working_set = WorkingSet._build_master()
- _declare_state('object', working_set=working_set)
-
- require = working_set.require
- iter_entry_points = working_set.iter_entry_points
- add_activation_listener = working_set.subscribe
- run_script = working_set.run_script
- # backward compatibility
- run_main = run_script
- # Activate all distributions already on sys.path with replace=False and
- # ensure that all distributions added to the working set in the future
- # (e.g. by calling ``require()``) will get activated as well,
- # with higher priority (replace=True).
- tuple(
- dist.activate(replace=False)
- for dist in working_set
- )
- add_activation_listener(
- lambda dist: dist.activate(replace=True),
- existing=False,
- )
- working_set.entries = []
- # rebuild entries to match the order of sys.path
- list(map(working_set.add_entry, sys.path))
- globals().update(locals())
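
Once the module is imported, the working-set API bound above is available as module globals; for example:

    import pkg_resources

    # Every distribution currently importable from sys.path:
    for dist in pkg_resources.working_set:
        print(dist.project_name, dist.version)

    # require() resolves a requirement string against the working set,
    # activating and returning the matching distributions (raising
    # DistributionNotFound or VersionConflict on failure):
    print(pkg_resources.require('setuptools')[0])
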
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/__init__.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/__init__.py
+++ /dev/null
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/appdirs.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/appdirs.py
deleted file mode 100644
index ae67001af8b..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/appdirs.py
+++ /dev/null
@@ -1,608 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See <http://github.com/ActiveState/appdirs> for details and usage.
-"""
-# Dev Notes:
-# - MSDN on where to store app data files:
-# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
-__version_info__ = (1, 4, 3)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import sys
-import os
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- unicode = str
-
-if sys.platform.startswith('java'):
- import platform
- os_name = platform.java_ver()[3][0]
- if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
- system = 'win32'
- elif os_name.startswith('Mac'): # "Mac OS X", etc.
- system = 'darwin'
- else: # "Linux", "SunOS", "FreeBSD", etc.
- # Setting this to "linux2" is not ideal, but only Windows or Mac
- # are actually checked for and the rest of the module expects
- # *sys.platform* style strings.
- system = 'linux2'
-else:
- system = sys.platform
-
-
-
-def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific data dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user data directories are:
- Mac OS X: ~/Library/Application Support/<AppName>
- Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
- Win XP (roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
- Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
- Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
- Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
- For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
- That means, by default "~/.local/share/<AppName>".
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
- path = os.path.normpath(_get_win_folder(const))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Application Support/')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
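Example calls; the results shown are the typical ones from the docstring and of course vary by platform and user. The vendored copy imports as pkg_resources._vendor.appdirs, the standalone one as appdirs:

    from appdirs import user_data_dir

    print(user_data_dir('MyApp', 'MyCompany'))
    # Linux:   /home/<user>/.local/share/MyApp
    # macOS:   /Users/<user>/Library/Application Support/MyApp
    # Windows: C:\Users\<user>\AppData\Local\MyCompany\MyApp
    print(user_data_dir('MyApp', 'MyCompany', version='1.0', roaming=True))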
-
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
- r"""Return full path to the user-shared data dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of data dirs should be
- returned. By default, the first item from XDG_DATA_DIRS is
- returned, or '/usr/local/share/<AppName>',
- if XDG_DATA_DIRS is not set
-
- Typical site data directories are:
- Mac OS X: /Library/Application Support/<AppName>
- Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
- Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
- Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
-
- For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('/Library/Application Support')
- if appname:
- path = os.path.join(path, appname)
- else:
- # XDG default for $XDG_DATA_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_DATA_DIRS',
- os.pathsep.join(['/usr/local/share', '/usr/share']))
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific config dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user config directories are:
- Mac OS X: same as user_data_dir
- Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
- That means, by default "~/.config/<AppName>".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
- r"""Return full path to the user-shared config dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of config dirs should be
- returned. By default, the first item from XDG_CONFIG_DIRS is
- returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-
- Typical site config directories are:
- Mac OS X: same as site_data_dir
- Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
- $XDG_CONFIG_DIRS
- Win *: same as site_data_dir
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
- For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system in ["win32", "darwin"]:
- path = site_data_dir(appname, appauthor)
- if appname and version:
- path = os.path.join(path, version)
- else:
- # XDG default for $XDG_CONFIG_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
-
-def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific cache dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Cache" to the base app data dir for Windows. See
- discussion below.
-
- Typical user cache directories are:
- Mac OS X: ~/Library/Caches/<AppName>
- Unix: ~/.cache/<AppName> (XDG default)
- Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
- Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-
- On Windows the only suggestion in the MSDN docs is that local settings go in
- the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
- app data dir (the default returned by `user_data_dir` above). Apps typically
- put cache data somewhere *under* the given dir here. Some examples:
- ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
- ...\Acme\SuperApp\Cache\1.0
- OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
- This can be disabled with the `opinion=False` option.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- if opinion:
- path = os.path.join(path, "Cache")
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Caches')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific state dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user state directories are:
- Mac OS X: same as user_data_dir
- Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
- to extend the XDG spec and support $XDG_STATE_HOME.
-
- That means, by default "~/.local/state/<AppName>".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific log dir for this application.
-
- "appname" is the name of the application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Logs" to the base app data dir for Windows, and "log" to the
- base cache dir for Unix. See discussion below.
-
- Typical user log directories are:
- Mac OS X: ~/Library/Logs/<AppName>
- Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
- Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
- Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-
- On Windows the only suggestion in the MSDN docs is that local settings
- go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
- examples of what some windows apps use for a logs dir.)
-
- OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
- value for Windows and appends "log" to the user cache dir for Unix.
- This can be disabled with the `opinion=False` option.
- """
- if system == "darwin":
- path = os.path.join(
- os.path.expanduser('~/Library/Logs'),
- appname)
- elif system == "win32":
- path = user_data_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "Logs")
- else:
- path = user_cache_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "log")
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-class AppDirs(object):
- """Convenience wrapper for getting application dirs."""
- def __init__(self, appname=None, appauthor=None, version=None,
- roaming=False, multipath=False):
- self.appname = appname
- self.appauthor = appauthor
- self.version = version
- self.roaming = roaming
- self.multipath = multipath
-
- @property
- def user_data_dir(self):
- return user_data_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_data_dir(self):
- return site_data_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_config_dir(self):
- return user_config_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_config_dir(self):
- return site_config_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_cache_dir(self):
- return user_cache_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_state_dir(self):
- return user_state_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_log_dir(self):
- return user_log_dir(self.appname, self.appauthor,
- version=self.version)
-
-
-#---- internal support stuff
-
-def _get_win_folder_from_registry(csidl_name):
- """This is a fallback technique at best. I'm not sure if using the
- registry for this guarantees us the correct answer for all CSIDL_*
- names.
- """
- if PY3:
- import winreg as _winreg
- else:
- import _winreg
-
- shell_folder_name = {
- "CSIDL_APPDATA": "AppData",
- "CSIDL_COMMON_APPDATA": "Common AppData",
- "CSIDL_LOCAL_APPDATA": "Local AppData",
- }[csidl_name]
-
- key = _winreg.OpenKey(
- _winreg.HKEY_CURRENT_USER,
- r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
- )
- dir, type = _winreg.QueryValueEx(key, shell_folder_name)
- return dir
-
-
-def _get_win_folder_with_pywin32(csidl_name):
- from win32com.shell import shellcon, shell
- dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
- # Try to make this a unicode path because SHGetFolderPath does
- # not return unicode strings when there is unicode data in the
- # path.
- try:
- dir = unicode(dir)
-
- # Downgrade to short path name if it has high-bit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- try:
- import win32api
- dir = win32api.GetShortPathName(dir)
- except ImportError:
- pass
- except UnicodeError:
- pass
- return dir
-
-
-def _get_win_folder_with_ctypes(csidl_name):
- import ctypes
-
- csidl_const = {
- "CSIDL_APPDATA": 26,
- "CSIDL_COMMON_APPDATA": 35,
- "CSIDL_LOCAL_APPDATA": 28,
- }[csidl_name]
-
- buf = ctypes.create_unicode_buffer(1024)
- ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
- # Downgrade to short path name if it has high-bit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in buf:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf2 = ctypes.create_unicode_buffer(1024)
- if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
- buf = buf2
-
- return buf.value
-
-def _get_win_folder_with_jna(csidl_name):
- import array
- from com.sun import jna
- from com.sun.jna.platform import win32
-
- buf_size = win32.WinDef.MAX_PATH * 2
- buf = array.zeros('c', buf_size)
- shell = win32.Shell32.INSTANCE
- shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- # Downgrade to short path name if it has high-bit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf = array.zeros('c', buf_size)
- kernel = win32.Kernel32.INSTANCE
- if kernel.GetShortPathName(dir, buf, buf_size):
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- return dir
-
-if system == "win32":
- try:
- import win32com.shell
- _get_win_folder = _get_win_folder_with_pywin32
- except ImportError:
- try:
- from ctypes import windll
- _get_win_folder = _get_win_folder_with_ctypes
- except ImportError:
- try:
- import com.sun.jna
- _get_win_folder = _get_win_folder_with_jna
- except ImportError:
- _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
- appname = "MyApp"
- appauthor = "MyCompany"
-
- props = ("user_data_dir",
- "user_config_dir",
- "user_cache_dir",
- "user_state_dir",
- "user_log_dir",
- "site_data_dir",
- "site_config_dir")
-
- print("-- app dirs %s --" % __version__)
-
- print("-- app dirs (with optional 'version')")
- dirs = AppDirs(appname, appauthor, version="1.0")
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'version')")
- dirs = AppDirs(appname, appauthor)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'appauthor')")
- dirs = AppDirs(appname)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (with disabled 'appauthor')")
- dirs = AppDirs(appname, appauthor=False)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__about__.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__about__.py
deleted file mode 100644
index c359122f971..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__about__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
-
-__title__ = "packaging"
-__summary__ = "Core utilities for Python packages"
-__uri__ = "https://github.com/pypa/packaging"
-
-__version__ = "21.2"
-
-__author__ = "Donald Stufft and individual contributors"
-__email__ = "[email protected]"
-
-__license__ = "BSD-2-Clause or Apache-2.0"
-__copyright__ = "2014-2019 %s" % __author__
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__init__.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__init__.py
deleted file mode 100644
index 3c50c5dcfee..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from .__about__ import (
- __author__,
- __copyright__,
- __email__,
- __license__,
- __summary__,
- __title__,
- __uri__,
- __version__,
-)
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_manylinux.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_manylinux.py
deleted file mode 100644
index 4c379aa6f69..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_manylinux.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import collections
-import functools
-import os
-import re
-import struct
-import sys
-import warnings
-from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
-
-
-# Python does not provide platform information at sufficient granularity to
-# identify the architecture of the running executable in some cases, so we
-# determine it dynamically by reading the information from the running
-# process. This only applies on Linux, which uses the ELF format.
-class _ELFFileHeader:
- # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
- class _InvalidELFFileHeader(ValueError):
- """
- An invalid ELF file header was found.
- """
-
- ELF_MAGIC_NUMBER = 0x7F454C46
- ELFCLASS32 = 1
- ELFCLASS64 = 2
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
- EM_386 = 3
- EM_S390 = 22
- EM_ARM = 40
- EM_X86_64 = 62
- EF_ARM_ABIMASK = 0xFF000000
- EF_ARM_ABI_VER5 = 0x05000000
- EF_ARM_ABI_FLOAT_HARD = 0x00000400
-
- def __init__(self, file: IO[bytes]) -> None:
- def unpack(fmt: str) -> int:
- try:
- data = file.read(struct.calcsize(fmt))
- result: Tuple[int, ...] = struct.unpack(fmt, data)
- except struct.error:
- raise _ELFFileHeader._InvalidELFFileHeader()
- return result[0]
-
- self.e_ident_magic = unpack(">I")
- if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_class = unpack("B")
- if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_data = unpack("B")
- if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_version = unpack("B")
- self.e_ident_osabi = unpack("B")
- self.e_ident_abiversion = unpack("B")
- self.e_ident_pad = file.read(7)
- format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
- format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
- format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
- format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
- self.e_type = unpack(format_h)
- self.e_machine = unpack(format_h)
- self.e_version = unpack(format_i)
- self.e_entry = unpack(format_p)
- self.e_phoff = unpack(format_p)
- self.e_shoff = unpack(format_p)
- self.e_flags = unpack(format_i)
- self.e_ehsize = unpack(format_h)
- self.e_phentsize = unpack(format_h)
- self.e_phnum = unpack(format_h)
- self.e_shentsize = unpack(format_h)
- self.e_shnum = unpack(format_h)
- self.e_shstrndx = unpack(format_h)
-
-
-def _get_elf_header() -> Optional[_ELFFileHeader]:
- try:
- with open(sys.executable, "rb") as f:
- elf_header = _ELFFileHeader(f)
- except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
- return None
- return elf_header
-
-
-def _is_linux_armhf() -> bool:
- # hard-float ABI can be detected from the ELF header of the running
- # process
- # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
- elf_header = _get_elf_header()
- if elf_header is None:
- return False
- result = elf_header.e_ident_class == elf_header.ELFCLASS32
- result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
- result &= elf_header.e_machine == elf_header.EM_ARM
- result &= (
- elf_header.e_flags & elf_header.EF_ARM_ABIMASK
- ) == elf_header.EF_ARM_ABI_VER5
- result &= (
- elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
- ) == elf_header.EF_ARM_ABI_FLOAT_HARD
- return result
-
-
-def _is_linux_i686() -> bool:
- elf_header = _get_elf_header()
- if elf_header is None:
- return False
- result = elf_header.e_ident_class == elf_header.ELFCLASS32
- result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
- result &= elf_header.e_machine == elf_header.EM_386
- return result
-
-
-def _have_compatible_abi(arch: str) -> bool:
- if arch == "armv7l":
- return _is_linux_armhf()
- if arch == "i686":
- return _is_linux_i686()
- return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
-
-
-# If glibc ever changes its major version, we need to know what the last
-# minor version was, so we can build the complete list of all versions.
-# For now, guess what the highest minor version might be, assume it will
-# be 50 for testing. Once this actually happens, update the dictionary
-# with the actual value.
-_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
-
-
-class _GLibCVersion(NamedTuple):
- major: int
- minor: int
-
-
-def _glibc_version_string_confstr() -> Optional[str]:
- """
- Primary implementation of glibc_version_string using os.confstr.
- """
- # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
- # to be broken or missing. This strategy is used in the standard library
- # platform module.
- # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
- try:
- # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
- version_string = os.confstr("CS_GNU_LIBC_VERSION")
- assert version_string is not None
- _, version = version_string.split()
- except (AssertionError, AttributeError, OSError, ValueError):
- # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
- return None
- return version
-
-
-def _glibc_version_string_ctypes() -> Optional[str]:
- """
- Fallback implementation of glibc_version_string using ctypes.
- """
- try:
- import ctypes
- except ImportError:
- return None
-
- # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
- # manpage says, "If filename is NULL, then the returned handle is for the
- # main program". This way we can let the linker do the work to figure out
- # which libc our process is actually using.
- #
- # We must also handle the special case where the executable is not a
- # dynamically linked executable. This can occur when using musl libc,
- # for example. In this situation, dlopen() will error, leading to an
- # OSError. Interestingly, at least in the case of musl, there is no
- # errno set on the OSError. The single string argument used to construct
- # OSError comes from libc itself and is therefore not portable to
- # hard code here. In any case, failure to call dlopen() means we
- # cannot proceed, so we bail on our attempt.
- try:
- process_namespace = ctypes.CDLL(None)
- except OSError:
- return None
-
- try:
- gnu_get_libc_version = process_namespace.gnu_get_libc_version
- except AttributeError:
- # Symbol doesn't exist -> therefore, we are not linked to
- # glibc.
- return None
-
- # Call gnu_get_libc_version, which returns a string like "2.5"
- gnu_get_libc_version.restype = ctypes.c_char_p
- version_str: str = gnu_get_libc_version()
- # py2 / py3 compatibility:
- if not isinstance(version_str, str):
- version_str = version_str.decode("ascii")
-
- return version_str
-
-
-def _glibc_version_string() -> Optional[str]:
- """Returns glibc version string, or None if not using glibc."""
- return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
-
-
-def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
- """Parse glibc version.
-
- We use a regexp instead of str.split because we want to discard any
- random junk that might come after the minor version -- this might happen
- in patched/forked versions of glibc (e.g. Linaro's version of glibc
- uses version strings like "2.20-2014.11"). See gh-3588.
- """
- m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
- if not m:
- warnings.warn(
- "Expected glibc version with 2 components major.minor,"
- " got: %s" % version_str,
- RuntimeWarning,
- )
- return -1, -1
- return int(m.group("major")), int(m.group("minor"))
-
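The regexp's behaviour on the cases mentioned in the docstring (standalone re-implementation for illustration):

    import re

    def parse(version_str):
        m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
        return (int(m.group("major")), int(m.group("minor"))) if m else (-1, -1)

    print(parse("2.17"))          # (2, 17)
    print(parse("2.20-2014.11"))  # (2, 20): Linaro-style suffix discarded
    print(parse("garbage"))       # (-1, -1)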
-
-def _get_glibc_version() -> Tuple[int, int]:
- version_str = _glibc_version_string()
- if version_str is None:
- return (-1, -1)
- return _parse_glibc_version(version_str)
-
-
-# From PEP 513, PEP 600
-def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
- sys_glibc = _get_glibc_version()
- if sys_glibc < version:
- return False
- # Check for presence of _manylinux module.
- try:
- import _manylinux # noqa
- except ImportError:
- return True
- if hasattr(_manylinux, "manylinux_compatible"):
- result = _manylinux.manylinux_compatible(version[0], version[1], arch)
- if result is not None:
- return bool(result)
- return True
- if version == _GLibCVersion(2, 5):
- if hasattr(_manylinux, "manylinux1_compatible"):
- return bool(_manylinux.manylinux1_compatible)
- if version == _GLibCVersion(2, 12):
- if hasattr(_manylinux, "manylinux2010_compatible"):
- return bool(_manylinux.manylinux2010_compatible)
- if version == _GLibCVersion(2, 17):
- if hasattr(_manylinux, "manylinux2014_compatible"):
- return bool(_manylinux.manylinux2014_compatible)
- return True
-
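The _manylinux hook checked above is a module a distro can place on sys.path to override the heuristics, per PEP 600; a hypothetical _manylinux.py:

    # Hypothetical _manylinux.py provided by a distribution vendor.

    def manylinux_compatible(tag_major, tag_minor, tag_arch):
        # Return True/False to force the decision, or None to fall back
        # to the default glibc-version heuristics.
        if tag_arch == "armv7l":
            return False
        return None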
-
-_LEGACY_MANYLINUX_MAP = {
- # CentOS 7 w/ glibc 2.17 (PEP 599)
- (2, 17): "manylinux2014",
- # CentOS 6 w/ glibc 2.12 (PEP 571)
- (2, 12): "manylinux2010",
- # CentOS 5 w/ glibc 2.5 (PEP 513)
- (2, 5): "manylinux1",
-}
-
-
-def platform_tags(linux: str, arch: str) -> Iterator[str]:
- if not _have_compatible_abi(arch):
- return
- # Oldest glibc to be supported regardless of architecture is (2, 17).
- too_old_glibc2 = _GLibCVersion(2, 16)
- if arch in {"x86_64", "i686"}:
- # On x86/i686 the oldest glibc to be supported is (2, 5).
- too_old_glibc2 = _GLibCVersion(2, 4)
- current_glibc = _GLibCVersion(*_get_glibc_version())
- glibc_max_list = [current_glibc]
- # We can assume compatibility across glibc major versions.
- # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
- #
- # Build a list of maximum glibc versions so that we can
- # output the canonical list of all glibc from current_glibc
- # down to too_old_glibc2, including all intermediary versions.
- for glibc_major in range(current_glibc.major - 1, 1, -1):
- glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
- glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
- for glibc_max in glibc_max_list:
- if glibc_max.major == too_old_glibc2.major:
- min_minor = too_old_glibc2.minor
- else:
- # For other glibc major versions oldest supported is (x, 0).
- min_minor = -1
- for glibc_minor in range(glibc_max.minor, min_minor, -1):
- glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
- tag = "manylinux_{}_{}".format(*glibc_version)
- if _is_compatible(tag, arch, glibc_version):
- yield linux.replace("linux", tag)
- # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
- if glibc_version in _LEGACY_MANYLINUX_MAP:
- legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
- if _is_compatible(legacy_tag, arch, glibc_version):
- yield linux.replace("linux", legacy_tag)
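
A hypothetical invocation; the import path is packaging._manylinux in the standalone distribution, while here the module is vendored under pkg_resources._vendor.packaging. Output depends on the running interpreter's glibc:

    from packaging._manylinux import platform_tags

    for tag in platform_tags('linux_x86_64', 'x86_64'):
        print(tag)
    # e.g. manylinux_2_17_x86_64, manylinux2014_x86_64,
    #      manylinux_2_16_x86_64, ..., manylinux_2_5_x86_64,
    #      manylinux1_x86_64
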
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_musllinux.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_musllinux.py
deleted file mode 100644
index 85450fafa34..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_musllinux.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""PEP 656 support.
-
-This module implements logic to detect if the currently running Python is
-linked against musl, and what musl version is used.
-"""
-
-import contextlib
-import functools
-import operator
-import os
-import re
-import struct
-import subprocess
-import sys
-from typing import IO, Iterator, NamedTuple, Optional, Tuple
-
-
-def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
- return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
-def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
- """Detect musl libc location by parsing the Python executable.
-
- Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
- ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
- """
- f.seek(0)
- try:
- ident = _read_unpacked(f, "16B")
- except struct.error:
- return None
- if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
- return None
- f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
-
- try:
- # e_fmt: Format for the remaining ELF header fields.
- # p_fmt: Format for a program header entry.
- # p_idx: Indexes to find p_type, p_offset, and p_filesz.
- e_fmt, p_fmt, p_idx = {
- 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
- 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
- }[ident[4]]
- except KeyError:
- return None
- else:
- p_get = operator.itemgetter(*p_idx)
-
- # Find the interpreter section and return its content.
- try:
- _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
- except struct.error:
- return None
- for i in range(e_phnum + 1):
- f.seek(e_phoff + e_phentsize * i)
- try:
- p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
- except struct.error:
- return None
- if p_type != 3: # Not PT_INTERP.
- continue
- f.seek(p_offset)
- interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
- if "musl" not in interpreter:
- return None
- return interpreter
- return None
-
-
-class _MuslVersion(NamedTuple):
- major: int
- minor: int
-
-
-def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
- lines = [n for n in (n.strip() for n in output.splitlines()) if n]
- if len(lines) < 2 or lines[0][:4] != "musl":
- return None
- m = re.match(r"Version (\d+)\.(\d+)", lines[1])
- if not m:
- return None
- return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
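Feeding _parse_musl_version() the loader banner quoted in _get_musl_version() below (import path as in the standalone packaging distribution):

    from packaging._musllinux import _parse_musl_version

    banner = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    print(_parse_musl_version(banner))   # _MuslVersion(major=1, minor=2)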
-
-def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
- """Detect currently-running musl runtime version.
-
- This is done by checking the specified executable's dynamic linking
- information, and invoking the loader to parse its output for a version
- string. If the loader is musl, the output would be something like::
-
- musl libc (x86_64)
- Version 1.2.2
- Dynamic Program Loader
- """
- with contextlib.ExitStack() as stack:
- try:
- f = stack.enter_context(open(executable, "rb"))
- except IOError:
- return None
- ld = _parse_ld_musl_from_elf(f)
- if not ld:
- return None
- proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
- return _parse_musl_version(proc.stderr)
-
-
-def platform_tags(arch: str) -> Iterator[str]:
- """Generate musllinux tags compatible to the current platform.
-
- :param arch: Should be the part of platform tag after the ``linux_``
- prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
- prerequisite for the current platform to be musllinux-compatible.
-
- :returns: An iterator of compatible musllinux tags.
- """
- sys_musl = _get_musl_version(sys.executable)
- if sys_musl is None: # Python not dynamically linked against musl.
- return
- for minor in range(sys_musl.minor, -1, -1):
- yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
-
-if __name__ == "__main__": # pragma: no cover
- import sysconfig
-
- plat = sysconfig.get_platform()
- assert plat.startswith("linux-"), "not linux"
-
- print("plat:", plat)
- print("musl:", _get_musl_version(sys.executable))
- print("tags:", end=" ")
- for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
- print(t, end="\n ")
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_structures.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_structures.py
deleted file mode 100644
index 951549753af..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/_structures.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-
-class InfinityType:
- def __repr__(self) -> str:
- return "Infinity"
-
- def __hash__(self) -> int:
- return hash(repr(self))
-
- def __lt__(self, other: object) -> bool:
- return False
-
- def __le__(self, other: object) -> bool:
- return False
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, self.__class__)
-
- def __ne__(self, other: object) -> bool:
- return not isinstance(other, self.__class__)
-
- def __gt__(self, other: object) -> bool:
- return True
-
- def __ge__(self, other: object) -> bool:
- return True
-
- def __neg__(self: object) -> "NegativeInfinityType":
- return NegativeInfinity
-
-
-Infinity = InfinityType()
-
-
-class NegativeInfinityType:
- def __repr__(self) -> str:
- return "-Infinity"
-
- def __hash__(self) -> int:
- return hash(repr(self))
-
- def __lt__(self, other: object) -> bool:
- return True
-
- def __le__(self, other: object) -> bool:
- return True
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, self.__class__)
-
- def __ne__(self, other: object) -> bool:
- return not isinstance(other, self.__class__)
-
- def __gt__(self, other: object) -> bool:
- return False
-
- def __ge__(self, other: object) -> bool:
- return False
-
- def __neg__(self: object) -> InfinityType:
- return Infinity
-
-
-NegativeInfinity = NegativeInfinityType()
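
These sentinels compare above and below every other value, which is what lets version tuples be padded out for comparison; a tiny demo (standalone import path shown):

    from packaging._structures import Infinity, NegativeInfinity

    print(Infinity > (99, 99))          # True, regardless of the operand
    print(NegativeInfinity < "0.dev")   # True
    print(sorted([(1, Infinity), (1, 5), (1, NegativeInfinity)]))
    # [(1, -Infinity), (1, 5), (1, Infinity)]
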
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/markers.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/markers.py
deleted file mode 100644
index 18769b09a8a..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/markers.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import operator
-import os
-import platform
-import sys
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-from pkg_resources.extern.pyparsing import ( # noqa: N817
- Forward,
- Group,
- Literal as L,
- ParseException,
- ParseResults,
- QuotedString,
- ZeroOrMore,
- stringEnd,
- stringStart,
-)
-
-from .specifiers import InvalidSpecifier, Specifier
-
-__all__ = [
- "InvalidMarker",
- "UndefinedComparison",
- "UndefinedEnvironmentName",
- "Marker",
- "default_environment",
-]
-
-Operator = Callable[[str, str], bool]
-
-
-class InvalidMarker(ValueError):
- """
- An invalid marker was found; users should refer to PEP 508.
- """
-
-
-class UndefinedComparison(ValueError):
- """
- An invalid operation was attempted on a value that doesn't support it.
- """
-
-
-class UndefinedEnvironmentName(ValueError):
- """
- A name was used that does not exist in the evaluation
- environment.
- """
-
-
-class Node:
- def __init__(self, value: Any) -> None:
- self.value = value
-
- def __str__(self) -> str:
- return str(self.value)
-
- def __repr__(self) -> str:
- return f"<{self.__class__.__name__}('{self}')>"
-
- def serialize(self) -> str:
- raise NotImplementedError
-
-
-class Variable(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-class Value(Node):
- def serialize(self) -> str:
- return f'"{self}"'
-
-
-class Op(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-VARIABLE = (
- L("implementation_version")
- | L("platform_python_implementation")
- | L("implementation_name")
- | L("python_full_version")
- | L("platform_release")
- | L("platform_version")
- | L("platform_machine")
- | L("platform_system")
- | L("python_version")
- | L("sys_platform")
- | L("os_name")
- | L("os.name") # PEP-345
- | L("sys.platform") # PEP-345
- | L("platform.version") # PEP-345
- | L("platform.machine") # PEP-345
- | L("platform.python_implementation") # PEP-345
- | L("python_implementation") # undocumented setuptools legacy
- | L("extra") # PEP-508
-)
-ALIASES = {
- "os.name": "os_name",
- "sys.platform": "sys_platform",
- "platform.version": "platform_version",
- "platform.machine": "platform_machine",
- "platform.python_implementation": "platform_python_implementation",
- "python_implementation": "platform_python_implementation",
-}
-VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
-
-VERSION_CMP = (
- L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
-)
-
-MARKER_OP = VERSION_CMP | L("not in") | L("in")
-MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
-
-MARKER_VALUE = QuotedString("'") | QuotedString('"')
-MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
-
-BOOLOP = L("and") | L("or")
-
-MARKER_VAR = VARIABLE | MARKER_VALUE
-
-MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
-MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
-
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-
-MARKER_EXPR = Forward()
-MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
-MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
-
-MARKER = stringStart + MARKER_EXPR + stringEnd
-
-
-def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
- if isinstance(results, ParseResults):
- return [_coerce_parse_result(i) for i in results]
- else:
- return results
-
-
-def _format_marker(
- marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
-) -> str:
-
- assert isinstance(marker, (list, tuple, str))
-
- # Sometimes we have a structure like [[...]] which is a single item list
- # where the single item is itself its own list. In that case we want to
- # skip the rest of this function so that we don't get extraneous () on
- # the outside.
- if (
- isinstance(marker, list)
- and len(marker) == 1
- and isinstance(marker[0], (list, tuple))
- ):
- return _format_marker(marker[0])
-
- if isinstance(marker, list):
- inner = (_format_marker(m, first=False) for m in marker)
- if first:
- return " ".join(inner)
- else:
- return "(" + " ".join(inner) + ")"
- elif isinstance(marker, tuple):
- return " ".join([m.serialize() for m in marker])
- else:
- return marker
-
-
-_operators: Dict[str, Operator] = {
- "in": lambda lhs, rhs: lhs in rhs,
- "not in": lambda lhs, rhs: lhs not in rhs,
- "<": operator.lt,
- "<=": operator.le,
- "==": operator.eq,
- "!=": operator.ne,
- ">=": operator.ge,
- ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
- try:
- spec = Specifier("".join([op.serialize(), rhs]))
- except InvalidSpecifier:
- pass
- else:
- return spec.contains(lhs)
-
- oper: Optional[Operator] = _operators.get(op.serialize())
- if oper is None:
- raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
- return oper(lhs, rhs)
-
-
-class Undefined:
- pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
- value: Union[str, Undefined] = environment.get(name, _undefined)
-
- if isinstance(value, Undefined):
- raise UndefinedEnvironmentName(
- f"{name!r} does not exist in evaluation environment."
- )
-
- return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
- groups: List[List[bool]] = [[]]
-
- for marker in markers:
- assert isinstance(marker, (list, tuple, str))
-
- if isinstance(marker, list):
- groups[-1].append(_evaluate_markers(marker, environment))
- elif isinstance(marker, tuple):
- lhs, op, rhs = marker
-
- if isinstance(lhs, Variable):
- lhs_value = _get_env(environment, lhs.value)
- rhs_value = rhs.value
- else:
- lhs_value = lhs.value
- rhs_value = _get_env(environment, rhs.value)
-
- groups[-1].append(_eval_op(lhs_value, op, rhs_value))
- else:
- assert marker in ["and", "or"]
- if marker == "or":
- groups.append([])
-
- return any(all(item) for item in groups)
-
-
-def format_full_version(info: "sys._version_info") -> str:
- version = "{0.major}.{0.minor}.{0.micro}".format(info)
- kind = info.releaselevel
- if kind != "final":
- version += kind[0] + str(info.serial)
- return version
-
-
-def default_environment() -> Dict[str, str]:
- iver = format_full_version(sys.implementation.version)
- implementation_name = sys.implementation.name
- return {
- "implementation_name": implementation_name,
- "implementation_version": iver,
- "os_name": os.name,
- "platform_machine": platform.machine(),
- "platform_release": platform.release(),
- "platform_system": platform.system(),
- "platform_version": platform.version(),
- "python_full_version": platform.python_version(),
- "platform_python_implementation": platform.python_implementation(),
- "python_version": ".".join(platform.python_version_tuple()[:2]),
- "sys_platform": sys.platform,
- }
-
-
-class Marker:
- def __init__(self, marker: str) -> None:
- try:
- self._markers = _coerce_parse_result(MARKER.parseString(marker))
- except ParseException as e:
- raise InvalidMarker(
- f"Invalid marker: {marker!r}, parse error at "
- f"{marker[e.loc : e.loc + 8]!r}"
- )
-
- def __str__(self) -> str:
- return _format_marker(self._markers)
-
- def __repr__(self) -> str:
- return f"<Marker('{self}')>"
-
- def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
- """Evaluate a marker.
-
- Return the boolean from evaluating the given marker against the
- environment. environment is an optional argument to override all or
- part of the determined environment.
-
- The environment is determined from the current Python process.
- """
- current_environment = default_environment()
- if environment is not None:
- current_environment.update(environment)
-
- return _evaluate_markers(self._markers, current_environment)
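The Marker API deleted here is also available from the standalone packaging distribution on PyPI. A minimal usage sketch, assuming that distribution is installed:

from packaging.markers import Marker

m = Marker("python_version >= '3.6' and os_name == 'posix'")
# Evaluated against the running interpreter's environment.
print(m.evaluate())
# An explicit environment overrides part of the detected one, as
# Marker.evaluate documents above; 3.5 fails the >= '3.6' clause.
print(m.evaluate({"python_version": "3.5"}))  # False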
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/requirements.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/requirements.py
deleted file mode 100644
index 6af14ec4ce4..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/requirements.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-import string
-import urllib.parse
-from typing import List, Optional as TOptional, Set
-
-from pkg_resources.extern.pyparsing import ( # noqa
- Combine,
- Literal as L,
- Optional,
- ParseException,
- Regex,
- Word,
- ZeroOrMore,
- originalTextFor,
- stringEnd,
- stringStart,
-)
-
-from .markers import MARKER_EXPR, Marker
-from .specifiers import LegacySpecifier, Specifier, SpecifierSet
-
-
-class InvalidRequirement(ValueError):
- """
- An invalid requirement was found; users should refer to PEP 508.
- """
-
-
-ALPHANUM = Word(string.ascii_letters + string.digits)
-
-LBRACKET = L("[").suppress()
-RBRACKET = L("]").suppress()
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-COMMA = L(",").suppress()
-SEMICOLON = L(";").suppress()
-AT = L("@").suppress()
-
-PUNCTUATION = Word("-_.")
-IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
-IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
-
-NAME = IDENTIFIER("name")
-EXTRA = IDENTIFIER
-
-URI = Regex(r"[^ ]+")("url")
-URL = AT + URI
-
-EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
-EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
-
-VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
-VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
-
-VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
-VERSION_MANY = Combine(
- VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
-)("_raw_spec")
-_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
-_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
-
-VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
-VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
-
-MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
-MARKER_EXPR.setParseAction(
- lambda s, l, t: Marker(s[t._original_start : t._original_end])
-)
-MARKER_SEPARATOR = SEMICOLON
-MARKER = MARKER_SEPARATOR + MARKER_EXPR
-
-VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
-URL_AND_MARKER = URL + Optional(MARKER)
-
-NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
-
-REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
-# pkg_resources.extern.pyparsing isn't thread-safe during initialization, so
-# we parse a requirement eagerly here; see issue #104
-REQUIREMENT.parseString("x[]")
-
-
-class Requirement:
- """Parse a requirement.
-
- Parse a given requirement string into its parts, such as name, specifier,
- URL, and extras. Raises InvalidRequirement on a badly-formed requirement
- string.
- """
-
- # TODO: Can we test whether something is contained within a requirement?
- # If so how do we do that? Do we need to test against the _name_ of
- # the thing as well as the version? What about the markers?
- # TODO: Can we normalize the name and extra name?
-
- def __init__(self, requirement_string: str) -> None:
- try:
- req = REQUIREMENT.parseString(requirement_string)
- except ParseException as e:
- raise InvalidRequirement(
- f'Parse error at "{requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
- )
-
- self.name: str = req.name
- if req.url:
- parsed_url = urllib.parse.urlparse(req.url)
- if parsed_url.scheme == "file":
- if urllib.parse.urlunparse(parsed_url) != req.url:
- raise InvalidRequirement("Invalid URL given")
- elif not (parsed_url.scheme and parsed_url.netloc) or (
- not parsed_url.scheme and not parsed_url.netloc
- ):
- raise InvalidRequirement(f"Invalid URL: {req.url}")
- self.url: TOptional[str] = req.url
- else:
- self.url = None
- self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
- self.specifier: SpecifierSet = SpecifierSet(req.specifier)
- self.marker: TOptional[Marker] = req.marker if req.marker else None
-
- def __str__(self) -> str:
- parts: List[str] = [self.name]
-
- if self.extras:
- formatted_extras = ",".join(sorted(self.extras))
- parts.append(f"[{formatted_extras}]")
-
- if self.specifier:
- parts.append(str(self.specifier))
-
- if self.url:
- parts.append(f"@ {self.url}")
- if self.marker:
- parts.append(" ")
-
- if self.marker:
- parts.append(f"; {self.marker}")
-
- return "".join(parts)
-
- def __repr__(self) -> str:
- return f"<Requirement('{self}')>"
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/specifiers.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/specifiers.py
deleted file mode 100644
index ce66bd4addb..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/specifiers.py
+++ /dev/null
@@ -1,828 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import abc
-import functools
-import itertools
-import re
-import warnings
-from typing import (
- Callable,
- Dict,
- Iterable,
- Iterator,
- List,
- Optional,
- Pattern,
- Set,
- Tuple,
- TypeVar,
- Union,
-)
-
-from .utils import canonicalize_version
-from .version import LegacyVersion, Version, parse
-
-ParsedVersion = Union[Version, LegacyVersion]
-UnparsedVersion = Union[Version, LegacyVersion, str]
-VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
-CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
-class InvalidSpecifier(ValueError):
- """
- An invalid specifier was found; users should refer to PEP 440.
- """
-
-
-class BaseSpecifier(metaclass=abc.ABCMeta):
- @abc.abstractmethod
- def __str__(self) -> str:
- """
- Returns the str representation of this Specifier-like object. This
- should be representative of the Specifier itself.
- """
-
- @abc.abstractmethod
- def __hash__(self) -> int:
- """
- Returns a hash value for this Specifier-like object.
- """
-
- @abc.abstractmethod
- def __eq__(self, other: object) -> bool:
- """
- Returns a boolean representing whether or not the two Specifier-like
- objects are equal.
- """
-
- @abc.abstractmethod
- def __ne__(self, other: object) -> bool:
- """
- Returns a boolean representing whether or not the two Specifier-like
- objects are not equal.
- """
-
- @abc.abstractproperty
- def prereleases(self) -> Optional[bool]:
- """
- Returns whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- """
- Sets whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @abc.abstractmethod
- def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
- """
- Determines if the given item is contained within this specifier.
- """
-
- @abc.abstractmethod
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
- """
- Takes an iterable of items and filters them so that only items which
- are contained within this specifier are allowed in it.
- """
-
-
-class _IndividualSpecifier(BaseSpecifier):
-
- _operators: Dict[str, str] = {}
- _regex: Pattern[str]
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- match = self._regex.search(spec)
- if not match:
- raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
- self._spec: Tuple[str, str] = (
- match.group("operator").strip(),
- match.group("version").strip(),
- )
-
- # Store whether or not this Specifier should accept prereleases
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre)
-
- def __str__(self) -> str:
- return "{}{}".format(*self._spec)
-
- @property
- def _canonical_spec(self) -> Tuple[str, str]:
- return self._spec[0], canonicalize_version(self._spec[1])
-
- def __hash__(self) -> int:
- return hash(self._canonical_spec)
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, str):
- try:
- other = self.__class__(str(other))
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
-
- return self._canonical_spec == other._canonical_spec
-
- def __ne__(self, other: object) -> bool:
- if isinstance(other, str):
- try:
- other = self.__class__(str(other))
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
-
- return self._spec != other._spec
-
- def _get_operator(self, op: str) -> CallableOperator:
- operator_callable: CallableOperator = getattr(
- self, f"_compare_{self._operators[op]}"
- )
- return operator_callable
-
- def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
- if not isinstance(version, (LegacyVersion, Version)):
- version = parse(version)
- return version
-
- @property
- def operator(self) -> str:
- return self._spec[0]
-
- @property
- def version(self) -> str:
- return self._spec[1]
-
- @property
- def prereleases(self) -> Optional[bool]:
- return self._prereleases
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: str) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Determine if prereleases are to be allowed or not.
- if prereleases is None:
- prereleases = self.prereleases
-
- # Normalize item to a Version or LegacyVersion; this allows us to have
- # a shortcut for ``"2.0" in Specifier(">=2")``.
- normalized_item = self._coerce_version(item)
-
- # Determine if we should be supporting prereleases in this specifier
- # or not; if we do not support prereleases then we can short circuit
- # the logic if this version is a prerelease.
- if normalized_item.is_prerelease and not prereleases:
- return False
-
- # Actually do the comparison to determine if this item is contained
- # within this Specifier or not.
- operator_callable: CallableOperator = self._get_operator(self.operator)
- return operator_callable(normalized_item, self.version)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- yielded = False
- found_prereleases = []
-
- kw = {"prereleases": prereleases if prereleases is not None else True}
-
- # Attempt to iterate over all the values in the iterable and if any of
- # them match, yield them.
- for version in iterable:
- parsed_version = self._coerce_version(version)
-
- if self.contains(parsed_version, **kw):
- # If our version is a prerelease, and we were not set to allow
- # prereleases, then we'll store it for later in case nothing
- # else matches this specifier.
- if parsed_version.is_prerelease and not (
- prereleases or self.prereleases
- ):
- found_prereleases.append(version)
- # Either this is not a prerelease, or we should have been
- # accepting prereleases from the beginning.
- else:
- yielded = True
- yield version
-
- # Now that we've iterated over everything, determine if we've yielded
- # any values, and if we have not and we have any prereleases stored up
- # then we will go ahead and yield the prereleases.
- if not yielded and found_prereleases:
- for version in found_prereleases:
- yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(==|!=|<=|>=|<|>))
- \s*
- (?P<version>
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
- # string can be just about anything, we match everything
- # except for whitespace, a semi-colon for marker support,
- # a closing paren since versions can be enclosed in
- # them, and a comma since it's a version separator.
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- }
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- super().__init__(spec, prereleases)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
- if not isinstance(version, LegacyVersion):
- version = LegacyVersion(str(version))
- return version
-
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective == self._coerce_version(spec)
-
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective != self._coerce_version(spec)
-
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective <= self._coerce_version(spec)
-
- def _compare_greater_than_equal(
- self, prospective: LegacyVersion, spec: str
- ) -> bool:
- return prospective >= self._coerce_version(spec)
-
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective < self._coerce_version(spec)
-
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
- fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
- @functools.wraps(fn)
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
- if not isinstance(prospective, Version):
- return False
- return fn(self, prospective, spec)
-
- return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
- (?P<version>
- (?:
- # The identity operators allow for an escape hatch that will
- # do an exact string match of the version you wish to install.
- # The version is not parsed as a PEP 440 version, so we cannot
- # determine any semantic meaning from it. This operator is
- # discouraged but included entirely as an escape hatch.
- (?<====) # Only match for the identity operator
- \s*
- [^\s]* # We just match everything, except for whitespace
- # since we are only testing for strict identity.
- )
- |
- (?:
- # The (non)equality operators allow for wild card and local
- # versions to be specified so we have to define these two
- # operators separately to enable that.
- (?<===|!=) # Only match for equals and not equals
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
-
- # You cannot use a wild card and a dev or local version
- # together so group them with a | and make them optional.
- (?:
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
- |
- \.\* # Wild card syntax of .*
- )?
- )
- |
- (?:
- # The compatible operator requires at least two digits in the
- # release segment.
- (?<=~=) # Only match for the compatible operator
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- |
- (?:
- # All other operators only allow a subset of what the
- # (non)equality operators do. Specifically they do not allow
- # local versions to be specified nor do they allow the prefix
- # matching wild cards.
- (?<!==|!=|~=) # We have special cases for these
- # operators so we want to make sure they
- # don't match here.
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "~=": "compatible",
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- "===": "arbitrary",
- }
-
- @_require_version_compare
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # Compatible releases have an equivalent combination of >= and ==. That
- # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
- # implement this in terms of the other specifiers instead of
- # implementing it ourselves. The only thing we need to do is construct
- # the other specifiers.
-
- # We want everything but the last item in the version, but we want to
- # ignore suffix segments.
- prefix = ".".join(
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
- )
-
- # Add the prefix notation to the end of our string
- prefix += ".*"
-
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
- prospective, prefix
- )
-
- @_require_version_compare
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # We need special logic to handle prefix matching
- if spec.endswith(".*"):
- # In the case of prefix matching we want to ignore the local segment.
- prospective = Version(prospective.public)
- # Split the spec out by dots, and pretend that there is an implicit
- # dot in between a release segment and a pre-release segment.
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
-
- # Split the prospective version out by dots, and pretend that there
- # is an implicit dot in between a release segment and a pre-release
- # segment.
- split_prospective = _version_split(str(prospective))
-
- # Shorten the prospective version to be the same length as the spec
- # so that we can determine if the specifier is a prefix of the
- # prospective version or not.
- shortened_prospective = split_prospective[: len(split_spec)]
-
- # Pad out our two sides with zeros so that they both equal the same
- # length.
- padded_spec, padded_prospective = _pad_version(
- split_spec, shortened_prospective
- )
-
- return padded_prospective == padded_spec
- else:
- # Convert our spec string into a Version
- spec_version = Version(spec)
-
- # If the specifier does not have a local segment, then we want to
- # act as if the prospective version also does not have a local
- # segment.
- if not spec_version.local:
- prospective = Version(prospective.public)
-
- return prospective == spec_version
-
- @_require_version_compare
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
- return not self._compare_equal(prospective, spec)
-
- @_require_version_compare
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) <= Version(spec)
-
- @_require_version_compare
- def _compare_greater_than_equal(
- self, prospective: ParsedVersion, spec: str
- ) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) >= Version(spec)
-
- @_require_version_compare
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is less than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective < spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes a pre-release version, we do not accept pre-release
- # versions for the version mentioned in the specifier (e.g. <3.1 should
- # not match 3.1.dev0, but should match 3.0.dev0).
- if not spec.is_prerelease and prospective.is_prerelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that the prospective version is both
- # less than the spec version *and* it's not a pre-release of the same
- # version in the spec.
- return True
-
- @_require_version_compare
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is greater than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective > spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes a post-release version, we do not accept
- # post-release versions for the version mentioned in the specifier
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
- if not spec.is_postrelease and prospective.is_postrelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # Ensure that we do not allow a local version of the version mentioned
- # in the specifier, which is technically greater than, to match.
- if prospective.local is not None:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that the prospective version is both
- # greater than the spec version *and* it's not a pre-release of the
- # same version in the spec.
- return True
-
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
- return str(prospective).lower() == str(spec).lower()
-
- @property
- def prereleases(self) -> bool:
-
- # If there is an explicit prereleases set for this, then we'll just
- # blindly use that.
- if self._prereleases is not None:
- return self._prereleases
-
- # Look at all of our specifiers and determine if they are inclusive
- # operators, and if they are if they are including an explicit
- # prerelease.
- operator, version = self._spec
- if operator in ["==", ">=", "<=", "~=", "==="]:
- # The == specifier can include a trailing .*, if it does we
- # want to remove before parsing.
- if operator == "==" and version.endswith(".*"):
- version = version[:-2]
-
- # Parse the version, and if it is a pre-release then this
- # specifier allows pre-releases.
- if parse(version).is_prerelease:
- return True
-
- return False
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
-
-_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-
-
-def _version_split(version: str) -> List[str]:
- result: List[str] = []
- for item in version.split("."):
- match = _prefix_regex.search(item)
- if match:
- result.extend(match.groups())
- else:
- result.append(item)
- return result
-
-
-def _is_not_suffix(segment: str) -> bool:
- return not any(
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
- )
-
-
-def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
- left_split, right_split = [], []
-
- # Get the release segment of our versions
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
-
- # Get the rest of our versions
- left_split.append(left[len(left_split[0]) :])
- right_split.append(right[len(right_split[0]) :])
-
- # Insert our padding
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
-
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
-
-
-class SpecifierSet(BaseSpecifier):
- def __init__(
- self, specifiers: str = "", prereleases: Optional[bool] = None
- ) -> None:
-
- # Split on "," to break each individual specifier into its own item, and
- # strip each item to remove leading/trailing whitespace.
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
- # Parse each individual specifier, attempting first to make it a
- # Specifier and falling back to a LegacySpecifier.
- parsed: Set[_IndividualSpecifier] = set()
- for specifier in split_specifiers:
- try:
- parsed.add(Specifier(specifier))
- except InvalidSpecifier:
- parsed.add(LegacySpecifier(specifier))
-
- # Turn our parsed specifiers into a frozen set and save them for later.
- self._specs = frozenset(parsed)
-
- # Store our prereleases value so we can use it later to determine if
- # we accept prereleases or not.
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return "<SpecifierSet({!r}{})>".format(str(self), pre)
-
- def __str__(self) -> str:
- return ",".join(sorted(str(s) for s in self._specs))
-
- def __hash__(self) -> int:
- return hash(self._specs)
-
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
- if isinstance(other, str):
- other = SpecifierSet(other)
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- specifier = SpecifierSet()
- specifier._specs = frozenset(self._specs | other._specs)
-
- if self._prereleases is None and other._prereleases is not None:
- specifier._prereleases = other._prereleases
- elif self._prereleases is not None and other._prereleases is None:
- specifier._prereleases = self._prereleases
- elif self._prereleases == other._prereleases:
- specifier._prereleases = self._prereleases
- else:
- raise ValueError(
- "Cannot combine SpecifierSets with True and False prerelease "
- "overrides."
- )
-
- return specifier
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, (str, _IndividualSpecifier)):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs == other._specs
-
- def __ne__(self, other: object) -> bool:
- if isinstance(other, (str, _IndividualSpecifier)):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs != other._specs
-
- def __len__(self) -> int:
- return len(self._specs)
-
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
- return iter(self._specs)
-
- @property
- def prereleases(self) -> Optional[bool]:
-
- # If we have been given an explicit prerelease modifier, then we'll
- # pass that through here.
- if self._prereleases is not None:
- return self._prereleases
-
- # If we don't have any specifiers, and we don't have a forced value,
- # then we'll just return None since we don't know if this should have
- # pre-releases or not.
- if not self._specs:
- return None
-
- # Otherwise we'll see if any of the given specifiers accept
- # prereleases, if any of them do we'll return True, otherwise False.
- return any(s.prereleases for s in self._specs)
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: UnparsedVersion) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Ensure that our item is a Version or LegacyVersion instance.
- if not isinstance(item, (LegacyVersion, Version)):
- item = parse(item)
-
- # Determine if we're forcing a prerelease or not; if we're not forcing
- # one for this particular filter call, then we'll use whatever the
- # SpecifierSet thinks for whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # We can determine if we're going to allow pre-releases by looking to
- # see if any of the underlying items supports them. If none of them do
- # and this item is a pre-release then we do not allow it and we can
- # short circuit that here.
- # Note: This means that 1.0.dev1 would not be contained in something
- # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
- if not prereleases and item.is_prerelease:
- return False
-
- # We simply dispatch to the underlying specs here to make sure that the
- # given version is contained within all of them.
- # Note: This use of all() here means that an empty set of specifiers
- # will always return True; this is an explicit design decision.
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- # Determine if we're forcing a prerelease or not; if we're not forcing
- # one for this particular filter call, then we'll use whatever the
- # SpecifierSet thinks for whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # If we have any specifiers, then we want to wrap our iterable in the
- # filter method for each one; this will act as a logical AND amongst
- # each specifier.
- if self._specs:
- for spec in self._specs:
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
- return iterable
- # If we do not have any specifiers, then we need to have a rough filter
- # which will filter out any pre-releases, unless there are no final
- # releases, and which will filter out LegacyVersion in general.
- else:
- filtered: List[VersionTypeVar] = []
- found_prereleases: List[VersionTypeVar] = []
-
- item: UnparsedVersion
- parsed_version: Union[Version, LegacyVersion]
-
- for item in iterable:
- # Ensure that we have some kind of Version class for this item.
- if not isinstance(item, (LegacyVersion, Version)):
- parsed_version = parse(item)
- else:
- parsed_version = item
-
- # Filter out any item which is parsed as a LegacyVersion
- if isinstance(parsed_version, LegacyVersion):
- continue
-
- # Store any item which is a pre-release for later unless we've
- # already found a final version or we are accepting prereleases
- if parsed_version.is_prerelease and not prereleases:
- if not filtered:
- found_prereleases.append(item)
- else:
- filtered.append(item)
-
- # If we've found no items except for pre-releases, then we'll go
- # ahead and use the pre-releases
- if not filtered and found_prereleases and prereleases is None:
- return found_prereleases
-
- return filtered
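A minimal usage sketch of the specifier API above, showing containment checks and the prerelease-aware filter (the same API ships in the standalone packaging distribution):

from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.0,!=1.3.0")
print("1.4" in spec)           # True: __contains__ delegates to contains()
print(spec.contains("1.3.0"))  # False: excluded by !=1.3.0
# filter() drops prereleases by default, per the logic documented above:
print(list(spec.filter(["0.9", "1.3.0", "1.4", "2.0a1"])))  # ['1.4']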
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/tags.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/tags.py
deleted file mode 100644
index e65890a90cd..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/tags.py
+++ /dev/null
@@ -1,484 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import logging
-import platform
-import sys
-import sysconfig
-from importlib.machinery import EXTENSION_SUFFIXES
-from typing import (
- Dict,
- FrozenSet,
- Iterable,
- Iterator,
- List,
- Optional,
- Sequence,
- Tuple,
- Union,
- cast,
-)
-
-from . import _manylinux, _musllinux
-
-logger = logging.getLogger(__name__)
-
-PythonVersion = Sequence[int]
-MacVersion = Tuple[int, int]
-
-INTERPRETER_SHORT_NAMES: Dict[str, str] = {
- "python": "py", # Generic.
- "cpython": "cp",
- "pypy": "pp",
- "ironpython": "ip",
- "jython": "jy",
-}
-
-
-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
-
-
-class Tag:
- """
- A representation of the tag triple for a wheel.
-
- Instances are considered immutable and thus are hashable. Equality checking
- is also supported.
- """
-
- __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
-
- def __init__(self, interpreter: str, abi: str, platform: str) -> None:
- self._interpreter = interpreter.lower()
- self._abi = abi.lower()
- self._platform = platform.lower()
- # The __hash__ of every single element in a Set[Tag] will be evaluated each time
- # that a set calls its `.isdisjoint()` method, which may be called hundreds of
- # times when scanning a page of links for packages with tags matching that
- # Set[Tag]. Pre-computing the value here produces significant speedups for
- # downstream consumers.
- self._hash = hash((self._interpreter, self._abi, self._platform))
-
- @property
- def interpreter(self) -> str:
- return self._interpreter
-
- @property
- def abi(self) -> str:
- return self._abi
-
- @property
- def platform(self) -> str:
- return self._platform
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, Tag):
- return NotImplemented
-
- return (
- (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
- and (self._platform == other._platform)
- and (self._abi == other._abi)
- and (self._interpreter == other._interpreter)
- )
-
- def __hash__(self) -> int:
- return self._hash
-
- def __str__(self) -> str:
- return f"{self._interpreter}-{self._abi}-{self._platform}"
-
- def __repr__(self) -> str:
- return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
-
-
-def parse_tag(tag: str) -> FrozenSet[Tag]:
- """
- Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
-
- Returning a set is required due to the possibility that the tag is a
- compressed tag set.
- """
- tags = set()
- interpreters, abis, platforms = tag.split("-")
- for interpreter in interpreters.split("."):
- for abi in abis.split("."):
- for platform_ in platforms.split("."):
- tags.add(Tag(interpreter, abi, platform_))
- return frozenset(tags)
-
-
-def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
- value = sysconfig.get_config_var(name)
- if value is None and warn:
- logger.debug(
- "Config variable '%s' is unset, Python ABI tag may be incorrect", name
- )
- return value
-
-
-def _normalize_string(string: str) -> str:
- return string.replace(".", "_").replace("-", "_")
-
-
-def _abi3_applies(python_version: PythonVersion) -> bool:
- """
- Determine if the Python version supports abi3.
-
- PEP 384 was first implemented in Python 3.2.
- """
- return len(python_version) > 1 and tuple(python_version) >= (3, 2)
-
-
-def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
- py_version = tuple(py_version) # To allow for version comparison.
- abis = []
- version = _version_nodot(py_version[:2])
- debug = pymalloc = ucs4 = ""
- with_debug = _get_config_var("Py_DEBUG", warn)
- has_refcount = hasattr(sys, "gettotalrefcount")
- # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
- # extension modules is the best option.
- # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
- has_ext = "_d.pyd" in EXTENSION_SUFFIXES
- if with_debug or (with_debug is None and (has_refcount or has_ext)):
- debug = "d"
- if py_version < (3, 8):
- with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
- if with_pymalloc or with_pymalloc is None:
- pymalloc = "m"
- if py_version < (3, 3):
- unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
- if unicode_size == 4 or (
- unicode_size is None and sys.maxunicode == 0x10FFFF
- ):
- ucs4 = "u"
- elif debug:
- # Debug builds can also load "normal" extension modules.
- # We can also assume no UCS-4 or pymalloc requirement.
- abis.append(f"cp{version}")
- abis.insert(
- 0,
- "cp{version}{debug}{pymalloc}{ucs4}".format(
- version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
- ),
- )
- return abis
-
-
-def cpython_tags(
- python_version: Optional[PythonVersion] = None,
- abis: Optional[Iterable[str]] = None,
- platforms: Optional[Iterable[str]] = None,
- *,
- warn: bool = False,
-) -> Iterator[Tag]:
- """
- Yields the tags for a CPython interpreter.
-
- The tags consist of:
- - cp<python_version>-<abi>-<platform>
- - cp<python_version>-abi3-<platform>
- - cp<python_version>-none-<platform>
- - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
-
- If python_version only specifies a major version then user-provided ABIs and
- the 'none' ABI tag will be used.
-
- If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
- their normal position and not at the beginning.
- """
- if not python_version:
- python_version = sys.version_info[:2]
-
- interpreter = "cp{}".format(_version_nodot(python_version[:2]))
-
- if abis is None:
- if len(python_version) > 1:
- abis = _cpython_abis(python_version, warn)
- else:
- abis = []
- abis = list(abis)
- # 'abi3' and 'none' are explicitly handled later.
- for explicit_abi in ("abi3", "none"):
- try:
- abis.remove(explicit_abi)
- except ValueError:
- pass
-
- platforms = list(platforms or platform_tags())
- for abi in abis:
- for platform_ in platforms:
- yield Tag(interpreter, abi, platform_)
- if _abi3_applies(python_version):
- yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
- yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
- if _abi3_applies(python_version):
- for minor_version in range(python_version[1] - 1, 1, -1):
- for platform_ in platforms:
- interpreter = "cp{version}".format(
- version=_version_nodot((python_version[0], minor_version))
- )
- yield Tag(interpreter, "abi3", platform_)
-
-
-def _generic_abi() -> Iterator[str]:
- abi = sysconfig.get_config_var("SOABI")
- if abi:
- yield _normalize_string(abi)
-
-
-def generic_tags(
- interpreter: Optional[str] = None,
- abis: Optional[Iterable[str]] = None,
- platforms: Optional[Iterable[str]] = None,
- *,
- warn: bool = False,
-) -> Iterator[Tag]:
- """
- Yields the tags for a generic interpreter.
-
- The tags consist of:
- - <interpreter>-<abi>-<platform>
-
- The "none" ABI will be added if it was not explicitly provided.
- """
- if not interpreter:
- interp_name = interpreter_name()
- interp_version = interpreter_version(warn=warn)
- interpreter = "".join([interp_name, interp_version])
- if abis is None:
- abis = _generic_abi()
- platforms = list(platforms or platform_tags())
- abis = list(abis)
- if "none" not in abis:
- abis.append("none")
- for abi in abis:
- for platform_ in platforms:
- yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
- """
- Yields Python versions in descending order.
-
- After the latest version, the major-only version will be yielded, and then
- all previous versions of that major version.
- """
- if len(py_version) > 1:
- yield "py{version}".format(version=_version_nodot(py_version[:2]))
- yield "py{major}".format(major=py_version[0])
- if len(py_version) > 1:
- for minor in range(py_version[1] - 1, -1, -1):
- yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
-
-
-def compatible_tags(
- python_version: Optional[PythonVersion] = None,
- interpreter: Optional[str] = None,
- platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
- """
- Yields the sequence of tags that are compatible with a specific version of Python.
-
- The tags consist of:
- - py*-none-<platform>
- - <interpreter>-none-any # ... if `interpreter` is provided.
- - py*-none-any
- """
- if not python_version:
- python_version = sys.version_info[:2]
- platforms = list(platforms or platform_tags())
- for version in _py_interpreter_range(python_version):
- for platform_ in platforms:
- yield Tag(version, "none", platform_)
- if interpreter:
- yield Tag(interpreter, "none", "any")
- for version in _py_interpreter_range(python_version):
- yield Tag(version, "none", "any")
-
-
-def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
- if not is_32bit:
- return arch
-
- if arch.startswith("ppc"):
- return "ppc"
-
- return "i386"
-
-
-def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
- formats = [cpu_arch]
- if cpu_arch == "x86_64":
- if version < (10, 4):
- return []
- formats.extend(["intel", "fat64", "fat32"])
-
- elif cpu_arch == "i386":
- if version < (10, 4):
- return []
- formats.extend(["intel", "fat32", "fat"])
-
- elif cpu_arch == "ppc64":
- # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
- if version > (10, 5) or version < (10, 4):
- return []
- formats.append("fat64")
-
- elif cpu_arch == "ppc":
- if version > (10, 6):
- return []
- formats.extend(["fat32", "fat"])
-
- if cpu_arch in {"arm64", "x86_64"}:
- formats.append("universal2")
-
- if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
- formats.append("universal")
-
- return formats
-
-
-def mac_platforms(
- version: Optional[MacVersion] = None, arch: Optional[str] = None
-) -> Iterator[str]:
- """
- Yields the platform tags for a macOS system.
-
- The `version` parameter is a two-item tuple specifying the macOS version to
- generate platform tags for. The `arch` parameter is the CPU architecture to
- generate platform tags for. Both parameters default to the appropriate value
- for the current system.
- """
- version_str, _, cpu_arch = platform.mac_ver()
- if version is None:
- version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
- if arch is None:
- arch = _mac_arch(cpu_arch)
-
- if (10, 0) <= version < (11, 0):
- # Prior to Mac OS 11, each yearly release of Mac OS bumped the
- # "minor" version number. The major version was always 10.
- for minor_version in range(version[1], -1, -1):
- compat_version = 10, minor_version
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=10, minor=minor_version, binary_format=binary_format
- )
-
- if version >= (11, 0):
- # Starting with Mac OS 11, each yearly release bumps the major version
- # number. The minor versions are now the midyear updates.
- for major_version in range(version[0], 10, -1):
- compat_version = major_version, 0
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=major_version, minor=0, binary_format=binary_format
- )
-
- if version >= (11, 0):
- # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
- # Arm64 support was introduced in 11.0, so no Arm binaries from previous
- # releases exist.
- #
- # However, the "universal2" binary format can have a
- # macOS version earlier than 11.0 when the x86_64 part of the binary supports
- # that version of macOS.
- if arch == "x86_64":
- for minor_version in range(16, 3, -1):
- compat_version = 10, minor_version
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=compat_version[0],
- minor=compat_version[1],
- binary_format=binary_format,
- )
- else:
- for minor_version in range(16, 3, -1):
- compat_version = 10, minor_version
- binary_format = "universal2"
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=compat_version[0],
- minor=compat_version[1],
- binary_format=binary_format,
- )
-
-
-def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
- linux = _normalize_string(sysconfig.get_platform())
- if is_32bit:
- if linux == "linux_x86_64":
- linux = "linux_i686"
- elif linux == "linux_aarch64":
- linux = "linux_armv7l"
- _, arch = linux.split("_", 1)
- yield from _manylinux.platform_tags(linux, arch)
- yield from _musllinux.platform_tags(arch)
- yield linux
-
-
-def _generic_platforms() -> Iterator[str]:
- yield _normalize_string(sysconfig.get_platform())
-
-
-def platform_tags() -> Iterator[str]:
- """
- Provides the platform tags for this installation.
- """
- if platform.system() == "Darwin":
- return mac_platforms()
- elif platform.system() == "Linux":
- return _linux_platforms()
- else:
- return _generic_platforms()
-
-
-def interpreter_name() -> str:
- """
- Returns the name of the running interpreter.
- """
- name = sys.implementation.name
- return INTERPRETER_SHORT_NAMES.get(name) or name
-
-
-def interpreter_version(*, warn: bool = False) -> str:
- """
- Returns the version of the running interpreter.
- """
- version = _get_config_var("py_version_nodot", warn=warn)
- if version:
- version = str(version)
- else:
- version = _version_nodot(sys.version_info[:2])
- return version
-
-
-def _version_nodot(version: PythonVersion) -> str:
- return "".join(map(str, version))
-
-
-def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
- """
- Returns the sequence of tag triples for the running interpreter.
-
- The order of the sequence corresponds to priority order for the
- interpreter, from most to least important.
- """
-
- interp_name = interpreter_name()
- if interp_name == "cp":
- yield from cpython_tags(warn=warn)
- else:
- yield from generic_tags()
-
- yield from compatible_tags()
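A minimal usage sketch of the tag helpers above (the same API ships in the standalone packaging distribution):

from packaging.tags import parse_tag, sys_tags

# A compressed tag set expands to the full cross-product, per parse_tag:
print(sorted(str(t) for t in parse_tag("cp38.cp39-abi3-manylinux1_x86_64")))
# ['cp38-abi3-manylinux1_x86_64', 'cp39-abi3-manylinux1_x86_64']

# sys_tags() yields tags for the running interpreter, most specific first;
# the exact output depends on the interpreter and platform.
for tag in list(sys_tags())[:3]:
    print(tag)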
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/utils.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/utils.py
deleted file mode 100644
index bab11b80c60..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-from typing import FrozenSet, NewType, Tuple, Union, cast
-
-from .tags import Tag, parse_tag
-from .version import InvalidVersion, Version
-
-BuildTag = Union[Tuple[()], Tuple[int, str]]
-NormalizedName = NewType("NormalizedName", str)
-
-
-class InvalidWheelFilename(ValueError):
- """
- An invalid wheel filename was found; users should refer to PEP 427.
- """
-
-
-class InvalidSdistFilename(ValueError):
- """
- An invalid sdist filename was found; users should refer to the packaging user guide.
- """
-
-
-_canonicalize_regex = re.compile(r"[-_.]+")
-# PEP 427: The build number must start with a digit.
-_build_tag_regex = re.compile(r"(\d+)(.*)")
-
-
-def canonicalize_name(name: str) -> NormalizedName:
- # This is taken from PEP 503.
- value = _canonicalize_regex.sub("-", name).lower()
- return cast(NormalizedName, value)
-
-
-def canonicalize_version(version: Union[Version, str]) -> str:
- """
- This is very similar to Version.__str__, but has one subtle difference
- in the way it handles the release segment.
- """
- if isinstance(version, str):
- try:
- parsed = Version(version)
- except InvalidVersion:
- # Legacy versions cannot be normalized
- return version
- else:
- parsed = version
-
- parts = []
-
- # Epoch
- if parsed.epoch != 0:
- parts.append(f"{parsed.epoch}!")
-
- # Release segment
- # NB: This strips trailing '.0's to normalize
- parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
-
- # Pre-release
- if parsed.pre is not None:
- parts.append("".join(str(x) for x in parsed.pre))
-
- # Post-release
- if parsed.post is not None:
- parts.append(f".post{parsed.post}")
-
- # Development release
- if parsed.dev is not None:
- parts.append(f".dev{parsed.dev}")
-
- # Local version segment
- if parsed.local is not None:
- parts.append(f"+{parsed.local}")
-
- return "".join(parts)
-
-
-def parse_wheel_filename(
- filename: str,
-) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
- if not filename.endswith(".whl"):
- raise InvalidWheelFilename(
- f"Invalid wheel filename (extension must be '.whl'): {filename}"
- )
-
- filename = filename[:-4]
- dashes = filename.count("-")
- if dashes not in (4, 5):
- raise InvalidWheelFilename(
- f"Invalid wheel filename (wrong number of parts): {filename}"
- )
-
- parts = filename.split("-", dashes - 2)
- name_part = parts[0]
- # See PEP 427 for the rules on escaping the project name
- if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
- raise InvalidWheelFilename(f"Invalid project name: {filename}")
- name = canonicalize_name(name_part)
- version = Version(parts[1])
- if dashes == 5:
- build_part = parts[2]
- build_match = _build_tag_regex.match(build_part)
- if build_match is None:
- raise InvalidWheelFilename(
- f"Invalid build number: {build_part} in '{filename}'"
- )
- build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
- else:
- build = ()
- tags = parse_tag(parts[-1])
- return (name, version, build, tags)
-
-
-def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
- if filename.endswith(".tar.gz"):
- file_stem = filename[: -len(".tar.gz")]
- elif filename.endswith(".zip"):
- file_stem = filename[: -len(".zip")]
- else:
- raise InvalidSdistFilename(
- f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
- f" {filename}"
- )
-
- # We are requiring a PEP 440 version, which cannot contain dashes,
- # so we split on the last dash.
- name_part, sep, version_part = file_stem.rpartition("-")
- if not sep:
- raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
-
- name = canonicalize_name(name_part)
- version = Version(version_part)
- return (name, version)
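A minimal usage sketch of the two filename parsers and the PEP 503 name canonicalization above (the same API ships in the standalone packaging distribution):

from packaging.utils import canonicalize_name, parse_wheel_filename

print(canonicalize_name("Django_Rest.Framework"))  # django-rest-framework

name, version, build, tags = parse_wheel_filename("pip-21.1.3-py3-none-any.whl")
print(name, version, build)          # pip 21.1.3 ()
print(sorted(str(t) for t in tags))  # ['py3-none-any']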
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/version.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/version.py
deleted file mode 100644
index de9a09a4ed3..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/packaging/version.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import collections
-import itertools
-import re
-import warnings
-from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
-
-from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
-
-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
-
-InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[
- NegativeInfinityType,
- Tuple[
- Union[
- SubLocalType,
- Tuple[SubLocalType, str],
- Tuple[NegativeInfinityType, SubLocalType],
- ],
- ...,
- ],
-]
-CmpKey = Tuple[
- int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
-]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[
- [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-]
-
-_Version = collections.namedtuple(
- "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
-
-
-def parse(version: str) -> Union["LegacyVersion", "Version"]:
- """
- Parse the given version string and return either a :class:`Version` object
- or a :class:`LegacyVersion` object depending on if the given version is
- a valid PEP 440 version or a legacy version.
- """
- try:
- return Version(version)
- except InvalidVersion:
- return LegacyVersion(version)
-
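-# Illustrative examples (not part of the vendored module); note that
-# constructing a LegacyVersion emits a DeprecationWarning.
-#
-#   parse("1.0.post1")     # -> <Version('1.0.post1')>
-#   parse("french toast")  # -> <LegacyVersion('french toast')>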
-
-class InvalidVersion(ValueError):
- """
- An invalid version was found; users should refer to PEP 440.
- """
-
-
-class _BaseVersion:
- _key: Union[CmpKey, LegacyCmpKey]
-
- def __hash__(self) -> int:
- return hash(self._key)
-
- # Please keep the duplicated `isinstance` check
- # in the six comparisons hereunder
- # unless you find a way to avoid adding overhead function calls.
- def __lt__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key < other._key
-
- def __le__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key <= other._key
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key == other._key
-
- def __ge__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key >= other._key
-
- def __gt__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key > other._key
-
- def __ne__(self, other: object) -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key != other._key
-
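-# Because every comparison goes through the shared _key, ordering and
-# equality stay consistent; e.g. (illustrative, not part of the module):
-#
-#   Version("1.0a1") < Version("1.0") < Version("1.0.post1")  # True
-#   Version("1.0") == Version("1.0.0")                        # True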
-
-class LegacyVersion(_BaseVersion):
- def __init__(self, version: str) -> None:
- self._version = str(version)
- self._key = _legacy_cmpkey(self._version)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def __str__(self) -> str:
- return self._version
-
- def __repr__(self) -> str:
- return f"<LegacyVersion('{self}')>"
-
- @property
- def public(self) -> str:
- return self._version
-
- @property
- def base_version(self) -> str:
- return self._version
-
- @property
- def epoch(self) -> int:
- return -1
-
- @property
- def release(self) -> None:
- return None
-
- @property
- def pre(self) -> None:
- return None
-
- @property
- def post(self) -> None:
- return None
-
- @property
- def dev(self) -> None:
- return None
-
- @property
- def local(self) -> None:
- return None
-
- @property
- def is_prerelease(self) -> bool:
- return False
-
- @property
- def is_postrelease(self) -> bool:
- return False
-
- @property
- def is_devrelease(self) -> bool:
- return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
- "pre": "c",
- "preview": "c",
- "-": "final-",
- "rc": "c",
- "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
- for part in _legacy_version_component_re.split(s):
- part = _legacy_version_replacement_map.get(part, part)
-
- if not part or part == ".":
- continue
-
- if part[:1] in "0123456789":
- # pad for numeric comparison
- yield part.zfill(8)
- else:
- yield "*" + part
-
- # ensure that alpha/beta/candidate are before final
- yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
- # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
- # greater than or equal to 0. This effectively sorts every LegacyVersion,
- # which uses the de facto standard originally implemented by setuptools,
- # before all PEP 440 versions.
- epoch = -1
-
- # This scheme is taken from setuptools' pkg_resources.parse_version, prior
- # to its adoption of the packaging library.
- parts: List[str] = []
- for part in _parse_version_parts(version.lower()):
- if part.startswith("*"):
- # remove "-" before a prerelease tag
- if part < "*final":
- while parts and parts[-1] == "*final-":
- parts.pop()
-
- # remove trailing zeros from each series of numeric parts
- while parts and parts[-1] == "00000000":
- parts.pop()
-
- parts.append(part)
-
- return epoch, tuple(parts)
-
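-# Illustrative consequence (not part of the vendored module): the -1 epoch
-# sorts any LegacyVersion before any PEP 440 Version.
-#
-#   LegacyVersion("2013b") < Version("0.0.1")  # True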
-
-# Deliberately not anchored to the start and end of the string, to make it
-# easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
- v?
- (?:
- (?:(?P<epoch>[0-9]+)!)? # epoch
- (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
- (?P<pre> # pre-release
- [-_\.]?
- (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
- [-_\.]?
- (?P<pre_n>[0-9]+)?
- )?
- (?P<post> # post release
- (?:-(?P<post_n1>[0-9]+))
- |
- (?:
- [-_\.]?
- (?P<post_l>post|rev|r)
- [-_\.]?
- (?P<post_n2>[0-9]+)?
- )
- )?
- (?P<dev> # dev release
- [-_\.]?
- (?P<dev_l>dev)
- [-_\.]?
- (?P<dev_n>[0-9]+)?
- )?
- )
- (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
-"""
-
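-# Because the pattern is unanchored, third-party code can embed it; e.g.
-# (illustrative, not part of the vendored module):
-#
-#   _re = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-#   m = _re.search("1.2.3rc1+local.7")
-#   (m.group("release"), m.group("pre_l"), m.group("local"))
-#   # -> ('1.2.3', 'rc', 'local.7')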
-
-class Version(_BaseVersion):
-
- _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- def __init__(self, version: str) -> None:
-
- # Validate the version and parse it into pieces
- match = self._regex.search(version)
- if not match:
- raise InvalidVersion(f"Invalid version: '{version}'")
-
- # Store the parsed out pieces of the version
- self._version = _Version(
- epoch=int(match.group("epoch")) if match.group("epoch") else 0,
- release=tuple(int(i) for i in match.group("release").split(".")),
- pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
- post=_parse_letter_version(
- match.group("post_l"), match.group("post_n1") or match.group("post_n2")
- ),
- dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
- local=_parse_local_version(match.group("local")),
- )
-
- # Generate a key which will be used for sorting
- self._key = _cmpkey(
- self._version.epoch,
- self._version.release,
- self._version.pre,
- self._version.post,
- self._version.dev,
- self._version.local,
- )
-
- def __repr__(self) -> str:
- return f"<Version('{self}')>"
-
- def __str__(self) -> str:
- parts = []
-
- # Epoch
- if self.epoch != 0:
- parts.append(f"{self.epoch}!")
-
- # Release segment
- parts.append(".".join(str(x) for x in self.release))
-
- # Pre-release
- if self.pre is not None:
- parts.append("".join(str(x) for x in self.pre))
-
- # Post-release
- if self.post is not None:
- parts.append(f".post{self.post}")
-
- # Development release
- if self.dev is not None:
- parts.append(f".dev{self.dev}")
-
- # Local version segment
- if self.local is not None:
- parts.append(f"+{self.local}")
-
- return "".join(parts)
-
- @property
- def epoch(self) -> int:
- _epoch: int = self._version.epoch
- return _epoch
-
- @property
- def release(self) -> Tuple[int, ...]:
- _release: Tuple[int, ...] = self._version.release
- return _release
-
- @property
- def pre(self) -> Optional[Tuple[str, int]]:
- _pre: Optional[Tuple[str, int]] = self._version.pre
- return _pre
-
- @property
- def post(self) -> Optional[int]:
- return self._version.post[1] if self._version.post else None
-
- @property
- def dev(self) -> Optional[int]:
- return self._version.dev[1] if self._version.dev else None
-
- @property
- def local(self) -> Optional[str]:
- if self._version.local:
- return ".".join(str(x) for x in self._version.local)
- else:
- return None
-
- @property
- def public(self) -> str:
- return str(self).split("+", 1)[0]
-
- @property
- def base_version(self) -> str:
- parts = []
-
- # Epoch
- if self.epoch != 0:
- parts.append(f"{self.epoch}!")
-
- # Release segment
- parts.append(".".join(str(x) for x in self.release))
-
- return "".join(parts)
-
- @property
- def is_prerelease(self) -> bool:
- return self.dev is not None or self.pre is not None
-
- @property
- def is_postrelease(self) -> bool:
- return self.post is not None
-
- @property
- def is_devrelease(self) -> bool:
- return self.dev is not None
-
- @property
- def major(self) -> int:
- return self.release[0] if len(self.release) >= 1 else 0
-
- @property
- def minor(self) -> int:
- return self.release[1] if len(self.release) >= 2 else 0
-
- @property
- def micro(self) -> int:
- return self.release[2] if len(self.release) >= 3 else 0
-
-
-def _parse_letter_version(
- letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
- if letter:
- # We consider there to be an implicit 0 in a pre-release if there is
- # not a numeral associated with it.
- if number is None:
- number = 0
-
- # We normalize any letters to their lower case form
- letter = letter.lower()
-
- # We consider some words to be alternate spellings of other words and
- # in those cases we want to normalize the spellings to our preferred
- # spelling.
- if letter == "alpha":
- letter = "a"
- elif letter == "beta":
- letter = "b"
- elif letter in ["c", "pre", "preview"]:
- letter = "rc"
- elif letter in ["rev", "r"]:
- letter = "post"
-
- return letter, int(number)
- if not letter and number:
- # We assume if we are given a number, but we are not given a letter
- # then this is using the implicit post release syntax (e.g. 1.0-1)
- letter = "post"
-
- return letter, int(number)
-
- return None
-
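-# Illustrative normalizations (not part of the vendored module):
-#
-#   _parse_letter_version("ALPHA", None)  # -> ("a", 0)
-#   _parse_letter_version("rev", "4")     # -> ("post", 4)
-#   _parse_letter_version(None, "1")      # -> ("post", 1), the implicit "1.0-1" form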
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
- """
- Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
- """
- if local is not None:
- return tuple(
- part.lower() if not part.isdigit() else int(part)
- for part in _local_version_separators.split(local)
- )
- return None
-
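-# e.g. (illustrative): _parse_local_version("Ubuntu-1") -> ("ubuntu", 1)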
-
-def _cmpkey(
- epoch: int,
- release: Tuple[int, ...],
- pre: Optional[Tuple[str, int]],
- post: Optional[Tuple[str, int]],
- dev: Optional[Tuple[str, int]],
- local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
- # When we compare a release version, we want to compare it with all of the
- # trailing zeros removed. So we reverse the list, drop the now-leading
- # zeros until we reach something non-zero, then re-reverse the rest back
- # into the correct order and make it a tuple to use as our sorting key.
- _release = tuple(
- reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
- )
-
- # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
- # We'll do this by abusing the pre segment, but we _only_ want to do this
- # if there is not a pre or a post segment. If we have one of those then
- # the normal sorting rules will handle this case correctly.
- if pre is None and post is None and dev is not None:
- _pre: PrePostDevType = NegativeInfinity
- # Versions without a pre-release (except as noted above) should sort after
- # those with one.
- elif pre is None:
- _pre = Infinity
- else:
- _pre = pre
-
- # Versions without a post segment should sort before those with one.
- if post is None:
- _post: PrePostDevType = NegativeInfinity
-
- else:
- _post = post
-
- # Versions without a development segment should sort after those with one.
- if dev is None:
- _dev: PrePostDevType = Infinity
-
- else:
- _dev = dev
-
- if local is None:
- # Versions without a local segment should sort before those with one.
- _local: LocalType = NegativeInfinity
- else:
- # Versions with a local segment need that segment parsed to implement
- # the sorting rules in PEP440.
- # - Alpha numeric segments sort before numeric segments
- # - Alpha numeric segments sort lexicographically
- # - Numeric segments sort numerically
- # - Shorter versions sort before longer versions when the prefixes
- # match exactly
- _local = tuple(
- (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
- )
-
- return epoch, _release, _pre, _post, _dev, _local
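-
-# The Infinity/NegativeInfinity sentinels produce the intended PEP 440
-# ordering; e.g. (illustrative, not part of the vendored module):
-#
-#   Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0+any")  # True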
diff --git a/contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py b/contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py
deleted file mode 100644
index cf75e1e5fcb..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py
+++ /dev/null
@@ -1,5742 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2018 Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = \
-"""
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and executing simple grammars,
-vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
-don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
-provides a library of classes that you use to construct the grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
-(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
-L{Literal} expressions)::
-
- from pyparsing import Word, alphas
-
- # define grammar of a greeting
- greet = Word(alphas) + "," + Word(alphas) + "!"
-
- hello = "Hello, World!"
- print (hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
- Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the self-explanatory
-class names, and the use of '+', '|' and '^' operators.
-
-The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
-object with named attributes.
-
-The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- - quoted strings
- - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
- - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- - construct character word-group expressions using the L{Word} class
- - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
- - associate names with your parsed results using L{ParserElement.setResultsName}
- - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- - find more useful common expressions in the L{pyparsing_common} namespace class
-"""
-
-__version__ = "2.2.1"
-__versionTime__ = "18 Sep 2018 00:49 UTC"
-__author__ = "Paul McGuire <[email protected]>"
-
-import string
-from weakref import ref as wkref
-import copy
-import sys
-import warnings
-import re
-import sre_constants
-import collections
-import pprint
-import traceback
-import types
-from datetime import datetime
-
-try:
- from _thread import RLock
-except ImportError:
- from threading import RLock
-
-try:
- # Python 3
- from collections.abc import Iterable
- from collections.abc import MutableMapping
-except ImportError:
- # Python 2.7
- from collections import Iterable
- from collections import MutableMapping
-
-try:
- from collections import OrderedDict as _OrderedDict
-except ImportError:
- try:
- from ordereddict import OrderedDict as _OrderedDict
- except ImportError:
- _OrderedDict = None
-
-#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
-
-__all__ = [
-'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
-'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
-'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
-'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
-'CloseMatch', 'tokenMap', 'pyparsing_common',
-]
-
-system_version = tuple(sys.version_info)[:3]
-PY_3 = system_version[0] == 3
-if PY_3:
- _MAX_INT = sys.maxsize
- basestring = str
- unichr = chr
- _ustr = str
-
- # build list of single arg builtins, that can be used as parse actions
- singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
-
-else:
- _MAX_INT = sys.maxint
- range = xrange
-
- def _ustr(obj):
- """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
- str(obj). If that fails with a UnicodeEncodeError, it falls back to unicode(obj),
- encoded with the default encoding, with unencodable characters replaced by \\uXXXX escapes.
- """
- if isinstance(obj,unicode):
- return obj
-
- try:
- # If this works, then _ustr(obj) has the same behaviour as str(obj), so
- # it won't break any existing code.
- return str(obj)
-
- except UnicodeEncodeError:
- # Else encode it
- ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
- xmlcharref = Regex(r'&#\d+;')
- xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
- return xmlcharref.transformString(ret)
-
- # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
- singleArgBuiltins = []
- import __builtin__
- for fname in "sum len sorted reversed list tuple set any all min max".split():
- try:
- singleArgBuiltins.append(getattr(__builtin__,fname))
- except AttributeError:
- continue
-
-_generatorType = type((y for y in range(1)))
-
-def _xml_escape(data):
- """Escape &, <, >, ", ', etc. in a string of data."""
-
- # ampersand must be replaced first
- from_symbols = '&><"\''
- to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
- for from_,to_ in zip(from_symbols, to_symbols):
- data = data.replace(from_, to_)
- return data
-
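-# e.g. (illustrative): _xml_escape('a < b & "c"') -> 'a &lt; b &amp; &quot;c&quot;'
-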
-class _Constants(object):
- pass
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-_bslash = chr(92)
-printables = "".join(c for c in string.printable if c not in string.whitespace)
-
-class ParseBaseException(Exception):
- """base exception class for all parsing runtime exceptions"""
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__( self, pstr, loc=0, msg=None, elem=None ):
- self.loc = loc
- if msg is None:
- self.msg = pstr
- self.pstr = ""
- else:
- self.msg = msg
- self.pstr = pstr
- self.parserElement = elem
- self.args = (pstr, loc, msg)
-
- @classmethod
- def _from_exception(cls, pe):
- """
- internal factory method to simplify creating one type of ParseException
- from another - avoids having __init__ signature conflicts among subclasses
- """
- return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
- def __getattr__( self, aname ):
- """supported attributes by name are:
- - lineno - returns the line number of the exception text
- - col - returns the column number of the exception text
- - line - returns the line containing the exception text
- """
- if( aname == "lineno" ):
- return lineno( self.loc, self.pstr )
- elif( aname in ("col", "column") ):
- return col( self.loc, self.pstr )
- elif( aname == "line" ):
- return line( self.loc, self.pstr )
- else:
- raise AttributeError(aname)
-
- def __str__( self ):
- return "%s (at char %d), (line:%d, col:%d)" % \
- ( self.msg, self.loc, self.lineno, self.column )
- def __repr__( self ):
- return _ustr(self)
- def markInputline( self, markerString = ">!<" ):
- """Extracts the exception line from the input string, and marks
- the location of the exception with a special symbol.
- """
- line_str = self.line
- line_column = self.column - 1
- if markerString:
- line_str = "".join((line_str[:line_column],
- markerString, line_str[line_column:]))
- return line_str.strip()
- def __dir__(self):
- return "lineno col line".split() + dir(type(self))
-
-class ParseException(ParseBaseException):
- """
- Exception thrown when parse expressions don't match the input string;
- supported attributes by name are:
- - lineno - returns the line number of the exception text
- - col - returns the column number of the exception text
- - line - returns the line containing the exception text
-
- Example::
- try:
- Word(nums).setName("integer").parseString("ABC")
- except ParseException as pe:
- print(pe)
- print("column: {}".format(pe.col))
-
- prints::
- Expected integer (at char 0), (line:1, col:1)
- column: 1
- """
- pass
-
-class ParseFatalException(ParseBaseException):
- """user-throwable exception thrown when inconsistent parse content
- is found; stops all parsing immediately"""
- pass
-
-class ParseSyntaxException(ParseFatalException):
- """just like L{ParseFatalException}, but thrown internally when an
- L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
- immediately because an unbacktrackable syntax error has been found"""
- pass
-
-#~ class ReparseException(ParseBaseException):
- #~ """Experimental class - parse actions can raise this exception to cause
- #~ pyparsing to reparse the input string:
- #~ - with a modified input string, and/or
- #~ - with a modified start location
- #~ Set the values of the ReparseException in the constructor, and raise the
- #~ exception in a parse action to cause pyparsing to use the new string/location.
- #~ Setting the values as None causes no change to be made.
- #~ """
- #~ def __init_( self, newstring, restartLoc ):
- #~ self.newParseText = newstring
- #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
- """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
- def __init__( self, parseElementList ):
- self.parseElementTrace = parseElementList
-
- def __str__( self ):
- return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
- def __init__(self,p1,p2):
- self.tup = (p1,p2)
- def __getitem__(self,i):
- return self.tup[i]
- def __repr__(self):
- return repr(self.tup[0])
- def setOffset(self,i):
- self.tup = (self.tup[0],i)
-
-class ParseResults(object):
- """
- Structured parse results, to provide multiple means of access to the parsed data:
- - as a list (C{len(results)})
- - by list index (C{results[0], results[1]}, etc.)
- - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
-
- Example::
- integer = Word(nums)
- date_str = (integer.setResultsName("year") + '/'
- + integer.setResultsName("month") + '/'
- + integer.setResultsName("day"))
- # equivalent form:
- # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- # parseString returns a ParseResults object
- result = date_str.parseString("1999/12/31")
-
- def test(s, fn=repr):
- print("%s -> %s" % (s, fn(eval(s))))
- test("list(result)")
- test("result[0]")
- test("result['month']")
- test("result.day")
- test("'month' in result")
- test("'minutes' in result")
- test("result.dump()", str)
- prints::
- list(result) -> ['1999', '/', '12', '/', '31']
- result[0] -> '1999'
- result['month'] -> '12'
- result.day -> '31'
- 'month' in result -> True
- 'minutes' in result -> False
- result.dump() -> ['1999', '/', '12', '/', '31']
- - day: 31
- - month: 12
- - year: 1999
- """
- def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
- if isinstance(toklist, cls):
- return toklist
- retobj = object.__new__(cls)
- retobj.__doinit = True
- return retobj
-
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
- if self.__doinit:
- self.__doinit = False
- self.__name = None
- self.__parent = None
- self.__accumNames = {}
- self.__asList = asList
- self.__modal = modal
- if toklist is None:
- toklist = []
- if isinstance(toklist, list):
- self.__toklist = toklist[:]
- elif isinstance(toklist, _generatorType):
- self.__toklist = list(toklist)
- else:
- self.__toklist = [toklist]
- self.__tokdict = dict()
-
- if name is not None and name:
- if not modal:
- self.__accumNames[name] = 0
- if isinstance(name,int):
- name = _ustr(name) # will always return a str, but use _ustr for consistency
- self.__name = name
- if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
- if isinstance(toklist,basestring):
- toklist = [ toklist ]
- if asList:
- if isinstance(toklist,ParseResults):
- self[name] = _ParseResultsWithOffset(toklist.copy(),0)
- else:
- self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
- self[name].__name = name
- else:
- try:
- self[name] = toklist[0]
- except (KeyError,TypeError,IndexError):
- self[name] = toklist
-
- def __getitem__( self, i ):
- if isinstance( i, (int,slice) ):
- return self.__toklist[i]
- else:
- if i not in self.__accumNames:
- return self.__tokdict[i][-1][0]
- else:
- return ParseResults([ v[0] for v in self.__tokdict[i] ])
-
- def __setitem__( self, k, v, isinstance=isinstance ):
- if isinstance(v,_ParseResultsWithOffset):
- self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
- sub = v[0]
- elif isinstance(k,(int,slice)):
- self.__toklist[k] = v
- sub = v
- else:
- self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
- sub = v
- if isinstance(sub,ParseResults):
- sub.__parent = wkref(self)
-
- def __delitem__( self, i ):
- if isinstance(i,(int,slice)):
- mylen = len( self.__toklist )
- del self.__toklist[i]
-
- # convert int to slice
- if isinstance(i, int):
- if i < 0:
- i += mylen
- i = slice(i, i+1)
- # get removed indices
- removed = list(range(*i.indices(mylen)))
- removed.reverse()
- # fixup indices in token dictionary
- for name,occurrences in self.__tokdict.items():
- for j in removed:
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
- else:
- del self.__tokdict[i]
-
- def __contains__( self, k ):
- return k in self.__tokdict
-
- def __len__( self ): return len( self.__toklist )
- def __bool__(self): return ( not not self.__toklist )
- __nonzero__ = __bool__
- def __iter__( self ): return iter( self.__toklist )
- def __reversed__( self ): return iter( self.__toklist[::-1] )
- def _iterkeys( self ):
- if hasattr(self.__tokdict, "iterkeys"):
- return self.__tokdict.iterkeys()
- else:
- return iter(self.__tokdict)
-
- def _itervalues( self ):
- return (self[k] for k in self._iterkeys())
-
- def _iteritems( self ):
- return ((k, self[k]) for k in self._iterkeys())
-
- if PY_3:
- keys = _iterkeys
- """Returns an iterator of all named result keys (Python 3.x only)."""
-
- values = _itervalues
- """Returns an iterator of all named result values (Python 3.x only)."""
-
- items = _iteritems
- """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
-
- else:
- iterkeys = _iterkeys
- """Returns an iterator of all named result keys (Python 2.x only)."""
-
- itervalues = _itervalues
- """Returns an iterator of all named result values (Python 2.x only)."""
-
- iteritems = _iteritems
- """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
-
- def keys( self ):
- """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
- return list(self.iterkeys())
-
- def values( self ):
- """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
- return list(self.itervalues())
-
- def items( self ):
- """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
- return list(self.iteritems())
-
- def haskeys( self ):
- """Since keys() returns an iterator, this method is helpful in bypassing
- code that looks for the existence of any defined results names."""
- return bool(self.__tokdict)
-
- def pop( self, *args, **kwargs):
- """
- Removes and returns item at specified index (default=C{last}).
- Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
- argument or an integer argument, it will use C{list} semantics
- and pop tokens from the list of parsed tokens. If passed a
- non-integer argument (most likely a string), it will use C{dict}
- semantics and pop the corresponding value from any defined
- results names. A second default return value argument is
- supported, just as in C{dict.pop()}.
-
- Example::
- def remove_first(tokens):
- tokens.pop(0)
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
- print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
-
- label = Word(alphas)
- patt = label("LABEL") + OneOrMore(Word(nums))
- print(patt.parseString("AAB 123 321").dump())
-
- # Use pop() in a parse action to remove named result (note that corresponding value is not
- # removed from list form of results)
- def remove_LABEL(tokens):
- tokens.pop("LABEL")
- return tokens
- patt.addParseAction(remove_LABEL)
- print(patt.parseString("AAB 123 321").dump())
- prints::
- ['AAB', '123', '321']
- - LABEL: AAB
-
- ['AAB', '123', '321']
- """
- if not args:
- args = [-1]
- for k,v in kwargs.items():
- if k == 'default':
- args = (args[0], v)
- else:
- raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
- if (isinstance(args[0], int) or
- len(args) == 1 or
- args[0] in self):
- index = args[0]
- ret = self[index]
- del self[index]
- return ret
- else:
- defaultvalue = args[1]
- return defaultvalue
-
- def get(self, key, defaultValue=None):
- """
- Returns named result matching the given key, or if there is no
- such name, then returns the given C{defaultValue} or C{None} if no
- C{defaultValue} is specified.
-
- Similar to C{dict.get()}.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString("1999/12/31")
- print(result.get("year")) # -> '1999'
- print(result.get("hour", "not specified")) # -> 'not specified'
- print(result.get("hour")) # -> None
- """
- if key in self:
- return self[key]
- else:
- return defaultValue
-
- def insert( self, index, insStr ):
- """
- Inserts new element at location index in the list of parsed tokens.
-
- Similar to C{list.insert()}.
-
- Example::
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to insert the parse location in the front of the parsed results
- def insert_locn(locn, tokens):
- tokens.insert(0, locn)
- print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
- """
- self.__toklist.insert(index, insStr)
- # fixup indices in token dictionary
- for name,occurrences in self.__tokdict.items():
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
-
- def append( self, item ):
- """
- Add single element to end of ParseResults list of elements.
-
- Example::
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to compute the sum of the parsed integers, and add it to the end
- def append_sum(tokens):
- tokens.append(sum(map(int, tokens)))
- print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
- """
- self.__toklist.append(item)
-
- def extend( self, itemseq ):
- """
- Add sequence of elements to end of ParseResults list of elements.
-
- Example::
- patt = OneOrMore(Word(alphas))
-
- # use a parse action to append the reverse of the matched strings, to make a palindrome
- def make_palindrome(tokens):
- tokens.extend(reversed([t[::-1] for t in tokens]))
- return ''.join(tokens)
- print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
- """
- if isinstance(itemseq, ParseResults):
- self += itemseq
- else:
- self.__toklist.extend(itemseq)
-
- def clear( self ):
- """
- Clear all elements and results names.
- """
- del self.__toklist[:]
- self.__tokdict.clear()
-
- def __getattr__( self, name ):
- # named-result lookup is delegated to __getitem__; missing names
- # yield "" rather than raising AttributeError
- try:
- return self[name]
- except KeyError:
- return ""
-
- def __add__( self, other ):
- ret = self.copy()
- ret += other
- return ret
-
- def __iadd__( self, other ):
- if other.__tokdict:
- offset = len(self.__toklist)
- addoffset = lambda a: offset if a<0 else a+offset
- otheritems = other.__tokdict.items()
- otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
- for (k,vlist) in otheritems for v in vlist]
- for k,v in otherdictitems:
- self[k] = v
- if isinstance(v[0],ParseResults):
- v[0].__parent = wkref(self)
-
- self.__toklist += other.__toklist
- self.__accumNames.update( other.__accumNames )
- return self
-
- def __radd__(self, other):
- if isinstance(other,int) and other == 0:
- # useful for merging many ParseResults using sum() builtin
- return self.copy()
- else:
- # this may raise a TypeError - so be it
- return other + self
-
- def __repr__( self ):
- return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
-
- def __str__( self ):
- return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
-
- def _asStringList( self, sep='' ):
- out = []
- for item in self.__toklist:
- if out and sep:
- out.append(sep)
- if isinstance( item, ParseResults ):
- out += item._asStringList()
- else:
- out.append( _ustr(item) )
- return out
-
- def asList( self ):
- """
- Returns the parse results as a nested list of matching tokens, all converted to strings.
-
- Example::
- patt = OneOrMore(Word(alphas))
- result = patt.parseString("sldkj lsdkj sldkj")
- # even though the result prints in string-like form, it is actually a pyparsing ParseResults
- print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
- # Use asList() to create an actual list
- result_list = result.asList()
- print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
- """
- return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
-
- def asDict( self ):
- """
- Returns the named parse results as a nested dictionary.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString('12/31/1999')
- print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
- result_dict = result.asDict()
- print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
- # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
- import json
- print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
- print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
- """
- if PY_3:
- item_fn = self.items
- else:
- item_fn = self.iteritems
-
- def toItem(obj):
- if isinstance(obj, ParseResults):
- if obj.haskeys():
- return obj.asDict()
- else:
- return [toItem(v) for v in obj]
- else:
- return obj
-
- return dict((k,toItem(v)) for k,v in item_fn())
-
- def copy( self ):
- """
- Returns a new copy of a C{ParseResults} object.
- """
- ret = ParseResults( self.__toklist )
- ret.__tokdict = self.__tokdict.copy()
- ret.__parent = self.__parent
- ret.__accumNames.update( self.__accumNames )
- ret.__name = self.__name
- return ret
-
- def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
- """
- (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
- """
- nl = "\n"
- out = []
- namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
- for v in vlist)
- nextLevelIndent = indent + " "
-
- # collapse out indents if formatting is not desired
- if not formatted:
- indent = ""
- nextLevelIndent = ""
- nl = ""
-
- selfTag = None
- if doctag is not None:
- selfTag = doctag
- else:
- if self.__name:
- selfTag = self.__name
-
- if not selfTag:
- if namedItemsOnly:
- return ""
- else:
- selfTag = "ITEM"
-
- out += [ nl, indent, "<", selfTag, ">" ]
-
- for i,res in enumerate(self.__toklist):
- if isinstance(res,ParseResults):
- if i in namedItems:
- out += [ res.asXML(namedItems[i],
- namedItemsOnly and doctag is None,
- nextLevelIndent,
- formatted)]
- else:
- out += [ res.asXML(None,
- namedItemsOnly and doctag is None,
- nextLevelIndent,
- formatted)]
- else:
- # individual token, see if there is a name for it
- resTag = None
- if i in namedItems:
- resTag = namedItems[i]
- if not resTag:
- if namedItemsOnly:
- continue
- else:
- resTag = "ITEM"
- xmlBodyText = _xml_escape(_ustr(res))
- out += [ nl, nextLevelIndent, "<", resTag, ">",
- xmlBodyText,
- "</", resTag, ">" ]
-
- out += [ nl, indent, "</", selfTag, ">" ]
- return "".join(out)
-
- def __lookup(self,sub):
- for k,vlist in self.__tokdict.items():
- for v,loc in vlist:
- if sub is v:
- return k
- return None
-
- def getName(self):
- r"""
- Returns the results name for this token expression. Useful when several
- different expressions might match at a particular location.
-
- Example::
- integer = Word(nums)
- ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
- house_number_expr = Suppress('#') + Word(nums, alphanums)
- user_data = (Group(house_number_expr)("house_number")
- | Group(ssn_expr)("ssn")
- | Group(integer)("age"))
- user_info = OneOrMore(user_data)
-
- result = user_info.parseString("22 111-22-3333 #221B")
- for item in result:
- print(item.getName(), ':', item[0])
- prints::
- age : 22
- ssn : 111-22-3333
- house_number : 221B
- """
- if self.__name:
- return self.__name
- elif self.__parent:
- par = self.__parent()
- if par:
- return par.__lookup(self)
- else:
- return None
- elif (len(self) == 1 and
- len(self.__tokdict) == 1 and
- next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
- return next(iter(self.__tokdict.keys()))
- else:
- return None
-
- def dump(self, indent='', depth=0, full=True):
- """
- Diagnostic method for listing out the contents of a C{ParseResults}.
- Accepts an optional C{indent} argument so that this string can be embedded
- in a nested display of other data.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString('12/31/1999')
- print(result.dump())
- prints::
- ['12', '/', '31', '/', '1999']
- - day: 1999
- - month: 31
- - year: 12
- """
- out = []
- NL = '\n'
- out.append( indent+_ustr(self.asList()) )
- if full:
- if self.haskeys():
- items = sorted((str(k), v) for k,v in self.items())
- for k,v in items:
- if out:
- out.append(NL)
- out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
- if isinstance(v,ParseResults):
- if v:
- out.append( v.dump(indent,depth+1) )
- else:
- out.append(_ustr(v))
- else:
- out.append(repr(v))
- elif any(isinstance(vv,ParseResults) for vv in self):
- v = self
- for i,vv in enumerate(v):
- if isinstance(vv,ParseResults):
- out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
- else:
- out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
-
- return "".join(out)
-
- def pprint(self, *args, **kwargs):
- """
- Pretty-printer for parsed results as a list, using the C{pprint} module.
- Accepts additional positional or keyword args as defined for the
- C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
-
- Example::
- ident = Word(alphas, alphanums)
- num = Word(nums)
- func = Forward()
- term = ident | num | Group('(' + func + ')')
- func <<= ident + Group(Optional(delimitedList(term)))
- result = func.parseString("fna a,b,(fnb c,d,200),100")
- result.pprint(width=40)
- prints::
- ['fna',
- ['a',
- 'b',
- ['(', 'fnb', ['c', 'd', '200'], ')'],
- '100']]
- """
- pprint.pprint(self.asList(), *args, **kwargs)
-
- # add support for pickle protocol
- def __getstate__(self):
- return ( self.__toklist,
- ( self.__tokdict.copy(),
- self.__parent is not None and self.__parent() or None,
- self.__accumNames,
- self.__name ) )
-
- def __setstate__(self,state):
- self.__toklist = state[0]
- (self.__tokdict,
- par,
- inAccumNames,
- self.__name) = state[1]
- self.__accumNames = {}
- self.__accumNames.update(inAccumNames)
- if par is not None:
- self.__parent = wkref(par)
- else:
- self.__parent = None
-
- def __getnewargs__(self):
- return self.__toklist, self.__name, self.__asList, self.__modal
-
- def __dir__(self):
- return (dir(type(self)) + list(self.keys()))
-
-MutableMapping.register(ParseResults)
-
-def col (loc,strg):
- """Returns current column within a string, counting newlines as line separators.
- The first column is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
- """
- s = strg
- return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
-
-def lineno(loc,strg):
- """Returns current line number within a string, counting newlines as line separators.
- The first line is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
- """
- return strg.count("\n",0,loc) + 1
-
-def line( loc, strg ):
- """Returns the line of text containing loc within a string, counting newlines as line separators.
- """
- lastCR = strg.rfind("\n", 0, loc)
- nextCR = strg.find("\n", loc)
- if nextCR >= 0:
- return strg[lastCR+1:nextCR]
- else:
- return strg[lastCR+1:]
-
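-# Illustrative use of the three location helpers (not part of the module):
-#
-#   s = "abc\ndef"
-#   lineno(5, s), col(5, s), line(5, s)  # -> (2, 2, 'def')
-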
-def _defaultStartDebugAction( instring, loc, expr ):
- print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
-
-def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
- print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
-
-def _defaultExceptionDebugAction( instring, loc, expr, exc ):
- print ("Exception raised:" + _ustr(exc))
-
-def nullDebugAction(*args):
- """'Do-nothing' debug action, to suppress debugging output during parsing."""
- pass
-
-# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
-#~ 'decorator to trim function calls to match the arity of the target'
-#~ def _trim_arity(func, maxargs=3):
- #~ if func in singleArgBuiltins:
- #~ return lambda s,l,t: func(t)
- #~ limit = 0
- #~ foundArity = False
- #~ def wrapper(*args):
- #~ nonlocal limit,foundArity
- #~ while 1:
- #~ try:
- #~ ret = func(*args[limit:])
- #~ foundArity = True
- #~ return ret
- #~ except TypeError:
- #~ if limit == maxargs or foundArity:
- #~ raise
- #~ limit += 1
- #~ continue
- #~ return wrapper
-
-# this version is Python 2.x-3.x cross-compatible
-'decorator to trim function calls to match the arity of the target'
-def _trim_arity(func, maxargs=2):
- if func in singleArgBuiltins:
- return lambda s,l,t: func(t)
- limit = [0]
- foundArity = [False]
-
- # traceback return data structure changed in Py3.5 - normalize back to plain tuples
- if system_version[:2] >= (3,5):
- def extract_stack(limit=0):
- # special handling for Python 3.5.0 - extra deep call stack by 1
- offset = -3 if system_version == (3,5,0) else -2
- frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
- return [frame_summary[:2]]
- def extract_tb(tb, limit=0):
- frames = traceback.extract_tb(tb, limit=limit)
- frame_summary = frames[-1]
- return [frame_summary[:2]]
- else:
- extract_stack = traceback.extract_stack
- extract_tb = traceback.extract_tb
-
- # synthesize what would be returned by traceback.extract_stack at the call to
- # user's parse action 'func', so that we don't incur call penalty at parse time
-
- LINE_DIFF = 6
- # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
- # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
- this_line = extract_stack(limit=2)[-1]
- pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
-
- def wrapper(*args):
- while 1:
- try:
- ret = func(*args[limit[0]:])
- foundArity[0] = True
- return ret
- except TypeError:
- # re-raise TypeErrors if they did not come from our arity testing
- if foundArity[0]:
- raise
- else:
- try:
- tb = sys.exc_info()[-1]
- if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
- raise
- finally:
- del tb
-
- if limit[0] <= maxargs:
- limit[0] += 1
- continue
- raise
-
- # copy func name to wrapper for sensible debug output
- func_name = "<parse action>"
- try:
- func_name = getattr(func, '__name__',
- getattr(func, '__class__').__name__)
- except Exception:
- func_name = str(func)
- wrapper.__name__ = func_name
-
- return wrapper
-
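-# Illustrative consequence (not part of the original source): _trim_arity
-# lets parse actions declare any of the supported signatures.
-#
-#   Word(nums).setParseAction(lambda: None)               # fn()
-#   Word(nums).setParseAction(lambda t: int(t[0]))        # fn(toks)
-#   Word(nums).setParseAction(lambda l, t: t)             # fn(loc, toks)
-#   Word(nums).setParseAction(lambda s, l, t: int(t[0]))  # fn(s, loc, toks)
-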
-class ParserElement(object):
- """Abstract base level parser element class."""
- DEFAULT_WHITE_CHARS = " \n\t\r"
- verbose_stacktrace = False
-
- @staticmethod
- def setDefaultWhitespaceChars( chars ):
- r"""
- Overrides the default whitespace chars
-
- Example::
- # default whitespace chars are space, <TAB> and newline
- OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
-
- # change to just treat newline as significant
- ParserElement.setDefaultWhitespaceChars(" \t")
- OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
- """
- ParserElement.DEFAULT_WHITE_CHARS = chars
-
- @staticmethod
- def inlineLiteralsUsing(cls):
- """
- Set class to be used for inclusion of string literals into a parser.
-
- Example::
- # default literal class used is Literal
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
-
- # change to Suppress
- ParserElement.inlineLiteralsUsing(Suppress)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
- """
- ParserElement._literalStringClass = cls
-
- def __init__( self, savelist=False ):
- self.parseAction = list()
- self.failAction = None
- #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
- self.strRepr = None
- self.resultsName = None
- self.saveAsList = savelist
- self.skipWhitespace = True
- self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
- self.copyDefaultWhiteChars = True
- self.mayReturnEmpty = False # used when checking for left-recursion
- self.keepTabs = False
- self.ignoreExprs = list()
- self.debug = False
- self.streamlined = False
- self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
- self.errmsg = ""
- self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
- self.debugActions = ( None, None, None ) #custom debug actions
- self.re = None
- self.callPreparse = True # used to avoid redundant calls to preParse
- self.callDuringTry = False
-
- def copy( self ):
- """
- Make a copy of this C{ParserElement}. Useful for defining different parse actions
- for the same parsing pattern, using copies of the original parse element.
-
- Example::
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
- integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-
- print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
- prints::
- [5120, 100, 655360, 268435456]
- Equivalent form of C{expr.copy()} is just C{expr()}::
- integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
- """
- cpy = copy.copy( self )
- cpy.parseAction = self.parseAction[:]
- cpy.ignoreExprs = self.ignoreExprs[:]
- if self.copyDefaultWhiteChars:
- cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
- return cpy
-
- def setName( self, name ):
- """
- Define a name for this expression; this makes debugging and exception messages clearer.
-
- Example::
- Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
- Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
- """
- self.name = name
- self.errmsg = "Expected " + self.name
- if hasattr(self,"exception"):
- self.exception.msg = self.errmsg
- return self
-
- def setResultsName( self, name, listAllMatches=False ):
- """
- Define name for referencing matching tokens as a nested attribute
- of the returned parse results.
- NOTE: this returns a *copy* of the original C{ParserElement} object;
- this is so that the client can define a basic element, such as an
- integer, and reference it in multiple places with different names.
-
- You can also set results names using the abbreviated syntax,
- C{expr("name")} in place of C{expr.setResultsName("name")} -
- see L{I{__call__}<__call__>}.
-
- Example::
- date_str = (integer.setResultsName("year") + '/'
- + integer.setResultsName("month") + '/'
- + integer.setResultsName("day"))
-
- # equivalent form:
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
- """
- newself = self.copy()
- if name.endswith("*"):
- name = name[:-1]
- listAllMatches=True
- newself.resultsName = name
- newself.modalResults = not listAllMatches
- return newself
-
- def setBreak(self,breakFlag = True):
- """Method to invoke the Python pdb debugger when this element is
- about to be parsed. Set C{breakFlag} to True to enable, False to
- disable.
- """
- if breakFlag:
- _parseMethod = self._parse
- def breaker(instring, loc, doActions=True, callPreParse=True):
- import pdb
- pdb.set_trace()
- return _parseMethod( instring, loc, doActions, callPreParse )
- breaker._originalParseMethod = _parseMethod
- self._parse = breaker
- else:
- if hasattr(self._parse,"_originalParseMethod"):
- self._parse = self._parse._originalParseMethod
- return self
-
- def setParseAction( self, *fns, **kwargs ):
- """
- Define one or more actions to perform when successfully matching parse element definition.
- Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
- C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- - s = the original string being parsed (see note below)
- - loc = the location of the matching substring
- - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
- If the functions in fns modify the tokens, they can return them as the return
- value from fn, and the modified list of tokens will replace the original.
- Otherwise, fn does not need to return any value.
-
- Optional keyword arguments:
- - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{parseString}<parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
-
- Example::
- integer = Word(nums)
- date_str = integer + '/' + integer + '/' + integer
-
- date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
- # use parse action to convert to ints at parse time
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- date_str = integer + '/' + integer + '/' + integer
-
- # note that integer fields are now ints, not strings
- date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
- """
- self.parseAction = list(map(_trim_arity, list(fns)))
- self.callDuringTry = kwargs.get("callDuringTry", False)
- return self
-
- def addParseAction( self, *fns, **kwargs ):
- """
- Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
-
- See examples in L{I{copy}<copy>}.
- """
- self.parseAction += list(map(_trim_arity, list(fns)))
- self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
- return self
-
- def addCondition(self, *fns, **kwargs):
- """Add a boolean predicate function to expression's list of parse actions. See
- L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
- functions passed to C{addCondition} need to return boolean success/fail of the condition.
-
- Optional keyword arguments:
- - message = define a custom message to be used in the raised exception
- - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-
- Example::
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- year_int = integer.copy()
- year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
- date_str = year_int + '/' + integer + '/' + integer
-
- result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
- """
- msg = kwargs.get("message", "failed user-defined condition")
- exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
- for fn in fns:
- # bind fn now via a default argument; otherwise every condition added
- # in this loop would close over the last fn (late-binding closure bug)
- def pa(s,l,t,fn=_trim_arity(fn)):
- if not bool(fn(s,l,t)):
- raise exc_type(s,l,msg)
- self.parseAction.append(pa)
- self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
- return self
-
- def setFailAction( self, fn ):
- """Define action to perform if parsing fails at this expression.
- Fail action fn is a callable function that takes the arguments
- C{fn(s,loc,expr,err)} where:
- - s = string being parsed
- - loc = location where expression match was attempted and failed
- - expr = the parse expression that failed
- - err = the exception thrown
- The function returns no value. It may throw C{L{ParseFatalException}}
- if it is desired to stop parsing immediately."""
- self.failAction = fn
- return self
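-
- # An illustrative sketch of a fail action that reports where a match
- # attempt failed:
- #
- # def report_failure(s, loc, expr, err):
- # print("failed to match %s at loc %d: %s" % (expr, loc, err))
- # date = (Word(nums) + '/' + Word(nums)).setFailAction(report_failure)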
-
- def _skipIgnorables( self, instring, loc ):
- exprsFound = True
- while exprsFound:
- exprsFound = False
- for e in self.ignoreExprs:
- try:
- while 1:
- loc,dummy = e._parse( instring, loc )
- exprsFound = True
- except ParseException:
- pass
- return loc
-
- def preParse( self, instring, loc ):
- if self.ignoreExprs:
- loc = self._skipIgnorables( instring, loc )
-
- if self.skipWhitespace:
- wt = self.whiteChars
- instrlen = len(instring)
- while loc < instrlen and instring[loc] in wt:
- loc += 1
-
- return loc
-
- def parseImpl( self, instring, loc, doActions=True ):
- return loc, []
-
- def postParse( self, instring, loc, tokenlist ):
- return tokenlist
-
- #~ @profile
- def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
- debugging = ( self.debug ) #and doActions )
-
- if debugging or self.failAction:
- #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
- if (self.debugActions[0] ):
- self.debugActions[0]( instring, loc, self )
- if callPreParse and self.callPreparse:
- preloc = self.preParse( instring, loc )
- else:
- preloc = loc
- tokensStart = preloc
- try:
- try:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
- except IndexError:
- raise ParseException( instring, len(instring), self.errmsg, self )
- except ParseBaseException as err:
- #~ print ("Exception raised:", err)
- if self.debugActions[2]:
- self.debugActions[2]( instring, tokensStart, self, err )
- if self.failAction:
- self.failAction( instring, tokensStart, self, err )
- raise
- else:
- if callPreParse and self.callPreparse:
- preloc = self.preParse( instring, loc )
- else:
- preloc = loc
- tokensStart = preloc
- if self.mayIndexError or preloc >= len(instring):
- try:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
- except IndexError:
- raise ParseException( instring, len(instring), self.errmsg, self )
- else:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
-
- tokens = self.postParse( instring, loc, tokens )
-
- retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
- if self.parseAction and (doActions or self.callDuringTry):
- if debugging:
- try:
- for fn in self.parseAction:
- tokens = fn( instring, tokensStart, retTokens )
- if tokens is not None:
- retTokens = ParseResults( tokens,
- self.resultsName,
- asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
- modal=self.modalResults )
- except ParseBaseException as err:
- #~ print "Exception raised in user parse action:", err
- if (self.debugActions[2] ):
- self.debugActions[2]( instring, tokensStart, self, err )
- raise
- else:
- for fn in self.parseAction:
- tokens = fn( instring, tokensStart, retTokens )
- if tokens is not None:
- retTokens = ParseResults( tokens,
- self.resultsName,
- asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
- modal=self.modalResults )
- if debugging:
- #~ print ("Matched",self,"->",retTokens.asList())
- if (self.debugActions[1] ):
- self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
-
- return loc, retTokens
-
- def tryParse( self, instring, loc ):
- try:
- return self._parse( instring, loc, doActions=False )[0]
- except ParseFatalException:
- raise ParseException( instring, loc, self.errmsg, self)
-
- def canParseNext(self, instring, loc):
- try:
- self.tryParse(instring, loc)
- except (ParseException, IndexError):
- return False
- else:
- return True
-
- class _UnboundedCache(object):
- def __init__(self):
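- # note: the cache dict is kept in this __init__'s closure rather than as
- # an instance attribute, and the accessor functions are bound onto the
- # instance with types.MethodType below; closure lookups keep the
- # per-call overhead low on this hot path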
- cache = {}
- self.not_in_cache = not_in_cache = object()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
-
- def clear(self):
- cache.clear()
-
- def cache_len(self):
- return len(cache)
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
- self.__len__ = types.MethodType(cache_len, self)
-
- if _OrderedDict is not None:
- class _FifoCache(object):
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
-
- cache = _OrderedDict()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
- while len(cache) > size:
- try:
- cache.popitem(False)
- except KeyError:
- pass
-
- def clear(self):
- cache.clear()
-
- def cache_len(self):
- return len(cache)
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
- self.__len__ = types.MethodType(cache_len, self)
-
- else:
- class _FifoCache(object):
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
-
- cache = {}
- # no maxlen on this deque - a length-capped deque would silently drop
- # keys from the fifo without ever evicting them from the cache
- key_fifo = collections.deque()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
- key_fifo.append(key)
- while len(key_fifo) > size:
- cache.pop(key_fifo.popleft(), None)
-
- def clear(self):
- cache.clear()
- key_fifo.clear()
-
- def cache_len(self):
- return len(cache)
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
- self.__len__ = types.MethodType(cache_len, self)
-
- # argument cache for optimizing repeated calls when backtracking through recursive expressions
- packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
- packrat_cache_lock = RLock()
- packrat_cache_stats = [0, 0]
-
- # this method gets repeatedly called during backtracking with the same arguments -
- # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
- def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
- HIT, MISS = 0, 1
- lookup = (self, instring, loc, callPreParse, doActions)
- with ParserElement.packrat_cache_lock:
- cache = ParserElement.packrat_cache
- value = cache.get(lookup)
- if value is cache.not_in_cache:
- ParserElement.packrat_cache_stats[MISS] += 1
- try:
- value = self._parseNoCache(instring, loc, doActions, callPreParse)
- except ParseBaseException as pe:
- # cache a copy of the exception, without the traceback
- cache.set(lookup, pe.__class__(*pe.args))
- raise
- else:
- cache.set(lookup, (value[0], value[1].copy()))
- return value
- else:
- ParserElement.packrat_cache_stats[HIT] += 1
- if isinstance(value, Exception):
- raise value
- return (value[0], value[1].copy())
-
- _parse = _parseNoCache
-
- @staticmethod
- def resetCache():
- ParserElement.packrat_cache.clear()
- ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
-
- _packratEnabled = False
- @staticmethod
- def enablePackrat(cache_size_limit=128):
- """Enables "packrat" parsing, which adds memoizing to the parsing logic.
- Repeated parse attempts at the same string location (which happens
- often in many complex grammars) can immediately return a cached value,
- instead of re-executing parsing/validating code. Memoizing is done for
- both valid results and parsing exceptions.
-
- Parameters:
- - cache_size_limit - (default=C{128}) - if an integer value is provided
- will limit the size of the packrat cache; if None is passed, then
- the cache size will be unbounded; if 0 is passed, the cache will
- be effectively disabled.
-
- This speedup may break existing programs that use parse actions that
- have side-effects. For this reason, packrat parsing is disabled when
- you first import pyparsing. To activate the packrat feature, your
- program must call the class method C{ParserElement.enablePackrat()}. If
- your program uses C{psyco} to "compile as you go", you must call
- C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
- Python will crash. For best results, call C{enablePackrat()} immediately
- after importing pyparsing.
-
- Example::
- import pyparsing
- pyparsing.ParserElement.enablePackrat()
- """
- if not ParserElement._packratEnabled:
- ParserElement._packratEnabled = True
- if cache_size_limit is None:
- ParserElement.packrat_cache = ParserElement._UnboundedCache()
- else:
- ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
- ParserElement._parse = ParserElement._parseCache
-
- def parseString( self, instring, parseAll=False ):
- """
- Execute the parse expression with the given string.
- This is the main interface to the client code, once the complete
- expression has been built.
-
- If you want the grammar to require that the entire input string be
- successfully parsed, then set C{parseAll} to True (equivalent to ending
- the grammar with C{L{StringEnd()}}).
-
- Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
- in order to report proper column numbers in parse actions.
- If the input string contains tabs and
- the grammar uses parse actions that use the C{loc} argument to index into the
- string being parsed, you can ensure you have a consistent view of the input
- string by:
- - calling C{parseWithTabs} on your grammar before calling C{parseString}
- (see L{I{parseWithTabs}<parseWithTabs>})
- - define your parse action using the full C{(s,loc,toks)} signature, and
- reference the input string using the parse action's C{s} argument
- explicitly expand the tabs in your input string before calling
- C{parseString}
-
- Example::
- Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
- Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
- """
- ParserElement.resetCache()
- if not self.streamlined:
- self.streamline()
- #~ self.saveAsList = True
- for e in self.ignoreExprs:
- e.streamline()
- if not self.keepTabs:
- instring = instring.expandtabs()
- try:
- loc, tokens = self._parse( instring, 0 )
- if parseAll:
- loc = self.preParse( instring, loc )
- se = Empty() + StringEnd()
- se._parse( instring, loc )
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
- else:
- return tokens
-
- def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
- """
- Scan the input string for expression matches. Each match will return the
- matching tokens, start location, and end location. May be called with optional
- C{maxMatches} argument, to clip scanning after 'n' matches are found. If
- C{overlap} is specified, then overlapping matches will be reported.
-
- Note that the start and end locations are reported relative to the string
- being parsed. See L{I{parseString}<parseString>} for more information on parsing
- strings with embedded tabs.
-
- Example::
- source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
- print(source)
- for tokens,start,end in Word(alphas).scanString(source):
- print(' '*start + '^'*(end-start))
- print(' '*start + tokens[0])
-
- prints::
-
- sldjf123lsdjjkf345sldkjf879lkjsfd987
- ^^^^^
- sldjf
- ^^^^^^^
- lsdjjkf
- ^^^^^^
- sldkjf
- ^^^^^^
- lkjsfd
- """
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
-
- if not self.keepTabs:
- instring = _ustr(instring).expandtabs()
- instrlen = len(instring)
- loc = 0
- preparseFn = self.preParse
- parseFn = self._parse
- ParserElement.resetCache()
- matches = 0
- try:
- while loc <= instrlen and matches < maxMatches:
- try:
- preloc = preparseFn( instring, loc )
- nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
- except ParseException:
- loc = preloc+1
- else:
- if nextLoc > loc:
- matches += 1
- yield tokens, preloc, nextLoc
- if overlap:
- nextloc = preparseFn( instring, loc )
- if nextloc > loc:
- loc = nextLoc
- else:
- loc += 1
- else:
- loc = nextLoc
- else:
- loc = preloc+1
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def transformString( self, instring ):
- """
- Extension to C{L{scanString}}, to modify matching text with modified tokens that may
- be returned from a parse action. To use C{transformString}, define a grammar and
- attach a parse action to it that modifies the returned token list.
- Invoking C{transformString()} on a target string will then scan for matches,
- and replace the matched text patterns according to the logic in the parse
- action. C{transformString()} returns the resulting transformed string.
-
- Example::
- wd = Word(alphas)
- wd.setParseAction(lambda toks: toks[0].title())
-
- print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
- Prints::
- Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
- """
- out = []
- lastE = 0
- # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
- # keep string locs straight between transformString and scanString
- self.keepTabs = True
- try:
- for t,s,e in self.scanString( instring ):
- out.append( instring[lastE:s] )
- if t:
- if isinstance(t,ParseResults):
- out += t.asList()
- elif isinstance(t,list):
- out += t
- else:
- out.append(t)
- lastE = e
- out.append(instring[lastE:])
- out = [o for o in out if o]
- return "".join(map(_ustr,_flatten(out)))
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def searchString( self, instring, maxMatches=_MAX_INT ):
- """
- Another extension to C{L{scanString}}, simplifying the access to the tokens found
- to match the given parse expression. May be called with optional
- C{maxMatches} argument, to clip searching after 'n' matches are found.
-
- Example::
- # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
- cap_word = Word(alphas.upper(), alphas.lower())
-
- print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
-
- # the sum() builtin can be used to merge results into a single ParseResults object
- print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
- prints::
- [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
- ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
- """
- try:
- return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
- """
- Generator method to split a string using the given expression as a separator.
- May be called with optional C{maxsplit} argument, to limit the number of splits;
- and the optional C{includeSeparators} argument (default=C{False}), indicating
- whether the matched separator text should be included in the split results.
-
- Example::
- punc = oneOf(list(".,;:/-!?"))
- print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
- prints::
- ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
- """
- splits = 0
- last = 0
- for t,s,e in self.scanString(instring, maxMatches=maxsplit):
- yield instring[last:s]
- if includeSeparators:
- yield t[0]
- last = e
- yield instring[last:]
-
- def __add__(self, other ):
- """
- Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
- converts them to L{Literal}s by default.
-
- Example::
- greet = Word(alphas) + "," + Word(alphas) + "!"
- hello = "Hello, World!"
- print (hello, "->", greet.parseString(hello))
- Prints::
- Hello, World! -> ['Hello', ',', 'World', '!']
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return And( [ self, other ] )
-
- def __radd__(self, other ):
- """
- Implementation of + operator when left operand is not a C{L{ParserElement}}
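-
- Example (an illustrative sketch; the leading string is converted to a
- L{Literal} automatically)::
- greet = "Hello," + Word(alphas)
- greet.parseString("Hello, World") # -> ['Hello,', 'World']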
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other + self
-
- def __sub__(self, other):
- """
- Implementation of - operator, returns C{L{And}} with error stop
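-
- Example (an illustrative sketch; once "start" has matched, a failure of
- the following expression raises a fatal exception instead of allowing
- backtracking)::
- expr = Literal("start") - Word(nums)
- expr.parseString("start 123") # -> ['start', '123']
- expr.parseString("start abc") # -> fatal parse exception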
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return self + And._ErrorStop() + other
-
- def __rsub__(self, other ):
- """
- Implementation of - operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other - self
-
- def __mul__(self,other):
- """
- Implementation of * operator, allows use of C{expr * 3} in place of
- C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
- tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
- may also include C{None} as in:
- - C{expr*(n,None)} or C{expr*(n,)} is equivalent
- to C{expr*n + L{ZeroOrMore}(expr)}
- (read as "at least n instances of C{expr}")
- - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
- (read as "0 to n instances of C{expr}")
- - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
-
- Note that C{expr*(None,n)} does not raise an exception if
- more than n exprs exist in the input stream; that is,
- C{expr*(None,n)} does not enforce a maximum number of expr
- occurrences. If this behavior is desired, then write
- C{expr*(None,n) + ~expr}
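-
- Example (an illustrative sketch)::
- integer = Word(nums)
- (integer * 3).parseString("1 2 3") # -> ['1', '2', '3']
- (integer * (2,4)).parseString("1 2 3") # -> ['1', '2', '3']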
- """
- if isinstance(other,int):
- minElements, optElements = other,0
- elif isinstance(other,tuple):
- other = (other + (None, None))[:2]
- if other[0] is None:
- other = (0, other[1])
- if isinstance(other[0],int) and other[1] is None:
- if other[0] == 0:
- return ZeroOrMore(self)
- if other[0] == 1:
- return OneOrMore(self)
- else:
- return self*other[0] + ZeroOrMore(self)
- elif isinstance(other[0],int) and isinstance(other[1],int):
- minElements, optElements = other
- optElements -= minElements
- else:
- raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
- else:
- raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
-
- if minElements < 0:
- raise ValueError("cannot multiply ParserElement by negative value")
- if optElements < 0:
- raise ValueError("second tuple value must be greater than or equal to first tuple value")
- if minElements == optElements == 0:
- raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
-
- if (optElements):
- def makeOptionalList(n):
- if n>1:
- return Optional(self + makeOptionalList(n-1))
- else:
- return Optional(self)
- if minElements:
- if minElements == 1:
- ret = self + makeOptionalList(optElements)
- else:
- ret = And([self]*minElements) + makeOptionalList(optElements)
- else:
- ret = makeOptionalList(optElements)
- else:
- if minElements == 1:
- ret = self
- else:
- ret = And([self]*minElements)
- return ret
-
- def __rmul__(self, other):
- return self.__mul__(other)
-
- def __or__(self, other ):
- """
- Implementation of | operator - returns C{L{MatchFirst}}
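-
- Example (an illustrative sketch; alternatives are tried left to right,
- and the first match wins)::
- number = Word(nums) | Word(alphas)
- number.parseString("abc") # -> ['abc']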
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return MatchFirst( [ self, other ] )
-
- def __ror__(self, other ):
- """
- Implementation of | operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other | self
-
- def __xor__(self, other ):
- """
- Implementation of ^ operator - returns C{L{Or}}
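-
- Example (an illustrative sketch; unlike C{|}, C{^} evaluates all
- alternatives and chooses the longest match)::
- expr = Word(nums) ^ Word(nums+'.')
- expr.parseString("3.1416") # -> ['3.1416']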
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return Or( [ self, other ] )
-
- def __rxor__(self, other ):
- """
- Implementation of ^ operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other ^ self
-
- def __and__(self, other ):
- """
- Implementation of & operator - returns C{L{Each}}
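-
- Example (an illustrative sketch; C{&} matches all expressions, in any
- order)::
- modifiers = Keyword("small") & Keyword("red")
- modifiers.parseString("red small") # -> matches, despite the order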
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return Each( [ self, other ] )
-
- def __rand__(self, other ):
- """
- Implementation of & operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other & self
-
- def __invert__( self ):
- """
- Implementation of ~ operator - returns C{L{NotAny}}
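-
- Example (an illustrative sketch; C{~} is a negative lookahead and
- consumes no input)::
- identifier = ~Keyword("end") + Word(alphas)
- identifier.parseString("ending") # -> ['ending']
- identifier.parseString("end") # -> Exception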
- """
- return NotAny( self )
-
- def __call__(self, name=None):
- """
- Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
-
- If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
- passed as C{True}.
-
- If C{name} is omitted, same as calling C{L{copy}}.
-
- Example::
- # these are equivalent
- userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
- userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
- """
- if name is not None:
- return self.setResultsName(name)
- else:
- return self.copy()
-
- def suppress( self ):
- """
- Suppresses the output of this C{ParserElement}; useful to keep punctuation from
- cluttering up returned output.
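-
- Example (an illustrative sketch)::
- wd_pair = Word(alphas) + Suppress(',') + Word(alphas)
- wd_pair.parseString("red, green") # -> ['red', 'green']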
- """
- return Suppress( self )
-
- def leaveWhitespace( self ):
- """
- Disables the skipping of whitespace before matching the characters in the
- C{ParserElement}'s defined pattern. This is normally only used internally by
- the pyparsing module, but may be needed in some whitespace-sensitive grammars.
- """
- self.skipWhitespace = False
- return self
-
- def setWhitespaceChars( self, chars ):
- """
- Overrides the default whitespace chars
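-
- Example (an illustrative sketch; with only space and tab treated as
- whitespace, a newline is no longer skipped between words)::
- word = Word(alphas).setWhitespaceChars(" \\t")
- OneOrMore(word).parseString("abc def\\nghi") # -> ['abc', 'def']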
- """
- self.skipWhitespace = True
- self.whiteChars = chars
- self.copyDefaultWhiteChars = False
- return self
-
- def parseWithTabs( self ):
- """
- Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
- Must be called before C{parseString} when the input grammar contains elements that
- match C{<TAB>} characters.
- """
- self.keepTabs = True
- return self
-
- def ignore( self, other ):
- """
- Define expression to be ignored (e.g., comments) while doing pattern
- matching; may be called repeatedly, to define multiple comment or other
- ignorable patterns.
-
- Example::
- patt = OneOrMore(Word(alphas))
- patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
-
- patt.ignore(cStyleComment)
- patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
- """
- if isinstance(other, basestring):
- other = Suppress(other)
-
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- self.ignoreExprs.append(other)
- else:
- self.ignoreExprs.append( Suppress( other.copy() ) )
- return self
-
- def setDebugActions( self, startAction, successAction, exceptionAction ):
- """
- Enable display of debugging messages while doing pattern matching.
- """
- self.debugActions = (startAction or _defaultStartDebugAction,
- successAction or _defaultSuccessDebugAction,
- exceptionAction or _defaultExceptionDebugAction)
- self.debug = True
- return self
-
- def setDebug( self, flag=True ):
- """
- Enable display of debugging messages while doing pattern matching.
- Set C{flag} to True to enable, False to disable.
-
- Example::
- wd = Word(alphas).setName("alphaword")
- integer = Word(nums).setName("numword")
- term = wd | integer
-
- # turn on debugging for wd
- wd.setDebug()
-
- OneOrMore(term).parseString("abc 123 xyz 890")
-
- prints::
- Match alphaword at loc 0(1,1)
- Matched alphaword -> ['abc']
- Match alphaword at loc 3(1,4)
- Exception raised:Expected alphaword (at char 4), (line:1, col:5)
- Match alphaword at loc 7(1,8)
- Matched alphaword -> ['xyz']
- Match alphaword at loc 11(1,12)
- Exception raised:Expected alphaword (at char 12), (line:1, col:13)
- Match alphaword at loc 15(1,16)
- Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
- The output shown is that produced by the default debug actions - custom debug actions can be
- specified using L{setDebugActions}. Prior to attempting
- to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
- is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
- message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
- which makes debugging and exception messages easier to understand - for instance, the default
- name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
- """
- if flag:
- self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
- else:
- self.debug = False
- return self
-
- def __str__( self ):
- return self.name
-
- def __repr__( self ):
- return _ustr(self)
-
- def streamline( self ):
- self.streamlined = True
- self.strRepr = None
- return self
-
- def checkRecursion( self, parseElementList ):
- pass
-
- def validate( self, validateTrace=[] ):
- """
- Check defined expressions for valid structure, check for infinite recursive definitions.
- """
- self.checkRecursion( [] )
-
- def parseFile( self, file_or_filename, parseAll=False ):
- """
- Execute the parse expression on the given file or filename.
- If a filename is specified (instead of a file object),
- the entire file is opened, read, and closed before parsing.
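-
- Example (an illustrative sketch; C{"words.txt"} is a hypothetical file)::
- grammar = OneOrMore(Word(alphas))
- results = grammar.parseFile("words.txt")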
- """
- try:
- file_contents = file_or_filename.read()
- except AttributeError:
- with open(file_or_filename, "r") as f:
- file_contents = f.read()
- try:
- return self.parseString(file_contents, parseAll)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def __eq__(self,other):
- if isinstance(other, ParserElement):
- return self is other or vars(self) == vars(other)
- elif isinstance(other, basestring):
- return self.matches(other)
- else:
- # compare via the default protocol; the old form compared the super
- # proxy object itself, which is never equal to other
- return super(ParserElement,self).__eq__(other)
-
- def __ne__(self,other):
- return not (self == other)
-
- def __hash__(self):
- return hash(id(self))
-
- def __req__(self,other):
- return self == other
-
- def __rne__(self,other):
- return not (self == other)
-
- def matches(self, testString, parseAll=True):
- """
- Method for quick testing of a parser against a test string. Good for simple
- inline microtests of sub-expressions while building up a larger parser.
-
- Parameters:
- - testString - to test against this expression for a match
- - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
-
- Example::
- expr = Word(nums)
- assert expr.matches("100")
- """
- try:
- self.parseString(_ustr(testString), parseAll=parseAll)
- return True
- except ParseBaseException:
- return False
-
- def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
- """
- Execute the parse expression on a series of test strings, showing each
- test and either the parsed results or where the parse failed. Quick and easy way to
- run a parse expression against a list of sample strings.
-
- Parameters:
- - tests - a list of separate test strings, or a multiline string of test strings
- - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
- string; pass None to disable comment filtering
- - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
- if False, only dump nested list
- - printResults - (default=C{True}) prints test output to stdout
- - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
-
- Returns: a (success, results) tuple, where success indicates that all tests succeeded
- (or failed if C{failureTests} is True), and the results contain a list of lines of each
- test's output
-
- Example::
- number_expr = pyparsing_common.number.copy()
-
- result = number_expr.runTests('''
- # unsigned integer
- 100
- # negative integer
- -100
- # float with scientific notation
- 6.02e23
- # integer with scientific notation
- 1e-12
- ''')
- print("Success" if result[0] else "Failed!")
-
- result = number_expr.runTests('''
- # stray character
- 100Z
- # missing leading digit before '.'
- -.100
- # too many '.'
- 3.14.159
- ''', failureTests=True)
- print("Success" if result[0] else "Failed!")
- prints::
- # unsigned integer
- 100
- [100]
-
- # negative integer
- -100
- [-100]
-
- # float with scientific notation
- 6.02e23
- [6.02e+23]
-
- # integer with scientific notation
- 1e-12
- [1e-12]
-
- Success
-
- # stray character
- 100Z
- ^
- FAIL: Expected end of text (at char 3), (line:1, col:4)
-
- # missing leading digit before '.'
- -.100
- ^
- FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
- # too many '.'
- 3.14.159
- ^
- FAIL: Expected end of text (at char 4), (line:1, col:5)
-
- Success
-
- Each test string must be on a single line. If you want to test a string that spans multiple
- lines, create a test like this::
-
- expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
-
- (Note that this is a raw string literal; you must include the leading 'r'.)
- """
- if isinstance(tests, basestring):
- tests = list(map(str.strip, tests.rstrip().splitlines()))
- if isinstance(comment, basestring):
- comment = Literal(comment)
- allResults = []
- comments = []
- success = True
- for t in tests:
- if (comment is not None and comment.matches(t, False)) or (comments and not t):
- comments.append(t)
- continue
- if not t:
- continue
- out = ['\n'.join(comments), t]
- comments = []
- try:
- t = t.replace(r'\n','\n')
- result = self.parseString(t, parseAll=parseAll)
- out.append(result.dump(full=fullDump))
- success = success and not failureTests
- except ParseBaseException as pe:
- fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
- if '\n' in t:
- out.append(line(pe.loc, t))
- out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
- else:
- out.append(' '*pe.loc + '^' + fatal)
- out.append("FAIL: " + str(pe))
- success = success and failureTests
- result = pe
- except Exception as exc:
- out.append("FAIL-EXCEPTION: " + str(exc))
- success = success and failureTests
- result = exc
-
- if printResults:
- if fullDump:
- out.append('')
- print('\n'.join(out))
-
- allResults.append((t, result))
-
- return success, allResults
-
-
-class Token(ParserElement):
- """
- Abstract C{ParserElement} subclass, for defining atomic matching patterns.
- """
- def __init__( self ):
- super(Token,self).__init__( savelist=False )
-
-
-class Empty(Token):
- """
- An empty token; it always matches.
- """
- def __init__( self ):
- super(Empty,self).__init__()
- self.name = "Empty"
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class NoMatch(Token):
- """
- A token that will never match.
- """
- def __init__( self ):
- super(NoMatch,self).__init__()
- self.name = "NoMatch"
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.errmsg = "Unmatchable token"
-
- def parseImpl( self, instring, loc, doActions=True ):
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
- """
- Token to exactly match a specified string.
-
- Example::
- Literal('blah').parseString('blah') # -> ['blah']
- Literal('blah').parseString('blahfooblah') # -> ['blah']
- Literal('blah').parseString('bla') # -> Exception: Expected "blah"
-
- For case-insensitive matching, use L{CaselessLiteral}.
-
- For keyword matching (force word break before and after the matched string),
- use L{Keyword} or L{CaselessKeyword}.
- """
- def __init__( self, matchString ):
- super(Literal,self).__init__()
- self.match = matchString
- self.matchLen = len(matchString)
- try:
- self.firstMatchChar = matchString[0]
- except IndexError:
- warnings.warn("null string passed to Literal; use Empty() instead",
- SyntaxWarning, stacklevel=2)
- self.__class__ = Empty
- self.name = '"%s"' % _ustr(self.match)
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
-
- # Performance tuning: this routine gets called a *lot*
- # if this is a single character match string and the first character matches,
- # short-circuit as quickly as possible, and avoid calling startswith
- #~ @profile
- def parseImpl( self, instring, loc, doActions=True ):
- if (instring[loc] == self.firstMatchChar and
- (self.matchLen==1 or instring.startswith(self.match,loc)) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-_L = Literal
-ParserElement._literalStringClass = Literal
-
-class Keyword(Token):
- """
- Token to exactly match a specified string as a keyword, that is, it must be
- immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
- Accepts two optional constructor arguments in addition to the keyword string:
- - C{identChars} is a string of characters that would be valid identifier characters,
- defaulting to all alphanumerics + "_" and "$"
- - C{caseless} allows case-insensitive matching, default is C{False}.
-
- Example::
- Keyword("start").parseString("start") # -> ['start']
- Keyword("start").parseString("starting") # -> Exception
-
- For case-insensitive matching, use L{CaselessKeyword}.
- """
- DEFAULT_KEYWORD_CHARS = alphanums+"_$"
-
- def __init__( self, matchString, identChars=None, caseless=False ):
- super(Keyword,self).__init__()
- if identChars is None:
- identChars = Keyword.DEFAULT_KEYWORD_CHARS
- self.match = matchString
- self.matchLen = len(matchString)
- try:
- self.firstMatchChar = matchString[0]
- except IndexError:
- warnings.warn("null string passed to Keyword; use Empty() instead",
- SyntaxWarning, stacklevel=2)
- self.name = '"%s"' % self.match
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
- self.caseless = caseless
- if caseless:
- self.caselessmatch = matchString.upper()
- identChars = identChars.upper()
- self.identChars = set(identChars)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.caseless:
- if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
- (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
- return loc+self.matchLen, self.match
- else:
- if (instring[loc] == self.firstMatchChar and
- (self.matchLen==1 or instring.startswith(self.match,loc)) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
- (loc == 0 or instring[loc-1] not in self.identChars) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
- def copy(self):
- c = super(Keyword,self).copy()
- c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
- return c
-
- @staticmethod
- def setDefaultKeywordChars( chars ):
- """Overrides the default Keyword chars
- """
- Keyword.DEFAULT_KEYWORD_CHARS = chars
-
-class CaselessLiteral(Literal):
- """
- Token to match a specified string, ignoring case of letters.
- Note: the matched results will always be in the case of the given
- match string, NOT the case of the input text.
-
- Example::
- OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
-
- (Contrast with example for L{CaselessKeyword}.)
- """
- def __init__( self, matchString ):
- super(CaselessLiteral,self).__init__( matchString.upper() )
- # Preserve the defining literal.
- self.returnString = matchString
- self.name = "'%s'" % self.returnString
- self.errmsg = "Expected " + self.name
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[ loc:loc+self.matchLen ].upper() == self.match:
- return loc+self.matchLen, self.returnString
- raise ParseException(instring, loc, self.errmsg, self)
-
-class CaselessKeyword(Keyword):
- """
- Caseless version of L{Keyword}.
-
- Example::
- OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
-
- (Contrast with example for L{CaselessLiteral}.)
- """
- def __init__( self, matchString, identChars=None ):
- super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
-
- def parseImpl( self, instring, loc, doActions=True ):
- if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-class CloseMatch(Token):
- """
- A variation on L{Literal} which matches "close" matches, that is,
- strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- - C{match_string} - string to be matched
- - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
-
- The results from a successful parse will contain the matched text from the input string and the following named results:
- - C{mismatches} - a list of the positions within the match_string where mismatches were found
- - C{original} - the original match_string used to compare against the input string
-
- If C{mismatches} is an empty list, then the match was an exact match.
-
- Example::
- patt = CloseMatch("ATCATCGAATGGA")
- patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
- patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
- # exact match
- patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
- # close match allowing up to 2 mismatches
- patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
- patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
- """
- def __init__(self, match_string, maxMismatches=1):
- super(CloseMatch,self).__init__()
- self.name = match_string
- self.match_string = match_string
- self.maxMismatches = maxMismatches
- self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
- self.mayIndexError = False
- self.mayReturnEmpty = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- start = loc
- instrlen = len(instring)
- maxloc = start + len(self.match_string)
-
- if maxloc <= instrlen:
- match_string = self.match_string
- match_stringloc = 0
- mismatches = []
- maxMismatches = self.maxMismatches
-
- for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
- src,mat = s_m
- if src != mat:
- mismatches.append(match_stringloc)
- if len(mismatches) > maxMismatches:
- break
- else:
- loc = match_stringloc + 1
- results = ParseResults([instring[start:loc]])
- results['original'] = self.match_string
- results['mismatches'] = mismatches
- return loc, results
-
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
- """
- Token for matching words composed of allowed character sets.
- Defined with string containing all allowed initial characters,
- an optional string containing allowed body characters (if omitted,
- defaults to the initial character set), and an optional minimum,
- maximum, and/or exact length. The default value for C{min} is 1 (a
- minimum value < 1 is not valid); the default values for C{max} and C{exact}
- are 0, meaning no maximum or exact length restriction. An optional
- C{excludeChars} parameter can list characters that might be found in
- the input C{bodyChars} string; useful to define a word of all printables
- except for one or two characters, for instance.
-
- L{srange} is useful for defining custom character set strings for defining
- C{Word} expressions, using range notation from regular expression character sets.
-
- A common mistake is to use C{Word} to match a specific literal string, as in
- C{Word("Address")}. Remember that C{Word} uses the string argument to define
- I{sets} of matchable characters. This expression would match "Add", "AAA",
- "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
- To match an exact literal string, use L{Literal} or L{Keyword}.
-
- pyparsing includes helper strings for building Words:
- - L{alphas}
- - L{nums}
- - L{alphanums}
- - L{hexnums}
- L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- - L{printables} (any non-whitespace character)
-
- Example::
- # a word composed of digits
- integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
- # a word with a leading capital, and zero or more lowercase
- capital_word = Word(alphas.upper(), alphas.lower())
-
- # hostnames are alphanumeric, with leading alpha, and '-'
- hostname = Word(alphas, alphanums+'-')
-
- # roman numeral (not a strict parser, accepts invalid mix of characters)
- roman = Word("IVXLCDM")
-
- # any string of non-whitespace characters, except for ','
- csv_value = Word(printables, excludeChars=",")
- """
- def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
- super(Word,self).__init__()
- if excludeChars:
- initChars = ''.join(c for c in initChars if c not in excludeChars)
- if bodyChars:
- bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
- self.initCharsOrig = initChars
- self.initChars = set(initChars)
- if bodyChars :
- self.bodyCharsOrig = bodyChars
- self.bodyChars = set(bodyChars)
- else:
- self.bodyCharsOrig = initChars
- self.bodyChars = set(initChars)
-
- self.maxSpecified = max > 0
-
- if min < 1:
- raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asKeyword = asKeyword
-
- if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
- if self.bodyCharsOrig == self.initCharsOrig:
- self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
- elif len(self.initCharsOrig) == 1:
- self.reString = "%s[%s]*" % \
- (re.escape(self.initCharsOrig),
- _escapeRegexRangeChars(self.bodyCharsOrig),)
- else:
- self.reString = "[%s][%s]*" % \
- (_escapeRegexRangeChars(self.initCharsOrig),
- _escapeRegexRangeChars(self.bodyCharsOrig),)
- if self.asKeyword:
- self.reString = r"\b"+self.reString+r"\b"
- try:
- self.re = re.compile( self.reString )
- except Exception:
- self.re = None
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.re:
- result = self.re.match(instring,loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- return loc, result.group()
-
- if not(instring[ loc ] in self.initChars):
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- instrlen = len(instring)
- bodychars = self.bodyChars
- maxloc = start + self.maxLen
- maxloc = min( maxloc, instrlen )
- while loc < maxloc and instring[loc] in bodychars:
- loc += 1
-
- throwException = False
- if loc - start < self.minLen:
- throwException = True
- if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
- throwException = True
- if self.asKeyword:
- if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
- throwException = True
-
- if throwException:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
- def __str__( self ):
- try:
- return super(Word,self).__str__()
- except Exception:
- pass
-
-
- if self.strRepr is None:
-
- def charsAsStr(s):
- if len(s)>4:
- return s[:4]+"..."
- else:
- return s
-
- if ( self.initCharsOrig != self.bodyCharsOrig ):
- self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
- else:
- self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
- return self.strRepr
-
-
-class Regex(Token):
- r"""
- Token for matching strings that match a given regular expression.
- Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
- If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
- named parse results.
-
- Example::
- realnum = Regex(r"[+-]?\d+\.\d*")
- date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
- # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
- roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
- """
- compiledREtype = type(re.compile("[A-Z]"))
- def __init__( self, pattern, flags=0):
- """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
- super(Regex,self).__init__()
-
- if isinstance(pattern, basestring):
- if not pattern:
- warnings.warn("null string passed to Regex; use Empty() instead",
- SyntaxWarning, stacklevel=2)
-
- self.pattern = pattern
- self.flags = flags
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- except sre_constants.error:
- warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
- SyntaxWarning, stacklevel=2)
- raise
-
- elif isinstance(pattern, Regex.compiledREtype):
- self.re = pattern
- self.pattern = \
- self.reString = str(pattern)
- self.flags = flags
-
- else:
- raise ValueError("Regex may only be constructed with a string or a compiled RE object")
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- result = self.re.match(instring,loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- d = result.groupdict()
- ret = ParseResults(result.group())
- if d:
- for k in d:
- ret[k] = d[k]
- return loc,ret
-
- def __str__( self ):
- try:
- return super(Regex,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "Re:(%s)" % repr(self.pattern)
-
- return self.strRepr
-
-
-class QuotedString(Token):
- r"""
- Token for matching strings that are delimited by quoting characters.
-
- Defined with the following parameters:
- - quoteChar - string of one or more characters defining the quote delimiting string
- - escChar - character to escape quotes, typically backslash (default=C{None})
- - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
-
- Example::
- qs = QuotedString('"')
- print(qs.searchString('lsjdf "This is the quote" sldjf'))
- complex_qs = QuotedString('{{', endQuoteChar='}}')
- print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
- sql_qs = QuotedString('"', escQuote='""')
- print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
- prints::
- [['This is the quote']]
- [['This is the "quote"']]
- [['This is the quote with "embedded" quotes']]
- """
- def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
- super(QuotedString,self).__init__()
-
- # remove white space from quote chars - won't work anyway
- quoteChar = quoteChar.strip()
- if not quoteChar:
- warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
- raise SyntaxError()
-
- if endQuoteChar is None:
- endQuoteChar = quoteChar
- else:
- endQuoteChar = endQuoteChar.strip()
- if not endQuoteChar:
- warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
- raise SyntaxError()
-
- self.quoteChar = quoteChar
- self.quoteCharLen = len(quoteChar)
- self.firstQuoteChar = quoteChar[0]
- self.endQuoteChar = endQuoteChar
- self.endQuoteCharLen = len(endQuoteChar)
- self.escChar = escChar
- self.escQuote = escQuote
- self.unquoteResults = unquoteResults
- self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
- if multiline:
- self.flags = re.MULTILINE | re.DOTALL
- self.pattern = r'%s(?:[^%s%s]' % \
- ( re.escape(self.quoteChar),
- _escapeRegexRangeChars(self.endQuoteChar[0]),
- (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
- else:
- self.flags = 0
- self.pattern = r'%s(?:[^%s\n\r%s]' % \
- ( re.escape(self.quoteChar),
- _escapeRegexRangeChars(self.endQuoteChar[0]),
- (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
- if len(self.endQuoteChar) > 1:
- self.pattern += (
- '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
- _escapeRegexRangeChars(self.endQuoteChar[i]))
- for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
- )
- if escQuote:
- self.pattern += (r'|(?:%s)' % re.escape(escQuote))
- if escChar:
- self.pattern += (r'|(?:%s.)' % re.escape(escChar))
- self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
- self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- except sre_constants.error:
- warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
- SyntaxWarning, stacklevel=2)
- raise
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- result = self.re.match(instring,loc) if instring[loc] == self.firstQuoteChar else None
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.group()
-
- if self.unquoteResults:
-
- # strip off quotes
- ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
-
- if isinstance(ret,basestring):
- # replace escaped whitespace
- if '\\' in ret and self.convertWhitespaceEscapes:
- ws_map = {
- r'\t' : '\t',
- r'\n' : '\n',
- r'\f' : '\f',
- r'\r' : '\r',
- }
- for wslit,wschar in ws_map.items():
- ret = ret.replace(wslit, wschar)
-
- # replace escaped characters
- if self.escChar:
- ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
- # replace escaped quotes
- if self.escQuote:
- ret = ret.replace(self.escQuote, self.endQuoteChar)
-
- return loc, ret
-
- def __str__( self ):
- try:
- return super(QuotedString,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
-
- return self.strRepr
-
-
-class CharsNotIn(Token):
- """
- Token for matching words composed of characters I{not} in a given set (will
- include whitespace in matched characters if not listed in the provided exclusion set - see example).
- Defined with string containing all disallowed characters, and an optional
- minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
- minimum value < 1 is not valid); the default values for C{max} and C{exact}
- are 0, meaning no maximum or exact length restriction.
-
- Example::
- # define a comma-separated-value as anything that is not a ','
- csv_value = CharsNotIn(',')
- print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
- prints::
- ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
- """
- def __init__( self, notChars, min=1, max=0, exact=0 ):
- super(CharsNotIn,self).__init__()
- self.skipWhitespace = False
- self.notChars = notChars
-
- if min < 1:
- raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = ( self.minLen == 0 )
- self.mayIndexError = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[loc] in self.notChars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- notchars = self.notChars
- maxlen = min( start+self.maxLen, len(instring) )
- while loc < maxlen and \
- (instring[loc] not in notchars):
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
- def __str__( self ):
- try:
- return super(CharsNotIn, self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- if len(self.notChars) > 4:
- self.strRepr = "!W:(%s...)" % self.notChars[:4]
- else:
- self.strRepr = "!W:(%s)" % self.notChars
-
- return self.strRepr
-
-class White(Token):
- """
- Special matching class for matching whitespace. Normally, whitespace is ignored
-    by pyparsing grammars.  Use this class when some whitespace structures
-    are significant.  Define with a string containing the whitespace characters to be
- matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
- as defined for the C{L{Word}} class.
- """
- whiteStrs = {
- " " : "<SPC>",
- "\t": "<TAB>",
- "\n": "<LF>",
- "\r": "<CR>",
- "\f": "<FF>",
- }
- def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
- super(White,self).__init__()
- self.matchWhite = ws
- self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
- #~ self.leaveWhitespace()
- self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
- self.mayReturnEmpty = True
- self.errmsg = "Expected " + self.name
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- def parseImpl( self, instring, loc, doActions=True ):
- if not(instring[ loc ] in self.matchWhite):
- raise ParseException(instring, loc, self.errmsg, self)
- start = loc
- loc += 1
- maxloc = start + self.maxLen
- maxloc = min( maxloc, len(instring) )
- while loc < maxloc and instring[loc] in self.matchWhite:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
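-# A minimal usage sketch for White, using only names defined in this module:
-# by default pyparsing skips whitespace, but White turns selected whitespace
-# characters into matchable tokens.
-#
-#   tab_sep = Word(alphas) + White("\t").suppress() + Word(alphas)
-#   print(tab_sep.parseString("key\tvalue"))   # -> ['key', 'value']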
-
-class _PositionToken(Token):
- def __init__( self ):
- super(_PositionToken,self).__init__()
- self.name=self.__class__.__name__
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-class GoToColumn(_PositionToken):
- """
- Token to advance to a specific column of input text; useful for tabular report scraping.
- """
- def __init__( self, colno ):
- super(GoToColumn,self).__init__()
- self.col = colno
-
- def preParse( self, instring, loc ):
- if col(loc,instring) != self.col:
- instrlen = len(instring)
- if self.ignoreExprs:
- loc = self._skipIgnorables( instring, loc )
- while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
- loc += 1
- return loc
-
- def parseImpl( self, instring, loc, doActions=True ):
- thiscol = col( loc, instring )
- if thiscol > self.col:
- raise ParseException( instring, loc, "Text not in expected column", self )
- newloc = loc + self.col - thiscol
- ret = instring[ loc: newloc ]
- return newloc, ret
-
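-# A minimal usage sketch for GoToColumn, assuming fixed-column report text in
-# which the second field always starts in column 11:
-#
-#   row = Word(alphas) + GoToColumn(11) + Word(nums)
-#   # GoToColumn skips whitespace up to column 11; any remaining text consumed
-#   # on the way to that column is returned as a token of its own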
-
-class LineStart(_PositionToken):
- """
- Matches if current position is at the beginning of a line within the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
- print(t)
-
- Prints::
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
- def __init__( self ):
- super(LineStart,self).__init__()
- self.errmsg = "Expected start of line"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if col(loc, instring) == 1:
- return loc, []
- raise ParseException(instring, loc, self.errmsg, self)
-
-class LineEnd(_PositionToken):
- """
- Matches if current position is at the end of a line within the parse string
- """
- def __init__( self ):
- super(LineEnd,self).__init__()
- self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
- self.errmsg = "Expected end of line"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc<len(instring):
- if instring[loc] == "\n":
- return loc+1, "\n"
- else:
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc+1, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
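-# A minimal usage sketch for LineEnd in a line-oriented grammar:
-#
-#   last_word = Word(alphanums) + LineEnd().suppress()
-#   # matches a word only when it is the final token on its line
-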
-class StringStart(_PositionToken):
- """
- Matches if current position is at the beginning of the parse string
- """
- def __init__( self ):
- super(StringStart,self).__init__()
- self.errmsg = "Expected start of text"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc != 0:
- # see if entire string up to here is just whitespace and ignoreables
- if loc != self.preParse( instring, 0 ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-class StringEnd(_PositionToken):
- """
- Matches if current position is at the end of the parse string
- """
- def __init__( self ):
- super(StringEnd,self).__init__()
- self.errmsg = "Expected end of text"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc < len(instring):
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc+1, []
- elif loc > len(instring):
- return loc, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-class WordStart(_PositionToken):
- """
- Matches if the current position is at the beginning of a Word, and
- is not preceded by any character in a given set of C{wordChars}
- (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
- use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
- the string being parsed, or at the beginning of a line.
- """
- def __init__(self, wordChars = printables):
- super(WordStart,self).__init__()
- self.wordChars = set(wordChars)
- self.errmsg = "Not at the start of a word"
-
- def parseImpl(self, instring, loc, doActions=True ):
- if loc != 0:
- if (instring[loc-1] in self.wordChars or
- instring[loc] not in self.wordChars):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-class WordEnd(_PositionToken):
- """
- Matches if the current position is at the end of a Word, and
- is not followed by any character in a given set of C{wordChars}
- (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
- use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
- the string being parsed, or at the end of a line.
- """
- def __init__(self, wordChars = printables):
- super(WordEnd,self).__init__()
- self.wordChars = set(wordChars)
- self.skipWhitespace = False
- self.errmsg = "Not at the end of a word"
-
- def parseImpl(self, instring, loc, doActions=True ):
- instrlen = len(instring)
- if instrlen>0 and loc<instrlen:
- if (instring[loc] in self.wordChars or
- instring[loc-1] not in self.wordChars):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
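-# A minimal usage sketch for WordStart/WordEnd, emulating the regex \b word
-# boundary as suggested in the docstrings above:
-#
-#   standalone_is = WordStart(alphanums) + Literal("is") + WordEnd(alphanums)
-#   print(standalone_is.searchString("this is an island"))  # -> [['is']]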
-
-class ParseExpression(ParserElement):
- """
- Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
- """
- def __init__( self, exprs, savelist = False ):
- super(ParseExpression,self).__init__(savelist)
- if isinstance( exprs, _generatorType ):
- exprs = list(exprs)
-
- if isinstance( exprs, basestring ):
- self.exprs = [ ParserElement._literalStringClass( exprs ) ]
- elif isinstance( exprs, Iterable ):
- exprs = list(exprs)
- # if sequence of strings provided, wrap with Literal
- if all(isinstance(expr, basestring) for expr in exprs):
- exprs = map(ParserElement._literalStringClass, exprs)
- self.exprs = list(exprs)
- else:
- try:
- self.exprs = list( exprs )
- except TypeError:
- self.exprs = [ exprs ]
- self.callPreparse = False
-
- def __getitem__( self, i ):
- return self.exprs[i]
-
- def append( self, other ):
- self.exprs.append( other )
- self.strRepr = None
- return self
-
- def leaveWhitespace( self ):
- """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
- all contained expressions."""
- self.skipWhitespace = False
- self.exprs = [ e.copy() for e in self.exprs ]
- for e in self.exprs:
- e.leaveWhitespace()
- return self
-
- def ignore( self, other ):
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- super( ParseExpression, self).ignore( other )
- for e in self.exprs:
- e.ignore( self.ignoreExprs[-1] )
- else:
- super( ParseExpression, self).ignore( other )
- for e in self.exprs:
- e.ignore( self.ignoreExprs[-1] )
- return self
-
- def __str__( self ):
- try:
- return super(ParseExpression,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
- return self.strRepr
-
- def streamline( self ):
- super(ParseExpression,self).streamline()
-
- for e in self.exprs:
- e.streamline()
-
- # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
- # but only if there are no parse actions or resultsNames on the nested And's
- # (likewise for Or's and MatchFirst's)
- if ( len(self.exprs) == 2 ):
- other = self.exprs[0]
- if ( isinstance( other, self.__class__ ) and
- not(other.parseAction) and
- other.resultsName is None and
- not other.debug ):
- self.exprs = other.exprs[:] + [ self.exprs[1] ]
- self.strRepr = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- other = self.exprs[-1]
- if ( isinstance( other, self.__class__ ) and
- not(other.parseAction) and
- other.resultsName is None and
- not other.debug ):
- self.exprs = self.exprs[:-1] + other.exprs[:]
- self.strRepr = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- self.errmsg = "Expected " + _ustr(self)
-
- return self
-
- def setResultsName( self, name, listAllMatches=False ):
- ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
- return ret
-
- def validate( self, validateTrace=[] ):
- tmp = validateTrace[:]+[self]
- for e in self.exprs:
- e.validate(tmp)
- self.checkRecursion( [] )
-
- def copy(self):
- ret = super(ParseExpression,self).copy()
- ret.exprs = [e.copy() for e in self.exprs]
- return ret
-
-class And(ParseExpression):
- """
- Requires all given C{ParseExpression}s to be found in the given order.
- Expressions may be separated by whitespace.
- May be constructed using the C{'+'} operator.
- May also be constructed using the C{'-'} operator, which will suppress backtracking.
-
- Example::
- integer = Word(nums)
- name_expr = OneOrMore(Word(alphas))
-
- expr = And([integer("id"),name_expr("name"),integer("age")])
- # more easily written as:
- expr = integer("id") + name_expr("name") + integer("age")
- """
-
- class _ErrorStop(Empty):
- def __init__(self, *args, **kwargs):
- super(And._ErrorStop,self).__init__(*args, **kwargs)
- self.name = '-'
- self.leaveWhitespace()
-
- def __init__( self, exprs, savelist = True ):
- super(And,self).__init__(exprs, savelist)
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- self.setWhitespaceChars( self.exprs[0].whiteChars )
- self.skipWhitespace = self.exprs[0].skipWhitespace
- self.callPreparse = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- # pass False as last arg to _parse for first element, since we already
- # pre-parsed the string as part of our And pre-parsing
- loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
- errorStop = False
- for e in self.exprs[1:]:
- if isinstance(e, And._ErrorStop):
- errorStop = True
- continue
- if errorStop:
- try:
- loc, exprtokens = e._parse( instring, loc, doActions )
- except ParseSyntaxException:
- raise
- except ParseBaseException as pe:
- pe.__traceback__ = None
- raise ParseSyntaxException._from_exception(pe)
- except IndexError:
- raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
- else:
- loc, exprtokens = e._parse( instring, loc, doActions )
- if exprtokens or exprtokens.haskeys():
- resultlist += exprtokens
- return loc, resultlist
-
- def __iadd__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #And( [ self, other ] )
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
- if not e.mayReturnEmpty:
- break
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
-
-class Or(ParseExpression):
- """
- Requires that at least one C{ParseExpression} is found.
- If two expressions match, the expression that matches the longest string will be used.
- May be constructed using the C{'^'} operator.
-
- Example::
- # construct Or using '^' operator
-
- number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
- print(number.searchString("123 3.1416 789"))
- prints::
- [['123'], ['3.1416'], ['789']]
- """
- def __init__( self, exprs, savelist = False ):
- super(Or,self).__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- maxExcLoc = -1
- maxException = None
- matches = []
- for e in self.exprs:
- try:
- loc2 = e.tryParse( instring, loc )
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(instring,len(instring),e.errmsg,self)
- maxExcLoc = len(instring)
- else:
- # save match among all matches, to retry longest to shortest
- matches.append((loc2, e))
-
- if matches:
- matches.sort(key=lambda x: -x[0])
- for _,e in matches:
- try:
- return e._parse( instring, loc, doActions )
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-
- def __ixor__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #Or( [ self, other ] )
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class MatchFirst(ParseExpression):
- """
- Requires that at least one C{ParseExpression} is found.
- If two expressions match, the first one listed is the one that will match.
- May be constructed using the C{'|'} operator.
-
- Example::
- # construct MatchFirst using '|' operator
-
- # watch the order of expressions to match
- number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
- print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
- # put more selective expression first
- number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
- print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
- """
- def __init__( self, exprs, savelist = False ):
- super(MatchFirst,self).__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- maxExcLoc = -1
- maxException = None
- for e in self.exprs:
- try:
- ret = e._parse( instring, loc, doActions )
- return ret
- except ParseException as err:
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(instring,len(instring),e.errmsg,self)
- maxExcLoc = len(instring)
-
-        else:
-            # only got here if no expression matched, raise exception for match that made it the furthest
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(instring, loc, "no defined alternatives to match", self)
-
- def __ior__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #MatchFirst( [ self, other ] )
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class Each(ParseExpression):
- """
- Requires all given C{ParseExpression}s to be found, but in any order.
- Expressions may be separated by whitespace.
- May be constructed using the C{'&'} operator.
-
- Example::
- color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
- shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
- integer = Word(nums)
- shape_attr = "shape:" + shape_type("shape")
- posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
- color_attr = "color:" + color("color")
- size_attr = "size:" + integer("size")
-
- # use Each (using operator '&') to accept attributes in any order
- # (shape and posn are required, color and size are optional)
- shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
-
- shape_spec.runTests('''
- shape: SQUARE color: BLACK posn: 100, 120
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- color:GREEN size:20 shape:TRIANGLE posn:20,40
- '''
- )
- prints::
- shape: SQUARE color: BLACK posn: 100, 120
- ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- - color: BLACK
- - posn: ['100', ',', '120']
- - x: 100
- - y: 120
- - shape: SQUARE
-
-
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- - color: BLUE
- - posn: ['50', ',', '80']
- - x: 50
- - y: 80
- - shape: CIRCLE
- - size: 50
-
-
- color: GREEN size: 20 shape: TRIANGLE posn: 20,40
- ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- - color: GREEN
- - posn: ['20', ',', '40']
- - x: 20
- - y: 40
- - shape: TRIANGLE
- - size: 20
- """
- def __init__( self, exprs, savelist = True ):
- super(Each,self).__init__(exprs, savelist)
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = True
- self.initExprGroups = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.initExprGroups:
- self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
- opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
- opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
- self.optionals = opt1 + opt2
- self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
- self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
- self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
- self.required += self.multirequired
- self.initExprGroups = False
- tmpLoc = loc
- tmpReqd = self.required[:]
- tmpOpt = self.optionals[:]
- matchOrder = []
-
- keepMatching = True
- while keepMatching:
- tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
- failed = []
- for e in tmpExprs:
- try:
- tmpLoc = e.tryParse( instring, tmpLoc )
- except ParseException:
- failed.append(e)
- else:
- matchOrder.append(self.opt1map.get(id(e),e))
- if e in tmpReqd:
- tmpReqd.remove(e)
- elif e in tmpOpt:
- tmpOpt.remove(e)
- if len(failed) == len(tmpExprs):
- keepMatching = False
-
- if tmpReqd:
- missing = ", ".join(_ustr(e) for e in tmpReqd)
- raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
-
- # add any unmatched Optionals, in case they have default values defined
- matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
-
- resultlist = []
- for e in matchOrder:
- loc,results = e._parse(instring,loc,doActions)
- resultlist.append(results)
-
- finalResults = sum(resultlist, ParseResults([]))
- return loc, finalResults
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class ParseElementEnhance(ParserElement):
- """
- Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
- """
- def __init__( self, expr, savelist=False ):
- super(ParseElementEnhance,self).__init__(savelist)
- if isinstance( expr, basestring ):
- if issubclass(ParserElement._literalStringClass, Token):
- expr = ParserElement._literalStringClass(expr)
- else:
- expr = ParserElement._literalStringClass(Literal(expr))
- self.expr = expr
- self.strRepr = None
- if expr is not None:
- self.mayIndexError = expr.mayIndexError
- self.mayReturnEmpty = expr.mayReturnEmpty
- self.setWhitespaceChars( expr.whiteChars )
- self.skipWhitespace = expr.skipWhitespace
- self.saveAsList = expr.saveAsList
- self.callPreparse = expr.callPreparse
- self.ignoreExprs.extend(expr.ignoreExprs)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.expr is not None:
- return self.expr._parse( instring, loc, doActions, callPreParse=False )
- else:
- raise ParseException("",loc,self.errmsg,self)
-
- def leaveWhitespace( self ):
- self.skipWhitespace = False
-        if self.expr is not None:
-            self.expr = self.expr.copy()
-            self.expr.leaveWhitespace()
- return self
-
- def ignore( self, other ):
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- super( ParseElementEnhance, self).ignore( other )
- if self.expr is not None:
- self.expr.ignore( self.ignoreExprs[-1] )
- else:
- super( ParseElementEnhance, self).ignore( other )
- if self.expr is not None:
- self.expr.ignore( self.ignoreExprs[-1] )
- return self
-
- def streamline( self ):
- super(ParseElementEnhance,self).streamline()
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def checkRecursion( self, parseElementList ):
- if self in parseElementList:
- raise RecursiveGrammarException( parseElementList+[self] )
- subRecCheckList = parseElementList[:] + [ self ]
- if self.expr is not None:
- self.expr.checkRecursion( subRecCheckList )
-
- def validate( self, validateTrace=[] ):
- tmp = validateTrace[:]+[self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self.checkRecursion( [] )
-
- def __str__( self ):
- try:
- return super(ParseElementEnhance,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None and self.expr is not None:
- self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
- return self.strRepr
-
-
-class FollowedBy(ParseElementEnhance):
- """
- Lookahead matching of the given parse expression. C{FollowedBy}
- does I{not} advance the parsing position within the input string, it only
- verifies that the specified parse expression matches at the current
- position. C{FollowedBy} always returns a null token list.
-
- Example::
- # use FollowedBy to match a label only if it is followed by a ':'
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
- OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
- prints::
- [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
- """
- def __init__( self, expr ):
- super(FollowedBy,self).__init__(expr)
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- self.expr.tryParse( instring, loc )
- return loc, []
-
-
-class NotAny(ParseElementEnhance):
- """
- Lookahead to disallow matching with the given parse expression. C{NotAny}
- does I{not} advance the parsing position within the input string, it only
- verifies that the specified parse expression does I{not} match at the current
- position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
- always returns a null token list. May be constructed using the '~' operator.
-
-    Example::
-        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
-
-        # take care not to mistake keywords for identifiers
-        ident = ~(AND | OR | NOT) + Word(alphas)
-        boolean_term = Optional(NOT) + ident
-    """
- def __init__( self, expr ):
- super(NotAny,self).__init__(expr)
- #~ self.leaveWhitespace()
- self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
- self.mayReturnEmpty = True
- self.errmsg = "Found unwanted token, "+_ustr(self.expr)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.expr.canParseNext(instring, loc):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "~{" + _ustr(self.expr) + "}"
-
- return self.strRepr
-
-class _MultipleMatch(ParseElementEnhance):
- def __init__( self, expr, stopOn=None):
- super(_MultipleMatch, self).__init__(expr)
- self.saveAsList = True
- ender = stopOn
- if isinstance(ender, basestring):
- ender = ParserElement._literalStringClass(ender)
- self.not_ender = ~ender if ender is not None else None
-
- def parseImpl( self, instring, loc, doActions=True ):
- self_expr_parse = self.expr._parse
- self_skip_ignorables = self._skipIgnorables
- check_ender = self.not_ender is not None
- if check_ender:
- try_not_ender = self.not_ender.tryParse
-
- # must be at least one (but first see if we are the stopOn sentinel;
- # if so, fail)
- if check_ender:
- try_not_ender(instring, loc)
- loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
- try:
-            hasIgnoreExprs = bool(self.ignoreExprs)
- while 1:
- if check_ender:
- try_not_ender(instring, loc)
- if hasIgnoreExprs:
- preloc = self_skip_ignorables( instring, loc )
- else:
- preloc = loc
- loc, tmptokens = self_expr_parse( instring, preloc, doActions )
- if tmptokens or tmptokens.haskeys():
- tokens += tmptokens
- except (ParseException,IndexError):
- pass
-
- return loc, tokens
-
-class OneOrMore(_MultipleMatch):
- """
- Repetition of one or more of the given expression.
-
- Parameters:
- - expr - expression that must match one or more times
- - stopOn - (default=C{None}) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example::
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
- text = "shape: SQUARE posn: upper left color: BLACK"
- OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
- # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
- # could also be written as
- (attr_expr * (1,)).parseString(text).pprint()
- """
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + _ustr(self.expr) + "}..."
-
- return self.strRepr
-
-class ZeroOrMore(_MultipleMatch):
- """
- Optional repetition of zero or more of the given expression.
-
- Parameters:
- - expr - expression that must match zero or more times
- - stopOn - (default=C{None}) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example: similar to L{OneOrMore}
- """
- def __init__( self, expr, stopOn=None):
- super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- try:
- return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
- except (ParseException,IndexError):
- return loc, []
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "[" + _ustr(self.expr) + "]..."
-
- return self.strRepr
-
-class _NullToken(object):
- def __bool__(self):
- return False
- __nonzero__ = __bool__
- def __str__(self):
- return ""
-
-_optionalNotMatched = _NullToken()
-class Optional(ParseElementEnhance):
- """
- Optional matching of the given expression.
-
- Parameters:
-     - expr - expression that may be matched once, or not at all
- - default (optional) - value to be returned if the optional expression is not found.
-
- Example::
- # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
- zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
- zip.runTests('''
- # traditional ZIP code
- 12345
-
- # ZIP+4 form
- 12101-0001
-
- # invalid ZIP
- 98765-
- ''')
- prints::
- # traditional ZIP code
- 12345
- ['12345']
-
- # ZIP+4 form
- 12101-0001
- ['12101-0001']
-
- # invalid ZIP
- 98765-
- ^
- FAIL: Expected end of text (at char 5), (line:1, col:6)
- """
- def __init__( self, expr, default=_optionalNotMatched ):
- super(Optional,self).__init__( expr, savelist=False )
- self.saveAsList = self.expr.saveAsList
- self.defaultValue = default
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- try:
- loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
- except (ParseException,IndexError):
- if self.defaultValue is not _optionalNotMatched:
- if self.expr.resultsName:
- tokens = ParseResults([ self.defaultValue ])
- tokens[self.expr.resultsName] = self.defaultValue
- else:
- tokens = [ self.defaultValue ]
- else:
- tokens = []
- return loc, tokens
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "[" + _ustr(self.expr) + "]"
-
- return self.strRepr
-
-class SkipTo(ParseElementEnhance):
- """
- Token for skipping over all undefined text until the matched expression is found.
-
- Parameters:
- - expr - target expression marking the end of the data to be skipped
- - include - (default=C{False}) if True, the target expression is also parsed
- (the skipped text and target expression are returned as a 2-element list).
- - ignore - (default=C{None}) used to define grammars (typically quoted strings and
- comments) that might contain false matches to the target expression
- - failOn - (default=C{None}) define expressions that are not allowed to be
- included in the skipped test; if found before the target expression is found,
- the SkipTo is not a match
-
- Example::
- report = '''
- Outstanding Issues Report - 1 Jan 2000
-
- # | Severity | Description | Days Open
- -----+----------+-------------------------------------------+-----------
- 101 | Critical | Intermittent system crash | 6
- 94 | Cosmetic | Spelling error on Login ('log|n') | 14
- 79 | Minor | System slow when running too many reports | 47
- '''
- integer = Word(nums)
- SEP = Suppress('|')
- # use SkipTo to simply match everything up until the next SEP
- # - ignore quoted strings, so that a '|' character inside a quoted string does not match
- # - parse action will call token.strip() for each matched token, i.e., the description body
- string_data = SkipTo(SEP, ignore=quotedString)
- string_data.setParseAction(tokenMap(str.strip))
- ticket_expr = (integer("issue_num") + SEP
- + string_data("sev") + SEP
- + string_data("desc") + SEP
- + integer("days_open"))
-
- for tkt in ticket_expr.searchString(report):
- print tkt.dump()
- prints::
- ['101', 'Critical', 'Intermittent system crash', '6']
- - days_open: 6
- - desc: Intermittent system crash
- - issue_num: 101
- - sev: Critical
- ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- - days_open: 14
- - desc: Spelling error on Login ('log|n')
- - issue_num: 94
- - sev: Cosmetic
- ['79', 'Minor', 'System slow when running too many reports', '47']
- - days_open: 47
- - desc: System slow when running too many reports
- - issue_num: 79
- - sev: Minor
- """
- def __init__( self, other, include=False, ignore=None, failOn=None ):
- super( SkipTo, self ).__init__( other )
- self.ignoreExpr = ignore
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.includeMatch = include
- self.asList = False
- if isinstance(failOn, basestring):
- self.failOn = ParserElement._literalStringClass(failOn)
- else:
- self.failOn = failOn
- self.errmsg = "No match found for "+_ustr(self.expr)
-
- def parseImpl( self, instring, loc, doActions=True ):
- startloc = loc
- instrlen = len(instring)
- expr = self.expr
- expr_parse = self.expr._parse
- self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
- self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-
- tmploc = loc
- while tmploc <= instrlen:
- if self_failOn_canParseNext is not None:
- # break if failOn expression matches
- if self_failOn_canParseNext(instring, tmploc):
- break
-
- if self_ignoreExpr_tryParse is not None:
- # advance past ignore expressions
- while 1:
- try:
- tmploc = self_ignoreExpr_tryParse(instring, tmploc)
- except ParseBaseException:
- break
-
- try:
- expr_parse(instring, tmploc, doActions=False, callPreParse=False)
- except (ParseException, IndexError):
- # no match, advance loc in string
- tmploc += 1
- else:
- # matched skipto expr, done
- break
-
- else:
- # ran off the end of the input string without matching skipto expr, fail
- raise ParseException(instring, loc, self.errmsg, self)
-
- # build up return values
- loc = tmploc
- skiptext = instring[startloc:loc]
- skipresult = ParseResults(skiptext)
-
- if self.includeMatch:
- loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
- skipresult += mat
-
- return loc, skipresult
-
-class Forward(ParseElementEnhance):
- """
- Forward declaration of an expression to be defined later -
- used for recursive grammars, such as algebraic infix notation.
- When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
-
- Note: take care when assigning to C{Forward} not to overlook precedence of operators.
- Specifically, '|' has a lower precedence than '<<', so that::
- fwdExpr << a | b | c
- will actually be evaluated as::
- (fwdExpr << a) | b | c
- thereby leaving b and c out as parseable alternatives. It is recommended that you
- explicitly group the values inserted into the C{Forward}::
- fwdExpr << (a | b | c)
- Converting to use the '<<=' operator instead will avoid this problem.
-
- See L{ParseResults.pprint} for an example of a recursive parser created using
- C{Forward}.
- """
- def __init__( self, other=None ):
- super(Forward,self).__init__( other, savelist=False )
-
- def __lshift__( self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass(other)
- self.expr = other
- self.strRepr = None
- self.mayIndexError = self.expr.mayIndexError
- self.mayReturnEmpty = self.expr.mayReturnEmpty
- self.setWhitespaceChars( self.expr.whiteChars )
- self.skipWhitespace = self.expr.skipWhitespace
- self.saveAsList = self.expr.saveAsList
- self.ignoreExprs.extend(self.expr.ignoreExprs)
- return self
-
- def __ilshift__(self, other):
- return self << other
-
- def leaveWhitespace( self ):
- self.skipWhitespace = False
- return self
-
- def streamline( self ):
- if not self.streamlined:
- self.streamlined = True
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def validate( self, validateTrace=[] ):
- if self not in validateTrace:
- tmp = validateTrace[:]+[self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self.checkRecursion([])
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-        # full expansion of self.expr is stubbed out for now - it creates
-        # awful memory and perf issues
-        return self.__class__.__name__ + ": ..."
-
- def copy(self):
- if self.expr is not None:
- return super(Forward,self).copy()
- else:
- ret = Forward()
- ret <<= self
- return ret
-
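-# A minimal sketch of a recursive grammar built with Forward, with the
-# right-hand side parenthesized as recommended in the docstring above:
-#
-#   expr = Forward()
-#   atom = Word(nums) | Suppress('(') + expr + Suppress(')')
-#   expr <<= (atom + ZeroOrMore('+' + atom))
-#   print(expr.parseString("(1+2)+3"))   # -> ['1', '+', '2', '+', '3']
-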
-class _ForwardNoRecurse(Forward):
- def __str__( self ):
- return "..."
-
-class TokenConverter(ParseElementEnhance):
- """
-    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
- """
- def __init__( self, expr, savelist=False ):
- super(TokenConverter,self).__init__( expr )#, savelist )
- self.saveAsList = False
-
-class Combine(TokenConverter):
- """
- Converter to concatenate all matching tokens to a single string.
- By default, the matching patterns must also be contiguous in the input string;
- this can be disabled by specifying C{'adjacent=False'} in the constructor.
-
- Example::
- real = Word(nums) + '.' + Word(nums)
- print(real.parseString('3.1416')) # -> ['3', '.', '1416']
- # will also erroneously match the following
- print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
-
- real = Combine(Word(nums) + '.' + Word(nums))
- print(real.parseString('3.1416')) # -> ['3.1416']
- # no match when there are internal spaces
- print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
- """
- def __init__( self, expr, joinString="", adjacent=True ):
- super(Combine,self).__init__( expr )
- # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
- if adjacent:
- self.leaveWhitespace()
- self.adjacent = adjacent
- self.skipWhitespace = True
- self.joinString = joinString
- self.callPreparse = True
-
- def ignore( self, other ):
- if self.adjacent:
- ParserElement.ignore(self, other)
- else:
- super( Combine, self).ignore( other )
- return self
-
- def postParse( self, instring, loc, tokenlist ):
- retToks = tokenlist.copy()
- del retToks[:]
- retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
-
- if self.resultsName and retToks.haskeys():
- return [ retToks ]
- else:
- return retToks
-
-class Group(TokenConverter):
- """
- Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
-
- Example::
- ident = Word(alphas)
- num = Word(nums)
- term = ident | num
- func = ident + Optional(delimitedList(term))
- print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
-
- func = ident + Group(Optional(delimitedList(term)))
- print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
- """
- def __init__( self, expr ):
- super(Group,self).__init__( expr )
- self.saveAsList = True
-
- def postParse( self, instring, loc, tokenlist ):
- return [ tokenlist ]
-
-class Dict(TokenConverter):
- """
- Converter to return a repetitive expression as a list, but also as a dictionary.
- Each element can also be referenced using the first token in the expression as its key.
-    Useful for tabular report scraping when the first column can be used as an item key.
-
- Example::
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
- # print attributes as plain groups
- print(OneOrMore(attr_expr).parseString(text).dump())
-
- # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
- result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
- print(result.dump())
-
- # access named fields as dict entries, or output as dict
- print(result['shape'])
- print(result.asDict())
- prints::
- ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: light blue
- - posn: upper left
- - shape: SQUARE
- - texture: burlap
- SQUARE
- {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
- See more examples at L{ParseResults} of accessing fields by results name.
- """
- def __init__( self, expr ):
- super(Dict,self).__init__( expr )
- self.saveAsList = True
-
- def postParse( self, instring, loc, tokenlist ):
- for i,tok in enumerate(tokenlist):
- if len(tok) == 0:
- continue
- ikey = tok[0]
- if isinstance(ikey,int):
- ikey = _ustr(tok[0]).strip()
- if len(tok)==1:
- tokenlist[ikey] = _ParseResultsWithOffset("",i)
- elif len(tok)==2 and not isinstance(tok[1],ParseResults):
- tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
- else:
- dictvalue = tok.copy() #ParseResults(i)
- del dictvalue[0]
- if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
- else:
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
-
- if self.resultsName:
- return [ tokenlist ]
- else:
- return tokenlist
-
-
-class Suppress(TokenConverter):
- """
- Converter for ignoring the results of a parsed expression.
-
- Example::
- source = "a, b, c,d"
- wd = Word(alphas)
- wd_list1 = wd + ZeroOrMore(',' + wd)
- print(wd_list1.parseString(source))
-
- # often, delimiters that are useful during parsing are just in the
- # way afterward - use Suppress to keep them out of the parsed output
- wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
- print(wd_list2.parseString(source))
- prints::
- ['a', ',', 'b', ',', 'c', ',', 'd']
- ['a', 'b', 'c', 'd']
- (See also L{delimitedList}.)
- """
- def postParse( self, instring, loc, tokenlist ):
- return []
-
- def suppress( self ):
- return self
-
-
-class OnlyOnce(object):
- """
- Wrapper for parse actions, to ensure they are only called once.
- """
- def __init__(self, methodCall):
- self.callable = _trim_arity(methodCall)
- self.called = False
- def __call__(self,s,l,t):
- if not self.called:
- results = self.callable(s,l,t)
- self.called = True
- return results
- raise ParseException(s,l,"")
- def reset(self):
- self.called = False
-
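-# A minimal usage sketch for OnlyOnce, wrapping a parse action that may only
-# fire on the first match (later matches raise ParseException until reset()
-# is called):
-#
-#   def announce(s, l, t):
-#       print("first match at char %d" % l)
-#   wd = Word(alphas).setParseAction(OnlyOnce(announce))
-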
-def traceParseAction(f):
- """
- Decorator for debugging parse actions.
-
- When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
- When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
-
- Example::
- wd = Word(alphas)
-
- @traceParseAction
- def remove_duplicate_chars(tokens):
- return ''.join(sorted(set(''.join(tokens))))
-
- wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
- print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
- prints::
- >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
- <<leaving remove_duplicate_chars (ret: 'dfjkls')
- ['dfjkls']
- """
- f = _trim_arity(f)
- def z(*paArgs):
- thisFunc = f.__name__
- s,l,t = paArgs[-3:]
- if len(paArgs)>3:
- thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
- sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
- try:
- ret = f(*paArgs)
- except Exception as exc:
- sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
- raise
- sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
- return ret
- try:
- z.__name__ = f.__name__
- except AttributeError:
- pass
- return z
-
-#
-# global helpers
-#
-def delimitedList( expr, delim=",", combine=False ):
- """
- Helper to define a delimited list of expressions - the delimiter defaults to ','.
-    By default, the list elements and delimiters can have intervening whitespace and
-    comments, but this can be overridden by passing C{combine=True} in the constructor.
- If C{combine} is set to C{True}, the matching tokens are returned as a single token
- string, with the delimiters included; otherwise, the matching tokens are returned
- as a list of tokens, with the delimiters suppressed.
-
- Example::
- delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
- delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
- """
- dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
- if combine:
- return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
- else:
- return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
-
-def countedArray( expr, intExpr=None ):
- """
- Helper to define a counted list of expressions.
- This helper defines a pattern of the form::
- integer expr expr expr...
- where the leading integer tells how many expr expressions follow.
- The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
-
- If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
-
- Example::
- countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
-
- # in this parser, the leading integer value is given in binary,
- # '10' indicating that 2 values are in the array
- binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
- countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
- """
- arrayExpr = Forward()
- def countFieldParseAction(s,l,t):
- n = t[0]
- arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
- return []
- if intExpr is None:
- intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
- else:
- intExpr = intExpr.copy()
- intExpr.setName("arrayLen")
- intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
- return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
- ret = []
- for i in L:
- if isinstance(i,list):
- ret.extend(_flatten(i))
- else:
- ret.append(i)
- return ret
-
-def matchPreviousLiteral(expr):
- """
- Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks
- for a 'repeat' of a previous expression. For example::
- first = Word(nums)
- second = matchPreviousLiteral(first)
- matchExpr = first + ":" + second
- will match C{"1:1"}, but not C{"1:2"}. Because this matches a
-    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
- If this is not desired, use C{matchPreviousExpr}.
- Do I{not} use with packrat parsing enabled.
- """
- rep = Forward()
- def copyTokenToRepeater(s,l,t):
- if t:
- if len(t) == 1:
- rep << t[0]
- else:
- # flatten t tokens
- tflat = _flatten(t.asList())
- rep << And(Literal(tt) for tt in tflat)
- else:
- rep << Empty()
- expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
- rep.setName('(prev) ' + _ustr(expr))
- return rep
-
-def matchPreviousExpr(expr):
- """
- Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks
- for a 'repeat' of a previous expression. For example::
- first = Word(nums)
- second = matchPreviousExpr(first)
- matchExpr = first + ":" + second
- will match C{"1:1"}, but not C{"1:2"}. Because this matches by
-    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
- the expressions are evaluated first, and then compared, so
- C{"1"} is compared with C{"10"}.
- Do I{not} use with packrat parsing enabled.
- """
- rep = Forward()
- e2 = expr.copy()
- rep <<= e2
- def copyTokenToRepeater(s,l,t):
- matchTokens = _flatten(t.asList())
- def mustMatchTheseTokens(s,l,t):
- theseTokens = _flatten(t.asList())
- if theseTokens != matchTokens:
- raise ParseException("",0,"")
- rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
- expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
- rep.setName('(prev) ' + _ustr(expr))
- return rep
-
-def _escapeRegexRangeChars(s):
- #~ escape these chars: ^-]
- for c in r"\^-]":
- s = s.replace(c,_bslash+c)
- s = s.replace("\n",r"\n")
- s = s.replace("\t",r"\t")
- return _ustr(s)
-
-def oneOf( strs, caseless=False, useRegex=True ):
- """
-    Helper to quickly define a set of alternative Literals, making sure to do
-    longest-first testing when there is a conflict, regardless of the input order,
-    while still returning a C{L{MatchFirst}} for best performance.
-
- Parameters:
- - strs - a string of space-delimited literals, or a collection of string literals
- - caseless - (default=C{False}) - treat all literals as caseless
- - useRegex - (default=C{True}) - as an optimization, will generate a Regex
- object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
- if creating a C{Regex} raises an exception)
-
- Example::
- comp_oper = oneOf("< = > <= >= !=")
- var = Word(alphas)
- number = Word(nums)
- term = var | number
- comparison_expr = term + comp_oper + term
- print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
- prints::
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
- """
- if caseless:
- isequal = ( lambda a,b: a.upper() == b.upper() )
- masks = ( lambda a,b: b.upper().startswith(a.upper()) )
- parseElementClass = CaselessLiteral
- else:
- isequal = ( lambda a,b: a == b )
- masks = ( lambda a,b: b.startswith(a) )
- parseElementClass = Literal
-
- symbols = []
- if isinstance(strs,basestring):
- symbols = strs.split()
- elif isinstance(strs, Iterable):
- symbols = list(strs)
- else:
- warnings.warn("Invalid argument to oneOf, expected string or iterable",
- SyntaxWarning, stacklevel=2)
- if not symbols:
- return NoMatch()
-
- i = 0
- while i < len(symbols)-1:
- cur = symbols[i]
- for j,other in enumerate(symbols[i+1:]):
- if ( isequal(other, cur) ):
- del symbols[i+j+1]
- break
- elif ( masks(cur, other) ):
- del symbols[i+j+1]
- symbols.insert(i,other)
- cur = other
- break
- else:
- i += 1
-
- if not caseless and useRegex:
- #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
- try:
- if len(symbols)==len("".join(symbols)):
- return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
- else:
- return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
- except Exception:
- warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
- SyntaxWarning, stacklevel=2)
-
-
- # last resort, just use MatchFirst
- return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
-def dictOf( key, value ):
- """
- Helper to easily and clearly define a dictionary by specifying the respective patterns
- for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
- in the proper order. The key pattern can include delimiting markers or punctuation,
- as long as they are suppressed, thereby leaving the significant key text. The value
- pattern can include named results, so that the C{Dict} results can include named token
- fields.
-
- Example::
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- print(OneOrMore(attr_expr).parseString(text).dump())
-
- attr_label = label
- attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
- # similar to Dict, but simpler call format
- result = dictOf(attr_label, attr_value).parseString(text)
- print(result.dump())
- print(result['shape'])
- print(result.shape) # object attribute access works too
- print(result.asDict())
- prints::
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: light blue
- - posn: upper left
- - shape: SQUARE
- - texture: burlap
- SQUARE
- SQUARE
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
- """
- return Dict( ZeroOrMore( Group ( key + value ) ) )
-
-def originalTextFor(expr, asString=True):
- """
- Helper to return the original, untokenized text for a given expression. Useful to
- restore the parsed fields of an HTML start tag into the raw tag text itself, or to
- revert separate tokens with intervening whitespace back to the original matching
-    input text. By default, returns a string containing the original parsed text.
-
- If the optional C{asString} argument is passed as C{False}, then the return value is a
- C{L{ParseResults}} containing any results names that were originally matched, and a
- single token containing the original matched text from the input string. So if
- the expression passed to C{L{originalTextFor}} contains expressions with defined
- results names, you must set C{asString} to C{False} if you want to preserve those
- results name values.
-
- Example::
- src = "this is test <b> bold <i>text</i> </b> normal text "
- for tag in ("b","i"):
- opener,closer = makeHTMLTags(tag)
- patt = originalTextFor(opener + SkipTo(closer) + closer)
- print(patt.searchString(src)[0])
- prints::
- ['<b> bold <i>text</i> </b>']
- ['<i>text</i>']
- """
- locMarker = Empty().setParseAction(lambda s,loc,t: loc)
- endlocMarker = locMarker.copy()
- endlocMarker.callPreparse = False
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
- if asString:
- extractText = lambda s,l,t: s[t._original_start:t._original_end]
- else:
- def extractText(s,l,t):
- t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
- matchExpr.setParseAction(extractText)
- matchExpr.ignoreExprs = expr.ignoreExprs
- return matchExpr
-
-def ungroup(expr):
- """
- Helper to undo pyparsing's default grouping of And expressions, even
- if all but one are non-empty.
- """
- return TokenConverter(expr).setParseAction(lambda t:t[0])
-
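-# A minimal usage sketch for ungroup:
-#
-#   grouped = Group(Word(alphas))
-#   print(grouped.parseString("abc"))           # -> [['abc']]
-#   print(ungroup(grouped).parseString("abc"))  # -> ['abc']
-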
-def locatedExpr(expr):
- """
- Helper to decorate a returned token with its starting and ending locations in the input string.
- This helper adds the following results names:
- - locn_start = location where matched expression begins
- - locn_end = location where matched expression ends
- - value = the actual parsed results
-
-    Be careful if the input text contains C{<TAB>} characters; you may want to call
- C{L{ParserElement.parseWithTabs}}
-
- Example::
- wd = Word(alphas)
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
- prints::
- [[0, 'ljsdf', 5]]
- [[8, 'lksdjjf', 15]]
- [[18, 'lkkjj', 23]]
- """
- locator = Empty().setParseAction(lambda s,l,t: l)
- return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
-
-
-# convenience constants for positional expressions
-empty = Empty().setName("empty")
-lineStart = LineStart().setName("lineStart")
-lineEnd = LineEnd().setName("lineEnd")
-stringStart = StringStart().setName("stringStart")
-stringEnd = StringEnd().setName("stringEnd")
-
-_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
-
-def srange(s):
- r"""
- Helper to easily define string ranges for use in Word construction. Borrows
- syntax from regexp '[]' string range definitions::
- srange("[0-9]") -> "0123456789"
- srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
- srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
- The input string must be enclosed in []'s, and the returned string is the expanded
- character set joined into a single string.
- The values enclosed in the []'s may be:
- - a single character
- - an escaped character with a leading backslash (such as C{\-} or C{\]})
- - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
- (C{\0x##} is also supported for backwards compatibility)
- - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
- """
- _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
- try:
- return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
- except Exception:
- return ""
-
-def matchOnlyAtCol(n):
- """
- Helper method for defining parse actions that require matching at a specific
- column in the input text.
- """
- def verifyCol(strg,locn,toks):
- if col(locn,strg) != n:
- raise ParseException(strg,locn,"matched token not at column %d" % n)
- return verifyCol
-
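-# A minimal usage sketch for matchOnlyAtCol, assuming column-aligned input:
-#
-#   col9_word = Word(alphas).setParseAction(matchOnlyAtCol(9))
-#   # col9_word parses successfully only when the matched word starts in
-#   # column 9 of its line
-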
-def replaceWith(replStr):
- """
- Helper method for common parse actions that simply return a literal value. Especially
- useful when used with C{L{transformString<ParserElement.transformString>}()}.
-
- Example::
- num = Word(nums).setParseAction(lambda toks: int(toks[0]))
- na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
- term = na | num
-
- OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
- """
- return lambda s,l,t: [replStr]
-
-def removeQuotes(s,l,t):
- """
- Helper parse action for removing quotation marks from parsed quoted strings.
-
- Example::
- # by default, quotation marks are included in parsed results
- quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
- # use removeQuotes to strip quotation marks from parsed results
- quotedString.setParseAction(removeQuotes)
- quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
- """
- return t[0][1:-1]
-
-def tokenMap(func, *args):
- """
-    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any
-    additional args are passed, they are forwarded to the given function as additional arguments after
- the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
- parsed data to an integer using base 16.
-
-    Example (compare the last example to the example in L{ParserElement.transformString})::
- hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
- hex_ints.runTests('''
- 00 11 22 aa FF 0a 0d 1a
- ''')
-
- upperword = Word(alphas).setParseAction(tokenMap(str.upper))
- OneOrMore(upperword).runTests('''
- my kingdom for a horse
- ''')
-
- wd = Word(alphas).setParseAction(tokenMap(str.title))
- OneOrMore(wd).setParseAction(' '.join).runTests('''
- now is the winter of our discontent made glorious summer by this sun of york
- ''')
- prints::
- 00 11 22 aa FF 0a 0d 1a
- [0, 17, 34, 170, 255, 10, 13, 26]
-
- my kingdom for a horse
- ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
- now is the winter of our discontent made glorious summer by this sun of york
- ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
- """
- def pa(s,l,t):
- return [func(tokn, *args) for tokn in t]
-
- try:
- func_name = getattr(func, '__name__',
- getattr(func, '__class__').__name__)
- except Exception:
- func_name = str(func)
- pa.__name__ = func_name
-
- return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
-
-def _makeTags(tagStr, xml):
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
- if isinstance(tagStr,basestring):
- resname = tagStr
- tagStr = Keyword(tagStr, caseless=not xml)
- else:
- resname = tagStr.name
-
- tagAttrName = Word(alphas,alphanums+"_-:")
- if (xml):
- tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
- openTag = Suppress("<") + tagStr("tag") + \
- Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
- Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
- else:
- printablesLessRAbrack = "".join(c for c in printables if c not in ">")
- tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
- openTag = Suppress("<") + tagStr("tag") + \
- Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
- Optional( Suppress("=") + tagAttrValue ) ))) + \
- Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
- closeTag = Combine(_L("</") + tagStr + ">")
-
- openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
- closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
- openTag.tag = resname
- closeTag.tag = resname
- return openTag, closeTag
-
-def makeHTMLTags(tagStr):
- """
- Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
- tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
-
- Example::
- text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
- # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
- a,a_end = makeHTMLTags("A")
- link_expr = a + SkipTo(a_end)("link_text") + a_end
-
- for link in link_expr.searchString(text):
- # attributes in the <A> tag (like "href" shown here) are also accessible as named results
- print(link.link_text, '->', link.href)
- prints::
- pyparsing -> http://pyparsing.wikispaces.com
- """
- return _makeTags( tagStr, False )
-
-def makeXMLTags(tagStr):
- """
-    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
-    tags only in the exact case of the given tag name.
-
- Example: similar to L{makeHTMLTags}
- """
- return _makeTags( tagStr, True )
-
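-# Editor's note: a hypothetical sketch, not part of the original module,
-# contrasting the two helpers: makeHTMLTags("body") matches <body>, <BODY>,
-# <Body>, ..., while makeXMLTags("body") matches only the exact case given.
-_xml_body, _xml_body_end = makeXMLTags("body")
-# _xml_body.searchString('<body lang="en">') matches; '<BODY>' does not.
-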
-def withAttribute(*args,**attrDict):
- """
- Helper to create a validating parse action to be used with start tags created
- with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
- with a required attribute value, to avoid false matches on common tags such as
- C{<TD>} or C{<DIV>}.
-
- Call C{withAttribute} with a series of attribute names and values. Specify the list
-    of filter attribute names and values as:
- - keyword arguments, as in C{(align="right")}, or
- - as an explicit dict with C{**} operator, when an attribute name is also a Python
- reserved word, as in C{**{"class":"Customer", "align":"right"}}
- - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
-    For attribute names with a namespace prefix, you must use the second form. Attribute
-    names are matched case-insensitively.
-
- If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
-
- To verify that the attribute exists, but without specifying a value, pass
- C{withAttribute.ANY_VALUE} as the value.
-
- Example::
- html = '''
- <div>
- Some text
- <div type="grid">1 4 0 1 0</div>
- <div type="graph">1,3 2,3 1,1</div>
- <div>this has no type</div>
- </div>
-
- '''
- div,div_end = makeHTMLTags("div")
-
- # only match div tag having a type attribute with value "grid"
- div_grid = div().setParseAction(withAttribute(type="grid"))
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.searchString(html):
- print(grid_header.body)
-
- # construct a match with any div tag having a type attribute, regardless of the value
- div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.searchString(html):
- print(div_header.body)
- prints::
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- if args:
- attrs = args[:]
- else:
- attrs = attrDict.items()
- attrs = [(k,v) for k,v in attrs]
- def pa(s,l,tokens):
- for attrName,attrValue in attrs:
- if attrName not in tokens:
- raise ParseException(s,l,"no matching attribute " + attrName)
- if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
- raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
- (attrName, tokens[attrName], attrValue))
- return pa
-withAttribute.ANY_VALUE = object()
-
-def withClass(classname, namespace=''):
- """
- Simplified version of C{L{withAttribute}} when matching on a div class - made
- difficult because C{class} is a reserved word in Python.
-
- Example::
- html = '''
- <div>
- Some text
- <div class="grid">1 4 0 1 0</div>
- <div class="graph">1,3 2,3 1,1</div>
- <div>this &lt;div&gt; has no class</div>
- </div>
-
- '''
- div,div_end = makeHTMLTags("div")
- div_grid = div().setParseAction(withClass("grid"))
-
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.searchString(html):
- print(grid_header.body)
-
- div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.searchString(html):
- print(div_header.body)
- prints::
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- classattr = "%s:class" % namespace if namespace else "class"
- return withAttribute(**{classattr : classname})
-
-opAssoc = _Constants()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
- """
- Helper method for constructing grammars of expressions made up of
- operators working in a precedence hierarchy. Operators may be unary or
- binary, left- or right-associative. Parse actions can also be attached
- to operator expressions. The generated parser will also recognize the use
- of parentheses to override operator precedences (see example below).
-
- Note: if you define a deep operator list, you may see performance issues
- when using infixNotation. See L{ParserElement.enablePackrat} for a
- mechanism to potentially improve your parser performance.
-
- Parameters:
-     - baseExpr - expression representing the most basic element of the nested grammar
- - opList - list of tuples, one for each operator precedence level in the
- expression grammar; each tuple is of the form
- (opExpr, numTerms, rightLeftAssoc, parseAction), where:
- - opExpr is the pyparsing expression for the operator;
- may also be a string, which will be converted to a Literal;
- if numTerms is 3, opExpr is a tuple of two expressions, for the
- two operators separating the 3 terms
- - numTerms is the number of terms for this operator (must
- be 1, 2, or 3)
- - rightLeftAssoc is the indicator whether the operator is
- right or left associative, using the pyparsing-defined
- constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- - parseAction is the parse action to be associated with
- expressions matching this operator expression (the
- parse action tuple member may be omitted); if the parse action
- is passed a tuple or list of functions, this is equivalent to
- calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
- - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
-
- Example::
- # simple example of four-function arithmetic with ints and variable names
- integer = pyparsing_common.signed_integer
- varname = pyparsing_common.identifier
-
- arith_expr = infixNotation(integer | varname,
- [
- ('-', 1, opAssoc.RIGHT),
- (oneOf('* /'), 2, opAssoc.LEFT),
- (oneOf('+ -'), 2, opAssoc.LEFT),
- ])
-
- arith_expr.runTests('''
- 5+3*6
- (5+3)*6
- -2--11
- ''', fullDump=False)
- prints::
- 5+3*6
- [[5, '+', [3, '*', 6]]]
-
- (5+3)*6
- [[[5, '+', 3], '*', 6]]
-
- -2--11
- [[['-', 2], '-', ['-', 11]]]
- """
- ret = Forward()
- lastExpr = baseExpr | ( lpar + ret + rpar )
- for i,operDef in enumerate(opList):
- opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
- termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
- if arity == 3:
- if opExpr is None or len(opExpr) != 2:
- raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
- opExpr1, opExpr2 = opExpr
- thisExpr = Forward().setName(termName)
- if rightLeftAssoc == opAssoc.LEFT:
- if arity == 1:
- matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
- elif arity == 2:
- if opExpr is not None:
- matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
- else:
- matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
- elif arity == 3:
- matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
- Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
- else:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
- elif rightLeftAssoc == opAssoc.RIGHT:
- if arity == 1:
- # try to avoid LR with this extra test
- if not isinstance(opExpr, Optional):
- opExpr = Optional(opExpr)
- matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
- elif arity == 2:
- if opExpr is not None:
- matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
- else:
- matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
- elif arity == 3:
- matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
- Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
- else:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
- else:
- raise ValueError("operator must indicate right or left associativity")
- if pa:
- if isinstance(pa, (tuple, list)):
- matchExpr.setParseAction(*pa)
- else:
- matchExpr.setParseAction(pa)
- thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
- lastExpr = thisExpr
- ret <<= lastExpr
- return ret
-
-operatorPrecedence = infixNotation
-"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
-
-dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
-sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
-quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
- Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
-unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
-
-def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
- """
- Helper method for defining nested lists enclosed in opening and closing
- delimiters ("(" and ")" are the default).
-
- Parameters:
- - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- - content - expression for items within the nested lists (default=C{None})
- - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
-
- If an expression is not provided for the content argument, the nested
- expression will capture all whitespace-delimited content between delimiters
- as a list of separate values.
-
- Use the C{ignoreExpr} argument to define expressions that may contain
- opening or closing characters that should not be treated as opening
- or closing characters for nesting, such as quotedString or a comment
- expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
- The default is L{quotedString}, but if no expressions are to be ignored,
- then pass C{None} for this argument.
-
- Example::
- data_type = oneOf("void int short long char float double")
- decl_data_type = Combine(data_type + Optional(Word('*')))
- ident = Word(alphas+'_', alphanums+'_')
- number = pyparsing_common.number
- arg = Group(decl_data_type + ident)
- LPAR,RPAR = map(Suppress, "()")
-
- code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
-
- c_function = (decl_data_type("type")
- + ident("name")
- + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
- + code_body("body"))
- c_function.ignore(cStyleComment)
-
- source_code = '''
- int is_odd(int x) {
- return (x%2);
- }
-
- int dec_to_hex(char hchar) {
- if (hchar >= '0' && hchar <= '9') {
- return (ord(hchar)-ord('0'));
- } else {
- return (10+ord(hchar)-ord('A'));
- }
- }
- '''
- for func in c_function.searchString(source_code):
- print("%(name)s (%(type)s) args: %(args)s" % func)
-
- prints::
- is_odd (int) args: [['int', 'x']]
- dec_to_hex (int) args: [['char', 'hchar']]
- """
- if opener == closer:
- raise ValueError("opening and closing strings cannot be the same")
- if content is None:
- if isinstance(opener,basestring) and isinstance(closer,basestring):
- if len(opener) == 1 and len(closer)==1:
- if ignoreExpr is not None:
- content = (Combine(OneOrMore(~ignoreExpr +
- CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
- ).setParseAction(lambda t:t[0].strip()))
- else:
- if ignoreExpr is not None:
- content = (Combine(OneOrMore(~ignoreExpr +
- ~Literal(opener) + ~Literal(closer) +
- CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
- CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- raise ValueError("opening and closing arguments must be strings if no content expression is given")
- ret = Forward()
- if ignoreExpr is not None:
- ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
- else:
- ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
- ret.setName('nested %s%s expression' % (opener,closer))
- return ret
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True):
- """
- Helper method for defining space-delimited indentation blocks, such as
- those used to define block statements in Python source code.
-
- Parameters:
- - blockStatementExpr - expression defining syntax of statement that
- is repeated within the indented block
- - indentStack - list created by caller to manage indentation stack
- (multiple statementWithIndentedBlock expressions within a single grammar
- should share a common indentStack)
-     - indent - boolean indicating whether block must be indented beyond
-        the current level; set to False for block of left-most statements
- (default=C{True})
-
- A valid block must contain at least one C{blockStatement}.
-
- Example::
- data = '''
- def A(z):
- A1
- B = 100
- G = A2
- A2
- A3
- B
- def BB(a,b,c):
- BB1
- def BBA():
- bba1
- bba2
- bba3
- C
- D
- def spam(x,y):
- def eggs(z):
- pass
- '''
-
-
- indentStack = [1]
- stmt = Forward()
-
- identifier = Word(alphas, alphanums)
- funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
- func_body = indentedBlock(stmt, indentStack)
- funcDef = Group( funcDecl + func_body )
-
- rvalue = Forward()
- funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
- rvalue << (funcCall | identifier | Word(nums))
- assignment = Group(identifier + "=" + rvalue)
- stmt << ( funcDef | assignment | identifier )
-
- module_body = OneOrMore(stmt)
-
- parseTree = module_body.parseString(data)
- parseTree.pprint()
- prints::
- [['def',
- 'A',
- ['(', 'z', ')'],
- ':',
- [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
- 'B',
- ['def',
- 'BB',
- ['(', 'a', 'b', 'c', ')'],
- ':',
- [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
- 'C',
- 'D',
- ['def',
- 'spam',
- ['(', 'x', 'y', ')'],
- ':',
- [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
- """
- def checkPeerIndent(s,l,t):
- if l >= len(s): return
- curCol = col(l,s)
- if curCol != indentStack[-1]:
- if curCol > indentStack[-1]:
- raise ParseFatalException(s,l,"illegal nesting")
- raise ParseException(s,l,"not a peer entry")
-
- def checkSubIndent(s,l,t):
- curCol = col(l,s)
- if curCol > indentStack[-1]:
- indentStack.append( curCol )
- else:
- raise ParseException(s,l,"not a subentry")
-
- def checkUnindent(s,l,t):
- if l >= len(s): return
- curCol = col(l,s)
- if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
- raise ParseException(s,l,"not an unindent")
- indentStack.pop()
-
- NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
- INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
- PEER = Empty().setParseAction(checkPeerIndent).setName('')
- UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
- if indent:
- smExpr = Group( Optional(NL) +
- #~ FollowedBy(blockStatementExpr) +
- INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
- else:
- smExpr = Group( Optional(NL) +
- (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
- blockStatementExpr.ignore(_bslash + LineEnd())
- return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
-_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
-commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
-def replaceHTMLEntity(t):
-    """Helper parse action to replace common HTML entities with their special characters"""
- return _htmlEntityMap.get(t.entity)
-
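-# Editor's note: a hypothetical usage sketch, not part of the original module.
-# transformString() rewrites each matched entity using replaceHTMLEntity:
-_deentitizer = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
-# _deentitizer.transformString("x &lt; y &amp;&amp; y &gt; z") -> 'x < y && y > z'
-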
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
-"Comment of the form C{/* ... */}"
-
-htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
-"Comment of the form C{<!-- ... -->}"
-
-restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
-dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
-"Comment of the form C{// ... (to end of line)}"
-
-cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
-"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
-
-javaStyleComment = cppStyleComment
-"Same as C{L{cppStyleComment}}"
-
-pythonStyleComment = Regex(r"#.*").setName("Python style comment")
-"Comment of the form C{# ... (to end of line)}"
-
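-# Editor's note: a hypothetical sketch, not part of the original module.
-# Comment expressions are normally attached with ParserElement.ignore(), so
-# comments may appear anywhere in the input without cluttering the grammar:
-_csv_example = delimitedList(Word(alphas))
-_csv_example.ignore(pythonStyleComment)
-# _csv_example.parseString("red, green, # a comment\nblue") -> ['red', 'green', 'blue']
-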
-_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
- Optional( Word(" \t") +
- ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
-commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
-"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
- This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
- """
- Here are some common low-level expressions that may be useful in jump-starting parser development:
- - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- - common L{programming identifiers<identifier>}
- - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- - L{UUID<uuid>}
- - L{comma-separated list<comma_separated_list>}
- Parse actions:
- - C{L{convertToInteger}}
- - C{L{convertToFloat}}
- - C{L{convertToDate}}
- - C{L{convertToDatetime}}
- - C{L{stripHTMLTags}}
- - C{L{upcaseTokens}}
- - C{L{downcaseTokens}}
-
- Example::
- pyparsing_common.number.runTests('''
- # any int or real number, returned as the appropriate type
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.fnumber.runTests('''
- # any int or real number, returned as float
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.hex_integer.runTests('''
- # hex numbers
- 100
- FF
- ''')
-
- pyparsing_common.fraction.runTests('''
- # fractions
- 1/2
- -3/4
- ''')
-
- pyparsing_common.mixed_integer.runTests('''
- # mixed fractions
- 1
- 1/2
- -3/4
- 1-3/4
- ''')
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests('''
- # uuid
- 12345678-1234-5678-1234-567812345678
- ''')
- prints::
- # any int or real number, returned as the appropriate type
- 100
- [100]
-
- -100
- [-100]
-
- +100
- [100]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # any int or real number, returned as float
- 100
- [100.0]
-
- -100
- [-100.0]
-
- +100
- [100.0]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # hex numbers
- 100
- [256]
-
- FF
- [255]
-
- # fractions
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- # mixed fractions
- 1
- [1]
-
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- 1-3/4
- [1.75]
-
- # uuid
- 12345678-1234-5678-1234-567812345678
- [UUID('12345678-1234-5678-1234-567812345678')]
- """
-
- convertToInteger = tokenMap(int)
- """
- Parse action for converting parsed integers to Python int
- """
-
- convertToFloat = tokenMap(float)
- """
- Parse action for converting parsed numbers to Python float
- """
-
- integer = Word(nums).setName("integer").setParseAction(convertToInteger)
- """expression that parses an unsigned integer, returns an int"""
-
- hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
- """expression that parses a hexadecimal integer, returns an int"""
-
- signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
- """expression that parses an integer with optional leading sign, returns an int"""
-
- fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
- """fractional expression of an integer divided by an integer, returns a float"""
- fraction.addParseAction(lambda t: t[0]/t[-1])
-
- mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
- """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
- mixed_integer.addParseAction(sum)
-
- real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
- """expression that parses a floating point number and returns a float"""
-
- sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
- """expression that parses a floating point number with optional scientific notation and returns a float"""
-
- # streamlining this expression makes the docs nicer-looking
- number = (sci_real | real | signed_integer).streamline()
- """any numeric expression, returns the corresponding Python type"""
-
- fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
- """any int or real number, returned as float"""
-
- identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
- """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
-
- ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
- "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
-
- _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
- _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
- _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
- _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
- _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
- ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
- "IPv6 address (long, short, or mixed form)"
-
- mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
-    "MAC address xx:xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
-
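-    # Editor's note (hypothetical, not in the original source): these address
-    # expressions are ordinary Regex objects and can be used directly, e.g.
-    #   pyparsing_common.mac_address.parseString("01-23-45-67-89-ab")
-    #   # -> ['01-23-45-67-89-ab']
-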
- @staticmethod
- def convertToDate(fmt="%Y-%m-%d"):
- """
- Helper to create a parse action for converting parsed date string to Python datetime.date
-
- Params -
- - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
-
- Example::
- date_expr = pyparsing_common.iso8601_date.copy()
- date_expr.setParseAction(pyparsing_common.convertToDate())
- print(date_expr.parseString("1999-12-31"))
- prints::
- [datetime.date(1999, 12, 31)]
- """
- def cvt_fn(s,l,t):
- try:
- return datetime.strptime(t[0], fmt).date()
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
- return cvt_fn
-
- @staticmethod
- def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
- """
- Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
-
- Params -
- - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
-
- Example::
- dt_expr = pyparsing_common.iso8601_datetime.copy()
- dt_expr.setParseAction(pyparsing_common.convertToDatetime())
- print(dt_expr.parseString("1999-12-31T23:59:59.999"))
- prints::
- [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
- """
- def cvt_fn(s,l,t):
- try:
- return datetime.strptime(t[0], fmt)
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
- return cvt_fn
-
- iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
- "ISO8601 date (C{yyyy-mm-dd})"
-
- iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
- "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
-
- uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
- "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
-
- _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
- @staticmethod
- def stripHTMLTags(s, l, tokens):
- """
- Parse action to remove HTML tags from web page HTML source
-
- Example::
- # strip HTML links from normal text
- text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
- td,td_end = makeHTMLTags("TD")
- table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-
- print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
- """
- return pyparsing_common._html_stripper.transformString(tokens[0])
-
- _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
- + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
- comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
- """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
- upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
- """Parse action to convert tokens to upper case."""
-
- downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
- """Parse action to convert tokens to lower case."""
-
-
-if __name__ == "__main__":
-
- selectToken = CaselessLiteral("select")
- fromToken = CaselessLiteral("from")
-
- ident = Word(alphas, alphanums + "_$")
-
- columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
- columnNameList = Group(delimitedList(columnName)).setName("columns")
- columnSpec = ('*' | columnNameList)
-
- tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
- tableNameList = Group(delimitedList(tableName)).setName("tables")
-
- simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
-
- # demo runTests method, including embedded comments in test string
- simpleSQL.runTests("""
- # '*' as column list and dotted table name
- select * from SYS.XYZZY
-
- # caseless match on "SELECT", and casts back to "select"
- SELECT * from XYZZY, ABC
-
- # list of column names, and mixed case SELECT keyword
- Select AA,BB,CC from Sys.dual
-
- # multiple tables
- Select A, B, C from Sys.dual, Table2
-
- # invalid SELECT keyword - should fail
- Xelect A, B, C from Sys.dual
-
- # incomplete command - should fail
- Select
-
- # invalid column name - should fail
- Select ^^^ frox Sys.dual
-
- """)
-
- pyparsing_common.number.runTests("""
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- """)
-
- # any int or real number, returned as float
- pyparsing_common.fnumber.runTests("""
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- """)
-
- pyparsing_common.hex_integer.runTests("""
- 100
- FF
- """)
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests("""
- 12345678-1234-5678-1234-567812345678
- """)
diff --git a/contrib/python/setuptools/py3/pkg_resources/extern/__init__.py b/contrib/python/setuptools/py3/pkg_resources/extern/__init__.py
deleted file mode 100644
index fed59295403..00000000000
--- a/contrib/python/setuptools/py3/pkg_resources/extern/__init__.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import importlib.util
-import sys
-
-
-class VendorImporter:
- """
- A PEP 302 meta path importer for finding optionally-vendored
- or otherwise naturally-installed packages from root_name.
- """
-
- def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
- self.root_name = root_name
- self.vendored_names = set(vendored_names)
- self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
- @property
- def search_path(self):
- """
-        Search the vendor package first, then fall back to the naturally-installed package.
- """
- yield self.vendor_pkg + '.'
- yield ''
-
- def _module_matches_namespace(self, fullname):
- """Figure out if the target module is vendored."""
- root, base, target = fullname.partition(self.root_name + '.')
- return not root and any(map(target.startswith, self.vendored_names))
-
- def load_module(self, fullname):
- """
- Iterate over the search path to locate and load fullname.
- """
- root, base, target = fullname.partition(self.root_name + '.')
- for prefix in self.search_path:
- try:
- extant = prefix + target
- __import__(extant)
- mod = sys.modules[extant]
- sys.modules[fullname] = mod
- return mod
- except ImportError:
- pass
- else:
- raise ImportError(
- "The '{target}' package is required; "
- "normally this is bundled with this package so if you get "
- "this warning, consult the packager of your "
- "distribution.".format(**locals())
- )
-
- def create_module(self, spec):
- return self.load_module(spec.name)
-
- def exec_module(self, module):
- pass
-
- def find_spec(self, fullname, path=None, target=None):
- """Return a module spec for vendored names."""
- return (
- importlib.util.spec_from_loader(fullname, self)
- if self._module_matches_namespace(fullname) else None
- )
-
- def install(self):
- """
- Install this importer into sys.meta_path if not already present.
- """
- if self not in sys.meta_path:
- sys.meta_path.append(self)
-
-
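-# Editor's note: a hypothetical illustration, not part of the original module.
-# Once the importer below is installed, imports of the vendored names resolve
-# through it transparently:
-#
-#   from pkg_resources.extern import packaging
-#   # loads pkg_resources._vendor.packaging, falling back to the top-level
-#   # 'packaging' package if the vendored copy is absent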
-names = 'packaging', 'pyparsing', 'appdirs'
-VendorImporter(__name__, names).install()
diff --git a/contrib/python/setuptools/py3/setuptools/__init__.py b/contrib/python/setuptools/py3/setuptools/__init__.py
deleted file mode 100644
index 9d6f0bc0dd6..00000000000
--- a/contrib/python/setuptools/py3/setuptools/__init__.py
+++ /dev/null
@@ -1,242 +0,0 @@
-"""Extensions to the 'distutils' for large or complex distributions"""
-
-from fnmatch import fnmatchcase
-import functools
-import os
-import re
-
-import _distutils_hack.override # noqa: F401
-
-import distutils.core
-from distutils.errors import DistutilsOptionError
-from distutils.util import convert_path
-
-from ._deprecation_warning import SetuptoolsDeprecationWarning
-
-import setuptools.version
-from setuptools.extension import Extension
-from setuptools.dist import Distribution
-from setuptools.depends import Require
-from . import monkey
-
-
-__all__ = [
- 'setup',
- 'Distribution',
- 'Command',
- 'Extension',
- 'Require',
- 'SetuptoolsDeprecationWarning',
- 'find_packages',
- 'find_namespace_packages',
-]
-
-__version__ = setuptools.version.__version__
-
-bootstrap_install_from = None
-
-
-class PackageFinder:
- """
- Generate a list of all Python packages found within a directory
- """
-
- @classmethod
- def find(cls, where='.', exclude=(), include=('*',)):
-        """Return a list of all Python packages found within directory 'where'
-
- 'where' is the root directory which will be searched for packages. It
- should be supplied as a "cross-platform" (i.e. URL-style) path; it will
- be converted to the appropriate local path syntax.
-
- 'exclude' is a sequence of package names to exclude; '*' can be used
- as a wildcard in the names, such that 'foo.*' will exclude all
- subpackages of 'foo' (but not 'foo' itself).
-
- 'include' is a sequence of package names to include. If it's
- specified, only the named packages will be included. If it's not
- specified, all found packages will be included. 'include' can contain
- shell style wildcard patterns just like 'exclude'.
- """
-
- return list(
- cls._find_packages_iter(
- convert_path(where),
- cls._build_filter('ez_setup', '*__pycache__', *exclude),
- cls._build_filter(*include),
- )
- )
-
- @classmethod
- def _find_packages_iter(cls, where, exclude, include):
- """
- All the packages found in 'where' that pass the 'include' filter, but
- not the 'exclude' filter.
- """
- for root, dirs, files in os.walk(where, followlinks=True):
- # Copy dirs to iterate over it, then empty dirs.
- all_dirs = dirs[:]
- dirs[:] = []
-
- for dir in all_dirs:
- full_path = os.path.join(root, dir)
- rel_path = os.path.relpath(full_path, where)
- package = rel_path.replace(os.path.sep, '.')
-
- # Skip directory trees that are not valid packages
- if '.' in dir or not cls._looks_like_package(full_path):
- continue
-
- # Should this package be included?
- if include(package) and not exclude(package):
- yield package
-
- # Keep searching subdirectories, as there may be more packages
- # down there, even if the parent was excluded.
- dirs.append(dir)
-
- @staticmethod
- def _looks_like_package(path):
- """Does a directory look like a package?"""
- return os.path.isfile(os.path.join(path, '__init__.py'))
-
- @staticmethod
- def _build_filter(*patterns):
- """
- Given a list of patterns, return a callable that will be true only if
- the input matches at least one of the patterns.
- """
- return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
-
-
-class PEP420PackageFinder(PackageFinder):
- @staticmethod
- def _looks_like_package(path):
- return True
-
-
-find_packages = PackageFinder.find
-find_namespace_packages = PEP420PackageFinder.find
-
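-# Editor's note: a hypothetical usage sketch, not part of the original module:
-#
-#   find_packages(where='src', exclude=('tests', 'tests.*'))
-#   # -> e.g. ['mypkg', 'mypkg.sub'] for packages with __init__.py under src/
-#
-# find_namespace_packages() applies the same filters but also accepts
-# directories lacking __init__.py (PEP 420 namespace packages).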
-
-def _install_setup_requires(attrs):
- # Note: do not use `setuptools.Distribution` directly, as
-    # our PEP 517 backend patches `distutils.core.Distribution`.
- class MinimalDistribution(distutils.core.Distribution):
- """
- A minimal version of a distribution for supporting the
- fetch_build_eggs interface.
- """
-
- def __init__(self, attrs):
- _incl = 'dependency_links', 'setup_requires'
- filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
- distutils.core.Distribution.__init__(self, filtered)
-
- def finalize_options(self):
- """
- Disable finalize_options to avoid building the working set.
- Ref #2158.
- """
-
- dist = MinimalDistribution(attrs)
-
- # Honor setup.cfg's options.
- dist.parse_config_files(ignore_option_errors=True)
- if dist.setup_requires:
- dist.fetch_build_eggs(dist.setup_requires)
-
-
-def setup(**attrs):
- # Make sure we have any requirements needed to interpret 'attrs'.
- _install_setup_requires(attrs)
- return distutils.core.setup(**attrs)
-
-
-setup.__doc__ = distutils.core.setup.__doc__
-
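-# Editor's note: a minimal, hypothetical setup.py built on the entry point
-# above (not part of the original module):
-#
-#   from setuptools import setup, find_packages
-#   setup(name="demo", version="0.1", packages=find_packages())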
-
-_Command = monkey.get_unpatched(distutils.core.Command)
-
-
-class Command(_Command):
- __doc__ = _Command.__doc__
-
- command_consumes_arguments = False
-
- def __init__(self, dist, **kw):
- """
- Construct the command for dist, updating
- vars(self) with any keyword parameters.
- """
- _Command.__init__(self, dist)
- vars(self).update(kw)
-
- def _ensure_stringlike(self, option, what, default=None):
- val = getattr(self, option)
- if val is None:
- setattr(self, option, default)
- return default
- elif not isinstance(val, str):
- raise DistutilsOptionError(
- "'%s' must be a %s (got `%s`)" % (option, what, val)
- )
- return val
-
- def ensure_string_list(self, option):
- r"""Ensure that 'option' is a list of strings. If 'option' is
- currently a string, we split it either on /,\s*/ or /\s+/, so
- "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
- ["foo", "bar", "baz"].
- """
- val = getattr(self, option)
- if val is None:
- return
- elif isinstance(val, str):
- setattr(self, option, re.split(r',\s*|\s+', val))
- else:
- if isinstance(val, list):
- ok = all(isinstance(v, str) for v in val)
- else:
- ok = False
- if not ok:
- raise DistutilsOptionError(
- "'%s' must be a list of strings (got %r)" % (option, val)
- )
-
- def reinitialize_command(self, command, reinit_subcommands=0, **kw):
- cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
- vars(cmd).update(kw)
- return cmd
-
-
-def _find_all_simple(path):
- """
- Find all files under 'path'
- """
- results = (
- os.path.join(base, file)
- for base, dirs, files in os.walk(path, followlinks=True)
- for file in files
- )
- return filter(os.path.isfile, results)
-
-
-def findall(dir=os.curdir):
- """
- Find all files under 'dir' and return the list of full filenames.
- Unless dir is '.', return full filenames with dir prepended.
- """
- files = _find_all_simple(dir)
- if dir == os.curdir:
- make_rel = functools.partial(os.path.relpath, start=dir)
- files = map(make_rel, files)
- return list(files)
-
-
-class sic(str):
- """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
-
-
-# Apply monkey patches
-monkey.patch_all()
diff --git a/contrib/python/setuptools/py3/setuptools/_deprecation_warning.py b/contrib/python/setuptools/py3/setuptools/_deprecation_warning.py
deleted file mode 100644
index 086b64dd381..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_deprecation_warning.py
+++ /dev/null
@@ -1,7 +0,0 @@
-class SetuptoolsDeprecationWarning(Warning):
- """
- Base class for warning deprecations in ``setuptools``
-
- This class is not derived from ``DeprecationWarning``, and as such is
- visible by default.
- """
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/__init__.py b/contrib/python/setuptools/py3/setuptools/_distutils/__init__.py
deleted file mode 100644
index 8fd493b42c7..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""distutils
-
-The main package for the Python Module Distribution Utilities. Normally
-used from a setup script as
-
- from distutils.core import setup
-
- setup (...)
-"""
-
-import sys
-import importlib
-
-__version__ = sys.version[:sys.version.index(' ')]
-
-
-try:
- # Allow Debian and pkgsrc (only) to customize system
- # behavior. Ref pypa/distutils#2 and pypa/distutils#16.
- # This hook is deprecated and no other environments
- # should use it.
- importlib.import_module('_distutils_system_mod')
-except ImportError:
- pass
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/_msvccompiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/_msvccompiler.py
deleted file mode 100644
index b7a06082ae7..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/_msvccompiler.py
+++ /dev/null
@@ -1,561 +0,0 @@
-"""distutils._msvccompiler
-
-Contains MSVCCompiler, an implementation of the abstract CCompiler class
-for Microsoft Visual Studio 2015.
-
-The module is compatible with VS 2015 and later. You can find legacy support
-for older versions in distutils.msvc9compiler and distutils.msvccompiler.
-"""
-
-# Written by Perry Stoll
-# hacked by Robin Becker and Thomas Heller to do a better job of
-# finding DevStudio (through the registry)
-# ported to VS 2005 and VS 2008 by Christian Heimes
-# ported to VS 2015 by Steve Dower
-
-import os
-import subprocess
-import contextlib
-import warnings
-import unittest.mock
-with contextlib.suppress(ImportError):
- import winreg
-
-from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
- CompileError, LibError, LinkError
-from distutils.ccompiler import CCompiler, gen_lib_options
-from distutils import log
-from distutils.util import get_platform
-
-from itertools import count
-
-def _find_vc2015():
- try:
- key = winreg.OpenKeyEx(
- winreg.HKEY_LOCAL_MACHINE,
- r"Software\Microsoft\VisualStudio\SxS\VC7",
- access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
- )
- except OSError:
- log.debug("Visual C++ is not registered")
- return None, None
-
- best_version = 0
- best_dir = None
- with key:
- for i in count():
- try:
- v, vc_dir, vt = winreg.EnumValue(key, i)
- except OSError:
- break
- if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
- try:
- version = int(float(v))
- except (ValueError, TypeError):
- continue
- if version >= 14 and version > best_version:
- best_version, best_dir = version, vc_dir
- return best_version, best_dir
-
-def _find_vc2017():
-    """Returns "15, path" based on the result of invoking vswhere.exe.
-    If no install is found, returns "None, None".
-
- The version is returned to avoid unnecessarily changing the function
- result. It may be ignored when the path is not None.
-
- If vswhere.exe is not available, by definition, VS 2017 is not
- installed.
- """
- root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
- if not root:
- return None, None
-
- try:
- path = subprocess.check_output([
- os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
- "-latest",
- "-prerelease",
- "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
- "-property", "installationPath",
- "-products", "*",
- ], encoding="mbcs", errors="strict").strip()
- except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
- return None, None
-
- path = os.path.join(path, "VC", "Auxiliary", "Build")
- if os.path.isdir(path):
- return 15, path
-
- return None, None
-
-PLAT_SPEC_TO_RUNTIME = {
- 'x86' : 'x86',
- 'x86_amd64' : 'x64',
- 'x86_arm' : 'arm',
- 'x86_arm64' : 'arm64'
-}
-
-def _find_vcvarsall(plat_spec):
- # bpo-38597: Removed vcruntime return value
- _, best_dir = _find_vc2017()
-
- if not best_dir:
- best_version, best_dir = _find_vc2015()
-
- if not best_dir:
- log.debug("No suitable Visual C++ version found")
- return None, None
-
- vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
- if not os.path.isfile(vcvarsall):
- log.debug("%s cannot be found", vcvarsall)
- return None, None
-
- return vcvarsall, None
-
-def _get_vc_env(plat_spec):
- if os.getenv("DISTUTILS_USE_SDK"):
- return {
- key.lower(): value
- for key, value in os.environ.items()
- }
-
- vcvarsall, _ = _find_vcvarsall(plat_spec)
- if not vcvarsall:
- raise DistutilsPlatformError("Unable to find vcvarsall.bat")
-
- try:
- out = subprocess.check_output(
- 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
- stderr=subprocess.STDOUT,
- ).decode('utf-16le', errors='replace')
- except subprocess.CalledProcessError as exc:
- log.error(exc.output)
- raise DistutilsPlatformError("Error executing {}"
- .format(exc.cmd))
-
- env = {
- key.lower(): value
- for key, _, value in
- (line.partition('=') for line in out.splitlines())
- if key and value
- }
-
- return env
-
-def _find_exe(exe, paths=None):
- """Return path to an MSVC executable program.
-
- Tries to find the program in several places: first, one of the
- MSVC program search paths from the registry; next, the directories
- in the PATH environment variable. If any of those work, return an
- absolute path that is known to exist. If none of them work, just
- return the original program name, 'exe'.
- """
- if not paths:
- paths = os.getenv('path').split(os.pathsep)
- for p in paths:
- fn = os.path.join(os.path.abspath(p), exe)
- if os.path.isfile(fn):
- return fn
- return exe
-
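-# Editor's note: a hypothetical sketch of how the helpers above combine
-# (not part of the original module; Windows-only, requires an installed VC):
-#
-#   vc_env = _get_vc_env('x86_amd64')                 # keys are lower-cased
-#   paths = vc_env.get('path', '').split(os.pathsep)
-#   cl_exe = _find_exe("cl.exe", paths)               # absolute path, or "cl.exe"
-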
-# A map keyed by get_platform() return values to values accepted by
-# 'vcvarsall.bat'. Always cross-compile from x86 to work with the
-# lighter-weight MSVC installs that do not include native 64-bit tools.
-PLAT_TO_VCVARS = {
- 'win32' : 'x86',
- 'win-amd64' : 'x86_amd64',
- 'win-arm32' : 'x86_arm',
- 'win-arm64' : 'x86_arm64'
-}
-
-class MSVCCompiler(CCompiler) :
- """Concrete class that implements an interface to Microsoft Visual C++,
- as defined by the CCompiler abstract class."""
-
- compiler_type = 'msvc'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
- _rc_extensions = ['.rc']
- _mc_extensions = ['.mc']
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = (_c_extensions + _cpp_extensions +
- _rc_extensions + _mc_extensions)
- res_extension = '.res'
- obj_extension = '.obj'
- static_lib_extension = '.lib'
- shared_lib_extension = '.dll'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = '.exe'
-
-
- def __init__(self, verbose=0, dry_run=0, force=0):
- CCompiler.__init__ (self, verbose, dry_run, force)
- # target platform (.plat_name is consistent with 'bdist')
- self.plat_name = None
- self.initialized = False
-
- def initialize(self, plat_name=None):
-        # multi-init means we would need to check that the platform is the same each time...
- assert not self.initialized, "don't init multiple times"
- if plat_name is None:
- plat_name = get_platform()
- # sanity check for platforms to prevent obscure errors later.
- if plat_name not in PLAT_TO_VCVARS:
- raise DistutilsPlatformError("--plat-name must be one of {}"
- .format(tuple(PLAT_TO_VCVARS)))
-
- # Get the vcvarsall.bat spec for the requested platform.
- plat_spec = PLAT_TO_VCVARS[plat_name]
-
- vc_env = _get_vc_env(plat_spec)
- if not vc_env:
- raise DistutilsPlatformError("Unable to find a compatible "
- "Visual Studio installation.")
-
- self._paths = vc_env.get('path', '')
- paths = self._paths.split(os.pathsep)
- self.cc = _find_exe("cl.exe", paths)
- self.linker = _find_exe("link.exe", paths)
- self.lib = _find_exe("lib.exe", paths)
- self.rc = _find_exe("rc.exe", paths) # resource compiler
- self.mc = _find_exe("mc.exe", paths) # message compiler
-        self.mt = _find_exe("mt.exe", paths) # manifest tool
-
- for dir in vc_env.get('include', '').split(os.pathsep):
- if dir:
- self.add_include_dir(dir.rstrip(os.sep))
-
- for dir in vc_env.get('lib', '').split(os.pathsep):
- if dir:
- self.add_library_dir(dir.rstrip(os.sep))
-
- self.preprocess_options = None
- # bpo-38597: Always compile with dynamic linking
- # Future releases of Python 3.x will include all past
- # versions of vcruntime*.dll for compatibility.
- self.compile_options = [
- '/nologo', '/O2', '/W3', '/GL', '/DNDEBUG', '/MD'
- ]
-
- self.compile_options_debug = [
- '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
- ]
-
- ldflags = [
- '/nologo', '/INCREMENTAL:NO', '/LTCG'
- ]
-
- ldflags_debug = [
- '/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
- ]
-
- self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
- self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
- self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
- self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
- self.ldflags_static = [*ldflags]
- self.ldflags_static_debug = [*ldflags_debug]
-
- self._ldflags = {
- (CCompiler.EXECUTABLE, None): self.ldflags_exe,
- (CCompiler.EXECUTABLE, False): self.ldflags_exe,
- (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
- (CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
- (CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
- (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
- (CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
- (CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
- (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
- }
-
- self.initialized = True
-
- # -- Worker methods ------------------------------------------------
-
- def object_filenames(self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- ext_map = {
- **{ext: self.obj_extension for ext in self.src_extensions},
- **{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
- }
-
- output_dir = output_dir or ''
-
- def make_out_path(p):
- base, ext = os.path.splitext(p)
- if strip_dir:
- base = os.path.basename(base)
- else:
- _, base = os.path.splitdrive(base)
- if base.startswith((os.path.sep, os.path.altsep)):
- base = base[1:]
- try:
- # XXX: This may produce absurdly long paths. We should check
- # the length of the result and trim base until we fit within
- # 260 characters.
- return os.path.join(output_dir, base + ext_map[ext])
- except LookupError:
- # Better to raise an exception instead of silently continuing
- # and later complain about sources and targets having
- # different lengths
- raise CompileError("Don't know how to compile {}".format(p))
-
- return list(map(make_out_path, source_filenames))
-
-
- def compile(self, sources,
- output_dir=None, macros=None, include_dirs=None, debug=0,
- extra_preargs=None, extra_postargs=None, depends=None):
-
- if not self.initialized:
- self.initialize()
- compile_info = self._setup_compile(output_dir, macros, include_dirs,
- sources, depends, extra_postargs)
- macros, objects, extra_postargs, pp_opts, build = compile_info
-
- compile_opts = extra_preargs or []
- compile_opts.append('/c')
- if debug:
- compile_opts.extend(self.compile_options_debug)
- else:
- compile_opts.extend(self.compile_options)
-
-
- add_cpp_opts = False
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- if debug:
- # pass the full pathname to MSVC in debug mode,
- # this allows the debugger to find the source file
- # without asking the user to browse for it
- src = os.path.abspath(src)
-
- if ext in self._c_extensions:
- input_opt = "/Tc" + src
- elif ext in self._cpp_extensions:
- input_opt = "/Tp" + src
- add_cpp_opts = True
- elif ext in self._rc_extensions:
- # compile .RC to .RES file
- input_opt = src
- output_opt = "/fo" + obj
- try:
- self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue
- elif ext in self._mc_extensions:
- # Compile .MC to .RC file to .RES file.
- # * '-h dir' specifies the directory for the
- # generated include file
- # * '-r dir' specifies the target directory of the
- # generated RC file and the binary message resource
- # it includes
- #
- # For now (since there are no options to change this),
- # we use the source-directory for the include file and
- # the build directory for the RC file and message
- # resources. This works at least for win32all.
- h_dir = os.path.dirname(src)
- rc_dir = os.path.dirname(obj)
- try:
- # first compile .MC to .RC and .H file
- self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
- base, _ = os.path.splitext(os.path.basename (src))
- rc_file = os.path.join(rc_dir, base + '.rc')
- # then compile .RC to .RES file
- self.spawn([self.rc, "/fo" + obj, rc_file])
-
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue
- else:
- # how to handle this file?
- raise CompileError("Don't know how to compile {} to {}"
- .format(src, obj))
-
- args = [self.cc] + compile_opts + pp_opts
- if add_cpp_opts:
- args.append('/EHsc')
- args.append(input_opt)
- args.append("/Fo" + obj)
- args.extend(extra_postargs)
-
- try:
- self.spawn(args)
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- return objects
-
-
- def create_static_lib(self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
-
- if not self.initialized:
- self.initialize()
- objects, output_dir = self._fix_object_args(objects, output_dir)
- output_filename = self.library_filename(output_libname,
- output_dir=output_dir)
-
- if self._need_link(objects, output_filename):
- lib_args = objects + ['/OUT:' + output_filename]
- if debug:
- pass # XXX what goes here?
- try:
- log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
- self.spawn([self.lib] + lib_args)
- except DistutilsExecError as msg:
- raise LibError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
-
- def link(self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- if not self.initialized:
- self.initialize()
- objects, output_dir = self._fix_object_args(objects, output_dir)
- fixed_args = self._fix_lib_args(libraries, library_dirs,
- runtime_library_dirs)
- libraries, library_dirs, runtime_library_dirs = fixed_args
-
- if runtime_library_dirs:
- self.warn("I don't know what to do with 'runtime_library_dirs': "
- + str(runtime_library_dirs))
-
- lib_opts = gen_lib_options(self,
- library_dirs, runtime_library_dirs,
- libraries)
- if output_dir is not None:
- output_filename = os.path.join(output_dir, output_filename)
-
- if self._need_link(objects, output_filename):
- ldflags = self._ldflags[target_desc, debug]
-
- export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
-
- ld_args = (ldflags + lib_opts + export_opts +
- objects + ['/OUT:' + output_filename])
-
- # The MSVC linker generates .lib and .exp files, which cannot be
- # suppressed by any linker switches. The .lib files may even be
- # needed! Make sure they are generated in the temporary build
- # directory. Since they have different names for debug and release
- # builds, they can go into the same directory.
- build_temp = os.path.dirname(objects[0])
- if export_symbols is not None:
- (dll_name, dll_ext) = os.path.splitext(
- os.path.basename(output_filename))
- implib_file = os.path.join(
- build_temp,
- self.library_filename(dll_name))
- ld_args.append ('/IMPLIB:' + implib_file)
-
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
-
- output_dir = os.path.dirname(os.path.abspath(output_filename))
- self.mkpath(output_dir)
- try:
- log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
- self.spawn([self.linker] + ld_args)
- except DistutilsExecError as msg:
- raise LinkError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- def spawn(self, cmd):
- env = dict(os.environ, PATH=self._paths)
- with self._fallback_spawn(cmd, env) as fallback:
- return super().spawn(cmd, env=env)
- return fallback.value
-
- @contextlib.contextmanager
- def _fallback_spawn(self, cmd, env):
- """
- Discovered in pypa/distutils#15, some tools monkeypatch the compiler,
- so the 'env' kwarg causes a TypeError. Detect this condition and
- restore the legacy, unsafe behavior.
- """
- bag = type('Bag', (), {})()
- try:
- yield bag
- except TypeError as exc:
- if "unexpected keyword argument 'env'" not in str(exc):
- raise
- else:
- return
- warnings.warn(
- "Fallback spawn triggered. Please update distutils monkeypatch.")
- with unittest.mock.patch('os.environ', env):
- bag.value = super().spawn(cmd)
-
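The fallback above hinges on how contextlib.contextmanager handles exceptions: when the generator catches the TypeError raised at its yield point and simply returns, the exception is suppressed in the caller and control resumes after the with-block. A minimal standalone sketch of that control flow, using a hypothetical legacy_spawn in place of a monkeypatched spawn that lacks the 'env' keyword:

    import contextlib

    @contextlib.contextmanager
    def suppress_env_typeerror(bag):
        try:
            yield
        except TypeError as exc:
            # Swallow only the one known failure mode; re-raise anything else.
            if "unexpected keyword argument 'env'" not in str(exc):
                raise
            bag['fell_back'] = True

    def spawn_with_fallback(legacy_spawn, cmd, env):
        bag = {}
        with suppress_env_typeerror(bag):
            # A monkeypatched spawn() without an 'env' kwarg raises here...
            return legacy_spawn(cmd, env=env)
        # ...and the suppressed exception lands execution here instead.
        return legacy_spawn(cmd)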
- # -- Miscellaneous methods -----------------------------------------
-    # These are all used by the 'gen_lib_options()' function, in
- # ccompiler.py.
-
- def library_dir_option(self, dir):
- return "/LIBPATH:" + dir
-
- def runtime_library_dir_option(self, dir):
- raise DistutilsPlatformError(
- "don't know how to set runtime library search path for MSVC")
-
- def library_option(self, lib):
- return self.library_filename(lib)
-
- def find_library_file(self, dirs, lib, debug=0):
- # Prefer a debugging library if found (and requested), but deal
- # with it if we don't have one.
- if debug:
- try_names = [lib + "_d", lib]
- else:
- try_names = [lib]
- for dir in dirs:
- for name in try_names:
- libfile = os.path.join(dir, self.library_filename(name))
- if os.path.isfile(libfile):
- return libfile
- else:
- # Oops, didn't find it in *any* of 'dirs'
- return None
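For illustration, how that preference order plays out (hypothetical paths, assuming an initialized MSVCCompiler instance named compiler):

    # C:\libs holds both foo.lib and foo_d.lib.
    compiler.find_library_file([r'C:\libs'], 'foo', debug=1)
    # -> 'C:\\libs\\foo_d.lib'  (the debug variant is tried first per directory)
    compiler.find_library_file([r'C:\libs'], 'foo')
    # -> 'C:\\libs\\foo.lib'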
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/archive_util.py b/contrib/python/setuptools/py3/setuptools/_distutils/archive_util.py
deleted file mode 100644
index 565a3117b4b..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/archive_util.py
+++ /dev/null
@@ -1,256 +0,0 @@
-"""distutils.archive_util
-
-Utility functions for creating archive files (tarballs, zip files,
-that sort of thing)."""
-
-import os
-from warnings import warn
-import sys
-
-try:
- import zipfile
-except ImportError:
- zipfile = None
-
-
-from distutils.errors import DistutilsExecError
-from distutils.spawn import spawn
-from distutils.dir_util import mkpath
-from distutils import log
-
-try:
- from pwd import getpwnam
-except ImportError:
- getpwnam = None
-
-try:
- from grp import getgrnam
-except ImportError:
- getgrnam = None
-
-def _get_gid(name):
- """Returns a gid, given a group name."""
- if getgrnam is None or name is None:
- return None
- try:
- result = getgrnam(name)
- except KeyError:
- result = None
- if result is not None:
- return result[2]
- return None
-
-def _get_uid(name):
- """Returns an uid, given a user name."""
- if getpwnam is None or name is None:
- return None
- try:
- result = getpwnam(name)
- except KeyError:
- result = None
- if result is not None:
- return result[2]
- return None
-
-def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
- owner=None, group=None):
- """Create a (possibly compressed) tar file from all the files under
- 'base_dir'.
-
- 'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or
- None. ("compress" will be deprecated in Python 3.2)
-
- 'owner' and 'group' can be used to define an owner and a group for the
- archive that is being built. If not provided, the current owner and group
- will be used.
-
-    The output tar file will be named 'base_name' + ".tar", possibly plus
- the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z").
-
- Returns the output filename.
- """
- tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '',
- 'compress': ''}
- compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz',
- 'compress': '.Z'}
-
- # flags for compression program, each element of list will be an argument
- if compress is not None and compress not in compress_ext.keys():
- raise ValueError(
- "bad value for 'compress': must be None, 'gzip', 'bzip2', "
- "'xz' or 'compress'")
-
- archive_name = base_name + '.tar'
- if compress != 'compress':
- archive_name += compress_ext.get(compress, '')
-
- mkpath(os.path.dirname(archive_name), dry_run=dry_run)
-
- # creating the tarball
- import tarfile # late import so Python build itself doesn't break
-
- log.info('Creating tar archive')
-
- uid = _get_uid(owner)
- gid = _get_gid(group)
-
- def _set_uid_gid(tarinfo):
- if gid is not None:
- tarinfo.gid = gid
- tarinfo.gname = group
- if uid is not None:
- tarinfo.uid = uid
- tarinfo.uname = owner
- return tarinfo
-
- if not dry_run:
- tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
- try:
- tar.add(base_dir, filter=_set_uid_gid)
- finally:
- tar.close()
-
- # compression using `compress`
- if compress == 'compress':
- warn("'compress' will be deprecated.", PendingDeprecationWarning)
- # the option varies depending on the platform
- compressed_name = archive_name + compress_ext[compress]
- if sys.platform == 'win32':
- cmd = [compress, archive_name, compressed_name]
- else:
- cmd = [compress, '-f', archive_name]
- spawn(cmd, dry_run=dry_run)
- return compressed_name
-
- return archive_name
-
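A usage sketch with hypothetical names; with the default gzip compression the function appends '.tar.gz' to base_name:

    from distutils.archive_util import make_tarball

    # Pack the ./myproj tree into myproj-1.0.tar.gz.
    name = make_tarball('myproj-1.0', 'myproj')
    # name == 'myproj-1.0.tar.gz'; compress=None would yield 'myproj-1.0.tar'.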
-def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
- """Create a zip file from all the files under 'base_dir'.
-
- The output zip file will be named 'base_name' + ".zip". Uses either the
- "zipfile" Python module (if available) or the InfoZIP "zip" utility
- (if installed and found on the default search path). If neither tool is
- available, raises DistutilsExecError. Returns the name of the output zip
- file.
- """
- zip_filename = base_name + ".zip"
- mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
-
- # If zipfile module is not available, try spawning an external
- # 'zip' command.
- if zipfile is None:
- if verbose:
- zipoptions = "-r"
- else:
- zipoptions = "-rq"
-
- try:
- spawn(["zip", zipoptions, zip_filename, base_dir],
- dry_run=dry_run)
- except DistutilsExecError:
- # XXX really should distinguish between "couldn't find
- # external 'zip' command" and "zip failed".
- raise DistutilsExecError(("unable to create zip file '%s': "
- "could neither import the 'zipfile' module nor "
- "find a standalone zip utility") % zip_filename)
-
- else:
- log.info("creating '%s' and adding '%s' to it",
- zip_filename, base_dir)
-
- if not dry_run:
- try:
- zip = zipfile.ZipFile(zip_filename, "w",
- compression=zipfile.ZIP_DEFLATED)
- except RuntimeError:
- zip = zipfile.ZipFile(zip_filename, "w",
- compression=zipfile.ZIP_STORED)
-
- with zip:
- if base_dir != os.curdir:
- path = os.path.normpath(os.path.join(base_dir, ''))
- zip.write(path, path)
- log.info("adding '%s'", path)
- for dirpath, dirnames, filenames in os.walk(base_dir):
- for name in dirnames:
- path = os.path.normpath(os.path.join(dirpath, name, ''))
- zip.write(path, path)
- log.info("adding '%s'", path)
- for name in filenames:
- path = os.path.normpath(os.path.join(dirpath, name))
- if os.path.isfile(path):
- zip.write(path, path)
- log.info("adding '%s'", path)
-
- return zip_filename
-
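The zip counterpart, again with hypothetical names; the external 'zip' tool is only consulted when the zipfile module cannot be imported:

    from distutils.archive_util import make_zipfile

    name = make_zipfile('release', 'dist')   # archives the ./dist tree
    # name == 'release.zip'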
-ARCHIVE_FORMATS = {
- 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
- 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
- 'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
- 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
- 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
-    'zip':   (make_zipfile, [], "ZIP file")
- }
-
-def check_archive_formats(formats):
- """Returns the first format from the 'format' list that is unknown.
-
- If all formats are known, returns None
- """
- for format in formats:
- if format not in ARCHIVE_FORMATS:
- return format
- return None
-
-def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
- dry_run=0, owner=None, group=None):
- """Create an archive file (eg. zip or tar).
-
- 'base_name' is the name of the file to create, minus any format-specific
- extension; 'format' is the archive format: one of "zip", "tar", "gztar",
- "bztar", "xztar", or "ztar".
-
- 'root_dir' is a directory that will be the root directory of the
- archive; ie. we typically chdir into 'root_dir' before creating the
- archive. 'base_dir' is the directory where we start archiving from;
- ie. 'base_dir' will be the common prefix of all files and
- directories in the archive. 'root_dir' and 'base_dir' both default
- to the current directory. Returns the name of the archive file.
-
- 'owner' and 'group' are used when creating a tar archive. By default,
- uses the current owner and group.
- """
- save_cwd = os.getcwd()
- if root_dir is not None:
- log.debug("changing into '%s'", root_dir)
- base_name = os.path.abspath(base_name)
- if not dry_run:
- os.chdir(root_dir)
-
- if base_dir is None:
- base_dir = os.curdir
-
- kwargs = {'dry_run': dry_run}
-
- try:
- format_info = ARCHIVE_FORMATS[format]
- except KeyError:
- raise ValueError("unknown archive format '%s'" % format)
-
- func = format_info[0]
- for arg, val in format_info[1]:
- kwargs[arg] = val
-
- if format != 'zip':
- kwargs['owner'] = owner
- kwargs['group'] = group
-
- try:
- filename = func(base_name, base_dir, **kwargs)
- finally:
- if root_dir is not None:
- log.debug("changing back to '%s'", save_cwd)
- os.chdir(save_cwd)
-
- return filename
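A sketch of the root_dir/base_dir split with hypothetical paths:

    from distutils.archive_util import make_archive

    # Layout: /src/myproj/...; chdir into /src, archive the 'myproj' subtree.
    path = make_archive('/tmp/myproj-1.0', 'gztar',
                        root_dir='/src', base_dir='myproj')
    # path == '/tmp/myproj-1.0.tar.gz'; entries are rooted at 'myproj/'.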
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/bcppcompiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/bcppcompiler.py
deleted file mode 100644
index 071fea5d038..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/bcppcompiler.py
+++ /dev/null
@@ -1,393 +0,0 @@
-"""distutils.bcppcompiler
-
-Contains BorlandCCompiler, an implementation of the abstract CCompiler class
-for the Borland C++ compiler.
-"""
-
-# This implementation by Lyle Johnson, based on the original msvccompiler.py
-# module and using the directions originally published by Gordon Williams.
-
-# XXX looks like there's a LOT of overlap between these two classes:
-# someone should sit down and factor out the common code as
-# WindowsCCompiler! --GPW
-
-
-import os
-from distutils.errors import \
- DistutilsExecError, \
- CompileError, LibError, LinkError, UnknownFileError
-from distutils.ccompiler import \
- CCompiler, gen_preprocess_options
-from distutils.file_util import write_file
-from distutils.dep_util import newer
-from distutils import log
-
-class BCPPCompiler(CCompiler) :
- """Concrete class that implements an interface to the Borland C/C++
- compiler, as defined by the CCompiler abstract class.
- """
-
- compiler_type = 'bcpp'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = _c_extensions + _cpp_extensions
- obj_extension = '.obj'
- static_lib_extension = '.lib'
- shared_lib_extension = '.dll'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = '.exe'
-
-
- def __init__ (self,
- verbose=0,
- dry_run=0,
- force=0):
-
- CCompiler.__init__ (self, verbose, dry_run, force)
-
- # These executables are assumed to all be in the path.
- # Borland doesn't seem to use any special registry settings to
- # indicate their installation locations.
-
- self.cc = "bcc32.exe"
- self.linker = "ilink32.exe"
- self.lib = "tlib.exe"
-
- self.preprocess_options = None
- self.compile_options = ['/tWM', '/O2', '/q', '/g0']
- self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
-
- self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
- self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
- self.ldflags_static = []
- self.ldflags_exe = ['/Gn', '/q', '/x']
- self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
-
-
- # -- Worker methods ------------------------------------------------
-
- def compile(self, sources,
- output_dir=None, macros=None, include_dirs=None, debug=0,
- extra_preargs=None, extra_postargs=None, depends=None):
-
- macros, objects, extra_postargs, pp_opts, build = \
- self._setup_compile(output_dir, macros, include_dirs, sources,
- depends, extra_postargs)
- compile_opts = extra_preargs or []
- compile_opts.append ('-c')
- if debug:
- compile_opts.extend (self.compile_options_debug)
- else:
- compile_opts.extend (self.compile_options)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- # XXX why do the normpath here?
- src = os.path.normpath(src)
- obj = os.path.normpath(obj)
- # XXX _setup_compile() did a mkpath() too but before the normpath.
- # Is it possible to skip the normpath?
- self.mkpath(os.path.dirname(obj))
-
- if ext == '.res':
- # This is already a binary file -- skip it.
- continue # the 'for' loop
- if ext == '.rc':
- # This needs to be compiled to a .res file -- do it now.
- try:
- self.spawn (["brcc32", "-fo", obj, src])
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue # the 'for' loop
-
- # The next two are both for the real compiler.
- if ext in self._c_extensions:
- input_opt = ""
- elif ext in self._cpp_extensions:
- input_opt = "-P"
- else:
- # Unknown file type -- no extra options. The compiler
- # will probably fail, but let it just in case this is a
- # file the compiler recognizes even if we don't.
- input_opt = ""
-
- output_opt = "-o" + obj
-
- # Compiler command line syntax is: "bcc32 [options] file(s)".
- # Note that the source file names must appear at the end of
- # the command line.
- try:
- self.spawn ([self.cc] + compile_opts + pp_opts +
- [input_opt, output_opt] +
- extra_postargs + [src])
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- return objects
-
- # compile ()
-
-
- def create_static_lib (self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
-
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- output_filename = \
- self.library_filename (output_libname, output_dir=output_dir)
-
- if self._need_link (objects, output_filename):
- lib_args = [output_filename, '/u'] + objects
- if debug:
- pass # XXX what goes here?
- try:
- self.spawn ([self.lib] + lib_args)
- except DistutilsExecError as msg:
- raise LibError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # create_static_lib ()
-
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- # XXX this ignores 'build_temp'! should follow the lead of
- # msvccompiler.py
-
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- (libraries, library_dirs, runtime_library_dirs) = \
- self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
-
- if runtime_library_dirs:
- log.warn("I don't know what to do with 'runtime_library_dirs': %s",
- str(runtime_library_dirs))
-
- if output_dir is not None:
- output_filename = os.path.join (output_dir, output_filename)
-
- if self._need_link (objects, output_filename):
-
- # Figure out linker args based on type of target.
- if target_desc == CCompiler.EXECUTABLE:
- startup_obj = 'c0w32'
- if debug:
- ld_args = self.ldflags_exe_debug[:]
- else:
- ld_args = self.ldflags_exe[:]
- else:
- startup_obj = 'c0d32'
- if debug:
- ld_args = self.ldflags_shared_debug[:]
- else:
- ld_args = self.ldflags_shared[:]
-
-
- # Create a temporary exports file for use by the linker
- if export_symbols is None:
- def_file = ''
- else:
- head, tail = os.path.split (output_filename)
- modname, ext = os.path.splitext (tail)
- temp_dir = os.path.dirname(objects[0]) # preserve tree structure
- def_file = os.path.join (temp_dir, '%s.def' % modname)
- contents = ['EXPORTS']
- for sym in (export_symbols or []):
- contents.append(' %s=_%s' % (sym, sym))
- self.execute(write_file, (def_file, contents),
- "writing %s" % def_file)
-
- # Borland C++ has problems with '/' in paths
- objects2 = map(os.path.normpath, objects)
- # split objects in .obj and .res files
- # Borland C++ needs them at different positions in the command line
- objects = [startup_obj]
- resources = []
- for file in objects2:
- (base, ext) = os.path.splitext(os.path.normcase(file))
- if ext == '.res':
- resources.append(file)
- else:
- objects.append(file)
-
-
- for l in library_dirs:
- ld_args.append("/L%s" % os.path.normpath(l))
- ld_args.append("/L.") # we sometimes use relative paths
-
- # list of object files
- ld_args.extend(objects)
-
- # XXX the command-line syntax for Borland C++ is a bit wonky;
- # certain filenames are jammed together in one big string, but
- # comma-delimited. This doesn't mesh too well with the
- # Unix-centric attitude (with a DOS/Windows quoting hack) of
- # 'spawn()', so constructing the argument list is a bit
- # awkward. Note that doing the obvious thing and jamming all
- # the filenames and commas into one argument would be wrong,
- # because 'spawn()' would quote any filenames with spaces in
- # them. Arghghh!. Apparently it works fine as coded...
-
- # name of dll/exe file
- ld_args.extend([',',output_filename])
- # no map file and start libraries
- ld_args.append(',,')
-
- for lib in libraries:
- # see if we find it and if there is a bcpp specific lib
- # (xxx_bcpp.lib)
- libfile = self.find_library_file(library_dirs, lib, debug)
- if libfile is None:
- ld_args.append(lib)
- # probably a BCPP internal library -- don't warn
- else:
- # full name which prefers bcpp_xxx.lib over xxx.lib
- ld_args.append(libfile)
-
- # some default libraries
- ld_args.append ('import32')
- ld_args.append ('cw32mt')
-
- # def file for export symbols
- ld_args.extend([',',def_file])
- # add resource files
- ld_args.append(',')
- ld_args.extend(resources)
-
-
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
-
- self.mkpath (os.path.dirname (output_filename))
- try:
- self.spawn ([self.linker] + ld_args)
- except DistutilsExecError as msg:
- raise LinkError(msg)
-
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # link ()
-
- # -- Miscellaneous methods -----------------------------------------
-
-
- def find_library_file (self, dirs, lib, debug=0):
- # List of effective library names to try, in order of preference:
- # xxx_bcpp.lib is better than xxx.lib
- # and xxx_d.lib is better than xxx.lib if debug is set
- #
- # The "_bcpp" suffix is to handle a Python installation for people
- # with multiple compilers (primarily Distutils hackers, I suspect
- # ;-). The idea is they'd have one static library for each
- # compiler they care about, since (almost?) every Windows compiler
- # seems to have a different format for static libraries.
- if debug:
- dlib = (lib + "_d")
- try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
- else:
- try_names = (lib + "_bcpp", lib)
-
- for dir in dirs:
- for name in try_names:
- libfile = os.path.join(dir, self.library_filename(name))
- if os.path.exists(libfile):
- return libfile
- else:
- # Oops, didn't find it in *any* of 'dirs'
- return None
-
- # overwrite the one from CCompiler to support rc and res-files
- def object_filenames (self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- # use normcase to make sure '.rc' is really '.rc' and not '.RC'
- (base, ext) = os.path.splitext (os.path.normcase(src_name))
- if ext not in (self.src_extensions + ['.rc','.res']):
- raise UnknownFileError("unknown file type '%s' (from '%s')" % \
- (ext, src_name))
- if strip_dir:
- base = os.path.basename (base)
- if ext == '.res':
- # these can go unchanged
- obj_names.append (os.path.join (output_dir, base + ext))
- elif ext == '.rc':
- # these need to be compiled to .res-files
- obj_names.append (os.path.join (output_dir, base + '.res'))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
- # object_filenames ()
-
- def preprocess (self,
- source,
- output_file=None,
- macros=None,
- include_dirs=None,
- extra_preargs=None,
- extra_postargs=None):
-
- (_, macros, include_dirs) = \
- self._fix_compile_args(None, macros, include_dirs)
- pp_opts = gen_preprocess_options(macros, include_dirs)
- pp_args = ['cpp32.exe'] + pp_opts
- if output_file is not None:
- pp_args.append('-o' + output_file)
- if extra_preargs:
- pp_args[:0] = extra_preargs
- if extra_postargs:
- pp_args.extend(extra_postargs)
- pp_args.append(source)
-
- # We need to preprocess: either we're being forced to, or the
- # source file is newer than the target (or the target doesn't
- # exist).
- if self.force or output_file is None or newer(source, output_file):
- if output_file:
- self.mkpath(os.path.dirname(output_file))
- try:
- self.spawn(pp_args)
- except DistutilsExecError as msg:
- print(msg)
- raise CompileError(msg)
-
- # preprocess()
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/ccompiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/ccompiler.py
deleted file mode 100644
index 777fc661eac..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/ccompiler.py
+++ /dev/null
@@ -1,1123 +0,0 @@
-"""distutils.ccompiler
-
-Contains CCompiler, an abstract base class that defines the interface
-for the Distutils compiler abstraction model."""
-
-import sys, os, re
-from distutils.errors import *
-from distutils.spawn import spawn
-from distutils.file_util import move_file
-from distutils.dir_util import mkpath
-from distutils.dep_util import newer_group
-from distutils.util import split_quoted, execute
-from distutils import log
-
-class CCompiler:
- """Abstract base class to define the interface that must be implemented
- by real compiler classes. Also has some utility methods used by
- several compiler classes.
-
- The basic idea behind a compiler abstraction class is that each
- instance can be used for all the compile/link steps in building a
- single project. Thus, attributes common to all of those compile and
- link steps -- include directories, macros to define, libraries to link
- against, etc. -- are attributes of the compiler instance. To allow for
- variability in how individual files are treated, most of those
- attributes may be varied on a per-compilation or per-link basis.
- """
-
- # 'compiler_type' is a class attribute that identifies this class. It
- # keeps code that wants to know what kind of compiler it's dealing with
- # from having to import all possible compiler classes just to do an
- # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
- # should really, really be one of the keys of the 'compiler_class'
- # dictionary (see below -- used by the 'new_compiler()' factory
- # function) -- authors of new compiler interface classes are
- # responsible for updating 'compiler_class'!
- compiler_type = None
-
- # XXX things not handled by this compiler abstraction model:
- # * client can't provide additional options for a compiler,
- # e.g. warning, optimization, debugging flags. Perhaps this
- # should be the domain of concrete compiler abstraction classes
- # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
- # class should have methods for the common ones.
-    #   * can't completely override the include or library search
- # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
- # I'm not sure how widely supported this is even by Unix
- # compilers, much less on other platforms. And I'm even less
- # sure how useful it is; maybe for cross-compiling, but
- # support for that is a ways off. (And anyways, cross
- # compilers probably have a dedicated binary with the
- # right paths compiled in. I hope.)
- # * can't do really freaky things with the library list/library
- # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
- # different versions of libfoo.a in different locations. I
- # think this is useless without the ability to null out the
- # library search path anyways.
-
-
- # Subclasses that rely on the standard filename generation methods
- # implemented below should override these; see the comment near
-    # those methods ('object_filenames()' et al.) for details:
- src_extensions = None # list of strings
- obj_extension = None # string
- static_lib_extension = None
- shared_lib_extension = None # string
- static_lib_format = None # format string
- shared_lib_format = None # prob. same as static_lib_format
- exe_extension = None # string
-
- # Default language settings. language_map is used to detect a source
- # file or Extension target language, checking source filenames.
- # language_order is used to detect the language precedence, when deciding
- # what language to use when mixing source types. For example, if some
- # extension has two files with ".c" extension, and one with ".cpp", it
- # is still linked as c++.
- language_map = {".c" : "c",
- ".cc" : "c++",
- ".cpp" : "c++",
- ".cxx" : "c++",
- ".m" : "objc",
- }
- language_order = ["c++", "objc", "c"]
-
- def __init__(self, verbose=0, dry_run=0, force=0):
- self.dry_run = dry_run
- self.force = force
- self.verbose = verbose
-
- # 'output_dir': a common output directory for object, library,
- # shared object, and shared library files
- self.output_dir = None
-
- # 'macros': a list of macro definitions (or undefinitions). A
- # macro definition is a 2-tuple (name, value), where the value is
- # either a string or None (no explicit value). A macro
- # undefinition is a 1-tuple (name,).
- self.macros = []
-
- # 'include_dirs': a list of directories to search for include files
- self.include_dirs = []
-
- # 'libraries': a list of libraries to include in any link
- # (library names, not filenames: eg. "foo" not "libfoo.a")
- self.libraries = []
-
- # 'library_dirs': a list of directories to search for libraries
- self.library_dirs = []
-
- # 'runtime_library_dirs': a list of directories to search for
- # shared libraries/objects at runtime
- self.runtime_library_dirs = []
-
- # 'objects': a list of object files (or similar, such as explicitly
- # named library files) to include on any link
- self.objects = []
-
- for key in self.executables.keys():
- self.set_executable(key, self.executables[key])
-
- def set_executables(self, **kwargs):
- """Define the executables (and options for them) that will be run
- to perform the various stages of compilation. The exact set of
- executables that may be specified here depends on the compiler
- class (via the 'executables' class attribute), but most will have:
- compiler the C/C++ compiler
- linker_so linker used to create shared objects and libraries
- linker_exe linker used to create binary executables
- archiver static library creator
-
- On platforms with a command-line (Unix, DOS/Windows), each of these
- is a string that will be split into executable name and (optional)
- list of arguments. (Splitting the string is done similarly to how
- Unix shells operate: words are delimited by spaces, but quotes and
- backslashes can override this. See
- 'distutils.util.split_quoted()'.)
- """
-
- # Note that some CCompiler implementation classes will define class
- # attributes 'cpp', 'cc', etc. with hard-coded executable names;
- # this is appropriate when a compiler class is for exactly one
- # compiler/OS combination (eg. MSVCCompiler). Other compiler
- # classes (UnixCCompiler, in particular) are driven by information
- # discovered at run-time, since there are many different ways to do
- # basically the same things with Unix C compilers.
-
- for key in kwargs:
- if key not in self.executables:
- raise ValueError("unknown executable '%s' for class %s" %
- (key, self.__class__.__name__))
- self.set_executable(key, kwargs[key])
-
- def set_executable(self, key, value):
- if isinstance(value, str):
- setattr(self, key, split_quoted(value))
- else:
- setattr(self, key, value)
-
- def _find_macro(self, name):
- i = 0
- for defn in self.macros:
- if defn[0] == name:
- return i
- i += 1
- return None
-
- def _check_macro_definitions(self, definitions):
- """Ensures that every element of 'definitions' is a valid macro
-        definition, ie. either a (name,value) 2-tuple or a (name,) tuple. Do
- nothing if all definitions are OK, raise TypeError otherwise.
- """
- for defn in definitions:
- if not (isinstance(defn, tuple) and
- (len(defn) in (1, 2) and
- (isinstance (defn[1], str) or defn[1] is None)) and
- isinstance (defn[0], str)):
- raise TypeError(("invalid macro definition '%s': " % defn) + \
- "must be tuple (string,), (string, string), or " + \
- "(string, None)")
-
-
- # -- Bookkeeping methods -------------------------------------------
-
- def define_macro(self, name, value=None):
- """Define a preprocessor macro for all compilations driven by this
- compiler object. The optional parameter 'value' should be a
- string; if it is not supplied, then the macro will be defined
- without an explicit value and the exact outcome depends on the
- compiler used (XXX true? does ANSI say anything about this?)
- """
- # Delete from the list of macro definitions/undefinitions if
- # already there (so that this one will take precedence).
- i = self._find_macro (name)
- if i is not None:
- del self.macros[i]
-
- self.macros.append((name, value))
-
- def undefine_macro(self, name):
- """Undefine a preprocessor macro for all compilations driven by
- this compiler object. If the same macro is defined by
- 'define_macro()' and undefined by 'undefine_macro()' the last call
- takes precedence (including multiple redefinitions or
- undefinitions). If the macro is redefined/undefined on a
- per-compilation basis (ie. in the call to 'compile()'), then that
- takes precedence.
- """
- # Delete from the list of macro definitions/undefinitions if
- # already there (so that this one will take precedence).
- i = self._find_macro (name)
- if i is not None:
- del self.macros[i]
-
- undefn = (name,)
- self.macros.append(undefn)
-
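A small sketch of that bookkeeping, using a hypothetical stub subclass (the base class needs an 'executables' attribute before it can be instantiated):

    class ToyCompiler(CCompiler):
        executables = {}            # stub so CCompiler.__init__ runs

    c = ToyCompiler()
    c.define_macro('DEBUG', '1')    # c.macros == [('DEBUG', '1')]
    c.define_macro('DEBUG', '2')    # the earlier entry is deleted, then re-added
    c.undefine_macro('DEBUG')       # c.macros == [('DEBUG',)], a 1-tuple undefinition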
- def add_include_dir(self, dir):
- """Add 'dir' to the list of directories that will be searched for
- header files. The compiler is instructed to search directories in
- the order in which they are supplied by successive calls to
- 'add_include_dir()'.
- """
- self.include_dirs.append(dir)
-
- def set_include_dirs(self, dirs):
- """Set the list of directories that will be searched to 'dirs' (a
- list of strings). Overrides any preceding calls to
-        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
- to the list passed to 'set_include_dirs()'. This does not affect
- any list of standard include directories that the compiler may
- search by default.
- """
- self.include_dirs = dirs[:]
-
- def add_library(self, libname):
- """Add 'libname' to the list of libraries that will be included in
- all links driven by this compiler object. Note that 'libname'
- should *not* be the name of a file containing a library, but the
- name of the library itself: the actual filename will be inferred by
- the linker, the compiler, or the compiler class (depending on the
- platform).
-
- The linker will be instructed to link against libraries in the
- order they were supplied to 'add_library()' and/or
- 'set_libraries()'. It is perfectly valid to duplicate library
- names; the linker will be instructed to link against libraries as
- many times as they are mentioned.
- """
- self.libraries.append(libname)
-
- def set_libraries(self, libnames):
- """Set the list of libraries to be included in all links driven by
- this compiler object to 'libnames' (a list of strings). This does
- not affect any standard system libraries that the linker may
- include by default.
- """
- self.libraries = libnames[:]
-
- def add_library_dir(self, dir):
- """Add 'dir' to the list of directories that will be searched for
- libraries specified to 'add_library()' and 'set_libraries()'. The
- linker will be instructed to search for libraries in the order they
- are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
- """
- self.library_dirs.append(dir)
-
- def set_library_dirs(self, dirs):
- """Set the list of library search directories to 'dirs' (a list of
- strings). This does not affect any standard library search path
- that the linker may search by default.
- """
- self.library_dirs = dirs[:]
-
- def add_runtime_library_dir(self, dir):
- """Add 'dir' to the list of directories that will be searched for
- shared libraries at runtime.
- """
- self.runtime_library_dirs.append(dir)
-
- def set_runtime_library_dirs(self, dirs):
- """Set the list of directories to search for shared libraries at
- runtime to 'dirs' (a list of strings). This does not affect any
- standard search path that the runtime linker may search by
- default.
- """
- self.runtime_library_dirs = dirs[:]
-
- def add_link_object(self, object):
- """Add 'object' to the list of object files (or analogues, such as
- explicitly named library files or the output of "resource
- compilers") to be included in every link driven by this compiler
- object.
- """
- self.objects.append(object)
-
- def set_link_objects(self, objects):
- """Set the list of object files (or analogues) to be included in
- every link to 'objects'. This does not affect any standard object
- files that the linker may include by default (such as system
- libraries).
- """
- self.objects = objects[:]
-
-
- # -- Private utility methods --------------------------------------
- # (here for the convenience of subclasses)
-
- # Helper method to prep compiler in subclass compile() methods
-
- def _setup_compile(self, outdir, macros, incdirs, sources, depends,
- extra):
- """Process arguments and decide which source files to compile."""
- if outdir is None:
- outdir = self.output_dir
- elif not isinstance(outdir, str):
- raise TypeError("'output_dir' must be a string or None")
-
- if macros is None:
- macros = self.macros
- elif isinstance(macros, list):
- macros = macros + (self.macros or [])
- else:
- raise TypeError("'macros' (if supplied) must be a list of tuples")
-
- if incdirs is None:
- incdirs = self.include_dirs
- elif isinstance(incdirs, (list, tuple)):
- incdirs = list(incdirs) + (self.include_dirs or [])
- else:
- raise TypeError(
- "'include_dirs' (if supplied) must be a list of strings")
-
- if extra is None:
- extra = []
-
- # Get the list of expected output (object) files
- objects = self.object_filenames(sources, strip_dir=0,
- output_dir=outdir)
- assert len(objects) == len(sources)
-
- pp_opts = gen_preprocess_options(macros, incdirs)
-
- build = {}
- for i in range(len(sources)):
- src = sources[i]
- obj = objects[i]
- ext = os.path.splitext(src)[1]
- self.mkpath(os.path.dirname(obj))
- build[obj] = (src, ext)
-
- return macros, objects, extra, pp_opts, build
-
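Illustrative shapes of what _setup_compile() returns for a Unix-style compiler, with hypothetical paths:

    # sources = ['src/foo.c', 'src/bar.cpp'], outdir = 'build'
    # objects = ['build/src/foo.o', 'build/src/bar.o']
    # build   = {'build/src/foo.o': ('src/foo.c', '.c'),
    #            'build/src/bar.o': ('src/bar.cpp', '.cpp')}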
- def _get_cc_args(self, pp_opts, debug, before):
- # works for unixccompiler, cygwinccompiler
- cc_args = pp_opts + ['-c']
- if debug:
- cc_args[:0] = ['-g']
- if before:
- cc_args[:0] = before
- return cc_args
-
- def _fix_compile_args(self, output_dir, macros, include_dirs):
- """Typecheck and fix-up some of the arguments to the 'compile()'
- method, and return fixed-up values. Specifically: if 'output_dir'
- is None, replaces it with 'self.output_dir'; ensures that 'macros'
- is a list, and augments it with 'self.macros'; ensures that
- 'include_dirs' is a list, and augments it with 'self.include_dirs'.
- Guarantees that the returned values are of the correct type,
- i.e. for 'output_dir' either string or None, and for 'macros' and
- 'include_dirs' either list or None.
- """
- if output_dir is None:
- output_dir = self.output_dir
- elif not isinstance(output_dir, str):
- raise TypeError("'output_dir' must be a string or None")
-
- if macros is None:
- macros = self.macros
- elif isinstance(macros, list):
- macros = macros + (self.macros or [])
- else:
- raise TypeError("'macros' (if supplied) must be a list of tuples")
-
- if include_dirs is None:
- include_dirs = self.include_dirs
- elif isinstance(include_dirs, (list, tuple)):
- include_dirs = list(include_dirs) + (self.include_dirs or [])
- else:
- raise TypeError(
- "'include_dirs' (if supplied) must be a list of strings")
-
- return output_dir, macros, include_dirs
-
- def _prep_compile(self, sources, output_dir, depends=None):
- """Decide which source files must be recompiled.
-
- Determine the list of object files corresponding to 'sources',
- and figure out which ones really need to be recompiled.
- Return a list of all object files and a dictionary telling
- which source files can be skipped.
- """
- # Get the list of expected output (object) files
- objects = self.object_filenames(sources, output_dir=output_dir)
- assert len(objects) == len(sources)
-
- # Return an empty dict for the "which source files can be skipped"
- # return value to preserve API compatibility.
- return objects, {}
-
- def _fix_object_args(self, objects, output_dir):
- """Typecheck and fix up some arguments supplied to various methods.
- Specifically: ensure that 'objects' is a list; if output_dir is
- None, replace with self.output_dir. Return fixed versions of
- 'objects' and 'output_dir'.
- """
- if not isinstance(objects, (list, tuple)):
- raise TypeError("'objects' must be a list or tuple of strings")
- objects = list(objects)
-
- if output_dir is None:
- output_dir = self.output_dir
- elif not isinstance(output_dir, str):
- raise TypeError("'output_dir' must be a string or None")
-
- return (objects, output_dir)
-
- def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
- """Typecheck and fix up some of the arguments supplied to the
- 'link_*' methods. Specifically: ensure that all arguments are
- lists, and augment them with their permanent versions
- (eg. 'self.libraries' augments 'libraries'). Return a tuple with
- fixed versions of all arguments.
- """
- if libraries is None:
- libraries = self.libraries
- elif isinstance(libraries, (list, tuple)):
- libraries = list (libraries) + (self.libraries or [])
- else:
- raise TypeError(
- "'libraries' (if supplied) must be a list of strings")
-
- if library_dirs is None:
- library_dirs = self.library_dirs
- elif isinstance(library_dirs, (list, tuple)):
- library_dirs = list (library_dirs) + (self.library_dirs or [])
- else:
- raise TypeError(
- "'library_dirs' (if supplied) must be a list of strings")
-
- if runtime_library_dirs is None:
- runtime_library_dirs = self.runtime_library_dirs
- elif isinstance(runtime_library_dirs, (list, tuple)):
- runtime_library_dirs = (list(runtime_library_dirs) +
- (self.runtime_library_dirs or []))
- else:
- raise TypeError("'runtime_library_dirs' (if supplied) "
- "must be a list of strings")
-
- return (libraries, library_dirs, runtime_library_dirs)
-
- def _need_link(self, objects, output_file):
- """Return true if we need to relink the files listed in 'objects'
- to recreate 'output_file'.
- """
- if self.force:
- return True
- else:
- if self.dry_run:
- newer = newer_group (objects, output_file, missing='newer')
- else:
- newer = newer_group (objects, output_file)
- return newer
-
- def detect_language(self, sources):
- """Detect the language of a given file, or list of files. Uses
- language_map, and language_order to do the job.
- """
- if not isinstance(sources, list):
- sources = [sources]
- lang = None
- index = len(self.language_order)
- for source in sources:
- base, ext = os.path.splitext(source)
- extlang = self.language_map.get(ext)
- try:
- extindex = self.language_order.index(extlang)
- if extindex < index:
- lang = extlang
- index = extindex
- except ValueError:
- pass
- return lang
-
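For example (hypothetical stub subclass again, since instantiation requires an 'executables' attribute):

    class ToyCompiler(CCompiler):
        executables = {}

    t = ToyCompiler()
    t.detect_language(['a.c', 'b.c'])    # -> 'c'
    t.detect_language(['a.c', 'b.cpp'])  # -> 'c++', c++ precedes c in language_order
    t.detect_language('main.m')          # -> 'objc', a single filename is accepted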
-
- # -- Worker methods ------------------------------------------------
- # (must be implemented by subclasses)
-
- def preprocess(self, source, output_file=None, macros=None,
- include_dirs=None, extra_preargs=None, extra_postargs=None):
- """Preprocess a single C/C++ source file, named in 'source'.
- Output will be written to file named 'output_file', or stdout if
- 'output_file' not supplied. 'macros' is a list of macro
- definitions as for 'compile()', which will augment the macros set
- with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
- list of directory names that will be added to the default list.
-
- Raises PreprocessError on failure.
- """
- pass
-
- def compile(self, sources, output_dir=None, macros=None,
- include_dirs=None, debug=0, extra_preargs=None,
- extra_postargs=None, depends=None):
- """Compile one or more source files.
-
- 'sources' must be a list of filenames, most likely C/C++
- files, but in reality anything that can be handled by a
- particular compiler and compiler class (eg. MSVCCompiler can
- handle resource files in 'sources'). Return a list of object
- filenames, one per source filename in 'sources'. Depending on
- the implementation, not all source files will necessarily be
- compiled, but all corresponding object filenames will be
- returned.
-
- If 'output_dir' is given, object files will be put under it, while
- retaining their original path component. That is, "foo/bar.c"
- normally compiles to "foo/bar.o" (for a Unix implementation); if
- 'output_dir' is "build", then it would compile to
- "build/foo/bar.o".
-
- 'macros', if given, must be a list of macro definitions. A macro
- definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
- The former defines a macro; if the value is None, the macro is
- defined without an explicit value. The 1-tuple case undefines a
-        macro.  Later definitions/redefinitions/undefinitions take
- precedence.
-
- 'include_dirs', if given, must be a list of strings, the
- directories to add to the default include file search path for this
- compilation only.
-
- 'debug' is a boolean; if true, the compiler will be instructed to
- output debug symbols in (or alongside) the object file(s).
-
-        'extra_preargs' and 'extra_postargs' are implementation-dependent.
- On platforms that have the notion of a command-line (e.g. Unix,
- DOS/Windows), they are most likely lists of strings: extra
- command-line arguments to prepend/append to the compiler command
- line. On other platforms, consult the implementation class
- documentation. In any event, they are intended as an escape hatch
- for those occasions when the abstract compiler framework doesn't
- cut the mustard.
-
- 'depends', if given, is a list of filenames that all targets
- depend on. If a source file is older than any file in
- depends, then the source file will be recompiled. This
- supports dependency tracking, but only at a coarse
- granularity.
-
- Raises CompileError on failure.
- """
- # A concrete compiler class can either override this method
- # entirely or implement _compile().
- macros, objects, extra_postargs, pp_opts, build = \
- self._setup_compile(output_dir, macros, include_dirs, sources,
- depends, extra_postargs)
- cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
-
- # Return *all* object filenames, not just the ones we just built.
- return objects
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- """Compile 'src' to product 'obj'."""
- # A concrete compiler class that does not override compile()
- # should implement _compile().
- pass
-
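A minimal sketch of the subclass contract described above: override only _compile() and let the inherited compile() drive the loop (gcc and the attribute values are purely illustrative):

    class ToyUnixCompiler(CCompiler):
        executables = {}
        src_extensions = ['.c']
        obj_extension = '.o'

        def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
            # cc_args already holds pp_opts + ['-c'], plus '-g' when debug=1.
            self.spawn(['gcc'] + cc_args + [src, '-o', obj] + extra_postargs)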
- def create_static_lib(self, objects, output_libname, output_dir=None,
- debug=0, target_lang=None):
- """Link a bunch of stuff together to create a static library file.
- The "bunch of stuff" consists of the list of object files supplied
- as 'objects', the extra object files supplied to
- 'add_link_object()' and/or 'set_link_objects()', the libraries
- supplied to 'add_library()' and/or 'set_libraries()', and the
- libraries supplied as 'libraries' (if any).
-
- 'output_libname' should be a library name, not a filename; the
- filename will be inferred from the library name. 'output_dir' is
- the directory where the library file will be put.
-
- 'debug' is a boolean; if true, debugging information will be
- included in the library (note that on most platforms, it is the
- compile step where this matters: the 'debug' flag is included here
- just for consistency).
-
- 'target_lang' is the target language for which the given objects
- are being compiled. This allows specific linkage time treatment of
- certain languages.
-
- Raises LibError on failure.
- """
- pass
-
-
- # values for target_desc parameter in link()
- SHARED_OBJECT = "shared_object"
- SHARED_LIBRARY = "shared_library"
- EXECUTABLE = "executable"
-
- def link(self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- """Link a bunch of stuff together to create an executable or
- shared library file.
-
- The "bunch of stuff" consists of the list of object files supplied
- as 'objects'. 'output_filename' should be a filename. If
- 'output_dir' is supplied, 'output_filename' is relative to it
- (i.e. 'output_filename' can provide directory components if
- needed).
-
- 'libraries' is a list of libraries to link against. These are
- library names, not filenames, since they're translated into
- filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
- on Unix and "foo.lib" on DOS/Windows). However, they can include a
- directory component, which means the linker will look in that
- specific directory rather than searching all the normal locations.
-
- 'library_dirs', if supplied, should be a list of directories to
- search for libraries that were specified as bare library names
- (ie. no directory component). These are on top of the system
- default and those supplied to 'add_library_dir()' and/or
- 'set_library_dirs()'. 'runtime_library_dirs' is a list of
- directories that will be embedded into the shared library and used
- to search for other shared libraries that *it* depends on at
- run-time. (This may only be relevant on Unix.)
-
- 'export_symbols' is a list of symbols that the shared library will
- export. (This appears to be relevant only on Windows.)
-
- 'debug' is as for 'compile()' and 'create_static_lib()', with the
- slight distinction that it actually matters on most platforms (as
- opposed to 'create_static_lib()', which includes a 'debug' flag
- mostly for form's sake).
-
- 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
- of course that they supply command-line arguments for the
- particular linker being used).
-
- 'target_lang' is the target language for which the given objects
- are being compiled. This allows specific linkage time treatment of
- certain languages.
-
- Raises LinkError on failure.
- """
- raise NotImplementedError
-
-
- # Old 'link_*()' methods, rewritten to use the new 'link()' method.
-
- def link_shared_lib(self,
- objects,
- output_libname,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- self.link(CCompiler.SHARED_LIBRARY, objects,
- self.library_filename(output_libname, lib_type='shared'),
- output_dir,
- libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug,
- extra_preargs, extra_postargs, build_temp, target_lang)
-
-
- def link_shared_object(self,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- self.link(CCompiler.SHARED_OBJECT, objects,
- output_filename, output_dir,
- libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug,
- extra_preargs, extra_postargs, build_temp, target_lang)
-
-
- def link_executable(self,
- objects,
- output_progname,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- target_lang=None):
- self.link(CCompiler.EXECUTABLE, objects,
- self.executable_filename(output_progname), output_dir,
- libraries, library_dirs, runtime_library_dirs, None,
- debug, extra_preargs, extra_postargs, None, target_lang)
-
-
- # -- Miscellaneous methods -----------------------------------------
-    # These are all used by the 'gen_lib_options()' function; there is
- # no appropriate default implementation so subclasses should
- # implement all of these.
-
- def library_dir_option(self, dir):
- """Return the compiler option to add 'dir' to the list of
- directories searched for libraries.
- """
- raise NotImplementedError
-
- def runtime_library_dir_option(self, dir):
- """Return the compiler option to add 'dir' to the list of
- directories searched for runtime libraries.
- """
- raise NotImplementedError
-
- def library_option(self, lib):
- """Return the compiler option to add 'lib' to the list of libraries
- linked into the shared library or executable.
- """
- raise NotImplementedError
-
- def has_function(self, funcname, includes=None, include_dirs=None,
- libraries=None, library_dirs=None):
- """Return a boolean indicating whether funcname is supported on
- the current platform. The optional arguments can be used to
- augment the compilation environment.
- """
- # this can't be included at module scope because it tries to
- # import math which might not be available at that point - maybe
- # the necessary logic should just be inlined?
- import tempfile
- if includes is None:
- includes = []
- if include_dirs is None:
- include_dirs = []
- if libraries is None:
- libraries = []
- if library_dirs is None:
- library_dirs = []
- fd, fname = tempfile.mkstemp(".c", funcname, text=True)
- f = os.fdopen(fd, "w")
- try:
- for incl in includes:
- f.write("""#include "%s"\n""" % incl)
- f.write("""\
-int main (int argc, char **argv) {
- %s();
- return 0;
-}
-""" % funcname)
- finally:
- f.close()
- try:
- objects = self.compile([fname], include_dirs=include_dirs)
- except CompileError:
- return False
- finally:
- os.remove(fname)
-
- try:
- self.link_executable(objects, "a.out",
- libraries=libraries,
- library_dirs=library_dirs)
- except (LinkError, TypeError):
- return False
- else:
- os.remove(os.path.join(self.output_dir or '', "a.out"))
- finally:
- for fn in objects:
- os.remove(fn)
- return True
-
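A typical probe built on this method (a sketch; 'strlcpy' is just an example symbol to test for):

    from distutils.ccompiler import new_compiler

    cc = new_compiler()
    if cc.has_function('strlcpy', includes=['string.h']):
        cc.define_macro('HAVE_STRLCPY', '1')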
- def find_library_file (self, dirs, lib, debug=0):
- """Search the specified list of directories for a static or shared
- library file 'lib' and return the full path to that file. If
- 'debug' true, look for a debugging version (if that makes sense on
- the current platform). Return None if 'lib' wasn't found in any of
- the specified directories.
- """
- raise NotImplementedError
-
- # -- Filename generation methods -----------------------------------
-
- # The default implementation of the filename generating methods are
- # prejudiced towards the Unix/DOS/Windows view of the world:
- # * object files are named by replacing the source file extension
- # (eg. .c/.cpp -> .o/.obj)
- # * library files (shared or static) are named by plugging the
- # library name and extension into a format string, eg.
- # "lib%s.%s" % (lib_name, ".a") for Unix static libraries
- # * executables are named by appending an extension (possibly
- # empty) to the program name: eg. progname + ".exe" for
- # Windows
- #
- # To reduce redundant code, these methods expect to find
- # several attributes in the current object (presumably defined
- # as class attributes):
- # * src_extensions -
- # list of C/C++ source file extensions, eg. ['.c', '.cpp']
- # * obj_extension -
- # object file extension, eg. '.o' or '.obj'
- # * static_lib_extension -
- # extension for static library files, eg. '.a' or '.lib'
- # * shared_lib_extension -
- # extension for shared library/object files, eg. '.so', '.dll'
- # * static_lib_format -
- # format string for generating static library filenames,
- # eg. 'lib%s.%s' or '%s.%s'
- # * shared_lib_format
- # format string for generating shared library filenames
- # (probably same as static_lib_format, since the extension
- # is one of the intended parameters to the format string)
- # * exe_extension -
- # extension for executable files, eg. '' or '.exe'
-
- def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
- if output_dir is None:
- output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- base, ext = os.path.splitext(src_name)
- base = os.path.splitdrive(base)[1] # Chop off the drive
- base = base[os.path.isabs(base):] # If abs, chop off leading /
- if ext not in self.src_extensions:
- raise UnknownFileError(
- "unknown file type '%s' (from '%s')" % (ext, src_name))
- if strip_dir:
- base = os.path.basename(base)
- obj_names.append(os.path.join(output_dir,
- base + self.obj_extension))
- return obj_names
-
- def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
- assert output_dir is not None
- if strip_dir:
- basename = os.path.basename(basename)
- return os.path.join(output_dir, basename + self.shared_lib_extension)
-
- def executable_filename(self, basename, strip_dir=0, output_dir=''):
- assert output_dir is not None
- if strip_dir:
- basename = os.path.basename(basename)
- return os.path.join(output_dir, basename + (self.exe_extension or ''))
-
- def library_filename(self, libname, lib_type='static', # or 'shared'
- strip_dir=0, output_dir=''):
- assert output_dir is not None
- if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
- raise ValueError(
- "'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
- fmt = getattr(self, lib_type + "_lib_format")
- ext = getattr(self, lib_type + "_lib_extension")
-
- dir, base = os.path.split(libname)
- filename = fmt % (base, ext)
- if strip_dir:
- dir = ''
-
- return os.path.join(output_dir, dir, filename)
-
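With UnixCCompiler's settings (static_lib_format = 'lib%s%s', static_lib_extension = '.a'), the method behaves as follows, assuming such a compiler instance named compiler:

    compiler.library_filename('foo')                   # -> 'libfoo.a'
    compiler.library_filename('sub/foo')               # -> 'sub/libfoo.a'
    compiler.library_filename('sub/foo', strip_dir=1)  # -> 'libfoo.a'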
-
- # -- Utility methods -----------------------------------------------
-
- def announce(self, msg, level=1):
- log.debug(msg)
-
- def debug_print(self, msg):
- from distutils.debug import DEBUG
- if DEBUG:
- print(msg)
-
- def warn(self, msg):
- sys.stderr.write("warning: %s\n" % msg)
-
- def execute(self, func, args, msg=None, level=1):
- execute(func, args, msg, self.dry_run)
-
- def spawn(self, cmd, **kwargs):
- spawn(cmd, dry_run=self.dry_run, **kwargs)
-
- def move_file(self, src, dst):
- return move_file(src, dst, dry_run=self.dry_run)
-
- def mkpath(self, name, mode=0o777):
- mkpath(name, mode, dry_run=self.dry_run)
-
-
-# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
-# type for that platform. Keys are interpreted as re match
-# patterns. Order is important; platform mappings are preferred over
-# OS names.
-_default_compilers = (
-
- # Platform string mappings
-
- # on a cygwin built python we can use gcc like an ordinary UNIXish
- # compiler
- ('cygwin.*', 'unix'),
-
- # OS name mappings
- ('posix', 'unix'),
- ('nt', 'msvc'),
-
- )
-
-def get_default_compiler(osname=None, platform=None):
- """Determine the default compiler to use for the given platform.
-
- osname should be one of the standard Python OS names (i.e. the
- ones returned by os.name) and platform the common value
- returned by sys.platform for the platform in question.
-
- The default values are os.name and sys.platform in case the
- parameters are not given.
- """
- if osname is None:
- osname = os.name
- if platform is None:
- platform = sys.platform
- for pattern, compiler in _default_compilers:
- if re.match(pattern, platform) is not None or \
- re.match(pattern, osname) is not None:
- return compiler
- # Default to Unix compiler
- return 'unix'
-
-# Map compiler types to (module_name, class_name) pairs -- ie. where to
-# find the code that implements an interface to this compiler. (The module
-# is assumed to be in the 'distutils' package.)
-compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
- "standard UNIX-style compiler"),
- 'msvc': ('_msvccompiler', 'MSVCCompiler',
- "Microsoft Visual C++"),
- 'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
- "Cygwin port of GNU C Compiler for Win32"),
- 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
- "Mingw32 port of GNU C Compiler for Win32"),
- 'bcpp': ('bcppcompiler', 'BCPPCompiler',
- "Borland C++ Compiler"),
- }
-
-def show_compilers():
- """Print list of available compilers (used by the "--help-compiler"
- options to "build", "build_ext", "build_clib").
- """
- # XXX this "knows" that the compiler option it's describing is
- # "--compiler", which just happens to be the case for the three
- # commands that use it.
- from distutils.fancy_getopt import FancyGetopt
- compilers = []
- for compiler in compiler_class.keys():
- compilers.append(("compiler="+compiler, None,
- compiler_class[compiler][2]))
- compilers.sort()
- pretty_printer = FancyGetopt(compilers)
- pretty_printer.print_help("List of available compilers:")
-
-
-def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
- """Generate an instance of some CCompiler subclass for the supplied
- platform/compiler combination. 'plat' defaults to 'os.name'
- (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
- for that platform. Currently only 'posix' and 'nt' are supported, and
- the default compilers are "traditional Unix interface" (UnixCCompiler
- class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
- possible to ask for a Unix compiler object under Windows, and a
- Microsoft compiler object under Unix -- if you supply a value for
- 'compiler', 'plat' is ignored.
- """
- if plat is None:
- plat = os.name
-
- try:
- if compiler is None:
- compiler = get_default_compiler(plat)
-
- (module_name, class_name, long_description) = compiler_class[compiler]
- except KeyError:
- msg = "don't know how to compile C/C++ code on platform '%s'" % plat
- if compiler is not None:
- msg = msg + " with '%s' compiler" % compiler
- raise DistutilsPlatformError(msg)
-
- try:
- module_name = "distutils." + module_name
- __import__(module_name)
- module = sys.modules[module_name]
- klass = vars(module)[class_name]
- except ImportError:
- raise DistutilsModuleError(
- "can't compile C/C++ code: unable to load module '%s'" % \
- module_name)
- except KeyError:
- raise DistutilsModuleError(
- "can't compile C/C++ code: unable to find class '%s' "
- "in module '%s'" % (class_name, module_name))
-
- # XXX The None is necessary to preserve backwards compatibility
- # with classes that expect verbose to be the first positional
- # argument.
- return klass(None, dry_run, force)
-
-
-def gen_preprocess_options(macros, include_dirs):
- """Generate C pre-processor options (-D, -U, -I) as used by at least
- two types of compilers: the typical Unix compiler and Visual C++.
- 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
- means undefine (-U) macro 'name', and (name,value) means define (-D)
- macro 'name' to 'value'. 'include_dirs' is just a list of directory
- names to be added to the header file search path (-I). Returns a list
- of command-line options suitable for either Unix compilers or Visual
- C++.
- """
- # XXX it would be nice (mainly aesthetic, and so we don't generate
- # stupid-looking command lines) to go over 'macros' and eliminate
- # redundant definitions/undefinitions (ie. ensure that only the
- # latest mention of a particular macro winds up on the command
- # line). I don't think it's essential, though, since most (all?)
- # Unix C compilers only pay attention to the latest -D or -U
- # mention of a macro on their command line. Similar situation for
- # 'include_dirs'. I'm punting on both for now. Anyways, weeding out
- # redundancies like this should probably be the province of
- # CCompiler, since the data structures used are inherited from it
- # and therefore common to all CCompiler classes.
- pp_opts = []
- for macro in macros:
- if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
- raise TypeError(
- "bad macro definition '%s': "
- "each element of 'macros' list must be a 1- or 2-tuple"
- % macro)
-
- if len(macro) == 1: # undefine this macro
- pp_opts.append("-U%s" % macro[0])
- elif len(macro) == 2:
- if macro[1] is None: # define with no explicit value
- pp_opts.append("-D%s" % macro[0])
- else:
- # XXX *don't* need to be clever about quoting the
- # macro value here, because we're going to avoid the
- # shell at all costs when we spawn the command!
- pp_opts.append("-D%s=%s" % macro)
-
- for dir in include_dirs:
- pp_opts.append("-I%s" % dir)
- return pp_opts
-
-
-def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
- """Generate linker options for searching library directories and
- linking with specific libraries. 'libraries' and 'library_dirs' are,
- respectively, lists of library names (not filenames!) and search
- directories. Returns a list of command-line options suitable for use
- with some compiler (depending on the two format strings passed in).
- """
- lib_opts = []
-
- for dir in library_dirs:
- lib_opts.append(compiler.library_dir_option(dir))
-
- for dir in runtime_library_dirs:
- opt = compiler.runtime_library_dir_option(dir)
- if isinstance(opt, list):
- lib_opts = lib_opts + opt
- else:
- lib_opts.append(opt)
-
- # XXX it's important that we *not* remove redundant library mentions!
- # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
- # resolve all symbols. I just hope we never have to say "-lfoo obj.o
- # -lbar" to get things to work -- that's certainly a possibility, but a
- # pretty nasty way to arrange your C code.
-
- for lib in libraries:
- (lib_dir, lib_name) = os.path.split(lib)
- if lib_dir:
- lib_file = compiler.find_library_file([lib_dir], lib_name)
- if lib_file:
- lib_opts.append(lib_file)
- else:
- compiler.warn("no library file corresponding to "
- "'%s' found (skipping)" % lib)
- else:
- lib_opts.append(compiler.library_option(lib))
- return lib_opts
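
For orientation, the ccompiler module deleted above is normally driven through new_compiler() and the option helpers. A minimal sketch of that flow (the source file name and output directory are invented for illustration):

    from distutils.ccompiler import new_compiler, gen_preprocess_options

    cc = new_compiler()                          # 'unix' or 'msvc' per get_default_compiler()
    pp_opts = gen_preprocess_options(
        macros=[('NDEBUG', None), ('DEBUG',)],   # -> -DNDEBUG, -UDEBUG
        include_dirs=['include'])                # -> -Iinclude
    objects = cc.compile(['demo.c'], output_dir='build', extra_preargs=pp_opts)
    cc.link_executable(objects, 'demo', output_dir='build')
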
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/cmd.py b/contrib/python/setuptools/py3/setuptools/_distutils/cmd.py
deleted file mode 100644
index dba3191e584..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/cmd.py
+++ /dev/null
@@ -1,403 +0,0 @@
-"""distutils.cmd
-
-Provides the Command class, the base class for the command classes
-in the distutils.command package.
-"""
-
-import sys, os, re
-from distutils.errors import DistutilsOptionError
-from distutils import util, dir_util, file_util, archive_util, dep_util
-from distutils import log
-
-class Command:
- """Abstract base class for defining command classes, the "worker bees"
- of the Distutils. A useful analogy for command classes is to think of
- them as subroutines with local variables called "options". The options
- are "declared" in 'initialize_options()' and "defined" (given their
- final values, aka "finalized") in 'finalize_options()', both of which
- must be defined by every command class. The distinction between the
- two is necessary because option values might come from the outside
- world (command line, config file, ...), and any options dependent on
- other options must be computed *after* these outside influences have
- been processed -- hence 'finalize_options()'. The "body" of the
- subroutine, where it does all its work based on the values of its
- options, is the 'run()' method, which must also be implemented by every
- command class.
- """
-
- # 'sub_commands' formalizes the notion of a "family" of commands,
- # eg. "install" as the parent with sub-commands "install_lib",
- # "install_headers", etc. The parent of a family of commands
- # defines 'sub_commands' as a class attribute; it's a list of
- # (command_name : string, predicate : unbound_method | string | None)
- # tuples, where 'predicate' is a method of the parent command that
- # determines whether the corresponding command is applicable in the
- # current situation. (Eg. "install_headers" is only applicable if
- # we have any C header files to install.) If 'predicate' is None,
- # that command is always applicable.
- #
- # 'sub_commands' is usually defined at the *end* of a class, because
- # predicates can be unbound methods, so they must already have been
- # defined. The canonical example is the "install" command.
- sub_commands = []
-
-
- # -- Creation/initialization methods -------------------------------
-
- def __init__(self, dist):
- """Create and initialize a new Command object. Most importantly,
- invokes the 'initialize_options()' method, which is the real
- initializer and depends on the actual command being
- instantiated.
- """
- # late import because of mutual dependence between these classes
- from distutils.dist import Distribution
-
- if not isinstance(dist, Distribution):
- raise TypeError("dist must be a Distribution instance")
- if self.__class__ is Command:
- raise RuntimeError("Command is an abstract class")
-
- self.distribution = dist
- self.initialize_options()
-
- # Per-command versions of the global flags, so that the user can
- # customize Distutils' behaviour command-by-command and let some
- # commands fall back on the Distribution's behaviour. None means
- # "not defined, check self.distribution's copy", while 0 or 1 mean
- # false and true (duh). Note that this means figuring out the real
- # value of each flag is a touch complicated -- hence "self._dry_run"
- # will be handled by __getattr__, below.
- # XXX This needs to be fixed.
- self._dry_run = None
-
- # verbose is largely ignored, but needs to be set for
- # backwards compatibility (I think)?
- self.verbose = dist.verbose
-
- # Some commands define a 'self.force' option to ignore file
- # timestamps, but methods defined *here* assume that
- # 'self.force' exists for all commands. So define it here
- # just to be safe.
- self.force = None
-
- # The 'help' flag is just used for command-line parsing, so
- # none of that complicated bureaucracy is needed.
- self.help = 0
-
- # 'finalized' records whether or not 'finalize_options()' has been
- # called. 'finalize_options()' itself should not pay attention to
- # this flag: it is the business of 'ensure_finalized()', which
- # always calls 'finalize_options()', to respect/update it.
- self.finalized = 0
-
- # XXX A more explicit way to customize dry_run would be better.
- def __getattr__(self, attr):
- if attr == 'dry_run':
- myval = getattr(self, "_" + attr)
- if myval is None:
- return getattr(self.distribution, attr)
- else:
- return myval
- else:
- raise AttributeError(attr)
-
- def ensure_finalized(self):
- if not self.finalized:
- self.finalize_options()
- self.finalized = 1
-
- # Subclasses must define:
- # initialize_options()
- # provide default values for all options; may be customized by
- # setup script, by options from config file(s), or by command-line
- # options
- # finalize_options()
- # decide on the final values for all options; this is called
- # after all possible intervention from the outside world
- # (command-line, option file, etc.) has been processed
- # run()
- # run the command: do whatever it is we're here to do,
- # controlled by the command's various option values
-
- def initialize_options(self):
- """Set default values for all the options that this command
- supports. Note that these defaults may be overridden by other
- commands, by the setup script, by config files, or by the
- command-line. Thus, this is not the place to code dependencies
- between options; generally, 'initialize_options()' implementations
- are just a bunch of "self.foo = None" assignments.
-
- This method must be implemented by all command classes.
- """
- raise RuntimeError("abstract method -- subclass %s must override"
- % self.__class__)
-
- def finalize_options(self):
- """Set final values for all the options that this command supports.
- This is always called as late as possible, ie. after any option
- assignments from the command-line or from other commands have been
- done. Thus, this is the place to code option dependencies: if
- 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
- long as 'foo' still has the same value it was assigned in
- 'initialize_options()'.
-
- This method must be implemented by all command classes.
- """
- raise RuntimeError("abstract method -- subclass %s must override"
- % self.__class__)
-
-
- def dump_options(self, header=None, indent=""):
- from distutils.fancy_getopt import longopt_xlate
- if header is None:
- header = "command options for '%s':" % self.get_command_name()
- self.announce(indent + header, level=log.INFO)
- indent = indent + " "
- for (option, _, _) in self.user_options:
- option = option.translate(longopt_xlate)
- if option[-1] == "=":
- option = option[:-1]
- value = getattr(self, option)
- self.announce(indent + "%s = %s" % (option, value),
- level=log.INFO)
-
- def run(self):
- """A command's raison d'etre: carry out the action it exists to
- perform, controlled by the options initialized in
- 'initialize_options()', customized by other commands, the setup
- script, the command-line, and config files, and finalized in
- 'finalize_options()'. All terminal output and filesystem
- interaction should be done by 'run()'.
-
- This method must be implemented by all command classes.
- """
- raise RuntimeError("abstract method -- subclass %s must override"
- % self.__class__)
-
- def announce(self, msg, level=1):
- """If the current verbosity level is of greater than or equal to
- 'level' print 'msg' to stdout.
- """
- log.log(level, msg)
-
- def debug_print(self, msg):
- """Print 'msg' to stdout if the global DEBUG (taken from the
- DISTUTILS_DEBUG environment variable) flag is true.
- """
- from distutils.debug import DEBUG
- if DEBUG:
- print(msg)
- sys.stdout.flush()
-
-
- # -- Option validation methods -------------------------------------
- # (these are very handy in writing the 'finalize_options()' method)
- #
- # NB. the general philosophy here is to ensure that a particular option
- # value meets certain type and value constraints. If not, we try to
- # force it into conformance (eg. if we expect a list but have a string,
- # split the string on comma and/or whitespace). If we can't force the
- # option into conformance, raise DistutilsOptionError. Thus, command
- # classes need do nothing more than (eg.)
- # self.ensure_string_list('foo')
- # and they can be guaranteed that thereafter, self.foo will be
- # a list of strings.
-
- def _ensure_stringlike(self, option, what, default=None):
- val = getattr(self, option)
- if val is None:
- setattr(self, option, default)
- return default
- elif not isinstance(val, str):
- raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
- % (option, what, val))
- return val
-
- def ensure_string(self, option, default=None):
- """Ensure that 'option' is a string; if not defined, set it to
- 'default'.
- """
- self._ensure_stringlike(option, "string", default)
-
- def ensure_string_list(self, option):
- r"""Ensure that 'option' is a list of strings. If 'option' is
- currently a string, we split it either on /,\s*/ or /\s+/, so
- "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
- ["foo", "bar", "baz"].
- """
- val = getattr(self, option)
- if val is None:
- return
- elif isinstance(val, str):
- setattr(self, option, re.split(r',\s*|\s+', val))
- else:
- if isinstance(val, list):
- ok = all(isinstance(v, str) for v in val)
- else:
- ok = False
- if not ok:
- raise DistutilsOptionError(
- "'%s' must be a list of strings (got %r)"
- % (option, val))
-
- def _ensure_tested_string(self, option, tester, what, error_fmt,
- default=None):
- val = self._ensure_stringlike(option, what, default)
- if val is not None and not tester(val):
- raise DistutilsOptionError(("error in '%s' option: " + error_fmt)
- % (option, val))
-
- def ensure_filename(self, option):
- """Ensure that 'option' is the name of an existing file."""
- self._ensure_tested_string(option, os.path.isfile,
- "filename",
- "'%s' does not exist or is not a file")
-
- def ensure_dirname(self, option):
- self._ensure_tested_string(option, os.path.isdir,
- "directory name",
- "'%s' does not exist or is not a directory")
-
-
- # -- Convenience methods for commands ------------------------------
-
- def get_command_name(self):
- if hasattr(self, 'command_name'):
- return self.command_name
- else:
- return self.__class__.__name__
-
- def set_undefined_options(self, src_cmd, *option_pairs):
- """Set the values of any "undefined" options from corresponding
- option values in some other command object. "Undefined" here means
- "is None", which is the convention used to indicate that an option
- has not been changed between 'initialize_options()' and
- 'finalize_options()'. Usually called from 'finalize_options()' for
- options that depend on some other command rather than another
- option of the same command. 'src_cmd' is the other command from
- which option values will be taken (a command object will be created
- for it if necessary); the remaining arguments are
- '(src_option,dst_option)' tuples which mean "take the value of
- 'src_option' in the 'src_cmd' command object, and copy it to
- 'dst_option' in the current command object".
- """
- # Option_pairs: list of (src_option, dst_option) tuples
- src_cmd_obj = self.distribution.get_command_obj(src_cmd)
- src_cmd_obj.ensure_finalized()
- for (src_option, dst_option) in option_pairs:
- if getattr(self, dst_option) is None:
- setattr(self, dst_option, getattr(src_cmd_obj, src_option))
-
- def get_finalized_command(self, command, create=1):
- """Wrapper around Distribution's 'get_command_obj()' method: find
- (create if necessary and 'create' is true) the command object for
- 'command', call its 'ensure_finalized()' method, and return the
- finalized command object.
- """
- cmd_obj = self.distribution.get_command_obj(command, create)
- cmd_obj.ensure_finalized()
- return cmd_obj
-
- # XXX rename to 'get_reinitialized_command()'? (should do the
- # same in dist.py, if so)
- def reinitialize_command(self, command, reinit_subcommands=0):
- return self.distribution.reinitialize_command(command,
- reinit_subcommands)
-
- def run_command(self, command):
- """Run some other command: uses the 'run_command()' method of
- Distribution, which creates and finalizes the command object if
- necessary and then invokes its 'run()' method.
- """
- self.distribution.run_command(command)
-
- def get_sub_commands(self):
- """Determine the sub-commands that are relevant in the current
- distribution (ie. that need to be run). This is based on the
- 'sub_commands' class attribute: each tuple in that list may include
- a method that we call to determine if the subcommand needs to be
- run for the current distribution. Return a list of command names.
- """
- commands = []
- for (cmd_name, method) in self.sub_commands:
- if method is None or method(self):
- commands.append(cmd_name)
- return commands
-
-
- # -- External world manipulation -----------------------------------
-
- def warn(self, msg):
- log.warn("warning: %s: %s\n", self.get_command_name(), msg)
-
- def execute(self, func, args, msg=None, level=1):
- util.execute(func, args, msg, dry_run=self.dry_run)
-
- def mkpath(self, name, mode=0o777):
- dir_util.mkpath(name, mode, dry_run=self.dry_run)
-
- def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
- link=None, level=1):
- """Copy a file respecting verbose, dry-run and force flags. (The
- former two default to whatever is in the Distribution object, and
- the latter defaults to false for commands that don't define it.)"""
- return file_util.copy_file(infile, outfile, preserve_mode,
- preserve_times, not self.force, link,
- dry_run=self.dry_run)
-
- def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
- preserve_symlinks=0, level=1):
- """Copy an entire directory tree respecting verbose, dry-run,
- and force flags.
- """
- return dir_util.copy_tree(infile, outfile, preserve_mode,
- preserve_times, preserve_symlinks,
- not self.force, dry_run=self.dry_run)
-
- def move_file(self, src, dst, level=1):
- """Move a file respecting dry-run flag."""
- return file_util.move_file(src, dst, dry_run=self.dry_run)
-
- def spawn(self, cmd, search_path=1, level=1):
- """Spawn an external command respecting dry-run flag."""
- from distutils.spawn import spawn
- spawn(cmd, search_path, dry_run=self.dry_run)
-
- def make_archive(self, base_name, format, root_dir=None, base_dir=None,
- owner=None, group=None):
- return archive_util.make_archive(base_name, format, root_dir, base_dir,
- dry_run=self.dry_run,
- owner=owner, group=group)
-
- def make_file(self, infiles, outfile, func, args,
- exec_msg=None, skip_msg=None, level=1):
- """Special case of 'execute()' for operations that process one or
- more input files and generate one output file. Works just like
- 'execute()', except the operation is skipped and a different
- message printed if 'outfile' already exists and is newer than all
- files listed in 'infiles'. If the command defined 'self.force',
- and it is true, then the command is unconditionally run -- does no
- timestamp checks.
- """
- if skip_msg is None:
- skip_msg = "skipping %s (inputs unchanged)" % outfile
-
- # Allow 'infiles' to be a single string
- if isinstance(infiles, str):
- infiles = (infiles,)
- elif not isinstance(infiles, (list, tuple)):
- raise TypeError(
- "'infiles' must be a string, or a list or tuple of strings")
-
- if exec_msg is None:
- exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles))
-
- # If 'outfile' must be regenerated (either because it doesn't
- # exist, is out-of-date, or the 'force' flag is true) then
- # perform the action that presumably regenerates it
- if self.force or dep_util.newer_group(infiles, outfile):
- self.execute(func, args, exec_msg, level)
- # Otherwise, print the "skip" message
- else:
- log.debug(skip_msg)
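
As a reference point for the Command contract removed above, a toy command might look like this (names and behaviour invented; it would be registered via setup(cmdclass={'hello': hello})):

    from distutils.cmd import Command
    from distutils import log

    class hello(Command):
        description = "print a greeting (demo only)"
        user_options = [('name=', 'n', "who to greet")]

        def initialize_options(self):
            self.name = None            # defaults only; no inter-option logic here

        def finalize_options(self):
            if self.name is None:       # option dependencies belong here
                self.name = 'world'
            self.ensure_string('name')

        def run(self):
            self.announce("hello, %s" % self.name, level=log.INFO)
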
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/__init__.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/__init__.py
deleted file mode 100644
index 481eea9fd4b..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""distutils.command
-
-Package containing implementation of all the standard Distutils
-commands."""
-
-__all__ = ['build',
- 'build_py',
- 'build_ext',
- 'build_clib',
- 'build_scripts',
- 'clean',
- 'install',
- 'install_lib',
- 'install_headers',
- 'install_scripts',
- 'install_data',
- 'sdist',
- 'register',
- 'bdist',
- 'bdist_dumb',
- 'bdist_rpm',
- 'bdist_wininst',
- 'check',
- 'upload',
- # These two are reserved for future use:
- #'bdist_sdux',
- #'bdist_pkgtool',
- # Note:
- # bdist_packager is not included because it only provides
- # an abstract base class
- ]
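
Each name in the __all__ list above corresponds to a module of the same name in distutils.command that defines a same-named Command subclass; a quick, hypothetical sanity check of that convention:

    import importlib
    from distutils.command import __all__ as command_names

    for name in command_names:
        mod = importlib.import_module('distutils.command.' + name)
        assert hasattr(mod, name), name   # e.g. distutils.command.build.build
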
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist.py
deleted file mode 100644
index 014871d280e..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""distutils.command.bdist
-
-Implements the Distutils 'bdist' command (create a built [binary]
-distribution)."""
-
-import os
-from distutils.core import Command
-from distutils.errors import *
-from distutils.util import get_platform
-
-
-def show_formats():
- """Print list of available formats (arguments to "--format" option).
- """
- from distutils.fancy_getopt import FancyGetopt
- formats = []
- for format in bdist.format_commands:
- formats.append(("formats=" + format, None,
- bdist.format_command[format][1]))
- pretty_printer = FancyGetopt(formats)
- pretty_printer.print_help("List of available distribution formats:")
-
-
-class bdist(Command):
-
- description = "create a built (binary) distribution"
-
- user_options = [('bdist-base=', 'b',
- "temporary directory for creating built distributions"),
- ('plat-name=', 'p',
- "platform name to embed in generated filenames "
- "(default: %s)" % get_platform()),
- ('formats=', None,
- "formats for distribution (comma-separated list)"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in "
- "[default: dist]"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('owner=', 'u',
- "Owner name used when creating a tar file"
- " [default: current user]"),
- ('group=', 'g',
- "Group name used when creating a tar file"
- " [default: current group]"),
- ]
-
- boolean_options = ['skip-build']
-
- help_options = [
- ('help-formats', None,
- "lists available distribution formats", show_formats),
- ]
-
- # The following commands do not take a format option from bdist
- no_format_option = ('bdist_rpm',)
-
- # This won't do in reality: will need to distinguish RPM-ish Linux,
- # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
- default_format = {'posix': 'gztar',
- 'nt': 'zip'}
-
- # Establish the preferred order (for the --help-formats option).
- format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
- 'wininst', 'zip', 'msi']
-
- # And the real information.
- format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
- 'gztar': ('bdist_dumb', "gzip'ed tar file"),
- 'bztar': ('bdist_dumb', "bzip2'ed tar file"),
- 'xztar': ('bdist_dumb', "xz'ed tar file"),
- 'ztar': ('bdist_dumb', "compressed tar file"),
- 'tar': ('bdist_dumb', "tar file"),
- 'wininst': ('bdist_wininst',
- "Windows executable installer"),
- 'zip': ('bdist_dumb', "ZIP file"),
- 'msi': ('bdist_msi', "Microsoft Installer")
- }
-
-
- def initialize_options(self):
- self.bdist_base = None
- self.plat_name = None
- self.formats = None
- self.dist_dir = None
- self.skip_build = 0
- self.group = None
- self.owner = None
-
- def finalize_options(self):
- # have to finalize 'plat_name' before 'bdist_base'
- if self.plat_name is None:
- if self.skip_build:
- self.plat_name = get_platform()
- else:
- self.plat_name = self.get_finalized_command('build').plat_name
-
- # 'bdist_base' -- parent of per-built-distribution-format
- # temporary directories (eg. we'll probably have
- # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
- if self.bdist_base is None:
- build_base = self.get_finalized_command('build').build_base
- self.bdist_base = os.path.join(build_base,
- 'bdist.' + self.plat_name)
-
- self.ensure_string_list('formats')
- if self.formats is None:
- try:
- self.formats = [self.default_format[os.name]]
- except KeyError:
- raise DistutilsPlatformError(
- "don't know how to create built distributions "
- "on platform %s" % os.name)
-
- if self.dist_dir is None:
- self.dist_dir = "dist"
-
- def run(self):
- # Figure out which sub-commands we need to run.
- commands = []
- for format in self.formats:
- try:
- commands.append(self.format_command[format][0])
- except KeyError:
- raise DistutilsOptionError("invalid format '%s'" % format)
-
- # Reinitialize and run each command.
- for i in range(len(self.formats)):
- cmd_name = commands[i]
- sub_cmd = self.reinitialize_command(cmd_name)
- if cmd_name not in self.no_format_option:
- sub_cmd.format = self.formats[i]
-
- # passing the owner and group names for tar archiving
- if cmd_name == 'bdist_dumb':
- sub_cmd.owner = self.owner
- sub_cmd.group = self.group
-
- # If we're going to need to run this command again, tell it to
- # keep its temporary files around so subsequent runs go faster.
- if cmd_name in commands[i+1:]:
- sub_cmd.keep_temp = 1
- self.run_command(cmd_name)
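
In use, the bdist command above is invoked from a setup script and fans out to the sub-commands in its format_command table; a minimal sketch with invented metadata:

    # setup.py -- metadata invented for illustration
    from distutils.core import setup
    setup(name='demo', version='0.1', py_modules=['demo'])

    # $ python setup.py bdist --formats=gztar,zip
    # resolves to two bdist_dumb runs (the second reuses temp files) and
    # leaves dist/demo-0.1.<plat>.tar.gz and dist/demo-0.1.<plat>.zip
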
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_dumb.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_dumb.py
deleted file mode 100644
index f0d6b5b8cd8..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_dumb.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""distutils.command.bdist_dumb
-
-Implements the Distutils 'bdist_dumb' command (create a "dumb" built
-distribution -- i.e., just an archive to be unpacked under $prefix or
-$exec_prefix)."""
-
-import os
-from distutils.core import Command
-from distutils.util import get_platform
-from distutils.dir_util import remove_tree, ensure_relative
-from distutils.errors import *
-from distutils.sysconfig import get_python_version
-from distutils import log
-
-class bdist_dumb(Command):
-
- description = "create a \"dumb\" built distribution"
-
- user_options = [('bdist-dir=', 'd',
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p',
- "platform name to embed in generated filenames "
- "(default: %s)" % get_platform()),
- ('format=', 'f',
- "archive format to create (tar, gztar, bztar, xztar, "
- "ztar, zip)"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('relative', None,
- "build the archive using relative paths "
- "(default: false)"),
- ('owner=', 'u',
- "Owner name used when creating a tar file"
- " [default: current user]"),
- ('group=', 'g',
- "Group name used when creating a tar file"
- " [default: current group]"),
- ]
-
- boolean_options = ['keep-temp', 'skip-build', 'relative']
-
- default_format = { 'posix': 'gztar',
- 'nt': 'zip' }
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.format = None
- self.keep_temp = 0
- self.dist_dir = None
- self.skip_build = None
- self.relative = 0
- self.owner = None
- self.group = None
-
- def finalize_options(self):
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'dumb')
-
- if self.format is None:
- try:
- self.format = self.default_format[os.name]
- except KeyError:
- raise DistutilsPlatformError(
- "don't know how to create dumb built distributions "
- "on platform %s" % os.name)
-
- self.set_undefined_options('bdist',
- ('dist_dir', 'dist_dir'),
- ('plat_name', 'plat_name'),
- ('skip_build', 'skip_build'))
-
- def run(self):
- if not self.skip_build:
- self.run_command('build')
-
- install = self.reinitialize_command('install', reinit_subcommands=1)
- install.root = self.bdist_dir
- install.skip_build = self.skip_build
- install.warn_dir = 0
-
- log.info("installing to %s", self.bdist_dir)
- self.run_command('install')
-
- # And make an archive relative to the root of the
- # pseudo-installation tree.
- archive_basename = "%s.%s" % (self.distribution.get_fullname(),
- self.plat_name)
-
- pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
- if not self.relative:
- archive_root = self.bdist_dir
- else:
- if (self.distribution.has_ext_modules() and
- (install.install_base != install.install_platbase)):
- raise DistutilsPlatformError(
- "can't make a dumb built distribution where "
- "base and platbase are different (%s, %s)"
- % (repr(install.install_base),
- repr(install.install_platbase)))
- else:
- archive_root = os.path.join(self.bdist_dir,
- ensure_relative(install.install_base))
-
- # Make the archive
- filename = self.make_archive(pseudoinstall_root,
- self.format, root_dir=archive_root,
- owner=self.owner, group=self.group)
- if self.distribution.has_ext_modules():
- pyversion = get_python_version()
- else:
- pyversion = 'any'
- self.distribution.dist_files.append(('bdist_dumb', pyversion,
- filename))
-
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
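
The 'relative' branch above reroots the archive at the install base via ensure_relative(), which strips the leading separator from an absolute path (a sketch of its documented behaviour):

    from distutils.dir_util import ensure_relative

    ensure_relative('/usr/local')        # -> 'usr/local'   (POSIX)
    ensure_relative('c:\\home\\foo')     # -> 'c:home\\foo' (Windows)
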
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_msi.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_msi.py
deleted file mode 100644
index 0863a1883e7..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_msi.py
+++ /dev/null
@@ -1,749 +0,0 @@
-# Copyright (C) 2005, 2006 Martin von Löwis
-# Licensed to PSF under a Contributor Agreement.
-# The bdist_wininst command proper
-# based on bdist_wininst
-"""
-Implements the bdist_msi command.
-"""
-
-import os
-import sys
-import warnings
-from distutils.core import Command
-from distutils.dir_util import remove_tree
-from distutils.sysconfig import get_python_version
-from distutils.version import StrictVersion
-from distutils.errors import DistutilsOptionError
-from distutils.util import get_platform
-from distutils import log
-import msilib
-from msilib import schema, sequence, text
-from msilib import Directory, Feature, Dialog, add_data
-
-class PyDialog(Dialog):
- """Dialog class with a fixed layout: controls at the top, then a ruler,
- then a list of buttons: back, next, cancel. Optionally a bitmap at the
- left."""
- def __init__(self, *args, **kw):
- """Dialog(database, name, x, y, w, h, attributes, title, first,
- default, cancel, bitmap=true)"""
- Dialog.__init__(self, *args)
- ruler = self.h - 36
- bmwidth = 152*ruler/328
- #if kw.get("bitmap", True):
- # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
- self.line("BottomLine", 0, ruler, self.w, 0)
-
- def title(self, title):
- "Set the title text of the dialog at the top."
- # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
- # text, in VerdanaBold10
- self.text("Title", 15, 10, 320, 60, 0x30003,
- r"{\VerdanaBold10}%s" % title)
-
- def back(self, title, next, name = "Back", active = 1):
- """Add a back button with a given title, the tab-next button,
- its name in the Control table, possibly initially disabled.
-
- Return the button, so that events can be associated"""
- if active:
- flags = 3 # Visible|Enabled
- else:
- flags = 1 # Visible
- return self.pushbutton(name, 180, self.h-27, 56, 17, flags, title, next)
-
- def cancel(self, title, next, name = "Cancel", active = 1):
- """Add a cancel button with a given title, the tab-next button,
- its name in the Control table, possibly initially disabled.
-
- Return the button, so that events can be associated"""
- if active:
- flags = 3 # Visible|Enabled
- else:
- flags = 1 # Visible
- return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
-
- def next(self, title, next, name = "Next", active = 1):
- """Add a Next button with a given title, the tab-next button,
- its name in the Control table, possibly initially disabled.
-
- Return the button, so that events can be associated"""
- if active:
- flags = 3 # Visible|Enabled
- else:
- flags = 1 # Visible
- return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
-
- def xbutton(self, name, title, next, xpos):
- """Add a button with a given title, the tab-next button,
- its name in the Control table, giving its x position; the
- y-position is aligned with the other buttons.
-
- Return the button, so that events can be associated"""
- return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
-
-class bdist_msi(Command):
-
- description = "create a Microsoft Installer (.msi) binary distribution"
-
- user_options = [('bdist-dir=', None,
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p',
- "platform name to embed in generated filenames "
- "(default: %s)" % get_platform()),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('target-version=', None,
- "require a specific python version" +
- " on the target system"),
- ('no-target-compile', 'c',
- "do not compile .py to .pyc on the target system"),
- ('no-target-optimize', 'o',
- "do not compile .py to .pyo (optimized) "
- "on the target system"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('install-script=', None,
- "basename of installation script to be run after "
- "installation or before deinstallation"),
- ('pre-install-script=', None,
- "Fully qualified filename of a script to be run before "
- "any files are installed. This script need not be in the "
- "distribution"),
- ]
-
- boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
- 'skip-build']
-
- all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
- '2.5', '2.6', '2.7', '2.8', '2.9',
- '3.0', '3.1', '3.2', '3.3', '3.4',
- '3.5', '3.6', '3.7', '3.8', '3.9']
- other_version = 'X'
-
- def __init__(self, *args, **kw):
- super().__init__(*args, **kw)
- warnings.warn("bdist_msi command is deprecated since Python 3.9, "
- "use bdist_wheel (wheel packages) instead",
- DeprecationWarning, 2)
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.keep_temp = 0
- self.no_target_compile = 0
- self.no_target_optimize = 0
- self.target_version = None
- self.dist_dir = None
- self.skip_build = None
- self.install_script = None
- self.pre_install_script = None
- self.versions = None
-
- def finalize_options(self):
- self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
-
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'msi')
-
- short_version = get_python_version()
- if (not self.target_version) and self.distribution.has_ext_modules():
- self.target_version = short_version
-
- if self.target_version:
- self.versions = [self.target_version]
- if not self.skip_build and self.distribution.has_ext_modules()\
- and self.target_version != short_version:
- raise DistutilsOptionError(
- "target version can only be %s, or the '--skip-build'"
- " option must be specified" % (short_version,))
- else:
- self.versions = list(self.all_versions)
-
- self.set_undefined_options('bdist',
- ('dist_dir', 'dist_dir'),
- ('plat_name', 'plat_name'),
- )
-
- if self.pre_install_script:
- raise DistutilsOptionError(
- "the pre-install-script feature is not yet implemented")
-
- if self.install_script:
- for script in self.distribution.scripts:
- if self.install_script == os.path.basename(script):
- break
- else:
- raise DistutilsOptionError(
- "install_script '%s' not found in scripts"
- % self.install_script)
- self.install_script_key = None
-
- def run(self):
- if not self.skip_build:
- self.run_command('build')
-
- install = self.reinitialize_command('install', reinit_subcommands=1)
- install.prefix = self.bdist_dir
- install.skip_build = self.skip_build
- install.warn_dir = 0
-
- install_lib = self.reinitialize_command('install_lib')
- # we do not want to include pyc or pyo files
- install_lib.compile = 0
- install_lib.optimize = 0
-
- if self.distribution.has_ext_modules():
- # If we are building an installer for a Python version other
- # than the one we are currently running, then we need to ensure
- # our build_lib reflects the other Python version rather than ours.
- # Note that for target_version!=sys.version, we must have skipped the
- # build step, so there is no issue with enforcing the build of this
- # version.
- target_version = self.target_version
- if not target_version:
- assert self.skip_build, "Should have already checked this"
- target_version = '%d.%d' % sys.version_info[:2]
- plat_specifier = ".%s-%s" % (self.plat_name, target_version)
- build = self.get_finalized_command('build')
- build.build_lib = os.path.join(build.build_base,
- 'lib' + plat_specifier)
-
- log.info("installing to %s", self.bdist_dir)
- install.ensure_finalized()
-
- # avoid warning of 'install_lib' about installing
- # into a directory not in sys.path
- sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
-
- install.run()
-
- del sys.path[0]
-
- self.mkpath(self.dist_dir)
- fullname = self.distribution.get_fullname()
- installer_name = self.get_installer_filename(fullname)
- installer_name = os.path.abspath(installer_name)
- if os.path.exists(installer_name): os.unlink(installer_name)
-
- metadata = self.distribution.metadata
- author = metadata.author
- if not author:
- author = metadata.maintainer
- if not author:
- author = "UNKNOWN"
- version = metadata.get_version()
- # ProductVersion must be strictly numeric
- # XXX need to deal with prerelease versions
- sversion = "%d.%d.%d" % StrictVersion(version).version
- # Prefix ProductName with Python x.y, so that
- # it sorts together with the other Python packages
- # in Add-Remove-Programs (ARP)
- fullname = self.distribution.get_fullname()
- if self.target_version:
- product_name = "Python %s %s" % (self.target_version, fullname)
- else:
- product_name = "Python %s" % (fullname)
- self.db = msilib.init_database(installer_name, schema,
- product_name, msilib.gen_uuid(),
- sversion, author)
- msilib.add_tables(self.db, sequence)
- props = [('DistVersion', version)]
- email = metadata.author_email or metadata.maintainer_email
- if email:
- props.append(("ARPCONTACT", email))
- if metadata.url:
- props.append(("ARPURLINFOABOUT", metadata.url))
- if props:
- add_data(self.db, 'Property', props)
-
- self.add_find_python()
- self.add_files()
- self.add_scripts()
- self.add_ui()
- self.db.Commit()
-
- if hasattr(self.distribution, 'dist_files'):
- tup = 'bdist_msi', self.target_version or 'any', fullname
- self.distribution.dist_files.append(tup)
-
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- def add_files(self):
- db = self.db
- cab = msilib.CAB("distfiles")
- rootdir = os.path.abspath(self.bdist_dir)
-
- root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
- f = Feature(db, "Python", "Python", "Everything",
- 0, 1, directory="TARGETDIR")
-
- items = [(f, root, '')]
- for version in self.versions + [self.other_version]:
- target = "TARGETDIR" + version
- name = default = "Python" + version
- desc = "Everything"
- if version is self.other_version:
- title = "Python from another location"
- level = 2
- else:
- title = "Python %s from registry" % version
- level = 1
- f = Feature(db, name, title, desc, 1, level, directory=target)
- dir = Directory(db, cab, root, rootdir, target, default)
- items.append((f, dir, version))
- db.Commit()
-
- seen = {}
- for feature, dir, version in items:
- todo = [dir]
- while todo:
- dir = todo.pop()
- for file in os.listdir(dir.absolute):
- afile = os.path.join(dir.absolute, file)
- if os.path.isdir(afile):
- short = "%s|%s" % (dir.make_short(file), file)
- default = file + version
- newdir = Directory(db, cab, dir, file, default, short)
- todo.append(newdir)
- else:
- if not dir.component:
- dir.start_component(dir.logical, feature, 0)
- if afile not in seen:
- key = seen[afile] = dir.add_file(file)
- if file==self.install_script:
- if self.install_script_key:
- raise DistutilsOptionError(
- "Multiple files with name %s" % file)
- self.install_script_key = '[#%s]' % key
- else:
- key = seen[afile]
- add_data(self.db, "DuplicateFile",
- [(key + version, dir.component, key, None, dir.logical)])
- db.Commit()
- cab.commit(db)
-
- def add_find_python(self):
- """Adds code to the installer to compute the location of Python.
-
- Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
- registry for each version of Python.
-
- Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
- else from PYTHON.MACHINE.X.Y.
-
- Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
-
- start = 402
- for ver in self.versions:
- install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
- machine_reg = "python.machine." + ver
- user_reg = "python.user." + ver
- machine_prop = "PYTHON.MACHINE." + ver
- user_prop = "PYTHON.USER." + ver
- machine_action = "PythonFromMachine" + ver
- user_action = "PythonFromUser" + ver
- exe_action = "PythonExe" + ver
- target_dir_prop = "TARGETDIR" + ver
- exe_prop = "PYTHON" + ver
- if msilib.Win64:
- # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
- Type = 2+16
- else:
- Type = 2
- add_data(self.db, "RegLocator",
- [(machine_reg, 2, install_path, None, Type),
- (user_reg, 1, install_path, None, Type)])
- add_data(self.db, "AppSearch",
- [(machine_prop, machine_reg),
- (user_prop, user_reg)])
- add_data(self.db, "CustomAction",
- [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
- (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
- (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
- ])
- add_data(self.db, "InstallExecuteSequence",
- [(machine_action, machine_prop, start),
- (user_action, user_prop, start + 1),
- (exe_action, None, start + 2),
- ])
- add_data(self.db, "InstallUISequence",
- [(machine_action, machine_prop, start),
- (user_action, user_prop, start + 1),
- (exe_action, None, start + 2),
- ])
- add_data(self.db, "Condition",
- [("Python" + ver, 0, "NOT TARGETDIR" + ver)])
- start += 4
- assert start < 500
-
- def add_scripts(self):
- if self.install_script:
- start = 6800
- for ver in self.versions + [self.other_version]:
- install_action = "install_script." + ver
- exe_prop = "PYTHON" + ver
- add_data(self.db, "CustomAction",
- [(install_action, 50, exe_prop, self.install_script_key)])
- add_data(self.db, "InstallExecuteSequence",
- [(install_action, "&Python%s=3" % ver, start)])
- start += 1
- # XXX pre-install scripts are currently refused in finalize_options()
- # but if this feature is completed, it will also need to add
- # entries for each version as the above code does
- if self.pre_install_script:
- scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
- with open(scriptfn, "w") as f:
- # The batch file will be executed with [PYTHON], so that %1
- # is the path to the Python interpreter; %0 will be the path
- # of the batch file.
- # rem ="""
- # %1 %0
- # exit
- # """
- # <actual script>
- f.write('rem ="""\n%1 %0\nexit\n"""\n')
- with open(self.pre_install_script) as fin:
- f.write(fin.read())
- add_data(self.db, "Binary",
- [("PreInstall", msilib.Binary(scriptfn))
- ])
- add_data(self.db, "CustomAction",
- [("PreInstall", 2, "PreInstall", None)
- ])
- add_data(self.db, "InstallExecuteSequence",
- [("PreInstall", "NOT Installed", 450)])
-
-
- def add_ui(self):
- db = self.db
- x = y = 50
- w = 370
- h = 300
- title = "[ProductName] Setup"
-
- # see "Dialog Style Bits"
- modal = 3 # visible | modal
- modeless = 1 # visible
- track_disk_space = 32
-
- # UI customization properties
- add_data(db, "Property",
- # See "DefaultUIFont Property"
- [("DefaultUIFont", "DlgFont8"),
- # See "ErrorDialog Style Bit"
- ("ErrorDialog", "ErrorDlg"),
- ("Progress1", "Install"), # modified in maintenance type dlg
- ("Progress2", "installs"),
- ("MaintenanceForm_Action", "Repair"),
- # possible values: ALL, JUSTME
- ("WhichUsers", "ALL")
- ])
-
- # Fonts, see "TextStyle Table"
- add_data(db, "TextStyle",
- [("DlgFont8", "Tahoma", 9, None, 0),
- ("DlgFontBold8", "Tahoma", 8, None, 1), #bold
- ("VerdanaBold10", "Verdana", 10, None, 1),
- ("VerdanaRed9", "Verdana", 9, 255, 0),
- ])
-
- # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
- # Numbers indicate sequence; see sequence.py for how these actions integrate
- add_data(db, "InstallUISequence",
- [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
- ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
- # In the user interface, assume all-users installation if privileged.
- ("SelectFeaturesDlg", "Not Installed", 1230),
- # XXX no support for resume installations yet
- #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
- ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
- ("ProgressDlg", None, 1280)])
-
- add_data(db, 'ActionText', text.ActionText)
- add_data(db, 'UIText', text.UIText)
- #####################################################################
- # Standard dialogs: FatalError, UserExit, ExitDialog
- fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
- "Finish", "Finish", "Finish")
- fatal.title("[ProductName] Installer ended prematurely")
- fatal.back("< Back", "Finish", active = 0)
- fatal.cancel("Cancel", "Back", active = 0)
- fatal.text("Description1", 15, 70, 320, 80, 0x30003,
- "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
- fatal.text("Description2", 15, 155, 320, 20, 0x30003,
- "Click the Finish button to exit the Installer.")
- c=fatal.next("Finish", "Cancel", name="Finish")
- c.event("EndDialog", "Exit")
-
- user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
- "Finish", "Finish", "Finish")
- user_exit.title("[ProductName] Installer was interrupted")
- user_exit.back("< Back", "Finish", active = 0)
- user_exit.cancel("Cancel", "Back", active = 0)
- user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
- "[ProductName] setup was interrupted. Your system has not been modified. "
- "To install this program at a later time, please run the installation again.")
- user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
- "Click the Finish button to exit the Installer.")
- c = user_exit.next("Finish", "Cancel", name="Finish")
- c.event("EndDialog", "Exit")
-
- exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
- "Finish", "Finish", "Finish")
- exit_dialog.title("Completing the [ProductName] Installer")
- exit_dialog.back("< Back", "Finish", active = 0)
- exit_dialog.cancel("Cancel", "Back", active = 0)
- exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
- "Click the Finish button to exit the Installer.")
- c = exit_dialog.next("Finish", "Cancel", name="Finish")
- c.event("EndDialog", "Return")
-
- #####################################################################
- # Required dialog: FilesInUse, ErrorDlg
- inuse = PyDialog(db, "FilesInUse",
- x, y, w, h,
- 19, # KeepModeless|Modal|Visible
- title,
- "Retry", "Retry", "Retry", bitmap=False)
- inuse.text("Title", 15, 6, 200, 15, 0x30003,
- r"{\DlgFontBold8}Files in Use")
- inuse.text("Description", 20, 23, 280, 20, 0x30003,
- "Some files that need to be updated are currently in use.")
- inuse.text("Text", 20, 55, 330, 50, 3,
- "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
- inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
- None, None, None)
- c=inuse.back("Exit", "Ignore", name="Exit")
- c.event("EndDialog", "Exit")
- c=inuse.next("Ignore", "Retry", name="Ignore")
- c.event("EndDialog", "Ignore")
- c=inuse.cancel("Retry", "Exit", name="Retry")
- c.event("EndDialog","Retry")
-
- # See "Error Dialog". See "ICE20" for the required names of the controls.
- error = Dialog(db, "ErrorDlg",
- 50, 10, 330, 101,
- 65543, # Error|Minimize|Modal|Visible
- title,
- "ErrorText", None, None)
- error.text("ErrorText", 50,9,280,48,3, "")
- #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
- error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
- error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
- error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
- error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
- error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
- error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
- error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
-
- #####################################################################
- # Global "Query Cancel" dialog
- cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
- "No", "No", "No")
- cancel.text("Text", 48, 15, 194, 30, 3,
- "Are you sure you want to cancel [ProductName] installation?")
- #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
- # "py.ico", None, None)
- c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
- c.event("EndDialog", "Exit")
-
- c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
- c.event("EndDialog", "Return")
-
- #####################################################################
- # Global "Wait for costing" dialog
- costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
- "Return", "Return", "Return")
- costing.text("Text", 48, 15, 194, 30, 3,
- "Please wait while the installer finishes determining your disk space requirements.")
- c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
- c.event("EndDialog", "Exit")
-
- #####################################################################
- # Preparation dialog: no user input except cancellation
- prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
- "Cancel", "Cancel", "Cancel")
- prep.text("Description", 15, 70, 320, 40, 0x30003,
- "Please wait while the Installer prepares to guide you through the installation.")
- prep.title("Welcome to the [ProductName] Installer")
- c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
- c.mapping("ActionText", "Text")
- c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
- c.mapping("ActionData", "Text")
- prep.back("Back", None, active=0)
- prep.next("Next", None, active=0)
- c=prep.cancel("Cancel", None)
- c.event("SpawnDialog", "CancelDlg")
-
- #####################################################################
- # Feature (Python directory) selection
- seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
- "Next", "Next", "Cancel")
- seldlg.title("Select Python Installations")
-
- seldlg.text("Hint", 15, 30, 300, 20, 3,
- "Select the Python locations where %s should be installed."
- % self.distribution.get_fullname())
-
- seldlg.back("< Back", None, active=0)
- c = seldlg.next("Next >", "Cancel")
- order = 1
- c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
- for version in self.versions + [self.other_version]:
- order += 1
- c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
- "FEATURE_SELECTED AND &Python%s=3" % version,
- ordering=order)
- c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
- c.event("EndDialog", "Return", ordering=order + 2)
- c = seldlg.cancel("Cancel", "Features")
- c.event("SpawnDialog", "CancelDlg")
-
- c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
- "FEATURE", None, "PathEdit", None)
- c.event("[FEATURE_SELECTED]", "1")
- ver = self.other_version
- install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
- dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver
-
- c = seldlg.text("Other", 15, 200, 300, 15, 3,
- "Provide an alternate Python location")
- c.condition("Enable", install_other_cond)
- c.condition("Show", install_other_cond)
- c.condition("Disable", dont_install_other_cond)
- c.condition("Hide", dont_install_other_cond)
-
- c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
- "TARGETDIR" + ver, None, "Next", None)
- c.condition("Enable", install_other_cond)
- c.condition("Show", install_other_cond)
- c.condition("Disable", dont_install_other_cond)
- c.condition("Hide", dont_install_other_cond)
-
- #####################################################################
- # Disk cost
- cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
- "OK", "OK", "OK", bitmap=False)
- cost.text("Title", 15, 6, 200, 15, 0x30003,
- r"{\DlgFontBold8}Disk Space Requirements")
- cost.text("Description", 20, 20, 280, 20, 0x30003,
- "The disk space required for the installation of the selected features.")
- cost.text("Text", 20, 53, 330, 60, 3,
- "The highlighted volumes (if any) do not have enough disk space "
- "available for the currently selected features. You can either "
- "remove some files from the highlighted volumes, or choose to "
- "install less features onto local drive(s), or select different "
- "destination drive(s).")
- cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
- None, "{120}{70}{70}{70}{70}", None, None)
- cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
-
- #####################################################################
- # WhichUsers Dialog. Only available on NT, and for privileged users.
- # This must be run before FindRelatedProducts, because that will
- # take into account whether the previous installation was per-user
- # or per-machine. We currently don't support going back to this
- # dialog after "Next" was selected; to support this, we would need to
- # find how to reset the ALLUSERS property, and how to re-run
- # FindRelatedProducts.
-        # On Windows 9x, the ALLUSERS property is ignored on the command line
-        # and in the Property table, but according to the documentation the
-        # installer fails if a dialog attempts to set ALLUSERS.
- whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
- "AdminInstall", "Next", "Cancel")
- whichusers.title("Select whether to install [ProductName] for all users of this computer.")
- # A radio group with two options: allusers, justme
- g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
- "WhichUsers", "", "Next")
- g.add("ALL", 0, 5, 150, 20, "Install for all users")
- g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
-
- whichusers.back("Back", None, active=0)
-
- c = whichusers.next("Next >", "Cancel")
- c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
- c.event("EndDialog", "Return", ordering = 2)
-
- c = whichusers.cancel("Cancel", "AdminInstall")
- c.event("SpawnDialog", "CancelDlg")
-
- #####################################################################
- # Installation Progress dialog (modeless)
- progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
- "Cancel", "Cancel", "Cancel", bitmap=False)
- progress.text("Title", 20, 15, 200, 15, 0x30003,
- r"{\DlgFontBold8}[Progress1] [ProductName]")
- progress.text("Text", 35, 65, 300, 30, 3,
- "Please wait while the Installer [Progress2] [ProductName]. "
- "This may take several minutes.")
- progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
-
- c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
- c.mapping("ActionText", "Text")
-
- #c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
- #c.mapping("ActionData", "Text")
-
- c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
- None, "Progress done", None, None)
- c.mapping("SetProgress", "Progress")
-
- progress.back("< Back", "Next", active=False)
- progress.next("Next >", "Cancel", active=False)
- progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
-
- ###################################################################
- # Maintenance type: repair/uninstall
- maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
- "Next", "Next", "Cancel")
- maint.title("Welcome to the [ProductName] Setup Wizard")
- maint.text("BodyText", 15, 63, 330, 42, 3,
- "Select whether you want to repair or remove [ProductName].")
- g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
- "MaintenanceForm_Action", "", "Next")
- #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
- g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
- g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
-
- maint.back("< Back", None, active=False)
- c=maint.next("Finish", "Cancel")
- # Change installation: Change progress dialog to "Change", then ask
- # for feature selection
- #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
- #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
-
- # Reinstall: Change progress dialog to "Repair", then invoke reinstall
- # Also set list of reinstalled features to "ALL"
- c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
- c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
- c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
- c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
-
- # Uninstall: Change progress to "Remove", then invoke uninstall
- # Also set list of removed features to "ALL"
- c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
- c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
- c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
- c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
-
- # Close dialog when maintenance action scheduled
- c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
- #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
-
- maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
-
- def get_installer_filename(self, fullname):
- # Factored out to allow overriding in subclasses
- if self.target_version:
- base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
- self.target_version)
- else:
- base_name = "%s.%s.msi" % (fullname, self.plat_name)
- installer_name = os.path.join(self.dist_dir, base_name)
- return installer_name
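
The get_installer_filename() override above is the entire MSI naming policy. A minimal standalone sketch of the same scheme (an illustration, not part of the diff; dist_dir, fullname, plat_name and target_version stand in for the command attributes):

import os

def msi_name(dist_dir, fullname, plat_name, target_version=None):
    if target_version:
        # version-specific installer, e.g. foo-1.0.win-amd64-py3.9.msi
        base_name = "%s.%s-py%s.msi" % (fullname, plat_name, target_version)
    else:
        # version-independent installer, e.g. foo-1.0.win-amd64.msi
        base_name = "%s.%s.msi" % (fullname, plat_name)
    return os.path.join(dist_dir, base_name)

msi_name("dist", "foo-1.0", "win-amd64", "3.9")  # dist/foo-1.0.win-amd64-py3.9.msi
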
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_rpm.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_rpm.py
deleted file mode 100644
index 550cbfa1e28..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_rpm.py
+++ /dev/null
@@ -1,579 +0,0 @@
-"""distutils.command.bdist_rpm
-
-Implements the Distutils 'bdist_rpm' command (create RPM source and binary
-distributions)."""
-
-import subprocess, sys, os
-from distutils.core import Command
-from distutils.debug import DEBUG
-from distutils.file_util import write_file
-from distutils.errors import *
-from distutils.sysconfig import get_python_version
-from distutils import log
-
-class bdist_rpm(Command):
-
- description = "create an RPM distribution"
-
- user_options = [
- ('bdist-base=', None,
- "base directory for creating built distributions"),
- ('rpm-base=', None,
- "base directory for creating RPMs (defaults to \"rpm\" under "
- "--bdist-base; must be specified for RPM 2)"),
- ('dist-dir=', 'd',
- "directory to put final RPM files in "
- "(and .spec files if --spec-only)"),
- ('python=', None,
- "path to Python interpreter to hard-code in the .spec file "
- "(default: \"python\")"),
- ('fix-python', None,
- "hard-code the exact path to the current Python interpreter in "
- "the .spec file"),
- ('spec-only', None,
- "only regenerate spec file"),
- ('source-only', None,
- "only generate source RPM"),
- ('binary-only', None,
- "only generate binary RPM"),
- ('use-bzip2', None,
- "use bzip2 instead of gzip to create source distribution"),
-
- # More meta-data: too RPM-specific to put in the setup script,
- # but needs to go in the .spec file -- so we make these options
- # to "bdist_rpm". The idea is that packagers would put this
- # info in setup.cfg, although they are of course free to
- # supply it on the command line.
- ('distribution-name=', None,
- "name of the (Linux) distribution to which this "
- "RPM applies (*not* the name of the module distribution!)"),
- ('group=', None,
- "package classification [default: \"Development/Libraries\"]"),
- ('release=', None,
- "RPM release number"),
- ('serial=', None,
- "RPM serial number"),
- ('vendor=', None,
-         "RPM \"vendor\" (e.g. \"Joe Blow <[email protected]>\") "
- "[default: maintainer or author from setup script]"),
- ('packager=', None,
-         "RPM packager (e.g. \"Jane Doe <[email protected]>\") "
- "[default: vendor]"),
- ('doc-files=', None,
- "list of documentation files (space or comma-separated)"),
- ('changelog=', None,
- "RPM changelog"),
- ('icon=', None,
- "name of icon file"),
- ('provides=', None,
- "capabilities provided by this package"),
- ('requires=', None,
- "capabilities required by this package"),
- ('conflicts=', None,
- "capabilities which conflict with this package"),
- ('build-requires=', None,
- "capabilities required to build this package"),
- ('obsoletes=', None,
- "capabilities made obsolete by this package"),
- ('no-autoreq', None,
- "do not automatically calculate dependencies"),
-
- # Actions to take when building RPM
- ('keep-temp', 'k',
- "don't clean up RPM build directory"),
- ('no-keep-temp', None,
- "clean up RPM build directory [default]"),
- ('use-rpm-opt-flags', None,
- "compile with RPM_OPT_FLAGS when building from source RPM"),
- ('no-rpm-opt-flags', None,
- "do not pass any RPM CFLAGS to compiler"),
- ('rpm3-mode', None,
- "RPM 3 compatibility mode (default)"),
- ('rpm2-mode', None,
- "RPM 2 compatibility mode"),
-
- # Add the hooks necessary for specifying custom scripts
- ('prep-script=', None,
- "Specify a script for the PREP phase of RPM building"),
- ('build-script=', None,
- "Specify a script for the BUILD phase of RPM building"),
-
- ('pre-install=', None,
- "Specify a script for the pre-INSTALL phase of RPM building"),
- ('install-script=', None,
- "Specify a script for the INSTALL phase of RPM building"),
- ('post-install=', None,
- "Specify a script for the post-INSTALL phase of RPM building"),
-
- ('pre-uninstall=', None,
- "Specify a script for the pre-UNINSTALL phase of RPM building"),
- ('post-uninstall=', None,
- "Specify a script for the post-UNINSTALL phase of RPM building"),
-
- ('clean-script=', None,
- "Specify a script for the CLEAN phase of RPM building"),
-
- ('verify-script=', None,
- "Specify a script for the VERIFY phase of the RPM build"),
-
- # Allow a packager to explicitly force an architecture
- ('force-arch=', None,
- "Force an architecture onto the RPM build process"),
-
- ('quiet', 'q',
- "Run the INSTALL phase of RPM building in quiet mode"),
- ]
-
- boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
- 'no-autoreq', 'quiet']
-
- negative_opt = {'no-keep-temp': 'keep-temp',
- 'no-rpm-opt-flags': 'use-rpm-opt-flags',
- 'rpm2-mode': 'rpm3-mode'}
-
-
- def initialize_options(self):
- self.bdist_base = None
- self.rpm_base = None
- self.dist_dir = None
- self.python = None
- self.fix_python = None
- self.spec_only = None
- self.binary_only = None
- self.source_only = None
- self.use_bzip2 = None
-
- self.distribution_name = None
- self.group = None
- self.release = None
- self.serial = None
- self.vendor = None
- self.packager = None
- self.doc_files = None
- self.changelog = None
- self.icon = None
-
- self.prep_script = None
- self.build_script = None
- self.install_script = None
- self.clean_script = None
- self.verify_script = None
- self.pre_install = None
- self.post_install = None
- self.pre_uninstall = None
- self.post_uninstall = None
- self.prep = None
- self.provides = None
- self.requires = None
- self.conflicts = None
- self.build_requires = None
- self.obsoletes = None
-
- self.keep_temp = 0
- self.use_rpm_opt_flags = 1
- self.rpm3_mode = 1
- self.no_autoreq = 0
-
- self.force_arch = None
- self.quiet = 0
-
- def finalize_options(self):
- self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
- if self.rpm_base is None:
- if not self.rpm3_mode:
- raise DistutilsOptionError(
- "you must specify --rpm-base in RPM 2 mode")
- self.rpm_base = os.path.join(self.bdist_base, "rpm")
-
- if self.python is None:
- if self.fix_python:
- self.python = sys.executable
- else:
- self.python = "python3"
- elif self.fix_python:
- raise DistutilsOptionError(
- "--python and --fix-python are mutually exclusive options")
-
- if os.name != 'posix':
- raise DistutilsPlatformError("don't know how to create RPM "
- "distributions on platform %s" % os.name)
- if self.binary_only and self.source_only:
- raise DistutilsOptionError(
- "cannot supply both '--source-only' and '--binary-only'")
-
- # don't pass CFLAGS to pure python distributions
- if not self.distribution.has_ext_modules():
- self.use_rpm_opt_flags = 0
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
- self.finalize_package_data()
-
- def finalize_package_data(self):
- self.ensure_string('group', "Development/Libraries")
- self.ensure_string('vendor',
- "%s <%s>" % (self.distribution.get_contact(),
- self.distribution.get_contact_email()))
- self.ensure_string('packager')
- self.ensure_string_list('doc_files')
- if isinstance(self.doc_files, list):
- for readme in ('README', 'README.txt'):
- if os.path.exists(readme) and readme not in self.doc_files:
- self.doc_files.append(readme)
-
- self.ensure_string('release', "1")
- self.ensure_string('serial') # should it be an int?
-
- self.ensure_string('distribution_name')
-
- self.ensure_string('changelog')
- # Format changelog correctly
- self.changelog = self._format_changelog(self.changelog)
-
- self.ensure_filename('icon')
-
- self.ensure_filename('prep_script')
- self.ensure_filename('build_script')
- self.ensure_filename('install_script')
- self.ensure_filename('clean_script')
- self.ensure_filename('verify_script')
- self.ensure_filename('pre_install')
- self.ensure_filename('post_install')
- self.ensure_filename('pre_uninstall')
- self.ensure_filename('post_uninstall')
-
- # XXX don't forget we punted on summaries and descriptions -- they
- # should be handled here eventually!
-
- # Now *this* is some meta-data that belongs in the setup script...
- self.ensure_string_list('provides')
- self.ensure_string_list('requires')
- self.ensure_string_list('conflicts')
- self.ensure_string_list('build_requires')
- self.ensure_string_list('obsoletes')
-
- self.ensure_string('force_arch')
-
- def run(self):
- if DEBUG:
- print("before _get_package_data():")
- print("vendor =", self.vendor)
- print("packager =", self.packager)
- print("doc_files =", self.doc_files)
- print("changelog =", self.changelog)
-
- # make directories
- if self.spec_only:
- spec_dir = self.dist_dir
- self.mkpath(spec_dir)
- else:
- rpm_dir = {}
- for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
- rpm_dir[d] = os.path.join(self.rpm_base, d)
- self.mkpath(rpm_dir[d])
- spec_dir = rpm_dir['SPECS']
-
-        # Spec file goes into 'dist_dir' if '--spec-only' specified,
- # build/rpm.<plat> otherwise.
- spec_path = os.path.join(spec_dir,
- "%s.spec" % self.distribution.get_name())
- self.execute(write_file,
- (spec_path,
- self._make_spec_file()),
- "writing '%s'" % spec_path)
-
- if self.spec_only: # stop if requested
- return
-
- # Make a source distribution and copy to SOURCES directory with
- # optional icon.
- saved_dist_files = self.distribution.dist_files[:]
- sdist = self.reinitialize_command('sdist')
- if self.use_bzip2:
- sdist.formats = ['bztar']
- else:
- sdist.formats = ['gztar']
- self.run_command('sdist')
- self.distribution.dist_files = saved_dist_files
-
- source = sdist.get_archive_files()[0]
- source_dir = rpm_dir['SOURCES']
- self.copy_file(source, source_dir)
-
- if self.icon:
- if os.path.exists(self.icon):
- self.copy_file(self.icon, source_dir)
- else:
- raise DistutilsFileError(
- "icon file '%s' does not exist" % self.icon)
-
- # build package
- log.info("building RPMs")
- rpm_cmd = ['rpmbuild']
-
- if self.source_only: # what kind of RPMs?
- rpm_cmd.append('-bs')
- elif self.binary_only:
- rpm_cmd.append('-bb')
- else:
- rpm_cmd.append('-ba')
- rpm_cmd.extend(['--define', '__python %s' % self.python])
- if self.rpm3_mode:
- rpm_cmd.extend(['--define',
- '_topdir %s' % os.path.abspath(self.rpm_base)])
- if not self.keep_temp:
- rpm_cmd.append('--clean')
-
- if self.quiet:
- rpm_cmd.append('--quiet')
-
- rpm_cmd.append(spec_path)
- # Determine the binary rpm names that should be built out of this spec
- # file
- # Note that some of these may not be really built (if the file
- # list is empty)
- nvr_string = "%{name}-%{version}-%{release}"
- src_rpm = nvr_string + ".src.rpm"
- non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
- q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
- src_rpm, non_src_rpm, spec_path)
-
- out = os.popen(q_cmd)
- try:
- binary_rpms = []
- source_rpm = None
- while True:
- line = out.readline()
- if not line:
- break
-                fields = line.strip().split()
-                assert len(fields) == 2
-                binary_rpms.append(fields[1])
-                # The source rpm is named after the first entry in the spec file
-                if source_rpm is None:
-                    source_rpm = fields[0]
-
- status = out.close()
- if status:
- raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
-
- finally:
- out.close()
-
- self.spawn(rpm_cmd)
-
- if not self.dry_run:
- if self.distribution.has_ext_modules():
- pyversion = get_python_version()
- else:
- pyversion = 'any'
-
- if not self.binary_only:
- srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
- assert(os.path.exists(srpm))
- self.move_file(srpm, self.dist_dir)
- filename = os.path.join(self.dist_dir, source_rpm)
- self.distribution.dist_files.append(
- ('bdist_rpm', pyversion, filename))
-
- if not self.source_only:
- for rpm in binary_rpms:
- rpm = os.path.join(rpm_dir['RPMS'], rpm)
- if os.path.exists(rpm):
- self.move_file(rpm, self.dist_dir)
- filename = os.path.join(self.dist_dir,
- os.path.basename(rpm))
- self.distribution.dist_files.append(
- ('bdist_rpm', pyversion, filename))
-
- def _dist_path(self, path):
- return os.path.join(self.dist_dir, os.path.basename(path))
-
- def _make_spec_file(self):
- """Generate the text of an RPM spec file and return it as a
- list of strings (one per line).
- """
- # definitions and headers
- spec_file = [
- '%define name ' + self.distribution.get_name(),
- '%define version ' + self.distribution.get_version().replace('-','_'),
- '%define unmangled_version ' + self.distribution.get_version(),
- '%define release ' + self.release.replace('-','_'),
- '',
- 'Summary: ' + self.distribution.get_description(),
- ]
-
-        # Workaround for #14443, which affects some RPM-based systems such as
-        # RHEL6 (and probably derivatives)
- vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}')
- # Generate a potential replacement value for __os_install_post (whilst
- # normalizing the whitespace to simplify the test for whether the
- # invocation of brp-python-bytecompile passes in __python):
- vendor_hook = '\n'.join([' %s \\' % line.strip()
- for line in vendor_hook.splitlines()])
- problem = "brp-python-bytecompile \\\n"
- fixed = "brp-python-bytecompile %{__python} \\\n"
- fixed_hook = vendor_hook.replace(problem, fixed)
- if fixed_hook != vendor_hook:
- spec_file.append('# Workaround for http://bugs.python.org/issue14443')
- spec_file.append('%define __os_install_post ' + fixed_hook + '\n')
-
- # put locale summaries into spec file
- # XXX not supported for now (hard to put a dictionary
- # in a config file -- arg!)
- #for locale in self.summaries.keys():
- # spec_file.append('Summary(%s): %s' % (locale,
- # self.summaries[locale]))
-
- spec_file.extend([
- 'Name: %{name}',
- 'Version: %{version}',
- 'Release: %{release}',])
-
- # XXX yuck! this filename is available from the "sdist" command,
- # but only after it has run: and we create the spec file before
- # running "sdist", in case of --spec-only.
- if self.use_bzip2:
- spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
- else:
- spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')
-
- spec_file.extend([
- 'License: ' + self.distribution.get_license(),
- 'Group: ' + self.group,
- 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
- 'Prefix: %{_prefix}', ])
-
- if not self.force_arch:
- # noarch if no extension modules
- if not self.distribution.has_ext_modules():
- spec_file.append('BuildArch: noarch')
- else:
-            spec_file.append('BuildArch: %s' % self.force_arch)
-
- for field in ('Vendor',
- 'Packager',
- 'Provides',
- 'Requires',
- 'Conflicts',
- 'Obsoletes',
- ):
- val = getattr(self, field.lower())
- if isinstance(val, list):
- spec_file.append('%s: %s' % (field, ' '.join(val)))
- elif val is not None:
- spec_file.append('%s: %s' % (field, val))
-
-
- if self.distribution.get_url() != 'UNKNOWN':
- spec_file.append('Url: ' + self.distribution.get_url())
-
- if self.distribution_name:
- spec_file.append('Distribution: ' + self.distribution_name)
-
- if self.build_requires:
- spec_file.append('BuildRequires: ' +
- ' '.join(self.build_requires))
-
- if self.icon:
- spec_file.append('Icon: ' + os.path.basename(self.icon))
-
- if self.no_autoreq:
- spec_file.append('AutoReq: 0')
-
- spec_file.extend([
- '',
- '%description',
- self.distribution.get_long_description()
- ])
-
- # put locale descriptions into spec file
- # XXX again, suppressed because config file syntax doesn't
- # easily support this ;-(
- #for locale in self.descriptions.keys():
- # spec_file.extend([
- # '',
- # '%description -l ' + locale,
- # self.descriptions[locale],
- # ])
-
- # rpm scripts
- # figure out default build script
-        def_setup_call = "%s %s" % (self.python, os.path.basename(sys.argv[0]))
- def_build = "%s build" % def_setup_call
- if self.use_rpm_opt_flags:
- def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
-
- # insert contents of files
-
- # XXX this is kind of misleading: user-supplied options are files
- # that we open and interpolate into the spec file, but the defaults
- # are just text that we drop in as-is. Hmmm.
-
- install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT '
- '--record=INSTALLED_FILES') % def_setup_call
-
- script_options = [
- ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
- ('build', 'build_script', def_build),
- ('install', 'install_script', install_cmd),
- ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
- ('verifyscript', 'verify_script', None),
- ('pre', 'pre_install', None),
- ('post', 'post_install', None),
- ('preun', 'pre_uninstall', None),
- ('postun', 'post_uninstall', None),
- ]
-
- for (rpm_opt, attr, default) in script_options:
- # Insert contents of file referred to, if no file is referred to
- # use 'default' as contents of script
- val = getattr(self, attr)
- if val or default:
- spec_file.extend([
- '',
- '%' + rpm_opt,])
- if val:
- with open(val) as f:
- spec_file.extend(f.read().split('\n'))
- else:
- spec_file.append(default)
-
-
- # files section
- spec_file.extend([
- '',
- '%files -f INSTALLED_FILES',
- '%defattr(-,root,root)',
- ])
-
- if self.doc_files:
- spec_file.append('%doc ' + ' '.join(self.doc_files))
-
- if self.changelog:
- spec_file.extend([
- '',
- '%changelog',])
- spec_file.extend(self.changelog)
-
- return spec_file
-
- def _format_changelog(self, changelog):
- """Format the changelog correctly and convert it to a list of strings
- """
- if not changelog:
- return changelog
- new_changelog = []
- for line in changelog.strip().split('\n'):
- line = line.strip()
- if line[0] == '*':
- new_changelog.extend(['', line])
- elif line[0] == '-':
- new_changelog.append(line)
- else:
- new_changelog.append(' ' + line)
-
- # strip trailing newline inserted by first changelog entry
- if not new_changelog[0]:
- del new_changelog[0]
-
- return new_changelog
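
run() above predicts the rpm file names a spec will produce by shelling out to rpm through os.popen(). A minimal sketch of the same query written with subprocess instead (an illustration, not part of the diff; it assumes rpm is on PATH and that spec_path names an existing .spec file):

import subprocess

def query_rpm_names(spec_path):
    nvr = "%{name}-%{version}-%{release}"
    # same query format as the deleted code: "<source rpm> <binary rpm>\n"
    fmt = nvr + ".src.rpm %{arch}/" + nvr + ".%{arch}.rpm\\n"
    out = subprocess.check_output(
        ["rpm", "-q", "--qf", fmt, "--specfile", spec_path], text=True)
    source_rpm, binary_rpms = None, []
    for line in out.splitlines():
        src, binary = line.split()
        binary_rpms.append(binary)
        if source_rpm is None:
            # the source rpm is named after the first entry in the spec file
            source_rpm = src
    return source_rpm, binary_rpms
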
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_wininst.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_wininst.py
deleted file mode 100644
index 0e9ddaa2141..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/bdist_wininst.py
+++ /dev/null
@@ -1,377 +0,0 @@
-"""distutils.command.bdist_wininst
-
-Implements the Distutils 'bdist_wininst' command: create a windows installer
-exe-program."""
-
-import os
-import sys
-import warnings
-from distutils.core import Command
-from distutils.util import get_platform
-from distutils.dir_util import remove_tree
-from distutils.errors import *
-from distutils.sysconfig import get_python_version
-from distutils import log
-
-class bdist_wininst(Command):
-
- description = "create an executable installer for MS Windows"
-
- user_options = [('bdist-dir=', None,
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p',
- "platform name to embed in generated filenames "
- "(default: %s)" % get_platform()),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('target-version=', None,
- "require a specific python version" +
- " on the target system"),
- ('no-target-compile', 'c',
- "do not compile .py to .pyc on the target system"),
- ('no-target-optimize', 'o',
- "do not compile .py to .pyo (optimized) "
- "on the target system"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('bitmap=', 'b',
- "bitmap to use for the installer instead of python-powered logo"),
- ('title=', 't',
- "title to display on the installer background instead of default"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('install-script=', None,
- "basename of installation script to be run after "
- "installation or before deinstallation"),
- ('pre-install-script=', None,
- "Fully qualified filename of a script to be run before "
- "any files are installed. This script need not be in the "
- "distribution"),
- ('user-access-control=', None,
- "specify Vista's UAC handling - 'none'/default=no "
- "handling, 'auto'=use UAC if target Python installed for "
- "all users, 'force'=always use UAC"),
- ]
-
- boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
- 'skip-build']
-
- # bpo-10945: bdist_wininst requires mbcs encoding only available on Windows
- _unsupported = (sys.platform != "win32")
-
- def __init__(self, *args, **kw):
- super().__init__(*args, **kw)
- warnings.warn("bdist_wininst command is deprecated since Python 3.8, "
- "use bdist_wheel (wheel packages) instead",
- DeprecationWarning, 2)
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.keep_temp = 0
- self.no_target_compile = 0
- self.no_target_optimize = 0
- self.target_version = None
- self.dist_dir = None
- self.bitmap = None
- self.title = None
- self.skip_build = None
- self.install_script = None
- self.pre_install_script = None
- self.user_access_control = None
-
-
- def finalize_options(self):
- self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
-
- if self.bdist_dir is None:
- if self.skip_build and self.plat_name:
- # If build is skipped and plat_name is overridden, bdist will
- # not see the correct 'plat_name' - so set that up manually.
- bdist = self.distribution.get_command_obj('bdist')
- bdist.plat_name = self.plat_name
- # next the command will be initialized using that name
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'wininst')
-
- if not self.target_version:
- self.target_version = ""
-
- if not self.skip_build and self.distribution.has_ext_modules():
- short_version = get_python_version()
- if self.target_version and self.target_version != short_version:
- raise DistutilsOptionError(
- "target version can only be %s, or the '--skip-build'" \
- " option must be specified" % (short_version,))
- self.target_version = short_version
-
- self.set_undefined_options('bdist',
- ('dist_dir', 'dist_dir'),
- ('plat_name', 'plat_name'),
- )
-
- if self.install_script:
- for script in self.distribution.scripts:
- if self.install_script == os.path.basename(script):
- break
- else:
- raise DistutilsOptionError(
- "install_script '%s' not found in scripts"
- % self.install_script)
-
- def run(self):
- if (sys.platform != "win32" and
- (self.distribution.has_ext_modules() or
- self.distribution.has_c_libraries())):
- raise DistutilsPlatformError \
- ("distribution contains extensions and/or C libraries; "
- "must be compiled on a Windows 32 platform")
-
- if not self.skip_build:
- self.run_command('build')
-
- install = self.reinitialize_command('install', reinit_subcommands=1)
- install.root = self.bdist_dir
- install.skip_build = self.skip_build
- install.warn_dir = 0
- install.plat_name = self.plat_name
-
- install_lib = self.reinitialize_command('install_lib')
- # we do not want to include pyc or pyo files
- install_lib.compile = 0
- install_lib.optimize = 0
-
- if self.distribution.has_ext_modules():
- # If we are building an installer for a Python version other
- # than the one we are currently running, then we need to ensure
- # our build_lib reflects the other Python version rather than ours.
- # Note that for target_version!=sys.version, we must have skipped the
- # build step, so there is no issue with enforcing the build of this
- # version.
- target_version = self.target_version
- if not target_version:
- assert self.skip_build, "Should have already checked this"
- target_version = '%d.%d' % sys.version_info[:2]
- plat_specifier = ".%s-%s" % (self.plat_name, target_version)
- build = self.get_finalized_command('build')
- build.build_lib = os.path.join(build.build_base,
- 'lib' + plat_specifier)
-
- # Use a custom scheme for the zip-file, because we have to decide
- # at installation time which scheme to use.
- for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
- value = key.upper()
- if key == 'headers':
- value = value + '/Include/$dist_name'
- setattr(install,
- 'install_' + key,
- value)
-
- log.info("installing to %s", self.bdist_dir)
- install.ensure_finalized()
-
- # avoid warning of 'install_lib' about installing
- # into a directory not in sys.path
- sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
-
- install.run()
-
- del sys.path[0]
-
- # And make an archive relative to the root of the
- # pseudo-installation tree.
- from tempfile import mktemp
- archive_basename = mktemp()
- fullname = self.distribution.get_fullname()
- arcname = self.make_archive(archive_basename, "zip",
- root_dir=self.bdist_dir)
- # create an exe containing the zip-file
- self.create_exe(arcname, fullname, self.bitmap)
- if self.distribution.has_ext_modules():
- pyversion = get_python_version()
- else:
- pyversion = 'any'
- self.distribution.dist_files.append(('bdist_wininst', pyversion,
- self.get_installer_filename(fullname)))
- # remove the zip-file again
- log.debug("removing temporary file '%s'", arcname)
- os.remove(arcname)
-
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- def get_inidata(self):
- # Return data describing the installation.
- lines = []
- metadata = self.distribution.metadata
-
- # Write the [metadata] section.
- lines.append("[metadata]")
-
- # 'info' will be displayed in the installer's dialog box,
- # describing the items to be installed.
- info = (metadata.long_description or '') + '\n'
-
- # Escape newline characters
- def escape(s):
- return s.replace("\n", "\\n")
-
- for name in ["author", "author_email", "description", "maintainer",
- "maintainer_email", "name", "url", "version"]:
- data = getattr(metadata, name, "")
- if data:
- info = info + ("\n %s: %s" % \
- (name.capitalize(), escape(data)))
- lines.append("%s=%s" % (name, escape(data)))
-
- # The [setup] section contains entries controlling
- # the installer runtime.
- lines.append("\n[Setup]")
- if self.install_script:
- lines.append("install_script=%s" % self.install_script)
- lines.append("info=%s" % escape(info))
- lines.append("target_compile=%d" % (not self.no_target_compile))
- lines.append("target_optimize=%d" % (not self.no_target_optimize))
- if self.target_version:
- lines.append("target_version=%s" % self.target_version)
- if self.user_access_control:
- lines.append("user_access_control=%s" % self.user_access_control)
-
- title = self.title or self.distribution.get_fullname()
- lines.append("title=%s" % escape(title))
- import time
- import distutils
- build_info = "Built %s with distutils-%s" % \
- (time.ctime(time.time()), distutils.__version__)
- lines.append("build_info=%s" % build_info)
- return "\n".join(lines)
-
- def create_exe(self, arcname, fullname, bitmap=None):
- import struct
-
- self.mkpath(self.dist_dir)
-
- cfgdata = self.get_inidata()
-
- installer_name = self.get_installer_filename(fullname)
- self.announce("creating %s" % installer_name)
-
- if bitmap:
- with open(bitmap, "rb") as f:
- bitmapdata = f.read()
- bitmaplen = len(bitmapdata)
- else:
- bitmaplen = 0
-
- with open(installer_name, "wb") as file:
- file.write(self.get_exe_bytes())
- if bitmap:
- file.write(bitmapdata)
-
-            # Encode cfgdata to bytes using the Windows "mbcs" codec
- if isinstance(cfgdata, str):
- cfgdata = cfgdata.encode("mbcs")
-
- # Append the pre-install script
- cfgdata = cfgdata + b"\0"
- if self.pre_install_script:
- # We need to normalize newlines, so we open in text mode and
- # convert back to bytes. "latin-1" simply avoids any possible
- # failures.
- with open(self.pre_install_script, "r",
- encoding="latin-1") as script:
- script_data = script.read().encode("latin-1")
- cfgdata = cfgdata + script_data + b"\n\0"
- else:
- # empty pre-install script
- cfgdata = cfgdata + b"\0"
- file.write(cfgdata)
-
- # The 'magic number' 0x1234567B is used to make sure that the
- # binary layout of 'cfgdata' is what the wininst.exe binary
- # expects. If the layout changes, increment that number, make
- # the corresponding changes to the wininst.exe sources, and
- # recompile them.
- header = struct.pack("<iii",
- 0x1234567B, # tag
- len(cfgdata), # length
- bitmaplen, # number of bytes in bitmap
- )
- file.write(header)
- with open(arcname, "rb") as f:
- file.write(f.read())
-
- def get_installer_filename(self, fullname):
- # Factored out to allow overriding in subclasses
- if self.target_version:
- # if we create an installer for a specific python version,
- # it's better to include this in the name
- installer_name = os.path.join(self.dist_dir,
- "%s.%s-py%s.exe" %
- (fullname, self.plat_name, self.target_version))
- else:
- installer_name = os.path.join(self.dist_dir,
- "%s.%s.exe" % (fullname, self.plat_name))
- return installer_name
-
- def get_exe_bytes(self):
- # If a target-version other than the current version has been
- # specified, then using the MSVC version from *this* build is no good.
- # Without actually finding and executing the target version and parsing
- # its sys.version, we just hard-code our knowledge of old versions.
- # NOTE: Possible alternative is to allow "--target-version" to
- # specify a Python executable rather than a simple version string.
- # We can then execute this program to obtain any info we need, such
- # as the real sys.version string for the build.
- cur_version = get_python_version()
-
- # If the target version is *later* than us, then we assume they
- # use what we use
- # string compares seem wrong, but are what sysconfig.py itself uses
- if self.target_version and self.target_version < cur_version:
- if self.target_version < "2.4":
- bv = '6.0'
- elif self.target_version == "2.4":
- bv = '7.1'
- elif self.target_version == "2.5":
- bv = '8.0'
- elif self.target_version <= "3.2":
- bv = '9.0'
- elif self.target_version <= "3.4":
- bv = '10.0'
- else:
- bv = '14.0'
- else:
- # for current version - use authoritative check.
- try:
- from msvcrt import CRT_ASSEMBLY_VERSION
- except ImportError:
- # cross-building, so assume the latest version
- bv = '14.0'
- else:
- # as far as we know, CRT is binary compatible based on
- # the first field, so assume 'x.0' until proven otherwise
- major = CRT_ASSEMBLY_VERSION.partition('.')[0]
- bv = major + '.0'
-
-
- # wininst-x.y.exe is in the same directory as this file
- directory = os.path.dirname(__file__)
- # we must use a wininst-x.y.exe built with the same C compiler
- # used for python. XXX What about mingw, borland, and so on?
-
- # if plat_name starts with "win" but is not "win32"
- # we want to strip "win" and leave the rest (e.g. -amd64)
- # for all other cases, we don't want any suffix
- if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
- sfix = self.plat_name[3:]
- else:
- sfix = ''
-
- filename = os.path.join(directory, "wininst-%s%s.exe" % (bv, sfix))
- f = open(filename, "rb")
- try:
- return f.read()
- finally:
- f.close()
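
create_exe() above fixes the on-disk layout of the generated installer: stub exe, optional bitmap, NUL-terminated config block, a 12-byte trailer describing those blocks, then the appended zip archive. A minimal sketch of that layout with dummy bytes (an illustration, not part of the diff; the real command appends the zip of the pseudo-installation tree to a prebuilt wininst-x.y.exe stub):

import struct

stub = b"MZ..."                      # wininst-x.y.exe stub (dummy bytes here)
bitmap = b""                         # optional bitmap data, empty here
cfgdata = b"[metadata]\nname=foo\n"  # mbcs-encoded ini data in the real code
cfgdata += b"\0"                     # terminator after the ini data
cfgdata += b"\0"                     # empty pre-install script
header = struct.pack("<iii",
                     0x1234567B,     # magic tag checked by the exe stub
                     len(cfgdata),   # length of the config block
                     len(bitmap))    # number of bytes in the bitmap
archive = b"PK..."                   # zip archive of the install tree (dummy)

installer = stub + bitmap + cfgdata + header + archive
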
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/build.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/build.py
deleted file mode 100644
index 4355a63235c..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/build.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""distutils.command.build
-
-Implements the Distutils 'build' command."""
-
-import sys, os
-from distutils.core import Command
-from distutils.errors import DistutilsOptionError
-from distutils.util import get_platform
-
-
-def show_compilers():
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-
-class build(Command):
-
- description = "build everything needed to install"
-
- user_options = [
- ('build-base=', 'b',
- "base directory for build library"),
- ('build-purelib=', None,
- "build directory for platform-neutral distributions"),
- ('build-platlib=', None,
- "build directory for platform-specific distributions"),
- ('build-lib=', None,
-     "build directory for all distributions (defaults to either " +
-     "build-purelib or build-platlib)"),
- ('build-scripts=', None,
- "build directory for scripts"),
- ('build-temp=', 't',
- "temporary build directory"),
- ('plat-name=', 'p',
- "platform name to build for, if supported "
- "(default: %s)" % get_platform()),
- ('compiler=', 'c',
- "specify the compiler type"),
- ('parallel=', 'j',
- "number of parallel build jobs"),
- ('debug', 'g',
- "compile extensions and libraries with debugging information"),
- ('force', 'f',
- "forcibly build everything (ignore file timestamps)"),
- ('executable=', 'e',
- "specify final destination interpreter path (build.py)"),
- ]
-
- boolean_options = ['debug', 'force']
-
- help_options = [
- ('help-compiler', None,
- "list available compilers", show_compilers),
- ]
-
- def initialize_options(self):
- self.build_base = 'build'
- # these are decided only after 'build_base' has its final value
- # (unless overridden by the user or client)
- self.build_purelib = None
- self.build_platlib = None
- self.build_lib = None
- self.build_temp = None
- self.build_scripts = None
- self.compiler = None
- self.plat_name = None
- self.debug = None
- self.force = 0
- self.executable = None
- self.parallel = None
-
- def finalize_options(self):
- if self.plat_name is None:
- self.plat_name = get_platform()
- else:
-            # plat-name is only supported on Windows (other platforms are
-            # configured via ./configure flags, if at all), so avoid
-            # misleading users on other platforms.
- if os.name != 'nt':
- raise DistutilsOptionError(
- "--plat-name only supported on Windows (try "
- "using './configure --help' on your platform)")
-
- plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2])
-
-        # Make it so a regular build and a --with-pydebug build don't
-        # share the same build directories. Doing so confuses the build
-        # process for C modules.
- if hasattr(sys, 'gettotalrefcount'):
- plat_specifier += '-pydebug'
-
- # 'build_purelib' and 'build_platlib' just default to 'lib' and
- # 'lib.<plat>' under the base build directory. We only use one of
- # them for a given distribution, though --
- if self.build_purelib is None:
- self.build_purelib = os.path.join(self.build_base, 'lib')
- if self.build_platlib is None:
- self.build_platlib = os.path.join(self.build_base,
- 'lib' + plat_specifier)
-
- # 'build_lib' is the actual directory that we will use for this
- # particular module distribution -- if user didn't supply it, pick
- # one of 'build_purelib' or 'build_platlib'.
- if self.build_lib is None:
- if self.distribution.has_ext_modules():
- self.build_lib = self.build_platlib
- else:
- self.build_lib = self.build_purelib
-
- # 'build_temp' -- temporary directory for compiler turds,
- # "build/temp.<plat>"
- if self.build_temp is None:
- self.build_temp = os.path.join(self.build_base,
- 'temp' + plat_specifier)
- if self.build_scripts is None:
- self.build_scripts = os.path.join(self.build_base,
- 'scripts-%d.%d' % sys.version_info[:2])
-
- if self.executable is None and sys.executable:
- self.executable = os.path.normpath(sys.executable)
-
- if isinstance(self.parallel, str):
- try:
- self.parallel = int(self.parallel)
- except ValueError:
- raise DistutilsOptionError("parallel should be an integer")
-
- def run(self):
- # Run all relevant sub-commands. This will be some subset of:
- # - build_py - pure Python modules
- # - build_clib - standalone C libraries
- # - build_ext - Python extensions
- # - build_scripts - (Python) scripts
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
-
- # -- Predicates for the sub-command list ---------------------------
-
- def has_pure_modules(self):
- return self.distribution.has_pure_modules()
-
- def has_c_libraries(self):
- return self.distribution.has_c_libraries()
-
- def has_ext_modules(self):
- return self.distribution.has_ext_modules()
-
- def has_scripts(self):
- return self.distribution.has_scripts()
-
-
- sub_commands = [('build_py', has_pure_modules),
- ('build_clib', has_c_libraries),
- ('build_ext', has_ext_modules),
- ('build_scripts', has_scripts),
- ]
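
finalize_options() above derives every build directory from build_base plus a platform/version specifier. A minimal sketch of the resulting names (an illustration, not part of the diff; actual values depend on your platform and interpreter, and the -pydebug suffix is omitted):

import os
import sys
from distutils.util import get_platform

build_base = 'build'
plat_specifier = ".%s-%d.%d" % (get_platform(), *sys.version_info[:2])

print(os.path.join(build_base, 'lib'))                   # build_purelib
print(os.path.join(build_base, 'lib' + plat_specifier))  # build_platlib
print(os.path.join(build_base, 'temp' + plat_specifier)) # build_temp
print(os.path.join(build_base,
                   'scripts-%d.%d' % sys.version_info[:2]))  # build_scripts
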
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_clib.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/build_clib.py
deleted file mode 100644
index 3e20ef23cd8..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_clib.py
+++ /dev/null
@@ -1,209 +0,0 @@
-"""distutils.command.build_clib
-
-Implements the Distutils 'build_clib' command, to build a C/C++ library
-that is included in the module distribution and needed by an extension
-module."""
-
-
-# XXX this module has *lots* of code ripped-off quite transparently from
-# build_ext.py -- not surprisingly really, as the work required to build
-# a static library from a collection of C source files is not really all
-# that different from what's required to build a shared object file from
-# a collection of C source files. Nevertheless, I haven't done the
-# necessary refactoring to account for the overlap in code between the
-# two modules, mainly because a number of subtle details changed in the
-# cut 'n paste. Sigh.
-
-import os
-from distutils.core import Command
-from distutils.errors import *
-from distutils.sysconfig import customize_compiler
-from distutils import log
-
-def show_compilers():
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-
-class build_clib(Command):
-
- description = "build C/C++ libraries used by Python extensions"
-
- user_options = [
- ('build-clib=', 'b',
- "directory to build C/C++ libraries to"),
- ('build-temp=', 't',
- "directory to put temporary build by-products"),
- ('debug', 'g',
- "compile with debugging information"),
- ('force', 'f',
- "forcibly build everything (ignore file timestamps)"),
- ('compiler=', 'c',
- "specify the compiler type"),
- ]
-
- boolean_options = ['debug', 'force']
-
- help_options = [
- ('help-compiler', None,
- "list available compilers", show_compilers),
- ]
-
- def initialize_options(self):
- self.build_clib = None
- self.build_temp = None
-
- # List of libraries to build
- self.libraries = None
-
- # Compilation options for all libraries
- self.include_dirs = None
- self.define = None
- self.undef = None
- self.debug = None
- self.force = 0
- self.compiler = None
-
-
- def finalize_options(self):
- # This might be confusing: both build-clib and build-temp default
- # to build-temp as defined by the "build" command. This is because
- # I think that C libraries are really just temporary build
- # by-products, at least from the point of view of building Python
- # extensions -- but I want to keep my options open.
- self.set_undefined_options('build',
- ('build_temp', 'build_clib'),
- ('build_temp', 'build_temp'),
- ('compiler', 'compiler'),
- ('debug', 'debug'),
- ('force', 'force'))
-
- self.libraries = self.distribution.libraries
- if self.libraries:
- self.check_library_list(self.libraries)
-
- if self.include_dirs is None:
- self.include_dirs = self.distribution.include_dirs or []
- if isinstance(self.include_dirs, str):
- self.include_dirs = self.include_dirs.split(os.pathsep)
-
- # XXX same as for build_ext -- what about 'self.define' and
- # 'self.undef' ?
-
-
- def run(self):
- if not self.libraries:
- return
-
- # Yech -- this is cut 'n pasted from build_ext.py!
- from distutils.ccompiler import new_compiler
- self.compiler = new_compiler(compiler=self.compiler,
- dry_run=self.dry_run,
- force=self.force)
- customize_compiler(self.compiler)
-
- if self.include_dirs is not None:
- self.compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name,value) in self.define:
- self.compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- self.compiler.undefine_macro(macro)
-
- self.build_libraries(self.libraries)
-
-
- def check_library_list(self, libraries):
- """Ensure that the list of libraries is valid.
-
-        `libraries` is presumably provided as the 'libraries' command option.
- This method checks that it is a list of 2-tuples, where the tuples
- are (library_name, build_info_dict).
-
- Raise DistutilsSetupError if the structure is invalid anywhere;
-        just return otherwise.
- """
- if not isinstance(libraries, list):
- raise DistutilsSetupError(
- "'libraries' option must be a list of tuples")
-
- for lib in libraries:
-            if not isinstance(lib, tuple) or len(lib) != 2:
-                raise DistutilsSetupError(
-                    "each element of 'libraries' must be a 2-tuple")
-
- name, build_info = lib
-
- if not isinstance(name, str):
- raise DistutilsSetupError(
- "first element of each tuple in 'libraries' "
- "must be a string (the library name)")
-
- if '/' in name or (os.sep != '/' and os.sep in name):
- raise DistutilsSetupError("bad library name '%s': "
- "may not contain directory separators" % lib[0])
-
- if not isinstance(build_info, dict):
- raise DistutilsSetupError(
- "second element of each tuple in 'libraries' "
- "must be a dictionary (build info)")
-
-
- def get_library_names(self):
- # Assume the library list is valid -- 'check_library_list()' is
- # called from 'finalize_options()', so it should be!
- if not self.libraries:
- return None
-
- lib_names = []
- for (lib_name, build_info) in self.libraries:
- lib_names.append(lib_name)
- return lib_names
-
-
- def get_source_files(self):
- self.check_library_list(self.libraries)
- filenames = []
- for (lib_name, build_info) in self.libraries:
- sources = build_info.get('sources')
- if sources is None or not isinstance(sources, (list, tuple)):
- raise DistutilsSetupError(
- "in 'libraries' option (library '%s'), "
- "'sources' must be present and must be "
- "a list of source filenames" % lib_name)
-
- filenames.extend(sources)
- return filenames
-
-
- def build_libraries(self, libraries):
- for (lib_name, build_info) in libraries:
- sources = build_info.get('sources')
- if sources is None or not isinstance(sources, (list, tuple)):
- raise DistutilsSetupError(
- "in 'libraries' option (library '%s'), "
- "'sources' must be present and must be "
- "a list of source filenames" % lib_name)
- sources = list(sources)
-
- log.info("building '%s' library", lib_name)
-
- # First, compile the source code to object files in the library
- # directory. (This should probably change to putting object
- # files in a temporary build directory.)
- macros = build_info.get('macros')
- include_dirs = build_info.get('include_dirs')
- objects = self.compiler.compile(sources,
- output_dir=self.build_temp,
- macros=macros,
- include_dirs=include_dirs,
- debug=self.debug)
-
- # Now "link" the object files together into a static library.
- # (On Unix at least, this isn't really linking -- it just
- # builds an archive. Whatever.)
- self.compiler.create_static_lib(objects, lib_name,
- output_dir=self.build_clib,
- debug=self.debug)
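
check_library_list() and build_libraries() above define the shape of the 'libraries' option: a list of (name, build_info) 2-tuples whose build_info dict must carry a 'sources' list and may carry 'macros' and 'include_dirs'. A minimal setup script using it (an illustration, not part of the diff; names and paths are hypothetical):

from distutils.core import setup

setup(
    name='foo',
    libraries=[
        ('foo', {
            'sources': ['src/foo.c', 'src/bar.c'],  # required
            'macros': [('NDEBUG', '1')],            # optional (name, value) pairs
            'include_dirs': ['include'],            # optional
        }),
    ],
)
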
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_ext.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/build_ext.py
deleted file mode 100644
index 181671bf19f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_ext.py
+++ /dev/null
@@ -1,755 +0,0 @@
-"""distutils.command.build_ext
-
-Implements the Distutils 'build_ext' command, for building extension
-modules (currently limited to C extensions, should accommodate C++
-extensions ASAP)."""
-
-import contextlib
-import os
-import re
-import sys
-from distutils.core import Command
-from distutils.errors import *
-from distutils.sysconfig import customize_compiler, get_python_version
-from distutils.sysconfig import get_config_h_filename
-from distutils.dep_util import newer_group
-from distutils.extension import Extension
-from distutils.util import get_platform
-from distutils import log
-from . import py37compat
-
-from site import USER_BASE
-
-# An extension name is just a dot-separated list of Python NAMEs (ie.
-# the same as a fully-qualified module name).
-extension_name_re = re.compile(
-    r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
-
-
-def show_compilers():
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-
-class build_ext(Command):
-
- description = "build C/C++ extensions (compile/link to build directory)"
-
- # XXX thoughts on how to deal with complex command-line options like
- # these, i.e. how to make it so fancy_getopt can suck them off the
- # command line and make it look like setup.py defined the appropriate
- # lists of tuples of what-have-you.
- # - each command needs a callback to process its command-line options
- # - Command.__init__() needs access to its share of the whole
- # command line (must ultimately come from
- # Distribution.parse_command_line())
- # - it then calls the current command class' option-parsing
- # callback to deal with weird options like -D, which have to
- # parse the option text and churn out some custom data
- # structure
- # - that data structure (in this case, a list of 2-tuples)
- # will then be present in the command object by the time
- # we get to finalize_options() (i.e. the constructor
- # takes care of both command-line and client options
- # in between initialize_options() and finalize_options())
-
- sep_by = " (separated by '%s')" % os.pathsep
- user_options = [
- ('build-lib=', 'b',
- "directory for compiled extension modules"),
- ('build-temp=', 't',
- "directory for temporary files (build by-products)"),
- ('plat-name=', 'p',
- "platform name to cross-compile for, if supported "
- "(default: %s)" % get_platform()),
- ('inplace', 'i',
- "ignore build-lib and put compiled extensions into the source " +
- "directory alongside your pure Python modules"),
- ('include-dirs=', 'I',
- "list of directories to search for header files" + sep_by),
- ('define=', 'D',
- "C preprocessor macros to define"),
- ('undef=', 'U',
- "C preprocessor macros to undefine"),
- ('libraries=', 'l',
- "external C libraries to link with"),
- ('library-dirs=', 'L',
- "directories to search for external C libraries" + sep_by),
- ('rpath=', 'R',
- "directories to search for shared C libraries at runtime"),
- ('link-objects=', 'O',
- "extra explicit link objects to include in the link"),
- ('debug', 'g',
- "compile/link with debugging information"),
- ('force', 'f',
- "forcibly build everything (ignore file timestamps)"),
- ('compiler=', 'c',
- "specify the compiler type"),
- ('parallel=', 'j',
- "number of parallel build jobs"),
- ('swig-cpp', None,
- "make SWIG create C++ files (default is C)"),
- ('swig-opts=', None,
- "list of SWIG command line options"),
- ('swig=', None,
- "path to the SWIG executable"),
- ('user', None,
- "add user include, library and rpath")
- ]
-
- boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
-
- help_options = [
- ('help-compiler', None,
- "list available compilers", show_compilers),
- ]
-
- def initialize_options(self):
- self.extensions = None
- self.build_lib = None
- self.plat_name = None
- self.build_temp = None
- self.inplace = 0
- self.package = None
-
- self.include_dirs = None
- self.define = None
- self.undef = None
- self.libraries = None
- self.library_dirs = None
- self.rpath = None
- self.link_objects = None
- self.debug = None
- self.force = None
- self.compiler = None
- self.swig = None
- self.swig_cpp = None
- self.swig_opts = None
- self.user = None
- self.parallel = None
-
- def finalize_options(self):
- from distutils import sysconfig
-
- self.set_undefined_options('build',
- ('build_lib', 'build_lib'),
- ('build_temp', 'build_temp'),
- ('compiler', 'compiler'),
- ('debug', 'debug'),
- ('force', 'force'),
- ('parallel', 'parallel'),
- ('plat_name', 'plat_name'),
- )
-
- if self.package is None:
- self.package = self.distribution.ext_package
-
- self.extensions = self.distribution.ext_modules
-
- # Make sure Python's include directories (for Python.h, pyconfig.h,
- # etc.) are in the include search path.
- py_include = sysconfig.get_python_inc()
- plat_py_include = sysconfig.get_python_inc(plat_specific=1)
- if self.include_dirs is None:
- self.include_dirs = self.distribution.include_dirs or []
- if isinstance(self.include_dirs, str):
- self.include_dirs = self.include_dirs.split(os.pathsep)
-
- # If in a virtualenv, add its include directory
- # Issue 16116
- if sys.exec_prefix != sys.base_exec_prefix:
- self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
-
- # Put the Python "system" include dir at the end, so that
- # any local include dirs take precedence.
- self.include_dirs.extend(py_include.split(os.path.pathsep))
- if plat_py_include != py_include:
- self.include_dirs.extend(
- plat_py_include.split(os.path.pathsep))
-
- self.ensure_string_list('libraries')
- self.ensure_string_list('link_objects')
-
- # Life is easier if we're not forever checking for None, so
- # simplify these options to empty lists if unset
- if self.libraries is None:
- self.libraries = []
- if self.library_dirs is None:
- self.library_dirs = []
- elif isinstance(self.library_dirs, str):
- self.library_dirs = self.library_dirs.split(os.pathsep)
-
- if self.rpath is None:
- self.rpath = []
- elif isinstance(self.rpath, str):
- self.rpath = self.rpath.split(os.pathsep)
-
- # for extensions under windows use different directories
- # for Release and Debug builds.
- # also Python's library directory must be appended to library_dirs
- if os.name == 'nt':
- # the 'libs' directory is for binary installs - we assume that
- # must be the *native* platform. But we don't really support
- # cross-compiling via a binary install anyway, so we let it go.
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
- if sys.base_exec_prefix != sys.prefix: # Issue 16116
- self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
- if self.debug:
- self.build_temp = os.path.join(self.build_temp, "Debug")
- else:
- self.build_temp = os.path.join(self.build_temp, "Release")
-
- # Append the source distribution include and library directories,
- # this allows distutils on windows to work in the source tree
- self.include_dirs.append(os.path.dirname(get_config_h_filename()))
- self.library_dirs.append(sys.base_exec_prefix)
-
- # Use the .lib files for the correct architecture
- if self.plat_name == 'win32':
- suffix = 'win32'
- else:
- # win-amd64
- suffix = self.plat_name[4:]
- new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
- if suffix:
- new_lib = os.path.join(new_lib, suffix)
- self.library_dirs.append(new_lib)
-
- # For extensions under Cygwin, Python's library directory must be
- # appended to library_dirs
- if sys.platform[:6] == 'cygwin':
- if not sysconfig.python_build:
- # building third party extensions
- self.library_dirs.append(os.path.join(sys.prefix, "lib",
- "python" + get_python_version(),
- "config"))
- else:
- # building python standard extensions
- self.library_dirs.append('.')
-
- # For building extensions with a shared Python library,
- # Python's library directory must be appended to library_dirs
- # See Issues: #1600860, #4366
- if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
- if not sysconfig.python_build:
- # building third party extensions
- self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
- else:
- # building python standard extensions
- self.library_dirs.append('.')
-
- # The argument parsing will result in self.define being a string, but
- # it has to be a list of 2-tuples. All the preprocessor symbols
- # specified by the 'define' option will be set to '1'. Multiple
- # symbols can be separated with commas.
-
- if self.define:
- defines = self.define.split(',')
- self.define = [(symbol, '1') for symbol in defines]
-
- # The option for macros to undefine is also a string from the
- # option parsing, but has to be a list. Multiple symbols can also
- # be separated with commas here.
- if self.undef:
- self.undef = self.undef.split(',')
-
- if self.swig_opts is None:
- self.swig_opts = []
- else:
- self.swig_opts = self.swig_opts.split(' ')
-
- # Finally add the user include and library directories if requested
- if self.user:
- user_include = os.path.join(USER_BASE, "include")
- user_lib = os.path.join(USER_BASE, "lib")
- if os.path.isdir(user_include):
- self.include_dirs.append(user_include)
- if os.path.isdir(user_lib):
- self.library_dirs.append(user_lib)
- self.rpath.append(user_lib)
-
- if isinstance(self.parallel, str):
- try:
- self.parallel = int(self.parallel)
- except ValueError:
- raise DistutilsOptionError("parallel should be an integer")
-
- def run(self):
- from distutils.ccompiler import new_compiler
-
- # 'self.extensions', as supplied by setup.py, is a list of
- # Extension instances. See the documentation for Extension (in
- # distutils.extension) for details.
- #
- # For backwards compatibility with Distutils 0.8.2 and earlier, we
- # also allow the 'extensions' list to be a list of tuples:
- # (ext_name, build_info)
- # where build_info is a dictionary containing everything that
- # Extension instances do except the name, with a few things being
- # differently named. We convert these 2-tuples to Extension
- # instances as needed.
-
- if not self.extensions:
- return
-
- # If we were asked to build any C/C++ libraries, make sure that the
- # directory where we put them is in the library search path for
- # linking extensions.
- if self.distribution.has_c_libraries():
- build_clib = self.get_finalized_command('build_clib')
- self.libraries.extend(build_clib.get_library_names() or [])
- self.library_dirs.append(build_clib.build_clib)
-
-        # Set up the CCompiler object that we'll use to do all the
- # compiling and linking
- self.compiler = new_compiler(compiler=self.compiler,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force)
- customize_compiler(self.compiler)
- # If we are cross-compiling, init the compiler now (if we are not
- # cross-compiling, init would not hurt, but people may rely on
- # late initialization of compiler even if they shouldn't...)
- if os.name == 'nt' and self.plat_name != get_platform():
- self.compiler.initialize(self.plat_name)
-
- # And make sure that any compile/link-related options (which might
- # come from the command-line or from the setup script) are set in
- # that CCompiler object -- that way, they automatically apply to
- # all compiling and linking done here.
- if self.include_dirs is not None:
- self.compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name, value) in self.define:
- self.compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- self.compiler.undefine_macro(macro)
- if self.libraries is not None:
- self.compiler.set_libraries(self.libraries)
- if self.library_dirs is not None:
- self.compiler.set_library_dirs(self.library_dirs)
- if self.rpath is not None:
- self.compiler.set_runtime_library_dirs(self.rpath)
- if self.link_objects is not None:
- self.compiler.set_link_objects(self.link_objects)
-
- # Now actually compile and link everything.
- self.build_extensions()
-
- def check_extensions_list(self, extensions):
- """Ensure that the list of extensions (presumably provided as a
- command option 'extensions') is valid, i.e. it is a list of
- Extension objects. We also support the old-style list of 2-tuples,
- where the tuples are (ext_name, build_info), which are converted to
- Extension instances here.
-
- Raise DistutilsSetupError if the structure is invalid anywhere;
- just returns otherwise.
- """
- if not isinstance(extensions, list):
- raise DistutilsSetupError(
- "'ext_modules' option must be a list of Extension instances")
-
- for i, ext in enumerate(extensions):
- if isinstance(ext, Extension):
- continue # OK! (assume type-checking done
- # by Extension constructor)
-
- if not isinstance(ext, tuple) or len(ext) != 2:
- raise DistutilsSetupError(
- "each element of 'ext_modules' option must be an "
- "Extension instance or 2-tuple")
-
- ext_name, build_info = ext
-
- log.warn("old-style (ext_name, build_info) tuple found in "
- "ext_modules for extension '%s' "
- "-- please convert to Extension instance", ext_name)
-
- if not (isinstance(ext_name, str) and
- extension_name_re.match(ext_name)):
- raise DistutilsSetupError(
- "first element of each tuple in 'ext_modules' "
- "must be the extension name (a string)")
-
- if not isinstance(build_info, dict):
- raise DistutilsSetupError(
- "second element of each tuple in 'ext_modules' "
- "must be a dictionary (build info)")
-
- # OK, the (ext_name, build_info) dict is type-safe: convert it
- # to an Extension instance.
- ext = Extension(ext_name, build_info['sources'])
-
- # Easy stuff: one-to-one mapping from dict elements to
- # instance attributes.
- for key in ('include_dirs', 'library_dirs', 'libraries',
- 'extra_objects', 'extra_compile_args',
- 'extra_link_args'):
- val = build_info.get(key)
- if val is not None:
- setattr(ext, key, val)
-
- # Medium-easy stuff: same syntax/semantics, different names.
- ext.runtime_library_dirs = build_info.get('rpath')
- if 'def_file' in build_info:
- log.warn("'def_file' element of build info dict "
- "no longer supported")
-
- # Non-trivial stuff: 'macros' split into 'define_macros'
- # and 'undef_macros'.
- macros = build_info.get('macros')
- if macros:
- ext.define_macros = []
- ext.undef_macros = []
- for macro in macros:
- if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
- raise DistutilsSetupError(
- "'macros' element of build info dict "
- "must be 1- or 2-tuple")
- if len(macro) == 1:
- ext.undef_macros.append(macro[0])
- elif len(macro) == 2:
- ext.define_macros.append(macro)
-
- extensions[i] = ext
-
- def get_source_files(self):
- self.check_extensions_list(self.extensions)
- filenames = []
-
- # Wouldn't it be neat if we knew the names of header files too...
- for ext in self.extensions:
- filenames.extend(ext.sources)
- return filenames
-
- def get_outputs(self):
- # Sanity check the 'extensions' list -- can't assume this is being
- # done in the same run as a 'build_extensions()' call (in fact, we
- # can probably assume that it *isn't*!).
- self.check_extensions_list(self.extensions)
-
- # And build the list of output (built) filenames. Note that this
- # ignores the 'inplace' flag, and assumes everything goes in the
- # "build" tree.
- outputs = []
- for ext in self.extensions:
- outputs.append(self.get_ext_fullpath(ext.name))
- return outputs
-
- def build_extensions(self):
- # First, sanity-check the 'extensions' list
- self.check_extensions_list(self.extensions)
- if self.parallel:
- self._build_extensions_parallel()
- else:
- self._build_extensions_serial()
-
- def _build_extensions_parallel(self):
- workers = self.parallel
- if self.parallel is True:
- workers = os.cpu_count() # may return None
- try:
- from concurrent.futures import ThreadPoolExecutor
- except ImportError:
- workers = None
-
- if workers is None:
- self._build_extensions_serial()
- return
-
- with ThreadPoolExecutor(max_workers=workers) as executor:
- futures = [executor.submit(self.build_extension, ext)
- for ext in self.extensions]
- for ext, fut in zip(self.extensions, futures):
- with self._filter_build_errors(ext):
- fut.result()
-
- def _build_extensions_serial(self):
- for ext in self.extensions:
- with self._filter_build_errors(ext):
- self.build_extension(ext)
-
- @contextlib.contextmanager
- def _filter_build_errors(self, ext):
- try:
- yield
- except (CCompilerError, DistutilsError, CompileError) as e:
- if not ext.optional:
- raise
- self.warn('building extension "%s" failed: %s' %
- (ext.name, e))
-
- def build_extension(self, ext):
- sources = ext.sources
- if sources is None or not isinstance(sources, (list, tuple)):
- raise DistutilsSetupError(
- "in 'ext_modules' option (extension '%s'), "
- "'sources' must be present and must be "
- "a list of source filenames" % ext.name)
- # sort to make the resulting .so file build reproducible
- sources = sorted(sources)
-
- ext_path = self.get_ext_fullpath(ext.name)
- depends = sources + ext.depends
- if not (self.force or newer_group(depends, ext_path, 'newer')):
- log.debug("skipping '%s' extension (up-to-date)", ext.name)
- return
- else:
- log.info("building '%s' extension", ext.name)
-
- # First, scan the sources for SWIG definition files (.i), run
- # SWIG on 'em to create .c files, and modify the sources list
- # accordingly.
- sources = self.swig_sources(sources, ext)
-
- # Next, compile the source code to object files.
-
- # XXX not honouring 'define_macros' or 'undef_macros' -- the
- # CCompiler API needs to change to accommodate this, and I
- # want to do one thing at a time!
-
- # Two possible sources for extra compiler arguments:
- # - 'extra_compile_args' in Extension object
- # - CFLAGS environment variable (not particularly
- # elegant, but people seem to expect it and I
- # guess it's useful)
- # The environment variable should take precedence, and
- # any sensible compiler will give precedence to later
- # command line args. Hence we combine them in order:
- extra_args = ext.extra_compile_args or []
-
- macros = ext.define_macros[:]
- for undef in ext.undef_macros:
- macros.append((undef,))
-
- objects = self.compiler.compile(sources,
- output_dir=self.build_temp,
- macros=macros,
- include_dirs=ext.include_dirs,
- debug=self.debug,
- extra_postargs=extra_args,
- depends=ext.depends)
-
-        # XXX outdated variable, kept here in case third-party code
- # needs it.
- self._built_objects = objects[:]
-
- # Now link the object files together into a "shared object" --
- # of course, first we have to figure out all the other things
- # that go into the mix.
- if ext.extra_objects:
- objects.extend(ext.extra_objects)
- extra_args = ext.extra_link_args or []
-
- # Detect target language, if not provided
- language = ext.language or self.compiler.detect_language(sources)
-
- self.compiler.link_shared_object(
- objects, ext_path,
- libraries=self.get_libraries(ext),
- library_dirs=ext.library_dirs,
- runtime_library_dirs=ext.runtime_library_dirs,
- extra_postargs=extra_args,
- export_symbols=self.get_export_symbols(ext),
- debug=self.debug,
- build_temp=self.build_temp,
- target_lang=language)
-
- def swig_sources(self, sources, extension):
- """Walk the list of source files in 'sources', looking for SWIG
- interface (.i) files. Run SWIG on all that are found, and
- return a modified 'sources' list with SWIG source files replaced
- by the generated C (or C++) files.
- """
- new_sources = []
- swig_sources = []
- swig_targets = {}
-
- # XXX this drops generated C/C++ files into the source tree, which
- # is fine for developers who want to distribute the generated
- # source -- but there should be an option to put SWIG output in
- # the temp dir.
-
- if self.swig_cpp:
- log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
-
- if self.swig_cpp or ('-c++' in self.swig_opts) or \
- ('-c++' in extension.swig_opts):
- target_ext = '.cpp'
- else:
- target_ext = '.c'
-
- for source in sources:
- (base, ext) = os.path.splitext(source)
- if ext == ".i": # SWIG interface file
- new_sources.append(base + '_wrap' + target_ext)
- swig_sources.append(source)
- swig_targets[source] = new_sources[-1]
- else:
- new_sources.append(source)
-
- if not swig_sources:
- return new_sources
-
- swig = self.swig or self.find_swig()
- swig_cmd = [swig, "-python"]
- swig_cmd.extend(self.swig_opts)
- if self.swig_cpp:
- swig_cmd.append("-c++")
-
- # Do not override commandline arguments
- if not self.swig_opts:
- for o in extension.swig_opts:
- swig_cmd.append(o)
-
- for source in swig_sources:
- target = swig_targets[source]
- log.info("swigging %s to %s", source, target)
- self.spawn(swig_cmd + ["-o", target, source])
-
- return new_sources
-
- def find_swig(self):
- """Return the name of the SWIG executable. On Unix, this is
- just "swig" -- it should be in the PATH. Tries a bit harder on
- Windows.
- """
- if os.name == "posix":
- return "swig"
- elif os.name == "nt":
- # Look for SWIG in its standard installation directory on
- # Windows (or so I presume!). If we find it there, great;
- # if not, act like Unix and assume it's in the PATH.
- for vers in ("1.3", "1.2", "1.1"):
- fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
- if os.path.isfile(fn):
- return fn
- else:
- return "swig.exe"
- else:
- raise DistutilsPlatformError(
- "I don't know how to find (much less run) SWIG "
- "on platform '%s'" % os.name)
-
- # -- Name generators -----------------------------------------------
- # (extension names, filenames, whatever)
- def get_ext_fullpath(self, ext_name):
- """Returns the path of the filename for a given extension.
-
- The file is located in `build_lib` or directly in the package
- (inplace option).
- """
- fullname = self.get_ext_fullname(ext_name)
- modpath = fullname.split('.')
- filename = self.get_ext_filename(modpath[-1])
-
- if not self.inplace:
- # no further work needed
- # returning :
- # build_dir/package/path/filename
- filename = os.path.join(*modpath[:-1]+[filename])
- return os.path.join(self.build_lib, filename)
-
-        # the inplace option requires finding the package directory;
-        # we use the build_py command for that
- package = '.'.join(modpath[0:-1])
- build_py = self.get_finalized_command('build_py')
- package_dir = os.path.abspath(build_py.get_package_dir(package))
-
- # returning
- # package_dir/filename
- return os.path.join(package_dir, filename)
-
- def get_ext_fullname(self, ext_name):
- """Returns the fullname of a given extension name.
-
- Adds the `package.` prefix"""
- if self.package is None:
- return ext_name
- else:
- return self.package + '.' + ext_name
-
- def get_ext_filename(self, ext_name):
- r"""Convert the name of an extension (eg. "foo.bar") into the name
- of the file from which it will be loaded (eg. "foo/bar.so", or
- "foo\bar.pyd").
- """
- from distutils.sysconfig import get_config_var
- ext_path = ext_name.split('.')
- ext_suffix = get_config_var('EXT_SUFFIX')
- return os.path.join(*ext_path) + ext_suffix
-
- def get_export_symbols(self, ext):
- """Return the list of symbols that a shared extension has to
- export. This either uses 'ext.export_symbols' or, if it's not
- provided, "PyInit_" + module_name. Only relevant on Windows, where
- the .pyd file (DLL) must export the module "PyInit_" function.
- """
- name = ext.name.split('.')[-1]
- try:
- # Unicode module name support as defined in PEP-489
- # https://www.python.org/dev/peps/pep-0489/#export-hook-name
- name.encode('ascii')
- except UnicodeEncodeError:
- suffix = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
- else:
- suffix = "_" + name
-
- initfunc_name = "PyInit" + suffix
- if initfunc_name not in ext.export_symbols:
- ext.export_symbols.append(initfunc_name)
- return ext.export_symbols
-
- def get_libraries(self, ext):
- """Return the list of libraries to link against when building a
- shared extension. On most platforms, this is just 'ext.libraries';
- on Windows, we add the Python library (eg. python20.dll).
- """
- # The python library is always needed on Windows. For MSVC, this
- # is redundant, since the library is mentioned in a pragma in
- # pyconfig.h that MSVC groks. The other Windows compilers all seem
- # to need it mentioned explicitly, though, so that's what we do.
- # Append '_d' to the python import library on debug builds.
- if sys.platform == "win32":
- from distutils._msvccompiler import MSVCCompiler
- if not isinstance(self.compiler, MSVCCompiler):
- template = "python%d%d"
- if self.debug:
- template = template + '_d'
- pythonlib = (template %
- (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
- # don't extend ext.libraries, it may be shared with other
- # extensions, it is a reference to the original list
- return ext.libraries + [pythonlib]
- else:
- # On Android only the main executable and LD_PRELOADs are considered
- # to be RTLD_GLOBAL, all the dependencies of the main executable
- # remain RTLD_LOCAL and so the shared libraries must be linked with
- # libpython when python is built with a shared python library (issue
- # bpo-21536).
- # On Cygwin (and if required, other POSIX-like platforms based on
- # Windows like MinGW) it is simply necessary that all symbols in
- # shared libraries are resolved at link time.
- from distutils.sysconfig import get_config_var
- link_libpython = False
- if get_config_var('Py_ENABLE_SHARED'):
- # A native build on an Android device or on Cygwin
- if hasattr(sys, 'getandroidapilevel'):
- link_libpython = True
- elif sys.platform == 'cygwin':
- link_libpython = True
- elif '_PYTHON_HOST_PLATFORM' in os.environ:
- # We are cross-compiling for one of the relevant platforms
- if get_config_var('ANDROID_API_LEVEL') != 0:
- link_libpython = True
- elif get_config_var('MACHDEP') == 'cygwin':
- link_libpython = True
-
- if link_libpython:
- ldversion = get_config_var('LDVERSION')
- return ext.libraries + ['python' + ldversion]
-
- return ext.libraries + py37compat.pythonlib()
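For context on the deletion above: the name-generation logic in build_ext is small enough to approximate outside distutils. A minimal sketch, assuming a CPython interpreter where sysconfig exposes EXT_SUFFIX (the helper name ext_filename is ours, not distutils'):

    import os
    import sysconfig

    def ext_filename(ext_name):
        # Mirrors the deleted get_ext_filename(): "foo.bar" -> "foo/bar" + EXT_SUFFIX.
        return os.path.join(*ext_name.split('.')) + sysconfig.get_config_var('EXT_SUFFIX')

    # e.g. 'foo/bar.cpython-310-x86_64-linux-gnu.so' on Linux
    print(ext_filename('foo.bar'))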
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_py.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/build_py.py
deleted file mode 100644
index 7ef9bcefdec..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_py.py
+++ /dev/null
@@ -1,392 +0,0 @@
-"""distutils.command.build_py
-
-Implements the Distutils 'build_py' command."""
-
-import os
-import importlib.util
-import sys
-import glob
-
-from distutils.core import Command
-from distutils.errors import *
-from distutils.util import convert_path
-from distutils import log
-
-class build_py (Command):
-
- description = "\"build\" pure Python modules (copy to build directory)"
-
- user_options = [
- ('build-lib=', 'd', "directory to \"build\" (copy) to"),
- ('compile', 'c', "compile .py to .pyc"),
- ('no-compile', None, "don't compile .py files [default]"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('force', 'f', "forcibly build everything (ignore file timestamps)"),
- ]
-
- boolean_options = ['compile', 'force']
- negative_opt = {'no-compile' : 'compile'}
-
- def initialize_options(self):
- self.build_lib = None
- self.py_modules = None
- self.package = None
- self.package_data = None
- self.package_dir = None
- self.compile = 0
- self.optimize = 0
- self.force = None
-
- def finalize_options(self):
- self.set_undefined_options('build',
- ('build_lib', 'build_lib'),
- ('force', 'force'))
-
- # Get the distribution options that are aliases for build_py
- # options -- list of packages and list of modules.
- self.packages = self.distribution.packages
- self.py_modules = self.distribution.py_modules
- self.package_data = self.distribution.package_data
- self.package_dir = {}
- if self.distribution.package_dir:
- for name, path in self.distribution.package_dir.items():
- self.package_dir[name] = convert_path(path)
- self.data_files = self.get_data_files()
-
- # Ick, copied straight from install_lib.py (fancy_getopt needs a
- # type system! Hell, *everything* needs a type system!!!)
- if not isinstance(self.optimize, int):
- try:
- self.optimize = int(self.optimize)
- assert 0 <= self.optimize <= 2
- except (ValueError, AssertionError):
- raise DistutilsOptionError("optimize must be 0, 1, or 2")
-
- def run(self):
- # XXX copy_file by default preserves atime and mtime. IMHO this is
- # the right thing to do, but perhaps it should be an option -- in
- # particular, a site administrator might want installed files to
- # reflect the time of installation rather than the last
- # modification time before the installed release.
-
- # XXX copy_file by default preserves mode, which appears to be the
- # wrong thing to do: if a file is read-only in the working
- # directory, we want it to be installed read/write so that the next
- # installation of the same module distribution can overwrite it
- # without problems. (This might be a Unix-specific issue.) Thus
- # we turn off 'preserve_mode' when copying to the build directory,
- # since the build directory is supposed to be exactly what the
- # installation will look like (ie. we preserve mode when
- # installing).
-
- # Two options control which modules will be installed: 'packages'
- # and 'py_modules'. The former lets us work with whole packages, not
- # specifying individual modules at all; the latter is for
- # specifying modules one-at-a-time.
-
- if self.py_modules:
- self.build_modules()
- if self.packages:
- self.build_packages()
- self.build_package_data()
-
- self.byte_compile(self.get_outputs(include_bytecode=0))
-
- def get_data_files(self):
- """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
- data = []
- if not self.packages:
- return data
- for package in self.packages:
- # Locate package source directory
- src_dir = self.get_package_dir(package)
-
- # Compute package build directory
- build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
- # Length of path to strip from found files
- plen = 0
- if src_dir:
- plen = len(src_dir)+1
-
- # Strip directory from globbed filenames
- filenames = [
- file[plen:] for file in self.find_data_files(package, src_dir)
- ]
- data.append((package, src_dir, build_dir, filenames))
- return data
-
- def find_data_files(self, package, src_dir):
- """Return filenames for package's data files in 'src_dir'"""
- globs = (self.package_data.get('', [])
- + self.package_data.get(package, []))
- files = []
- for pattern in globs:
- # Each pattern has to be converted to a platform-specific path
- filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern)))
- # Files that match more than one pattern are only added once
- files.extend([fn for fn in filelist if fn not in files
- and os.path.isfile(fn)])
- return files
-
- def build_package_data(self):
- """Copy data files into build directory"""
- lastdir = None
- for package, src_dir, build_dir, filenames in self.data_files:
- for filename in filenames:
- target = os.path.join(build_dir, filename)
- self.mkpath(os.path.dirname(target))
- self.copy_file(os.path.join(src_dir, filename), target,
- preserve_mode=False)
-
- def get_package_dir(self, package):
- """Return the directory, relative to the top of the source
- distribution, where package 'package' should be found
- (at least according to the 'package_dir' option, if any)."""
- path = package.split('.')
-
- if not self.package_dir:
- if path:
- return os.path.join(*path)
- else:
- return ''
- else:
- tail = []
- while path:
- try:
- pdir = self.package_dir['.'.join(path)]
- except KeyError:
- tail.insert(0, path[-1])
- del path[-1]
- else:
- tail.insert(0, pdir)
- return os.path.join(*tail)
- else:
- # Oops, got all the way through 'path' without finding a
- # match in package_dir. If package_dir defines a directory
- # for the root (nameless) package, then fallback on it;
- # otherwise, we might as well have not consulted
- # package_dir at all, as we just use the directory implied
- # by 'tail' (which should be the same as the original value
- # of 'path' at this point).
- pdir = self.package_dir.get('')
- if pdir is not None:
- tail.insert(0, pdir)
-
- if tail:
- return os.path.join(*tail)
- else:
- return ''
-
- def check_package(self, package, package_dir):
- # Empty dir name means current directory, which we can probably
- # assume exists. Also, os.path.exists and isdir don't know about
- # my "empty string means current dir" convention, so we have to
- # circumvent them.
- if package_dir != "":
- if not os.path.exists(package_dir):
- raise DistutilsFileError(
- "package directory '%s' does not exist" % package_dir)
- if not os.path.isdir(package_dir):
- raise DistutilsFileError(
- "supposed package directory '%s' exists, "
- "but is not a directory" % package_dir)
-
- # Require __init__.py for all but the "root package"
- if package:
- init_py = os.path.join(package_dir, "__init__.py")
- if os.path.isfile(init_py):
- return init_py
- else:
- log.warn(("package init file '%s' not found " +
- "(or not a regular file)"), init_py)
-
- # Either not in a package at all (__init__.py not expected), or
- # __init__.py doesn't exist -- so don't return the filename.
- return None
-
- def check_module(self, module, module_file):
- if not os.path.isfile(module_file):
- log.warn("file %s (for module %s) not found", module_file, module)
- return False
- else:
- return True
-
- def find_package_modules(self, package, package_dir):
- self.check_package(package, package_dir)
- module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py"))
- modules = []
- setup_script = os.path.abspath(self.distribution.script_name)
-
- for f in module_files:
- abs_f = os.path.abspath(f)
- if abs_f != setup_script:
- module = os.path.splitext(os.path.basename(f))[0]
- modules.append((package, module, f))
- else:
- self.debug_print("excluding %s" % setup_script)
- return modules
-
- def find_modules(self):
- """Finds individually-specified Python modules, ie. those listed by
- module name in 'self.py_modules'. Returns a list of tuples (package,
-        module_base, filename): 'package' is the dotted name of the module's
-        package (an empty string for top-level modules); 'module_base' is the bare (no
- packages, no dots) module name, and 'filename' is the path to the
- ".py" file (relative to the distribution root) that implements the
- module.
- """
- # Map package names to tuples of useful info about the package:
- # (package_dir, checked)
- # package_dir - the directory where we'll find source files for
- # this package
- # checked - true if we have checked that the package directory
- # is valid (exists, contains __init__.py, ... ?)
- packages = {}
-
- # List of (package, module, filename) tuples to return
- modules = []
-
- # We treat modules-in-packages almost the same as toplevel modules,
- # just the "package" for a toplevel is empty (either an empty
- # string or empty list, depending on context). Differences:
- # - don't check for __init__.py in directory for empty package
- for module in self.py_modules:
- path = module.split('.')
- package = '.'.join(path[0:-1])
- module_base = path[-1]
-
- try:
- (package_dir, checked) = packages[package]
- except KeyError:
- package_dir = self.get_package_dir(package)
- checked = 0
-
- if not checked:
- init_py = self.check_package(package, package_dir)
- packages[package] = (package_dir, 1)
- if init_py:
- modules.append((package, "__init__", init_py))
-
- # XXX perhaps we should also check for just .pyc files
- # (so greedy closed-source bastards can distribute Python
- # modules too)
- module_file = os.path.join(package_dir, module_base + ".py")
- if not self.check_module(module, module_file):
- continue
-
- modules.append((package, module_base, module_file))
-
- return modules
-
- def find_all_modules(self):
- """Compute the list of all modules that will be built, whether
- they are specified one-module-at-a-time ('self.py_modules') or
- by whole packages ('self.packages'). Return a list of tuples
- (package, module, module_file), just like 'find_modules()' and
- 'find_package_modules()' do."""
- modules = []
- if self.py_modules:
- modules.extend(self.find_modules())
- if self.packages:
- for package in self.packages:
- package_dir = self.get_package_dir(package)
- m = self.find_package_modules(package, package_dir)
- modules.extend(m)
- return modules
-
- def get_source_files(self):
- return [module[-1] for module in self.find_all_modules()]
-
- def get_module_outfile(self, build_dir, package, module):
- outfile_path = [build_dir] + list(package) + [module + ".py"]
- return os.path.join(*outfile_path)
-
- def get_outputs(self, include_bytecode=1):
- modules = self.find_all_modules()
- outputs = []
- for (package, module, module_file) in modules:
- package = package.split('.')
- filename = self.get_module_outfile(self.build_lib, package, module)
- outputs.append(filename)
- if include_bytecode:
- if self.compile:
- outputs.append(importlib.util.cache_from_source(
- filename, optimization=''))
- if self.optimize > 0:
- outputs.append(importlib.util.cache_from_source(
- filename, optimization=self.optimize))
-
- outputs += [
- os.path.join(build_dir, filename)
- for package, src_dir, build_dir, filenames in self.data_files
- for filename in filenames
- ]
-
- return outputs
-
- def build_module(self, module, module_file, package):
- if isinstance(package, str):
- package = package.split('.')
- elif not isinstance(package, (list, tuple)):
- raise TypeError(
- "'package' must be a string (dot-separated), list, or tuple")
-
- # Now put the module source file into the "build" area -- this is
- # easy, we just copy it somewhere under self.build_lib (the build
- # directory for Python source).
- outfile = self.get_module_outfile(self.build_lib, package, module)
- dir = os.path.dirname(outfile)
- self.mkpath(dir)
- return self.copy_file(module_file, outfile, preserve_mode=0)
-
- def build_modules(self):
- modules = self.find_modules()
- for (package, module, module_file) in modules:
- # Now "build" the module -- ie. copy the source file to
- # self.build_lib (the build directory for Python source).
- # (Actually, it gets copied to the directory for this package
- # under self.build_lib.)
- self.build_module(module, module_file, package)
-
- def build_packages(self):
- for package in self.packages:
- # Get list of (package, module, module_file) tuples based on
- # scanning the package directory. 'package' is only included
- # in the tuple so that 'find_modules()' and
-            # 'find_package_modules()' have a consistent interface; it's
- # ignored here (apart from a sanity check). Also, 'module' is
- # the *unqualified* module name (ie. no dots, no package -- we
- # already know its package!), and 'module_file' is the path to
- # the .py file, relative to the current directory
- # (ie. including 'package_dir').
- package_dir = self.get_package_dir(package)
- modules = self.find_package_modules(package, package_dir)
-
- # Now loop over the modules we found, "building" each one (just
- # copy it to self.build_lib).
- for (package_, module, module_file) in modules:
- assert package == package_
- self.build_module(module, module_file, package)
-
- def byte_compile(self, files):
- if sys.dont_write_bytecode:
- self.warn('byte-compiling is disabled, skipping.')
- return
-
- from distutils.util import byte_compile
- prefix = self.build_lib
- if prefix[-1] != os.sep:
- prefix = prefix + os.sep
-
-        # XXX this code is essentially the same as the 'byte_compile()'
- # method of the "install_lib" command, except for the determination
- # of the 'prefix' string. Hmmm.
- if self.compile:
- byte_compile(files, optimize=0,
- force=self.force, prefix=prefix, dry_run=self.dry_run)
- if self.optimize > 0:
- byte_compile(files, optimize=self.optimize,
- force=self.force, prefix=prefix, dry_run=self.dry_run)
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_scripts.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/build_scripts.py
deleted file mode 100644
index e3312cf0caa..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/build_scripts.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""distutils.command.build_scripts
-
-Implements the Distutils 'build_scripts' command."""
-
-import os, re
-from stat import ST_MODE
-from distutils import sysconfig
-from distutils.core import Command
-from distutils.dep_util import newer
-from distutils.util import convert_path
-from distutils import log
-import tokenize
-
-# check if Python is called on the first line with this expression
-first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')
-
-class build_scripts(Command):
-
- description = "\"build\" scripts (copy and fixup #! line)"
-
- user_options = [
- ('build-dir=', 'd', "directory to \"build\" (copy) to"),
- ('force', 'f', "forcibly build everything (ignore file timestamps"),
- ('executable=', 'e', "specify final destination interpreter path"),
- ]
-
- boolean_options = ['force']
-
-
- def initialize_options(self):
- self.build_dir = None
- self.scripts = None
- self.force = None
- self.executable = None
- self.outfiles = None
-
- def finalize_options(self):
- self.set_undefined_options('build',
- ('build_scripts', 'build_dir'),
- ('force', 'force'),
- ('executable', 'executable'))
- self.scripts = self.distribution.scripts
-
- def get_source_files(self):
- return self.scripts
-
- def run(self):
- if not self.scripts:
- return
- self.copy_scripts()
-
-
- def copy_scripts(self):
- r"""Copy each script listed in 'self.scripts'; if it's marked as a
- Python script in the Unix way (first line matches 'first_line_re',
- ie. starts with "\#!" and contains "python"), then adjust the first
- line to refer to the current Python interpreter as we copy.
- """
- self.mkpath(self.build_dir)
- outfiles = []
- updated_files = []
- for script in self.scripts:
- adjust = False
- script = convert_path(script)
- outfile = os.path.join(self.build_dir, os.path.basename(script))
- outfiles.append(outfile)
-
- if not self.force and not newer(script, outfile):
- log.debug("not copying %s (up-to-date)", script)
- continue
-
- # Always open the file, but ignore failures in dry-run mode --
- # that way, we'll get accurate feedback if we can read the
- # script.
- try:
- f = open(script, "rb")
- except OSError:
- if not self.dry_run:
- raise
- f = None
- else:
- encoding, lines = tokenize.detect_encoding(f.readline)
- f.seek(0)
- first_line = f.readline()
- if not first_line:
- self.warn("%s is an empty file (skipping)" % script)
- continue
-
- match = first_line_re.match(first_line)
- if match:
- adjust = True
- post_interp = match.group(1) or b''
-
- if adjust:
- log.info("copying and adjusting %s -> %s", script,
- self.build_dir)
- updated_files.append(outfile)
- if not self.dry_run:
- if not sysconfig.python_build:
- executable = self.executable
- else:
- executable = os.path.join(
- sysconfig.get_config_var("BINDIR"),
- "python%s%s" % (sysconfig.get_config_var("VERSION"),
- sysconfig.get_config_var("EXE")))
- executable = os.fsencode(executable)
- shebang = b"#!" + executable + post_interp + b"\n"
- # Python parser starts to read a script using UTF-8 until
- # it gets a #coding:xxx cookie. The shebang has to be the
- # first line of a file, the #coding:xxx cookie cannot be
- # written before. So the shebang has to be decodable from
- # UTF-8.
- try:
- shebang.decode('utf-8')
- except UnicodeDecodeError:
- raise ValueError(
- "The shebang ({!r}) is not decodable "
- "from utf-8".format(shebang))
- # If the script is encoded to a custom encoding (use a
- # #coding:xxx cookie), the shebang has to be decodable from
- # the script encoding too.
- try:
- shebang.decode(encoding)
- except UnicodeDecodeError:
- raise ValueError(
- "The shebang ({!r}) is not decodable "
- "from the script encoding ({})"
- .format(shebang, encoding))
- with open(outfile, "wb") as outf:
- outf.write(shebang)
- outf.writelines(f.readlines())
- if f:
- f.close()
- else:
- if f:
- f.close()
- updated_files.append(outfile)
- self.copy_file(script, outfile)
-
- if os.name == 'posix':
- for file in outfiles:
- if self.dry_run:
- log.info("changing mode of %s", file)
- else:
- oldmode = os.stat(file)[ST_MODE] & 0o7777
- newmode = (oldmode | 0o555) & 0o7777
- if newmode != oldmode:
- log.info("changing mode of %s from %o to %o",
- file, oldmode, newmode)
- os.chmod(file, newmode)
- # XXX should we modify self.outfiles?
- return outfiles, updated_files
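For reference, the shebang handling deleted above hinges entirely on first_line_re. A minimal sketch of the match behaviour (the sample lines are hypothetical):

    import re

    # Same pattern as the deleted module: a "#!...python..." first line,
    # with any interpreter arguments captured in group 1.
    first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')

    for line in (b'#!/usr/bin/python3 -u', b'#!/usr/bin/env sh'):
        m = first_line_re.match(line)
        print(line, m.group(1) if m else 'no match')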
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/check.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/check.py
deleted file mode 100644
index ada25006467..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/check.py
+++ /dev/null
@@ -1,148 +0,0 @@
-"""distutils.command.check
-
-Implements the Distutils 'check' command.
-"""
-from distutils.core import Command
-from distutils.errors import DistutilsSetupError
-
-try:
- # docutils is installed
- from docutils.utils import Reporter
- from docutils.parsers.rst import Parser
- from docutils import frontend
- from docutils import nodes
-
- class SilentReporter(Reporter):
-
- def __init__(self, source, report_level, halt_level, stream=None,
- debug=0, encoding='ascii', error_handler='replace'):
- self.messages = []
- Reporter.__init__(self, source, report_level, halt_level, stream,
- debug, encoding, error_handler)
-
- def system_message(self, level, message, *children, **kwargs):
- self.messages.append((level, message, children, kwargs))
- return nodes.system_message(message, level=level,
- type=self.levels[level],
- *children, **kwargs)
-
- HAS_DOCUTILS = True
-except Exception:
- # Catch all exceptions because exceptions besides ImportError probably
- # indicate that docutils is not ported to Py3k.
- HAS_DOCUTILS = False
-
-class check(Command):
- """This command checks the meta-data of the package.
- """
- description = ("perform some checks on the package")
- user_options = [('metadata', 'm', 'Verify meta-data'),
- ('restructuredtext', 'r',
- ('Checks if long string meta-data syntax '
- 'are reStructuredText-compliant')),
- ('strict', 's',
- 'Will exit with an error if a check fails')]
-
- boolean_options = ['metadata', 'restructuredtext', 'strict']
-
- def initialize_options(self):
- """Sets default values for options."""
- self.restructuredtext = 0
- self.metadata = 1
- self.strict = 0
- self._warnings = 0
-
- def finalize_options(self):
- pass
-
- def warn(self, msg):
- """Counts the number of warnings that occurs."""
- self._warnings += 1
- return Command.warn(self, msg)
-
- def run(self):
- """Runs the command."""
- # perform the various tests
- if self.metadata:
- self.check_metadata()
- if self.restructuredtext:
- if HAS_DOCUTILS:
- self.check_restructuredtext()
- elif self.strict:
- raise DistutilsSetupError('The docutils package is needed.')
-
- # let's raise an error in strict mode, if we have at least
- # one warning
- if self.strict and self._warnings > 0:
- raise DistutilsSetupError('Please correct your package.')
-
- def check_metadata(self):
- """Ensures that all required elements of meta-data are supplied.
-
- Required fields:
- name, version, URL
-
- Recommended fields:
- (author and author_email) or (maintainer and maintainer_email))
-
- Warns if any are missing.
- """
- metadata = self.distribution.metadata
-
- missing = []
- for attr in ('name', 'version', 'url'):
- if not (hasattr(metadata, attr) and getattr(metadata, attr)):
- missing.append(attr)
-
- if missing:
- self.warn("missing required meta-data: %s" % ', '.join(missing))
- if metadata.author:
- if not metadata.author_email:
- self.warn("missing meta-data: if 'author' supplied, " +
- "'author_email' should be supplied too")
- elif metadata.maintainer:
- if not metadata.maintainer_email:
- self.warn("missing meta-data: if 'maintainer' supplied, " +
- "'maintainer_email' should be supplied too")
- else:
- self.warn("missing meta-data: either (author and author_email) " +
- "or (maintainer and maintainer_email) " +
- "should be supplied")
-
- def check_restructuredtext(self):
- """Checks if the long string fields are reST-compliant."""
- data = self.distribution.get_long_description()
- for warning in self._check_rst_data(data):
- line = warning[-1].get('line')
- if line is None:
- warning = warning[1]
- else:
- warning = '%s (line %s)' % (warning[1], line)
- self.warn(warning)
-
- def _check_rst_data(self, data):
- """Returns warnings when the provided data doesn't compile."""
- # the include and csv_table directives need this to be a path
- source_path = self.distribution.script_name or 'setup.py'
- parser = Parser()
- settings = frontend.OptionParser(components=(Parser,)).get_default_values()
- settings.tab_width = 4
- settings.pep_references = None
- settings.rfc_references = None
- reporter = SilentReporter(source_path,
- settings.report_level,
- settings.halt_level,
- stream=settings.warning_stream,
- debug=settings.debug,
- encoding=settings.error_encoding,
- error_handler=settings.error_encoding_error_handler)
-
- document = nodes.document(settings, reporter, source=source_path)
- document.note_source(source_path, -1)
- try:
- parser.parse(data, document)
- except AttributeError as e:
- reporter.messages.append(
- (-1, 'Could not finish the parsing: %s.' % e, '', {}))
-
- return reporter.messages
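For reference, the metadata check deleted above reduces to a scan over three required fields. A minimal standalone sketch (the Meta class is a hypothetical stand-in for distutils' DistributionMetadata):

    # Hypothetical metadata carrier with 'version' missing on purpose.
    class Meta:
        name = 'demo'
        version = None
        url = 'https://example.org'

    missing = [attr for attr in ('name', 'version', 'url')
               if not getattr(Meta, attr, None)]
    if missing:
        print("missing required meta-data: %s" % ', '.join(missing))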
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/clean.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/clean.py
deleted file mode 100644
index 0cb27016621..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/clean.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""distutils.command.clean
-
-Implements the Distutils 'clean' command."""
-
-# contributed by Bastian Kleineidam <[email protected]>, added 2000-03-18
-
-import os
-from distutils.core import Command
-from distutils.dir_util import remove_tree
-from distutils import log
-
-class clean(Command):
-
- description = "clean up temporary files from 'build' command"
- user_options = [
- ('build-base=', 'b',
- "base build directory (default: 'build.build-base')"),
- ('build-lib=', None,
- "build directory for all modules (default: 'build.build-lib')"),
- ('build-temp=', 't',
- "temporary build directory (default: 'build.build-temp')"),
- ('build-scripts=', None,
- "build directory for scripts (default: 'build.build-scripts')"),
- ('bdist-base=', None,
- "temporary directory for built distributions"),
- ('all', 'a',
- "remove all build output, not just temporary by-products")
- ]
-
- boolean_options = ['all']
-
- def initialize_options(self):
- self.build_base = None
- self.build_lib = None
- self.build_temp = None
- self.build_scripts = None
- self.bdist_base = None
- self.all = None
-
- def finalize_options(self):
- self.set_undefined_options('build',
- ('build_base', 'build_base'),
- ('build_lib', 'build_lib'),
- ('build_scripts', 'build_scripts'),
- ('build_temp', 'build_temp'))
- self.set_undefined_options('bdist',
- ('bdist_base', 'bdist_base'))
-
- def run(self):
- # remove the build/temp.<plat> directory (unless it's already
- # gone)
- if os.path.exists(self.build_temp):
- remove_tree(self.build_temp, dry_run=self.dry_run)
- else:
- log.debug("'%s' does not exist -- can't clean it",
- self.build_temp)
-
- if self.all:
- # remove build directories
- for directory in (self.build_lib,
- self.bdist_base,
- self.build_scripts):
- if os.path.exists(directory):
- remove_tree(directory, dry_run=self.dry_run)
- else:
- log.warn("'%s' does not exist -- can't clean it",
- directory)
-
- # just for the heck of it, try to remove the base build directory:
- # we might have emptied it right now, but if not we don't care
- if not self.dry_run:
- try:
- os.rmdir(self.build_base)
- log.info("removing '%s'", self.build_base)
- except OSError:
- pass
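For reference, the opportunistic rmdir at the end of the deleted run() relies on os.rmdir failing unless the directory is empty. A minimal sketch (the directory name is a hypothetical example):

    import os

    build_base = 'build'
    try:
        os.rmdir(build_base)  # raises OSError unless empty (or missing)
        print("removing '%s'" % build_base)
    except OSError:
        pass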
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/config.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/config.py
deleted file mode 100644
index aeda408e731..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/config.py
+++ /dev/null
@@ -1,344 +0,0 @@
-"""distutils.command.config
-
-Implements the Distutils 'config' command, a (mostly) empty command class
-that exists mainly to be sub-classed by specific module distributions and
-applications. The idea is that while every "config" command is different,
-at least they're all named the same, and users always see "config" in the
-list of standard commands. Also, this is a good place to put common
-configure-like tasks: "try to compile this C code", or "figure out where
-this header file lives".
-"""
-
-import os, re
-
-from distutils.core import Command
-from distutils.errors import DistutilsExecError
-from distutils.sysconfig import customize_compiler
-from distutils import log
-
-LANG_EXT = {"c": ".c", "c++": ".cxx"}
-
-class config(Command):
-
- description = "prepare to build"
-
- user_options = [
- ('compiler=', None,
- "specify the compiler type"),
- ('cc=', None,
- "specify the compiler executable"),
- ('include-dirs=', 'I',
- "list of directories to search for header files"),
- ('define=', 'D',
- "C preprocessor macros to define"),
- ('undef=', 'U',
- "C preprocessor macros to undefine"),
- ('libraries=', 'l',
- "external C libraries to link with"),
- ('library-dirs=', 'L',
- "directories to search for external C libraries"),
-
- ('noisy', None,
- "show every action (compile, link, run, ...) taken"),
- ('dump-source', None,
- "dump generated source files before attempting to compile them"),
- ]
-
-
- # The three standard command methods: since the "config" command
- # does nothing by default, these are empty.
-
- def initialize_options(self):
- self.compiler = None
- self.cc = None
- self.include_dirs = None
- self.libraries = None
- self.library_dirs = None
-
- # maximal output for now
- self.noisy = 1
- self.dump_source = 1
-
- # list of temporary files generated along-the-way that we have
- # to clean at some point
- self.temp_files = []
-
- def finalize_options(self):
- if self.include_dirs is None:
- self.include_dirs = self.distribution.include_dirs or []
- elif isinstance(self.include_dirs, str):
- self.include_dirs = self.include_dirs.split(os.pathsep)
-
- if self.libraries is None:
- self.libraries = []
- elif isinstance(self.libraries, str):
- self.libraries = [self.libraries]
-
- if self.library_dirs is None:
- self.library_dirs = []
- elif isinstance(self.library_dirs, str):
- self.library_dirs = self.library_dirs.split(os.pathsep)
-
- def run(self):
- pass
-
- # Utility methods for actual "config" commands. The interfaces are
- # loosely based on Autoconf macros of similar names. Sub-classes
- # may use these freely.
-
- def _check_compiler(self):
- """Check that 'self.compiler' really is a CCompiler object;
- if not, make it one.
- """
- # We do this late, and only on-demand, because this is an expensive
- # import.
- from distutils.ccompiler import CCompiler, new_compiler
- if not isinstance(self.compiler, CCompiler):
- self.compiler = new_compiler(compiler=self.compiler,
- dry_run=self.dry_run, force=1)
- customize_compiler(self.compiler)
- if self.include_dirs:
- self.compiler.set_include_dirs(self.include_dirs)
- if self.libraries:
- self.compiler.set_libraries(self.libraries)
- if self.library_dirs:
- self.compiler.set_library_dirs(self.library_dirs)
-
- def _gen_temp_sourcefile(self, body, headers, lang):
- filename = "_configtest" + LANG_EXT[lang]
- with open(filename, "w") as file:
- if headers:
- for header in headers:
- file.write("#include <%s>\n" % header)
- file.write("\n")
- file.write(body)
- if body[-1] != "\n":
- file.write("\n")
- return filename
-
- def _preprocess(self, body, headers, include_dirs, lang):
- src = self._gen_temp_sourcefile(body, headers, lang)
- out = "_configtest.i"
- self.temp_files.extend([src, out])
- self.compiler.preprocess(src, out, include_dirs=include_dirs)
- return (src, out)
-
- def _compile(self, body, headers, include_dirs, lang):
- src = self._gen_temp_sourcefile(body, headers, lang)
- if self.dump_source:
- dump_file(src, "compiling '%s':" % src)
- (obj,) = self.compiler.object_filenames([src])
- self.temp_files.extend([src, obj])
- self.compiler.compile([src], include_dirs=include_dirs)
- return (src, obj)
-
- def _link(self, body, headers, include_dirs, libraries, library_dirs,
- lang):
- (src, obj) = self._compile(body, headers, include_dirs, lang)
- prog = os.path.splitext(os.path.basename(src))[0]
- self.compiler.link_executable([obj], prog,
- libraries=libraries,
- library_dirs=library_dirs,
- target_lang=lang)
-
- if self.compiler.exe_extension is not None:
- prog = prog + self.compiler.exe_extension
- self.temp_files.append(prog)
-
- return (src, obj, prog)
-
- def _clean(self, *filenames):
- if not filenames:
- filenames = self.temp_files
- self.temp_files = []
- log.info("removing: %s", ' '.join(filenames))
- for filename in filenames:
- try:
- os.remove(filename)
- except OSError:
- pass
-
-
- # XXX these ignore the dry-run flag: what to do, what to do? even if
- # you want a dry-run build, you still need some sort of configuration
- # info. My inclination is to make it up to the real config command to
- # consult 'dry_run', and assume a default (minimal) configuration if
- # true. The problem with trying to do it here is that you'd have to
- # return either true or false from all the 'try' methods, neither of
- # which is correct.
-
- # XXX need access to the header search path and maybe default macros.
-
- def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
- """Construct a source file from 'body' (a string containing lines
- of C/C++ code) and 'headers' (a list of header files to include)
- and run it through the preprocessor. Return true if the
- preprocessor succeeded, false if there were any errors.
- ('body' probably isn't of much use, but what the heck.)
- """
- from distutils.ccompiler import CompileError
- self._check_compiler()
- ok = True
- try:
- self._preprocess(body, headers, include_dirs, lang)
- except CompileError:
- ok = False
-
- self._clean()
- return ok
-
- def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
- lang="c"):
- """Construct a source file (just like 'try_cpp()'), run it through
- the preprocessor, and return true if any line of the output matches
- 'pattern'. 'pattern' should either be a compiled regex object or a
- string containing a regex. If both 'body' and 'headers' are None,
- preprocesses an empty file -- which can be useful to determine the
- symbols the preprocessor and compiler set by default.
- """
- self._check_compiler()
- src, out = self._preprocess(body, headers, include_dirs, lang)
-
- if isinstance(pattern, str):
- pattern = re.compile(pattern)
-
- with open(out) as file:
- match = False
- while True:
- line = file.readline()
- if line == '':
- break
- if pattern.search(line):
- match = True
- break
-
- self._clean()
- return match
-
- def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
- """Try to compile a source file built from 'body' and 'headers'.
- Return true on success, false otherwise.
- """
- from distutils.ccompiler import CompileError
- self._check_compiler()
- try:
- self._compile(body, headers, include_dirs, lang)
- ok = True
- except CompileError:
- ok = False
-
- log.info(ok and "success!" or "failure.")
- self._clean()
- return ok
-
- def try_link(self, body, headers=None, include_dirs=None, libraries=None,
- library_dirs=None, lang="c"):
- """Try to compile and link a source file, built from 'body' and
- 'headers', to executable form. Return true on success, false
- otherwise.
- """
- from distutils.ccompiler import CompileError, LinkError
- self._check_compiler()
- try:
- self._link(body, headers, include_dirs,
- libraries, library_dirs, lang)
- ok = True
- except (CompileError, LinkError):
- ok = False
-
- log.info(ok and "success!" or "failure.")
- self._clean()
- return ok
-
- def try_run(self, body, headers=None, include_dirs=None, libraries=None,
- library_dirs=None, lang="c"):
- """Try to compile, link to an executable, and run a program
- built from 'body' and 'headers'. Return true on success, false
- otherwise.
- """
- from distutils.ccompiler import CompileError, LinkError
- self._check_compiler()
- try:
- src, obj, exe = self._link(body, headers, include_dirs,
- libraries, library_dirs, lang)
- self.spawn([exe])
- ok = True
- except (CompileError, LinkError, DistutilsExecError):
- ok = False
-
- log.info(ok and "success!" or "failure.")
- self._clean()
- return ok
-
-
- # -- High-level methods --------------------------------------------
- # (these are the ones that are actually likely to be useful
- # when implementing a real-world config command!)
-
- def check_func(self, func, headers=None, include_dirs=None,
- libraries=None, library_dirs=None, decl=0, call=0):
- """Determine if function 'func' is available by constructing a
- source file that refers to 'func', and compiles and links it.
- If everything succeeds, returns true; otherwise returns false.
-
- The constructed source file starts out by including the header
- files listed in 'headers'. If 'decl' is true, it then declares
- 'func' (as "int func()"); you probably shouldn't supply 'headers'
- and set 'decl' true in the same call, or you might get errors about
-        conflicting declarations for 'func'. Finally, the constructed
- 'main()' function either references 'func' or (if 'call' is true)
- calls it. 'libraries' and 'library_dirs' are used when
- linking.
- """
- self._check_compiler()
- body = []
- if decl:
- body.append("int %s ();" % func)
- body.append("int main () {")
- if call:
- body.append(" %s();" % func)
- else:
- body.append(" %s;" % func)
- body.append("}")
- body = "\n".join(body) + "\n"
-
- return self.try_link(body, headers, include_dirs,
- libraries, library_dirs)
-
- def check_lib(self, library, library_dirs=None, headers=None,
- include_dirs=None, other_libraries=[]):
- """Determine if 'library' is available to be linked against,
- without actually checking that any particular symbols are provided
- by it. 'headers' will be used in constructing the source file to
- be compiled, but the only effect of this is to check if all the
- header files listed are available. Any libraries listed in
- 'other_libraries' will be included in the link, in case 'library'
- has symbols that depend on other libraries.
- """
- self._check_compiler()
- return self.try_link("int main (void) { }", headers, include_dirs,
- [library] + other_libraries, library_dirs)
-
- def check_header(self, header, include_dirs=None, library_dirs=None,
- lang="c"):
- """Determine if the system header file named by 'header_file'
- exists and can be found by the preprocessor; return true if so,
- false otherwise.
- """
- return self.try_cpp(body="/* No body */", headers=[header],
- include_dirs=include_dirs)
-
-def dump_file(filename, head=None):
- """Dumps a file content into log.info.
-
- If head is not None, will be dumped before the file content.
- """
- if head is None:
- log.info('%s', filename)
- else:
- log.info(head)
- file = open(filename)
- try:
- log.info(file.read())
- finally:
- file.close()
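For reference, the probe source that the deleted check_func() compiles is easy to reconstruct. A minimal sketch of the decl=1, call=1 case (the function name is an arbitrary example):

    func = 'pow'
    body = '\n'.join([
        "int %s ();" % func,   # forward declaration (decl=1)
        "int main () {",
        "  %s();" % func,      # reference via a call (call=1)
        "}",
    ]) + '\n'
    print(body)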
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/install.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/install.py
deleted file mode 100644
index 18b352fac06..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/install.py
+++ /dev/null
@@ -1,721 +0,0 @@
-"""distutils.command.install
-
-Implements the Distutils 'install' command."""
-
-import sys
-import os
-import contextlib
-import sysconfig
-import itertools
-
-from distutils import log
-from distutils.core import Command
-from distutils.debug import DEBUG
-from distutils.sysconfig import get_config_vars
-from distutils.errors import DistutilsPlatformError
-from distutils.file_util import write_file
-from distutils.util import convert_path, subst_vars, change_root
-from distutils.util import get_platform
-from distutils.errors import DistutilsOptionError
-
-from site import USER_BASE
-from site import USER_SITE
-HAS_USER_SITE = True
-
-WINDOWS_SCHEME = {
- 'purelib': '{base}/Lib/site-packages',
- 'platlib': '{base}/Lib/site-packages',
- 'headers': '{base}/Include/{dist_name}',
- 'scripts': '{base}/Scripts',
- 'data' : '{base}',
-}
-
-INSTALL_SCHEMES = {
- 'posix_prefix': {
- 'purelib': '{base}/lib/{implementation_lower}{py_version_short}/site-packages',
- 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}/site-packages',
- 'headers': '{base}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}',
- 'scripts': '{base}/bin',
- 'data' : '{base}',
- },
- 'posix_home': {
- 'purelib': '{base}/lib/{implementation_lower}',
- 'platlib': '{base}/{platlibdir}/{implementation_lower}',
- 'headers': '{base}/include/{implementation_lower}/{dist_name}',
- 'scripts': '{base}/bin',
- 'data' : '{base}',
- },
- 'nt': WINDOWS_SCHEME,
- 'pypy': {
- 'purelib': '{base}/site-packages',
- 'platlib': '{base}/site-packages',
- 'headers': '{base}/include/{dist_name}',
- 'scripts': '{base}/bin',
- 'data' : '{base}',
- },
- 'pypy_nt': {
- 'purelib': '{base}/site-packages',
- 'platlib': '{base}/site-packages',
- 'headers': '{base}/include/{dist_name}',
- 'scripts': '{base}/Scripts',
- 'data' : '{base}',
- },
- }
-
-# user site schemes
-if HAS_USER_SITE:
- INSTALL_SCHEMES['nt_user'] = {
- 'purelib': '{usersite}',
- 'platlib': '{usersite}',
- 'headers': '{userbase}/{implementation}{py_version_nodot}/Include/{dist_name}',
- 'scripts': '{userbase}/{implementation}{py_version_nodot}/Scripts',
- 'data' : '{userbase}',
- }
-
- INSTALL_SCHEMES['posix_user'] = {
- 'purelib': '{usersite}',
- 'platlib': '{usersite}',
- 'headers':
- '{userbase}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}',
- 'scripts': '{userbase}/bin',
- 'data' : '{userbase}',
- }
-
-# The keys to an installation scheme; if any new types of files are to be
-# installed, be sure to add an entry to every installation scheme above,
-# and to SCHEME_KEYS here.
-SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
-
-
-def _load_sysconfig_schemes():
- with contextlib.suppress(AttributeError):
- return {
- scheme: sysconfig.get_paths(scheme, expand=False)
- for scheme in sysconfig.get_scheme_names()
- }
-
-
-def _load_schemes():
- """
- Extend default schemes with schemes from sysconfig.
- """
-
- sysconfig_schemes = _load_sysconfig_schemes() or {}
-
- return {
- scheme: {
- **INSTALL_SCHEMES.get(scheme, {}),
- **sysconfig_schemes.get(scheme, {}),
- }
- for scheme in set(itertools.chain(INSTALL_SCHEMES, sysconfig_schemes))
- }
-
-
-def _get_implementation():
- if hasattr(sys, 'pypy_version_info'):
- return 'PyPy'
- else:
- return 'Python'
-
-
-class install(Command):
-
- description = "install everything from build directory"
-
- user_options = [
- # Select installation scheme and set base director(y|ies)
- ('prefix=', None,
- "installation prefix"),
- ('exec-prefix=', None,
- "(Unix only) prefix for platform-specific files"),
- ('home=', None,
- "(Unix only) home directory to install under"),
-
- # Or, just set the base director(y|ies)
- ('install-base=', None,
- "base installation directory (instead of --prefix or --home)"),
- ('install-platbase=', None,
- "base installation directory for platform-specific files " +
- "(instead of --exec-prefix or --home)"),
- ('root=', None,
- "install everything relative to this alternate root directory"),
-
- # Or, explicitly set the installation scheme
- ('install-purelib=', None,
- "installation directory for pure Python module distributions"),
- ('install-platlib=', None,
- "installation directory for non-pure module distributions"),
- ('install-lib=', None,
- "installation directory for all module distributions " +
- "(overrides --install-purelib and --install-platlib)"),
-
- ('install-headers=', None,
- "installation directory for C/C++ headers"),
- ('install-scripts=', None,
- "installation directory for Python scripts"),
- ('install-data=', None,
- "installation directory for data files"),
-
- # Byte-compilation options -- see install_lib.py for details, as
- # these are duplicated from there (but only install_lib does
- # anything with them).
- ('compile', 'c', "compile .py to .pyc [default]"),
- ('no-compile', None, "don't compile .py files"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
-
- # Miscellaneous control options
- ('force', 'f',
- "force installation (overwrite any existing files)"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
-
- # Where to install documentation (eventually!)
- #('doc-format=', None, "format of documentation to generate"),
- #('install-man=', None, "directory for Unix man pages"),
- #('install-html=', None, "directory for HTML documentation"),
- #('install-info=', None, "directory for GNU info files"),
-
- ('record=', None,
- "filename in which to record list of installed files"),
- ]
-
- boolean_options = ['compile', 'force', 'skip-build']
-
- if HAS_USER_SITE:
- user_options.append(('user', None,
- "install in user site-package '%s'" % USER_SITE))
- boolean_options.append('user')
-
- negative_opt = {'no-compile' : 'compile'}
-
-
- def initialize_options(self):
- """Initializes options."""
- # High-level options: these select both an installation base
- # and scheme.
- self.prefix = None
- self.exec_prefix = None
- self.home = None
- self.user = 0
-
- # These select only the installation base; it's up to the user to
- # specify the installation scheme (currently, that means supplying
- # the --install-{platlib,purelib,scripts,data} options).
- self.install_base = None
- self.install_platbase = None
- self.root = None
-
- # These options are the actual installation directories; if not
- # supplied by the user, they are filled in using the installation
- # scheme implied by prefix/exec-prefix/home and the contents of
- # that installation scheme.
- self.install_purelib = None # for pure module distributions
- self.install_platlib = None # non-pure (dists w/ extensions)
- self.install_headers = None # for C/C++ headers
- self.install_lib = None # set to either purelib or platlib
- self.install_scripts = None
- self.install_data = None
- self.install_userbase = USER_BASE
- self.install_usersite = USER_SITE
-
- self.compile = None
- self.optimize = None
-
- # Deprecated
- # These two are for putting non-packagized distributions into their
- # own directory and creating a .pth file if it makes sense.
- # 'extra_path' comes from the setup file; 'install_path_file' can
- # be turned off if it makes no sense to install a .pth file. (But
- # better to install it uselessly than to guess wrong and not
- # install it when it's necessary and would be used!) Currently,
- # 'install_path_file' is always true unless some outsider meddles
- # with it.
- self.extra_path = None
- self.install_path_file = 1
-
- # 'force' forces installation, even if target files are not
- # out-of-date. 'skip_build' skips running the "build" command,
- # handy if you know it's not necessary. 'warn_dir' (which is *not*
- # a user option, it's just there so the bdist_* commands can turn
- # it off) determines whether we warn about installing to a
- # directory not in sys.path.
- self.force = 0
- self.skip_build = 0
- self.warn_dir = 1
-
- # These are only here as a conduit from the 'build' command to the
- # 'install_*' commands that do the real work. ('build_base' isn't
- # actually used anywhere, but it might be useful in future.) They
- # are not user options, because if the user told the install
- # command where the build directory is, that wouldn't affect the
- # build command.
- self.build_base = None
- self.build_lib = None
-
- # Not defined yet because we don't know anything about
- # documentation yet.
- #self.install_man = None
- #self.install_html = None
- #self.install_info = None
-
- self.record = None
-
-
- # -- Option finalizing methods -------------------------------------
- # (This is rather more involved than for most commands,
- # because this is where the policy for installing third-
- # party Python modules on various platforms given a wide
- # array of user input is decided. Yes, it's quite complex!)
-
- def finalize_options(self):
- """Finalizes options."""
- # This method (and its helpers, like 'finalize_unix()',
- # 'finalize_other()', and 'select_scheme()') is where the default
- # installation directories for modules, extension modules, and
- # anything else we care to install from a Python module distribution
- # are determined. Thus, this code makes a pretty important policy
- # statement about how third-party stuff is added to a Python
- # installation! Note that the actual work of installation is done
- # by the relatively simple 'install_*' commands; they just take
- # their orders from the installation directory options determined
- # here.
-
- # Check for errors/inconsistencies in the options; first, stuff
- # that's wrong on any platform.
-
- if ((self.prefix or self.exec_prefix or self.home) and
- (self.install_base or self.install_platbase)):
- raise DistutilsOptionError(
- "must supply either prefix/exec-prefix/home or " +
- "install-base/install-platbase -- not both")
-
- if self.home and (self.prefix or self.exec_prefix):
- raise DistutilsOptionError(
- "must supply either home or prefix/exec-prefix -- not both")
-
- if self.user and (self.prefix or self.exec_prefix or self.home or
- self.install_base or self.install_platbase):
- raise DistutilsOptionError("can't combine user with prefix, "
- "exec_prefix/home, or install_(plat)base")
-
- # Next, stuff that's wrong (or dubious) only on certain platforms.
- if os.name != "posix":
- if self.exec_prefix:
- self.warn("exec-prefix option ignored on this platform")
- self.exec_prefix = None
-
- # Now the interesting logic -- so interesting that we farm it out
- # to other methods. The goal of these methods is to set the final
- # values for the install_{lib,scripts,data,...} options, using as
- # input a heady brew of prefix, exec_prefix, home, install_base,
- # install_platbase, user-supplied versions of
- # install_{purelib,platlib,lib,scripts,data,...}, and the
- # install schemes. Phew!
-
- self.dump_dirs("pre-finalize_{unix,other}")
-
- if os.name == 'posix':
- self.finalize_unix()
- else:
- self.finalize_other()
-
- self.dump_dirs("post-finalize_{unix,other}()")
-
- # Expand configuration variables, tilde, etc. in self.install_base
- # and self.install_platbase -- that way, we can use $base or
- # $platbase in the other installation directories and not worry
- # about needing recursive variable expansion (shudder).
-
- py_version = sys.version.split()[0]
- (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
- try:
- abiflags = sys.abiflags
- except AttributeError:
- # sys.abiflags may not be defined on all platforms.
- abiflags = ''
- self.config_vars = {'dist_name': self.distribution.get_name(),
- 'dist_version': self.distribution.get_version(),
- 'dist_fullname': self.distribution.get_fullname(),
- 'py_version': py_version,
- 'py_version_short': '%d.%d' % sys.version_info[:2],
- 'py_version_nodot': '%d%d' % sys.version_info[:2],
- 'sys_prefix': prefix,
- 'prefix': prefix,
- 'sys_exec_prefix': exec_prefix,
- 'exec_prefix': exec_prefix,
- 'abiflags': abiflags,
- 'platlibdir': getattr(sys, 'platlibdir', 'lib'),
- 'implementation_lower': _get_implementation().lower(),
- 'implementation': _get_implementation(),
- }
-
- if HAS_USER_SITE:
- self.config_vars['userbase'] = self.install_userbase
- self.config_vars['usersite'] = self.install_usersite
-
- self.expand_basedirs()
-
- self.dump_dirs("post-expand_basedirs()")
-
- # Now define config vars for the base directories so we can expand
- # everything else.
- self.config_vars['base'] = self.install_base
- self.config_vars['platbase'] = self.install_platbase
- self.config_vars['installed_base'] = (
- sysconfig.get_config_vars()['installed_base'])
-
- if DEBUG:
- from pprint import pprint
- print("config vars:")
- pprint(self.config_vars)
-
- # Expand "~" and configuration variables in the installation
- # directories.
- self.expand_dirs()
-
- self.dump_dirs("post-expand_dirs()")
-
- # Create directories in the home dir:
- if self.user:
- self.create_home_path()
-
- # Pick the actual directory to install all modules to: either
- # install_purelib or install_platlib, depending on whether this
- # module distribution is pure or not. Of course, if the user
- # already specified install_lib, use their selection.
- if self.install_lib is None:
- if self.distribution.has_ext_modules(): # has extensions: non-pure
- self.install_lib = self.install_platlib
- else:
- self.install_lib = self.install_purelib
-
-
- # Convert directories from Unix /-separated syntax to the local
- # convention.
- self.convert_paths('lib', 'purelib', 'platlib',
- 'scripts', 'data', 'headers',
- 'userbase', 'usersite')
-
- # Deprecated
- # Well, we're not actually fully completely finalized yet: we still
- # have to deal with 'extra_path', which is the hack for allowing
- # non-packagized module distributions (hello, Numerical Python!) to
- # get their own directories.
- self.handle_extra_path()
- self.install_libbase = self.install_lib # needed for .pth file
- self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
-
- # If a new root directory was supplied, make all the installation
- # dirs relative to it.
- if self.root is not None:
- self.change_roots('libbase', 'lib', 'purelib', 'platlib',
- 'scripts', 'data', 'headers')
-
- self.dump_dirs("after prepending root")
-
- # Find out the build directories, i.e. where to install from.
- self.set_undefined_options('build',
- ('build_base', 'build_base'),
- ('build_lib', 'build_lib'))
-
- # Punt on doc directories for now -- after all, we're punting on
- # documentation completely!
-
- def dump_dirs(self, msg):
- """Dumps the list of user options."""
- if not DEBUG:
- return
- from distutils.fancy_getopt import longopt_xlate
- log.debug(msg + ":")
- for opt in self.user_options:
- opt_name = opt[0]
- if opt_name[-1] == "=":
- opt_name = opt_name[0:-1]
- if opt_name in self.negative_opt:
- opt_name = self.negative_opt[opt_name]
- opt_name = opt_name.translate(longopt_xlate)
- val = not getattr(self, opt_name)
- else:
- opt_name = opt_name.translate(longopt_xlate)
- val = getattr(self, opt_name)
- log.debug(" %s: %s", opt_name, val)
-
- def finalize_unix(self):
- """Finalizes options for posix platforms."""
- if self.install_base is not None or self.install_platbase is not None:
- if ((self.install_lib is None and
- self.install_purelib is None and
- self.install_platlib is None) or
- self.install_headers is None or
- self.install_scripts is None or
- self.install_data is None):
- raise DistutilsOptionError(
- "install-base or install-platbase supplied, but "
- "installation scheme is incomplete")
- return
-
- if self.user:
- if self.install_userbase is None:
- raise DistutilsPlatformError(
- "User base directory is not specified")
- self.install_base = self.install_platbase = self.install_userbase
- self.select_scheme("posix_user")
- elif self.home is not None:
- self.install_base = self.install_platbase = self.home
- self.select_scheme("posix_home")
- else:
- if self.prefix is None:
- if self.exec_prefix is not None:
- raise DistutilsOptionError(
- "must not supply exec-prefix without prefix")
-
- # Allow Fedora to add components to the prefix
- _prefix_addition = getattr(sysconfig, '_prefix_addition', "")
-
- self.prefix = (
- os.path.normpath(sys.prefix) + _prefix_addition)
- self.exec_prefix = (
- os.path.normpath(sys.exec_prefix) + _prefix_addition)
-
- else:
- if self.exec_prefix is None:
- self.exec_prefix = self.prefix
-
- self.install_base = self.prefix
- self.install_platbase = self.exec_prefix
- self.select_scheme("posix_prefix")
-
- def finalize_other(self):
- """Finalizes options for non-posix platforms"""
- if self.user:
- if self.install_userbase is None:
- raise DistutilsPlatformError(
- "User base directory is not specified")
- self.install_base = self.install_platbase = self.install_userbase
- self.select_scheme(os.name + "_user")
- elif self.home is not None:
- self.install_base = self.install_platbase = self.home
- self.select_scheme("posix_home")
- else:
- if self.prefix is None:
- self.prefix = os.path.normpath(sys.prefix)
-
- self.install_base = self.install_platbase = self.prefix
- try:
- self.select_scheme(os.name)
- except KeyError:
- raise DistutilsPlatformError(
- "I don't know how to install stuff on '%s'" % os.name)
-
- def select_scheme(self, name):
- """Sets the install directories by applying the install schemes."""
- # it's the caller's problem if they supply a bad name!
- if (hasattr(sys, 'pypy_version_info') and
- sys.version_info < (3, 8) and
- not name.endswith(('_user', '_home'))):
- if os.name == 'nt':
- name = 'pypy_nt'
- else:
- name = 'pypy'
- scheme = _load_schemes()[name]
- for key in SCHEME_KEYS:
- attrname = 'install_' + key
- if getattr(self, attrname) is None:
- setattr(self, attrname, scheme[key])
-
- def _expand_attrs(self, attrs):
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- if os.name == 'posix' or os.name == 'nt':
- val = os.path.expanduser(val)
- val = subst_vars(val, self.config_vars)
- setattr(self, attr, val)
-
- def expand_basedirs(self):
- """Calls `os.path.expanduser` on install_base, install_platbase and
- root."""
- self._expand_attrs(['install_base', 'install_platbase', 'root'])
-
- def expand_dirs(self):
- """Calls `os.path.expanduser` on install dirs."""
- self._expand_attrs(['install_purelib', 'install_platlib',
- 'install_lib', 'install_headers',
- 'install_scripts', 'install_data',])
-
- def convert_paths(self, *names):
- """Call `convert_path` over `names`."""
- for name in names:
- attr = "install_" + name
- setattr(self, attr, convert_path(getattr(self, attr)))
-
- def handle_extra_path(self):
- """Set `path_file` and `extra_dirs` using `extra_path`."""
- if self.extra_path is None:
- self.extra_path = self.distribution.extra_path
-
- if self.extra_path is not None:
- log.warn(
- "Distribution option extra_path is deprecated. "
- "See issue27919 for details."
- )
- if isinstance(self.extra_path, str):
- self.extra_path = self.extra_path.split(',')
-
- if len(self.extra_path) == 1:
- path_file = extra_dirs = self.extra_path[0]
- elif len(self.extra_path) == 2:
- path_file, extra_dirs = self.extra_path
- else:
- raise DistutilsOptionError(
- "'extra_path' option must be a list, tuple, or "
- "comma-separated string with 1 or 2 elements")
-
- # convert to local form in case Unix notation used (as it
- # should be in setup scripts)
- extra_dirs = convert_path(extra_dirs)
- else:
- path_file = None
- extra_dirs = ''
-
- # XXX should we warn if path_file and not extra_dirs? (in which
- # case the path file would be harmless but pointless)
- self.path_file = path_file
- self.extra_dirs = extra_dirs
-
- def change_roots(self, *names):
- """Change the install directories pointed by name using root."""
- for name in names:
- attr = "install_" + name
- setattr(self, attr, change_root(self.root, getattr(self, attr)))
-
- def create_home_path(self):
- """Create directories under ~."""
- if not self.user:
- return
- home = convert_path(os.path.expanduser("~"))
- for name, path in self.config_vars.items():
- if path.startswith(home) and not os.path.isdir(path):
- self.debug_print("os.makedirs('%s', 0o700)" % path)
- os.makedirs(path, 0o700)
-
- # -- Command execution methods -------------------------------------
-
- def run(self):
- """Runs the command."""
- # Obviously have to build before we can install
- if not self.skip_build:
- self.run_command('build')
- # If we built for any other platform, we can't install.
- build_plat = self.distribution.get_command_obj('build').plat_name
- # check warn_dir - it is a clue that the 'install' is happening
- # internally, and not to sys.path, so we don't check the platform
- # matches what we are running.
- if self.warn_dir and build_plat != get_platform():
- raise DistutilsPlatformError("Can't install when "
- "cross-compiling")
-
- # Run all sub-commands (at least those that need to be run)
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- if self.path_file:
- self.create_path_file()
-
- # write list of installed files, if requested.
- if self.record:
- outputs = self.get_outputs()
- if self.root: # strip any package prefix
- root_len = len(self.root)
- for counter in range(len(outputs)):
- outputs[counter] = outputs[counter][root_len:]
- self.execute(write_file,
- (self.record, outputs),
- "writing list of installed files to '%s'" %
- self.record)
-
- sys_path = map(os.path.normpath, sys.path)
- sys_path = map(os.path.normcase, sys_path)
- install_lib = os.path.normcase(os.path.normpath(self.install_lib))
- if (self.warn_dir and
- not (self.path_file and self.install_path_file) and
- install_lib not in sys_path):
- log.debug(("modules installed to '%s', which is not in "
- "Python's module search path (sys.path) -- "
- "you'll have to change the search path yourself"),
- self.install_lib)
-
- def create_path_file(self):
- """Creates the .pth file"""
- filename = os.path.join(self.install_libbase,
- self.path_file + ".pth")
- if self.install_path_file:
- self.execute(write_file,
- (filename, [self.extra_dirs]),
- "creating %s" % filename)
- else:
- self.warn("path file '%s' not created" % filename)
-
-
- # -- Reporting methods ---------------------------------------------
-
- def get_outputs(self):
- """Assembles the outputs of all the sub-commands."""
- outputs = []
- for cmd_name in self.get_sub_commands():
- cmd = self.get_finalized_command(cmd_name)
- # Add the contents of cmd.get_outputs(), ensuring
- # that outputs doesn't contain duplicate entries
- for filename in cmd.get_outputs():
- if filename not in outputs:
- outputs.append(filename)
-
- if self.path_file and self.install_path_file:
- outputs.append(os.path.join(self.install_libbase,
- self.path_file + ".pth"))
-
- return outputs
-
- def get_inputs(self):
- """Returns the inputs of all the sub-commands"""
- # XXX gee, this looks familiar ;-(
- inputs = []
- for cmd_name in self.get_sub_commands():
- cmd = self.get_finalized_command(cmd_name)
- inputs.extend(cmd.get_inputs())
-
- return inputs
-
- # -- Predicates for sub-command list -------------------------------
-
- def has_lib(self):
- """Returns true if the current distribution has any Python
- modules to install."""
- return (self.distribution.has_pure_modules() or
- self.distribution.has_ext_modules())
-
- def has_headers(self):
- """Returns true if the current distribution has any headers to
- install."""
- return self.distribution.has_headers()
-
- def has_scripts(self):
- """Returns true if the current distribution has any scripts to.
- install."""
- return self.distribution.has_scripts()
-
- def has_data(self):
- """Returns true if the current distribution has any data to.
- install."""
- return self.distribution.has_data_files()
-
- # 'sub_commands': a list of commands this command might have to run to
- # get its work done. See cmd.py for more info.
- sub_commands = [('install_lib', has_lib),
- ('install_headers', has_headers),
- ('install_scripts', has_scripts),
- ('install_data', has_data),
- ('install_egg_info', lambda self:True),
- ]
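
A sketch of the brace-template expansion these schemes go through; the
values below are illustrative, and str.format_map stands in for the
subst_vars()/config_vars machinery driven by finalize_options() above.

    # Hypothetical subset of self.config_vars:
    config_vars = {
        'base': '/usr/local',
        'implementation_lower': 'python',
        'py_version_short': '3.10',
    }
    template = '{base}/lib/{implementation_lower}{py_version_short}/site-packages'
    print(template.format_map(config_vars))
    # -> /usr/local/lib/python3.10/site-packages
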
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_data.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/install_data.py
deleted file mode 100644
index 947cd76a99e..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_data.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""distutils.command.install_data
-
-Implements the Distutils 'install_data' command, for installing
-platform-independent data files."""
-
-# contributed by Bastian Kleineidam
-
-import os
-from distutils.core import Command
-from distutils.util import change_root, convert_path
-
-class install_data(Command):
-
- description = "install data files"
-
- user_options = [
- ('install-dir=', 'd',
- "base directory for installing data files "
- "(default: installation base dir)"),
- ('root=', None,
- "install everything relative to this alternate root directory"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ]
-
- boolean_options = ['force']
-
- def initialize_options(self):
- self.install_dir = None
- self.outfiles = []
- self.root = None
- self.force = 0
- self.data_files = self.distribution.data_files
- self.warn_dir = 1
-
- def finalize_options(self):
- self.set_undefined_options('install',
- ('install_data', 'install_dir'),
- ('root', 'root'),
- ('force', 'force'),
- )
-
- def run(self):
- self.mkpath(self.install_dir)
- for f in self.data_files:
- if isinstance(f, str):
- # it's a simple file, so copy it
- f = convert_path(f)
- if self.warn_dir:
- self.warn("setup script did not provide a directory for "
- "'%s' -- installing right in '%s'" %
- (f, self.install_dir))
- (out, _) = self.copy_file(f, self.install_dir)
- self.outfiles.append(out)
- else:
- # it's a tuple with path to install to and a list of files
- dir = convert_path(f[0])
- if not os.path.isabs(dir):
- dir = os.path.join(self.install_dir, dir)
- elif self.root:
- dir = change_root(self.root, dir)
- self.mkpath(dir)
-
- if f[1] == []:
- # If there are no files listed, the user must be
- # trying to create an empty directory, so add the
- # directory to the list of output files.
- self.outfiles.append(dir)
- else:
- # Copy files, adding them to the list of output files.
- for data in f[1]:
- data = convert_path(data)
- (out, _) = self.copy_file(data, dir)
- self.outfiles.append(out)
-
- def get_inputs(self):
- return self.data_files or []
-
- def get_outputs(self):
- return self.outfiles
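
A sketch of a setup script exercising the data_files shapes handled by
run() above; the names and paths are placeholders and would have to
exist for a real build.

    from distutils.core import setup

    setup(
        name='demo',
        version='0.0',
        data_files=[
            'README.txt',                    # bare string: copied into install_dir
            ('share/demo', ['data/a.dat']),  # (directory, [files]) tuple
            ('share/empty', []),             # empty file list: directory only
        ],
    )
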
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_egg_info.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/install_egg_info.py
deleted file mode 100644
index adc0323f98f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_egg_info.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""distutils.command.install_egg_info
-
-Implements the Distutils 'install_egg_info' command, for installing
-a package's PKG-INFO metadata."""
-
-
-from distutils.cmd import Command
-from distutils import log, dir_util
-import os, sys, re
-
-class install_egg_info(Command):
- """Install an .egg-info file for the package"""
-
- description = "Install package's PKG-INFO metadata as an .egg-info file"
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ]
-
- def initialize_options(self):
- self.install_dir = None
-
- @property
- def basename(self):
- """
- Allow basename to be overridden by child class.
- Ref pypa/distutils#2.
- """
- return "%s-%s-py%d.%d.egg-info" % (
- to_filename(safe_name(self.distribution.get_name())),
- to_filename(safe_version(self.distribution.get_version())),
- *sys.version_info[:2]
- )
-
- def finalize_options(self):
- self.set_undefined_options('install_lib',('install_dir','install_dir'))
- self.target = os.path.join(self.install_dir, self.basename)
- self.outputs = [self.target]
-
- def run(self):
- target = self.target
- if os.path.isdir(target) and not os.path.islink(target):
- dir_util.remove_tree(target, dry_run=self.dry_run)
- elif os.path.exists(target):
- self.execute(os.unlink,(self.target,),"Removing "+target)
- elif not os.path.isdir(self.install_dir):
- self.execute(os.makedirs, (self.install_dir,),
- "Creating "+self.install_dir)
- log.info("Writing %s", target)
- if not self.dry_run:
- with open(target, 'w', encoding='UTF-8') as f:
- self.distribution.metadata.write_pkg_file(f)
-
- def get_outputs(self):
- return self.outputs
-
-
-# The following routines are taken from setuptools' pkg_resources module and
-# can be replaced by importing them from pkg_resources once it is included
-# in the stdlib.
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """Convert an arbitrary string to a standard version string
-
- Spaces become dots, and all other non-alphanumeric characters become
- dashes, with runs of multiple dashes condensed to a single dash.
- """
- version = version.replace(' ','.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
- return name.replace('-','_')
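
A behavior sketch for the three normalization helpers above; the expected
outputs follow directly from the regexes shown. The import assumes this
vendored module is importable (install_egg_info is not part of the stdlib
distutils).

    from distutils.command.install_egg_info import (
        safe_name, safe_version, to_filename,
    )

    print(safe_name('demo package'))    # non-alphanumeric/. runs -> '-': 'demo-package'
    print(safe_version('1.0 beta'))     # spaces -> '.': '1.0.beta'
    print(to_filename('demo-package'))  # '-' -> '_': 'demo_package'
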
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_headers.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/install_headers.py
deleted file mode 100644
index 9bb0b18dc0d..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_headers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""distutils.command.install_headers
-
-Implements the Distutils 'install_headers' command, to install C/C++ header
-files to the Python include directory."""
-
-from distutils.core import Command
-
-
-# XXX force is never used
-class install_headers(Command):
-
- description = "install C/C++ header files"
-
- user_options = [('install-dir=', 'd',
- "directory to install header files to"),
- ('force', 'f',
- "force installation (overwrite existing files)"),
- ]
-
- boolean_options = ['force']
-
- def initialize_options(self):
- self.install_dir = None
- self.force = 0
- self.outfiles = []
-
- def finalize_options(self):
- self.set_undefined_options('install',
- ('install_headers', 'install_dir'),
- ('force', 'force'))
-
-
- def run(self):
- headers = self.distribution.headers
- if not headers:
- return
-
- self.mkpath(self.install_dir)
- for header in headers:
- (out, _) = self.copy_file(header, self.install_dir)
- self.outfiles.append(out)
-
- def get_inputs(self):
- return self.distribution.headers or []
-
- def get_outputs(self):
- return self.outfiles
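
For reference, a minimal setup script feeding the command above; the
header path is illustrative.

    from distutils.core import setup

    setup(
        name='demo',
        version='0.0',
        headers=['include/demo.h'],  # copied by install_headers into install_dir
    )
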
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_lib.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/install_lib.py
deleted file mode 100644
index 6154cf09431..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_lib.py
+++ /dev/null
@@ -1,217 +0,0 @@
-"""distutils.command.install_lib
-
-Implements the Distutils 'install_lib' command
-(install all Python modules)."""
-
-import os
-import importlib.util
-import sys
-
-from distutils.core import Command
-from distutils.errors import DistutilsOptionError
-
-
-# Extension for Python source files.
-PYTHON_SOURCE_EXTENSION = ".py"
-
-class install_lib(Command):
-
- description = "install all Python modules (extensions and pure Python)"
-
- # The byte-compilation options are a tad confusing. Here are the
- # possible scenarios:
- # 1) no compilation at all (--no-compile --no-optimize)
- # 2) compile .pyc only (--compile --no-optimize; default)
- # 3) compile .pyc and "opt-1" .pyc (--compile --optimize)
- # 4) compile "opt-1" .pyc only (--no-compile --optimize)
- # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
- # 6) compile "opt-2" .pyc only (--no-compile --optimize-more)
- #
- # The UI for this is two options, 'compile' and 'optimize'.
- # 'compile' is strictly boolean, and only decides whether to
- # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
- # decides both whether to generate .pyc files and what level of
- # optimization to use.
-
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ('build-dir=','b', "build directory (where to install from)"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ('compile', 'c', "compile .py to .pyc [default]"),
- ('no-compile', None, "don't compile .py files"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('skip-build', None, "skip the build steps"),
- ]
-
- boolean_options = ['force', 'compile', 'skip-build']
- negative_opt = {'no-compile' : 'compile'}
-
- def initialize_options(self):
- # let the 'install' command dictate our installation directory
- self.install_dir = None
- self.build_dir = None
- self.force = 0
- self.compile = None
- self.optimize = None
- self.skip_build = None
-
- def finalize_options(self):
- # Get all the information we need to install pure Python modules
- # from the umbrella 'install' command -- build (source) directory,
- # install (target) directory, and whether to compile .py files.
- self.set_undefined_options('install',
- ('build_lib', 'build_dir'),
- ('install_lib', 'install_dir'),
- ('force', 'force'),
- ('compile', 'compile'),
- ('optimize', 'optimize'),
- ('skip_build', 'skip_build'),
- )
-
- if self.compile is None:
- self.compile = True
- if self.optimize is None:
- self.optimize = False
-
- if not isinstance(self.optimize, int):
- try:
- self.optimize = int(self.optimize)
- if self.optimize not in (0, 1, 2):
- raise AssertionError
- except (ValueError, AssertionError):
- raise DistutilsOptionError("optimize must be 0, 1, or 2")
-
- def run(self):
- # Make sure we have built everything we need first
- self.build()
-
- # Install everything: simply dump the entire contents of the build
- # directory to the installation directory (that's the beauty of
- # having a build directory!)
- outfiles = self.install()
-
- # (Optionally) compile .py to .pyc
- if outfiles is not None and self.distribution.has_pure_modules():
- self.byte_compile(outfiles)
-
- # -- Top-level worker functions ------------------------------------
- # (called from 'run()')
-
- def build(self):
- if not self.skip_build:
- if self.distribution.has_pure_modules():
- self.run_command('build_py')
- if self.distribution.has_ext_modules():
- self.run_command('build_ext')
-
- def install(self):
- if os.path.isdir(self.build_dir):
- outfiles = self.copy_tree(self.build_dir, self.install_dir)
- else:
- self.warn("'%s' does not exist -- no Python modules to install" %
- self.build_dir)
- return
- return outfiles
-
- def byte_compile(self, files):
- if sys.dont_write_bytecode:
- self.warn('byte-compiling is disabled, skipping.')
- return
-
- from distutils.util import byte_compile
-
- # Get the "--root" directory supplied to the "install" command,
- # and use it as a prefix to strip off the purported filename
- # encoded in bytecode files. This is far from complete, but it
- # should at least generate usable bytecode in RPM distributions.
- install_root = self.get_finalized_command('install').root
-
- if self.compile:
- byte_compile(files, optimize=0,
- force=self.force, prefix=install_root,
- dry_run=self.dry_run)
- if self.optimize > 0:
- byte_compile(files, optimize=self.optimize,
- force=self.force, prefix=install_root,
- verbose=self.verbose, dry_run=self.dry_run)
-
-
- # -- Utility methods -----------------------------------------------
-
- def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
- if not has_any:
- return []
-
- build_cmd = self.get_finalized_command(build_cmd)
- build_files = build_cmd.get_outputs()
- build_dir = getattr(build_cmd, cmd_option)
-
- prefix_len = len(build_dir) + len(os.sep)
- outputs = []
- for file in build_files:
- outputs.append(os.path.join(output_dir, file[prefix_len:]))
-
- return outputs
-
- def _bytecode_filenames(self, py_filenames):
- bytecode_files = []
- for py_file in py_filenames:
- # Since build_py handles package data installation, the
- # list of outputs can contain more than just .py files.
- # Make sure we only report bytecode for the .py files.
- ext = os.path.splitext(os.path.normcase(py_file))[1]
- if ext != PYTHON_SOURCE_EXTENSION:
- continue
- if self.compile:
- bytecode_files.append(importlib.util.cache_from_source(
- py_file, optimization=''))
- if self.optimize > 0:
- bytecode_files.append(importlib.util.cache_from_source(
- py_file, optimization=self.optimize))
-
- return bytecode_files
-
-
- # -- External interface --------------------------------------------
- # (called by outsiders)
-
- def get_outputs(self):
- """Return the list of files that would be installed if this command
- were actually run. Not affected by the "dry-run" flag or whether
- modules have actually been built yet.
- """
- pure_outputs = \
- self._mutate_outputs(self.distribution.has_pure_modules(),
- 'build_py', 'build_lib',
- self.install_dir)
- if self.compile:
- bytecode_outputs = self._bytecode_filenames(pure_outputs)
- else:
- bytecode_outputs = []
-
- ext_outputs = \
- self._mutate_outputs(self.distribution.has_ext_modules(),
- 'build_ext', 'build_lib',
- self.install_dir)
-
- return pure_outputs + bytecode_outputs + ext_outputs
-
- def get_inputs(self):
- """Get the list of files that are input to this command, ie. the
- files that get installed as they are named in the build tree.
- The files in this list correspond one-to-one to the output
- filenames returned by 'get_outputs()'.
- """
- inputs = []
-
- if self.distribution.has_pure_modules():
- build_py = self.get_finalized_command('build_py')
- inputs.extend(build_py.get_outputs())
-
- if self.distribution.has_ext_modules():
- build_ext = self.get_finalized_command('build_ext')
- inputs.extend(build_ext.get_outputs())
-
- return inputs
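
What _bytecode_filenames() computes, shown directly through
importlib.util.cache_from_source(); the module path and the cache tags
in the comments are illustrative and depend on the interpreter.

    import importlib.util

    # optimization='' yields the plain .pyc path used when self.compile is set:
    print(importlib.util.cache_from_source('pkg/mod.py', optimization=''))
    # e.g. pkg/__pycache__/mod.cpython-310.pyc

    # optimization=2 yields the "opt-2" variant used when self.optimize > 0:
    print(importlib.util.cache_from_source('pkg/mod.py', optimization=2))
    # e.g. pkg/__pycache__/mod.cpython-310.opt-2.pyc
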
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_scripts.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/install_scripts.py
deleted file mode 100644
index 31a1130ee54..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/install_scripts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""distutils.command.install_scripts
-
-Implements the Distutils 'install_scripts' command, for installing
-Python scripts."""
-
-# contributed by Bastian Kleineidam
-
-import os
-from distutils.core import Command
-from distutils import log
-from stat import ST_MODE
-
-
-class install_scripts(Command):
-
- description = "install scripts (Python or otherwise)"
-
- user_options = [
- ('install-dir=', 'd', "directory to install scripts to"),
- ('build-dir=','b', "build directory (where to install from)"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ('skip-build', None, "skip the build steps"),
- ]
-
- boolean_options = ['force', 'skip-build']
-
- def initialize_options(self):
- self.install_dir = None
- self.force = 0
- self.build_dir = None
- self.skip_build = None
-
- def finalize_options(self):
- self.set_undefined_options('build', ('build_scripts', 'build_dir'))
- self.set_undefined_options('install',
- ('install_scripts', 'install_dir'),
- ('force', 'force'),
- ('skip_build', 'skip_build'),
- )
-
- def run(self):
- if not self.skip_build:
- self.run_command('build_scripts')
- self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
- if os.name == 'posix':
- # Set the executable bits (owner, group, and world) on
- # all the scripts we just installed.
- for file in self.get_outputs():
- if self.dry_run:
- log.info("changing mode of %s", file)
- else:
- mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
- log.info("changing mode of %s to %o", file, mode)
- os.chmod(file, mode)
-
- def get_inputs(self):
- return self.distribution.scripts or []
-
- def get_outputs(self):
- return self.outfiles or []
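
The permission arithmetic applied in run() above, on one example mode.

    # 0o640 is an arbitrary starting mode; OR-ing in 0o555 adds read and
    # execute for owner, group, and world; & 0o7777 keeps permission bits only.
    mode = (0o640 | 0o555) & 0o7777
    print(oct(mode))  # 0o755
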
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/py37compat.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/py37compat.py
deleted file mode 100644
index 754715a5084..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/py37compat.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import sys
-
-
-def _pythonlib_compat():
- """
- On Python 3.7 and earlier, distutils would include the Python
- library. See pypa/distutils#9.
- """
- from distutils import sysconfig
- if not sysconfig.get_config_var('Py_ENABLED_SHARED'):
- return
-
- yield 'python{}.{}{}'.format(
- sys.hexversion >> 24,
- (sys.hexversion >> 16) & 0xff,
- sysconfig.get_config_var('ABIFLAGS'),
- )
-
-
-def compose(f1, f2):
- return lambda *args, **kwargs: f1(f2(*args, **kwargs))
-
-
-pythonlib = (
- compose(list, _pythonlib_compat)
- if sys.version_info < (3, 8)
- and sys.platform != 'darwin'
- and sys.platform[:3] != 'aix'
- else list
-)
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/register.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/register.py
deleted file mode 100644
index 0fac94e9e54..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/register.py
+++ /dev/null
@@ -1,304 +0,0 @@
-"""distutils.command.register
-
-Implements the Distutils 'register' command (register with the repository).
-"""
-
-# created 2002/10/21, Richard Jones
-
-import getpass
-import io
-import urllib.parse, urllib.request
-from warnings import warn
-
-from distutils.core import PyPIRCCommand
-from distutils.errors import *
-from distutils import log
-
-class register(PyPIRCCommand):
-
- description = ("register the distribution with the Python package index")
- user_options = PyPIRCCommand.user_options + [
- ('list-classifiers', None,
- 'list the valid Trove classifiers'),
- ('strict', None,
- 'stop the registration if the metadata is not fully compliant')
- ]
- boolean_options = PyPIRCCommand.boolean_options + [
- 'verify', 'list-classifiers', 'strict']
-
- sub_commands = [('check', lambda self: True)]
-
- def initialize_options(self):
- PyPIRCCommand.initialize_options(self)
- self.list_classifiers = 0
- self.strict = 0
-
- def finalize_options(self):
- PyPIRCCommand.finalize_options(self)
- # setting options for the `check` subcommand
- check_options = {'strict': ('register', self.strict),
- 'restructuredtext': ('register', 1)}
- self.distribution.command_options['check'] = check_options
-
- def run(self):
- self.finalize_options()
- self._set_config()
-
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- if self.dry_run:
- self.verify_metadata()
- elif self.list_classifiers:
- self.classifiers()
- else:
- self.send_metadata()
-
- def check_metadata(self):
- """Deprecated API."""
- warn("distutils.command.register.check_metadata is deprecated, \
- use the check command instead", PendingDeprecationWarning)
- check = self.distribution.get_command_obj('check')
- check.ensure_finalized()
- check.strict = self.strict
- check.restructuredtext = 1
- check.run()
-
- def _set_config(self):
- ''' Read the configuration file and set attributes.
- '''
- config = self._read_pypirc()
- if config != {}:
- self.username = config['username']
- self.password = config['password']
- self.repository = config['repository']
- self.realm = config['realm']
- self.has_config = True
- else:
- if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
- raise ValueError('%s not found in .pypirc' % self.repository)
- if self.repository == 'pypi':
- self.repository = self.DEFAULT_REPOSITORY
- self.has_config = False
-
- def classifiers(self):
- ''' Fetch the list of classifiers from the server.
- '''
- url = self.repository+'?:action=list_classifiers'
- response = urllib.request.urlopen(url)
- log.info(self._read_pypi_response(response))
-
- def verify_metadata(self):
- ''' Send the metadata to the package index server to be checked.
- '''
- # send the info to the server and report the result
- (code, result) = self.post_to_server(self.build_post_data('verify'))
- log.info('Server response (%s): %s', code, result)
-
- def send_metadata(self):
- ''' Send the metadata to the package index server.
-
- Well, do the following:
- 1. figure out who the user is, and then
- 2. send the data as a Basic auth'ed POST.
-
- First we try to read the username/password from $HOME/.pypirc,
- which is a ConfigParser-formatted file with a section
- [distutils] containing username and password entries (both
- in clear text). Eg:
-
- [distutils]
- index-servers =
- pypi
-
- [pypi]
- username: fred
- password: sekrit
-
- Otherwise, to figure out who the user is, we offer the user three
- choices:
-
- 1. use existing login,
- 2. register as a new user, or
- 3. set the password to a random string and email the user.
-
- '''
- # see if we can short-cut and get the username/password from the
- # config
- if self.has_config:
- choice = '1'
- username = self.username
- password = self.password
- else:
- choice = 'x'
- username = password = ''
-
- # get the user's login info
- choices = '1 2 3 4'.split()
- while choice not in choices:
- self.announce('''\
-We need to know who you are, so please choose either:
- 1. use your existing login,
- 2. register as a new user,
- 3. have the server generate a new password for you (and email it to you), or
- 4. quit
-Your selection [default 1]: ''', log.INFO)
- choice = input()
- if not choice:
- choice = '1'
- elif choice not in choices:
- print('Please choose one of the four options!')
-
- if choice == '1':
- # get the username and password
- while not username:
- username = input('Username: ')
- while not password:
- password = getpass.getpass('Password: ')
-
- # set up the authentication
- auth = urllib.request.HTTPPasswordMgr()
- host = urllib.parse.urlparse(self.repository)[1]
- auth.add_password(self.realm, host, username, password)
- # send the info to the server and report the result
- code, result = self.post_to_server(self.build_post_data('submit'),
- auth)
- self.announce('Server response (%s): %s' % (code, result),
- log.INFO)
-
- # possibly save the login
- if code == 200:
- if self.has_config:
- # sharing the password in the distribution instance
- # so the upload command can reuse it
- self.distribution.password = password
- else:
- self.announce(('I can store your PyPI login so future '
- 'submissions will be faster.'), log.INFO)
- self.announce('(the login will be stored in %s)' % \
- self._get_rc_file(), log.INFO)
- choice = 'X'
- while choice.lower() not in 'yn':
- choice = input('Save your login (y/N)?')
- if not choice:
- choice = 'n'
- if choice.lower() == 'y':
- self._store_pypirc(username, password)
-
- elif choice == '2':
- data = {':action': 'user'}
- data['name'] = data['password'] = data['email'] = ''
- data['confirm'] = None
- while not data['name']:
- data['name'] = input('Username: ')
- while data['password'] != data['confirm']:
- while not data['password']:
- data['password'] = getpass.getpass('Password: ')
- while not data['confirm']:
- data['confirm'] = getpass.getpass(' Confirm: ')
- if data['password'] != data['confirm']:
- data['password'] = ''
- data['confirm'] = None
- print("Password and confirm don't match!")
- while not data['email']:
- data['email'] = input(' EMail: ')
- code, result = self.post_to_server(data)
- if code != 200:
- log.info('Server response (%s): %s', code, result)
- else:
- log.info('You will receive an email shortly.')
- log.info(('Follow the instructions in it to '
- 'complete registration.'))
- elif choice == '3':
- data = {':action': 'password_reset'}
- data['email'] = ''
- while not data['email']:
- data['email'] = input('Your email address: ')
- code, result = self.post_to_server(data)
- log.info('Server response (%s): %s', code, result)
-
- def build_post_data(self, action):
- # figure the data to send - the metadata plus some additional
- # information used by the package server
- meta = self.distribution.metadata
- data = {
- ':action': action,
- 'metadata_version' : '1.0',
- 'name': meta.get_name(),
- 'version': meta.get_version(),
- 'summary': meta.get_description(),
- 'home_page': meta.get_url(),
- 'author': meta.get_contact(),
- 'author_email': meta.get_contact_email(),
- 'license': meta.get_licence(),
- 'description': meta.get_long_description(),
- 'keywords': meta.get_keywords(),
- 'platform': meta.get_platforms(),
- 'classifiers': meta.get_classifiers(),
- 'download_url': meta.get_download_url(),
- # PEP 314
- 'provides': meta.get_provides(),
- 'requires': meta.get_requires(),
- 'obsoletes': meta.get_obsoletes(),
- }
- if data['provides'] or data['requires'] or data['obsoletes']:
- data['metadata_version'] = '1.1'
- return data
-
- def post_to_server(self, data, auth=None):
- ''' Post a query to the server, and return a string response.
- '''
- if 'name' in data:
- self.announce('Registering %s to %s' % (data['name'],
- self.repository),
- log.INFO)
- # Build up the MIME payload for the urllib2 POST data
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = '\n--' + boundary
- end_boundary = sep_boundary + '--'
- body = io.StringIO()
- for key, value in data.items():
- # handle multiple entries for the same name
- if type(value) not in (type([]), type( () )):
- value = [value]
- for value in value:
- value = str(value)
- body.write(sep_boundary)
- body.write('\nContent-Disposition: form-data; name="%s"'%key)
- body.write("\n\n")
- body.write(value)
- if value and value[-1] == '\r':
- body.write('\n') # write an extra newline (lurve Macs)
- body.write(end_boundary)
- body.write("\n")
- body = body.getvalue().encode("utf-8")
-
- # build the Request
- headers = {
- 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
- 'Content-length': str(len(body))
- }
- req = urllib.request.Request(self.repository, body, headers)
-
- # handle HTTP and include the Basic Auth handler
- opener = urllib.request.build_opener(
- urllib.request.HTTPBasicAuthHandler(password_mgr=auth)
- )
- data = ''
- try:
- result = opener.open(req)
- except urllib.error.HTTPError as e:
- if self.show_response:
- data = e.fp.read()
- result = e.code, e.msg
- except urllib.error.URLError as e:
- result = 500, str(e)
- else:
- if self.show_response:
- data = self._read_pypi_response(result)
- result = 200, 'OK'
- if self.show_response:
- msg = '\n'.join(('-' * 75, data, '-' * 75))
- self.announce(msg, log.INFO)
- return result
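
A minimal re-creation of the multipart encoding loop in post_to_server(),
applied to a two-field payload; the boundary and field values are
placeholders.

    import io

    def encode_form(data, boundary='XXXXBOUNDARYXXXX'):
        sep = '\n--' + boundary
        end = sep + '--'
        body = io.StringIO()
        for key, value in data.items():
            # handle multiple entries for the same name, as the command does
            values = value if isinstance(value, (list, tuple)) else [value]
            for v in values:
                body.write(sep)
                body.write('\nContent-Disposition: form-data; name="%s"' % key)
                body.write('\n\n' + str(v))
        body.write(end + '\n')
        return body.getvalue().encode('utf-8')

    print(encode_form({':action': 'verify', 'name': 'demo'}).decode())
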
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/sdist.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/sdist.py
deleted file mode 100644
index b4996fcb1d2..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/sdist.py
+++ /dev/null
@@ -1,494 +0,0 @@
-"""distutils.command.sdist
-
-Implements the Distutils 'sdist' command (create a source distribution)."""
-
-import os
-import sys
-from glob import glob
-from warnings import warn
-
-from distutils.core import Command
-from distutils import dir_util
-from distutils import file_util
-from distutils import archive_util
-from distutils.text_file import TextFile
-from distutils.filelist import FileList
-from distutils import log
-from distutils.util import convert_path
-from distutils.errors import DistutilsTemplateError, DistutilsOptionError
-
-
-def show_formats():
- """Print all possible values for the 'formats' option (used by
- the "--help-formats" command-line option).
- """
- from distutils.fancy_getopt import FancyGetopt
- from distutils.archive_util import ARCHIVE_FORMATS
- formats = []
- for format in ARCHIVE_FORMATS.keys():
- formats.append(("formats=" + format, None,
- ARCHIVE_FORMATS[format][2]))
- formats.sort()
- FancyGetopt(formats).print_help(
- "List of available source distribution formats:")
-
-
-class sdist(Command):
-
- description = "create a source distribution (tarball, zip file, etc.)"
-
- def checking_metadata(self):
- """Callable used for the check sub-command.
-
- Placed here so user_options can view it."""
- return self.metadata_check
-
- user_options = [
- ('template=', 't',
- "name of manifest template file [default: MANIFEST.in]"),
- ('manifest=', 'm',
- "name of manifest file [default: MANIFEST]"),
- ('use-defaults', None,
- "include the default file set in the manifest "
- "[default; disable with --no-defaults]"),
- ('no-defaults', None,
- "don't include the default file set"),
- ('prune', None,
- "specifically exclude files/directories that should not be "
- "distributed (build tree, RCS/CVS dirs, etc.) "
- "[default; disable with --no-prune]"),
- ('no-prune', None,
- "don't automatically exclude anything"),
- ('manifest-only', 'o',
- "just regenerate the manifest and then stop "
- "(implies --force-manifest)"),
- ('force-manifest', 'f',
- "forcibly regenerate the manifest and carry on as usual. "
- "Deprecated: now the manifest is always regenerated."),
- ('formats=', None,
- "formats for source distribution (comma-separated list)"),
- ('keep-temp', 'k',
- "keep the distribution tree around after creating " +
- "archive file(s)"),
- ('dist-dir=', 'd',
- "directory to put the source distribution archive(s) in "
- "[default: dist]"),
- ('metadata-check', None,
- "Ensure that all required elements of meta-data "
- "are supplied. Warn if any missing. [default]"),
- ('owner=', 'u',
- "Owner name used when creating a tar file [default: current user]"),
- ('group=', 'g',
- "Group name used when creating a tar file [default: current group]"),
- ]
-
- boolean_options = ['use-defaults', 'prune',
- 'manifest-only', 'force-manifest',
- 'keep-temp', 'metadata-check']
-
- help_options = [
- ('help-formats', None,
- "list available distribution formats", show_formats),
- ]
-
- negative_opt = {'no-defaults': 'use-defaults',
- 'no-prune': 'prune' }
-
- sub_commands = [('check', checking_metadata)]
-
- READMES = ('README', 'README.txt', 'README.rst')
-
- def initialize_options(self):
- # 'template' and 'manifest' are, respectively, the names of
- # the manifest template and manifest file.
- self.template = None
- self.manifest = None
-
- # 'use_defaults': if true, we will include the default file set
- # in the manifest
- self.use_defaults = 1
- self.prune = 1
-
- self.manifest_only = 0
- self.force_manifest = 0
-
- self.formats = ['gztar']
- self.keep_temp = 0
- self.dist_dir = None
-
- self.archive_files = None
- self.metadata_check = 1
- self.owner = None
- self.group = None
-
- def finalize_options(self):
- if self.manifest is None:
- self.manifest = "MANIFEST"
- if self.template is None:
- self.template = "MANIFEST.in"
-
- self.ensure_string_list('formats')
-
- bad_format = archive_util.check_archive_formats(self.formats)
- if bad_format:
- raise DistutilsOptionError(
- "unknown archive format '%s'" % bad_format)
-
- if self.dist_dir is None:
- self.dist_dir = "dist"
-
- def run(self):
- # 'filelist' contains the list of files that will make up the
- # manifest
- self.filelist = FileList()
-
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- # Do whatever it takes to get the list of files to process
- # (process the manifest template, read an existing manifest,
- # whatever). File list is accumulated in 'self.filelist'.
- self.get_file_list()
-
- # If user just wanted us to regenerate the manifest, stop now.
- if self.manifest_only:
- return
-
- # Otherwise, go ahead and create the source distribution tarball,
- # or zipfile, or whatever.
- self.make_distribution()
-
- def check_metadata(self):
- """Deprecated API."""
- warn("distutils.command.sdist.check_metadata is deprecated, \
- use the check command instead", PendingDeprecationWarning)
- check = self.distribution.get_command_obj('check')
- check.ensure_finalized()
- check.run()
-
- def get_file_list(self):
- """Figure out the list of files to include in the source
- distribution, and put it in 'self.filelist'. This might involve
- reading the manifest template (and writing the manifest), or just
- reading the manifest, or just using the default file set -- it all
- depends on the user's options.
- """
- # New behavior when using a template: the file list is recalculated
- # every time, because even if MANIFEST.in and setup.py are unchanged
- # the user might have added files to the tree that need to be
- # included.
- #
- # This makes --force the default and only behavior with templates.
- template_exists = os.path.isfile(self.template)
- if not template_exists and self._manifest_is_not_generated():
- self.read_manifest()
- self.filelist.sort()
- self.filelist.remove_duplicates()
- return
-
- if not template_exists:
- self.warn(("manifest template '%s' does not exist " +
- "(using default file list)") %
- self.template)
- self.filelist.findall()
-
- if self.use_defaults:
- self.add_defaults()
-
- if template_exists:
- self.read_template()
-
- if self.prune:
- self.prune_file_list()
-
- self.filelist.sort()
- self.filelist.remove_duplicates()
- self.write_manifest()
-
- def add_defaults(self):
- """Add all the default files to self.filelist:
- - README or README.txt
- - setup.py
- - test/test*.py
- - all pure Python modules mentioned in setup script
- - all files pointed by package_data (build_py)
- - all files defined in data_files.
- - all files defined as scripts.
- - all C sources listed as part of extensions or C libraries
- in the setup script (doesn't catch C headers!)
- Warns if (README or README.txt) or setup.py are missing; everything
- else is optional.
- """
- self._add_defaults_standards()
- self._add_defaults_optional()
- self._add_defaults_python()
- self._add_defaults_data_files()
- self._add_defaults_ext()
- self._add_defaults_c_libs()
- self._add_defaults_scripts()
-
- @staticmethod
- def _cs_path_exists(fspath):
- """
- Case-sensitive path existence check
-
- >>> sdist._cs_path_exists(__file__)
- True
- >>> sdist._cs_path_exists(__file__.upper())
- False
- """
- if not os.path.exists(fspath):
- return False
- # make absolute so we always have a directory
- abspath = os.path.abspath(fspath)
- directory, filename = os.path.split(abspath)
- return filename in os.listdir(directory)
-
- def _add_defaults_standards(self):
- standards = [self.READMES, self.distribution.script_name]
- for fn in standards:
- if isinstance(fn, tuple):
- alts = fn
- got_it = False
- for fn in alts:
- if self._cs_path_exists(fn):
- got_it = True
- self.filelist.append(fn)
- break
-
- if not got_it:
- self.warn("standard file not found: should have one of " +
- ', '.join(alts))
- else:
- if self._cs_path_exists(fn):
- self.filelist.append(fn)
- else:
- self.warn("standard file '%s' not found" % fn)
-
- def _add_defaults_optional(self):
- optional = ['test/test*.py', 'setup.cfg']
- for pattern in optional:
- files = filter(os.path.isfile, glob(pattern))
- self.filelist.extend(files)
-
- def _add_defaults_python(self):
- # build_py is used to get:
- # - python modules
- # - files defined in package_data
- build_py = self.get_finalized_command('build_py')
-
- # getting python files
- if self.distribution.has_pure_modules():
- self.filelist.extend(build_py.get_source_files())
-
- # getting package_data files
- # (computed in build_py.data_files by build_py.finalize_options)
- for pkg, src_dir, build_dir, filenames in build_py.data_files:
- for filename in filenames:
- self.filelist.append(os.path.join(src_dir, filename))
-
- def _add_defaults_data_files(self):
- # getting distribution.data_files
- if self.distribution.has_data_files():
- for item in self.distribution.data_files:
- if isinstance(item, str):
- # plain file
- item = convert_path(item)
- if os.path.isfile(item):
- self.filelist.append(item)
- else:
- # a (dirname, filenames) tuple
- dirname, filenames = item
- for f in filenames:
- f = convert_path(f)
- if os.path.isfile(f):
- self.filelist.append(f)
-
- def _add_defaults_ext(self):
- if self.distribution.has_ext_modules():
- build_ext = self.get_finalized_command('build_ext')
- self.filelist.extend(build_ext.get_source_files())
-
- def _add_defaults_c_libs(self):
- if self.distribution.has_c_libraries():
- build_clib = self.get_finalized_command('build_clib')
- self.filelist.extend(build_clib.get_source_files())
-
- def _add_defaults_scripts(self):
- if self.distribution.has_scripts():
- build_scripts = self.get_finalized_command('build_scripts')
- self.filelist.extend(build_scripts.get_source_files())
-
- def read_template(self):
- """Read and parse manifest template file named by self.template.
-
- (usually "MANIFEST.in") The parsing and processing is done by
- 'self.filelist', which updates itself accordingly.
- """
- log.info("reading manifest template '%s'", self.template)
- template = TextFile(self.template, strip_comments=1, skip_blanks=1,
- join_lines=1, lstrip_ws=1, rstrip_ws=1,
- collapse_join=1)
-
- try:
- while True:
- line = template.readline()
- if line is None: # end of file
- break
-
- try:
- self.filelist.process_template_line(line)
- # the call above can raise a DistutilsTemplateError for
- # malformed lines, or a ValueError from the lower-level
- # convert_path function
- except (DistutilsTemplateError, ValueError) as msg:
- self.warn("%s, line %d: %s" % (template.filename,
- template.current_line,
- msg))
- finally:
- template.close()
-
- def prune_file_list(self):
- """Prune off branches that might slip into the file list as created
- by 'read_template()', but really don't belong there:
- * the build tree (typically "build")
- * the release tree itself (only an issue if we ran "sdist"
- previously with --keep-temp, or it aborted)
- * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
- """
- build = self.get_finalized_command('build')
- base_dir = self.distribution.get_fullname()
-
- self.filelist.exclude_pattern(None, prefix=build.build_base)
- self.filelist.exclude_pattern(None, prefix=base_dir)
-
- if sys.platform == 'win32':
- seps = r'/|\\'
- else:
- seps = '/'
-
- vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
- '_darcs']
- vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
- self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
-
- def write_manifest(self):
- """Write the file list in 'self.filelist' (presumably as filled in
- by 'add_defaults()' and 'read_template()') to the manifest file
- named by 'self.manifest'.
- """
- if self._manifest_is_not_generated():
- log.info("not writing to manually maintained "
- "manifest file '%s'" % self.manifest)
- return
-
- content = self.filelist.files[:]
- content.insert(0, '# file GENERATED by distutils, do NOT edit')
- self.execute(file_util.write_file, (self.manifest, content),
- "writing manifest file '%s'" % self.manifest)
-
- def _manifest_is_not_generated(self):
- # check for special comment used in 3.1.3 and higher
- if not os.path.isfile(self.manifest):
- return False
-
- fp = open(self.manifest)
- try:
- first_line = fp.readline()
- finally:
- fp.close()
- return first_line != '# file GENERATED by distutils, do NOT edit\n'
-
- def read_manifest(self):
- """Read the manifest file (named by 'self.manifest') and use it to
- fill in 'self.filelist', the list of files to include in the source
- distribution.
- """
- log.info("reading manifest file '%s'", self.manifest)
- with open(self.manifest) as manifest:
- for line in manifest:
- # ignore comments and blank lines
- line = line.strip()
- if line.startswith('#') or not line:
- continue
- self.filelist.append(line)
-
- def make_release_tree(self, base_dir, files):
- """Create the directory tree that will become the source
- distribution archive. All directories implied by the filenames in
- 'files' are created under 'base_dir', and then we hard link or copy
- (if hard linking is unavailable) those files into place.
- Essentially, this duplicates the developer's source tree, but in a
- directory named after the distribution, containing only the files
- to be distributed.
- """
- # Create all the directories under 'base_dir' necessary to
- # put 'files' there; the 'mkpath()' is just so we don't die
- # if the manifest happens to be empty.
- self.mkpath(base_dir)
- dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
-
- # And walk over the list of files, either making a hard link (if
- # os.link exists) to each one that doesn't already exist in its
- # corresponding location under 'base_dir', or copying each file
- # that's out-of-date in 'base_dir'. (Usually, all files will be
- # out-of-date, because by default we blow away 'base_dir' when
- # we're done making the distribution archives.)
-
- if hasattr(os, 'link'): # can make hard links on this system
- link = 'hard'
- msg = "making hard links in %s..." % base_dir
- else: # nope, have to copy
- link = None
- msg = "copying files to %s..." % base_dir
-
- if not files:
- log.warn("no files to distribute -- empty manifest?")
- else:
- log.info(msg)
- for file in files:
- if not os.path.isfile(file):
- log.warn("'%s' not a regular file -- skipping", file)
- else:
- dest = os.path.join(base_dir, file)
- self.copy_file(file, dest, link=link)
-
- self.distribution.metadata.write_pkg_info(base_dir)
-
- def make_distribution(self):
- """Create the source distribution(s). First, we create the release
- tree with 'make_release_tree()'; then, we create all required
- archive files (according to 'self.formats') from the release tree.
- Finally, we clean up by blowing away the release tree (unless
- 'self.keep_temp' is true). The list of archive files created is
- stored so it can be retrieved later by 'get_archive_files()'.
- """
- # Don't warn about missing meta-data here -- should be (and is!)
- # done elsewhere.
- base_dir = self.distribution.get_fullname()
- base_name = os.path.join(self.dist_dir, base_dir)
-
- self.make_release_tree(base_dir, self.filelist.files)
- archive_files = [] # remember names of files we create
-        # the plain tar archive must be created last: a compressed format
-        # may overwrite and then remove an intermediate .tar of the same name
- if 'tar' in self.formats:
- self.formats.append(self.formats.pop(self.formats.index('tar')))
-
- for fmt in self.formats:
- file = self.make_archive(base_name, fmt, base_dir=base_dir,
- owner=self.owner, group=self.group)
- archive_files.append(file)
- self.distribution.dist_files.append(('sdist', '', file))
-
- self.archive_files = archive_files
-
- if not self.keep_temp:
- dir_util.remove_tree(base_dir, dry_run=self.dry_run)
-
- def get_archive_files(self):
- """Return the list of archive files created when the command
- was run, or None if the command hasn't run yet.
- """
- return self.archive_files
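
For reference, the VCS-pruning pattern built in prune_file_list() above can be exercised on its own. A minimal standalone sketch (the sample paths are invented for illustration; FileList.exclude_pattern(..., is_regex=1) performs an equivalent re.search over each file):

    import re

    seps = '/'  # POSIX case; on win32 the command uses r'/|\\' instead
    vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs']
    vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)

    for path in ['src/pkg/module.py', '.git/config', 'docs/.svn/entries']:
        verdict = 'pruned' if re.search(vcs_ptrn, path) else 'kept'
        print(path, '->', verdict)
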
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/command/upload.py b/contrib/python/setuptools/py3/setuptools/_distutils/command/upload.py
deleted file mode 100644
index 95e9fda186f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/command/upload.py
+++ /dev/null
@@ -1,214 +0,0 @@
-"""
-distutils.command.upload
-
-Implements the Distutils 'upload' subcommand (upload package to a package
-index).
-"""
-
-import os
-import io
-import hashlib
-from base64 import standard_b64encode
-from urllib.request import urlopen, Request, HTTPError
-from urllib.parse import urlparse
-from distutils.errors import DistutilsError, DistutilsOptionError
-from distutils.core import PyPIRCCommand
-from distutils.spawn import spawn
-from distutils import log
-
-
-# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256)
-# https://bugs.python.org/issue40698
-_FILE_CONTENT_DIGESTS = {
- "md5_digest": getattr(hashlib, "md5", None),
- "sha256_digest": getattr(hashlib, "sha256", None),
- "blake2_256_digest": getattr(hashlib, "blake2b", None),
-}
-
-
-class upload(PyPIRCCommand):
-
- description = "upload binary package to PyPI"
-
- user_options = PyPIRCCommand.user_options + [
- ('sign', 's',
- 'sign files to upload using gpg'),
- ('identity=', 'i', 'GPG identity used to sign files'),
- ]
-
- boolean_options = PyPIRCCommand.boolean_options + ['sign']
-
- def initialize_options(self):
- PyPIRCCommand.initialize_options(self)
- self.username = ''
- self.password = ''
- self.show_response = 0
- self.sign = False
- self.identity = None
-
- def finalize_options(self):
- PyPIRCCommand.finalize_options(self)
- if self.identity and not self.sign:
- raise DistutilsOptionError(
- "Must use --sign for --identity to have meaning"
- )
- config = self._read_pypirc()
- if config != {}:
- self.username = config['username']
- self.password = config['password']
- self.repository = config['repository']
- self.realm = config['realm']
-
- # getting the password from the distribution
- # if previously set by the register command
- if not self.password and self.distribution.password:
- self.password = self.distribution.password
-
- def run(self):
- if not self.distribution.dist_files:
- msg = ("Must create and upload files in one command "
- "(e.g. setup.py sdist upload)")
- raise DistutilsOptionError(msg)
- for command, pyversion, filename in self.distribution.dist_files:
- self.upload_file(command, pyversion, filename)
-
- def upload_file(self, command, pyversion, filename):
- # Makes sure the repository URL is compliant
- schema, netloc, url, params, query, fragments = \
- urlparse(self.repository)
- if params or query or fragments:
- raise AssertionError("Incompatible url %s" % self.repository)
-
- if schema not in ('http', 'https'):
- raise AssertionError("unsupported schema " + schema)
-
- # Sign if requested
- if self.sign:
- gpg_args = ["gpg", "--detach-sign", "-a", filename]
- if self.identity:
- gpg_args[2:2] = ["--local-user", self.identity]
- spawn(gpg_args,
- dry_run=self.dry_run)
-
- # Fill in the data - send all the meta-data in case we need to
- # register a new release
- f = open(filename,'rb')
- try:
- content = f.read()
- finally:
- f.close()
-
- meta = self.distribution.metadata
- data = {
- # action
- ':action': 'file_upload',
- 'protocol_version': '1',
-
- # identify release
- 'name': meta.get_name(),
- 'version': meta.get_version(),
-
- # file content
- 'content': (os.path.basename(filename),content),
- 'filetype': command,
- 'pyversion': pyversion,
-
- # additional meta-data
- 'metadata_version': '1.0',
- 'summary': meta.get_description(),
- 'home_page': meta.get_url(),
- 'author': meta.get_contact(),
- 'author_email': meta.get_contact_email(),
- 'license': meta.get_licence(),
- 'description': meta.get_long_description(),
- 'keywords': meta.get_keywords(),
- 'platform': meta.get_platforms(),
- 'classifiers': meta.get_classifiers(),
- 'download_url': meta.get_download_url(),
- # PEP 314
- 'provides': meta.get_provides(),
- 'requires': meta.get_requires(),
- 'obsoletes': meta.get_obsoletes(),
- }
-
- data['comment'] = ''
-
- # file content digests
- for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items():
- if digest_cons is None:
- continue
- try:
- data[digest_name] = digest_cons(content).hexdigest()
- except ValueError:
- # hash digest not available or blocked by security policy
- pass
-
- if self.sign:
- with open(filename + ".asc", "rb") as f:
- data['gpg_signature'] = (os.path.basename(filename) + ".asc",
- f.read())
-
- # set up the authentication
- user_pass = (self.username + ":" + self.password).encode('ascii')
- # The exact encoding of the authentication string is debated.
-        # Anyway, PyPI only accepts ASCII for both username and password.
- auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
-
- # Build up the MIME payload for the POST data
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = b'\r\n--' + boundary.encode('ascii')
- end_boundary = sep_boundary + b'--\r\n'
- body = io.BytesIO()
- for key, value in data.items():
- title = '\r\nContent-Disposition: form-data; name="%s"' % key
- # handle multiple entries for the same name
- if not isinstance(value, list):
- value = [value]
- for value in value:
- if type(value) is tuple:
- title += '; filename="%s"' % value[0]
- value = value[1]
- else:
- value = str(value).encode('utf-8')
- body.write(sep_boundary)
- body.write(title.encode('utf-8'))
- body.write(b"\r\n\r\n")
- body.write(value)
- body.write(end_boundary)
- body = body.getvalue()
-
- msg = "Submitting %s to %s" % (filename, self.repository)
- self.announce(msg, log.INFO)
-
- # build the Request
- headers = {
- 'Content-type': 'multipart/form-data; boundary=%s' % boundary,
- 'Content-length': str(len(body)),
- 'Authorization': auth,
- }
-
- request = Request(self.repository, data=body,
- headers=headers)
- # send the data
- try:
- result = urlopen(request)
- status = result.getcode()
- reason = result.msg
- except HTTPError as e:
- status = e.code
- reason = e.msg
- except OSError as e:
- self.announce(str(e), log.ERROR)
- raise
-
- if status == 200:
- self.announce('Server response (%s): %s' % (status, reason),
- log.INFO)
- if self.show_response:
- text = self._read_pypi_response(result)
- msg = '\n'.join(('-' * 75, text, '-' * 75))
- self.announce(msg, log.INFO)
- else:
- msg = 'Upload failed (%s): %s' % (status, reason)
- self.announce(msg, log.ERROR)
- raise DistutilsError(msg)
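
The digest loop in upload_file() degrades gracefully when a hash is unavailable or blocked (e.g. MD5 under a FIPS security policy). A self-contained sketch of just that step, using stand-in bytes in place of the real archive content:

    import hashlib

    _FILE_CONTENT_DIGESTS = {
        "md5_digest": getattr(hashlib, "md5", None),
        "sha256_digest": getattr(hashlib, "sha256", None),
        "blake2_256_digest": getattr(hashlib, "blake2b", None),
    }

    content = b"stand-in for the archive bytes"
    data = {}
    for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items():
        if digest_cons is None:
            continue
        try:
            data[digest_name] = digest_cons(content).hexdigest()
        except ValueError:
            # hash construction blocked by the security policy
            pass
    print(sorted(data))
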
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/config.py b/contrib/python/setuptools/py3/setuptools/_distutils/config.py
deleted file mode 100644
index 2171abd6969..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/config.py
+++ /dev/null
@@ -1,130 +0,0 @@
-"""distutils.pypirc
-
-Provides the PyPIRCCommand class, the base class for the command classes
-that use .pypirc in the distutils.command package.
-"""
-import os
-from configparser import RawConfigParser
-
-from distutils.cmd import Command
-
-DEFAULT_PYPIRC = """\
-[distutils]
-index-servers =
- pypi
-
-[pypi]
-username:%s
-password:%s
-"""
-
-class PyPIRCCommand(Command):
- """Base command that knows how to handle the .pypirc file
- """
- DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
- DEFAULT_REALM = 'pypi'
- repository = None
- realm = None
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]" % \
- DEFAULT_REPOSITORY),
- ('show-response', None,
- 'display full response text from server')]
-
- boolean_options = ['show-response']
-
- def _get_rc_file(self):
- """Returns rc file path."""
- return os.path.join(os.path.expanduser('~'), '.pypirc')
-
- def _store_pypirc(self, username, password):
- """Creates a default .pypirc file."""
- rc = self._get_rc_file()
- with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
- f.write(DEFAULT_PYPIRC % (username, password))
-
- def _read_pypirc(self):
- """Reads the .pypirc file."""
- rc = self._get_rc_file()
- if os.path.exists(rc):
- self.announce('Using PyPI login from %s' % rc)
- repository = self.repository or self.DEFAULT_REPOSITORY
-
- config = RawConfigParser()
- config.read(rc)
- sections = config.sections()
- if 'distutils' in sections:
- # let's get the list of servers
- index_servers = config.get('distutils', 'index-servers')
- _servers = [server.strip() for server in
- index_servers.split('\n')
- if server.strip() != '']
- if _servers == []:
- # nothing set, let's try to get the default pypi
- if 'pypi' in sections:
- _servers = ['pypi']
- else:
- # the file is not properly defined, returning
- # an empty dict
- return {}
- for server in _servers:
- current = {'server': server}
- current['username'] = config.get(server, 'username')
-
- # optional params
- for key, default in (('repository',
- self.DEFAULT_REPOSITORY),
- ('realm', self.DEFAULT_REALM),
- ('password', None)):
- if config.has_option(server, key):
- current[key] = config.get(server, key)
- else:
- current[key] = default
-
- # work around people having "repository" for the "pypi"
- # section of their config set to the HTTP (rather than
- # HTTPS) URL
- if (server == 'pypi' and
- repository in (self.DEFAULT_REPOSITORY, 'pypi')):
- current['repository'] = self.DEFAULT_REPOSITORY
-                        return current
-
- if (current['server'] == repository or
- current['repository'] == repository):
- return current
- elif 'server-login' in sections:
- # old format
- server = 'server-login'
- if config.has_option(server, 'repository'):
- repository = config.get(server, 'repository')
- else:
- repository = self.DEFAULT_REPOSITORY
- return {'username': config.get(server, 'username'),
- 'password': config.get(server, 'password'),
- 'repository': repository,
- 'server': server,
- 'realm': self.DEFAULT_REALM}
-
- return {}
-
- def _read_pypi_response(self, response):
- """Read and decode a PyPI HTTP response."""
- import cgi
- content_type = response.getheader('content-type', 'text/plain')
- encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
- return response.read().decode(encoding)
-
- def initialize_options(self):
- """Initialize options."""
- self.repository = None
- self.realm = None
- self.show_response = 0
-
- def finalize_options(self):
- """Finalizes options."""
- if self.repository is None:
- self.repository = self.DEFAULT_REPOSITORY
- if self.realm is None:
- self.realm = self.DEFAULT_REALM
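
_read_pypirc() above relies on RawConfigParser and the multi-server .pypirc layout. A hedged sketch of how such a file parses, using obviously fake credentials:

    import textwrap
    from configparser import RawConfigParser

    PYPIRC = textwrap.dedent("""\
        [distutils]
        index-servers =
            pypi

        [pypi]
        username:alice
        password:secret
    """)

    config = RawConfigParser()
    config.read_string(PYPIRC)
    servers = [s.strip()
               for s in config.get('distutils', 'index-servers').split('\n')
               if s.strip()]
    print(servers)                         # ['pypi']
    print(config.get('pypi', 'username'))  # alice
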
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/core.py b/contrib/python/setuptools/py3/setuptools/_distutils/core.py
deleted file mode 100644
index f43888ea609..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/core.py
+++ /dev/null
@@ -1,249 +0,0 @@
-"""distutils.core
-
-The only module that needs to be imported to use the Distutils; provides
-the 'setup' function (which is to be called from the setup script). Also
-indirectly provides the Distribution and Command classes, although they are
-really defined in distutils.dist and distutils.cmd.
-"""
-
-import os
-import sys
-import tokenize
-
-from distutils.debug import DEBUG
-from distutils.errors import *
-
-# Mainly import these so setup scripts can "from distutils.core import" them.
-from distutils.dist import Distribution
-from distutils.cmd import Command
-from distutils.config import PyPIRCCommand
-from distutils.extension import Extension
-
-# This is a barebones help message generated and displayed when the user
-# runs the setup script with no arguments at all. More useful help
-# is generated with various --help options: global help, list commands,
-# and per-command help.
-USAGE = """\
-usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
- or: %(script)s --help [cmd1 cmd2 ...]
- or: %(script)s --help-commands
- or: %(script)s cmd --help
-"""
-
-def gen_usage (script_name):
- script = os.path.basename(script_name)
- return USAGE % vars()
-
-
-# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
-_setup_stop_after = None
-_setup_distribution = None
-
-# Legal keyword arguments for the setup() function
-setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
- 'name', 'version', 'author', 'author_email',
- 'maintainer', 'maintainer_email', 'url', 'license',
- 'description', 'long_description', 'keywords',
- 'platforms', 'classifiers', 'download_url',
- 'requires', 'provides', 'obsoletes',
- )
-
-# Legal keyword arguments for the Extension constructor
-extension_keywords = ('name', 'sources', 'include_dirs',
- 'define_macros', 'undef_macros',
- 'library_dirs', 'libraries', 'runtime_library_dirs',
- 'extra_objects', 'extra_compile_args', 'extra_link_args',
- 'swig_opts', 'export_symbols', 'depends', 'language')
-
-def setup (**attrs):
- """The gateway to the Distutils: do everything your setup script needs
- to do, in a highly flexible and user-driven way. Briefly: create a
- Distribution instance; find and parse config files; parse the command
- line; run each Distutils command found there, customized by the options
- supplied to 'setup()' (as keyword arguments), in config files, and on
- the command line.
-
- The Distribution instance might be an instance of a class supplied via
- the 'distclass' keyword argument to 'setup'; if no such class is
- supplied, then the Distribution class (in dist.py) is instantiated.
- All other arguments to 'setup' (except for 'cmdclass') are used to set
- attributes of the Distribution instance.
-
- The 'cmdclass' argument, if supplied, is a dictionary mapping command
- names to command classes. Each command encountered on the command line
- will be turned into a command class, which is in turn instantiated; any
- class found in 'cmdclass' is used in place of the default, which is
- (for command 'foo_bar') class 'foo_bar' in module
- 'distutils.command.foo_bar'. The command class must provide a
- 'user_options' attribute which is a list of option specifiers for
- 'distutils.fancy_getopt'. Any command-line options between the current
- and the next command are used to set attributes of the current command
- object.
-
- When the entire command-line has been successfully parsed, calls the
- 'run()' method on each command object in turn. This method will be
- driven entirely by the Distribution object (which each command object
- has a reference to, thanks to its constructor), and the
- command-specific options that became attributes of each command
- object.
- """
-
- global _setup_stop_after, _setup_distribution
-
- # Determine the distribution class -- either caller-supplied or
- # our Distribution (see below).
- klass = attrs.get('distclass')
- if klass:
- del attrs['distclass']
- else:
- klass = Distribution
-
- if 'script_name' not in attrs:
- attrs['script_name'] = os.path.basename(sys.argv[0])
- if 'script_args' not in attrs:
- attrs['script_args'] = sys.argv[1:]
-
- # Create the Distribution instance, using the remaining arguments
- # (ie. everything except distclass) to initialize it
- try:
- _setup_distribution = dist = klass(attrs)
- except DistutilsSetupError as msg:
- if 'name' not in attrs:
- raise SystemExit("error in setup command: %s" % msg)
- else:
- raise SystemExit("error in %s setup command: %s" % \
- (attrs['name'], msg))
-
- if _setup_stop_after == "init":
- return dist
-
- # Find and parse the config file(s): they will override options from
- # the setup script, but be overridden by the command line.
- dist.parse_config_files()
-
- if DEBUG:
- print("options (after parsing config files):")
- dist.dump_option_dicts()
-
- if _setup_stop_after == "config":
- return dist
-
- # Parse the command line and override config files; any
- # command-line errors are the end user's fault, so turn them into
- # SystemExit to suppress tracebacks.
- try:
- ok = dist.parse_command_line()
- except DistutilsArgError as msg:
- raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
-
- if DEBUG:
- print("options (after parsing command line):")
- dist.dump_option_dicts()
-
- if _setup_stop_after == "commandline":
- return dist
-
- # And finally, run all the commands found on the command line.
- if ok:
- return run_commands(dist)
-
- return dist
-
-# setup ()
-
-
-def run_commands (dist):
- """Given a Distribution object run all the commands,
- raising ``SystemExit`` errors in the case of failure.
-
- This function assumes that either ``sys.argv`` or ``dist.script_args``
- is already set accordingly.
- """
- try:
- dist.run_commands()
- except KeyboardInterrupt:
- raise SystemExit("interrupted")
- except OSError as exc:
- if DEBUG:
- sys.stderr.write("error: %s\n" % (exc,))
- raise
- else:
- raise SystemExit("error: %s" % (exc,))
-
- except (DistutilsError,
- CCompilerError) as msg:
- if DEBUG:
- raise
- else:
- raise SystemExit("error: " + str(msg))
-
- return dist
-
-
-def run_setup (script_name, script_args=None, stop_after="run"):
- """Run a setup script in a somewhat controlled environment, and
- return the Distribution instance that drives things. This is useful
- if you need to find out the distribution meta-data (passed as
-    keyword args from 'script' to 'setup()'), or the contents of the
- config files or command-line.
-
- 'script_name' is a file that will be read and run with 'exec()';
- 'sys.argv[0]' will be replaced with 'script' for the duration of the
- call. 'script_args' is a list of strings; if supplied,
- 'sys.argv[1:]' will be replaced by 'script_args' for the duration of
- the call.
-
- 'stop_after' tells 'setup()' when to stop processing; possible
- values:
- init
- stop after the Distribution instance has been created and
- populated with the keyword arguments to 'setup()'
- config
- stop after config files have been parsed (and their data
- stored in the Distribution instance)
- commandline
- stop after the command-line ('sys.argv[1:]' or 'script_args')
-        has been parsed (and the data stored in the Distribution)
- run [default]
- stop after all commands have been run (the same as if 'setup()'
-        had been called in the usual way)
-
- Returns the Distribution instance, which provides all information
- used to drive the Distutils.
- """
- if stop_after not in ('init', 'config', 'commandline', 'run'):
- raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
-
- global _setup_stop_after, _setup_distribution
- _setup_stop_after = stop_after
-
- save_argv = sys.argv.copy()
- g = {'__file__': script_name, '__name__': '__main__'}
- try:
- try:
- sys.argv[0] = script_name
- if script_args is not None:
- sys.argv[1:] = script_args
- # tokenize.open supports automatic encoding detection
- with tokenize.open(script_name) as f:
- code = f.read().replace(r'\r\n', r'\n')
- exec(code, g)
- finally:
- sys.argv = save_argv
- _setup_stop_after = None
- except SystemExit:
- # Hmm, should we do something if exiting with a non-zero code
- # (ie. error)?
- pass
-
- if _setup_distribution is None:
- raise RuntimeError(("'distutils.core.setup()' was never called -- "
- "perhaps '%s' is not a Distutils setup script?") % \
- script_name)
-
-    # I wonder if the setup script's namespace -- g -- would be of
- # any interest to callers?
- #print "_setup_distribution:", _setup_distribution
- return _setup_distribution
-
-# run_setup ()
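
run_setup() is the intended hook for introspecting a setup script's metadata without running any commands. A usage sketch ('setup.py' is a hypothetical script assumed to exist in the current directory):

    from distutils.core import run_setup

    # stop_after='init' builds the Distribution but runs no commands
    dist = run_setup('setup.py', script_args=[], stop_after='init')
    print(dist.get_name(), dist.get_version())
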
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/cygwinccompiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/cygwinccompiler.py
deleted file mode 100644
index ad6cc44b08f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/cygwinccompiler.py
+++ /dev/null
@@ -1,425 +0,0 @@
-"""distutils.cygwinccompiler
-
-Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
-handles the Cygwin port of the GNU C compiler to Windows. It also contains
-the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
-cygwin in no-cygwin mode).
-"""
-
-# problems:
-#
-# * if you use a msvc compiled python version (1.5.2)
-# 1. you have to insert a __GNUC__ section in its config.h
-# 2. you have to generate an import library for its dll
-# - create a def-file for python??.dll
-# - create an import library using
-# dlltool --dllname python15.dll --def python15.def \
-# --output-lib libpython15.a
-#
-# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
-#
-# * We put export_symbols in a def-file, and don't use
-#   --export-all-symbols because it didn't work reliably in some
-#   tested configurations. And because other Windows compilers also
-#   need their symbols specified, this is not a serious problem.
-#
-# tested configurations:
-#
-# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
-# (after patching python's config.h and for C++ some other include files)
-# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
-# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
-# (ld doesn't support -shared, so we use dllwrap)
-# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
-# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
-# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
-#   - using gcc -mdll instead of dllwrap doesn't work without -static,
-#     because it tries to link against dlls instead of their import
-#     libraries. (If it finds the dll first.)
-#     By specifying -static we force ld to link against the import libraries;
-#     this is the windows standard, and the dlls normally don't contain
-#     the necessary symbols anyway.
-# *** only the version of June 2000 shows these problems
-# * cygwin gcc 3.2/ld 2.13.90 works
-# (ld supports -shared)
-# * mingw gcc 3.2/ld 2.13 works
-# (ld supports -shared)
-# * llvm-mingw with Clang 11 works
-# (lld supports -shared)
-
-import os
-import sys
-import copy
-from subprocess import Popen, PIPE, check_output
-import re
-
-import distutils.version
-from distutils.unixccompiler import UnixCCompiler
-from distutils.file_util import write_file
-from distutils.errors import (DistutilsExecError, CCompilerError,
- CompileError, UnknownFileError)
-from distutils.version import LooseVersion
-from distutils.spawn import find_executable
-
-def get_msvcr():
- """Include the appropriate MSVC runtime library if Python was built
- with MSVC 7.0 or later.
- """
- msc_pos = sys.version.find('MSC v.')
- if msc_pos != -1:
- msc_ver = sys.version[msc_pos+6:msc_pos+10]
- if msc_ver == '1300':
- # MSVC 7.0
- return ['msvcr70']
- elif msc_ver == '1310':
- # MSVC 7.1
- return ['msvcr71']
- elif msc_ver == '1400':
- # VS2005 / MSVC 8.0
- return ['msvcr80']
- elif msc_ver == '1500':
- # VS2008 / MSVC 9.0
- return ['msvcr90']
- elif msc_ver == '1600':
- # VS2010 / MSVC 10.0
- return ['msvcr100']
- elif msc_ver == '1700':
- # VS2012 / MSVC 11.0
- return ['msvcr110']
- elif msc_ver == '1800':
- # VS2013 / MSVC 12.0
- return ['msvcr120']
- elif 1900 <= int(msc_ver) < 2000:
- # VS2015 / MSVC 14.0
- return ['ucrt', 'vcruntime140']
- else:
- raise ValueError("Unknown MS Compiler version %s " % msc_ver)
-
-
-class CygwinCCompiler(UnixCCompiler):
- """ Handles the Cygwin port of the GNU C compiler to Windows.
- """
- compiler_type = 'cygwin'
- obj_extension = ".o"
- static_lib_extension = ".a"
- shared_lib_extension = ".dll"
- static_lib_format = "lib%s%s"
- shared_lib_format = "%s%s"
- exe_extension = ".exe"
-
- def __init__(self, verbose=0, dry_run=0, force=0):
-
- UnixCCompiler.__init__(self, verbose, dry_run, force)
-
- status, details = check_config_h()
- self.debug_print("Python's GCC status: %s (details: %s)" %
- (status, details))
- if status is not CONFIG_H_OK:
- self.warn(
- "Python's pyconfig.h doesn't seem to support your compiler. "
- "Reason: %s. "
- "Compiling may fail because of undefined preprocessor macros."
- % details)
-
- self.cc = os.environ.get('CC', 'gcc')
- self.cxx = os.environ.get('CXX', 'g++')
-
- if ('gcc' in self.cc): # Start gcc workaround
- self.gcc_version, self.ld_version, self.dllwrap_version = \
- get_versions()
- self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
- (self.gcc_version,
- self.ld_version,
- self.dllwrap_version) )
-
- # ld_version >= "2.10.90" and < "2.13" should also be able to use
- # gcc -mdll instead of dllwrap
-            # Older dllwraps had their own version numbers; newer ones use
-            # the same numbering as the rest of binutils (including ld).
- # dllwrap 2.10.90 is buggy
- if self.ld_version >= "2.10.90":
- self.linker_dll = self.cc
- else:
- self.linker_dll = "dllwrap"
-
- # ld_version >= "2.13" support -shared so use it instead of
- # -mdll -static
- if self.ld_version >= "2.13":
- shared_option = "-shared"
- else:
- shared_option = "-mdll -static"
- else: # Assume linker is up to date
- self.linker_dll = self.cc
- shared_option = "-shared"
-
- self.set_executables(compiler='%s -mcygwin -O -Wall' % self.cc,
- compiler_so='%s -mcygwin -mdll -O -Wall' % self.cc,
- compiler_cxx='%s -mcygwin -O -Wall' % self.cxx,
- linker_exe='%s -mcygwin' % self.cc,
- linker_so=('%s -mcygwin %s' %
- (self.linker_dll, shared_option)))
-
- # cygwin and mingw32 need different sets of libraries
- if ('gcc' in self.cc and self.gcc_version == "2.91.57"):
- # cygwin shouldn't need msvcrt, but without the dlls will crash
- # (gcc version 2.91.57) -- perhaps something about initialization
- self.dll_libraries=["msvcrt"]
- self.warn(
- "Consider upgrading to a newer version of gcc")
- else:
- # Include the appropriate MSVC runtime library if Python was built
- # with MSVC 7.0 or later.
- self.dll_libraries = get_msvcr()
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- """Compiles the source by spawning GCC and windres if needed."""
- if ext == '.rc' or ext == '.res':
- # gcc needs '.res' and '.rc' compiled to object files !!!
- try:
- self.spawn(["windres", "-i", src, "-o", obj])
- except DistutilsExecError as msg:
- raise CompileError(msg)
- else: # for other files use the C-compiler
- try:
- self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
- extra_postargs)
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- def link(self, target_desc, objects, output_filename, output_dir=None,
- libraries=None, library_dirs=None, runtime_library_dirs=None,
- export_symbols=None, debug=0, extra_preargs=None,
- extra_postargs=None, build_temp=None, target_lang=None):
- """Link the objects."""
- # use separate copies, so we can modify the lists
- extra_preargs = copy.copy(extra_preargs or [])
- libraries = copy.copy(libraries or [])
- objects = copy.copy(objects or [])
-
- # Additional libraries
- libraries.extend(self.dll_libraries)
-
- # handle export symbols by creating a def-file
- # with executables this only works with gcc/ld as linker
- if ((export_symbols is not None) and
- (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
- # (The linker doesn't do anything if output is up-to-date.
- # So it would probably better to check if we really need this,
- # but for this we had to insert some unchanged parts of
- # UnixCCompiler, and this is not what we want.)
-
-            # we want to put some files in the same directory as the
-            # object files; build_temp doesn't help much, so use the
-            # directory where the object files actually are
- temp_dir = os.path.dirname(objects[0])
- # name of dll to give the helper files the same base name
- (dll_name, dll_extension) = os.path.splitext(
- os.path.basename(output_filename))
-
- # generate the filenames for these files
- def_file = os.path.join(temp_dir, dll_name + ".def")
- lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
-
- # Generate .def file
- contents = [
- "LIBRARY %s" % os.path.basename(output_filename),
- "EXPORTS"]
- for sym in export_symbols:
- contents.append(sym)
- self.execute(write_file, (def_file, contents),
- "writing %s" % def_file)
-
- # next add options for def-file and to creating import libraries
-
- # dllwrap uses different options than gcc/ld
- if self.linker_dll == "dllwrap":
- extra_preargs.extend(["--output-lib", lib_file])
- # for dllwrap we have to use a special option
- extra_preargs.extend(["--def", def_file])
- # we use gcc/ld here and can be sure ld is >= 2.9.10
- else:
- # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
- #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
- # for gcc/ld the def-file is specified as any object files
- objects.append(def_file)
-
- #end: if ((export_symbols is not None) and
- # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
-
-        # anyone who wants symbols and a many-times-larger output file
-        # should explicitly switch debug mode on;
- # otherwise we let dllwrap/ld strip the output file
- # (On my machine: 10KiB < stripped_file < ??100KiB
- # unstripped_file = stripped_file + XXX KiB
- # ( XXX=254 for a typical python extension))
- if not debug:
- extra_preargs.append("-s")
-
- UnixCCompiler.link(self, target_desc, objects, output_filename,
- output_dir, libraries, library_dirs,
- runtime_library_dirs,
- None, # export_symbols, we do this in our def-file
- debug, extra_preargs, extra_postargs, build_temp,
- target_lang)
-
- # -- Miscellaneous methods -----------------------------------------
-
- def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
- """Adds supports for rc and res files."""
- if output_dir is None:
- output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- # use normcase to make sure '.rc' is really '.rc' and not '.RC'
- base, ext = os.path.splitext(os.path.normcase(src_name))
- if ext not in (self.src_extensions + ['.rc','.res']):
- raise UnknownFileError("unknown file type '%s' (from '%s')" % \
- (ext, src_name))
- if strip_dir:
- base = os.path.basename (base)
- if ext in ('.res', '.rc'):
- # these need to be compiled to object files
- obj_names.append (os.path.join(output_dir,
- base + ext + self.obj_extension))
- else:
- obj_names.append (os.path.join(output_dir,
- base + self.obj_extension))
- return obj_names
-
-# the same as cygwin plus some additional parameters
-class Mingw32CCompiler(CygwinCCompiler):
- """ Handles the Mingw32 port of the GNU C compiler to Windows.
- """
- compiler_type = 'mingw32'
-
- def __init__(self, verbose=0, dry_run=0, force=0):
-
- CygwinCCompiler.__init__ (self, verbose, dry_run, force)
-
- # ld_version >= "2.13" support -shared so use it instead of
- # -mdll -static
- if ('gcc' in self.cc and self.ld_version < "2.13"):
- shared_option = "-mdll -static"
- else:
- shared_option = "-shared"
-
- # A real mingw32 doesn't need to specify a different entry point,
- # but cygwin 2.91.57 in no-cygwin-mode needs it.
- if ('gcc' in self.cc and self.gcc_version <= "2.91.57"):
- entry_point = '--entry _DllMain@12'
- else:
- entry_point = ''
-
- if is_cygwincc(self.cc):
- raise CCompilerError(
- 'Cygwin gcc cannot be used with --compiler=mingw32')
-
- self.set_executables(compiler='%s -O -Wall' % self.cc,
- compiler_so='%s -mdll -O -Wall' % self.cc,
- compiler_cxx='%s -O -Wall' % self.cxx,
- linker_exe='%s' % self.cc,
- linker_so='%s %s %s'
- % (self.linker_dll, shared_option,
- entry_point))
- # Maybe we should also append -mthreads, but then the finished
- # dlls need another dll (mingwm10.dll see Mingw32 docs)
- # (-mthreads: Support thread-safe exception handling on `Mingw32')
-
- # no additional libraries needed
- self.dll_libraries=[]
-
- # Include the appropriate MSVC runtime library if Python was built
- # with MSVC 7.0 or later.
- self.dll_libraries = get_msvcr()
-
-# Because these compilers aren't configured in Python's pyconfig.h file by
-# default, we should at least warn the user if they are using an unmodified
-# version.
-
-CONFIG_H_OK = "ok"
-CONFIG_H_NOTOK = "not ok"
-CONFIG_H_UNCERTAIN = "uncertain"
-
-def check_config_h():
- """Check if the current Python installation appears amenable to building
- extensions with GCC.
-
- Returns a tuple (status, details), where 'status' is one of the following
- constants:
-
- - CONFIG_H_OK: all is well, go ahead and compile
- - CONFIG_H_NOTOK: doesn't look good
- - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h
-
- 'details' is a human-readable string explaining the situation.
-
- Note there are two ways to conclude "OK": either 'sys.version' contains
- the string "GCC" (implying that this Python was built with GCC), or the
- installed "pyconfig.h" contains the string "__GNUC__".
- """
-
- # XXX since this function also checks sys.version, it's not strictly a
- # "pyconfig.h" check -- should probably be renamed...
-
- from distutils import sysconfig
-
- # if sys.version contains GCC then python was compiled with GCC, and the
- # pyconfig.h file should be OK
- if "GCC" in sys.version:
- return CONFIG_H_OK, "sys.version mentions 'GCC'"
-
- # Clang would also work
- if "Clang" in sys.version:
- return CONFIG_H_OK, "sys.version mentions 'Clang'"
-
- # let's see if __GNUC__ is mentioned in python.h
- fn = sysconfig.get_config_h_filename()
- try:
- config_h = open(fn)
- try:
- if "__GNUC__" in config_h.read():
- return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
- else:
- return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
- finally:
- config_h.close()
- except OSError as exc:
- return (CONFIG_H_UNCERTAIN,
- "couldn't read '%s': %s" % (fn, exc.strerror))
-
-RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
-
-def _find_exe_version(cmd):
- """Find the version of an executable by running `cmd` in the shell.
-
- If the command is not found, or the output does not match
- `RE_VERSION`, returns None.
- """
- executable = cmd.split()[0]
- if find_executable(executable) is None:
- return None
- out = Popen(cmd, shell=True, stdout=PIPE).stdout
- try:
- out_string = out.read()
- finally:
- out.close()
- result = RE_VERSION.search(out_string)
- if result is None:
- return None
- # LooseVersion works with strings; decode
- ver_str = result.group(1).decode()
- with distutils.version.suppress_known_deprecation():
- return LooseVersion(ver_str)
-
-def get_versions():
- """ Try to find out the versions of gcc, ld and dllwrap.
-
-    If a version cannot be determined, None is returned in its place.
- """
- commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
- return tuple([_find_exe_version(cmd) for cmd in commands])
-
-def is_cygwincc(cc):
- '''Try to determine if the compiler that would be used is from cygwin.'''
- out_string = check_output([cc, '-dumpmachine'])
- return out_string.strip().endswith(b'cygwin')
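
get_msvcr() keys off a marker embedded in sys.version on MSVC-built Pythons. A standalone sketch of that probe, using a sample version string rather than the live interpreter:

    # sample of what sys.version looks like on an MSVC-built Python
    version = "3.9.7 (tags/v3.9.7, ...) [MSC v.1929 64 bit (AMD64)]"

    msc_pos = version.find('MSC v.')
    if msc_pos != -1:
        msc_ver = version[msc_pos + 6:msc_pos + 10]
        print('MSC version:', msc_ver)  # 1929 falls in the 14.x (ucrt) range
    else:
        print('not an MSVC build')
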
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/debug.py b/contrib/python/setuptools/py3/setuptools/_distutils/debug.py
deleted file mode 100644
index daf1660f0d8..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/debug.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import os
-
-# If DISTUTILS_DEBUG is anything other than the empty string, we run in
-# debug mode.
-DEBUG = os.environ.get('DISTUTILS_DEBUG')
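
The flag is a plain environment probe: any non-empty DISTUTILS_DEBUG value is truthy. A minimal check:

    import os

    os.environ['DISTUTILS_DEBUG'] = '1'
    print(bool(os.environ.get('DISTUTILS_DEBUG')))  # True; unset or '' -> False
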
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/dep_util.py b/contrib/python/setuptools/py3/setuptools/_distutils/dep_util.py
deleted file mode 100644
index d74f5e4e92f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/dep_util.py
+++ /dev/null
@@ -1,92 +0,0 @@
-"""distutils.dep_util
-
-Utility functions for simple, timestamp-based dependency checking of
-files and groups of files; also, functions based entirely on such
-timestamp dependency analysis."""
-
-import os
-from distutils.errors import DistutilsFileError
-
-
-def newer (source, target):
- """Return true if 'source' exists and is more recently modified than
- 'target', or if 'source' exists and 'target' doesn't. Return false if
- both exist and 'target' is the same age or younger than 'source'.
- Raise DistutilsFileError if 'source' does not exist.
- """
- if not os.path.exists(source):
- raise DistutilsFileError("file '%s' does not exist" %
- os.path.abspath(source))
- if not os.path.exists(target):
- return 1
-
- from stat import ST_MTIME
- mtime1 = os.stat(source)[ST_MTIME]
- mtime2 = os.stat(target)[ST_MTIME]
-
- return mtime1 > mtime2
-
-# newer ()
-
-
-def newer_pairwise (sources, targets):
- """Walk two filename lists in parallel, testing if each source is newer
- than its corresponding target. Return a pair of lists (sources,
- targets) where source is newer than target, according to the semantics
- of 'newer()'.
- """
- if len(sources) != len(targets):
- raise ValueError("'sources' and 'targets' must be same length")
-
- # build a pair of lists (sources, targets) where source is newer
- n_sources = []
- n_targets = []
- for i in range(len(sources)):
- if newer(sources[i], targets[i]):
- n_sources.append(sources[i])
- n_targets.append(targets[i])
-
- return (n_sources, n_targets)
-
-# newer_pairwise ()
-
-
-def newer_group (sources, target, missing='error'):
- """Return true if 'target' is out-of-date with respect to any file
- listed in 'sources'. In other words, if 'target' exists and is newer
- than every file in 'sources', return false; otherwise return true.
- 'missing' controls what we do when a source file is missing; the
- default ("error") is to blow up with an OSError from inside 'stat()';
- if it is "ignore", we silently drop any missing source files; if it is
- "newer", any missing source files make us assume that 'target' is
- out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
- carry out commands that wouldn't work because inputs are missing, but
- that doesn't matter because you're not actually going to run the
- commands).
- """
- # If the target doesn't even exist, then it's definitely out-of-date.
- if not os.path.exists(target):
- return 1
-
- # Otherwise we have to find out the hard way: if *any* source file
- # is more recent than 'target', then 'target' is out-of-date and
- # we can immediately return true. If we fall through to the end
- # of the loop, then 'target' is up-to-date and we return false.
- from stat import ST_MTIME
- target_mtime = os.stat(target)[ST_MTIME]
- for source in sources:
- if not os.path.exists(source):
- if missing == 'error': # blow up when we stat() the file
- pass
- elif missing == 'ignore': # missing source dropped from
- continue # target's dependency list
- elif missing == 'newer': # missing source means target is
- return 1 # out-of-date
-
- source_mtime = os.stat(source)[ST_MTIME]
- if source_mtime > target_mtime:
- return 1
- else:
- return 0
-
-# newer_group ()
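
A quick usage sketch of the semantics above, assuming a writable current directory (the file names are invented):

    import os
    import time
    from distutils.dep_util import newer, newer_group

    open('source.c', 'w').close()
    time.sleep(0.01)  # coarse-timestamp filesystems may need longer
    open('target.o', 'w').close()

    print(newer('source.c', 'target.o'))           # False: target is newer
    print(newer_group(['source.c'], 'missing.o'))  # 1: target doesn't exist
    os.remove('source.c')
    os.remove('target.o')
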
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/dir_util.py b/contrib/python/setuptools/py3/setuptools/_distutils/dir_util.py
deleted file mode 100644
index d5cd8e3e24f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/dir_util.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""distutils.dir_util
-
-Utility functions for manipulating directories and directory trees."""
-
-import os
-import errno
-from distutils.errors import DistutilsFileError, DistutilsInternalError
-from distutils import log
-
-# cache used by mkpath() -- in addition to cheapening redundant calls,
-# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
-_path_created = {}
-
-# I don't use os.makedirs because a) it's new to Python 1.5.2, and
-# b) it blows up if the directory already exists (I want to silently
-# succeed in that case).
-def mkpath(name, mode=0o777, verbose=1, dry_run=0):
- """Create a directory and any missing ancestor directories.
-
- If the directory already exists (or if 'name' is the empty string, which
- means the current directory, which of course exists), then do nothing.
- Raise DistutilsFileError if unable to create some directory along the way
- (eg. some sub-path exists, but is a file rather than a directory).
- If 'verbose' is true, print a one-line summary of each mkdir to stdout.
- Return the list of directories actually created.
- """
-
- global _path_created
-
- # Detect a common bug -- name is None
- if not isinstance(name, str):
- raise DistutilsInternalError(
- "mkpath: 'name' must be a string (got %r)" % (name,))
-
- # XXX what's the better way to handle verbosity? print as we create
- # each directory in the path (the current behaviour), or only announce
- # the creation of the whole path? (quite easy to do the latter since
- # we're not using a recursive algorithm)
-
- name = os.path.normpath(name)
- created_dirs = []
- if os.path.isdir(name) or name == '':
- return created_dirs
- if _path_created.get(os.path.abspath(name)):
- return created_dirs
-
- (head, tail) = os.path.split(name)
- tails = [tail] # stack of lone dirs to create
-
- while head and tail and not os.path.isdir(head):
- (head, tail) = os.path.split(head)
- tails.insert(0, tail) # push next higher dir onto stack
-
- # now 'head' contains the deepest directory that already exists
- # (that is, the child of 'head' in 'name' is the highest directory
- # that does *not* exist)
- for d in tails:
- #print "head = %s, d = %s: " % (head, d),
- head = os.path.join(head, d)
- abs_head = os.path.abspath(head)
-
- if _path_created.get(abs_head):
- continue
-
- if verbose >= 1:
- log.info("creating %s", head)
-
- if not dry_run:
- try:
- os.mkdir(head, mode)
- except OSError as exc:
- if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
- raise DistutilsFileError(
- "could not create '%s': %s" % (head, exc.args[-1]))
- created_dirs.append(head)
-
- _path_created[abs_head] = 1
- return created_dirs
-
-def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
- """Create all the empty directories under 'base_dir' needed to put 'files'
- there.
-
- 'base_dir' is just the name of a directory which doesn't necessarily
- exist yet; 'files' is a list of filenames to be interpreted relative to
- 'base_dir'. 'base_dir' + the directory portion of every file in 'files'
- will be created if it doesn't already exist. 'mode', 'verbose' and
- 'dry_run' flags are as for 'mkpath()'.
- """
- # First get the list of directories to create
- need_dir = set()
- for file in files:
- need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
-
- # Now create them
- for dir in sorted(need_dir):
- mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
-
-def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
- preserve_symlinks=0, update=0, verbose=1, dry_run=0):
- """Copy an entire directory tree 'src' to a new location 'dst'.
-
- Both 'src' and 'dst' must be directory names. If 'src' is not a
- directory, raise DistutilsFileError. If 'dst' does not exist, it is
- created with 'mkpath()'. The end result of the copy is that every
- file in 'src' is copied to 'dst', and directories under 'src' are
- recursively copied to 'dst'. Return the list of files that were
- copied or might have been copied, using their output name. The
- return value is unaffected by 'update' or 'dry_run': it is simply
- the list of all files under 'src', with the names changed to be
- under 'dst'.
-
- 'preserve_mode' and 'preserve_times' are the same as for
- 'copy_file'; note that they only apply to regular files, not to
- directories. If 'preserve_symlinks' is true, symlinks will be
- copied as symlinks (on platforms that support them!); otherwise
- (the default), the destination of the symlink will be copied.
- 'update' and 'verbose' are the same as for 'copy_file'.
- """
- from distutils.file_util import copy_file
-
- if not dry_run and not os.path.isdir(src):
- raise DistutilsFileError(
- "cannot copy tree '%s': not a directory" % src)
- try:
- names = os.listdir(src)
- except OSError as e:
- if dry_run:
- names = []
- else:
- raise DistutilsFileError(
- "error listing files in '%s': %s" % (src, e.strerror))
-
- if not dry_run:
- mkpath(dst, verbose=verbose)
-
- outputs = []
-
- for n in names:
- src_name = os.path.join(src, n)
- dst_name = os.path.join(dst, n)
-
- if n.startswith('.nfs'):
- # skip NFS rename files
- continue
-
- if preserve_symlinks and os.path.islink(src_name):
- link_dest = os.readlink(src_name)
- if verbose >= 1:
- log.info("linking %s -> %s", dst_name, link_dest)
- if not dry_run:
- os.symlink(link_dest, dst_name)
- outputs.append(dst_name)
-
- elif os.path.isdir(src_name):
- outputs.extend(
- copy_tree(src_name, dst_name, preserve_mode,
- preserve_times, preserve_symlinks, update,
- verbose=verbose, dry_run=dry_run))
- else:
- copy_file(src_name, dst_name, preserve_mode,
- preserve_times, update, verbose=verbose,
- dry_run=dry_run)
- outputs.append(dst_name)
-
- return outputs
-
-def _build_cmdtuple(path, cmdtuples):
- """Helper for remove_tree()."""
- for f in os.listdir(path):
- real_f = os.path.join(path,f)
- if os.path.isdir(real_f) and not os.path.islink(real_f):
- _build_cmdtuple(real_f, cmdtuples)
- else:
- cmdtuples.append((os.remove, real_f))
- cmdtuples.append((os.rmdir, path))
-
-def remove_tree(directory, verbose=1, dry_run=0):
- """Recursively remove an entire directory tree.
-
- Any errors are ignored (apart from being reported to stdout if 'verbose'
- is true).
- """
- global _path_created
-
- if verbose >= 1:
- log.info("removing '%s' (and everything under it)", directory)
- if dry_run:
- return
- cmdtuples = []
- _build_cmdtuple(directory, cmdtuples)
- for cmd in cmdtuples:
- try:
- cmd[0](cmd[1])
- # remove dir from cache if it's already there
- abspath = os.path.abspath(cmd[1])
- if abspath in _path_created:
- del _path_created[abspath]
- except OSError as exc:
- log.warn("error removing %s: %s", directory, exc)
-
-def ensure_relative(path):
- """Take the full path 'path', and make it a relative path.
-
- This is useful to make 'path' the second argument to os.path.join().
- """
- drive, path = os.path.splitdrive(path)
- if path[0:1] == os.sep:
- path = drive + path[1:]
- return path
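
A usage sketch for mkpath() in a throwaway directory; the return value lists only the directories actually created, and a repeated call returns an empty list since the path already exists:

    import os
    import tempfile
    from distutils.dir_util import mkpath, remove_tree

    root = tempfile.mkdtemp()
    target = os.path.join(root, 'a', 'b', 'c')
    created = mkpath(target, verbose=0)
    print([os.path.relpath(p, root) for p in created])  # ['a', 'a/b', 'a/b/c'] on POSIX
    print(mkpath(target, verbose=0))                    # []: already present
    remove_tree(root, verbose=0)
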
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/dist.py b/contrib/python/setuptools/py3/setuptools/_distutils/dist.py
deleted file mode 100644
index 37db4d6cd75..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/dist.py
+++ /dev/null
@@ -1,1257 +0,0 @@
-"""distutils.dist
-
-Provides the Distribution class, which represents the module distribution
-being built/installed/distributed.
-"""
-
-import sys
-import os
-import re
-from email import message_from_file
-
-try:
- import warnings
-except ImportError:
- warnings = None
-
-from distutils.errors import *
-from distutils.fancy_getopt import FancyGetopt, translate_longopt
-from distutils.util import check_environ, strtobool, rfc822_escape
-from distutils import log
-from distutils.debug import DEBUG
-
-# Regex to define acceptable Distutils command names. This is not *quite*
-# the same as a Python NAME -- I don't allow leading underscores. The fact
-# that they're very similar is no coincidence; the default naming scheme is
-# to look for a Python module named after the command.
-command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
-
-
-def _ensure_list(value, fieldname):
- if isinstance(value, str):
- # a string containing comma separated values is okay. It will
- # be converted to a list by Distribution.finalize_options().
- pass
- elif not isinstance(value, list):
- # passing a tuple or an iterator perhaps, warn and convert
- typename = type(value).__name__
- msg = "Warning: '{fieldname}' should be a list, got type '{typename}'"
- msg = msg.format(**locals())
- log.log(log.WARN, msg)
- value = list(value)
- return value
-
-
-class Distribution:
- """The core of the Distutils. Most of the work hiding behind 'setup'
- is really done within a Distribution instance, which farms the work out
- to the Distutils commands specified on the command line.
-
- Setup scripts will almost never instantiate Distribution directly,
- unless the 'setup()' function is totally inadequate to their needs.
- However, it is conceivable that a setup script might wish to subclass
- Distribution for some specialized purpose, and then pass the subclass
- to 'setup()' as the 'distclass' keyword argument. If so, it is
- necessary to respect the expectations that 'setup' has of Distribution.
- See the code for 'setup()', in core.py, for details.
- """
-
- # 'global_options' describes the command-line options that may be
- # supplied to the setup script prior to any actual commands.
- # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
- # these global options. This list should be kept to a bare minimum,
- # since every global option is also valid as a command option -- and we
- # don't want to pollute the commands with too many options that they
- # have minimal control over.
- # The fourth entry for verbose means that it can be repeated.
- global_options = [
- ('verbose', 'v', "run verbosely (default)", 1),
- ('quiet', 'q', "run quietly (turns verbosity off)"),
- ('dry-run', 'n', "don't actually do anything"),
- ('help', 'h', "show detailed help message"),
- ('no-user-cfg', None,
- 'ignore pydistutils.cfg in your home directory'),
- ]
-
- # 'common_usage' is a short (2-3 line) string describing the common
- # usage of the setup script.
- common_usage = """\
-Common commands: (see '--help-commands' for more)
-
- setup.py build will build the package underneath 'build/'
- setup.py install will install the package
-"""
-
- # options that are not propagated to the commands
- display_options = [
- ('help-commands', None,
- "list all available commands"),
- ('name', None,
- "print package name"),
- ('version', 'V',
- "print package version"),
- ('fullname', None,
- "print <package name>-<version>"),
- ('author', None,
- "print the author's name"),
- ('author-email', None,
- "print the author's email address"),
- ('maintainer', None,
- "print the maintainer's name"),
- ('maintainer-email', None,
- "print the maintainer's email address"),
- ('contact', None,
- "print the maintainer's name if known, else the author's"),
- ('contact-email', None,
- "print the maintainer's email address if known, else the author's"),
- ('url', None,
- "print the URL for this package"),
- ('license', None,
- "print the license of the package"),
- ('licence', None,
- "alias for --license"),
- ('description', None,
- "print the package description"),
- ('long-description', None,
- "print the long package description"),
- ('platforms', None,
- "print the list of platforms"),
- ('classifiers', None,
- "print the list of classifiers"),
- ('keywords', None,
- "print the list of keywords"),
- ('provides', None,
- "print the list of packages/modules provided"),
- ('requires', None,
- "print the list of packages/modules required"),
- ('obsoletes', None,
- "print the list of packages/modules made obsolete")
- ]
- display_option_names = [translate_longopt(x[0]) for x in display_options]
-
- # negative options are options that exclude other options
- negative_opt = {'quiet': 'verbose'}
-
- # -- Creation/initialization methods -------------------------------
-
- def __init__(self, attrs=None):
- """Construct a new Distribution instance: initialize all the
- attributes of a Distribution, and then use 'attrs' (a dictionary
- mapping attribute names to values) to assign some of those
- attributes their "real" values. (Any attributes not mentioned in
- 'attrs' will be assigned to some null value: 0, None, an empty list
- or dictionary, etc.) Most importantly, initialize the
- 'command_obj' attribute to the empty dictionary; this will be
- filled in with real command objects by 'parse_command_line()'.
- """
-
- # Default values for our command-line options
- self.verbose = 1
- self.dry_run = 0
- self.help = 0
- for attr in self.display_option_names:
- setattr(self, attr, 0)
-
- # Store the distribution meta-data (name, version, author, and so
- # forth) in a separate object -- we're getting to have enough
- # information here (and enough command-line options) that it's
- # worth it. Also delegate 'get_XXX()' methods to the 'metadata'
- # object in a sneaky and underhanded (but efficient!) way.
- self.metadata = DistributionMetadata()
- for basename in self.metadata._METHOD_BASENAMES:
- method_name = "get_" + basename
- setattr(self, method_name, getattr(self.metadata, method_name))
-
- # 'cmdclass' maps command names to class objects, so we
- # can 1) quickly figure out which class to instantiate when
- # we need to create a new command object, and 2) have a way
- # for the setup script to override command classes
- self.cmdclass = {}
-
- # 'command_packages' is a list of packages in which commands
- # are searched for. The factory for command 'foo' is expected
- # to be named 'foo' in the module 'foo' in one of the packages
- # named here. This list is searched from the left; an error
- # is raised if no named package provides the command being
- # searched for. (Always access using get_command_packages().)
- self.command_packages = None
-
- # 'script_name' and 'script_args' are usually set to sys.argv[0]
- # and sys.argv[1:], but they can be overridden when the caller is
- # not necessarily a setup script run from the command-line.
- self.script_name = None
- self.script_args = None
-
- # 'command_options' is where we store command options between
- # parsing them (from config files, the command-line, etc.) and when
- # they are actually needed -- ie. when the command in question is
- # instantiated. It is a dictionary of dictionaries of 2-tuples:
- # command_options = { command_name : { option : (source, value) } }
- self.command_options = {}
-
- # 'dist_files' is the list of (command, pyversion, file) that
- # have been created by any dist commands run so far. This is
- # filled regardless of whether the run is dry or not. pyversion
- # gives sysconfig.get_python_version() if the dist file is
- # specific to a Python version, 'any' if it is good for all
- # Python versions on the target platform, and '' for a source
- # file. pyversion should not be used to specify minimum or
- # maximum required Python versions; use the metainfo for that
- # instead.
- self.dist_files = []
-
- # These options are really the business of various commands, rather
- # than of the Distribution itself. We provide aliases for them in
- # Distribution as a convenience to the developer.
- self.packages = None
- self.package_data = {}
- self.package_dir = None
- self.py_modules = None
- self.libraries = None
- self.headers = None
- self.ext_modules = None
- self.ext_package = None
- self.include_dirs = None
- self.extra_path = None
- self.scripts = None
- self.data_files = None
- self.password = ''
-
- # And now initialize bookkeeping stuff that can't be supplied by
- # the caller at all. 'command_obj' maps command names to
- # Command instances -- that's how we enforce that every command
- # class is a singleton.
- self.command_obj = {}
-
- # 'have_run' maps command names to boolean values; it keeps track
- # of whether we have actually run a particular command, to make it
- # cheap to "run" a command whenever we think we might need to -- if
- # it's already been done, no need for expensive filesystem
- # operations, we just check the 'have_run' dictionary and carry on.
- # It's only safe to query 'have_run' for a command class that has
- # been instantiated -- a false value will be inserted when the
- # command object is created, and replaced with a true value when
- # the command is successfully run. Thus it's probably best to use
- # '.get()' rather than a straight lookup.
- self.have_run = {}
-
- # Now we'll use the attrs dictionary (ultimately, keyword args from
- # the setup script) to possibly override any or all of these
- # distribution options.
-
- if attrs:
- # Pull out the set of command options and work on them
- # specifically. Note that this order guarantees that aliased
- # command options will override any supplied redundantly
- # through the general options dictionary.
- options = attrs.get('options')
- if options is not None:
- del attrs['options']
- for (command, cmd_options) in options.items():
- opt_dict = self.get_option_dict(command)
- for (opt, val) in cmd_options.items():
- opt_dict[opt] = ("setup script", val)
-
- if 'licence' in attrs:
- attrs['license'] = attrs['licence']
- del attrs['licence']
- msg = "'licence' distribution option is deprecated; use 'license'"
- warnings.warn(msg)
-
- # Now work on the rest of the attributes. Any attribute that's
- # not already defined is invalid!
- for (key, val) in attrs.items():
- if hasattr(self.metadata, "set_" + key):
- getattr(self.metadata, "set_" + key)(val)
- elif hasattr(self.metadata, key):
- setattr(self.metadata, key, val)
- elif hasattr(self, key):
- setattr(self, key, val)
- else:
- msg = "Unknown distribution option: %s" % repr(key)
- warnings.warn(msg)
-
- # no-user-cfg is handled before other command line args
- # because other args override the config files, and this
- # one is needed before we can load the config files.
- # If attrs['script_args'] wasn't passed, assume false.
- #
- # This also makes sure we only look at the global options.
- self.want_user_cfg = True
-
- if self.script_args is not None:
- for arg in self.script_args:
- if not arg.startswith('-'):
- break
- if arg == '--no-user-cfg':
- self.want_user_cfg = False
- break
-
- self.finalize_options()
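
# A short sketch of the 'options' handling above: per-command options passed
# to the constructor land in command_options tagged with their source.
# 'demo' and 'custom_build' are made-up values.
from distutils.dist import Distribution

d = Distribution({
    'name': 'demo',
    'options': {'build': {'build_base': 'custom_build'}},
})
print(d.get_option_dict('build'))
# -> {'build_base': ('setup script', 'custom_build')}
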
-
- def get_option_dict(self, command):
- """Get the option dictionary for a given command. If that
- command's option dictionary hasn't been created yet, then create it
- and return the new dictionary; otherwise, return the existing
- option dictionary.
- """
- opt_dict = self.command_options.get(command)
- if opt_dict is None:
- opt_dict = self.command_options[command] = {}
- return opt_dict
-
- def dump_option_dicts(self, header=None, commands=None, indent=""):
- from pprint import pformat
-
- if commands is None: # dump all command option dicts
- commands = sorted(self.command_options.keys())
-
- if header is not None:
- self.announce(indent + header)
- indent = indent + " "
-
- if not commands:
- self.announce(indent + "no commands known yet")
- return
-
- for cmd_name in commands:
- opt_dict = self.command_options.get(cmd_name)
- if opt_dict is None:
- self.announce(indent +
- "no option dict for '%s' command" % cmd_name)
- else:
- self.announce(indent +
- "option dict for '%s' command:" % cmd_name)
- out = pformat(opt_dict)
- for line in out.split('\n'):
- self.announce(indent + " " + line)
-
- # -- Config file finding/parsing methods ---------------------------
-
- def find_config_files(self):
- """Find as many configuration files as should be processed for this
- platform, and return a list of filenames in the order in which they
- should be parsed. The filenames returned are guaranteed to exist
- (modulo nasty race conditions).
-
- There are three possible config files: distutils.cfg in the
- Distutils installation directory (ie. where the top-level
- Distutils __init__.py file lives); a file in the user's home
- directory named .pydistutils.cfg on Unix and pydistutils.cfg
- on Windows/Mac; and setup.cfg in the current directory.
-
- The file in the user's home directory can be disabled with the
- --no-user-cfg option.
- """
- files = []
- check_environ()
-
- # Where to look for the system-wide Distutils config file
- sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
-
- # Look for the system config file
- sys_file = os.path.join(sys_dir, "distutils.cfg")
- if os.path.isfile(sys_file):
- files.append(sys_file)
-
- # What to call the per-user config file
- if os.name == 'posix':
- user_filename = ".pydistutils.cfg"
- else:
- user_filename = "pydistutils.cfg"
-
- # And look for the user config file
- if self.want_user_cfg:
- user_file = os.path.join(os.path.expanduser('~'), user_filename)
- if os.path.isfile(user_file):
- files.append(user_file)
-
- # All platforms support local setup.cfg
- local_file = "setup.cfg"
- if os.path.isfile(local_file):
- files.append(local_file)
-
- if DEBUG:
- self.announce("using config files: %s" % ', '.join(files))
-
- return files
-
- def parse_config_files(self, filenames=None):
- from configparser import ConfigParser
-
- # Ignore install directory options if we have a venv
- if sys.prefix != sys.base_prefix:
- ignore_options = [
- 'install-base', 'install-platbase', 'install-lib',
- 'install-platlib', 'install-purelib', 'install-headers',
- 'install-scripts', 'install-data', 'prefix', 'exec-prefix',
- 'home', 'user', 'root']
- else:
- ignore_options = []
-
- ignore_options = frozenset(ignore_options)
-
- if filenames is None:
- filenames = self.find_config_files()
-
- if DEBUG:
- self.announce("Distribution.parse_config_files():")
-
- parser = ConfigParser()
- for filename in filenames:
- if DEBUG:
- self.announce(" reading %s" % filename)
- parser.read(filename)
- for section in parser.sections():
- options = parser.options(section)
- opt_dict = self.get_option_dict(section)
-
- for opt in options:
- if opt != '__name__' and opt not in ignore_options:
- val = parser.get(section, opt)
- opt = opt.replace('-', '_')
- opt_dict[opt] = (filename, val)
-
- # Make the ConfigParser forget everything (so we retain
- # the original filenames that options come from)
- parser.__init__()
-
- # If there was a "global" section in the config file, use it
- # to set Distribution options.
-
- if 'global' in self.command_options:
- for (opt, (src, val)) in self.command_options['global'].items():
- alias = self.negative_opt.get(opt)
- try:
- if alias:
- setattr(self, alias, not strtobool(val))
- elif opt in ('verbose', 'dry_run'): # ugh!
- setattr(self, opt, strtobool(val))
- else:
- setattr(self, opt, val)
- except ValueError as msg:
- raise DistutilsOptionError(msg)
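
# A sketch of parse_config_files() with an explicit file, assuming a writable
# temp directory; option names are translated ('build-base' -> 'build_base')
# and tagged with the filename they came from.
import os
import tempfile
from distutils.dist import Distribution

with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as f:
    f.write("[build]\nbuild-base = custom_build\n")
d = Distribution()
d.parse_config_files([f.name])
print(d.get_option_dict('build'))   # {'build_base': (<temp filename>, 'custom_build')}
os.unlink(f.name)
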
-
- # -- Command-line parsing methods ----------------------------------
-
- def parse_command_line(self):
- """Parse the setup script's command line, taken from the
- 'script_args' instance attribute (which defaults to 'sys.argv[1:]'
- -- see 'setup()' in core.py). This list is first processed for
- "global options" -- options that set attributes of the Distribution
- instance. Then, it is alternately scanned for Distutils commands
- and options for that command. Each new command terminates the
- options for the previous command. The allowed options for a
- command are determined by the 'user_options' attribute of the
- command class -- thus, we have to be able to load command classes
- in order to parse the command line. Any error in that 'options'
- attribute raises DistutilsGetoptError; any error on the
- command-line raises DistutilsArgError. If no Distutils commands
- were found on the command line, raises DistutilsArgError. Return
- true if command-line was successfully parsed and we should carry
- on with executing commands; false if no errors but we shouldn't
- execute commands (currently, this only happens if user asks for
- help).
- """
- #
- # We now have enough information to show the Macintosh dialog
- # that allows the user to interactively specify the "command line".
- #
- toplevel_options = self._get_toplevel_options()
-
- # We have to parse the command line a bit at a time -- global
- # options, then the first command, then its options, and so on --
- # because each command will be handled by a different class, and
- # the options that are valid for a particular class aren't known
- # until we have loaded the command class, which doesn't happen
- # until we know what the command is.
-
- self.commands = []
- parser = FancyGetopt(toplevel_options + self.display_options)
- parser.set_negative_aliases(self.negative_opt)
- parser.set_aliases({'licence': 'license'})
- args = parser.getopt(args=self.script_args, object=self)
- option_order = parser.get_option_order()
- log.set_verbosity(self.verbose)
-
- # for display options we return immediately
- if self.handle_display_options(option_order):
- return
- while args:
- args = self._parse_command_opts(parser, args)
- if args is None: # user asked for help (and got it)
- return
-
- # Handle the cases of --help as a "global" option, ie.
- # "setup.py --help" and "setup.py --help command ...". For the
- # former, we show global options (--verbose, --dry-run, etc.)
- # and display-only options (--name, --version, etc.); for the
- # latter, we omit the display-only options and show help for
- # each command listed on the command line.
- if self.help:
- self._show_help(parser,
- display_options=len(self.commands) == 0,
- commands=self.commands)
- return
-
- # Oops, no commands found -- an end-user error
- if not self.commands:
- raise DistutilsArgError("no commands supplied")
-
- # All is well: return true
- return True
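
# A sketch of the display-option path described in the docstring: a lone
# '--version' is handled by handle_display_options() and parse_command_line()
# returns None (a false value), so no commands run. Values here are made up.
from distutils.dist import Distribution

d = Distribution({'name': 'demo', 'version': '1.0',
                  'script_name': 'setup.py', 'script_args': ['--version']})
result = d.parse_command_line()     # prints '1.0'
print(result)                       # None: display-only, nothing to execute
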
-
- def _get_toplevel_options(self):
- """Return the non-display options recognized at the top level.
-
- This includes options that are recognized *only* at the top
- level as well as options recognized for commands.
- """
- return self.global_options + [
- ("command-packages=", None,
- "list of packages that provide distutils commands"),
- ]
-
- def _parse_command_opts(self, parser, args):
- """Parse the command-line options for a single command.
- 'parser' must be a FancyGetopt instance; 'args' must be the list
- of arguments, starting with the current command (whose options
- we are about to parse). Returns a new version of 'args' with
- the next command at the front of the list; will be the empty
- list if there are no more commands on the command line. Returns
- None if the user asked for help on this command.
- """
- # late import because of mutual dependence between these modules
- from distutils.cmd import Command
-
- # Pull the current command from the head of the command line
- command = args[0]
- if not command_re.match(command):
- raise SystemExit("invalid command name '%s'" % command)
- self.commands.append(command)
-
- # Dig up the command class that implements this command, so we
- # 1) know that it's a valid command, and 2) know which options
- # it takes.
- try:
- cmd_class = self.get_command_class(command)
- except DistutilsModuleError as msg:
- raise DistutilsArgError(msg)
-
- # Require that the command class be derived from Command -- want
- # to be sure that the basic "command" interface is implemented.
- if not issubclass(cmd_class, Command):
- raise DistutilsClassError(
- "command class %s must subclass Command" % cmd_class)
-
- # Also make sure that the command object provides a list of its
- # known options.
- if not (hasattr(cmd_class, 'user_options') and
- isinstance(cmd_class.user_options, list)):
- msg = ("command class %s must provide "
- "'user_options' attribute (a list of tuples)")
- raise DistutilsClassError(msg % cmd_class)
-
- # If the command class has a list of negative alias options,
- # merge it in with the global negative aliases.
- negative_opt = self.negative_opt
- if hasattr(cmd_class, 'negative_opt'):
- negative_opt = negative_opt.copy()
- negative_opt.update(cmd_class.negative_opt)
-
- # Check for help_options in command class. They have a different
- # format (tuple of four) so we need to preprocess them here.
- if (hasattr(cmd_class, 'help_options') and
- isinstance(cmd_class.help_options, list)):
- help_options = fix_help_options(cmd_class.help_options)
- else:
- help_options = []
-
- # All commands support the global options too, just by adding
- # in 'global_options'.
- parser.set_option_table(self.global_options +
- cmd_class.user_options +
- help_options)
- parser.set_negative_aliases(negative_opt)
- (args, opts) = parser.getopt(args[1:])
- if hasattr(opts, 'help') and opts.help:
- self._show_help(parser, display_options=0, commands=[cmd_class])
- return
-
- if (hasattr(cmd_class, 'help_options') and
- isinstance(cmd_class.help_options, list)):
- help_option_found = 0
- for (help_option, short, desc, func) in cmd_class.help_options:
- if hasattr(opts, parser.get_attr_name(help_option)):
- help_option_found = 1
- if callable(func):
- func()
- else:
- raise DistutilsClassError(
- "invalid help function %r for help option '%s': "
- "must be a callable object (function, etc.)"
- % (func, help_option))
-
- if help_option_found:
- return
-
- # Put the options from the command-line into their official
- # holding pen, the 'command_options' dictionary.
- opt_dict = self.get_option_dict(command)
- for (name, value) in vars(opts).items():
- opt_dict[name] = ("command line", value)
-
- return args
-
- def finalize_options(self):
- """Set final values for all the options on the Distribution
- instance, analogous to the .finalize_options() method of Command
- objects.
- """
- for attr in ('keywords', 'platforms'):
- value = getattr(self.metadata, attr)
- if value is None:
- continue
- if isinstance(value, str):
- value = [elm.strip() for elm in value.split(',')]
- setattr(self.metadata, attr, value)
-
- def _show_help(self, parser, global_options=1, display_options=1,
- commands=()):
- """Show help for the setup script command-line in the form of
- several lists of command-line options. 'parser' should be a
- FancyGetopt instance; do not expect it to be returned in the
- same state, as its option table will be reset to make it
- generate the correct help text.
-
- If 'global_options' is true, lists the global options:
- --verbose, --dry-run, etc. If 'display_options' is true, lists
- the "display-only" options: --name, --version, etc. Finally,
- lists per-command help for every command name or command class
- in 'commands'.
- """
- # late import because of mutual dependence between these modules
- from distutils.core import gen_usage
- from distutils.cmd import Command
-
- if global_options:
- if display_options:
- options = self._get_toplevel_options()
- else:
- options = self.global_options
- parser.set_option_table(options)
- parser.print_help(self.common_usage + "\nGlobal options:")
- print('')
-
- if display_options:
- parser.set_option_table(self.display_options)
- parser.print_help(
- "Information display options (just display " +
- "information, ignore any commands)")
- print('')
-
- for command in self.commands:
- if isinstance(command, type) and issubclass(command, Command):
- klass = command
- else:
- klass = self.get_command_class(command)
- if (hasattr(klass, 'help_options') and
- isinstance(klass.help_options, list)):
- parser.set_option_table(klass.user_options +
- fix_help_options(klass.help_options))
- else:
- parser.set_option_table(klass.user_options)
- parser.print_help("Options for '%s' command:" % klass.__name__)
- print('')
-
- print(gen_usage(self.script_name))
-
- def handle_display_options(self, option_order):
- """If there were any non-global "display-only" options
- (--help-commands or the metadata display options) on the command
- line, display the requested info and return true; else return
- false.
- """
- from distutils.core import gen_usage
-
- # User just wants a list of commands -- we'll print it out and stop
- # processing now (ie. if they ran "setup --help-commands foo bar",
- # we ignore "foo bar").
- if self.help_commands:
- self.print_commands()
- print('')
- print(gen_usage(self.script_name))
- return 1
-
- # If user supplied any of the "display metadata" options, then
- # display that metadata in the order in which the user supplied the
- # metadata options.
- any_display_options = 0
- is_display_option = {}
- for option in self.display_options:
- is_display_option[option[0]] = 1
-
- for (opt, val) in option_order:
- if val and is_display_option.get(opt):
- opt = translate_longopt(opt)
- value = getattr(self.metadata, "get_"+opt)()
- if opt in ['keywords', 'platforms']:
- print(','.join(value))
- elif opt in ('classifiers', 'provides', 'requires',
- 'obsoletes'):
- print('\n'.join(value))
- else:
- print(value)
- any_display_options = 1
-
- return any_display_options
-
- def print_command_list(self, commands, header, max_length):
- """Print a subset of the list of all commands -- used by
- 'print_commands()'.
- """
- print(header + ":")
-
- for cmd in commands:
- klass = self.cmdclass.get(cmd)
- if not klass:
- klass = self.get_command_class(cmd)
- try:
- description = klass.description
- except AttributeError:
- description = "(no description available)"
-
- print(" %-*s %s" % (max_length, cmd, description))
-
- def print_commands(self):
- """Print out a help message listing all available commands with a
- description of each. The list is divided into "standard commands"
- (listed in distutils.command.__all__) and "extra commands"
- (mentioned in self.cmdclass, but not a standard command). The
- descriptions come from the command class attribute
- 'description'.
- """
- import distutils.command
- std_commands = distutils.command.__all__
- is_std = {}
- for cmd in std_commands:
- is_std[cmd] = 1
-
- extra_commands = []
- for cmd in self.cmdclass.keys():
- if not is_std.get(cmd):
- extra_commands.append(cmd)
-
- max_length = 0
- for cmd in (std_commands + extra_commands):
- if len(cmd) > max_length:
- max_length = len(cmd)
-
- self.print_command_list(std_commands,
- "Standard commands",
- max_length)
- if extra_commands:
- print()
- self.print_command_list(extra_commands,
- "Extra commands",
- max_length)
-
- def get_command_list(self):
- """Get a list of (command, description) tuples.
- The list is divided into "standard commands" (listed in
- distutils.command.__all__) and "extra commands" (mentioned in
- self.cmdclass, but not a standard command). The descriptions come
- from the command class attribute 'description'.
- """
- # Currently this is only used on Mac OS, for the Mac-only GUI
- # Distutils interface (by Jack Jansen)
- import distutils.command
- std_commands = distutils.command.__all__
- is_std = {}
- for cmd in std_commands:
- is_std[cmd] = 1
-
- extra_commands = []
- for cmd in self.cmdclass.keys():
- if not is_std.get(cmd):
- extra_commands.append(cmd)
-
- rv = []
- for cmd in (std_commands + extra_commands):
- klass = self.cmdclass.get(cmd)
- if not klass:
- klass = self.get_command_class(cmd)
- try:
- description = klass.description
- except AttributeError:
- description = "(no description available)"
- rv.append((cmd, description))
- return rv
-
- # -- Command class/object methods ----------------------------------
-
- def get_command_packages(self):
- """Return a list of packages from which commands are loaded."""
- pkgs = self.command_packages
- if not isinstance(pkgs, list):
- if pkgs is None:
- pkgs = ''
- pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg.strip()]
- if "distutils.command" not in pkgs:
- pkgs.insert(0, "distutils.command")
- self.command_packages = pkgs
- return pkgs
-
- def get_command_class(self, command):
- """Return the class that implements the Distutils command named by
- 'command'. First we check the 'cmdclass' dictionary; if the
- command is mentioned there, we fetch the class object from the
- dictionary and return it. Otherwise we load the command module
- ("distutils.command." + command) and fetch the command class from
- the module. The loaded class is also stored in 'cmdclass'
- to speed future calls to 'get_command_class()'.
-
- Raises DistutilsModuleError if the expected module could not be
- found, or if that module does not define the expected class.
- """
- klass = self.cmdclass.get(command)
- if klass:
- return klass
-
- for pkgname in self.get_command_packages():
- module_name = "%s.%s" % (pkgname, command)
- klass_name = command
-
- try:
- __import__(module_name)
- module = sys.modules[module_name]
- except ImportError:
- continue
-
- try:
- klass = getattr(module, klass_name)
- except AttributeError:
- raise DistutilsModuleError(
- "invalid command '%s' (no class '%s' in module '%s')"
- % (command, klass_name, module_name))
-
- self.cmdclass[command] = klass
- return klass
-
- raise DistutilsModuleError("invalid command '%s'" % command)
-
- def get_command_obj(self, command, create=1):
- """Return the command object for 'command'. Normally this object
- is cached on a previous call to 'get_command_obj()'; if no command
- object for 'command' is in the cache, then we either create and
- return it (if 'create' is true) or return None.
- """
- cmd_obj = self.command_obj.get(command)
- if not cmd_obj and create:
- if DEBUG:
- self.announce("Distribution.get_command_obj(): "
- "creating '%s' command object" % command)
-
- klass = self.get_command_class(command)
- cmd_obj = self.command_obj[command] = klass(self)
- self.have_run[command] = 0
-
- # Set any options that were supplied in config files
- # or on the command line. (NB. support for error
- # reporting is lame here: any errors aren't reported
- # until 'finalize_options()' is called, which means
- # we won't report the source of the error.)
- options = self.command_options.get(command)
- if options:
- self._set_command_options(cmd_obj, options)
-
- return cmd_obj
-
- def _set_command_options(self, command_obj, option_dict=None):
- """Set the options for 'command_obj' from 'option_dict'. Basically
- this means copying elements of a dictionary ('option_dict') to
- attributes of an instance ('command').
-
- 'command_obj' must be a Command instance. If 'option_dict' is not
- supplied, uses the standard option dictionary for this command
- (from 'self.command_options').
- """
- command_name = command_obj.get_command_name()
- if option_dict is None:
- option_dict = self.get_option_dict(command_name)
-
- if DEBUG:
- self.announce(" setting options for '%s' command:" % command_name)
- for (option, (source, value)) in option_dict.items():
- if DEBUG:
- self.announce(" %s = %s (from %s)" % (option, value,
- source))
- try:
- bool_opts = [translate_longopt(o)
- for o in command_obj.boolean_options]
- except AttributeError:
- bool_opts = []
- try:
- neg_opt = command_obj.negative_opt
- except AttributeError:
- neg_opt = {}
-
- try:
- is_string = isinstance(value, str)
- if option in neg_opt and is_string:
- setattr(command_obj, neg_opt[option], not strtobool(value))
- elif option in bool_opts and is_string:
- setattr(command_obj, option, strtobool(value))
- elif hasattr(command_obj, option):
- setattr(command_obj, option, value)
- else:
- raise DistutilsOptionError(
- "error in %s: command '%s' has no such option '%s'"
- % (source, command_name, option))
- except ValueError as msg:
- raise DistutilsOptionError(msg)
-
- def reinitialize_command(self, command, reinit_subcommands=0):
- """Reinitializes a command to the state it was in when first
- returned by 'get_command_obj()': ie., initialized but not yet
- finalized. This provides the opportunity to sneak option
- values in programmatically, overriding or supplementing
- user-supplied values from the config files and command line.
- You'll have to re-finalize the command object (by calling
- 'finalize_options()' or 'ensure_finalized()') before using it for
- real.
-
- 'command' should be a command name (string) or command object. If
- 'reinit_subcommands' is true, also reinitializes the command's
- sub-commands, as declared by the 'sub_commands' class attribute (if
- it has one). See the "install" command for an example. Only
- reinitializes the sub-commands that actually matter, ie. those
- whose test predicates return true.
-
- Returns the reinitialized command object.
- """
- from distutils.cmd import Command
- if not isinstance(command, Command):
- command_name = command
- command = self.get_command_obj(command_name)
- else:
- command_name = command.get_command_name()
-
- if not command.finalized:
- return command
- command.initialize_options()
- command.finalized = 0
- self.have_run[command_name] = 0
- self._set_command_options(command)
-
- if reinit_subcommands:
- for sub in command.get_sub_commands():
- self.reinitialize_command(sub, reinit_subcommands)
-
- return command
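
# A sketch of the reinitialize-and-override pattern the docstring describes;
# 'custom_build' is an arbitrary example value.
from distutils.dist import Distribution

dist = Distribution()
build = dist.get_command_obj('build')
build.ensure_finalized()                 # options now have final values
build = dist.reinitialize_command('build')
build.build_base = 'custom_build'        # sneak in a value programmatically
build.ensure_finalized()                 # re-finalize before using it
print(build.build_base)                  # custom_build
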
-
- # -- Methods that operate on the Distribution ----------------------
-
- def announce(self, msg, level=log.INFO):
- log.log(level, msg)
-
- def run_commands(self):
- """Run each command that was seen on the setup script command line.
- Uses the list of commands found and cache of command objects
- created by 'get_command_obj()'.
- """
- for cmd in self.commands:
- self.run_command(cmd)
-
- # -- Methods that operate on its Commands --------------------------
-
- def run_command(self, command):
- """Do whatever it takes to run a command (including nothing at all,
- if the command has already been run). Specifically: if we have
- already created and run the command named by 'command', return
- silently without doing anything. If the command named by 'command'
- doesn't even have a command object yet, create one. Then invoke
- 'run()' on that command object (or an existing one).
- """
- # Already been here, done that? then return silently.
- if self.have_run.get(command):
- return
-
- log.info("running %s", command)
- cmd_obj = self.get_command_obj(command)
- cmd_obj.ensure_finalized()
- cmd_obj.run()
- self.have_run[command] = 1
-
- # -- Distribution query methods ------------------------------------
-
- def has_pure_modules(self):
- return len(self.packages or self.py_modules or []) > 0
-
- def has_ext_modules(self):
- return self.ext_modules and len(self.ext_modules) > 0
-
- def has_c_libraries(self):
- return self.libraries and len(self.libraries) > 0
-
- def has_modules(self):
- return self.has_pure_modules() or self.has_ext_modules()
-
- def has_headers(self):
- return self.headers and len(self.headers) > 0
-
- def has_scripts(self):
- return self.scripts and len(self.scripts) > 0
-
- def has_data_files(self):
- return self.data_files and len(self.data_files) > 0
-
- def is_pure(self):
- return (self.has_pure_modules() and
- not self.has_ext_modules() and
- not self.has_c_libraries())
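
# A quick sketch of the query methods on a pure-Python distribution;
# 'demo' is a placeholder module name.
from distutils.dist import Distribution

d = Distribution({'py_modules': ['demo']})
print(d.has_pure_modules(), d.has_ext_modules(), d.is_pure())
# -> True None True   (has_ext_modules returns a falsy value here)
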
-
- # -- Metadata query methods ----------------------------------------
-
- # If you're looking for 'get_name()', 'get_version()', and so forth,
- # they are defined in a sneaky way: the constructor binds self.get_XXX
- # to self.metadata.get_XXX. The actual code is in the
- # DistributionMetadata class, below.
-
-class DistributionMetadata:
- """Dummy class to hold the distribution meta-data: name, version,
- author, and so forth.
- """
-
- _METHOD_BASENAMES = ("name", "version", "author", "author_email",
- "maintainer", "maintainer_email", "url",
- "license", "description", "long_description",
- "keywords", "platforms", "fullname", "contact",
- "contact_email", "classifiers", "download_url",
- # PEP 314
- "provides", "requires", "obsoletes",
- )
-
- def __init__(self, path=None):
- if path is not None:
- with open(path) as fp:
- self.read_pkg_file(fp)
- else:
- self.name = None
- self.version = None
- self.author = None
- self.author_email = None
- self.maintainer = None
- self.maintainer_email = None
- self.url = None
- self.license = None
- self.description = None
- self.long_description = None
- self.keywords = None
- self.platforms = None
- self.classifiers = None
- self.download_url = None
- # PEP 314
- self.provides = None
- self.requires = None
- self.obsoletes = None
-
- def read_pkg_file(self, file):
- """Reads the metadata values from a file object."""
- msg = message_from_file(file)
-
- def _read_field(name):
- value = msg[name]
- if value == 'UNKNOWN':
- return None
- return value
-
- def _read_list(name):
- values = msg.get_all(name, None)
- if values == []:
- return None
- return values
-
- metadata_version = msg['metadata-version']
- self.name = _read_field('name')
- self.version = _read_field('version')
- self.description = _read_field('summary')
- # we are filling author only.
- self.author = _read_field('author')
- self.maintainer = None
- self.author_email = _read_field('author-email')
- self.maintainer_email = None
- self.url = _read_field('home-page')
- self.license = _read_field('license')
-
- if 'download-url' in msg:
- self.download_url = _read_field('download-url')
- else:
- self.download_url = None
-
- self.long_description = _read_field('description')
-
- if 'keywords' in msg:
- self.keywords = _read_field('keywords').split(',')
-
- self.platforms = _read_list('platform')
- self.classifiers = _read_list('classifier')
-
- # PEP 314 - these fields only exist in 1.1
- if metadata_version == '1.1':
- self.requires = _read_list('requires')
- self.provides = _read_list('provides')
- self.obsoletes = _read_list('obsoletes')
- else:
- self.requires = None
- self.provides = None
- self.obsoletes = None
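
# A sketch of read_pkg_file() on an in-memory PKG-INFO; the field values are
# invented. Metadata-Version 1.1 unlocks the PEP 314 list fields.
import io
from distutils.dist import DistributionMetadata

pkg_info = io.StringIO(
    "Metadata-Version: 1.1\n"
    "Name: demo\n"
    "Version: 1.0\n"
    "Summary: A demo package\n"
    "Classifier: Programming Language :: Python\n"
)
meta = DistributionMetadata()
meta.read_pkg_file(pkg_info)
print(meta.get_fullname())       # demo-1.0
print(meta.classifiers)          # ['Programming Language :: Python']
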
-
- def write_pkg_info(self, base_dir):
- """Write the PKG-INFO file into the release tree.
- """
- with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
- encoding='UTF-8') as pkg_info:
- self.write_pkg_file(pkg_info)
-
- def write_pkg_file(self, file):
- """Write the PKG-INFO format data to a file object.
- """
- version = '1.0'
- if (self.provides or self.requires or self.obsoletes or
- self.classifiers or self.download_url):
- version = '1.1'
-
- file.write('Metadata-Version: %s\n' % version)
- file.write('Name: %s\n' % self.get_name())
- file.write('Version: %s\n' % self.get_version())
- file.write('Summary: %s\n' % self.get_description())
- file.write('Home-page: %s\n' % self.get_url())
- file.write('Author: %s\n' % self.get_contact())
- file.write('Author-email: %s\n' % self.get_contact_email())
- file.write('License: %s\n' % self.get_license())
- if self.download_url:
- file.write('Download-URL: %s\n' % self.download_url)
-
- long_desc = rfc822_escape(self.get_long_description())
- file.write('Description: %s\n' % long_desc)
-
- keywords = ','.join(self.get_keywords())
- if keywords:
- file.write('Keywords: %s\n' % keywords)
-
- self._write_list(file, 'Platform', self.get_platforms())
- self._write_list(file, 'Classifier', self.get_classifiers())
-
- # PEP 314
- self._write_list(file, 'Requires', self.get_requires())
- self._write_list(file, 'Provides', self.get_provides())
- self._write_list(file, 'Obsoletes', self.get_obsoletes())
-
- def _write_list(self, file, name, values):
- for value in values:
- file.write('%s: %s\n' % (name, value))
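
# The matching write-side sketch: unset fields fall back to their
# get_XXX() defaults ('UNKNOWN', empty lists, and so on).
import io
from distutils.dist import DistributionMetadata

meta = DistributionMetadata()
meta.name, meta.version = 'demo', '1.0'
buf = io.StringIO()
meta.write_pkg_file(buf)
print(buf.getvalue().splitlines()[:3])
# ['Metadata-Version: 1.0', 'Name: demo', 'Version: 1.0']
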
-
- # -- Metadata query methods ----------------------------------------
-
- def get_name(self):
- return self.name or "UNKNOWN"
-
- def get_version(self):
- return self.version or "0.0.0"
-
- def get_fullname(self):
- return "%s-%s" % (self.get_name(), self.get_version())
-
- def get_author(self):
- return self.author or "UNKNOWN"
-
- def get_author_email(self):
- return self.author_email or "UNKNOWN"
-
- def get_maintainer(self):
- return self.maintainer or "UNKNOWN"
-
- def get_maintainer_email(self):
- return self.maintainer_email or "UNKNOWN"
-
- def get_contact(self):
- return self.maintainer or self.author or "UNKNOWN"
-
- def get_contact_email(self):
- return self.maintainer_email or self.author_email or "UNKNOWN"
-
- def get_url(self):
- return self.url or "UNKNOWN"
-
- def get_license(self):
- return self.license or "UNKNOWN"
- get_licence = get_license
-
- def get_description(self):
- return self.description or "UNKNOWN"
-
- def get_long_description(self):
- return self.long_description or "UNKNOWN"
-
- def get_keywords(self):
- return self.keywords or []
-
- def set_keywords(self, value):
- self.keywords = _ensure_list(value, 'keywords')
-
- def get_platforms(self):
- return self.platforms or ["UNKNOWN"]
-
- def set_platforms(self, value):
- self.platforms = _ensure_list(value, 'platforms')
-
- def get_classifiers(self):
- return self.classifiers or []
-
- def set_classifiers(self, value):
- self.classifiers = _ensure_list(value, 'classifiers')
-
- def get_download_url(self):
- return self.download_url or "UNKNOWN"
-
- # PEP 314
- def get_requires(self):
- return self.requires or []
-
- def set_requires(self, value):
- import distutils.versionpredicate
- for v in value:
- distutils.versionpredicate.VersionPredicate(v)
- self.requires = list(value)
-
- def get_provides(self):
- return self.provides or []
-
- def set_provides(self, value):
- value = [v.strip() for v in value]
- for v in value:
- import distutils.versionpredicate
- distutils.versionpredicate.split_provision(v)
- self.provides = value
-
- def get_obsoletes(self):
- return self.obsoletes or []
-
- def set_obsoletes(self, value):
- import distutils.versionpredicate
- for v in value:
- distutils.versionpredicate.VersionPredicate(v)
- self.obsoletes = list(value)
-
-def fix_help_options(options):
- """Convert a 4-tuple 'help_options' list as found in various command
- classes to the 3-tuple form required by FancyGetopt.
- """
- new_options = []
- for help_tuple in options:
- new_options.append(help_tuple[0:3])
- return new_options
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/errors.py b/contrib/python/setuptools/py3/setuptools/_distutils/errors.py
deleted file mode 100644
index 8b93059e19f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/errors.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules. Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-class DistutilsError (Exception):
- """The root of all Distutils evil."""
- pass
-
-class DistutilsModuleError (DistutilsError):
- """Unable to load an expected module, or to find an expected class
- within some module (in particular, command modules and classes)."""
- pass
-
-class DistutilsClassError (DistutilsError):
- """Some command class (or possibly distribution class, if anyone
- feels a need to subclass Distribution) is found not to be holding
- up its end of the bargain, ie. implementing some part of the
- "command "interface."""
- pass
-
-class DistutilsGetoptError (DistutilsError):
- """The option table provided to 'fancy_getopt()' is bogus."""
- pass
-
-class DistutilsArgError (DistutilsError):
- """Raised by fancy_getopt in response to getopt.error -- ie. an
- error in the command line usage."""
- pass
-
-class DistutilsFileError (DistutilsError):
- """Any problems in the filesystem: expected file not found, etc.
- Typically this is for problems that we detect before OSError
- could be raised."""
- pass
-
-class DistutilsOptionError (DistutilsError):
- """Syntactic/semantic errors in command options, such as use of
- mutually conflicting options, or inconsistent options,
- badly-spelled values, etc. No distinction is made between option
- values originating in the setup script, the command line, config
- files, or what-have-you -- but if we *know* something originated in
- the setup script, we'll raise DistutilsSetupError instead."""
- pass
-
-class DistutilsSetupError (DistutilsError):
- """For errors that can be definitely blamed on the setup script,
- such as invalid keyword arguments to 'setup()'."""
- pass
-
-class DistutilsPlatformError (DistutilsError):
- """We don't know how to do something on the current platform (but
- we do know how to do it on some platform) -- eg. trying to compile
- C files on a platform not supported by a CCompiler subclass."""
- pass
-
-class DistutilsExecError (DistutilsError):
- """Any problems executing an external program (such as the C
- compiler, when compiling C files)."""
- pass
-
-class DistutilsInternalError (DistutilsError):
- """Internal inconsistencies or impossibilities (obviously, this
- should never be seen if the code is working!)."""
- pass
-
-class DistutilsTemplateError (DistutilsError):
- """Syntax error in a file list template."""
-
-class DistutilsByteCompileError(DistutilsError):
- """Byte compile error."""
-
-# Exception classes used by the CCompiler implementation classes
-class CCompilerError (Exception):
- """Some compile/link operation failed."""
-
-class PreprocessError (CCompilerError):
- """Failure to preprocess one or more C/C++ files."""
-
-class CompileError (CCompilerError):
- """Failure to compile one or more C/C++ source files."""
-
-class LibError (CCompilerError):
- """Failure to create a static library from one or more C/C++ object
- files."""
-
-class LinkError (CCompilerError):
- """Failure to link one or more C/C++ object files into an executable
- or shared library file."""
-
-class UnknownFileError (CCompilerError):
- """Attempt to process an unknown file type."""
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/extension.py b/contrib/python/setuptools/py3/setuptools/_distutils/extension.py
deleted file mode 100644
index c507da360aa..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/extension.py
+++ /dev/null
@@ -1,240 +0,0 @@
-"""distutils.extension
-
-Provides the Extension class, used to describe C/C++ extension
-modules in setup scripts."""
-
-import os
-import warnings
-
-# This class is really only used by the "build_ext" command, so it might
-# make sense to put it in distutils.command.build_ext. However, that
-# module is already big enough, and I want to make this class a bit more
-# complex to simplify some common cases ("foo" module in "foo.c") and do
-# better error-checking ("foo.c" actually exists).
-#
-# Also, putting this in build_ext.py means every setup script would have to
-# import that large-ish module (indirectly, through distutils.core) in
-# order to do anything.
-
-class Extension:
- """Just a collection of attributes that describes an extension
- module and everything needed to build it (hopefully in a portable
- way, but there are hooks that let you be as unportable as you need).
-
- Instance attributes:
- name : string
- the full name of the extension, including any packages -- ie.
- *not* a filename or pathname, but Python dotted name
- sources : [string]
- list of source filenames, relative to the distribution root
- (where the setup script lives), in Unix form (slash-separated)
- for portability. Source files may be C, C++, SWIG (.i),
- platform-specific resource files, or whatever else is recognized
- by the "build_ext" command as source for a Python extension.
- include_dirs : [string]
- list of directories to search for C/C++ header files (in Unix
- form for portability)
- define_macros : [(name : string, value : string|None)]
- list of macros to define; each macro is defined using a 2-tuple,
- where 'value' is either the string to define it to or None to
- define it without a particular value (equivalent of "#define
- FOO" in source or -DFOO on Unix C compiler command line)
- undef_macros : [string]
- list of macros to undefine explicitly
- library_dirs : [string]
- list of directories to search for C/C++ libraries at link time
- libraries : [string]
- list of library names (not filenames or paths) to link against
- runtime_library_dirs : [string]
- list of directories to search for C/C++ libraries at run time
- (for shared extensions, this is when the extension is loaded)
- extra_objects : [string]
- list of extra files to link with (eg. object files not implied
- by 'sources', static library that must be explicitly specified,
- binary resource files, etc.)
- extra_compile_args : [string]
- any extra platform- and compiler-specific information to use
- when compiling the source files in 'sources'. For platforms and
- compilers where "command line" makes sense, this is typically a
- list of command-line arguments, but for other platforms it could
- be anything.
- extra_link_args : [string]
- any extra platform- and compiler-specific information to use
- when linking object files together to create the extension (or
- to create a new static Python interpreter). Similar
- interpretation as for 'extra_compile_args'.
- export_symbols : [string]
- list of symbols to be exported from a shared extension. Not
- used on all platforms, and not generally necessary for Python
- extensions, which typically export exactly one symbol: "init" +
- extension_name.
- swig_opts : [string]
- any extra options to pass to SWIG if a source file has the .i
- extension.
- depends : [string]
- list of files that the extension depends on
- language : string
- extension language (i.e. "c", "c++", "objc"). Will be detected
- from the source extensions if not provided.
- optional : boolean
- specifies that a build failure in the extension should not abort the
- build process, but simply not install the failing extension.
- """
-
- # When adding arguments to this constructor, be sure to update
- # setup_keywords in core.py.
- def __init__(self, name, sources,
- include_dirs=None,
- define_macros=None,
- undef_macros=None,
- library_dirs=None,
- libraries=None,
- runtime_library_dirs=None,
- extra_objects=None,
- extra_compile_args=None,
- extra_link_args=None,
- export_symbols=None,
- swig_opts = None,
- depends=None,
- language=None,
- optional=None,
- **kw # To catch unknown keywords
- ):
- if not isinstance(name, str):
- raise AssertionError("'name' must be a string")
- if not (isinstance(sources, list) and
- all(isinstance(v, str) for v in sources)):
- raise AssertionError("'sources' must be a list of strings")
-
- self.name = name
- self.sources = sources
- self.include_dirs = include_dirs or []
- self.define_macros = define_macros or []
- self.undef_macros = undef_macros or []
- self.library_dirs = library_dirs or []
- self.libraries = libraries or []
- self.runtime_library_dirs = runtime_library_dirs or []
- self.extra_objects = extra_objects or []
- self.extra_compile_args = extra_compile_args or []
- self.extra_link_args = extra_link_args or []
- self.export_symbols = export_symbols or []
- self.swig_opts = swig_opts or []
- self.depends = depends or []
- self.language = language
- self.optional = optional
-
- # If there are unknown keyword options, warn about them
- if len(kw) > 0:
- options = [repr(option) for option in kw]
- options = ', '.join(sorted(options))
- msg = "Unknown Extension options: %s" % options
- warnings.warn(msg)
-
- def __repr__(self):
- return '<%s.%s(%r) at %#x>' % (
- self.__class__.__module__,
- self.__class__.__qualname__,
- self.name,
- id(self))
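
# A minimal sketch of constructing an Extension as described above; the
# module name, source path, and macro are illustrative.
from distutils.extension import Extension

ext = Extension(
    'demo._speedups',                      # dotted module name, not a path
    sources=['src/speedups.c'],            # Unix-style, relative to setup.py
    define_macros=[('NDEBUG', None)],      # -DNDEBUG
    libraries=['m'],                       # link against libm
)
print(ext)   # <distutils.extension.Extension('demo._speedups') at 0x...>
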
-
-
-def read_setup_file(filename):
- """Reads a Setup file and returns Extension instances."""
- from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
- _variable_rx)
-
- from distutils.text_file import TextFile
- from distutils.util import split_quoted
-
- # First pass over the file to gather "VAR = VALUE" assignments.
- vars = parse_makefile(filename)
-
- # Second pass to gobble up the real content: lines of the form
- # <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
- file = TextFile(filename,
- strip_comments=1, skip_blanks=1, join_lines=1,
- lstrip_ws=1, rstrip_ws=1)
- try:
- extensions = []
-
- while True:
- line = file.readline()
- if line is None: # eof
- break
- if _variable_rx.match(line): # VAR=VALUE, handled in first pass
- continue
-
- if line[0] == line[-1] == "*":
- file.warn("'%s' lines not handled yet" % line)
- continue
-
- line = expand_makefile_vars(line, vars)
- words = split_quoted(line)
-
- # NB. this parses a slightly different syntax than the old
- # makesetup script: here, there must be exactly one extension per
- # line, and it must be the first word of the line. I have no idea
- # why the old syntax supported multiple extensions per line, as
- # they all wind up being the same.
-
- module = words[0]
- ext = Extension(module, [])
- append_next_word = None
-
- for word in words[1:]:
- if append_next_word is not None:
- append_next_word.append(word)
- append_next_word = None
- continue
-
- suffix = os.path.splitext(word)[1]
- switch = word[0:2]
- value = word[2:]
-
- if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
- # hmm, should we do something about C vs. C++ sources?
- # or leave it up to the CCompiler implementation to
- # worry about?
- ext.sources.append(word)
- elif switch == "-I":
- ext.include_dirs.append(value)
- elif switch == "-D":
- equals = value.find("=")
- if equals == -1: # bare "-DFOO" -- no value
- ext.define_macros.append((value, None))
- else: # "-DFOO=blah"
- ext.define_macros.append((value[0:equals],
- value[equals+2:]))
- elif switch == "-U":
- ext.undef_macros.append(value)
- elif switch == "-C": # only here 'cause makesetup has it!
- ext.extra_compile_args.append(word)
- elif switch == "-l":
- ext.libraries.append(value)
- elif switch == "-L":
- ext.library_dirs.append(value)
- elif switch == "-R":
- ext.runtime_library_dirs.append(value)
- elif word == "-rpath":
- append_next_word = ext.runtime_library_dirs
- elif word == "-Xlinker":
- append_next_word = ext.extra_link_args
- elif word == "-Xcompiler":
- append_next_word = ext.extra_compile_args
- elif switch == "-u":
- ext.extra_link_args.append(word)
- if not value:
- append_next_word = ext.extra_link_args
- elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
- # NB. a really faithful emulation of makesetup would
- # append a .o file to extra_objects only if it
- # had a slash in it; otherwise, it would s/.o/.c/
- # and append it to sources. Hmmmm.
- ext.extra_objects.append(word)
- else:
- file.warn("unrecognized argument '%s'" % word)
-
- extensions.append(ext)
- finally:
- file.close()
-
- return extensions
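
# A sketch of read_setup_file() on a one-line Setup file, assuming a writable
# temp directory; 'demo' and the flags are invented.
import os
import tempfile
from distutils.extension import read_setup_file

with tempfile.NamedTemporaryFile('w', delete=False) as f:
    f.write("demo demomodule.c -DDEBUG=1 -Iinclude -lm\n")
exts = read_setup_file(f.name)
print(exts[0].name, exts[0].sources)      # demo ['demomodule.c']
print(exts[0].define_macros)              # [('DEBUG', '1')]
os.unlink(f.name)
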
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/fancy_getopt.py b/contrib/python/setuptools/py3/setuptools/_distutils/fancy_getopt.py
deleted file mode 100644
index 7d170dd2773..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/fancy_getopt.py
+++ /dev/null
@@ -1,457 +0,0 @@
-"""distutils.fancy_getopt
-
-Wrapper around the standard getopt module that provides the following
-additional features:
- * short and long options are tied together
- * options have help strings, so fancy_getopt could potentially
- create a complete usage summary
- * options set attributes of a passed-in object
-"""
-
-import sys, string, re
-import getopt
-from distutils.errors import *
-
-# Much like command_re in distutils.core, this is close to but not quite
-# the same as a Python NAME -- except, in the spirit of most GNU
-# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
-# The similarities to NAME are again not a coincidence...
-longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
-longopt_re = re.compile(r'^%s$' % longopt_pat)
-
-# For recognizing "negative alias" options, eg. "quiet=!verbose"
-neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
-
-# This is used to translate long options to legitimate Python identifiers
-# (for use as attributes of some object).
-longopt_xlate = str.maketrans('-', '_')
-
-class FancyGetopt:
- """Wrapper around the standard 'getopt()' module that provides some
- handy extra functionality:
- * short and long options are tied together
- * options have help strings, and help text can be assembled
- from them
- * options set attributes of a passed-in object
- * boolean options can have "negative aliases" -- eg. if
- --quiet is the "negative alias" of --verbose, then "--quiet"
- on the command line sets 'verbose' to false
- """
-
- def __init__(self, option_table=None):
- # The option table is (currently) a list of tuples. The
- # tuples may have three or four values:
- # (long_option, short_option, help_string [, repeatable])
- # if an option takes an argument, its long_option should have '='
- # appended; short_option should just be a single character, no ':'
- # in any case. If a long_option doesn't have a corresponding
- # short_option, short_option should be None. All option tuples
- # must have long options.
- self.option_table = option_table
-
- # 'option_index' maps long option names to entries in the option
- # table (ie. those 3-tuples).
- self.option_index = {}
- if self.option_table:
- self._build_index()
-
- # 'alias' records (duh) alias options; {'foo': 'bar'} means
- # --foo is an alias for --bar
- self.alias = {}
-
- # 'negative_alias' keeps track of options that are the boolean
- # opposite of some other option
- self.negative_alias = {}
-
- # These keep track of the information in the option table. We
- # don't actually populate these structures until we're ready to
- # parse the command-line, since the 'option_table' passed in here
- # isn't necessarily the final word.
- self.short_opts = []
- self.long_opts = []
- self.short2long = {}
- self.attr_name = {}
- self.takes_arg = {}
-
- # And 'option_order' is filled up in 'getopt()'; it records the
- # original order of options (and their values) on the command-line,
- # but expands short options, converts aliases, etc.
- self.option_order = []
-
- def _build_index(self):
- self.option_index.clear()
- for option in self.option_table:
- self.option_index[option[0]] = option
-
- def set_option_table(self, option_table):
- self.option_table = option_table
- self._build_index()
-
- def add_option(self, long_option, short_option=None, help_string=None):
- if long_option in self.option_index:
- raise DistutilsGetoptError(
- "option conflict: already an option '%s'" % long_option)
- else:
- option = (long_option, short_option, help_string)
- self.option_table.append(option)
- self.option_index[long_option] = option
-
- def has_option(self, long_option):
- """Return true if the option table for this parser has an
- option with long name 'long_option'."""
- return long_option in self.option_index
-
- def get_attr_name(self, long_option):
- """Translate long option name 'long_option' to the form it
- has as an attribute of some object: ie., translate hyphens
- to underscores."""
- return long_option.translate(longopt_xlate)
-
- def _check_alias_dict(self, aliases, what):
- assert isinstance(aliases, dict)
- for (alias, opt) in aliases.items():
- if alias not in self.option_index:
- raise DistutilsGetoptError(("invalid %s '%s': "
- "option '%s' not defined") % (what, alias, alias))
- if opt not in self.option_index:
- raise DistutilsGetoptError(("invalid %s '%s': "
- "aliased option '%s' not defined") % (what, alias, opt))
-
- def set_aliases(self, alias):
- """Set the aliases for this option parser."""
- self._check_alias_dict(alias, "alias")
- self.alias = alias
-
- def set_negative_aliases(self, negative_alias):
- """Set the negative aliases for this option parser.
- 'negative_alias' should be a dictionary mapping option names to
- option names, both the key and value must already be defined
- in the option table."""
- self._check_alias_dict(negative_alias, "negative alias")
- self.negative_alias = negative_alias
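
# A sketch of FancyGetopt with a repeatable option and a negative alias;
# the option table mirrors Distribution.global_options in miniature.
from distutils.fancy_getopt import FancyGetopt

table = [('verbose', 'v', "run verbosely", 1),
         ('quiet', 'q', "run quietly"),
         ('output=', 'o', "output file")]
parser = FancyGetopt(table)
parser.set_negative_aliases({'quiet': 'verbose'})
args, opts = parser.getopt(['-v', '-v', '--output', 'out.txt', 'rest'])
print(opts.verbose, opts.output, args)    # 2 out.txt ['rest']
args2, opts2 = parser.getopt(['--quiet'])
print(opts2.verbose)                      # 0: --quiet is the negative alias
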
-
- def _grok_option_table(self):
- """Populate the various data structures that keep tabs on the
- option table. Called by 'getopt()' before it can do anything
- worthwhile.
- """
- self.long_opts = []
- self.short_opts = []
- self.short2long.clear()
- self.repeat = {}
-
- for option in self.option_table:
- if len(option) == 3:
- long, short, help = option
- repeat = 0
- elif len(option) == 4:
- long, short, help, repeat = option
- else:
- # the option table is part of the code, so simply
- # assert that it is correct
- raise ValueError("invalid option tuple: %r" % (option,))
-
- # Type- and value-check the option names
- if not isinstance(long, str) or len(long) < 2:
- raise DistutilsGetoptError(("invalid long option '%s': "
- "must be a string of length >= 2") % long)
-
- if (not ((short is None) or
- (isinstance(short, str) and len(short) == 1))):
- raise DistutilsGetoptError("invalid short option '%s': "
- "must a single character or None" % short)
-
- self.repeat[long] = repeat
- self.long_opts.append(long)
-
- if long[-1] == '=': # option takes an argument?
- if short: short = short + ':'
- long = long[0:-1]
- self.takes_arg[long] = 1
- else:
- # Is the option a "negative alias" for some other option (e.g.
- # "quiet" == "!verbose")?
- alias_to = self.negative_alias.get(long)
- if alias_to is not None:
- if self.takes_arg[alias_to]:
- raise DistutilsGetoptError(
- "invalid negative alias '%s': "
- "aliased option '%s' takes a value"
- % (long, alias_to))
-
- self.long_opts[-1] = long # XXX redundant?!
- self.takes_arg[long] = 0
-
- # If this is an alias option, make sure its "takes arg" flag is
- # the same as the option it's aliased to.
- alias_to = self.alias.get(long)
- if alias_to is not None:
- if self.takes_arg[long] != self.takes_arg[alias_to]:
- raise DistutilsGetoptError(
- "invalid alias '%s': inconsistent with "
- "aliased option '%s' (one of them takes a value, "
- "the other doesn't"
- % (long, alias_to))
-
- # Now enforce some bondage on the long option name, so we can
- # later translate it to an attribute name on some object. Have
- # to do this a bit late to make sure we've removed any trailing
- # '='.
- if not longopt_re.match(long):
- raise DistutilsGetoptError(
- "invalid long option name '%s' "
- "(must be letters, numbers, hyphens only" % long)
-
- self.attr_name[long] = self.get_attr_name(long)
- if short:
- self.short_opts.append(short)
- self.short2long[short[0]] = long
-
- def getopt(self, args=None, object=None):
- """Parse command-line options in args. Store as attributes on object.
-
- If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
- 'object' is None or not supplied, creates a new OptionDummy
- object, stores option values there, and returns a tuple (args,
- object). If 'object' is supplied, it is modified in place and
- 'getopt()' just returns 'args'; in both cases, the returned
- 'args' is a modified copy of the passed-in 'args' list, which
- is left untouched.
- """
- if args is None:
- args = sys.argv[1:]
- if object is None:
- object = OptionDummy()
- created_object = True
- else:
- created_object = False
-
- self._grok_option_table()
-
- short_opts = ' '.join(self.short_opts)
- try:
- opts, args = getopt.getopt(args, short_opts, self.long_opts)
- except getopt.error as msg:
- raise DistutilsArgError(msg)
-
- for opt, val in opts:
- if len(opt) == 2 and opt[0] == '-': # it's a short option
- opt = self.short2long[opt[1]]
- else:
- assert len(opt) > 2 and opt[:2] == '--'
- opt = opt[2:]
-
- alias = self.alias.get(opt)
- if alias:
- opt = alias
-
- if not self.takes_arg[opt]: # boolean option?
- assert val == '', "boolean option can't have value"
- alias = self.negative_alias.get(opt)
- if alias:
- opt = alias
- val = 0
- else:
- val = 1
-
- attr = self.attr_name[opt]
- # The only repeating option at the moment is 'verbose'.
- # It has a negative option -q quiet, which should set verbose = 0.
- if val and self.repeat.get(attr) is not None:
- val = getattr(object, attr, 0) + 1
- setattr(object, attr, val)
- self.option_order.append((opt, val))
-
- # for opts
- if created_object:
- return args, object
- else:
- return args
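-
- # A minimal usage sketch of 'getopt()' (illustrative only; the
- # option table and argv below are hypothetical):
- #
- #   parser = FancyGetopt([('verbose', 'v', "run verbosely"),
- #                         ('quiet', 'q', "run quietly")])
- #   parser.set_negative_aliases({'quiet': 'verbose'})
- #   args, opts = parser.getopt(['--verbose', 'build'])
- #   # opts.verbose == 1, args == ['build']
- #   args, opts = parser.getopt(['-q'])
- #   # '-q' negates 'verbose': opts.verbose == 0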
-
- def get_option_order(self):
- """Returns the list of (option, value) tuples processed by the
- previous run of 'getopt()'. Raises RuntimeError if
- 'getopt()' hasn't been called yet.
- """
- if self.option_order is None:
- raise RuntimeError("'getopt()' hasn't been called yet")
- else:
- return self.option_order
-
- def generate_help(self, header=None):
- """Generate help text (a list of strings, one per suggested line of
- output) from the option table for this FancyGetopt object.
- """
- # Blithely assume the option table is good: probably wouldn't call
- # 'generate_help()' unless you've already called 'getopt()'.
-
- # First pass: determine maximum length of long option names
- max_opt = 0
- for option in self.option_table:
- long = option[0]
- short = option[1]
- l = len(long)
- if long[-1] == '=':
- l = l - 1
- if short is not None:
- l = l + 5 # " (-x)" where short == 'x'
- if l > max_opt:
- max_opt = l
-
- opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
-
- # Typical help block looks like this:
- # --foo controls foonabulation
- # Help block for longest option looks like this:
- # --flimflam set the flim-flam level
- # and with wrapped text:
- # --flimflam set the flim-flam level (must be between
- # 0 and 100, except on Tuesdays)
- # Options with short names will have the short name shown; the
- # " (-x)" suffix is already counted in max_opt by the first pass:
- # --foo (-f) controls foonabulation
- # If adding the short option would make the left column too wide,
- # we push the explanation off to the next line
- # --flimflam (-l)
- # set the flim-flam level
- # Important parameters:
- # - 2 spaces before option block start lines
- # - 2 dashes for each long option name
- # - min. 2 spaces between option and explanation (gutter)
- # - 5 characters (incl. space) for short option name
-
- # Now generate lines of help text. (If 80 columns were good enough
- # for Jesus, then 78 columns are good enough for me!)
- line_width = 78
- text_width = line_width - opt_width
- big_indent = ' ' * opt_width
- if header:
- lines = [header]
- else:
- lines = ['Option summary:']
-
- for option in self.option_table:
- long, short, help = option[:3]
- text = wrap_text(help, text_width)
- if long[-1] == '=':
- long = long[0:-1]
-
- # Case 1: no short option at all (makes life easy)
- if short is None:
- if text:
- lines.append("  --%-*s  %s" % (max_opt, long, text[0]))
- else:
- lines.append("  --%-*s  " % (max_opt, long))
-
- # Case 2: we have a short option, so we have to include it
- # just after the long option
- else:
- opt_names = "%s (-%s)" % (long, short)
- if text:
- lines.append(" --%-*s %s" %
- (max_opt, opt_names, text[0]))
- else:
- lines.append(" --%-*s" % opt_names)
-
- for l in text[1:]:
- lines.append(big_indent + l)
- return lines
-
- def print_help(self, header=None, file=None):
- if file is None:
- file = sys.stdout
- for line in self.generate_help(header):
- file.write(line + "\n")
-
-
-def fancy_getopt(options, negative_opt, object, args):
- parser = FancyGetopt(options)
- parser.set_negative_aliases(negative_opt)
- return parser.getopt(args, object)
-
-
-WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
-
-def wrap_text(text, width):
- """wrap_text(text : string, width : int) -> [string]
-
- Split 'text' into multiple lines of no more than 'width' characters
- each, and return the list of strings that results.
- """
- if text is None:
- return []
- if len(text) <= width:
- return [text]
-
- text = text.expandtabs()
- text = text.translate(WS_TRANS)
- chunks = re.split(r'( +|-+)', text)
- chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
- lines = []
-
- while chunks:
- cur_line = [] # list of chunks (to-be-joined)
- cur_len = 0 # length of current line
-
- while chunks:
- l = len(chunks[0])
- if cur_len + l <= width: # can squeeze (at least) this chunk in
- cur_line.append(chunks[0])
- del chunks[0]
- cur_len = cur_len + l
- else: # this line is full
- # drop last chunk if all space
- if cur_line and cur_line[-1][0] == ' ':
- del cur_line[-1]
- break
-
- if chunks: # any chunks left to process?
- # if the current line is still empty, then we had a single
- # chunk that's too big to fit on a line -- so we break
- # down and break it up at the line width
- if cur_len == 0:
- cur_line.append(chunks[0][0:width])
- chunks[0] = chunks[0][width:]
-
- # all-whitespace chunks at the end of a line can be discarded
- # (and we know from the re.split above that if a chunk has
- # *any* whitespace, it is *all* whitespace)
- if chunks[0][0] == ' ':
- del chunks[0]
-
- # and store this line in the list-of-all-lines -- as a single
- # string, of course!
- lines.append(''.join(cur_line))
-
- return lines
-
-
-def translate_longopt(opt):
- """Convert a long option name to a valid Python identifier by
- changing "-" to "_".
- """
- return opt.translate(longopt_xlate)
-
-
-class OptionDummy:
- """Dummy class just used as a place to hold command-line option
- values as instance attributes."""
-
- def __init__(self, options=[]):
- """Create a new OptionDummy instance. The attributes listed in
- 'options' will be initialized to None."""
- for opt in options:
- setattr(self, opt, None)
-
-
-if __name__ == "__main__":
- text = """\
-Tra-la-la, supercalifragilisticexpialidocious.
-How *do* you spell that odd word, anyways?
-(Someone ask Mary -- she'll know [or she'll
-say, "How should I know?"].)"""
-
- for w in (10, 20, 30, 40):
- print("width: %d" % w)
- print("\n".join(wrap_text(text, w)))
- print()
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/file_util.py b/contrib/python/setuptools/py3/setuptools/_distutils/file_util.py
deleted file mode 100644
index b3fee35a6cc..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/file_util.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""distutils.file_util
-
-Utility functions for operating on single files.
-"""
-
-import os
-from distutils.errors import DistutilsFileError
-from distutils import log
-
-# for generating verbose output in 'copy_file()'
-_copy_action = { None: 'copying',
- 'hard': 'hard linking',
- 'sym': 'symbolically linking' }
-
-
-def _copy_file_contents(src, dst, buffer_size=16*1024):
- """Copy the file 'src' to 'dst'; both must be filenames. Any error
- opening either file, reading from 'src', or writing to 'dst', raises
- DistutilsFileError. Data is read/written in chunks of 'buffer_size'
- bytes (default 16k). No attempt is made to handle anything apart from
- regular files.
- """
- # Stolen from shutil module in the standard library, but with
- # custom error-handling added.
- fsrc = None
- fdst = None
- try:
- try:
- fsrc = open(src, 'rb')
- except OSError as e:
- raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
-
- if os.path.exists(dst):
- try:
- os.unlink(dst)
- except OSError as e:
- raise DistutilsFileError(
- "could not delete '%s': %s" % (dst, e.strerror))
-
- try:
- fdst = open(dst, 'wb')
- except OSError as e:
- raise DistutilsFileError(
- "could not create '%s': %s" % (dst, e.strerror))
-
- while True:
- try:
- buf = fsrc.read(buffer_size)
- except OSError as e:
- raise DistutilsFileError(
- "could not read from '%s': %s" % (src, e.strerror))
-
- if not buf:
- break
-
- try:
- fdst.write(buf)
- except OSError as e:
- raise DistutilsFileError(
- "could not write to '%s': %s" % (dst, e.strerror))
- finally:
- if fdst:
- fdst.close()
- if fsrc:
- fsrc.close()
-
-def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
- link=None, verbose=1, dry_run=0):
- """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
- copied there with the same name; otherwise, it must be a filename. (If
- the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
- is true (the default), the file's mode (type and permission bits, or
- whatever is analogous on the current platform) is copied. If
- 'preserve_times' is true (the default), the last-modified and
- last-access times are copied as well. If 'update' is true, 'src' will
- only be copied if 'dst' does not exist, or if 'dst' does exist but is
- older than 'src'.
-
- 'link' allows you to make hard links (os.link) or symbolic links
- (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
- None (the default), files are copied. Don't set 'link' on systems that
- don't support it: 'copy_file()' doesn't check if hard or symbolic
- linking is available. If hardlink fails, falls back to
- _copy_file_contents().
-
- File contents are copied with '_copy_file_contents()'; the old
- Mac OS 'macostools' code path no longer exists in this version.
-
- Return a tuple (dest_name, copied): 'dest_name' is the actual name of
- the output file, and 'copied' is true if the file was copied (or would
- have been copied, if 'dry_run' true).
- """
- # XXX if the destination file already exists, we clobber it if
- # copying, but blow up if linking. Hmmm. Should definitely be
- # consistent, and should probably blow up if the destination exists
- # and we would be changing it (i.e. it's not already a hard/soft
- # link to src, or 'update' is false and src is newer than dst).
-
- from distutils.dep_util import newer
- from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
-
- if not os.path.isfile(src):
- raise DistutilsFileError(
- "can't copy '%s': doesn't exist or not a regular file" % src)
-
- if os.path.isdir(dst):
- dir = dst
- dst = os.path.join(dst, os.path.basename(src))
- else:
- dir = os.path.dirname(dst)
-
- if update and not newer(src, dst):
- if verbose >= 1:
- log.debug("not copying %s (output up-to-date)", src)
- return (dst, 0)
-
- try:
- action = _copy_action[link]
- except KeyError:
- raise ValueError("invalid value '%s' for 'link' argument" % link)
-
- if verbose >= 1:
- if os.path.basename(dst) == os.path.basename(src):
- log.info("%s %s -> %s", action, src, dir)
- else:
- log.info("%s %s -> %s", action, src, dst)
-
- if dry_run:
- return (dst, 1)
-
- # If linking (hard or symbolic), use the appropriate system call
- # (Unix only, of course, but that's the caller's responsibility)
- elif link == 'hard':
- if not (os.path.exists(dst) and os.path.samefile(src, dst)):
- try:
- os.link(src, dst)
- return (dst, 1)
- except OSError:
- # If hard linking fails, fall back on copying file
- # (some special filesystems don't support hard linking
- # even under Unix, see issue #8876).
- pass
- elif link == 'sym':
- if not (os.path.exists(dst) and os.path.samefile(src, dst)):
- os.symlink(src, dst)
- return (dst, 1)
-
- # Otherwise (not linking), copy the file contents and
- # (optionally) copy the times and mode.
- _copy_file_contents(src, dst)
- if preserve_mode or preserve_times:
- st = os.stat(src)
-
- # According to David Ascher <[email protected]>, utime() should be done
- # before chmod() (at least under NT).
- if preserve_times:
- os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
- if preserve_mode:
- os.chmod(dst, S_IMODE(st[ST_MODE]))
-
- return (dst, 1)
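-
-# Illustrative sketch of typical 'copy_file()' calls (paths are
-# hypothetical):
-#
-#   copy_file('README.txt', 'build/') # -> ('build/README.txt', 1)
-#   copy_file('README.txt', 'build/', update=1) # up-to-date: -> (..., 0)
-#   copy_file('data.bin', 'build/data.bin', link='hard') # link, or copy on failure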
-
-
-# XXX I suspect this is Unix-specific -- need porting help!
-def move_file (src, dst,
- verbose=1,
- dry_run=0):
-
- """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
- be moved into it with the same name; otherwise, 'src' is just renamed
- to 'dst'. Return the new full name of the file.
-
- Handles cross-device moves on Unix using 'copy_file()'. What about
- other systems???
- """
- from os.path import exists, isfile, isdir, basename, dirname
- import errno
-
- if verbose >= 1:
- log.info("moving %s -> %s", src, dst)
-
- if dry_run:
- return dst
-
- if not isfile(src):
- raise DistutilsFileError("can't move '%s': not a regular file" % src)
-
- if isdir(dst):
- dst = os.path.join(dst, basename(src))
- elif exists(dst):
- raise DistutilsFileError(
- "can't move '%s': destination '%s' already exists" %
- (src, dst))
-
- if not isdir(dirname(dst)):
- raise DistutilsFileError(
- "can't move '%s': destination '%s' not a valid path" %
- (src, dst))
-
- copy_it = False
- try:
- os.rename(src, dst)
- except OSError as e:
- (num, msg) = e.args
- if num == errno.EXDEV:
- copy_it = True
- else:
- raise DistutilsFileError(
- "couldn't move '%s' to '%s': %s" % (src, dst, msg))
-
- if copy_it:
- copy_file(src, dst, verbose=verbose)
- try:
- os.unlink(src)
- except OSError as e:
- (num, msg) = e.args
- try:
- os.unlink(dst)
- except OSError:
- pass
- raise DistutilsFileError(
- "couldn't move '%s' to '%s' by copy/delete: "
- "delete '%s' failed: %s"
- % (src, dst, src, msg))
- return dst
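-
-# Illustrative sketch (hypothetical paths): a same-device move is a
-# plain os.rename(); a cross-device move (errno.EXDEV) degrades to
-# copy_file() followed by os.unlink() of the source:
-#
-#   move_file('build/pkg.cfg', '/tmp/pkg.cfg') # -> '/tmp/pkg.cfg'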
-
-
-def write_file (filename, contents):
- """Create a file with the specified name and write 'contents' (a
- sequence of strings without line terminators) to it.
- """
- f = open(filename, "w")
- try:
- for line in contents:
- f.write(line + "\n")
- finally:
- f.close()
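-
-# Illustrative sketch (hypothetical path and contents):
-#
-#   write_file('build/version.txt', ['1.0.2']) # writes "1.0.2\n"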
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/filelist.py b/contrib/python/setuptools/py3/setuptools/_distutils/filelist.py
deleted file mode 100644
index 82a77384dcb..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/filelist.py
+++ /dev/null
@@ -1,355 +0,0 @@
-"""distutils.filelist
-
-Provides the FileList class, used for poking about the filesystem
-and building lists of files.
-"""
-
-import os
-import re
-import fnmatch
-import functools
-
-from distutils.util import convert_path
-from distutils.errors import DistutilsTemplateError, DistutilsInternalError
-from distutils import log
-
-
-class FileList:
- """A list of files built by on exploring the filesystem and filtered by
- applying various patterns to what we find there.
-
- Instance attributes:
- dir
- directory from which files will be taken -- only used if
- 'allfiles' not supplied to constructor
- files
- list of filenames currently being built/filtered/manipulated
- allfiles
- complete list of files under consideration (ie. without any
- filtering applied)
- """
-
- def __init__(self, warn=None, debug_print=None):
- # ignore the arguments to FileList, but keep them for backwards
- # compatibility
- self.allfiles = None
- self.files = []
-
- def set_allfiles(self, allfiles):
- self.allfiles = allfiles
-
- def findall(self, dir=os.curdir):
- self.allfiles = findall(dir)
-
- def debug_print(self, msg):
- """Print 'msg' to stdout if the global DEBUG (taken from the
- DISTUTILS_DEBUG environment variable) flag is true.
- """
- from distutils.debug import DEBUG
- if DEBUG:
- print(msg)
-
- # Collection methods
-
- def append(self, item):
- self.files.append(item)
-
- def extend(self, items):
- self.files.extend(items)
-
- def sort(self):
- # Not a strict lexical sort!
- sortable_files = sorted(map(os.path.split, self.files))
- self.files = []
- for sort_tuple in sortable_files:
- self.files.append(os.path.join(*sort_tuple))
-
- # Other miscellaneous utility methods
-
- def remove_duplicates(self):
- # Assumes list has been sorted!
- for i in range(len(self.files) - 1, 0, -1):
- if self.files[i] == self.files[i - 1]:
- del self.files[i]
-
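- # Illustrative sketch of the collection methods above (names are
- # hypothetical):
- #
- #   fl = FileList()
- #   fl.extend(['b.txt', 'a.txt', 'a.txt'])
- #   fl.sort(); fl.remove_duplicates()
- #   # fl.files == ['a.txt', 'b.txt']
-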
- # "File template" methods
-
- def _parse_template_line(self, line):
- words = line.split()
- action = words[0]
-
- patterns = dir = dir_pattern = None
-
- if action in ('include', 'exclude',
- 'global-include', 'global-exclude'):
- if len(words) < 2:
- raise DistutilsTemplateError(
- "'%s' expects <pattern1> <pattern2> ..." % action)
- patterns = [convert_path(w) for w in words[1:]]
- elif action in ('recursive-include', 'recursive-exclude'):
- if len(words) < 3:
- raise DistutilsTemplateError(
- "'%s' expects <dir> <pattern1> <pattern2> ..." % action)
- dir = convert_path(words[1])
- patterns = [convert_path(w) for w in words[2:]]
- elif action in ('graft', 'prune'):
- if len(words) != 2:
- raise DistutilsTemplateError(
- "'%s' expects a single <dir_pattern>" % action)
- dir_pattern = convert_path(words[1])
- else:
- raise DistutilsTemplateError("unknown action '%s'" % action)
-
- return (action, patterns, dir, dir_pattern)
-
- def process_template_line(self, line):
- # Parse the line: split it up, make sure the right number of words
- # is there, and return the relevant words. 'action' is always
- # defined: it's the first word of the line. Which of the other
- # three are defined depends on the action; it'll be either
- # patterns, (dir and patterns), or (dir_pattern).
- (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
-
- # OK, now we know that the action is valid and we have the
- # right number of words on the line for that action -- so we
- # can proceed with minimal error-checking.
- if action == 'include':
- self.debug_print("include " + ' '.join(patterns))
- for pattern in patterns:
- if not self.include_pattern(pattern, anchor=1):
- log.warn("warning: no files found matching '%s'",
- pattern)
-
- elif action == 'exclude':
- self.debug_print("exclude " + ' '.join(patterns))
- for pattern in patterns:
- if not self.exclude_pattern(pattern, anchor=1):
- log.warn(("warning: no previously-included files "
- "found matching '%s'"), pattern)
-
- elif action == 'global-include':
- self.debug_print("global-include " + ' '.join(patterns))
- for pattern in patterns:
- if not self.include_pattern(pattern, anchor=0):
- log.warn(("warning: no files found matching '%s' "
- "anywhere in distribution"), pattern)
-
- elif action == 'global-exclude':
- self.debug_print("global-exclude " + ' '.join(patterns))
- for pattern in patterns:
- if not self.exclude_pattern(pattern, anchor=0):
- log.warn(("warning: no previously-included files matching "
- "'%s' found anywhere in distribution"),
- pattern)
-
- elif action == 'recursive-include':
- self.debug_print("recursive-include %s %s" %
- (dir, ' '.join(patterns)))
- for pattern in patterns:
- if not self.include_pattern(pattern, prefix=dir):
- msg = (
- "warning: no files found matching '%s' "
- "under directory '%s'"
- )
- log.warn(msg, pattern, dir)
-
- elif action == 'recursive-exclude':
- self.debug_print("recursive-exclude %s %s" %
- (dir, ' '.join(patterns)))
- for pattern in patterns:
- if not self.exclude_pattern(pattern, prefix=dir):
- log.warn(("warning: no previously-included files matching "
- "'%s' found under directory '%s'"),
- pattern, dir)
-
- elif action == 'graft':
- self.debug_print("graft " + dir_pattern)
- if not self.include_pattern(None, prefix=dir_pattern):
- log.warn("warning: no directories found matching '%s'",
- dir_pattern)
-
- elif action == 'prune':
- self.debug_print("prune " + dir_pattern)
- if not self.exclude_pattern(None, prefix=dir_pattern):
- log.warn(("no previously-included directories found "
- "matching '%s'"), dir_pattern)
- else:
- raise DistutilsInternalError(
- "this cannot happen: invalid action '%s'" % action)
-
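- # A sketch of the template lines handled above, as they would appear
- # in a MANIFEST.in (patterns here are hypothetical):
- #
- #   include *.txt
- #   recursive-include examples *.py
- #   graft docs/html
- #   prune examples/*/build
-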
- # Filtering/selection methods
-
- def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
- """Select strings (presumably filenames) from 'self.files' that
- match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
- are not quite the same as implemented by the 'fnmatch' module: '*'
- and '?' match non-special characters, where "special" is platform-
- dependent: slash on Unix; colon, slash, and backslash on
- DOS/Windows; and colon on Mac OS.
-
- If 'anchor' is true (the default), then the pattern match is more
- stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
- 'anchor' is false, both of these will match.
-
- If 'prefix' is supplied, then only filenames starting with 'prefix'
- (itself a pattern) and ending with 'pattern', with anything in between
- them, will match. 'anchor' is ignored in this case.
-
- If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
- 'pattern' is assumed to be either a string containing a regex or a
- regex object -- no translation is done, the regex is just compiled
- and used as-is.
-
- Selected strings will be added to self.files.
-
- Return True if files are found, False otherwise.
- """
- # XXX docstring lying about what the special chars are?
- files_found = False
- pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
- self.debug_print("include_pattern: applying regex r'%s'" %
- pattern_re.pattern)
-
- # delayed loading of allfiles list
- if self.allfiles is None:
- self.findall()
-
- for name in self.allfiles:
- if pattern_re.search(name):
- self.debug_print(" adding " + name)
- self.files.append(name)
- files_found = True
- return files_found
-
- def exclude_pattern(
- self, pattern, anchor=1, prefix=None, is_regex=0):
- """Remove strings (presumably filenames) from 'files' that match
- 'pattern'. Other parameters are the same as for
- 'include_pattern()', above.
- The list 'self.files' is modified in place.
- Return True if files are found, False otherwise.
- """
- files_found = False
- pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
- self.debug_print("exclude_pattern: applying regex r'%s'" %
- pattern_re.pattern)
- for i in range(len(self.files)-1, -1, -1):
- if pattern_re.search(self.files[i]):
- self.debug_print(" removing " + self.files[i])
- del self.files[i]
- files_found = True
- return files_found
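-
- # Illustrative sketch of the two methods above (hypothetical names,
- # POSIX paths):
- #
- #   fl.include_pattern('*.py', anchor=1) # 'setup.py', not 'src/x.py'
- #   fl.include_pattern('*.py', anchor=0) # both of the above
- #   fl.exclude_pattern('*_test.py', prefix='src') # drop tests under src/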
-
-
-# Utility functions
-
-def _find_all_simple(path):
- """
- Find all files under 'path'
- """
- all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True))
- results = (
- os.path.join(base, file)
- for base, dirs, files in all_unique
- for file in files
- )
- return filter(os.path.isfile, results)
-
-
-class _UniqueDirs(set):
- """
- Exclude previously-seen dirs from walk results,
- avoiding infinite recursion.
- Ref https://bugs.python.org/issue44497.
- """
- def __call__(self, walk_item):
- """
- Given an item from an os.walk result, determine
- if the item represents a unique dir for this instance
- and if not, prevent further traversal.
- """
- base, dirs, files = walk_item
- stat = os.stat(base)
- candidate = stat.st_dev, stat.st_ino
- found = candidate in self
- if found:
- del dirs[:]
- self.add(candidate)
- return not found
-
- @classmethod
- def filter(cls, items):
- return filter(cls(), items)
-
-
-def findall(dir=os.curdir):
- """
- Find all files under 'dir' and return the list of full filenames.
- Unless dir is '.', return full filenames with dir prepended.
- """
- files = _find_all_simple(dir)
- if dir == os.curdir:
- make_rel = functools.partial(os.path.relpath, start=dir)
- files = map(make_rel, files)
- return list(files)
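-
-# Illustrative sketch: findall() follows symlinks but skips directories
-# already seen (via _UniqueDirs), so a symlink cycle terminates:
-#
-#   findall() # e.g. ['setup.py', 'src/pkg/__init__.py'] (hypothetical)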
-
-
-def glob_to_re(pattern):
- """Translate a shell-like glob pattern to a regular expression; return
- a string containing the regex. Differs from 'fnmatch.translate()' in
- that '*' does not match "special characters" (which are
- platform-specific).
- """
- pattern_re = fnmatch.translate(pattern)
-
- # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
- # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
- # and by extension they shouldn't match such "special characters" under
- # any OS. So change all non-escaped dots in the RE to match any
- # character except the special characters (currently: just os.sep).
- sep = os.sep
- if os.sep == '\\':
- # we're using a regex to manipulate a regex, so we need
- # to escape the backslash twice
- sep = r'\\\\'
- escaped = r'\1[^%s]' % sep
- pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
- return pattern_re
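-
-# Behavioral sketch: unlike plain fnmatch.translate(), '*' in the
-# resulting regex refuses to cross os.sep:
-#
-#   re.match(glob_to_re('*.py'), 'mod.py') # matches
-#   re.match(glob_to_re('*.py'), 'dir/mod.py') # None on POSIX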
-
-
-def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
- """Translate a shell-like wildcard pattern to a compiled regular
- expression. Return the compiled regex. If 'is_regex' true,
- then 'pattern' is directly compiled to a regex (if it's a string)
- or just returned as-is (assumes it's a regex object).
- """
- if is_regex:
- if isinstance(pattern, str):
- return re.compile(pattern)
- else:
- return pattern
-
- # ditch start and end characters
- start, _, end = glob_to_re('_').partition('_')
-
- if pattern:
- pattern_re = glob_to_re(pattern)
- assert pattern_re.startswith(start) and pattern_re.endswith(end)
- else:
- pattern_re = ''
-
- if prefix is not None:
- prefix_re = glob_to_re(prefix)
- assert prefix_re.startswith(start) and prefix_re.endswith(end)
- prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
- sep = os.sep
- if os.sep == '\\':
- sep = r'\\'
- pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
- pattern_re = r'%s\A%s%s.*%s%s' % (
- start, prefix_re, sep, pattern_re, end)
- else: # no prefix -- respect anchor flag
- if anchor:
- pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
-
- return re.compile(pattern_re)
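-
-# Illustrative sketch of the anchor/prefix combinations (POSIX paths):
-#
-#   translate_pattern('*.py', anchor=1).match('setup.py') # match
-#   translate_pattern('*.py', anchor=1).match('src/setup.py') # None
-#   translate_pattern('*.py', prefix='src').match('src/a/b.py') # match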
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/log.py b/contrib/python/setuptools/py3/setuptools/_distutils/log.py
deleted file mode 100644
index 8ef6b28ea2e..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/log.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""A simple log mechanism styled after PEP 282."""
-
-# The class here is styled after PEP 282 so that it could later be
-# replaced with a standard Python logging implementation.
-
-DEBUG = 1
-INFO = 2
-WARN = 3
-ERROR = 4
-FATAL = 5
-
-import sys
-
-class Log:
-
- def __init__(self, threshold=WARN):
- self.threshold = threshold
-
- def _log(self, level, msg, args):
- if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
- raise ValueError("'%s' is not a valid log level" % level)
-
- if level >= self.threshold:
- if args:
- msg = msg % args
- if level in (WARN, ERROR, FATAL):
- stream = sys.stderr
- else:
- stream = sys.stdout
- try:
- stream.write('%s\n' % msg)
- except UnicodeEncodeError:
- # emulate backslashreplace error handler
- encoding = stream.encoding
- msg = msg.encode(encoding, "backslashreplace").decode(encoding)
- stream.write('%s\n' % msg)
- stream.flush()
-
- def log(self, level, msg, *args):
- self._log(level, msg, args)
-
- def debug(self, msg, *args):
- self._log(DEBUG, msg, args)
-
- def info(self, msg, *args):
- self._log(INFO, msg, args)
-
- def warn(self, msg, *args):
- self._log(WARN, msg, args)
-
- def error(self, msg, *args):
- self._log(ERROR, msg, args)
-
- def fatal(self, msg, *args):
- self._log(FATAL, msg, args)
-
-_global_log = Log()
-log = _global_log.log
-debug = _global_log.debug
-info = _global_log.info
-warn = _global_log.warn
-error = _global_log.error
-fatal = _global_log.fatal
-
-def set_threshold(level):
- # return the old threshold for use from tests
- old = _global_log.threshold
- _global_log.threshold = level
- return old
-
-def set_verbosity(v):
- if v <= 0:
- set_threshold(WARN)
- elif v == 1:
- set_threshold(INFO)
- elif v >= 2:
- set_threshold(DEBUG)
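-
-# Illustrative sketch of how verbosity counts map onto thresholds:
-#
-#   from distutils import log
-#   log.set_verbosity(2) # threshold -> DEBUG
-#   log.debug("probing %s", "cl.exe") # DEBUG/INFO go to stdout
-#   log.warn("falling back") # WARN and above go to stderr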
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/msvc9compiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/msvc9compiler.py
deleted file mode 100644
index a1b3b02ff0a..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/msvc9compiler.py
+++ /dev/null
@@ -1,788 +0,0 @@
-"""distutils.msvc9compiler
-
-Contains MSVCCompiler, an implementation of the abstract CCompiler class
-for the Microsoft Visual Studio 2008.
-
-The module is compatible with VS 2005 and VS 2008. You can find legacy support
-for older versions of VS in distutils.msvccompiler.
-"""
-
-# Written by Perry Stoll
-# hacked by Robin Becker and Thomas Heller to do a better job of
-# finding DevStudio (through the registry)
-# ported to VS2005 and VS 2008 by Christian Heimes
-
-import os
-import subprocess
-import sys
-import re
-
-from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
- CompileError, LibError, LinkError
-from distutils.ccompiler import CCompiler, gen_lib_options
-from distutils import log
-from distutils.util import get_platform
-
-import winreg
-
-RegOpenKeyEx = winreg.OpenKeyEx
-RegEnumKey = winreg.EnumKey
-RegEnumValue = winreg.EnumValue
-RegError = winreg.error
-
-HKEYS = (winreg.HKEY_USERS,
- winreg.HKEY_CURRENT_USER,
- winreg.HKEY_LOCAL_MACHINE,
- winreg.HKEY_CLASSES_ROOT)
-
-NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
-if NATIVE_WIN64:
- # Visual C++ is a 32-bit application, so we need to look in
- # the corresponding registry branch, if we're running a
- # 64-bit Python on Win64
- VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
- WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
- NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
-else:
- VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
- WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
- NET_BASE = r"Software\Microsoft\.NETFramework"
-
-# A map keyed by get_platform() return values to values accepted by
-# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
-# the param to cross-compile on x86 targeting amd64.)
-PLAT_TO_VCVARS = {
- 'win32' : 'x86',
- 'win-amd64' : 'amd64',
-}
-
-class Reg:
- """Helper class to read values from the registry
- """
-
- def get_value(cls, path, key):
- for base in HKEYS:
- d = cls.read_values(base, path)
- if d and key in d:
- return d[key]
- raise KeyError(key)
- get_value = classmethod(get_value)
-
- def read_keys(cls, base, key):
- """Return list of registry keys."""
- try:
- handle = RegOpenKeyEx(base, key)
- except RegError:
- return None
- L = []
- i = 0
- while True:
- try:
- k = RegEnumKey(handle, i)
- except RegError:
- break
- L.append(k)
- i += 1
- return L
- read_keys = classmethod(read_keys)
-
- def read_values(cls, base, key):
- """Return dict of registry keys and values.
-
- All names are converted to lowercase.
- """
- try:
- handle = RegOpenKeyEx(base, key)
- except RegError:
- return None
- d = {}
- i = 0
- while True:
- try:
- name, value, type = RegEnumValue(handle, i)
- except RegError:
- break
- name = name.lower()
- d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
- i += 1
- return d
- read_values = classmethod(read_values)
-
- def convert_mbcs(s):
- dec = getattr(s, "decode", None)
- if dec is not None:
- try:
- s = dec("mbcs")
- except UnicodeError:
- pass
- return s
- convert_mbcs = staticmethod(convert_mbcs)
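-
- # Illustrative sketch (the path below is the standard VS 2008
- # registry location; hypothetical on machines without VS):
- #
- #   Reg.get_value(r"Software\Microsoft\VisualStudio\9.0\Setup\VC",
- #                 "productdir") # -> VC install dir, or KeyError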
-
-class MacroExpander:
-
- def __init__(self, version):
- self.macros = {}
- self.vsbase = VS_BASE % version
- self.load_macros(version)
-
- def set_macro(self, macro, path, key):
- self.macros["$(%s)" % macro] = Reg.get_value(path, key)
-
- def load_macros(self, version):
- self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
- self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
- self.set_macro("FrameworkDir", NET_BASE, "installroot")
- try:
- if version >= 8.0:
- self.set_macro("FrameworkSDKDir", NET_BASE,
- "sdkinstallrootv2.0")
- else:
- raise KeyError("sdkinstallrootv2.0")
- except KeyError:
- raise DistutilsPlatformError(
- """Python was built with Visual Studio 2008;
-extensions must be built with a compiler that can generate compatible binaries.
-Visual Studio 2008 was not found on this system. If you have Cygwin installed,
-you can try compiling with MinGW32, by passing "-c mingw32" to setup.py.""")
-
- if version >= 9.0:
- self.set_macro("FrameworkVersion", self.vsbase, "clr version")
- self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
- else:
- p = r"Software\Microsoft\NET Framework Setup\Product"
- for base in HKEYS:
- try:
- h = RegOpenKeyEx(base, p)
- except RegError:
- continue
- key = RegEnumKey(h, 0)
- d = Reg.get_value(base, r"%s\%s" % (p, key))
- self.macros["$(FrameworkVersion)"] = d["version"]
-
- def sub(self, s):
- for k, v in self.macros.items():
- s = s.replace(k, v)
- return s
-
-def get_build_version():
- """Return the version of MSVC that was used to build Python.
-
- For Python 2.3 and up, the version number is included in
- sys.version. For earlier versions, assume the compiler is MSVC 6.
- """
- prefix = "MSC v."
- i = sys.version.find(prefix)
- if i == -1:
- return 6
- i = i + len(prefix)
- s, rest = sys.version[i:].split(" ", 1)
- majorVersion = int(s[:-2]) - 6
- if majorVersion >= 13:
- # v13 was skipped and should be v14
- majorVersion += 1
- minorVersion = int(s[2:3]) / 10.0
- # I don't think paths are affected by minor version in version 6
- if majorVersion == 6:
- minorVersion = 0
- if majorVersion >= 6:
- return majorVersion + minorVersion
- # else we don't know what version of the compiler this is
- return None
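-
-# Worked example of the parse above: for a Python built by VS 2008,
-# sys.version contains '[MSC v.1500 ...]', so s == '1500',
-# int('15') - 6 == 9 and int('0') / 10.0 == 0.0 -> returns 9.0.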
-
-def normalize_and_reduce_paths(paths):
- """Return a list of normalized paths with duplicates removed.
-
- The current order of paths is maintained.
- """
- # Paths are normalized so things like: /a and /a/ aren't both preserved.
- reduced_paths = []
- for p in paths:
- np = os.path.normpath(p)
- # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
- if np not in reduced_paths:
- reduced_paths.append(np)
- return reduced_paths
-
-def removeDuplicates(variable):
- """Remove duplicate values of an environment variable.
- """
- oldList = variable.split(os.pathsep)
- newList = []
- for i in oldList:
- if i not in newList:
- newList.append(i)
- newVariable = os.pathsep.join(newList)
- return newVariable
-
-def find_vcvarsall(version):
- """Find the vcvarsall.bat file
-
- At first it tries to find the productdir of VS 2008 in the registry. If
- that fails it falls back to the VS90COMNTOOLS env var.
- """
- vsbase = VS_BASE % version
- try:
- productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
- "productdir")
- except KeyError:
- log.debug("Unable to find productdir in registry")
- productdir = None
-
- if not productdir or not os.path.isdir(productdir):
- toolskey = "VS%0.f0COMNTOOLS" % version
- toolsdir = os.environ.get(toolskey, None)
-
- if toolsdir and os.path.isdir(toolsdir):
- productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
- productdir = os.path.abspath(productdir)
- if not os.path.isdir(productdir):
- log.debug("%s is not a valid directory" % productdir)
- return None
- else:
- log.debug("Env var %s is not set or invalid" % toolskey)
- if not productdir:
- log.debug("No productdir found")
- return None
- vcvarsall = os.path.join(productdir, "vcvarsall.bat")
- if os.path.isfile(vcvarsall):
- return vcvarsall
- log.debug("Unable to find vcvarsall.bat")
- return None
-
-def query_vcvarsall(version, arch="x86"):
- """Launch vcvarsall.bat and read the settings from its environment
- """
- vcvarsall = find_vcvarsall(version)
- interesting = {"include", "lib", "libpath", "path"}
- result = {}
-
- if vcvarsall is None:
- raise DistutilsPlatformError("Unable to find vcvarsall.bat")
- log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
- popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- try:
- stdout, stderr = popen.communicate()
- if popen.wait() != 0:
- raise DistutilsPlatformError(stderr.decode("mbcs"))
-
- stdout = stdout.decode("mbcs")
- for line in stdout.split("\n"):
- line = Reg.convert_mbcs(line)
- if '=' not in line:
- continue
- line = line.strip()
- key, value = line.split('=', 1)
- key = key.lower()
- if key in interesting:
- if value.endswith(os.pathsep):
- value = value[:-1]
- result[key] = removeDuplicates(value)
-
- finally:
- popen.stdout.close()
- popen.stderr.close()
-
- if len(result) != len(interesting):
- raise ValueError(str(list(result.keys())))
-
- return result
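-
-# Illustrative sketch (only meaningful on Windows with VS 2008
-# installed):
-#
-#   env = query_vcvarsall(9.0, 'x86_amd64') # cross-compile env
-#   sorted(env) # ['include', 'lib', 'libpath', 'path']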
-
-# More globals
-VERSION = get_build_version()
-if VERSION < 8.0:
- raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
-# MACROS = MacroExpander(VERSION)
-
-class MSVCCompiler(CCompiler) :
- """Concrete class that implements an interface to Microsoft Visual C++,
- as defined by the CCompiler abstract class."""
-
- compiler_type = 'msvc'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
- _rc_extensions = ['.rc']
- _mc_extensions = ['.mc']
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = (_c_extensions + _cpp_extensions +
- _rc_extensions + _mc_extensions)
- res_extension = '.res'
- obj_extension = '.obj'
- static_lib_extension = '.lib'
- shared_lib_extension = '.dll'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = '.exe'
-
- def __init__(self, verbose=0, dry_run=0, force=0):
- CCompiler.__init__ (self, verbose, dry_run, force)
- self.__version = VERSION
- self.__root = r"Software\Microsoft\VisualStudio"
- # self.__macros = MACROS
- self.__paths = []
- # target platform (.plat_name is consistent with 'bdist')
- self.plat_name = None
- self.__arch = None # deprecated name
- self.initialized = False
-
- def initialize(self, plat_name=None):
- # multi-init means we would need to check platform same each time...
- assert not self.initialized, "don't init multiple times"
- if plat_name is None:
- plat_name = get_platform()
- # sanity check for platforms to prevent obscure errors later.
- ok_plats = 'win32', 'win-amd64'
- if plat_name not in ok_plats:
- raise DistutilsPlatformError("--plat-name must be one of %s" %
- (ok_plats,))
-
- if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
- # Assume that the SDK set up everything alright; don't try to be
- # smarter
- self.cc = "cl.exe"
- self.linker = "link.exe"
- self.lib = "lib.exe"
- self.rc = "rc.exe"
- self.mc = "mc.exe"
- else:
- # On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
- # to cross compile, you use 'x86_amd64'.
- # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
- # compile use 'x86' (ie, it runs the x86 compiler directly)
- if plat_name == get_platform() or plat_name == 'win32':
- # native build or cross-compile to win32
- plat_spec = PLAT_TO_VCVARS[plat_name]
- else:
- # cross compile from win32 -> some 64bit
- plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
- PLAT_TO_VCVARS[plat_name]
-
- vc_env = query_vcvarsall(VERSION, plat_spec)
-
- self.__paths = vc_env['path'].split(os.pathsep)
- os.environ['lib'] = vc_env['lib']
- os.environ['include'] = vc_env['include']
-
- if len(self.__paths) == 0:
- raise DistutilsPlatformError("Python was built with %s, "
- "and extensions need to be built with the same "
- "version of the compiler, but it isn't installed."
- % self.__product)
-
- self.cc = self.find_exe("cl.exe")
- self.linker = self.find_exe("link.exe")
- self.lib = self.find_exe("lib.exe")
- self.rc = self.find_exe("rc.exe") # resource compiler
- self.mc = self.find_exe("mc.exe") # message compiler
- #self.set_path_env_var('lib')
- #self.set_path_env_var('include')
-
- # extend the MSVC path with the current path
- try:
- for p in os.environ['path'].split(';'):
- self.__paths.append(p)
- except KeyError:
- pass
- self.__paths = normalize_and_reduce_paths(self.__paths)
- os.environ['path'] = ";".join(self.__paths)
-
- self.preprocess_options = None
- if self.__arch == "x86":
- self.compile_options = [ '/nologo', '/O2', '/MD', '/W3',
- '/DNDEBUG']
- self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
- '/Z7', '/D_DEBUG']
- else:
- # Win64
- self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GS-' ,
- '/DNDEBUG']
- self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
- '/Z7', '/D_DEBUG']
-
- self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
- if self.__version >= 7:
- self.ldflags_shared_debug = [
- '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
- ]
- self.ldflags_static = [ '/nologo']
-
- self.initialized = True
-
- # -- Worker methods ------------------------------------------------
-
- def object_filenames(self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- # Copied from ccompiler.py, extended to return .res as 'object'-file
- # for .rc input file
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- (base, ext) = os.path.splitext (src_name)
- base = os.path.splitdrive(base)[1] # Chop off the drive
- base = base[os.path.isabs(base):] # If abs, chop off leading /
- if ext not in self.src_extensions:
- # Better to raise an exception instead of silently continuing
- # and later complain about sources and targets having
- # different lengths
- raise CompileError ("Don't know how to compile %s" % src_name)
- if strip_dir:
- base = os.path.basename (base)
- if ext in self._rc_extensions:
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- elif ext in self._mc_extensions:
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
-
- def compile(self, sources,
- output_dir=None, macros=None, include_dirs=None, debug=0,
- extra_preargs=None, extra_postargs=None, depends=None):
-
- if not self.initialized:
- self.initialize()
- compile_info = self._setup_compile(output_dir, macros, include_dirs,
- sources, depends, extra_postargs)
- macros, objects, extra_postargs, pp_opts, build = compile_info
-
- compile_opts = extra_preargs or []
- compile_opts.append ('/c')
- if debug:
- compile_opts.extend(self.compile_options_debug)
- else:
- compile_opts.extend(self.compile_options)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- if debug:
- # pass the full pathname to MSVC in debug mode,
- # this allows the debugger to find the source file
- # without asking the user to browse for it
- src = os.path.abspath(src)
-
- if ext in self._c_extensions:
- input_opt = "/Tc" + src
- elif ext in self._cpp_extensions:
- input_opt = "/Tp" + src
- elif ext in self._rc_extensions:
- # compile .RC to .RES file
- input_opt = src
- output_opt = "/fo" + obj
- try:
- self.spawn([self.rc] + pp_opts +
- [output_opt] + [input_opt])
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue
- elif ext in self._mc_extensions:
- # Compile .MC to .RC file to .RES file.
- # * '-h dir' specifies the directory for the
- # generated include file
- # * '-r dir' specifies the target directory of the
- # generated RC file and the binary message resource
- # it includes
- #
- # For now (since there are no options to change this),
- # we use the source-directory for the include file and
- # the build directory for the RC file and message
- # resources. This works at least for win32all.
- h_dir = os.path.dirname(src)
- rc_dir = os.path.dirname(obj)
- try:
- # first compile .MC to .RC and .H file
- self.spawn([self.mc] +
- ['-h', h_dir, '-r', rc_dir] + [src])
- base, _ = os.path.splitext (os.path.basename (src))
- rc_file = os.path.join (rc_dir, base + '.rc')
- # then compile .RC to .RES file
- self.spawn([self.rc] +
- ["/fo" + obj] + [rc_file])
-
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue
- else:
- # how to handle this file?
- raise CompileError("Don't know how to compile %s to %s"
- % (src, obj))
-
- output_opt = "/Fo" + obj
- try:
- self.spawn([self.cc] + compile_opts + pp_opts +
- [input_opt, output_opt] +
- extra_postargs)
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- return objects
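-
- # Illustrative sketch of the compile/link cycle (hypothetical
- # sources; requires an initialized Windows build environment):
- #
- #   cc = MSVCCompiler()
- #   objs = cc.compile(['spam.c', 'spam.rc'], output_dir='build')
- #   cc.link(CCompiler.SHARED_OBJECT, objs, 'spam.pyd',
- #           export_symbols=['PyInit_spam'])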
-
-
- def create_static_lib(self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
-
- if not self.initialized:
- self.initialize()
- (objects, output_dir) = self._fix_object_args(objects, output_dir)
- output_filename = self.library_filename(output_libname,
- output_dir=output_dir)
-
- if self._need_link(objects, output_filename):
- lib_args = objects + ['/OUT:' + output_filename]
- if debug:
- pass # XXX what goes here?
- try:
- self.spawn([self.lib] + lib_args)
- except DistutilsExecError as msg:
- raise LibError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
-
- def link(self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- if not self.initialized:
- self.initialize()
- (objects, output_dir) = self._fix_object_args(objects, output_dir)
- fixed_args = self._fix_lib_args(libraries, library_dirs,
- runtime_library_dirs)
- (libraries, library_dirs, runtime_library_dirs) = fixed_args
-
- if runtime_library_dirs:
- self.warn ("I don't know what to do with 'runtime_library_dirs': "
- + str (runtime_library_dirs))
-
- lib_opts = gen_lib_options(self,
- library_dirs, runtime_library_dirs,
- libraries)
- if output_dir is not None:
- output_filename = os.path.join(output_dir, output_filename)
-
- if self._need_link(objects, output_filename):
- if target_desc == CCompiler.EXECUTABLE:
- if debug:
- ldflags = self.ldflags_shared_debug[1:]
- else:
- ldflags = self.ldflags_shared[1:]
- else:
- if debug:
- ldflags = self.ldflags_shared_debug
- else:
- ldflags = self.ldflags_shared
-
- export_opts = []
- for sym in (export_symbols or []):
- export_opts.append("/EXPORT:" + sym)
-
- ld_args = (ldflags + lib_opts + export_opts +
- objects + ['/OUT:' + output_filename])
-
- # The MSVC linker generates .lib and .exp files, which cannot be
- # suppressed by any linker switches. The .lib files may even be
- # needed! Make sure they are generated in the temporary build
- # directory. Since they have different names for debug and release
- # builds, they can go into the same directory.
- build_temp = os.path.dirname(objects[0])
- if export_symbols is not None:
- (dll_name, dll_ext) = os.path.splitext(
- os.path.basename(output_filename))
- implib_file = os.path.join(
- build_temp,
- self.library_filename(dll_name))
- ld_args.append ('/IMPLIB:' + implib_file)
-
- self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
-
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
-
- self.mkpath(os.path.dirname(output_filename))
- try:
- self.spawn([self.linker] + ld_args)
- except DistutilsExecError as msg:
- raise LinkError(msg)
-
- # embed the manifest
- # XXX - this is somewhat fragile - if mt.exe fails, distutils
- # will still consider the DLL up-to-date, but it will not have a
- # manifest. Maybe we should link to a temp file? OTOH, that
- # implies a build environment error that shouldn't go undetected.
- mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
- if mfinfo is not None:
- mffilename, mfid = mfinfo
- out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
- try:
- self.spawn(['mt.exe', '-nologo', '-manifest',
- mffilename, out_arg])
- except DistutilsExecError as msg:
- raise LinkError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
- # If we need a manifest at all, an embedded manifest is recommended.
- # See MSDN article titled
- # "How to: Embed a Manifest Inside a C/C++ Application"
- # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
- # Ask the linker to generate the manifest in the temp dir, so
- # we can check it, and possibly embed it, later.
- temp_manifest = os.path.join(
- build_temp,
- os.path.basename(output_filename) + ".manifest")
- ld_args.append('/MANIFESTFILE:' + temp_manifest)
-
- def manifest_get_embed_info(self, target_desc, ld_args):
- # If a manifest should be embedded, return a tuple of
- # (manifest_filename, resource_id). Returns None if no manifest
- # should be embedded. (See http://bugs.python.org/issue7833 for why
- # we want to avoid any manifest for extension modules if we can.)
- for arg in ld_args:
- if arg.startswith("/MANIFESTFILE:"):
- temp_manifest = arg.split(":", 1)[1]
- break
- else:
- # no /MANIFESTFILE so nothing to do.
- return None
- if target_desc == CCompiler.EXECUTABLE:
- # by default, executables always get the manifest with the
- # CRT referenced.
- mfid = 1
- else:
- # Extension modules try and avoid any manifest if possible.
- mfid = 2
- temp_manifest = self._remove_visual_c_ref(temp_manifest)
- if temp_manifest is None:
- return None
- return temp_manifest, mfid
-
- def _remove_visual_c_ref(self, manifest_file):
- try:
- # Remove references to the Visual C runtime, so they will
- # fall through to the Visual C dependency of Python.exe.
- # This way, when installed for a restricted user (e.g.
- # runtimes are not in WinSxS folder, but in Python's own
- # folder), the runtimes do not need to be in every folder
- # with .pyd's.
- # Returns either the filename of the modified manifest or
- # None if no manifest should be embedded.
- manifest_f = open(manifest_file)
- try:
- manifest_buf = manifest_f.read()
- finally:
- manifest_f.close()
- pattern = re.compile(
- r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
- r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
- re.DOTALL)
- manifest_buf = re.sub(pattern, "", manifest_buf)
- pattern = r"<dependentAssembly>\s*</dependentAssembly>"
- manifest_buf = re.sub(pattern, "", manifest_buf)
- # Now see if any other assemblies are referenced - if not, we
- # don't want a manifest embedded.
- pattern = re.compile(
- r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
- r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
- if re.search(pattern, manifest_buf) is None:
- return None
-
- manifest_f = open(manifest_file, 'w')
- try:
- manifest_f.write(manifest_buf)
- return manifest_file
- finally:
- manifest_f.close()
- except OSError:
- pass
-
- # -- Miscellaneous methods -----------------------------------------
- # These are all used by the 'gen_lib_options()' function, in
- # ccompiler.py.
-
- def library_dir_option(self, dir):
- return "/LIBPATH:" + dir
-
- def runtime_library_dir_option(self, dir):
- raise DistutilsPlatformError(
- "don't know how to set runtime library search path for MSVC++")
-
- def library_option(self, lib):
- return self.library_filename(lib)
-
-
- def find_library_file(self, dirs, lib, debug=0):
- # Prefer a debugging library if found (and requested), but deal
- # with it if we don't have one.
- if debug:
- try_names = [lib + "_d", lib]
- else:
- try_names = [lib]
- for dir in dirs:
- for name in try_names:
- libfile = os.path.join(dir, self.library_filename (name))
- if os.path.exists(libfile):
- return libfile
- else:
- # Oops, didn't find it in *any* of 'dirs'
- return None
-
- # Helper methods for using the MSVC registry settings
-
- def find_exe(self, exe):
- """Return path to an MSVC executable program.
-
- Tries to find the program in several places: first, one of the
- MSVC program search paths from the registry; next, the directories
- in the PATH environment variable. If any of those work, return an
- absolute path that is known to exist. If none of them work, just
- return the original program name, 'exe'.
- """
- for p in self.__paths:
- fn = os.path.join(os.path.abspath(p), exe)
- if os.path.isfile(fn):
- return fn
-
- # didn't find it; try existing path
- for p in os.environ['Path'].split(';'):
- fn = os.path.join(os.path.abspath(p),exe)
- if os.path.isfile(fn):
- return fn
-
- return exe
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/msvccompiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/msvccompiler.py
deleted file mode 100644
index 2d447b857d3..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/msvccompiler.py
+++ /dev/null
@@ -1,643 +0,0 @@
-"""distutils.msvccompiler
-
-Contains MSVCCompiler, an implementation of the abstract CCompiler class
-for the Microsoft Visual Studio.
-"""
-
-# Written by Perry Stoll
-# hacked by Robin Becker and Thomas Heller to do a better job of
-# finding DevStudio (through the registry)
-
-import sys, os
-from distutils.errors import \
- DistutilsExecError, DistutilsPlatformError, \
- CompileError, LibError, LinkError
-from distutils.ccompiler import \
- CCompiler, gen_lib_options
-from distutils import log
-
-_can_read_reg = False
-try:
- import winreg
-
- _can_read_reg = True
- hkey_mod = winreg
-
- RegOpenKeyEx = winreg.OpenKeyEx
- RegEnumKey = winreg.EnumKey
- RegEnumValue = winreg.EnumValue
- RegError = winreg.error
-
-except ImportError:
- try:
- import win32api
- import win32con
- _can_read_reg = True
- hkey_mod = win32con
-
- RegOpenKeyEx = win32api.RegOpenKeyEx
- RegEnumKey = win32api.RegEnumKey
- RegEnumValue = win32api.RegEnumValue
- RegError = win32api.error
- except ImportError:
- log.info("Warning: Can't read registry to find the "
- "necessary compiler setting\n"
- "Make sure that Python modules winreg, "
- "win32api or win32con are installed.")
- pass
-
-if _can_read_reg:
- HKEYS = (hkey_mod.HKEY_USERS,
- hkey_mod.HKEY_CURRENT_USER,
- hkey_mod.HKEY_LOCAL_MACHINE,
- hkey_mod.HKEY_CLASSES_ROOT)
-
-def read_keys(base, key):
- """Return list of registry keys."""
- try:
- handle = RegOpenKeyEx(base, key)
- except RegError:
- return None
- L = []
- i = 0
- while True:
- try:
- k = RegEnumKey(handle, i)
- except RegError:
- break
- L.append(k)
- i += 1
- return L
-
-def read_values(base, key):
- """Return dict of registry keys and values.
-
- All names are converted to lowercase.
- """
- try:
- handle = RegOpenKeyEx(base, key)
- except RegError:
- return None
- d = {}
- i = 0
- while True:
- try:
- name, value, type = RegEnumValue(handle, i)
- except RegError:
- break
- name = name.lower()
- d[convert_mbcs(name)] = convert_mbcs(value)
- i += 1
- return d
-
-def convert_mbcs(s):
- dec = getattr(s, "decode", None)
- if dec is not None:
- try:
- s = dec("mbcs")
- except UnicodeError:
- pass
- return s
-
-class MacroExpander:
- def __init__(self, version):
- self.macros = {}
- self.load_macros(version)
-
- def set_macro(self, macro, path, key):
- for base in HKEYS:
- d = read_values(base, path)
- if d:
- self.macros["$(%s)" % macro] = d[key]
- break
-
- def load_macros(self, version):
- vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
- self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
- self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
- net = r"Software\Microsoft\.NETFramework"
- self.set_macro("FrameworkDir", net, "installroot")
- try:
- if version > 7.0:
- self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
- else:
- self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
-        except KeyError:
-            raise DistutilsPlatformError(
-            """Python was built with Visual Studio 2003;
-extensions must be built with a compiler that can generate compatible binaries.
-Visual Studio 2003 was not found on this system. If you have Cygwin installed,
-you can try compiling with MinGW32, by passing "-c mingw32" to setup.py.""")
-
- p = r"Software\Microsoft\NET Framework Setup\Product"
- for base in HKEYS:
- try:
- h = RegOpenKeyEx(base, p)
- except RegError:
- continue
- key = RegEnumKey(h, 0)
- d = read_values(base, r"%s\%s" % (p, key))
- self.macros["$(FrameworkVersion)"] = d["version"]
-
- def sub(self, s):
- for k, v in self.macros.items():
- s = s.replace(k, v)
- return s
-
-def get_build_version():
- """Return the version of MSVC that was used to build Python.
-
- For Python 2.3 and up, the version number is included in
- sys.version. For earlier versions, assume the compiler is MSVC 6.
- """
- prefix = "MSC v."
- i = sys.version.find(prefix)
- if i == -1:
- return 6
- i = i + len(prefix)
- s, rest = sys.version[i:].split(" ", 1)
- majorVersion = int(s[:-2]) - 6
- if majorVersion >= 13:
- # v13 was skipped and should be v14
- majorVersion += 1
- minorVersion = int(s[2:3]) / 10.0
- # I don't think paths are affected by minor version in version 6
- if majorVersion == 6:
- minorVersion = 0
- if majorVersion >= 6:
- return majorVersion + minorVersion
- # else we don't know what version of the compiler this is
- return None
-
-def get_build_architecture():
- """Return the processor architecture.
-
- Possible results are "Intel" or "AMD64".
- """
-
- prefix = " bit ("
- i = sys.version.find(prefix)
- if i == -1:
- return "Intel"
- j = sys.version.find(")", i)
- return sys.version[i+len(prefix):j]
-
-def normalize_and_reduce_paths(paths):
- """Return a list of normalized paths with duplicates removed.
-
- The current order of paths is maintained.
- """
- # Paths are normalized so things like: /a and /a/ aren't both preserved.
- reduced_paths = []
- for p in paths:
- np = os.path.normpath(p)
- # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
- if np not in reduced_paths:
- reduced_paths.append(np)
- return reduced_paths
-
-
-class MSVCCompiler(CCompiler):
- """Concrete class that implements an interface to Microsoft Visual C++,
- as defined by the CCompiler abstract class."""
-
- compiler_type = 'msvc'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
- _rc_extensions = ['.rc']
- _mc_extensions = ['.mc']
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = (_c_extensions + _cpp_extensions +
- _rc_extensions + _mc_extensions)
- res_extension = '.res'
- obj_extension = '.obj'
- static_lib_extension = '.lib'
- shared_lib_extension = '.dll'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = '.exe'
-
- def __init__(self, verbose=0, dry_run=0, force=0):
- CCompiler.__init__ (self, verbose, dry_run, force)
- self.__version = get_build_version()
- self.__arch = get_build_architecture()
- if self.__arch == "Intel":
- # x86
- if self.__version >= 7:
- self.__root = r"Software\Microsoft\VisualStudio"
- self.__macros = MacroExpander(self.__version)
- else:
- self.__root = r"Software\Microsoft\Devstudio"
- self.__product = "Visual Studio version %s" % self.__version
- else:
- # Win64. Assume this was built with the platform SDK
- self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
-
- self.initialized = False
-
- def initialize(self):
- self.__paths = []
- if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
- # Assume that the SDK set up everything alright; don't try to be
- # smarter
- self.cc = "cl.exe"
- self.linker = "link.exe"
- self.lib = "lib.exe"
- self.rc = "rc.exe"
- self.mc = "mc.exe"
- else:
- self.__paths = self.get_msvc_paths("path")
-
- if len(self.__paths) == 0:
- raise DistutilsPlatformError("Python was built with %s, "
- "and extensions need to be built with the same "
- "version of the compiler, but it isn't installed."
- % self.__product)
-
- self.cc = self.find_exe("cl.exe")
- self.linker = self.find_exe("link.exe")
- self.lib = self.find_exe("lib.exe")
- self.rc = self.find_exe("rc.exe") # resource compiler
- self.mc = self.find_exe("mc.exe") # message compiler
- self.set_path_env_var('lib')
- self.set_path_env_var('include')
-
- # extend the MSVC path with the current path
- try:
- for p in os.environ['path'].split(';'):
- self.__paths.append(p)
- except KeyError:
- pass
- self.__paths = normalize_and_reduce_paths(self.__paths)
- os.environ['path'] = ";".join(self.__paths)
-
- self.preprocess_options = None
- if self.__arch == "Intel":
- self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GX' ,
- '/DNDEBUG']
- self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
- '/Z7', '/D_DEBUG']
- else:
- # Win64
- self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GS-' ,
- '/DNDEBUG']
- self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
- '/Z7', '/D_DEBUG']
-
- self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
- if self.__version >= 7:
- self.ldflags_shared_debug = [
- '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
- ]
- else:
- self.ldflags_shared_debug = [
- '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
- ]
- self.ldflags_static = [ '/nologo']
-
- self.initialized = True
-
- # -- Worker methods ------------------------------------------------
-
- def object_filenames(self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- # Copied from ccompiler.py, extended to return .res as 'object'-file
- # for .rc input file
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- (base, ext) = os.path.splitext (src_name)
- base = os.path.splitdrive(base)[1] # Chop off the drive
- base = base[os.path.isabs(base):] # If abs, chop off leading /
- if ext not in self.src_extensions:
- # Better to raise an exception instead of silently continuing
- # and later complain about sources and targets having
- # different lengths
- raise CompileError ("Don't know how to compile %s" % src_name)
- if strip_dir:
- base = os.path.basename (base)
- if ext in self._rc_extensions:
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- elif ext in self._mc_extensions:
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
-
- def compile(self, sources,
- output_dir=None, macros=None, include_dirs=None, debug=0,
- extra_preargs=None, extra_postargs=None, depends=None):
-
- if not self.initialized:
- self.initialize()
- compile_info = self._setup_compile(output_dir, macros, include_dirs,
- sources, depends, extra_postargs)
- macros, objects, extra_postargs, pp_opts, build = compile_info
-
- compile_opts = extra_preargs or []
- compile_opts.append ('/c')
- if debug:
- compile_opts.extend(self.compile_options_debug)
- else:
- compile_opts.extend(self.compile_options)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- if debug:
- # pass the full pathname to MSVC in debug mode,
- # this allows the debugger to find the source file
- # without asking the user to browse for it
- src = os.path.abspath(src)
-
- if ext in self._c_extensions:
- input_opt = "/Tc" + src
- elif ext in self._cpp_extensions:
- input_opt = "/Tp" + src
- elif ext in self._rc_extensions:
- # compile .RC to .RES file
- input_opt = src
- output_opt = "/fo" + obj
- try:
- self.spawn([self.rc] + pp_opts +
- [output_opt] + [input_opt])
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue
- elif ext in self._mc_extensions:
- # Compile .MC to .RC file to .RES file.
- # * '-h dir' specifies the directory for the
- # generated include file
- # * '-r dir' specifies the target directory of the
- # generated RC file and the binary message resource
- # it includes
- #
- # For now (since there are no options to change this),
- # we use the source-directory for the include file and
- # the build directory for the RC file and message
- # resources. This works at least for win32all.
- h_dir = os.path.dirname(src)
- rc_dir = os.path.dirname(obj)
- try:
- # first compile .MC to .RC and .H file
- self.spawn([self.mc] +
- ['-h', h_dir, '-r', rc_dir] + [src])
- base, _ = os.path.splitext (os.path.basename (src))
- rc_file = os.path.join (rc_dir, base + '.rc')
- # then compile .RC to .RES file
- self.spawn([self.rc] +
- ["/fo" + obj] + [rc_file])
-
- except DistutilsExecError as msg:
- raise CompileError(msg)
- continue
- else:
- # how to handle this file?
- raise CompileError("Don't know how to compile %s to %s"
- % (src, obj))
-
- output_opt = "/Fo" + obj
- try:
- self.spawn([self.cc] + compile_opts + pp_opts +
- [input_opt, output_opt] +
- extra_postargs)
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- return objects
-
-
- def create_static_lib(self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
-
- if not self.initialized:
- self.initialize()
- (objects, output_dir) = self._fix_object_args(objects, output_dir)
- output_filename = self.library_filename(output_libname,
- output_dir=output_dir)
-
- if self._need_link(objects, output_filename):
- lib_args = objects + ['/OUT:' + output_filename]
- if debug:
- pass # XXX what goes here?
- try:
- self.spawn([self.lib] + lib_args)
- except DistutilsExecError as msg:
- raise LibError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
-
- def link(self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- if not self.initialized:
- self.initialize()
- (objects, output_dir) = self._fix_object_args(objects, output_dir)
- fixed_args = self._fix_lib_args(libraries, library_dirs,
- runtime_library_dirs)
- (libraries, library_dirs, runtime_library_dirs) = fixed_args
-
- if runtime_library_dirs:
- self.warn ("I don't know what to do with 'runtime_library_dirs': "
- + str (runtime_library_dirs))
-
- lib_opts = gen_lib_options(self,
- library_dirs, runtime_library_dirs,
- libraries)
- if output_dir is not None:
- output_filename = os.path.join(output_dir, output_filename)
-
- if self._need_link(objects, output_filename):
- if target_desc == CCompiler.EXECUTABLE:
- if debug:
- ldflags = self.ldflags_shared_debug[1:]
- else:
- ldflags = self.ldflags_shared[1:]
- else:
- if debug:
- ldflags = self.ldflags_shared_debug
- else:
- ldflags = self.ldflags_shared
-
- export_opts = []
- for sym in (export_symbols or []):
- export_opts.append("/EXPORT:" + sym)
-
- ld_args = (ldflags + lib_opts + export_opts +
- objects + ['/OUT:' + output_filename])
-
- # The MSVC linker generates .lib and .exp files, which cannot be
- # suppressed by any linker switches. The .lib files may even be
- # needed! Make sure they are generated in the temporary build
- # directory. Since they have different names for debug and release
- # builds, they can go into the same directory.
- if export_symbols is not None:
- (dll_name, dll_ext) = os.path.splitext(
- os.path.basename(output_filename))
- implib_file = os.path.join(
- os.path.dirname(objects[0]),
- self.library_filename(dll_name))
- ld_args.append ('/IMPLIB:' + implib_file)
-
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
-
- self.mkpath(os.path.dirname(output_filename))
- try:
- self.spawn([self.linker] + ld_args)
- except DistutilsExecError as msg:
- raise LinkError(msg)
-
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
-
- # -- Miscellaneous methods -----------------------------------------
-    # These are all used by the 'gen_lib_options()' function, in
- # ccompiler.py.
-
- def library_dir_option(self, dir):
- return "/LIBPATH:" + dir
-
- def runtime_library_dir_option(self, dir):
- raise DistutilsPlatformError(
- "don't know how to set runtime library search path for MSVC++")
-
- def library_option(self, lib):
- return self.library_filename(lib)
-
-
- def find_library_file(self, dirs, lib, debug=0):
- # Prefer a debugging library if found (and requested), but deal
- # with it if we don't have one.
- if debug:
- try_names = [lib + "_d", lib]
- else:
- try_names = [lib]
- for dir in dirs:
- for name in try_names:
- libfile = os.path.join(dir, self.library_filename (name))
- if os.path.exists(libfile):
- return libfile
- else:
- # Oops, didn't find it in *any* of 'dirs'
- return None
-
- # Helper methods for using the MSVC registry settings
-
- def find_exe(self, exe):
- """Return path to an MSVC executable program.
-
- Tries to find the program in several places: first, one of the
- MSVC program search paths from the registry; next, the directories
- in the PATH environment variable. If any of those work, return an
- absolute path that is known to exist. If none of them work, just
- return the original program name, 'exe'.
- """
- for p in self.__paths:
- fn = os.path.join(os.path.abspath(p), exe)
- if os.path.isfile(fn):
- return fn
-
- # didn't find it; try existing path
- for p in os.environ['Path'].split(';'):
- fn = os.path.join(os.path.abspath(p),exe)
- if os.path.isfile(fn):
- return fn
-
- return exe
-
- def get_msvc_paths(self, path, platform='x86'):
- """Get a list of devstudio directories (include, lib or path).
-
-        Return a list of strings.  The list will be empty if we are unable to
-        access the registry or the appropriate registry keys are not found.
- """
- if not _can_read_reg:
- return []
-
- path = path + " dirs"
- if self.__version >= 7:
- key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
- % (self.__root, self.__version))
- else:
- key = (r"%s\6.0\Build System\Components\Platforms"
- r"\Win32 (%s)\Directories" % (self.__root, platform))
-
- for base in HKEYS:
- d = read_values(base, key)
- if d:
- if self.__version >= 7:
- return self.__macros.sub(d[path]).split(";")
- else:
- return d[path].split(";")
- # MSVC 6 seems to create the registry entries we need only when
- # the GUI is run.
- if self.__version == 6:
- for base in HKEYS:
- if read_values(base, r"%s\6.0" % self.__root) is not None:
- self.warn("It seems you have Visual Studio 6 installed, "
- "but the expected registry settings are not present.\n"
- "You must at least run the Visual Studio GUI once "
- "so that these entries are created.")
- break
- return []
-
- def set_path_env_var(self, name):
- """Set environment variable 'name' to an MSVC path type value.
-
- This is equivalent to a SET command prior to execution of spawned
- commands.
- """
-
- if name == "lib":
- p = self.get_msvc_paths("library")
- else:
- p = self.get_msvc_paths(name)
- if p:
- os.environ[name] = ';'.join(p)
-
-
-if get_build_version() >= 8.0:
- log.debug("Importing new compiler from distutils.msvc9compiler")
- OldMSVCCompiler = MSVCCompiler
- from distutils.msvc9compiler import MSVCCompiler
- # get_build_architecture not really relevant now we support cross-compile
- from distutils.msvc9compiler import MacroExpander
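The version sniffing above is the heart of the old MSVCCompiler: it decodes the
"MSC v.NNNN" tag that Microsoft compilers embed in sys.version. A minimal
standalone sketch of that decoding (msc_version is a hypothetical helper name,
not part of distutils):

    import sys

    def msc_version(version_string=sys.version):
        # Mirror get_build_version(): locate "MSC v." and decode the digits.
        prefix = "MSC v."
        i = version_string.find(prefix)
        if i == -1:
            return None                     # not built with MSVC
        s = version_string[i + len(prefix):].split(" ", 1)[0]
        major = int(s[:-2]) - 6             # e.g. "1500" -> 9
        if major >= 13:
            major += 1                      # Microsoft skipped v13
        return major + int(s[2:3]) / 10.0

    print(msc_version("2.7.18 [MSC v.1500 64 bit (AMD64)]"))  # -> 9.0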
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/py35compat.py b/contrib/python/setuptools/py3/setuptools/_distutils/py35compat.py
deleted file mode 100644
index 79b2e7f38c1..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/py35compat.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import sys
-import subprocess
-
-
-def __optim_args_from_interpreter_flags():
- """Return a list of command-line arguments reproducing the current
- optimization settings in sys.flags."""
- args = []
- value = sys.flags.optimize
- if value > 0:
- args.append("-" + "O" * value)
- return args
-
-
-_optim_args_from_interpreter_flags = getattr(
- subprocess,
- "_optim_args_from_interpreter_flags",
- __optim_args_from_interpreter_flags,
-)
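py35compat.py is a compact illustration of a common shim pattern: prefer the
private stdlib helper when it exists, otherwise fall back to a local
reimplementation. A usage sketch, assuming the private subprocess helper may or
may not be present on a given Python:

    import subprocess
    import sys

    # Same getattr fallback as py35compat above.
    def _fallback():
        return ["-" + "O" * sys.flags.optimize] if sys.flags.optimize else []

    optim_args = getattr(subprocess, "_optim_args_from_interpreter_flags",
                         _fallback)
    print(optim_args())  # [] normally, ['-O'] or ['-OO'] under python -O/-OO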
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/py38compat.py b/contrib/python/setuptools/py3/setuptools/_distutils/py38compat.py
deleted file mode 100644
index 7dbe8cef54a..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/py38compat.py
+++ /dev/null
@@ -1,7 +0,0 @@
-def aix_platform(osname, version, release):
- try:
- import _aix_support
- return _aix_support.aix_platform()
- except ImportError:
- pass
- return "%s-%s.%s" % (osname, version, release)
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/spawn.py b/contrib/python/setuptools/py3/setuptools/_distutils/spawn.py
deleted file mode 100644
index 6e1c89f1f23..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/spawn.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""distutils.spawn
-
-Provides the 'spawn()' function, a front-end to various platform-
-specific functions for launching another program in a sub-process.
-Also provides the 'find_executable()' to search the path for a given
-executable name.
-"""
-
-import sys
-import os
-import subprocess
-
-from distutils.errors import DistutilsPlatformError, DistutilsExecError
-from distutils.debug import DEBUG
-from distutils import log
-
-
-def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None):
- """Run another program, specified as a command list 'cmd', in a new process.
-
- 'cmd' is just the argument list for the new process, ie.
- cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
- There is no way to run a program with a name different from that of its
- executable.
-
- If 'search_path' is true (the default), the system's executable
- search path will be used to find the program; otherwise, cmd[0]
- must be the exact path to the executable. If 'dry_run' is true,
- the command will not actually be run.
-
- Raise DistutilsExecError if running the program fails in any way; just
- return on success.
- """
- # cmd is documented as a list, but just in case some code passes a tuple
- # in, protect our %-formatting code against horrible death
- cmd = list(cmd)
-
- log.info(subprocess.list2cmdline(cmd))
- if dry_run:
- return
-
- if search_path:
- executable = find_executable(cmd[0])
- if executable is not None:
- cmd[0] = executable
-
- env = env if env is not None else dict(os.environ)
-
- if sys.platform == 'darwin':
- from distutils.util import MACOSX_VERSION_VAR, get_macosx_target_ver
- macosx_target_ver = get_macosx_target_ver()
- if macosx_target_ver:
- env[MACOSX_VERSION_VAR] = macosx_target_ver
-
- try:
- proc = subprocess.Popen(cmd, env=env)
- proc.wait()
- exitcode = proc.returncode
- except OSError as exc:
- if not DEBUG:
- cmd = cmd[0]
- raise DistutilsExecError(
- "command %r failed: %s" % (cmd, exc.args[-1])) from exc
-
- if exitcode:
- if not DEBUG:
- cmd = cmd[0]
- raise DistutilsExecError(
- "command %r failed with exit code %s" % (cmd, exitcode))
-
-
-def find_executable(executable, path=None):
- """Tries to find 'executable' in the directories listed in 'path'.
-
-    'path' is a string of directories separated by 'os.pathsep'; defaults to
-    os.environ['PATH'].  Returns the complete filename or None if not found.
- """
- _, ext = os.path.splitext(executable)
- if (sys.platform == 'win32') and (ext != '.exe'):
- executable = executable + '.exe'
-
- if os.path.isfile(executable):
- return executable
-
- if path is None:
- path = os.environ.get('PATH', None)
- if path is None:
- try:
- path = os.confstr("CS_PATH")
- except (AttributeError, ValueError):
- # os.confstr() or CS_PATH is not available
- path = os.defpath
- # bpo-35755: Don't use os.defpath if the PATH environment variable is
- # set to an empty string
-
- # PATH='' doesn't match, whereas PATH=':' looks in the current directory
- if not path:
- return None
-
- paths = path.split(os.pathsep)
- for p in paths:
- f = os.path.join(p, executable)
- if os.path.isfile(f):
- # the file exists, we have a shot at spawn working
- return f
- return None
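Taken together, find_executable() resolves a program name against PATH and
spawn() runs it, raising DistutilsExecError on failure. A small usage sketch,
assuming a Python whose distutils still ships this module:

    from distutils.spawn import find_executable, spawn

    cc = find_executable("cc")          # absolute path or None
    if cc is not None:
        # dry_run=1 only logs the command line instead of executing it
        spawn([cc, "--version"], dry_run=1)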
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/sysconfig.py b/contrib/python/setuptools/py3/setuptools/_distutils/sysconfig.py
deleted file mode 100644
index d36d94f76f8..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/sysconfig.py
+++ /dev/null
@@ -1,601 +0,0 @@
-"""Provide access to Python's configuration information. The specific
-configuration variables available depend heavily on the platform and
-configuration. The values may be retrieved using
-get_config_var(name), and the list of variables is available via
-get_config_vars().keys(). Additional convenience functions are also
-available.
-
-Written by: Fred L. Drake, Jr.
-"""
-
-import _imp
-import os
-import re
-import sys
-
-from .errors import DistutilsPlatformError
-
-IS_PYPY = '__pypy__' in sys.builtin_module_names
-
-# These are needed in a couple of spots, so just compute them once.
-PREFIX = os.path.normpath(sys.prefix)
-EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
-BASE_PREFIX = os.path.normpath(sys.base_prefix)
-BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
-
-# Path to the base directory of the project. On Windows the binary may
-# live in project/PCbuild/win32 or project/PCbuild/amd64.
-# set for cross builds
-if "_PYTHON_PROJECT_BASE" in os.environ:
- project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
-else:
- if sys.executable:
- project_base = os.path.dirname(os.path.abspath(sys.executable))
- else:
- # sys.executable can be empty if argv[0] has been changed and Python is
- # unable to retrieve the real program name
- project_base = os.getcwd()
-
-
-# python_build: (Boolean) if true, we're either building Python or
-# building an extension with an un-installed Python, so we use
-# different (hard-wired) directories.
-def _is_python_source_dir(d):
- for fn in ("Setup", "Setup.local"):
- if os.path.isfile(os.path.join(d, "Modules", fn)):
- return True
- return False
-
-_sys_home = getattr(sys, '_home', None)
-
-if os.name == 'nt':
- def _fix_pcbuild(d):
- if d and os.path.normcase(d).startswith(
- os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
- return PREFIX
- return d
- project_base = _fix_pcbuild(project_base)
- _sys_home = _fix_pcbuild(_sys_home)
-
-def _python_build():
- if _sys_home:
- return _is_python_source_dir(_sys_home)
- return _is_python_source_dir(project_base)
-
-python_build = _python_build()
-
-
-# Calculate the build qualifier flags if they are defined. Adding the flags
-# to the include and lib directories only makes sense for an installation, not
-# an in-source build.
-build_flags = ''
-try:
- if not python_build:
- build_flags = sys.abiflags
-except AttributeError:
- # It's not a configure-based build, so the sys module doesn't have
- # this attribute, which is fine.
- pass
-
-def get_python_version():
- """Return a string containing the major and minor Python version,
- leaving off the patchlevel. Sample return values could be '1.5'
- or '2.2'.
- """
- return '%d.%d' % sys.version_info[:2]
-
-
-def get_python_inc(plat_specific=0, prefix=None):
- """Return the directory containing installed Python header files.
-
- If 'plat_specific' is false (the default), this is the path to the
- non-platform-specific header files, i.e. Python.h and so on;
- otherwise, this is the path to platform-specific header files
- (namely pyconfig.h).
-
- If 'prefix' is supplied, use it instead of sys.base_prefix or
- sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
- """
- if prefix is None:
- prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
- if os.name == "posix":
- if IS_PYPY and sys.version_info < (3, 8):
- return os.path.join(prefix, 'include')
- if python_build:
- # Assume the executable is in the build directory. The
- # pyconfig.h file should be in the same directory. Since
- # the build directory may not be the source directory, we
- # must use "srcdir" from the makefile to find the "Include"
- # directory.
- if plat_specific:
- return _sys_home or project_base
- else:
- incdir = os.path.join(get_config_var('srcdir'), 'Include')
- return os.path.normpath(incdir)
- implementation = 'pypy' if IS_PYPY else 'python'
- python_dir = implementation + get_python_version() + build_flags
- return os.path.join(prefix, "include", python_dir)
- elif os.name == "nt":
- if python_build:
- # Include both the include and PC dir to ensure we can find
- # pyconfig.h
- return (os.path.join(prefix, "include") + os.path.pathsep +
- os.path.join(prefix, "PC"))
- return os.path.join(prefix, "include")
- else:
- raise DistutilsPlatformError(
- "I don't know where Python installs its C header files "
- "on platform '%s'" % os.name)
-
-
-# allow this behavior to be monkey-patched. Ref pypa/distutils#2.
-def _posix_lib(standard_lib, libpython, early_prefix, prefix):
- if standard_lib:
- return libpython
- else:
- return os.path.join(libpython, "site-packages")
-
-
-def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
- """Return the directory containing the Python library (standard or
- site additions).
-
- If 'plat_specific' is true, return the directory containing
- platform-specific modules, i.e. any module from a non-pure-Python
- module distribution; otherwise, return the platform-shared library
- directory. If 'standard_lib' is true, return the directory
- containing standard Python library modules; otherwise, return the
- directory for site-specific modules.
-
- If 'prefix' is supplied, use it instead of sys.base_prefix or
- sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
- """
-
- if IS_PYPY and sys.version_info < (3, 8):
- # PyPy-specific schema
- if prefix is None:
- prefix = PREFIX
- if standard_lib:
- return os.path.join(prefix, "lib-python", sys.version[0])
- return os.path.join(prefix, 'site-packages')
-
- early_prefix = prefix
-
- if prefix is None:
- if standard_lib:
- prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
- else:
- prefix = plat_specific and EXEC_PREFIX or PREFIX
-
- if os.name == "posix":
- if plat_specific or standard_lib:
- # Platform-specific modules (any module from a non-pure-Python
- # module distribution) or standard Python library modules.
- libdir = getattr(sys, "platlibdir", "lib")
- else:
- # Pure Python
- libdir = "lib"
- implementation = 'pypy' if IS_PYPY else 'python'
- libpython = os.path.join(prefix, libdir,
- implementation + get_python_version())
- return _posix_lib(standard_lib, libpython, early_prefix, prefix)
- elif os.name == "nt":
- if standard_lib:
- return os.path.join(prefix, "Lib")
- else:
- return os.path.join(prefix, "Lib", "site-packages")
- else:
- raise DistutilsPlatformError(
- "I don't know where Python installs its library "
- "on platform '%s'" % os.name)
-
-
-
-def customize_compiler(compiler):
- """Do any platform-specific customization of a CCompiler instance.
-
- Mainly needed on Unix, so we can plug in the information that
- varies across Unices and is stored in Python's Makefile.
- """
- if compiler.compiler_type == "unix":
- if sys.platform == "darwin":
- # Perform first-time customization of compiler-related
- # config vars on OS X now that we know we need a compiler.
- # This is primarily to support Pythons from binary
- # installers. The kind and paths to build tools on
- # the user system may vary significantly from the system
- # that Python itself was built on. Also the user OS
- # version and build tools may not support the same set
- # of CPU architectures for universal builds.
- global _config_vars
- # Use get_config_var() to ensure _config_vars is initialized.
- if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
- import _osx_support
- _osx_support.customize_compiler(_config_vars)
- _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
-
- (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
- get_config_vars('CC', 'CXX', 'CFLAGS',
- 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
-
- if 'CC' in os.environ:
- newcc = os.environ['CC']
- if('LDSHARED' not in os.environ
- and ldshared.startswith(cc)):
- # If CC is overridden, use that as the default
- # command for LDSHARED as well
- ldshared = newcc + ldshared[len(cc):]
- cc = newcc
- if 'CXX' in os.environ:
- cxx = os.environ['CXX']
- if 'LDSHARED' in os.environ:
- ldshared = os.environ['LDSHARED']
- if 'CPP' in os.environ:
- cpp = os.environ['CPP']
- else:
- cpp = cc + " -E" # not always
- if 'LDFLAGS' in os.environ:
- ldshared = ldshared + ' ' + os.environ['LDFLAGS']
- if 'CFLAGS' in os.environ:
- cflags = cflags + ' ' + os.environ['CFLAGS']
- ldshared = ldshared + ' ' + os.environ['CFLAGS']
- if 'CPPFLAGS' in os.environ:
- cpp = cpp + ' ' + os.environ['CPPFLAGS']
- cflags = cflags + ' ' + os.environ['CPPFLAGS']
- ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
- if 'AR' in os.environ:
- ar = os.environ['AR']
- if 'ARFLAGS' in os.environ:
- archiver = ar + ' ' + os.environ['ARFLAGS']
- else:
- archiver = ar + ' ' + ar_flags
-
- cc_cmd = cc + ' ' + cflags
- compiler.set_executables(
- preprocessor=cpp,
- compiler=cc_cmd,
- compiler_so=cc_cmd + ' ' + ccshared,
- compiler_cxx=cxx,
- linker_so=ldshared,
- linker_exe=cc,
- archiver=archiver)
-
- if 'RANLIB' in os.environ and compiler.executables.get('ranlib', None):
- compiler.set_executables(ranlib=os.environ['RANLIB'])
-
- compiler.shared_lib_extension = shlib_suffix
-
-
-def get_config_h_filename():
- """Return full pathname of installed pyconfig.h file."""
- if python_build:
- if os.name == "nt":
- inc_dir = os.path.join(_sys_home or project_base, "PC")
- else:
- inc_dir = _sys_home or project_base
- else:
- inc_dir = get_python_inc(plat_specific=1)
-
- return os.path.join(inc_dir, 'pyconfig.h')
-
-
-# Allow this value to be patched by pkgsrc. Ref pypa/distutils#16.
-_makefile_tmpl = 'config-{python_ver}{build_flags}{multiarch}'
-
-
-def get_makefile_filename():
- """Return full pathname of installed Makefile from the Python build."""
- if python_build:
- return os.path.join(_sys_home or project_base, "Makefile")
- lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
- multiarch = (
- '-%s' % sys.implementation._multiarch
- if hasattr(sys.implementation, '_multiarch') else ''
- )
- config_file = _makefile_tmpl.format(
- python_ver=get_python_version(),
- build_flags=build_flags,
- multiarch=multiarch,
- )
- return os.path.join(lib_dir, config_file, 'Makefile')
-
-
-def parse_config_h(fp, g=None):
- """Parse a config.h-style file.
-
- A dictionary containing name/value pairs is returned. If an
- optional dictionary is passed in as the second argument, it is
- used instead of a new dictionary.
- """
- if g is None:
- g = {}
- define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
- undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
- #
- while True:
- line = fp.readline()
- if not line:
- break
- m = define_rx.match(line)
- if m:
- n, v = m.group(1, 2)
- try: v = int(v)
- except ValueError: pass
- g[n] = v
- else:
- m = undef_rx.match(line)
- if m:
- g[m.group(1)] = 0
- return g
-
-
-# Regexes needed for parsing Makefile (and similar syntaxes,
-# like old-style Setup files).
-_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
-_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
-_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
-
-def parse_makefile(fn, g=None):
- """Parse a Makefile-style file.
-
- A dictionary containing name/value pairs is returned. If an
- optional dictionary is passed in as the second argument, it is
- used instead of a new dictionary.
- """
- from distutils.text_file import TextFile
- fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
-
- if g is None:
- g = {}
- done = {}
- notdone = {}
-
- while True:
- line = fp.readline()
- if line is None: # eof
- break
- m = _variable_rx.match(line)
- if m:
- n, v = m.group(1, 2)
- v = v.strip()
- # `$$' is a literal `$' in make
- tmpv = v.replace('$$', '')
-
- if "$" in tmpv:
- notdone[n] = v
- else:
- try:
- v = int(v)
- except ValueError:
- # insert literal `$'
- done[n] = v.replace('$$', '$')
- else:
- done[n] = v
-
- # Variables with a 'PY_' prefix in the makefile. These need to
- # be made available without that prefix through sysconfig.
- # Special care is needed to ensure that variable expansion works, even
- # if the expansion uses the name without a prefix.
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
-
- # do variable interpolation here
- while notdone:
- for name in list(notdone):
- value = notdone[name]
- m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
- if m:
- n = m.group(1)
- found = True
- if n in done:
- item = str(done[n])
- elif n in notdone:
- # get it on a subsequent round
- found = False
- elif n in os.environ:
- # do it like make: fall back to environment
- item = os.environ[n]
-
- elif n in renamed_variables:
- if name.startswith('PY_') and name[3:] in renamed_variables:
- item = ""
-
- elif 'PY_' + n in notdone:
- found = False
-
- else:
- item = str(done['PY_' + n])
- else:
- done[n] = item = ""
- if found:
- after = value[m.end():]
- value = value[:m.start()] + item + after
- if "$" in after:
- notdone[name] = value
- else:
- try: value = int(value)
- except ValueError:
- done[name] = value.strip()
- else:
- done[name] = value
- del notdone[name]
-
- if name.startswith('PY_') \
- and name[3:] in renamed_variables:
-
- name = name[3:]
- if name not in done:
- done[name] = value
- else:
- # bogus variable reference; just drop it since we can't deal
- del notdone[name]
-
- fp.close()
-
- # strip spurious spaces
- for k, v in done.items():
- if isinstance(v, str):
- done[k] = v.strip()
-
- # save the results in the global dictionary
- g.update(done)
- return g
-
-
-def expand_makefile_vars(s, vars):
- """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
- 'string' according to 'vars' (a dictionary mapping variable names to
- values). Variables not present in 'vars' are silently expanded to the
- empty string. The variable values in 'vars' should not contain further
- variable expansions; if 'vars' is the output of 'parse_makefile()',
- you're fine. Returns a variable-expanded version of 's'.
- """
-
- # This algorithm does multiple expansion, so if vars['foo'] contains
- # "${bar}", it will expand ${foo} to ${bar}, and then expand
- # ${bar}... and so forth. This is fine as long as 'vars' comes from
- # 'parse_makefile()', which takes care of such expansions eagerly,
- # according to make's variable expansion semantics.
-
- while True:
- m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
- if m:
- (beg, end) = m.span()
-            s = s[0:beg] + vars.get(m.group(1), '') + s[end:]  # missing vars -> ''
- else:
- break
- return s
-
-
-_config_vars = None
-
-
-_sysconfig_name_tmpl = '_sysconfigdata_{abi}_{platform}_{multiarch}'
-
-
-def _init_posix():
- """Initialize the module as appropriate for POSIX systems."""
- # _sysconfigdata is generated at build time, see the sysconfig module
- name = os.environ.get(
- '_PYTHON_SYSCONFIGDATA_NAME',
- _sysconfig_name_tmpl.format(
- abi=sys.abiflags,
- platform=sys.platform,
- multiarch=getattr(sys.implementation, '_multiarch', ''),
- ),
- )
- try:
- _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
- except ImportError:
- # Python 3.5 and pypy 7.3.1
- _temp = __import__(
- '_sysconfigdata', globals(), locals(), ['build_time_vars'], 0)
- build_time_vars = _temp.build_time_vars
- global _config_vars
- _config_vars = {}
- _config_vars.update(build_time_vars)
-
-
-def _init_nt():
- """Initialize the module as appropriate for NT"""
- g = {}
- # set basic install directories
- g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
- g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
-
- # XXX hmmm.. a normal install puts include files here
- g['INCLUDEPY'] = get_python_inc(plat_specific=0)
-
- g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
- g['EXE'] = ".exe"
- g['VERSION'] = get_python_version().replace(".", "")
- g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
-
- global _config_vars
- _config_vars = g
-
-
-def get_config_vars(*args):
- """With no arguments, return a dictionary of all configuration
- variables relevant for the current platform. Generally this includes
- everything needed to build extensions and install both pure modules and
- extensions. On Unix, this means every variable defined in Python's
- installed Makefile; on Windows it's a much smaller set.
-
- With arguments, return a list of values that result from looking up
- each argument in the configuration variable dictionary.
- """
- global _config_vars
- if _config_vars is None:
- func = globals().get("_init_" + os.name)
- if func:
- func()
- else:
- _config_vars = {}
-
- # Normalized versions of prefix and exec_prefix are handy to have;
- # in fact, these are the standard versions used most places in the
- # Distutils.
- _config_vars['prefix'] = PREFIX
- _config_vars['exec_prefix'] = EXEC_PREFIX
-
- if not IS_PYPY:
- # For backward compatibility, see issue19555
- SO = _config_vars.get('EXT_SUFFIX')
- if SO is not None:
- _config_vars['SO'] = SO
-
- # Always convert srcdir to an absolute path
- srcdir = _config_vars.get('srcdir', project_base)
- if os.name == 'posix':
- if python_build:
- # If srcdir is a relative path (typically '.' or '..')
- # then it should be interpreted relative to the directory
- # containing Makefile.
- base = os.path.dirname(get_makefile_filename())
- srcdir = os.path.join(base, srcdir)
- else:
- # srcdir is not meaningful since the installation is
- # spread about the filesystem. We choose the
- # directory containing the Makefile since we know it
- # exists.
- srcdir = os.path.dirname(get_makefile_filename())
- _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
-
- # Convert srcdir into an absolute path if it appears necessary.
- # Normally it is relative to the build directory. However, during
- # testing, for example, we might be running a non-installed python
- # from a different directory.
- if python_build and os.name == "posix":
- base = project_base
- if (not os.path.isabs(_config_vars['srcdir']) and
- base != os.getcwd()):
- # srcdir is relative and we are not in the same directory
- # as the executable. Assume executable is in the build
- # directory and make srcdir absolute.
- srcdir = os.path.join(base, _config_vars['srcdir'])
- _config_vars['srcdir'] = os.path.normpath(srcdir)
-
- # OS X platforms require special customization to handle
- # multi-architecture, multi-os-version installers
- if sys.platform == 'darwin':
- import _osx_support
- _osx_support.customize_config_vars(_config_vars)
-
- if args:
- vals = []
- for name in args:
- vals.append(_config_vars.get(name))
- return vals
- else:
- return _config_vars
-
-def get_config_var(name):
- """Return the value of a single variable using the dictionary
- returned by 'get_config_vars()'. Equivalent to
- get_config_vars().get(name)
- """
- if name == 'SO':
- import warnings
- warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
- return get_config_vars().get(name)
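The module is queried almost exclusively through get_config_var() and
get_config_vars(); the platform-specific _init_* function runs lazily on the
first call. Typical queries (the values shown are examples, not fixed):

    from distutils import sysconfig

    print(sysconfig.get_python_version())          # e.g. '3.10'
    print(sysconfig.get_python_inc())              # directory holding Python.h
    print(sysconfig.get_config_var('EXT_SUFFIX'))  # e.g. '.cpython-310-x86_64-linux-gnu.so'
    cc, cflags = sysconfig.get_config_vars('CC', 'CFLAGS')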
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/text_file.py b/contrib/python/setuptools/py3/setuptools/_distutils/text_file.py
deleted file mode 100644
index 93abad38f43..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/text_file.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""text_file
-
-provides the TextFile class, which gives an interface to text files
-that (optionally) takes care of stripping comments, ignoring blank
-lines, and joining lines with backslashes."""
-
-import sys, io
-
-
-class TextFile:
- """Provides a file-like object that takes care of all the things you
- commonly want to do when processing a text file that has some
- line-by-line syntax: strip comments (as long as "#" is your
- comment character), skip blank lines, join adjacent lines by
- escaping the newline (ie. backslash at end of line), strip
- leading and/or trailing whitespace. All of these are optional
- and independently controllable.
-
- Provides a 'warn()' method so you can generate warning messages that
- report physical line number, even if the logical line in question
- spans multiple physical lines. Also provides 'unreadline()' for
- implementing line-at-a-time lookahead.
-
- Constructor is called as:
-
- TextFile (filename=None, file=None, **options)
-
- It bombs (RuntimeError) if both 'filename' and 'file' are None;
- 'filename' should be a string, and 'file' a file object (or
- something that provides 'readline()' and 'close()' methods). It is
- recommended that you supply at least 'filename', so that TextFile
- can include it in warning messages. If 'file' is not supplied,
- TextFile creates its own using 'io.open()'.
-
- The options are all boolean, and affect the value returned by
- 'readline()':
- strip_comments [default: true]
- strip from "#" to end-of-line, as well as any whitespace
- leading up to the "#" -- unless it is escaped by a backslash
- lstrip_ws [default: false]
- strip leading whitespace from each line before returning it
- rstrip_ws [default: true]
- strip trailing whitespace (including line terminator!) from
- each line before returning it
-      skip_blanks [default: true]
- skip lines that are empty *after* stripping comments and
- whitespace. (If both lstrip_ws and rstrip_ws are false,
- then some lines may consist of solely whitespace: these will
- *not* be skipped, even if 'skip_blanks' is true.)
- join_lines [default: false]
- if a backslash is the last non-newline character on a line
- after stripping comments and whitespace, join the following line
- to it to form one "logical line"; if N consecutive lines end
- with a backslash, then N+1 physical lines will be joined to
- form one logical line.
- collapse_join [default: false]
- strip leading whitespace from lines that are joined to their
- predecessor; only matters if (join_lines and not lstrip_ws)
- errors [default: 'strict']
- error handler used to decode the file content
-
- Note that since 'rstrip_ws' can strip the trailing newline, the
- semantics of 'readline()' must differ from those of the builtin file
- object's 'readline()' method! In particular, 'readline()' returns
- None for end-of-file: an empty string might just be a blank line (or
- an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
- not."""
-
- default_options = { 'strip_comments': 1,
- 'skip_blanks': 1,
- 'lstrip_ws': 0,
- 'rstrip_ws': 1,
- 'join_lines': 0,
- 'collapse_join': 0,
- 'errors': 'strict',
- }
-
- def __init__(self, filename=None, file=None, **options):
- """Construct a new TextFile object. At least one of 'filename'
- (a string) and 'file' (a file-like object) must be supplied.
-           The keyword argument options are described above and affect
- the values returned by 'readline()'."""
- if filename is None and file is None:
- raise RuntimeError("you must supply either or both of 'filename' and 'file'")
-
- # set values for all options -- either from client option hash
- # or fallback to default_options
- for opt in self.default_options.keys():
- if opt in options:
- setattr(self, opt, options[opt])
- else:
- setattr(self, opt, self.default_options[opt])
-
- # sanity check client option hash
- for opt in options.keys():
- if opt not in self.default_options:
- raise KeyError("invalid TextFile option '%s'" % opt)
-
- if file is None:
- self.open(filename)
- else:
- self.filename = filename
- self.file = file
- self.current_line = 0 # assuming that file is at BOF!
-
- # 'linebuf' is a stack of lines that will be emptied before we
- # actually read from the file; it's only populated by an
- # 'unreadline()' operation
- self.linebuf = []
-
- def open(self, filename):
- """Open a new file named 'filename'. This overrides both the
- 'filename' and 'file' arguments to the constructor."""
- self.filename = filename
- self.file = io.open(self.filename, 'r', errors=self.errors)
- self.current_line = 0
-
- def close(self):
- """Close the current file and forget everything we know about it
- (filename, current line number)."""
- file = self.file
- self.file = None
- self.filename = None
- self.current_line = None
- file.close()
-
- def gen_error(self, msg, line=None):
- outmsg = []
- if line is None:
- line = self.current_line
- outmsg.append(self.filename + ", ")
- if isinstance(line, (list, tuple)):
- outmsg.append("lines %d-%d: " % tuple(line))
- else:
- outmsg.append("line %d: " % line)
- outmsg.append(str(msg))
- return "".join(outmsg)
-
- def error(self, msg, line=None):
- raise ValueError("error: " + self.gen_error(msg, line))
-
- def warn(self, msg, line=None):
- """Print (to stderr) a warning message tied to the current logical
- line in the current file. If the current logical line in the
- file spans multiple physical lines, the warning refers to the
- whole range, eg. "lines 3-5". If 'line' supplied, it overrides
- the current line number; it may be a list or tuple to indicate a
- range of physical lines, or an integer for a single physical
- line."""
- sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
-
- def readline(self):
- """Read and return a single logical line from the current file (or
- from an internal buffer if lines have previously been "unread"
- with 'unreadline()'). If the 'join_lines' option is true, this
- may involve reading multiple physical lines concatenated into a
- single string. Updates the current line number, so calling
- 'warn()' after 'readline()' emits a warning about the physical
- line(s) just read. Returns None on end-of-file, since the empty
-        string can occur if 'rstrip_ws' is true but 'skip_blanks' is
- not."""
- # If any "unread" lines waiting in 'linebuf', return the top
- # one. (We don't actually buffer read-ahead data -- lines only
- # get put in 'linebuf' if the client explicitly does an
- # 'unreadline()'.
- if self.linebuf:
- line = self.linebuf[-1]
- del self.linebuf[-1]
- return line
-
- buildup_line = ''
-
- while True:
- # read the line, make it None if EOF
- line = self.file.readline()
- if line == '':
- line = None
-
- if self.strip_comments and line:
-
- # Look for the first "#" in the line. If none, never
- # mind. If we find one and it's the first character, or
- # is not preceded by "\", then it starts a comment --
- # strip the comment, strip whitespace before it, and
- # carry on. Otherwise, it's just an escaped "#", so
- # unescape it (and any other escaped "#"'s that might be
- # lurking in there) and otherwise leave the line alone.
-
- pos = line.find("#")
- if pos == -1: # no "#" -- no comments
- pass
-
- # It's definitely a comment -- either "#" is the first
- # character, or it's elsewhere and unescaped.
- elif pos == 0 or line[pos-1] != "\\":
- # Have to preserve the trailing newline, because it's
- # the job of a later step (rstrip_ws) to remove it --
- # and if rstrip_ws is false, we'd better preserve it!
- # (NB. this means that if the final line is all comment
- # and has no trailing newline, we will think that it's
- # EOF; I think that's OK.)
- eol = (line[-1] == '\n') and '\n' or ''
- line = line[0:pos] + eol
-
- # If all that's left is whitespace, then skip line
- # *now*, before we try to join it to 'buildup_line' --
- # that way constructs like
- # hello \\
- # # comment that should be ignored
- # there
- # result in "hello there".
- if line.strip() == "":
- continue
- else: # it's an escaped "#"
- line = line.replace("\\#", "#")
-
- # did previous line end with a backslash? then accumulate
- if self.join_lines and buildup_line:
- # oops: end of file
- if line is None:
- self.warn("continuation line immediately precedes "
- "end-of-file")
- return buildup_line
-
- if self.collapse_join:
- line = line.lstrip()
- line = buildup_line + line
-
- # careful: pay attention to line number when incrementing it
- if isinstance(self.current_line, list):
- self.current_line[1] = self.current_line[1] + 1
- else:
- self.current_line = [self.current_line,
- self.current_line + 1]
- # just an ordinary line, read it as usual
- else:
- if line is None: # eof
- return None
-
- # still have to be careful about incrementing the line number!
- if isinstance(self.current_line, list):
- self.current_line = self.current_line[1] + 1
- else:
- self.current_line = self.current_line + 1
-
- # strip whitespace however the client wants (leading and
- # trailing, or one or the other, or neither)
- if self.lstrip_ws and self.rstrip_ws:
- line = line.strip()
- elif self.lstrip_ws:
- line = line.lstrip()
- elif self.rstrip_ws:
- line = line.rstrip()
-
- # blank line (whether we rstrip'ed or not)? skip to next line
- # if appropriate
- if (line == '' or line == '\n') and self.skip_blanks:
- continue
-
- if self.join_lines:
- if line[-1] == '\\':
- buildup_line = line[:-1]
- continue
-
- if line[-2:] == '\\\n':
- buildup_line = line[0:-2] + '\n'
- continue
-
- # well, I guess there's some actual content there: return it
- return line
-
- def readlines(self):
- """Read and return the list of all logical lines remaining in the
- current file."""
- lines = []
- while True:
- line = self.readline()
- if line is None:
- return lines
- lines.append(line)
-
- def unreadline(self, line):
- """Push 'line' (a string) onto an internal buffer that will be
- checked by future 'readline()' calls. Handy for implementing
- a parser with line-at-a-time lookahead."""
- self.linebuf.append(line)
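A short sketch of the comment-stripping and backslash-joining behavior
described in the class docstring; the in-memory file avoids touching disk:

    import io
    from distutils.text_file import TextFile

    buf = io.StringIO("# full-line comment\n"
                      "NAME = \\\n"
                      "    value  # trailing comment\n")
    tf = TextFile(file=buf, filename="<mem>",
                  strip_comments=1, skip_blanks=1, join_lines=1)
    # One logical line: 'NAME =' joined with its continuation line.
    print(tf.readlines())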
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/unixccompiler.py b/contrib/python/setuptools/py3/setuptools/_distutils/unixccompiler.py
deleted file mode 100644
index a07e5988904..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/unixccompiler.py
+++ /dev/null
@@ -1,325 +0,0 @@
-"""distutils.unixccompiler
-
-Contains the UnixCCompiler class, a subclass of CCompiler that handles
-the "typical" Unix-style command-line C compiler:
- * macros defined with -Dname[=value]
- * macros undefined with -Uname
- * include search directories specified with -Idir
-  * libraries specified with -llib
- * library search directories specified with -Ldir
- * compile handled by 'cc' (or similar) executable with -c option:
- compiles .c to .o
- * link static library handled by 'ar' command (possibly with 'ranlib')
- * link shared library handled by 'cc -shared'
-"""
-
-import os, sys, re, shlex
-
-from distutils import sysconfig
-from distutils.dep_util import newer
-from distutils.ccompiler import \
- CCompiler, gen_preprocess_options, gen_lib_options
-from distutils.errors import \
- DistutilsExecError, CompileError, LibError, LinkError
-from distutils import log
-
-if sys.platform == 'darwin':
- import _osx_support
-
-# XXX Things not currently handled:
-# * optimization/debug/warning flags; we just use whatever's in Python's
-# Makefile and live with it. Is this adequate? If not, we might
-# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
-# SunCCompiler, and I suspect down that road lies madness.
-# * even if we don't know a warning flag from an optimization flag,
-# we need some way for outsiders to feed preprocessor/compiler/linker
-# flags in to us -- eg. a sysadmin might want to mandate certain flags
-# via a site config file, or a user might want to set something for
-# compiling this module distribution only via the setup.py command
-# line, whatever. As long as these options come from something on the
-# current system, they can be as system-dependent as they like, and we
-# should just happily stuff them into the preprocessor/compiler/linker
-# options and carry on.
-
-
-class UnixCCompiler(CCompiler):
-
- compiler_type = 'unix'
-
- # These are used by CCompiler in two places: the constructor sets
- # instance attributes 'preprocessor', 'compiler', etc. from them, and
- # 'set_executable()' allows any of these to be set. The defaults here
- # are pretty generic; they will probably have to be set by an outsider
- # (eg. using information discovered by the sysconfig about building
- # Python extensions).
- executables = {'preprocessor' : None,
- 'compiler' : ["cc"],
- 'compiler_so' : ["cc"],
- 'compiler_cxx' : ["cc"],
- 'linker_so' : ["cc", "-shared"],
- 'linker_exe' : ["cc"],
- 'archiver' : ["ar", "-cr"],
- 'ranlib' : None,
- }
-
- if sys.platform[:6] == "darwin":
- executables['ranlib'] = ["ranlib"]
-
- # Needed for the filename generation methods provided by the base
- # class, CCompiler. NB. whoever instantiates/uses a particular
- # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
- # reasonable common default here, but it's not necessarily used on all
- # Unices!
-
- src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
- obj_extension = ".o"
- static_lib_extension = ".a"
- shared_lib_extension = ".so"
- dylib_lib_extension = ".dylib"
- xcode_stub_lib_extension = ".tbd"
- static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
- xcode_stub_lib_format = dylib_lib_format
- if sys.platform == "cygwin":
- exe_extension = ".exe"
-
- def preprocess(self, source, output_file=None, macros=None,
- include_dirs=None, extra_preargs=None, extra_postargs=None):
- fixed_args = self._fix_compile_args(None, macros, include_dirs)
- ignore, macros, include_dirs = fixed_args
- pp_opts = gen_preprocess_options(macros, include_dirs)
- pp_args = self.preprocessor + pp_opts
- if output_file:
- pp_args.extend(['-o', output_file])
- if extra_preargs:
- pp_args[:0] = extra_preargs
- if extra_postargs:
- pp_args.extend(extra_postargs)
- pp_args.append(source)
-
- # We need to preprocess: either we're being forced to, or we're
- # generating output to stdout, or there's a target output file and
- # the source file is newer than the target (or the target doesn't
- # exist).
- if self.force or output_file is None or newer(source, output_file):
- if output_file:
- self.mkpath(os.path.dirname(output_file))
- try:
- self.spawn(pp_args)
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- compiler_so = self.compiler_so
- if sys.platform == 'darwin':
- compiler_so = _osx_support.compiler_fixup(compiler_so,
- cc_args + extra_postargs)
- try:
- self.spawn(compiler_so + cc_args + [src, '-o', obj] +
- extra_postargs)
- except DistutilsExecError as msg:
- raise CompileError(msg)
-
- def create_static_lib(self, objects, output_libname,
- output_dir=None, debug=0, target_lang=None):
- objects, output_dir = self._fix_object_args(objects, output_dir)
-
- output_filename = \
- self.library_filename(output_libname, output_dir=output_dir)
-
- if self._need_link(objects, output_filename):
- self.mkpath(os.path.dirname(output_filename))
- self.spawn(self.archiver +
- [output_filename] +
- objects + self.objects)
-
-            # Not many Unices require ranlib anymore -- SunOS 4.x is, I
-            # think, the only major Unix that does.  Maybe we need some
- # platform intelligence here to skip ranlib if it's not
- # needed -- or maybe Python's configure script took care of
- # it for us, hence the check for leading colon.
- if self.ranlib:
- try:
- self.spawn(self.ranlib + [output_filename])
- except DistutilsExecError as msg:
- raise LibError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- def link(self, target_desc, objects,
- output_filename, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None,
- export_symbols=None, debug=0, extra_preargs=None,
- extra_postargs=None, build_temp=None, target_lang=None):
- objects, output_dir = self._fix_object_args(objects, output_dir)
- fixed_args = self._fix_lib_args(libraries, library_dirs,
- runtime_library_dirs)
- libraries, library_dirs, runtime_library_dirs = fixed_args
-
- lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
- libraries)
- if not isinstance(output_dir, (str, type(None))):
- raise TypeError("'output_dir' must be a string or None")
- if output_dir is not None:
- output_filename = os.path.join(output_dir, output_filename)
-
- if self._need_link(objects, output_filename):
- ld_args = (objects + self.objects +
- lib_opts + ['-o', output_filename])
- if debug:
- ld_args[:0] = ['-g']
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
- self.mkpath(os.path.dirname(output_filename))
- try:
- if target_desc == CCompiler.EXECUTABLE:
- linker = self.linker_exe[:]
- else:
- linker = self.linker_so[:]
- if target_lang == "c++" and self.compiler_cxx:
- # skip over environment variable settings if /usr/bin/env
- # is used to set up the linker's environment.
- # This is needed on OSX. Note: this assumes that the
- # normal and C++ compiler have the same environment
- # settings.
- i = 0
- if os.path.basename(linker[0]) == "env":
- i = 1
- while '=' in linker[i]:
- i += 1
-
- if os.path.basename(linker[i]) == 'ld_so_aix':
- # AIX platforms prefix the compiler with the ld_so_aix
- # script, so we need to adjust our linker index
- offset = 1
- else:
- offset = 0
-
- linker[i+offset] = self.compiler_cxx[i]
-
- if sys.platform == 'darwin':
- linker = _osx_support.compiler_fixup(linker, ld_args)
-
- self.spawn(linker + ld_args)
- except DistutilsExecError as msg:
- raise LinkError(msg)
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # -- Miscellaneous methods -----------------------------------------
-    # These are all used by the 'gen_lib_options()' function, in
- # ccompiler.py.
-
- def library_dir_option(self, dir):
- return "-L" + dir
-
- def _is_gcc(self, compiler_name):
- return "gcc" in compiler_name or "g++" in compiler_name
-
- def runtime_library_dir_option(self, dir):
- # XXX Hackish, at the very least. See Python bug #445902:
- # http://sourceforge.net/tracker/index.php
- # ?func=detail&aid=445902&group_id=5470&atid=105470
- # Linkers on different platforms need different options to
- # specify that directories need to be added to the list of
- # directories searched for dependencies when a dynamic library
- # is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to
- # be told to pass the -R option through to the linker, whereas
- # other compilers and gcc on other systems just know this.
- # Other compilers may need something slightly different. At
- # this time, there's no way to determine this information from
- # the configuration data stored in the Python installation, so
- # we use this hack.
- compiler = os.path.basename(shlex.split(sysconfig.get_config_var("CC"))[0])
- if sys.platform[:6] == "darwin":
- from distutils.util import get_macosx_target_ver, split_version
- macosx_target_ver = get_macosx_target_ver()
- if macosx_target_ver and split_version(macosx_target_ver) >= [10, 5]:
- return "-Wl,-rpath," + dir
- else: # no support for -rpath on earlier macOS versions
- return "-L" + dir
- elif sys.platform[:7] == "freebsd":
- return "-Wl,-rpath=" + dir
- elif sys.platform[:5] == "hp-ux":
- if self._is_gcc(compiler):
- return ["-Wl,+s", "-L" + dir]
- return ["+s", "-L" + dir]
-
- # For all compilers, `-Wl` is the presumed way to
- # pass a compiler option to the linker and `-R` is
- # the way to pass an RPATH.
- if sysconfig.get_config_var("GNULD") == "yes":
- # GNU ld needs an extra option to get a RUNPATH
- # instead of just an RPATH.
- return "-Wl,--enable-new-dtags,-R" + dir
- else:
- return "-Wl,-R" + dir
-
- def library_option(self, lib):
- return "-l" + lib
-
- def find_library_file(self, dirs, lib, debug=0):
- shared_f = self.library_filename(lib, lib_type='shared')
- dylib_f = self.library_filename(lib, lib_type='dylib')
- xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub')
- static_f = self.library_filename(lib, lib_type='static')
-
- if sys.platform == 'darwin':
- # On OSX users can specify an alternate SDK using
- # '-isysroot', calculate the SDK root if it is specified
- # (and use it further on)
- #
- # Note that, as of Xcode 7, Apple SDKs may contain textual stub
- # libraries with .tbd extensions rather than the normal .dylib
- # shared libraries installed in /. The Apple compiler tool
- # chain handles this transparently but it can cause problems
- # for programs that are being built with an SDK and searching
- # for specific libraries. Callers of find_library_file need to
- # keep in mind that the base filename of the returned SDK library
- # file might have a different extension from that of the library
- # file installed on the running system, for example:
- # /Applications/Xcode.app/Contents/Developer/Platforms/
- # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
- # usr/lib/libedit.tbd
- # vs
- # /usr/lib/libedit.dylib
- cflags = sysconfig.get_config_var('CFLAGS')
- m = re.search(r'-isysroot\s*(\S+)', cflags)
- if m is None:
- sysroot = '/'
- else:
- sysroot = m.group(1)
-
-
-
- for dir in dirs:
- shared = os.path.join(dir, shared_f)
- dylib = os.path.join(dir, dylib_f)
- static = os.path.join(dir, static_f)
- xcode_stub = os.path.join(dir, xcode_stub_f)
-
- if sys.platform == 'darwin' and (
- dir.startswith('/System/') or (
- dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
-
- shared = os.path.join(sysroot, dir[1:], shared_f)
- dylib = os.path.join(sysroot, dir[1:], dylib_f)
- static = os.path.join(sysroot, dir[1:], static_f)
- xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f)
-
- # We're second-guessing the linker here, with not much hard
- # data to go on: GCC seems to prefer the shared library, so I'm
- # assuming that *all* Unix C compilers do. And of course I'm
- # ignoring even GCC's "-static" option. So sue me.
- if os.path.exists(dylib):
- return dylib
- elif os.path.exists(xcode_stub):
- return xcode_stub
- elif os.path.exists(shared):
- return shared
- elif os.path.exists(static):
- return static
-
- # Oops, didn't find it in *any* of 'dirs'
- return None
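
For illustration, the platform-specific rpath reasoning in runtime_library_dir_option() above is easiest to see by example. A minimal sketch, assuming distutils is still importable and using a made-up directory '/opt/lib':

    from distutils.unixccompiler import UnixCCompiler

    cc = UnixCCompiler()
    # Per the branches above, for dir='/opt/lib' this yields roughly:
    #   Linux with GNU ld (GNULD == 'yes') -> '-Wl,--enable-new-dtags,-R/opt/lib'
    #   macOS 10.5+                        -> '-Wl,-rpath,/opt/lib'
    #   FreeBSD                            -> '-Wl,-rpath=/opt/lib'
    #   HP-UX with gcc                     -> ['-Wl,+s', '-L/opt/lib']
    print(cc.runtime_library_dir_option('/opt/lib'))
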
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/util.py b/contrib/python/setuptools/py3/setuptools/_distutils/util.py
deleted file mode 100644
index ac6d446d681..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/util.py
+++ /dev/null
@@ -1,548 +0,0 @@
-"""distutils.util
-
-Miscellaneous utility functions -- anything that doesn't fit into
-one of the other *util.py modules.
-"""
-
-import os
-import re
-import importlib.util
-import string
-import sys
-from distutils.errors import DistutilsPlatformError
-from distutils.dep_util import newer
-from distutils.spawn import spawn
-from distutils import log
-from distutils.errors import DistutilsByteCompileError
-from .py35compat import _optim_args_from_interpreter_flags
-
-
-def get_host_platform():
- """Return a string that identifies the current platform. This is used mainly to
- distinguish platform-specific build directories and platform-specific built
- distributions. Typically includes the OS name and version and the
- architecture (as supplied by 'os.uname()'), although the exact information
- included depends on the OS; eg. on Linux, the kernel version isn't
- particularly important.
-
- Examples of returned values:
- linux-i586
- linux-alpha (?)
- solaris-2.6-sun4u
-
- Windows will return one of:
-       win-amd64 (64bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
- win32 (all others - specifically, sys.platform is returned)
-
- For other non-POSIX platforms, currently just returns 'sys.platform'.
-
- """
- if os.name == 'nt':
- if 'amd64' in sys.version.lower():
- return 'win-amd64'
- if '(arm)' in sys.version.lower():
- return 'win-arm32'
- if '(arm64)' in sys.version.lower():
- return 'win-arm64'
- return sys.platform
-
- # Set for cross builds explicitly
- if "_PYTHON_HOST_PLATFORM" in os.environ:
- return os.environ["_PYTHON_HOST_PLATFORM"]
-
- if os.name != "posix" or not hasattr(os, 'uname'):
- # XXX what about the architecture? NT is Intel or Alpha,
- # Mac OS is M68k or PPC, etc.
- return sys.platform
-
- # Try to distinguish various flavours of Unix
-
- (osname, host, release, version, machine) = os.uname()
-
- # Convert the OS name to lowercase, remove '/' characters, and translate
- # spaces (for "Power Macintosh")
- osname = osname.lower().replace('/', '')
- machine = machine.replace(' ', '_')
- machine = machine.replace('/', '-')
-
- if osname[:5] == "linux":
- # At least on Linux/Intel, 'machine' is the processor --
- # i386, etc.
- # XXX what about Alpha, SPARC, etc?
- return "%s-%s" % (osname, machine)
- elif osname[:5] == "sunos":
- if release[0] >= "5": # SunOS 5 == Solaris 2
- osname = "solaris"
- release = "%d.%s" % (int(release[0]) - 3, release[2:])
-            # We can't use "platform.architecture()[0]" because of a
-            # bootstrap problem. We use a dict to get an error
-            # if something suspicious happens.
- bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
- machine += ".%s" % bitness[sys.maxsize]
- # fall through to standard osname-release-machine representation
- elif osname[:3] == "aix":
- from .py38compat import aix_platform
- return aix_platform(osname, version, release)
- elif osname[:6] == "cygwin":
- osname = "cygwin"
- rel_re = re.compile (r'[\d.]+', re.ASCII)
- m = rel_re.match(release)
- if m:
- release = m.group()
- elif osname[:6] == "darwin":
- import _osx_support, distutils.sysconfig
- osname, release, machine = _osx_support.get_platform_osx(
- distutils.sysconfig.get_config_vars(),
- osname, release, machine)
-
- return "%s-%s-%s" % (osname, release, machine)
-
-def get_platform():
- if os.name == 'nt':
- TARGET_TO_PLAT = {
- 'x86' : 'win32',
- 'x64' : 'win-amd64',
- 'arm' : 'win-arm32',
- 'arm64': 'win-arm64',
- }
- return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform()
- else:
- return get_host_platform()
-
-
-if sys.platform == 'darwin':
- _syscfg_macosx_ver = None # cache the version pulled from sysconfig
-MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET'
-
-def _clear_cached_macosx_ver():
- """For testing only. Do not call."""
- global _syscfg_macosx_ver
- _syscfg_macosx_ver = None
-
-def get_macosx_target_ver_from_syscfg():
- """Get the version of macOS latched in the Python interpreter configuration.
-    Returns the version as a string or None if one can't be obtained. Cached."""
- global _syscfg_macosx_ver
- if _syscfg_macosx_ver is None:
- from distutils import sysconfig
- ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or ''
- if ver:
- _syscfg_macosx_ver = ver
- return _syscfg_macosx_ver
-
-def get_macosx_target_ver():
- """Return the version of macOS for which we are building.
-
- The target version defaults to the version in sysconfig latched at time
- the Python interpreter was built, unless overridden by an environment
-    variable. If neither source has a value, then None is returned."""
-
- syscfg_ver = get_macosx_target_ver_from_syscfg()
- env_ver = os.environ.get(MACOSX_VERSION_VAR)
-
- if env_ver:
-        # Validate overridden version against sysconfig version, if we have both.
- # Ensure that the deployment target of the build process is not less
- # than 10.3 if the interpreter was built for 10.3 or later. This
- # ensures extension modules are built with correct compatibility
- # values, specifically LDSHARED which can use
- # '-undefined dynamic_lookup' which only works on >= 10.3.
- if syscfg_ver and split_version(syscfg_ver) >= [10, 3] and \
- split_version(env_ver) < [10, 3]:
- my_msg = ('$' + MACOSX_VERSION_VAR + ' mismatch: '
- 'now "%s" but "%s" during configure; '
- 'must use 10.3 or later'
- % (env_ver, syscfg_ver))
- raise DistutilsPlatformError(my_msg)
- return env_ver
- return syscfg_ver
-
-
-def split_version(s):
- """Convert a dot-separated string into a list of numbers for comparisons"""
- return [int(n) for n in s.split('.')]
-
-
-def convert_path (pathname):
- """Return 'pathname' as a name that will work on the native filesystem,
- i.e. split it on '/' and put it back together again using the current
- directory separator. Needed because filenames in the setup script are
- always supplied in Unix style, and have to be converted to the local
- convention before we can actually use them in the filesystem. Raises
- ValueError on non-Unix-ish systems if 'pathname' either starts or
- ends with a slash.
- """
- if os.sep == '/':
- return pathname
- if not pathname:
- return pathname
- if pathname[0] == '/':
- raise ValueError("path '%s' cannot be absolute" % pathname)
- if pathname[-1] == '/':
- raise ValueError("path '%s' cannot end with '/'" % pathname)
-
- paths = pathname.split('/')
- while '.' in paths:
- paths.remove('.')
- if not paths:
- return os.curdir
- return os.path.join(*paths)
-
-# convert_path ()
-
-
-def change_root (new_root, pathname):
- """Return 'pathname' with 'new_root' prepended. If 'pathname' is
- relative, this is equivalent to "os.path.join(new_root,pathname)".
- Otherwise, it requires making 'pathname' relative and then joining the
- two, which is tricky on DOS/Windows and Mac OS.
- """
- if os.name == 'posix':
- if not os.path.isabs(pathname):
- return os.path.join(new_root, pathname)
- else:
- return os.path.join(new_root, pathname[1:])
-
- elif os.name == 'nt':
- (drive, path) = os.path.splitdrive(pathname)
- if path[0] == '\\':
- path = path[1:]
- return os.path.join(new_root, path)
-
- else:
- raise DistutilsPlatformError("nothing known about platform '%s'" % os.name)
-
-
-_environ_checked = 0
-def check_environ ():
- """Ensure that 'os.environ' has all the environment variables we
- guarantee that users can use in config files, command-line options,
- etc. Currently this includes:
- HOME - user's home directory (Unix only)
- PLAT - description of the current platform, including hardware
- and OS (see 'get_platform()')
- """
- global _environ_checked
- if _environ_checked:
- return
-
- if os.name == 'posix' and 'HOME' not in os.environ:
- try:
- import pwd
- os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
- except (ImportError, KeyError):
- # bpo-10496: if the current user identifier doesn't exist in the
- # password database, do nothing
- pass
-
- if 'PLAT' not in os.environ:
- os.environ['PLAT'] = get_platform()
-
- _environ_checked = 1
-
-
-def subst_vars (s, local_vars):
- """
-    Perform variable substitution on 's'.
- Variables are indicated by format-style braces ("{var}").
- Variable is substituted by the value found in the 'local_vars'
- dictionary or in 'os.environ' if it's not in 'local_vars'.
- 'os.environ' is first checked/augmented to guarantee that it contains
- certain values: see 'check_environ()'. Raise ValueError for any
- variables not found in either 'local_vars' or 'os.environ'.
- """
- check_environ()
- lookup = dict(os.environ)
- lookup.update((name, str(value)) for name, value in local_vars.items())
- try:
- return _subst_compat(s).format_map(lookup)
- except KeyError as var:
- raise ValueError(f"invalid variable {var}")
-
-# subst_vars ()
-
-
-def _subst_compat(s):
- """
- Replace shell/Perl-style variable substitution with
- format-style. For compatibility.
- """
- def _subst(match):
- return f'{{{match.group(1)}}}'
- repl = re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
- if repl != s:
- import warnings
- warnings.warn(
-            "shell/Perl-style substitutions are deprecated",
- DeprecationWarning,
- )
- return repl
-
-
-def grok_environment_error (exc, prefix="error: "):
- # Function kept for backward compatibility.
- # Used to try clever things with EnvironmentErrors,
- # but nowadays str(exception) produces good messages.
- return prefix + str(exc)
-
-
-# Needed by 'split_quoted()'
-_wordchars_re = _squote_re = _dquote_re = None
-def _init_regex():
- global _wordchars_re, _squote_re, _dquote_re
- _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
- _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
- _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-
-def split_quoted (s):
- """Split a string up according to Unix shell-like rules for quotes and
- backslashes. In short: words are delimited by spaces, as long as those
- spaces are not escaped by a backslash, or inside a quoted string.
- Single and double quotes are equivalent, and the quote characters can
- be backslash-escaped. The backslash is stripped from any two-character
- escape sequence, leaving only the escaped character. The quote
- characters are stripped from any quoted string. Returns a list of
- words.
- """
-
- # This is a nice algorithm for splitting up a single string, since it
- # doesn't require character-by-character examination. It was a little
- # bit of a brain-bender to get it working right, though...
- if _wordchars_re is None: _init_regex()
-
- s = s.strip()
- words = []
- pos = 0
-
- while s:
- m = _wordchars_re.match(s, pos)
- end = m.end()
- if end == len(s):
- words.append(s[:end])
- break
-
- if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
- words.append(s[:end]) # we definitely have a word delimiter
- s = s[end:].lstrip()
- pos = 0
-
- elif s[end] == '\\': # preserve whatever is being escaped;
- # will become part of the current word
- s = s[:end] + s[end+1:]
- pos = end+1
-
- else:
- if s[end] == "'": # slurp singly-quoted string
- m = _squote_re.match(s, end)
- elif s[end] == '"': # slurp doubly-quoted string
- m = _dquote_re.match(s, end)
- else:
- raise RuntimeError("this can't happen (bad char '%c')" % s[end])
-
- if m is None:
- raise ValueError("bad string (mismatched %s quotes?)" % s[end])
-
- (beg, end) = m.span()
- s = s[:beg] + s[beg+1:end-1] + s[end:]
- pos = m.end() - 2
-
- if pos >= len(s):
- words.append(s)
- break
-
- return words
-
-# split_quoted ()
-
-
-def execute (func, args, msg=None, verbose=0, dry_run=0):
- """Perform some action that affects the outside world (eg. by
- writing to the filesystem). Such actions are special because they
- are disabled by the 'dry_run' flag. This method takes care of all
- that bureaucracy for you; all you have to do is supply the
- function to call and an argument tuple for it (to embody the
- "external action" being performed), and an optional message to
- print.
- """
- if msg is None:
- msg = "%s%r" % (func.__name__, args)
- if msg[-2:] == ',)': # correct for singleton tuple
- msg = msg[0:-2] + ')'
-
- log.info(msg)
- if not dry_run:
- func(*args)
-
-
-def strtobool (val):
- """Convert a string representation of truth to true (1) or false (0).
-
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
- 'val' is anything else.
- """
- val = val.lower()
- if val in ('y', 'yes', 't', 'true', 'on', '1'):
- return 1
- elif val in ('n', 'no', 'f', 'false', 'off', '0'):
- return 0
- else:
- raise ValueError("invalid truth value %r" % (val,))
-
-
-def byte_compile (py_files,
- optimize=0, force=0,
- prefix=None, base_dir=None,
- verbose=1, dry_run=0,
- direct=None):
- """Byte-compile a collection of Python source files to .pyc
- files in a __pycache__ subdirectory. 'py_files' is a list
- of files to compile; any files that don't end in ".py" are silently
- skipped. 'optimize' must be one of the following:
- 0 - don't optimize
- 1 - normal optimization (like "python -O")
- 2 - extra optimization (like "python -OO")
- If 'force' is true, all files are recompiled regardless of
- timestamps.
-
- The source filename encoded in each bytecode file defaults to the
- filenames listed in 'py_files'; you can modify these with 'prefix' and
-    'base_dir'.  'prefix' is a string that will be stripped off of each
- source filename, and 'base_dir' is a directory name that will be
- prepended (after 'prefix' is stripped). You can supply either or both
- (or neither) of 'prefix' and 'base_dir', as you wish.
-
- If 'dry_run' is true, doesn't actually do anything that would
- affect the filesystem.
-
- Byte-compilation is either done directly in this interpreter process
- with the standard py_compile module, or indirectly by writing a
- temporary script and executing it. Normally, you should let
-    'byte_compile()' figure out whether to use direct compilation or not (see
- the source for details). The 'direct' flag is used by the script
- generated in indirect mode; unless you know what you're doing, leave
- it set to None.
- """
-
- # Late import to fix a bootstrap issue: _posixsubprocess is built by
- # setup.py, but setup.py uses distutils.
- import subprocess
-
- # nothing is done if sys.dont_write_bytecode is True
- if sys.dont_write_bytecode:
- raise DistutilsByteCompileError('byte-compiling is disabled.')
-
- # First, if the caller didn't force us into direct or indirect mode,
- # figure out which mode we should be in. We take a conservative
- # approach: choose direct mode *only* if the current interpreter is
- # in debug mode and optimize is 0. If we're not in debug mode (-O
- # or -OO), we don't know which level of optimization this
- # interpreter is running with, so we can't do direct
- # byte-compilation and be certain that it's the right thing. Thus,
- # always compile indirectly if the current interpreter is in either
- # optimize mode, or if either optimization level was requested by
- # the caller.
- if direct is None:
- direct = (__debug__ and optimize == 0)
-
- # "Indirect" byte-compilation: write a temporary script and then
- # run it with the appropriate flags.
- if not direct:
- try:
- from tempfile import mkstemp
- (script_fd, script_name) = mkstemp(".py")
- except ImportError:
- from tempfile import mktemp
- (script_fd, script_name) = None, mktemp(".py")
- log.info("writing byte-compilation script '%s'", script_name)
- if not dry_run:
- if script_fd is not None:
- script = os.fdopen(script_fd, "w")
- else:
- script = open(script_name, "w")
-
- with script:
- script.write("""\
-from distutils.util import byte_compile
-files = [
-""")
-
- # XXX would be nice to write absolute filenames, just for
- # safety's sake (script should be more robust in the face of
- # chdir'ing before running it). But this requires abspath'ing
- # 'prefix' as well, and that breaks the hack in build_lib's
- # 'byte_compile()' method that carefully tacks on a trailing
- # slash (os.sep really) to make sure the prefix here is "just
- # right". This whole prefix business is rather delicate -- the
- # problem is that it's really a directory, but I'm treating it
- # as a dumb string, so trailing slashes and so forth matter.
-
- #py_files = map(os.path.abspath, py_files)
- #if prefix:
- # prefix = os.path.abspath(prefix)
-
- script.write(",\n".join(map(repr, py_files)) + "]\n")
- script.write("""
-byte_compile(files, optimize=%r, force=%r,
- prefix=%r, base_dir=%r,
- verbose=%r, dry_run=0,
- direct=1)
-""" % (optimize, force, prefix, base_dir, verbose))
-
- cmd = [sys.executable]
- cmd.extend(_optim_args_from_interpreter_flags())
- cmd.append(script_name)
- spawn(cmd, dry_run=dry_run)
- execute(os.remove, (script_name,), "removing %s" % script_name,
- dry_run=dry_run)
-
- # "Direct" byte-compilation: use the py_compile module to compile
- # right here, right now. Note that the script generated in indirect
- # mode simply calls 'byte_compile()' in direct mode, a weird sort of
- # cross-process recursion. Hey, it works!
- else:
- from py_compile import compile
-
- for file in py_files:
- if file[-3:] != ".py":
- # This lets us be lazy and not filter filenames in
- # the "install_lib" command.
- continue
-
- # Terminology from the py_compile module:
- # cfile - byte-compiled file
- # dfile - purported source filename (same as 'file' by default)
- if optimize >= 0:
- opt = '' if optimize == 0 else optimize
- cfile = importlib.util.cache_from_source(
- file, optimization=opt)
- else:
- cfile = importlib.util.cache_from_source(file)
- dfile = file
- if prefix:
- if file[:len(prefix)] != prefix:
- raise ValueError("invalid prefix: filename %r doesn't start with %r"
- % (file, prefix))
- dfile = dfile[len(prefix):]
- if base_dir:
- dfile = os.path.join(base_dir, dfile)
-
- cfile_base = os.path.basename(cfile)
- if direct:
- if force or newer(file, cfile):
- log.info("byte-compiling %s to %s", file, cfile_base)
- if not dry_run:
- compile(file, cfile, dfile)
- else:
- log.debug("skipping byte-compilation of %s to %s",
- file, cfile_base)
-
-# byte_compile ()
-
-def rfc822_escape (header):
- """Return a version of the string escaped for inclusion in an
-    RFC-822 header, by ensuring there are 8 spaces after each newline.
- """
- lines = header.split('\n')
- sep = '\n' + 8 * ' '
- return sep.join(lines)
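
A few of the helpers removed above, exercised as their docstrings describe (a sketch; expected values shown in comments):

    from distutils.util import split_quoted, strtobool, subst_vars, rfc822_escape

    split_quoted(r'one "two three" four\ five')
    # -> ['one', 'two three', 'four five']
    strtobool('Yes')
    # -> 1
    subst_vars('{name}-{version}', {'name': 'pkg', 'version': '1.0'})
    # -> 'pkg-1.0'
    rfc822_escape('line one\nline two')
    # -> 'line one\n        line two'  (8-space continuation indent)
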
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/version.py b/contrib/python/setuptools/py3/setuptools/_distutils/version.py
deleted file mode 100644
index 35e181dbb6d..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/version.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#
-# distutils/version.py
-#
-# Implements multiple version numbering conventions for the
-# Python Module Distribution Utilities.
-#
-# $Id$
-#
-
-"""Provides classes to represent module version numbers (one class for
-each style of version numbering). There are currently two such classes
-implemented: StrictVersion and LooseVersion.
-
-Every version number class implements the following interface:
- * the 'parse' method takes a string and parses it to some internal
- representation; if the string is an invalid version number,
- 'parse' raises a ValueError exception
- * the class constructor takes an optional string argument which,
- if supplied, is passed to 'parse'
- * __str__ reconstructs the string that was passed to 'parse' (or
- an equivalent string -- ie. one that will generate an equivalent
- version number instance)
- * __repr__ generates Python code to recreate the version number instance
- * _cmp compares the current instance with either another instance
- of the same class or a string (which will be parsed to an instance
- of the same class, thus must follow the same rules)
-"""
-
-import re
-import warnings
-import contextlib
-
-
-@contextlib.contextmanager
-def suppress_known_deprecation():
- with warnings.catch_warnings(record=True) as ctx:
- warnings.filterwarnings(
- action='default',
- category=DeprecationWarning,
- message="distutils Version classes are deprecated.",
- )
- yield ctx
-
-
-class Version:
- """Abstract base class for version numbering classes. Just provides
- constructor (__init__) and reproducer (__repr__), because those
- seem to be the same for all version numbering classes; and route
- rich comparisons to _cmp.
- """
-
- def __init__ (self, vstring=None):
- warnings.warn(
- "distutils Version classes are deprecated. "
- "Use packaging.version instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- if vstring:
- self.parse(vstring)
-
- def __repr__ (self):
- return "%s ('%s')" % (self.__class__.__name__, str(self))
-
- def __eq__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c == 0
-
- def __lt__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c < 0
-
- def __le__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c <= 0
-
- def __gt__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c > 0
-
- def __ge__(self, other):
- c = self._cmp(other)
- if c is NotImplemented:
- return c
- return c >= 0
-
-
-# Interface for version-number classes -- must be implemented
-# by the following classes (the concrete ones -- Version should
-# be treated as an abstract class).
-# __init__ (string) - create and take same action as 'parse'
-# (string parameter is optional)
-# parse (string) - convert a string representation to whatever
-# internal representation is appropriate for
-# this style of version numbering
-# __str__ (self) - convert back to a string; should be very similar
-# (if not identical to) the string supplied to parse
-# __repr__ (self) - generate Python code to recreate
-# the instance
-# _cmp (self, other) - compare two version numbers ('other' may
-# be an unparsed version string, or another
-# instance of your version class)
-
-
-class StrictVersion (Version):
-
- """Version numbering for anal retentives and software idealists.
- Implements the standard interface for version number classes as
- described above. A version number consists of two or three
- dot-separated numeric components, with an optional "pre-release" tag
- on the end. The pre-release tag consists of the letter 'a' or 'b'
- followed by a number. If the numeric components of two version
- numbers are equal, then one with a pre-release tag will always
- be deemed earlier (lesser) than one without.
-
- The following are valid version numbers (shown in the order that
- would be obtained by sorting according to the supplied cmp function):
-
- 0.4 0.4.0 (these two are equivalent)
- 0.4.1
- 0.5a1
- 0.5b3
- 0.5
- 0.9.6
- 1.0
- 1.0.4a3
- 1.0.4b1
- 1.0.4
-
- The following are examples of invalid version numbers:
-
- 1
- 2.7.2.2
- 1.3.a4
- 1.3pl1
- 1.3c4
-
- The rationale for this version numbering system will be explained
- in the distutils documentation.
- """
-
- version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
- re.VERBOSE | re.ASCII)
-
-
- def parse (self, vstring):
- match = self.version_re.match(vstring)
- if not match:
- raise ValueError("invalid version number '%s'" % vstring)
-
- (major, minor, patch, prerelease, prerelease_num) = \
- match.group(1, 2, 4, 5, 6)
-
- if patch:
- self.version = tuple(map(int, [major, minor, patch]))
- else:
- self.version = tuple(map(int, [major, minor])) + (0,)
-
- if prerelease:
- self.prerelease = (prerelease[0], int(prerelease_num))
- else:
- self.prerelease = None
-
-
- def __str__ (self):
-
- if self.version[2] == 0:
- vstring = '.'.join(map(str, self.version[0:2]))
- else:
- vstring = '.'.join(map(str, self.version))
-
- if self.prerelease:
- vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
-
- return vstring
-
-
- def _cmp (self, other):
- if isinstance(other, str):
- with suppress_known_deprecation():
- other = StrictVersion(other)
- elif not isinstance(other, StrictVersion):
- return NotImplemented
-
- if self.version != other.version:
- # numeric versions don't match
- # prerelease stuff doesn't matter
- if self.version < other.version:
- return -1
- else:
- return 1
-
- # have to compare prerelease
- # case 1: neither has prerelease; they're equal
- # case 2: self has prerelease, other doesn't; other is greater
- # case 3: self doesn't have prerelease, other does: self is greater
- # case 4: both have prerelease: must compare them!
-
- if (not self.prerelease and not other.prerelease):
- return 0
- elif (self.prerelease and not other.prerelease):
- return -1
- elif (not self.prerelease and other.prerelease):
- return 1
- elif (self.prerelease and other.prerelease):
- if self.prerelease == other.prerelease:
- return 0
- elif self.prerelease < other.prerelease:
- return -1
- else:
- return 1
- else:
- assert False, "never get here"
-
-# end class StrictVersion
-
-
-# The rules according to Greg Stein:
-# 1) a version number has 1 or more numbers separated by a period or by
-# sequences of letters. If only periods, then these are compared
-# left-to-right to determine an ordering.
-# 2) sequences of letters are part of the tuple for comparison and are
-# compared lexicographically
-# 3) recognize the numeric components may have leading zeroes
-#
-# The LooseVersion class below implements these rules: a version number
-# string is split up into a tuple of integer and string components, and
-# comparison is a simple tuple comparison. This means that version
-# numbers behave in a predictable and obvious way, but a way that might
-# not necessarily be how people *want* version numbers to behave. There
-# wouldn't be a problem if people could stick to purely numeric version
-# numbers: just split on period and compare the numbers as tuples.
-# However, people insist on putting letters into their version numbers;
-# the most common purpose seems to be:
-# - indicating a "pre-release" version
-# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
-# - indicating a post-release patch ('p', 'pl', 'patch')
-# but of course this can't cover all version number schemes, and there's
-# no way to know what a programmer means without asking him.
-#
-# The problem is what to do with letters (and other non-numeric
-# characters) in a version number. The current implementation does the
-# obvious and predictable thing: keep them as strings and compare
-# lexically within a tuple comparison. This has the desired effect if
-# an appended letter sequence implies something "post-release":
-# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
-#
-# However, if letters in a version number imply a pre-release version,
-# the "obvious" thing isn't correct. Eg. you would expect that
-# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
-# implemented here, this just isn't so.
-#
-# Two possible solutions come to mind. The first is to tie the
-# comparison algorithm to a particular set of semantic rules, as has
-# been done in the StrictVersion class above. This works great as long
-# as everyone can go along with bondage and discipline. Hopefully a
-# (large) subset of Python module programmers will agree that the
-# particular flavour of bondage and discipline provided by StrictVersion
-# provides enough benefit to be worth using, and will submit their
-# version numbering scheme to its domination. The free-thinking
-# anarchists in the lot will never give in, though, and something needs
-# to be done to accommodate them.
-#
-# Perhaps a "moderately strict" version class could be implemented that
-# lets almost anything slide (syntactically), and makes some heuristic
-# assumptions about non-digits in version number strings. This could
-# sink into special-case-hell, though; if I was as talented and
-# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
-# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
-# just as happy dealing with things like "2g6" and "1.13++". I don't
-# think I'm smart enough to do it right though.
-#
-# In any case, I've coded the test suite for this module (see
-# ../test/test_version.py) specifically to fail on things like comparing
-# "1.2a2" and "1.2". That's not because the *code* is doing anything
-# wrong, it's because the simple, obvious design doesn't match my
-# complicated, hairy expectations for real-world version numbers. It
-# would be a snap to fix the test suite to say, "Yep, LooseVersion does
-# the Right Thing" (ie. the code matches the conception). But I'd rather
-# have a conception that matches common notions about version numbers.
-
-class LooseVersion (Version):
-
- """Version numbering for anarchists and software realists.
- Implements the standard interface for version number classes as
- described above. A version number consists of a series of numbers,
- separated by either periods or strings of letters. When comparing
- version numbers, the numeric components will be compared
- numerically, and the alphabetic components lexically. The following
- are all valid version numbers, in no particular order:
-
- 1.5.1
- 1.5.2b2
- 161
- 3.10a
- 8.02
- 3.4j
- 1996.07.12
- 3.2.pl0
- 3.1.1.6
- 2g6
- 11g
- 0.960923
- 2.2beta29
- 1.13++
- 5.5.kw
- 2.0b1pl0
-
- In fact, there is no such thing as an invalid version number under
- this scheme; the rules for comparison are simple and predictable,
- but may not always give the results you want (for some definition
- of "want").
- """
-
- component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
-
- def parse (self, vstring):
- # I've given up on thinking I can reconstruct the version string
- # from the parsed tuple -- so I just store the string here for
- # use by __str__
- self.vstring = vstring
- components = [x for x in self.component_re.split(vstring)
- if x and x != '.']
- for i, obj in enumerate(components):
- try:
- components[i] = int(obj)
- except ValueError:
- pass
-
- self.version = components
-
-
- def __str__ (self):
- return self.vstring
-
-
- def __repr__ (self):
- return "LooseVersion ('%s')" % str(self)
-
-
- def _cmp (self, other):
- if isinstance(other, str):
- other = LooseVersion(other)
- elif not isinstance(other, LooseVersion):
- return NotImplemented
-
- if self.version == other.version:
- return 0
- if self.version < other.version:
- return -1
- if self.version > other.version:
- return 1
-
-
-# end class LooseVersion
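
The pre-release caveat discussed in the long comment above is easy to demonstrate. A sketch (both classes warn on construction, hence the filter):

    import warnings
    from distutils.version import LooseVersion, StrictVersion

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        # LooseVersion compares component tuples lexically, so the
        # pre-release sorts *after* the plain release:
        assert LooseVersion('1.5.2a2') > LooseVersion('1.5.2')
        # StrictVersion treats 'a2' as a pre-release tag and orders it first:
        assert StrictVersion('1.5.2a2') < StrictVersion('1.5.2')
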
diff --git a/contrib/python/setuptools/py3/setuptools/_distutils/versionpredicate.py b/contrib/python/setuptools/py3/setuptools/_distutils/versionpredicate.py
deleted file mode 100644
index 55f25d91ae0..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_distutils/versionpredicate.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"""Module for parsing and testing package version predicate strings.
-"""
-import re
-import distutils.version
-import operator
-
-
-re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)",
- re.ASCII)
-# (package) (rest)
-
-re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses
-re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
-# (comp) (version)
-
-
-def splitUp(pred):
- """Parse a single version comparison.
-
- Return (comparison string, StrictVersion)
- """
- res = re_splitComparison.match(pred)
- if not res:
- raise ValueError("bad package restriction syntax: %r" % pred)
- comp, verStr = res.groups()
- with distutils.version.suppress_known_deprecation():
- other = distutils.version.StrictVersion(verStr)
- return (comp, other)
-
-compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
- ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
-
-class VersionPredicate:
- """Parse and test package version predicates.
-
- >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)')
-
- The `name` attribute provides the full dotted name that is given::
-
- >>> v.name
- 'pyepat.abc'
-
- The str() of a `VersionPredicate` provides a normalized
- human-readable version of the expression::
-
- >>> print(v)
- pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3)
-
-    The `satisfied_by()` method can be used to determine whether a given
- version number is included in the set described by the version
- restrictions::
-
- >>> v.satisfied_by('1.1')
- True
- >>> v.satisfied_by('1.4')
- True
- >>> v.satisfied_by('1.0')
- False
- >>> v.satisfied_by('4444.4')
- False
- >>> v.satisfied_by('1555.1b3')
- False
-
- `VersionPredicate` is flexible in accepting extra whitespace::
-
- >>> v = VersionPredicate(' pat( == 0.1 ) ')
- >>> v.name
- 'pat'
- >>> v.satisfied_by('0.1')
- True
- >>> v.satisfied_by('0.2')
- False
-
- If any version numbers passed in do not conform to the
- restrictions of `StrictVersion`, a `ValueError` is raised::
-
- >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)')
- Traceback (most recent call last):
- ...
- ValueError: invalid version number '1.2zb3'
-
-    If the module or package name given does not conform to what's
- allowed as a legal module or package name, `ValueError` is
- raised::
-
- >>> v = VersionPredicate('foo-bar')
- Traceback (most recent call last):
- ...
- ValueError: expected parenthesized list: '-bar'
-
- >>> v = VersionPredicate('foo bar (12.21)')
- Traceback (most recent call last):
- ...
- ValueError: expected parenthesized list: 'bar (12.21)'
-
- """
-
- def __init__(self, versionPredicateStr):
- """Parse a version predicate string.
- """
- # Fields:
- # name: package name
- # pred: list of (comparison string, StrictVersion)
-
- versionPredicateStr = versionPredicateStr.strip()
- if not versionPredicateStr:
- raise ValueError("empty package restriction")
- match = re_validPackage.match(versionPredicateStr)
- if not match:
- raise ValueError("bad package name in %r" % versionPredicateStr)
- self.name, paren = match.groups()
- paren = paren.strip()
- if paren:
- match = re_paren.match(paren)
- if not match:
- raise ValueError("expected parenthesized list: %r" % paren)
- str = match.groups()[0]
- self.pred = [splitUp(aPred) for aPred in str.split(",")]
- if not self.pred:
- raise ValueError("empty parenthesized list in %r"
- % versionPredicateStr)
- else:
- self.pred = []
-
- def __str__(self):
- if self.pred:
- seq = [cond + " " + str(ver) for cond, ver in self.pred]
- return self.name + " (" + ", ".join(seq) + ")"
- else:
- return self.name
-
- def satisfied_by(self, version):
- """True if version is compatible with all the predicates in self.
- The parameter version must be acceptable to the StrictVersion
- constructor. It may be either a string or StrictVersion.
- """
- for cond, ver in self.pred:
- if not compmap[cond](version, ver):
- return False
- return True
-
-
-_provision_rx = None
-
-def split_provision(value):
- """Return the name and optional version number of a provision.
-
- The version number, if given, will be returned as a `StrictVersion`
- instance, otherwise it will be `None`.
-
- >>> split_provision('mypkg')
- ('mypkg', None)
- >>> split_provision(' mypkg( 1.2 ) ')
- ('mypkg', StrictVersion ('1.2'))
- """
- global _provision_rx
- if _provision_rx is None:
- _provision_rx = re.compile(
- r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$",
- re.ASCII)
- value = value.strip()
- m = _provision_rx.match(value)
- if not m:
- raise ValueError("illegal provides specification: %r" % value)
- ver = m.group(2) or None
- if ver:
- with distutils.version.suppress_known_deprecation():
- ver = distutils.version.StrictVersion(ver)
- return m.group(1), ver
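
Beyond the doctests above, a short sketch of how a predicate gates a concrete version ('zlib' and the bounds are arbitrary examples; each clause is checked through compmap):

    from distutils.versionpredicate import VersionPredicate

    pred = VersionPredicate('zlib (>=1.1.4, <2.0)')
    pred.satisfied_by('1.2.11')  # True: both clauses hold
    pred.satisfied_by('2.1')     # False: fails the '< 2.0' clause
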
diff --git a/contrib/python/setuptools/py3/setuptools/_imp.py b/contrib/python/setuptools/py3/setuptools/_imp.py
deleted file mode 100644
index 47efd792b3c..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_imp.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Re-implementation of find_module and get_frozen_object
-from the deprecated imp module.
-"""
-
-import os
-import importlib.util
-import importlib.machinery
-
-from .py34compat import module_from_spec
-
-
-PY_SOURCE = 1
-PY_COMPILED = 2
-C_EXTENSION = 3
-C_BUILTIN = 6
-PY_FROZEN = 7
-
-
-def find_spec(module, paths):
- finder = (
- importlib.machinery.PathFinder().find_spec
- if isinstance(paths, list) else
- importlib.util.find_spec
- )
- return finder(module, paths)
-
-
-def find_module(module, paths=None):
- """Just like 'imp.find_module()', but with package support"""
- spec = find_spec(module, paths)
- if spec is None:
- raise ImportError("Can't find %s" % module)
- if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
- spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
-
- kind = -1
- file = None
- static = isinstance(spec.loader, type)
- if spec.origin == 'frozen' or static and issubclass(
- spec.loader, importlib.machinery.FrozenImporter):
- kind = PY_FROZEN
-        path = None  # imp compatibility
- suffix = mode = '' # imp compatibility
- elif spec.origin == 'built-in' or static and issubclass(
- spec.loader, importlib.machinery.BuiltinImporter):
- kind = C_BUILTIN
-        path = None  # imp compatibility
- suffix = mode = '' # imp compatibility
- elif spec.has_location:
- path = spec.origin
- suffix = os.path.splitext(path)[1]
- mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
-
- if suffix in importlib.machinery.SOURCE_SUFFIXES:
- kind = PY_SOURCE
- elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
- kind = PY_COMPILED
- elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
- kind = C_EXTENSION
-
- if kind in {PY_SOURCE, PY_COMPILED}:
- file = open(path, mode)
- else:
- path = None
- suffix = mode = ''
-
- return file, path, (suffix, mode, kind)
-
-
-def get_frozen_object(module, paths=None):
- spec = find_spec(module, paths)
- if not spec:
- raise ImportError("Can't find %s" % module)
- return spec.loader.get_code(module)
-
-
-def get_module(module, paths, info):
- spec = find_spec(module, paths)
- if not spec:
- raise ImportError("Can't find %s" % module)
- return module_from_spec(spec)
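
A hedged sketch of the imp-style tuple that find_module() above returns (the module name 'json' is an arbitrary pure-Python example):

    from setuptools._imp import find_module, PY_SOURCE

    file, path, (suffix, mode, kind) = find_module('json')
    # For a pure-Python module: kind == PY_SOURCE, suffix == '.py', mode == 'r'
    assert kind == PY_SOURCE
    if file:
        file.close()
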
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/__init__.py b/contrib/python/setuptools/py3/setuptools/_vendor/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/__init__.py
+++ /dev/null
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/__init__.py b/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/__init__.py
deleted file mode 100644
index 19a169fc301..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .more import * # noqa
-from .recipes import * # noqa
-
-__version__ = '8.8.0'
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py b/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py
deleted file mode 100644
index 0f7d282aa5d..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py
+++ /dev/null
@@ -1,3825 +0,0 @@
-import warnings
-
-from collections import Counter, defaultdict, deque, abc
-from collections.abc import Sequence
-from concurrent.futures import ThreadPoolExecutor
-from functools import partial, reduce, wraps
-from heapq import merge, heapify, heapreplace, heappop
-from itertools import (
- chain,
- compress,
- count,
- cycle,
- dropwhile,
- groupby,
- islice,
- repeat,
- starmap,
- takewhile,
- tee,
- zip_longest,
-)
-from math import exp, factorial, floor, log
-from queue import Empty, Queue
-from random import random, randrange, uniform
-from operator import itemgetter, mul, sub, gt, lt
-from sys import hexversion, maxsize
-from time import monotonic
-
-from .recipes import (
- consume,
- flatten,
- pairwise,
- powerset,
- take,
- unique_everseen,
-)
-
-__all__ = [
- 'AbortThread',
- 'adjacent',
- 'always_iterable',
- 'always_reversible',
- 'bucket',
- 'callback_iter',
- 'chunked',
- 'circular_shifts',
- 'collapse',
- 'collate',
- 'consecutive_groups',
- 'consumer',
- 'countable',
- 'count_cycle',
- 'mark_ends',
- 'difference',
- 'distinct_combinations',
- 'distinct_permutations',
- 'distribute',
- 'divide',
- 'exactly_n',
- 'filter_except',
- 'first',
- 'groupby_transform',
- 'ilen',
- 'interleave_longest',
- 'interleave',
- 'intersperse',
- 'islice_extended',
- 'iterate',
- 'ichunked',
- 'is_sorted',
- 'last',
- 'locate',
- 'lstrip',
- 'make_decorator',
- 'map_except',
- 'map_reduce',
- 'nth_or_last',
- 'nth_permutation',
- 'nth_product',
- 'numeric_range',
- 'one',
- 'only',
- 'padded',
- 'partitions',
- 'set_partitions',
- 'peekable',
- 'repeat_last',
- 'replace',
- 'rlocate',
- 'rstrip',
- 'run_length',
- 'sample',
- 'seekable',
- 'SequenceView',
- 'side_effect',
- 'sliced',
- 'sort_together',
- 'split_at',
- 'split_after',
- 'split_before',
- 'split_when',
- 'split_into',
- 'spy',
- 'stagger',
- 'strip',
- 'substrings',
- 'substrings_indexes',
- 'time_limited',
- 'unique_to_each',
- 'unzip',
- 'windowed',
- 'with_iter',
- 'UnequalIterablesError',
- 'zip_equal',
- 'zip_offset',
- 'windowed_complete',
- 'all_unique',
- 'value_chain',
- 'product_index',
- 'combination_index',
- 'permutation_index',
-]
-
-_marker = object()
-
-
-def chunked(iterable, n, strict=False):
- """Break *iterable* into lists of length *n*:
-
- >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
- [[1, 2, 3], [4, 5, 6]]
-
-    By default, the last yielded list will have fewer than *n* elements
- if the length of *iterable* is not divisible by *n*:
-
- >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
- [[1, 2, 3], [4, 5, 6], [7, 8]]
-
- To use a fill-in value instead, see the :func:`grouper` recipe.
-
- If the length of *iterable* is not divisible by *n* and *strict* is
- ``True``, then ``ValueError`` will be raised before the last
- list is yielded.
-
- """
- iterator = iter(partial(take, n, iter(iterable)), [])
- if strict:
-
- def ret():
- for chunk in iterator:
- if len(chunk) != n:
- raise ValueError('iterable is not divisible by n.')
- yield chunk
-
- return iter(ret())
- else:
- return iterator
-
-
-def first(iterable, default=_marker):
- """Return the first item of *iterable*, or *default* if *iterable* is
- empty.
-
- >>> first([0, 1, 2, 3])
- 0
- >>> first([], 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
-
- :func:`first` is useful when you have a generator of expensive-to-retrieve
- values and want any arbitrary one. It is marginally shorter than
- ``next(iter(iterable), default)``.
-
- """
- try:
- return next(iter(iterable))
- except StopIteration as e:
- if default is _marker:
- raise ValueError(
- 'first() was called on an empty iterable, and no '
- 'default value was provided.'
- ) from e
- return default
-
-
-def last(iterable, default=_marker):
- """Return the last item of *iterable*, or *default* if *iterable* is
- empty.
-
- >>> last([0, 1, 2, 3])
- 3
- >>> last([], 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
- """
- try:
- if isinstance(iterable, Sequence):
- return iterable[-1]
- # Work around https://bugs.python.org/issue38525
- elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
- return next(reversed(iterable))
- else:
- return deque(iterable, maxlen=1)[-1]
- except (IndexError, TypeError, StopIteration):
- if default is _marker:
- raise ValueError(
- 'last() was called on an empty iterable, and no default was '
- 'provided.'
- )
- return default
-
-
-def nth_or_last(iterable, n, default=_marker):
- """Return the nth or the last item of *iterable*,
- or *default* if *iterable* is empty.
-
- >>> nth_or_last([0, 1, 2, 3], 2)
- 2
- >>> nth_or_last([0, 1], 2)
- 1
- >>> nth_or_last([], 0, 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
- """
- return last(islice(iterable, n + 1), default=default)
-
-
-class peekable:
- """Wrap an iterator to allow lookahead and prepending elements.
-
- Call :meth:`peek` on the result to get the value that will be returned
- by :func:`next`. This won't advance the iterator:
-
- >>> p = peekable(['a', 'b'])
- >>> p.peek()
- 'a'
- >>> next(p)
- 'a'
-
- Pass :meth:`peek` a default value to return that instead of raising
- ``StopIteration`` when the iterator is exhausted.
-
- >>> p = peekable([])
- >>> p.peek('hi')
- 'hi'
-
- peekables also offer a :meth:`prepend` method, which "inserts" items
- at the head of the iterable:
-
- >>> p = peekable([1, 2, 3])
- >>> p.prepend(10, 11, 12)
- >>> next(p)
- 10
- >>> p.peek()
- 11
- >>> list(p)
- [11, 12, 1, 2, 3]
-
- peekables can be indexed. Index 0 is the item that will be returned by
- :func:`next`, index 1 is the item after that, and so on:
- The values up to the given index will be cached.
-
- >>> p = peekable(['a', 'b', 'c', 'd'])
- >>> p[0]
- 'a'
- >>> p[1]
- 'b'
- >>> next(p)
- 'a'
-
- Negative indexes are supported, but be aware that they will cache the
- remaining items in the source iterator, which may require significant
- storage.
-
- To check whether a peekable is exhausted, check its truth value:
-
- >>> p = peekable(['a', 'b'])
- >>> if p: # peekable has items
- ... list(p)
- ['a', 'b']
- >>> if not p: # peekable is exhausted
- ... list(p)
- []
-
- """
-
- def __init__(self, iterable):
- self._it = iter(iterable)
- self._cache = deque()
-
- def __iter__(self):
- return self
-
- def __bool__(self):
- try:
- self.peek()
- except StopIteration:
- return False
- return True
-
- def peek(self, default=_marker):
- """Return the item that will be next returned from ``next()``.
-
- Return ``default`` if there are no items left. If ``default`` is not
- provided, raise ``StopIteration``.
-
- """
- if not self._cache:
- try:
- self._cache.append(next(self._it))
- except StopIteration:
- if default is _marker:
- raise
- return default
- return self._cache[0]
-
- def prepend(self, *items):
- """Stack up items to be the next ones returned from ``next()`` or
- ``self.peek()``. The items will be returned in
- first in, first out order::
-
- >>> p = peekable([1, 2, 3])
- >>> p.prepend(10, 11, 12)
- >>> next(p)
- 10
- >>> list(p)
- [11, 12, 1, 2, 3]
-
- It is possible, by prepending items, to "resurrect" a peekable that
- previously raised ``StopIteration``.
-
- >>> p = peekable([])
- >>> next(p)
- Traceback (most recent call last):
- ...
- StopIteration
- >>> p.prepend(1)
- >>> next(p)
- 1
- >>> next(p)
- Traceback (most recent call last):
- ...
- StopIteration
-
- """
- self._cache.extendleft(reversed(items))
-
- def __next__(self):
- if self._cache:
- return self._cache.popleft()
-
- return next(self._it)
-
- def _get_slice(self, index):
- # Normalize the slice's arguments
- step = 1 if (index.step is None) else index.step
- if step > 0:
- start = 0 if (index.start is None) else index.start
- stop = maxsize if (index.stop is None) else index.stop
- elif step < 0:
- start = -1 if (index.start is None) else index.start
- stop = (-maxsize - 1) if (index.stop is None) else index.stop
- else:
- raise ValueError('slice step cannot be zero')
-
- # If either the start or stop index is negative, we'll need to cache
- # the rest of the iterable in order to slice from the right side.
- if (start < 0) or (stop < 0):
- self._cache.extend(self._it)
- # Otherwise we'll need to find the rightmost index and cache to that
- # point.
- else:
- n = min(max(start, stop) + 1, maxsize)
- cache_len = len(self._cache)
- if n >= cache_len:
- self._cache.extend(islice(self._it, n - cache_len))
-
- return list(self._cache)[index]
-
- def __getitem__(self, index):
- if isinstance(index, slice):
- return self._get_slice(index)
-
- cache_len = len(self._cache)
- if index < 0:
- self._cache.extend(self._it)
- elif index >= cache_len:
- self._cache.extend(islice(self._it, index + 1 - cache_len))
-
- return self._cache[index]
-
-
-def collate(*iterables, **kwargs):
- """Return a sorted merge of the items from each of several already-sorted
- *iterables*.
-
- >>> list(collate('ACDZ', 'AZ', 'JKL'))
- ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
-
- Works lazily, keeping only the next value from each iterable in memory. Use
-    :func:`collate` to, for example, perform an n-way mergesort of items that
- don't fit in memory.
-
- If a *key* function is specified, the iterables will be sorted according
- to its result:
-
- >>> key = lambda s: int(s) # Sort by numeric value, not by string
- >>> list(collate(['1', '10'], ['2', '11'], key=key))
- ['1', '2', '10', '11']
-
-
- If the *iterables* are sorted in descending order, set *reverse* to
- ``True``:
-
- >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
- [5, 4, 3, 2, 1, 0]
-
- If the elements of the passed-in iterables are out of order, you might get
- unexpected results.
-
- On Python 3.5+, this function is an alias for :func:`heapq.merge`.
-
- """
- warnings.warn(
- "collate is no longer part of more_itertools, use heapq.merge",
- DeprecationWarning,
- )
- return merge(*iterables, **kwargs)
-
-
-def consumer(func):
- """Decorator that automatically advances a PEP-342-style "reverse iterator"
- to its first yield point so you don't have to call ``next()`` on it
- manually.
-
- >>> @consumer
- ... def tally():
- ... i = 0
- ... while True:
- ... print('Thing number %s is %s.' % (i, (yield)))
- ... i += 1
- ...
- >>> t = tally()
- >>> t.send('red')
- Thing number 0 is red.
- >>> t.send('fish')
- Thing number 1 is fish.
-
- Without the decorator, you would have to call ``next(t)`` before
- ``t.send()`` could be used.
-
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- gen = func(*args, **kwargs)
- next(gen)
- return gen
-
- return wrapper
-
-
-def ilen(iterable):
- """Return the number of items in *iterable*.
-
- >>> ilen(x for x in range(1000000) if x % 3 == 0)
- 333334
-
- This consumes the iterable, so handle with care.
-
- """
- # This approach was selected because benchmarks showed it's likely the
- # fastest of the known implementations at the time of writing.
- # See GitHub tracker: #236, #230.
- counter = count()
- deque(zip(iterable, counter), maxlen=0)
- return next(counter)
-
-
-def iterate(func, start):
- """Return ``start``, ``func(start)``, ``func(func(start))``, ...
-
- >>> from itertools import islice
- >>> list(islice(iterate(lambda x: 2*x, 1), 10))
- [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
- """
- while True:
- yield start
- start = func(start)
-
-
-def with_iter(context_manager):
- """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
-
- For example, this will close the file when the iterator is exhausted::
-
- upper_lines = (line.upper() for line in with_iter(open('foo')))
-
- Any context manager which returns an iterable is a candidate for
- ``with_iter``.
-
- """
- with context_manager as iterable:
- yield from iterable
-
-
-def one(iterable, too_short=None, too_long=None):
- """Return the first item from *iterable*, which is expected to contain only
- that item. Raise an exception if *iterable* is empty or has more than one
- item.
-
- :func:`one` is useful for ensuring that an iterable contains only one item.
- For example, it can be used to retrieve the result of a database query
- that is expected to return a single row.
-
- If *iterable* is empty, ``ValueError`` will be raised. You may specify a
- different exception with the *too_short* keyword:
-
- >>> it = []
- >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
-        ValueError: too few items in iterable (expected 1)
- >>> too_short = IndexError('too few items')
- >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- IndexError: too few items
-
- Similarly, if *iterable* contains more than one item, ``ValueError`` will
- be raised. You may specify a different exception with the *too_long*
- keyword:
-
- >>> it = ['too', 'many']
- >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: Expected exactly one item in iterable, but got 'too',
- 'many', and perhaps more.
- >>> too_long = RuntimeError
- >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- RuntimeError
-
- Note that :func:`one` attempts to advance *iterable* twice to ensure there
- is only one item. See :func:`spy` or :func:`peekable` to check iterable
- contents less destructively.
-
- """
- it = iter(iterable)
-
- try:
- first_value = next(it)
- except StopIteration as e:
- raise (
- too_short or ValueError('too few items in iterable (expected 1)')
- ) from e
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- 'Expected exactly one item in iterable, but got {!r}, {!r}, '
- 'and perhaps more.'.format(first_value, second_value)
- )
- raise too_long or ValueError(msg)
-
- return first_value
-
-
-def distinct_permutations(iterable, r=None):
- """Yield successive distinct permutations of the elements in *iterable*.
-
- >>> sorted(distinct_permutations([1, 0, 1]))
- [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
-
- Equivalent to ``set(permutations(iterable))``, except duplicates are not
- generated and thrown away. For larger input sequences this is much more
- efficient.
-
- Duplicate permutations arise when there are duplicated elements in the
- input iterable. The number of items returned is
- `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
- items input, and each `x_i` is the count of a distinct item in the input
- sequence.
-
- If *r* is given, only the *r*-length permutations are yielded.
-
- >>> sorted(distinct_permutations([1, 0, 1], r=2))
- [(0, 1), (1, 0), (1, 1)]
- >>> sorted(distinct_permutations(range(3), r=2))
- [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
-
- """
- # Algorithm: https://w.wiki/Qai
- def _full(A):
- while True:
- # Yield the permutation we have
- yield tuple(A)
-
- # Find the largest index i such that A[i] < A[i + 1]
- for i in range(size - 2, -1, -1):
- if A[i] < A[i + 1]:
- break
- # If no such index exists, this permutation is the last one
- else:
- return
-
-            # Find the largest index j greater than i such that A[i] < A[j]
- for j in range(size - 1, i, -1):
- if A[i] < A[j]:
- break
-
- # Swap the value of A[i] with that of A[j], then reverse the
- # sequence from A[i + 1] to form the new permutation
- A[i], A[j] = A[j], A[i]
- A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
-
- # Algorithm: modified from the above
- def _partial(A, r):
-        # Split A into the first r items and the remaining items
- head, tail = A[:r], A[r:]
- right_head_indexes = range(r - 1, -1, -1)
- left_tail_indexes = range(len(tail))
-
- while True:
- # Yield the permutation we have
- yield tuple(head)
-
- # Starting from the right, find the first index of the head with
- # value smaller than the maximum value of the tail - call it i.
- pivot = tail[-1]
- for i in right_head_indexes:
- if head[i] < pivot:
- break
- pivot = head[i]
- else:
- return
-
- # Starting from the left, find the first value of the tail
- # with a value greater than head[i] and swap.
- for j in left_tail_indexes:
- if tail[j] > head[i]:
- head[i], tail[j] = tail[j], head[i]
- break
- # If we didn't find one, start from the right and find the first
- # index of the head with a value greater than head[i] and swap.
- else:
- for j in right_head_indexes:
- if head[j] > head[i]:
- head[i], head[j] = head[j], head[i]
- break
-
- # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
- tail += head[: i - r : -1] # head[i + 1:][::-1]
- i += 1
- head[i:], tail[:] = tail[: r - i], tail[r - i :]
-
- items = sorted(iterable)
-
- size = len(items)
- if r is None:
- r = size
-
- if 0 < r <= size:
- return _full(items) if (r == size) else _partial(items, r)
-
- return iter(() if r else ((),))
-
-
-def intersperse(e, iterable, n=1):
- """Intersperse filler element *e* among the items in *iterable*, leaving
- *n* items between each filler element.
-
- >>> list(intersperse('!', [1, 2, 3, 4, 5]))
- [1, '!', 2, '!', 3, '!', 4, '!', 5]
-
- >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
- [1, 2, None, 3, 4, None, 5]
-
- """
- if n == 0:
- raise ValueError('n must be > 0')
- elif n == 1:
- # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2...
- # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2...
- return islice(interleave(repeat(e), iterable), 1, None)
- else:
- # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
- # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
- # flatten(...) -> x_0, x_1, e, x_2, x_3...
- filler = repeat([e])
- chunks = chunked(iterable, n)
- return flatten(islice(interleave(filler, chunks), 1, None))
-
-
-def unique_to_each(*iterables):
- """Return the elements from each of the input iterables that aren't in the
- other input iterables.
-
- For example, suppose you have a set of packages, each with a set of
- dependencies::
-
- {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
-
- If you remove one package, which dependencies can also be removed?
-
- If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
- associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
- ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
-
- >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
- [['A'], ['C'], ['D']]
-
- If there are duplicates in one input iterable that aren't in the others
- they will be duplicated in the output. Input order is preserved::
-
- >>> unique_to_each("mississippi", "missouri")
- [['p', 'p'], ['o', 'u', 'r']]
-
- It is assumed that the elements of each iterable are hashable.
-
- """
- pool = [list(it) for it in iterables]
- counts = Counter(chain.from_iterable(map(set, pool)))
- uniques = {element for element in counts if counts[element] == 1}
- return [list(filter(uniques.__contains__, it)) for it in pool]
-
-
-def windowed(seq, n, fillvalue=None, step=1):
- """Return a sliding window of width *n* over the given iterable.
-
- >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
- >>> list(all_windows)
- [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
-
- When the window is larger than the iterable, *fillvalue* is used in place
- of missing values:
-
- >>> list(windowed([1, 2, 3], 4))
- [(1, 2, 3, None)]
-
- Each window will advance in increments of *step*:
-
- >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
- [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
-
- To slide into the iterable's items, use :func:`chain` to add filler items
- to the left:
-
- >>> iterable = [1, 2, 3, 4]
- >>> n = 3
- >>> padding = [None] * (n - 1)
- >>> list(windowed(chain(padding, iterable), 3))
- [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
- """
- if n < 0:
- raise ValueError('n must be >= 0')
- if n == 0:
- yield tuple()
- return
- if step < 1:
- raise ValueError('step must be >= 1')
-
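-    # `i` counts down until the first full window is ready, then resets to
-    # *step* so a window is emitted every *step* appends thereafter.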
- window = deque(maxlen=n)
- i = n
- for _ in map(window.append, seq):
- i -= 1
- if not i:
- i = step
- yield tuple(window)
-
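-    # If the window never filled, pad it to length *n* and yield once. If the
-    # source ended partway through a step, pad with *fillvalue* to complete
-    # the final window.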
- size = len(window)
- if size < n:
- yield tuple(chain(window, repeat(fillvalue, n - size)))
- elif 0 < i < min(step, n):
- window += (fillvalue,) * i
- yield tuple(window)
-
-
-def substrings(iterable):
- """Yield all of the substrings of *iterable*.
-
- >>> [''.join(s) for s in substrings('more')]
- ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
-
- Note that non-string iterables can also be subdivided.
-
- >>> list(substrings([0, 1, 2]))
- [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
-
- """
- # The length-1 substrings
- seq = []
- for item in iter(iterable):
- seq.append(item)
- yield (item,)
- seq = tuple(seq)
- item_count = len(seq)
-
- # And the rest
- for n in range(2, item_count + 1):
- for i in range(item_count - n + 1):
- yield seq[i : i + n]
-
-
-def substrings_indexes(seq, reverse=False):
- """Yield all substrings and their positions in *seq*
-
- The items yielded will be a tuple of the form ``(substr, i, j)``, where
- ``substr == seq[i:j]``.
-
- This function only works for iterables that support slicing, such as
- ``str`` objects.
-
- >>> for item in substrings_indexes('more'):
- ... print(item)
- ('m', 0, 1)
- ('o', 1, 2)
- ('r', 2, 3)
- ('e', 3, 4)
- ('mo', 0, 2)
- ('or', 1, 3)
- ('re', 2, 4)
- ('mor', 0, 3)
- ('ore', 1, 4)
- ('more', 0, 4)
-
- Set *reverse* to ``True`` to yield the same items in the opposite order.
-
- """
- r = range(1, len(seq) + 1)
- if reverse:
- r = reversed(r)
- return (
- (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
- )
-
-
-class bucket:
- """Wrap *iterable* and return an object that buckets it iterable into
- child iterables based on a *key* function.
-
- >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
- >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
- >>> sorted(list(s)) # Get the keys
- ['a', 'b', 'c']
- >>> a_iterable = s['a']
- >>> next(a_iterable)
- 'a1'
- >>> next(a_iterable)
- 'a2'
- >>> list(s['b'])
- ['b1', 'b2', 'b3']
-
- The original iterable will be advanced and its items will be cached until
- they are used by the child iterables. This may require significant storage.
-
- By default, attempting to select a bucket to which no items belong will
- exhaust the iterable and cache all values.
- If you specify a *validator* function, selected buckets will instead be
- checked against it.
-
- >>> from itertools import count
- >>> it = count(1, 2) # Infinite sequence of odd numbers
- >>> key = lambda x: x % 10 # Bucket by last digit
- >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
- >>> s = bucket(it, key=key, validator=validator)
- >>> 2 in s
- False
- >>> list(s[2])
- []
-
- """
-
- def __init__(self, iterable, key, validator=None):
- self._it = iter(iterable)
- self._key = key
- self._cache = defaultdict(deque)
- self._validator = validator or (lambda x: True)
-
- def __contains__(self, value):
- if not self._validator(value):
- return False
-
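-        # Pull one matching item to test for membership, then put it back at
-        # the front of that bucket's cache.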
- try:
- item = next(self[value])
- except StopIteration:
- return False
- else:
- self._cache[value].appendleft(item)
-
- return True
-
- def _get_values(self, value):
- """
- Helper to yield items from the parent iterator that match *value*.
- Items that don't match are stored in the local cache as they
- are encountered.
- """
- while True:
- # If we've cached some items that match the target value, emit
- # the first one and evict it from the cache.
- if self._cache[value]:
- yield self._cache[value].popleft()
- # Otherwise we need to advance the parent iterator to search for
- # a matching item, caching the rest.
- else:
- while True:
- try:
- item = next(self._it)
- except StopIteration:
- return
- item_value = self._key(item)
- if item_value == value:
- yield item
- break
- elif self._validator(item_value):
- self._cache[item_value].append(item)
-
- def __iter__(self):
- for item in self._it:
- item_value = self._key(item)
- if self._validator(item_value):
- self._cache[item_value].append(item)
-
- yield from self._cache.keys()
-
- def __getitem__(self, value):
- if not self._validator(value):
- return iter(())
-
- return self._get_values(value)
-
-
-def spy(iterable, n=1):
- """Return a 2-tuple with a list containing the first *n* elements of
- *iterable*, and an iterator with the same items as *iterable*.
- This allows you to "look ahead" at the items in the iterable without
- advancing it.
-
- There is one item in the list by default:
-
- >>> iterable = 'abcdefg'
- >>> head, iterable = spy(iterable)
- >>> head
- ['a']
- >>> list(iterable)
- ['a', 'b', 'c', 'd', 'e', 'f', 'g']
-
- You may use unpacking to retrieve items instead of lists:
-
- >>> (head,), iterable = spy('abcdefg')
- >>> head
- 'a'
- >>> (first, second), iterable = spy('abcdefg', 2)
- >>> first
- 'a'
- >>> second
- 'b'
-
- The number of items requested can be larger than the number of items in
- the iterable:
-
- >>> iterable = [1, 2, 3, 4, 5]
- >>> head, iterable = spy(iterable, 10)
- >>> head
- [1, 2, 3, 4, 5]
- >>> list(iterable)
- [1, 2, 3, 4, 5]
-
- """
- it = iter(iterable)
- head = take(n, it)
-
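-    # Return a copy of *head* so that mutating the lookahead list doesn't
-    # disturb the items chained back onto the returned iterator.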
- return head.copy(), chain(head, it)
-
-
-def interleave(*iterables):
- """Return a new iterable yielding from each iterable in turn,
- until the shortest is exhausted.
-
- >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
- [1, 4, 6, 2, 5, 7]
-
- For a version that doesn't terminate after the shortest iterable is
- exhausted, see :func:`interleave_longest`.
-
- """
- return chain.from_iterable(zip(*iterables))
-
-
-def interleave_longest(*iterables):
- """Return a new iterable yielding from each iterable in turn,
- skipping any that are exhausted.
-
- >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
- [1, 4, 6, 2, 5, 7, 3, 8]
-
- This function produces the same output as :func:`roundrobin`, but may
- perform better for some inputs (in particular when the number of iterables
- is large).
-
- """
- i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
- return (x for x in i if x is not _marker)
-
-
-def collapse(iterable, base_type=None, levels=None):
- """Flatten an iterable with multiple levels of nesting (e.g., a list of
- lists of tuples) into non-iterable types.
-
- >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
- >>> list(collapse(iterable))
- [1, 2, 3, 4, 5, 6]
-
- Binary and text strings are not considered iterable and
- will not be collapsed.
-
- To avoid collapsing other types, specify *base_type*:
-
- >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
- >>> list(collapse(iterable, base_type=tuple))
- ['ab', ('cd', 'ef'), 'gh', 'ij']
-
- Specify *levels* to stop flattening after a certain level:
-
- >>> iterable = [('a', ['b']), ('c', ['d'])]
- >>> list(collapse(iterable)) # Fully flattened
- ['a', 'b', 'c', 'd']
- >>> list(collapse(iterable, levels=1)) # Only one level flattened
- ['a', ['b'], 'c', ['d']]
-
- """
-
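-    # Strings are treated as atoms: iterating a string yields strings, so
-    # recursing into them would never terminate.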
- def walk(node, level):
- if (
- ((levels is not None) and (level > levels))
- or isinstance(node, (str, bytes))
- or ((base_type is not None) and isinstance(node, base_type))
- ):
- yield node
- return
-
- try:
- tree = iter(node)
- except TypeError:
- yield node
- return
- else:
- for child in tree:
- yield from walk(child, level + 1)
-
- yield from walk(iterable, 0)
-
-
-def side_effect(func, iterable, chunk_size=None, before=None, after=None):
- """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
- of items) before yielding the item.
-
- `func` must be a function that takes a single argument. Its return value
- will be discarded.
-
- *before* and *after* are optional functions that take no arguments. They
- will be executed before iteration starts and after it ends, respectively.
-
- `side_effect` can be used for logging, updating progress bars, or anything
- that is not functionally "pure."
-
- Emitting a status message:
-
- >>> from more_itertools import consume
- >>> func = lambda item: print('Received {}'.format(item))
- >>> consume(side_effect(func, range(2)))
- Received 0
- Received 1
-
- Operating on chunks of items:
-
- >>> pair_sums = []
- >>> func = lambda chunk: pair_sums.append(sum(chunk))
- >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
- [0, 1, 2, 3, 4, 5]
- >>> list(pair_sums)
- [1, 5, 9]
-
- Writing to a file-like object:
-
- >>> from io import StringIO
- >>> from more_itertools import consume
- >>> f = StringIO()
- >>> func = lambda x: print(x, file=f)
- >>> before = lambda: print(u'HEADER', file=f)
- >>> after = f.close
- >>> it = [u'a', u'b', u'c']
- >>> consume(side_effect(func, it, before=before, after=after))
- >>> f.closed
- True
-
- """
- try:
- if before is not None:
- before()
-
- if chunk_size is None:
- for item in iterable:
- func(item)
- yield item
- else:
- for chunk in chunked(iterable, chunk_size):
- func(chunk)
- yield from chunk
- finally:
- if after is not None:
- after()
-
-
-def sliced(seq, n, strict=False):
- """Yield slices of length *n* from the sequence *seq*.
-
- >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
- [(1, 2, 3), (4, 5, 6)]
-
-    By default, the last yielded slice will have fewer than *n* elements
- if the length of *seq* is not divisible by *n*:
-
- >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
- [(1, 2, 3), (4, 5, 6), (7, 8)]
-
- If the length of *seq* is not divisible by *n* and *strict* is
- ``True``, then ``ValueError`` will be raised before the last
- slice is yielded.
-
- This function will only work for iterables that support slicing.
- For non-sliceable iterables, see :func:`chunked`.
-
- """
- iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
- if strict:
-
- def ret():
- for _slice in iterator:
- if len(_slice) != n:
- raise ValueError("seq is not divisible by n.")
- yield _slice
-
- return iter(ret())
- else:
- return iterator
-
-
-def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
- """Yield lists of items from *iterable*, where each list is delimited by
- an item where callable *pred* returns ``True``.
-
- >>> list(split_at('abcdcba', lambda x: x == 'b'))
- [['a'], ['c', 'd', 'c'], ['a']]
-
- >>> list(split_at(range(10), lambda n: n % 2 == 1))
- [[0], [2], [4], [6], [8], []]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
- [[0], [2], [4, 5, 6, 7, 8, 9]]
-
- By default, the delimiting items are not included in the output.
-    To include them, set *keep_separator* to ``True``.
-
- >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
- [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- if pred(item):
- yield buf
- if keep_separator:
- yield [item]
- if maxsplit == 1:
- yield list(it)
- return
- buf = []
- maxsplit -= 1
- else:
- buf.append(item)
- yield buf
-
-
-def split_before(iterable, pred, maxsplit=-1):
- """Yield lists of items from *iterable*, where each list ends just before
- an item for which callable *pred* returns ``True``:
-
- >>> list(split_before('OneTwo', lambda s: s.isupper()))
- [['O', 'n', 'e'], ['T', 'w', 'o']]
-
- >>> list(split_before(range(10), lambda n: n % 3 == 0))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- if pred(item) and buf:
- yield buf
- if maxsplit == 1:
- yield [item] + list(it)
- return
- buf = []
- maxsplit -= 1
- buf.append(item)
- if buf:
- yield buf
-
-
-def split_after(iterable, pred, maxsplit=-1):
- """Yield lists of items from *iterable*, where each list ends with an
- item where callable *pred* returns ``True``:
-
- >>> list(split_after('one1two2', lambda s: s.isdigit()))
- [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
-
- >>> list(split_after(range(10), lambda n: n % 3 == 0))
- [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
- [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- buf.append(item)
- if pred(item) and buf:
- yield buf
- if maxsplit == 1:
- yield list(it)
- return
- buf = []
- maxsplit -= 1
- if buf:
- yield buf
-
-
-def split_when(iterable, pred, maxsplit=-1):
- """Split *iterable* into pieces based on the output of *pred*.
- *pred* should be a function that takes successive pairs of items and
- returns ``True`` if the iterable should be split in between them.
-
- For example, to find runs of increasing numbers, split the iterable when
- element ``i`` is larger than element ``i + 1``:
-
- >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
- [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
- ... lambda x, y: x > y, maxsplit=2))
- [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- it = iter(iterable)
- try:
- cur_item = next(it)
- except StopIteration:
- return
-
- buf = [cur_item]
- for next_item in it:
- if pred(cur_item, next_item):
- yield buf
- if maxsplit == 1:
- yield [next_item] + list(it)
- return
- buf = []
- maxsplit -= 1
-
- buf.append(next_item)
- cur_item = next_item
-
- yield buf
-
-
-def split_into(iterable, sizes):
- """Yield a list of sequential items from *iterable* of length 'n' for each
- integer 'n' in *sizes*.
-
- >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
- [[1], [2, 3], [4, 5, 6]]
-
- If the sum of *sizes* is smaller than the length of *iterable*, then the
- remaining items of *iterable* will not be returned.
-
- >>> list(split_into([1,2,3,4,5,6], [2,3]))
- [[1, 2], [3, 4, 5]]
-
- If the sum of *sizes* is larger than the length of *iterable*, fewer items
- will be returned in the iteration that overruns *iterable* and further
- lists will be empty:
-
- >>> list(split_into([1,2,3,4], [1,2,3,4]))
- [[1], [2, 3], [4], []]
-
-    When a ``None`` object is encountered in *sizes*, the returned list will
-    contain items up to the end of *iterable*, the same way that
-    :func:`itertools.islice` does:
-
- >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
- [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
-
- :func:`split_into` can be useful for grouping a series of items where the
-    sizes of the groups are not uniform. An example would be a table row in
-    which multiple columns represent elements of the same feature (e.g. a
-    point represented by x, y, z), but the format is not the same for all
-    columns.
- """
- # convert the iterable argument into an iterator so its contents can
- # be consumed by islice in case it is a generator
- it = iter(iterable)
-
- for size in sizes:
- if size is None:
- yield list(it)
- return
- else:
- yield list(islice(it, size))
-
-
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
- """Yield the elements from *iterable*, followed by *fillvalue*, such that
- at least *n* items are emitted.
-
- >>> list(padded([1, 2, 3], '?', 5))
- [1, 2, 3, '?', '?']
-
- If *next_multiple* is ``True``, *fillvalue* will be emitted until the
- number of items emitted is a multiple of *n*::
-
- >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
- [1, 2, 3, 4, None, None]
-
- If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
- """
- it = iter(iterable)
- if n is None:
- yield from chain(it, repeat(fillvalue))
- elif n < 1:
- raise ValueError('n must be at least 1')
- else:
- item_count = 0
- for item in it:
- yield item
- item_count += 1
-
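-        # range() of a negative number is empty, so nothing extra is emitted
-        # once at least *n* items have been seen.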
- remaining = (n - item_count) % n if next_multiple else n - item_count
- for _ in range(remaining):
- yield fillvalue
-
-
-def repeat_last(iterable, default=None):
- """After the *iterable* is exhausted, keep yielding its last element.
-
- >>> list(islice(repeat_last(range(3)), 5))
- [0, 1, 2, 2, 2]
-
- If the iterable is empty, yield *default* forever::
-
- >>> list(islice(repeat_last(range(0), 42), 5))
- [42, 42, 42, 42, 42]
-
- """
- item = _marker
- for item in iterable:
- yield item
- final = default if item is _marker else item
- yield from repeat(final)
-
-
-def distribute(n, iterable):
- """Distribute the items from *iterable* among *n* smaller iterables.
-
- >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
- >>> list(group_1)
- [1, 3, 5]
- >>> list(group_2)
- [2, 4, 6]
-
- If the length of *iterable* is not evenly divisible by *n*, then the
- length of the returned iterables will not be identical:
-
- >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
- >>> [list(c) for c in children]
- [[1, 4, 7], [2, 5], [3, 6]]
-
- If the length of *iterable* is smaller than *n*, then the last returned
- iterables will be empty:
-
- >>> children = distribute(5, [1, 2, 3])
- >>> [list(c) for c in children]
- [[1], [2], [3], [], []]
-
- This function uses :func:`itertools.tee` and may require significant
-    storage. If you need the order of items in the smaller iterables to
-    match the original iterable, see :func:`divide`.
-
- """
- if n < 1:
- raise ValueError('n must be at least 1')
-
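-    # Each child reads every n-th item from its own tee'd copy of the
-    # iterable, starting at a different offset.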
- children = tee(iterable, n)
- return [islice(it, index, None, n) for index, it in enumerate(children)]
-
-
-def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
- """Yield tuples whose elements are offset from *iterable*.
- The amount by which the `i`-th item in each tuple is offset is given by
- the `i`-th item in *offsets*.
-
- >>> list(stagger([0, 1, 2, 3]))
- [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
- >>> list(stagger(range(8), offsets=(0, 2, 4)))
- [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
-
- By default, the sequence will end when the final element of a tuple is the
- last item in the iterable. To continue until the first element of a tuple
- is the last item in the iterable, set *longest* to ``True``::
-
- >>> list(stagger([0, 1, 2, 3], longest=True))
- [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
-
- By default, ``None`` will be used to replace offsets beyond the end of the
- sequence. Specify *fillvalue* to use some other value.
-
- """
- children = tee(iterable, len(offsets))
-
- return zip_offset(
- *children, offsets=offsets, longest=longest, fillvalue=fillvalue
- )
-
-
-class UnequalIterablesError(ValueError):
- def __init__(self, details=None):
- msg = 'Iterables have different lengths'
- if details is not None:
- msg += (': index 0 has length {}; index {} has length {}').format(
- *details
- )
-
- super().__init__(msg)
-
-
-def _zip_equal_generator(iterables):
- for combo in zip_longest(*iterables, fillvalue=_marker):
- for val in combo:
- if val is _marker:
- raise UnequalIterablesError()
- yield combo
-
-
-def zip_equal(*iterables):
- """``zip`` the input *iterables* together, but raise
- ``UnequalIterablesError`` if they aren't all the same length.
-
- >>> it_1 = range(3)
- >>> it_2 = iter('abc')
- >>> list(zip_equal(it_1, it_2))
- [(0, 'a'), (1, 'b'), (2, 'c')]
-
- >>> it_1 = range(3)
- >>> it_2 = iter('abcd')
- >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- more_itertools.more.UnequalIterablesError: Iterables have different
- lengths
-
- """
- if hexversion >= 0x30A00A6:
- warnings.warn(
- (
- 'zip_equal will be removed in a future version of '
- 'more-itertools. Use the builtin zip function with '
- 'strict=True instead.'
- ),
- DeprecationWarning,
- )
- # Check whether the iterables are all the same size.
- try:
- first_size = len(iterables[0])
- for i, it in enumerate(iterables[1:], 1):
- size = len(it)
- if size != first_size:
- break
- else:
- # If we didn't break out, we can use the built-in zip.
- return zip(*iterables)
-
- # If we did break out, there was a mismatch.
- raise UnequalIterablesError(details=(first_size, i, size))
- # If any one of the iterables didn't have a length, start reading
- # them until one runs out.
- except TypeError:
- return _zip_equal_generator(iterables)
-
-
-def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
- """``zip`` the input *iterables* together, but offset the `i`-th iterable
- by the `i`-th item in *offsets*.
-
- >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
- [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
-
- This can be used as a lightweight alternative to SciPy or pandas to analyze
- data sets in which some series have a lead or lag relationship.
-
- By default, the sequence will end when the shortest iterable is exhausted.
- To continue until the longest iterable is exhausted, set *longest* to
- ``True``.
-
- >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
- [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
-
- By default, ``None`` will be used to replace offsets beyond the end of the
- sequence. Specify *fillvalue* to use some other value.
-
- """
- if len(iterables) != len(offsets):
- raise ValueError("Number of iterables and offsets didn't match")
-
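-    # A negative offset pads the front with *fillvalue*; a positive offset
-    # skips that many leading items.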
- staggered = []
- for it, n in zip(iterables, offsets):
- if n < 0:
- staggered.append(chain(repeat(fillvalue, -n), it))
- elif n > 0:
- staggered.append(islice(it, n, None))
- else:
- staggered.append(it)
-
- if longest:
- return zip_longest(*staggered, fillvalue=fillvalue)
-
- return zip(*staggered)
-
-
-def sort_together(iterables, key_list=(0,), key=None, reverse=False):
- """Return the input iterables sorted together, with *key_list* as the
- priority for sorting. All iterables are trimmed to the length of the
- shortest one.
-
- This can be used like the sorting function in a spreadsheet. If each
- iterable represents a column of data, the key list determines which
- columns are used for sorting.
-
- By default, all iterables are sorted using the ``0``-th iterable::
-
- >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
- >>> sort_together(iterables)
- [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
-
- Set a different key list to sort according to another iterable.
- Specifying multiple keys dictates how ties are broken::
-
- >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
- >>> sort_together(iterables, key_list=(1, 2))
- [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
-
- To sort by a function of the elements of the iterable, pass a *key*
- function. Its arguments are the elements of the iterables corresponding to
- the key list::
-
- >>> names = ('a', 'b', 'c')
- >>> lengths = (1, 2, 3)
- >>> widths = (5, 2, 1)
- >>> def area(length, width):
- ... return length * width
- >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
- [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
-
- Set *reverse* to ``True`` to sort in descending order.
-
- >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
- [(3, 2, 1), ('a', 'b', 'c')]
-
- """
- if key is None:
- # if there is no key function, the key argument to sorted is an
- # itemgetter
- key_argument = itemgetter(*key_list)
- else:
- # if there is a key function, call it with the items at the offsets
- # specified by the key function as arguments
- key_list = list(key_list)
- if len(key_list) == 1:
- # if key_list contains a single item, pass the item at that offset
- # as the only argument to the key function
- key_offset = key_list[0]
- key_argument = lambda zipped_items: key(zipped_items[key_offset])
- else:
- # if key_list contains multiple items, use itemgetter to return a
- # tuple of items, which we pass as *args to the key function
- get_key_items = itemgetter(*key_list)
- key_argument = lambda zipped_items: key(
- *get_key_items(zipped_items)
- )
-
- return list(
- zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
- )
-
-
-def unzip(iterable):
- """The inverse of :func:`zip`, this function disaggregates the elements
- of the zipped *iterable*.
-
- The ``i``-th iterable contains the ``i``-th element from each element
-    of the zipped iterable. The first element is used to determine the
- length of the remaining elements.
-
- >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
- >>> letters, numbers = unzip(iterable)
- >>> list(letters)
- ['a', 'b', 'c', 'd']
- >>> list(numbers)
- [1, 2, 3, 4]
-
- This is similar to using ``zip(*iterable)``, but it avoids reading
- *iterable* into memory. Note, however, that this function uses
- :func:`itertools.tee` and thus may require significant storage.
-
- """
- head, iterable = spy(iter(iterable))
- if not head:
- # empty iterable, e.g. zip([], [], [])
- return ()
- # spy returns a one-length iterable as head
- head = head[0]
- iterables = tee(iterable, len(head))
-
- def itemgetter(i):
- def getter(obj):
- try:
- return obj[i]
- except IndexError:
-                # For an "improperly zipped" iterable like
-                # iter([(1, 2, 3), (4, 5), (6,)]), the second unzipped
-                # iterable would fail at the third tuple (there is no
-                # tup[1]), and the third at the second tuple. This custom
-                # itemgetter stops each unzipped iterable at the first
-                # length mismatch instead of raising IndexError.
- raise StopIteration
-
- return getter
-
- return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
-
-
-def divide(n, iterable):
- """Divide the elements from *iterable* into *n* parts, maintaining
- order.
-
- >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
- >>> list(group_1)
- [1, 2, 3]
- >>> list(group_2)
- [4, 5, 6]
-
- If the length of *iterable* is not evenly divisible by *n*, then the
- length of the returned iterables will not be identical:
-
- >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
- >>> [list(c) for c in children]
- [[1, 2, 3], [4, 5], [6, 7]]
-
-    If the length of the iterable is smaller than *n*, then the last returned
- iterables will be empty:
-
- >>> children = divide(5, [1, 2, 3])
- >>> [list(c) for c in children]
- [[1], [2], [3], [], []]
-
- This function will exhaust the iterable before returning and may require
- significant storage. If order is not important, see :func:`distribute`,
- which does not first pull the iterable into memory.
-
- """
- if n < 1:
- raise ValueError('n must be at least 1')
-
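-    # Use the sequence directly if it supports slicing; otherwise pull the
-    # whole iterable into memory first.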
- try:
- iterable[:0]
- except TypeError:
- seq = tuple(iterable)
- else:
- seq = iterable
-
- q, r = divmod(len(seq), n)
-
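-    # The first *r* parts receive q + 1 items each; the rest receive q.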
- ret = []
- stop = 0
- for i in range(1, n + 1):
- start = stop
- stop += q + 1 if i <= r else q
- ret.append(iter(seq[start:stop]))
-
- return ret
-
-
-def always_iterable(obj, base_type=(str, bytes)):
- """If *obj* is iterable, return an iterator over its items::
-
- >>> obj = (1, 2, 3)
- >>> list(always_iterable(obj))
- [1, 2, 3]
-
- If *obj* is not iterable, return a one-item iterable containing *obj*::
-
- >>> obj = 1
- >>> list(always_iterable(obj))
- [1]
-
- If *obj* is ``None``, return an empty iterable:
-
- >>> obj = None
- >>> list(always_iterable(None))
- []
-
- By default, binary and text strings are not considered iterable::
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj))
- ['foo']
-
- If *base_type* is set, objects for which ``isinstance(obj, base_type)``
- returns ``True`` won't be considered iterable.
-
- >>> obj = {'a': 1}
- >>> list(always_iterable(obj)) # Iterate over the dict's keys
- ['a']
- >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
- [{'a': 1}]
-
- Set *base_type* to ``None`` to avoid any special handling and treat objects
- Python considers iterable as iterable:
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj, base_type=None))
- ['f', 'o', 'o']
- """
- if obj is None:
- return iter(())
-
- if (base_type is not None) and isinstance(obj, base_type):
- return iter((obj,))
-
- try:
- return iter(obj)
- except TypeError:
- return iter((obj,))
-
-
-def adjacent(predicate, iterable, distance=1):
- """Return an iterable over `(bool, item)` tuples where the `item` is
- drawn from *iterable* and the `bool` indicates whether
- that item satisfies the *predicate* or is adjacent to an item that does.
-
- For example, to find whether items are adjacent to a ``3``::
-
- >>> list(adjacent(lambda x: x == 3, range(6)))
- [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
-
- Set *distance* to change what counts as adjacent. For example, to find
- whether items are two places away from a ``3``:
-
- >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
- [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
-
- This is useful for contextualizing the results of a search function.
- For example, a code comparison tool might want to identify lines that
- have changed, but also surrounding lines to give the viewer of the diff
- context.
-
- The predicate function will only be called once for each item in the
- iterable.
-
- See also :func:`groupby_transform`, which can be used with this function
- to group ranges of items with the same `bool` value.
-
- """
- # Allow distance=0 mainly for testing that it reproduces results with map()
- if distance < 0:
- raise ValueError('distance must be at least 0')
-
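-    # Pad the predicate results on both sides so the sliding window of width
-    # 2 * distance + 1 stays centered on each original item.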
- i1, i2 = tee(iterable)
- padding = [False] * distance
- selected = chain(padding, map(predicate, i1), padding)
- adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
- return zip(adjacent_to_selected, i2)
-
-
-def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
- """An extension of :func:`itertools.groupby` that can apply transformations
- to the grouped data.
-
- * *keyfunc* is a function computing a key value for each item in *iterable*
- * *valuefunc* is a function that transforms the individual items from
- *iterable* after grouping
- * *reducefunc* is a function that transforms each group of items
-
- >>> iterable = 'aAAbBBcCC'
- >>> keyfunc = lambda k: k.upper()
- >>> valuefunc = lambda v: v.lower()
- >>> reducefunc = lambda g: ''.join(g)
- >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
- [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
-
- Each optional argument defaults to an identity function if not specified.
-
- :func:`groupby_transform` is useful when grouping elements of an iterable
- using a separate iterable as the key. To do this, :func:`zip` the iterables
- and pass a *keyfunc* that extracts the first element and a *valuefunc*
- that extracts the second element::
-
- >>> from operator import itemgetter
- >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
- >>> values = 'abcdefghi'
- >>> iterable = zip(keys, values)
- >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
- >>> [(k, ''.join(g)) for k, g in grouper]
- [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
-
- Note that the order of items in the iterable is significant.
- Only adjacent items are grouped together, so if you don't want any
- duplicate groups, you should sort the iterable by the key function.
-
- """
- ret = groupby(iterable, keyfunc)
- if valuefunc:
- ret = ((k, map(valuefunc, g)) for k, g in ret)
- if reducefunc:
- ret = ((k, reducefunc(g)) for k, g in ret)
-
- return ret
-
-
-class numeric_range(abc.Sequence, abc.Hashable):
- """An extension of the built-in ``range()`` function whose arguments can
- be any orderable numeric type.
-
- With only *stop* specified, *start* defaults to ``0`` and *step*
- defaults to ``1``. The output items will match the type of *stop*:
-
- >>> list(numeric_range(3.5))
- [0.0, 1.0, 2.0, 3.0]
-
- With only *start* and *stop* specified, *step* defaults to ``1``. The
- output items will match the type of *start*:
-
- >>> from decimal import Decimal
- >>> start = Decimal('2.1')
- >>> stop = Decimal('5.1')
- >>> list(numeric_range(start, stop))
- [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
-
- With *start*, *stop*, and *step* specified the output items will match
- the type of ``start + step``:
-
- >>> from fractions import Fraction
- >>> start = Fraction(1, 2) # Start at 1/2
- >>> stop = Fraction(5, 2) # End at 5/2
- >>> step = Fraction(1, 2) # Count by 1/2
- >>> list(numeric_range(start, stop, step))
- [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
-
- If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
-
- >>> list(numeric_range(3, -1, -1.0))
- [3.0, 2.0, 1.0, 0.0]
-
- Be aware of the limitations of floating point numbers; the representation
- of the yielded numbers may be surprising.
-
- ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
- is a ``datetime.timedelta`` object:
-
- >>> import datetime
- >>> start = datetime.datetime(2019, 1, 1)
- >>> stop = datetime.datetime(2019, 1, 3)
- >>> step = datetime.timedelta(days=1)
- >>> items = iter(numeric_range(start, stop, step))
- >>> next(items)
- datetime.datetime(2019, 1, 1, 0, 0)
- >>> next(items)
- datetime.datetime(2019, 1, 2, 0, 0)
-
- """
-
- _EMPTY_HASH = hash(range(0, 0))
-
- def __init__(self, *args):
- argc = len(args)
- if argc == 1:
- (self._stop,) = args
- self._start = type(self._stop)(0)
- self._step = type(self._stop - self._start)(1)
- elif argc == 2:
- self._start, self._stop = args
- self._step = type(self._stop - self._start)(1)
- elif argc == 3:
- self._start, self._stop, self._step = args
- elif argc == 0:
- raise TypeError(
- 'numeric_range expected at least '
- '1 argument, got {}'.format(argc)
- )
- else:
- raise TypeError(
- 'numeric_range expected at most '
- '3 arguments, got {}'.format(argc)
- )
-
- self._zero = type(self._step)(0)
- if self._step == self._zero:
- raise ValueError('numeric_range() arg 3 must not be zero')
- self._growing = self._step > self._zero
- self._init_len()
-
- def __bool__(self):
- if self._growing:
- return self._start < self._stop
- else:
- return self._start > self._stop
-
- def __contains__(self, elem):
- if self._growing:
- if self._start <= elem < self._stop:
- return (elem - self._start) % self._step == self._zero
- else:
- if self._start >= elem > self._stop:
- return (self._start - elem) % (-self._step) == self._zero
-
- return False
-
- def __eq__(self, other):
- if isinstance(other, numeric_range):
- empty_self = not bool(self)
- empty_other = not bool(other)
- if empty_self or empty_other:
- return empty_self and empty_other # True if both empty
- else:
- return (
- self._start == other._start
- and self._step == other._step
- and self._get_by_index(-1) == other._get_by_index(-1)
- )
- else:
- return False
-
- def __getitem__(self, key):
- if isinstance(key, int):
- return self._get_by_index(key)
- elif isinstance(key, slice):
- step = self._step if key.step is None else key.step * self._step
-
- if key.start is None or key.start <= -self._len:
- start = self._start
- elif key.start >= self._len:
- start = self._stop
- else: # -self._len < key.start < self._len
- start = self._get_by_index(key.start)
-
- if key.stop is None or key.stop >= self._len:
- stop = self._stop
- elif key.stop <= -self._len:
- stop = self._start
- else: # -self._len < key.stop < self._len
- stop = self._get_by_index(key.stop)
-
- return numeric_range(start, stop, step)
- else:
- raise TypeError(
- 'numeric range indices must be '
- 'integers or slices, not {}'.format(type(key).__name__)
- )
-
- def __hash__(self):
- if self:
- return hash((self._start, self._get_by_index(-1), self._step))
- else:
- return self._EMPTY_HASH
-
- def __iter__(self):
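-        # partial(gt, self._stop)(x) computes ``self._stop > x``, so
-        # takewhile keeps values that haven't reached *stop* yet;
-        # partial(lt, ...) does the same for negative steps.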
- values = (self._start + (n * self._step) for n in count())
- if self._growing:
- return takewhile(partial(gt, self._stop), values)
- else:
- return takewhile(partial(lt, self._stop), values)
-
- def __len__(self):
- return self._len
-
- def _init_len(self):
- if self._growing:
- start = self._start
- stop = self._stop
- step = self._step
- else:
- start = self._stop
- stop = self._start
- step = -self._step
- distance = stop - start
- if distance <= self._zero:
- self._len = 0
- else: # distance > 0 and step > 0: regular euclidean division
- q, r = divmod(distance, step)
- self._len = int(q) + int(r != self._zero)
-
- def __reduce__(self):
- return numeric_range, (self._start, self._stop, self._step)
-
- def __repr__(self):
- if self._step == 1:
- return "numeric_range({}, {})".format(
- repr(self._start), repr(self._stop)
- )
- else:
- return "numeric_range({}, {}, {})".format(
- repr(self._start), repr(self._stop), repr(self._step)
- )
-
- def __reversed__(self):
- return iter(
- numeric_range(
- self._get_by_index(-1), self._start - self._step, -self._step
- )
- )
-
- def count(self, value):
- return int(value in self)
-
- def index(self, value):
- if self._growing:
- if self._start <= value < self._stop:
- q, r = divmod(value - self._start, self._step)
- if r == self._zero:
- return int(q)
- else:
- if self._start >= value > self._stop:
- q, r = divmod(self._start - value, -self._step)
- if r == self._zero:
- return int(q)
-
- raise ValueError("{} is not in numeric range".format(value))
-
- def _get_by_index(self, i):
- if i < 0:
- i += self._len
- if i < 0 or i >= self._len:
- raise IndexError("numeric range object index out of range")
- return self._start + i * self._step
-
-
-def count_cycle(iterable, n=None):
- """Cycle through the items from *iterable* up to *n* times, yielding
- the number of completed cycles along with each item. If *n* is omitted the
- process repeats indefinitely.
-
- >>> list(count_cycle('AB', 3))
- [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
-
- """
- iterable = tuple(iterable)
- if not iterable:
- return iter(())
- counter = count() if n is None else range(n)
- return ((i, item) for i in counter for item in iterable)
-
-
-def mark_ends(iterable):
- """Yield 3-tuples of the form ``(is_first, is_last, item)``.
-
- >>> list(mark_ends('ABC'))
- [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
-
- Use this when looping over an iterable to take special action on its first
- and/or last items:
-
- >>> iterable = ['Header', 100, 200, 'Footer']
- >>> total = 0
- >>> for is_first, is_last, item in mark_ends(iterable):
- ... if is_first:
- ... continue # Skip the header
- ... if is_last:
- ... continue # Skip the footer
- ... total += item
- >>> print(total)
- 300
- """
- it = iter(iterable)
-
- try:
- b = next(it)
- except StopIteration:
- return
-
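-    # *b* is a one-item lookahead; when the source runs out, the pending
-    # item *a* is known to be the last.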
- try:
- for i in count():
- a = b
- b = next(it)
- yield i == 0, False, a
-
- except StopIteration:
- yield i == 0, True, a
-
-
-def locate(iterable, pred=bool, window_size=None):
- """Yield the index of each item in *iterable* for which *pred* returns
- ``True``.
-
- *pred* defaults to :func:`bool`, which will select truthy items:
-
- >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
- [1, 2, 4]
-
- Set *pred* to a custom function to, e.g., find the indexes for a particular
- item.
-
- >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
- [1, 3]
-
- If *window_size* is given, then the *pred* function will be called with
- that many items. This enables searching for sub-sequences:
-
- >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
- >>> pred = lambda *args: args == (1, 2, 3)
- >>> list(locate(iterable, pred=pred, window_size=3))
- [1, 5, 9]
-
- Use with :func:`seekable` to find indexes and then retrieve the associated
- items:
-
- >>> from itertools import count
- >>> from more_itertools import seekable
- >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
- >>> it = seekable(source)
- >>> pred = lambda x: x > 100
- >>> indexes = locate(it, pred=pred)
- >>> i = next(indexes)
- >>> it.seek(i)
- >>> next(it)
- 106
-
- """
- if window_size is None:
- return compress(count(), map(pred, iterable))
-
- if window_size < 1:
- raise ValueError('window size must be at least 1')
-
- it = windowed(iterable, window_size, fillvalue=_marker)
- return compress(count(), starmap(pred, it))
-
-
-def lstrip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the beginning
- for which *pred* returns ``True``.
-
- For example, to remove a set of items from the start of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(lstrip(iterable, pred))
- [1, 2, None, 3, False, None]
-
-    This function is analogous to :func:`str.lstrip`, and is essentially
-    a wrapper for :func:`itertools.dropwhile`.
-
- """
- return dropwhile(pred, iterable)
-
-
-def rstrip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the end
- for which *pred* returns ``True``.
-
- For example, to remove a set of items from the end of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(rstrip(iterable, pred))
- [None, False, None, 1, 2, None, 3]
-
- This function is analogous to :func:`str.rstrip`.
-
- """
- cache = []
- cache_append = cache.append
- cache_clear = cache.clear
- for x in iterable:
- if pred(x):
- cache_append(x)
- else:
- yield from cache
- cache_clear()
- yield x
-
-
-def strip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the
- beginning and end for which *pred* returns ``True``.
-
- For example, to remove a set of items from both ends of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(strip(iterable, pred))
- [1, 2, None, 3]
-
- This function is analogous to :func:`str.strip`.
-
- """
- return rstrip(lstrip(iterable, pred), pred)
-
-
-class islice_extended:
- """An extension of :func:`itertools.islice` that supports negative values
- for *stop*, *start*, and *step*.
-
- >>> iterable = iter('abcdefgh')
- >>> list(islice_extended(iterable, -4, -1))
- ['e', 'f', 'g']
-
- Slices with negative values require some caching of *iterable*, but this
- function takes care to minimize the amount of memory required.
-
- For example, you can use a negative step with an infinite iterator:
-
- >>> from itertools import count
- >>> list(islice_extended(count(), 110, 99, -2))
- [110, 108, 106, 104, 102, 100]
-
- You can also use slice notation directly:
-
- >>> iterable = map(str, count())
- >>> it = islice_extended(iterable)[10:20:2]
- >>> list(it)
- ['10', '12', '14', '16', '18']
-
- """
-
- def __init__(self, iterable, *args):
- it = iter(iterable)
- if args:
- self._iterable = _islice_helper(it, slice(*args))
- else:
- self._iterable = it
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iterable)
-
- def __getitem__(self, key):
- if isinstance(key, slice):
- return islice_extended(_islice_helper(self._iterable, key))
-
- raise TypeError('islice_extended.__getitem__ argument must be a slice')
-
-
-def _islice_helper(it, s):
- start = s.start
- stop = s.stop
- if s.step == 0:
- raise ValueError('step argument must be a non-zero integer or None.')
- step = s.step or 1
-
- if step > 0:
- start = 0 if (start is None) else start
-
- if start < 0:
- # Consume all but the last -start items
- cache = deque(enumerate(it, 1), maxlen=-start)
- len_iter = cache[-1][0] if cache else 0
-
- # Adjust start to be positive
- i = max(len_iter + start, 0)
-
- # Adjust stop to be positive
- if stop is None:
- j = len_iter
- elif stop >= 0:
- j = min(stop, len_iter)
- else:
- j = max(len_iter + stop, 0)
-
- # Slice the cache
- n = j - i
- if n <= 0:
- return
-
- for index, item in islice(cache, 0, n, step):
- yield item
- elif (stop is not None) and (stop < 0):
- # Advance to the start position
- next(islice(it, start, start), None)
-
- # When stop is negative, we have to carry -stop items while
- # iterating
- cache = deque(islice(it, -stop), maxlen=-stop)
-
- for index, item in enumerate(it):
- cached_item = cache.popleft()
- if index % step == 0:
- yield cached_item
- cache.append(item)
- else:
- # When both start and stop are positive we have the normal case
- yield from islice(it, start, stop, step)
- else:
- start = -1 if (start is None) else start
-
- if (stop is not None) and (stop < 0):
- # Consume all but the last items
- n = -stop - 1
- cache = deque(enumerate(it, 1), maxlen=n)
- len_iter = cache[-1][0] if cache else 0
-
- # If start and stop are both negative they are comparable and
- # we can just slice. Otherwise we can adjust start to be negative
- # and then slice.
- if start < 0:
- i, j = start, stop
- else:
- i, j = min(start - len_iter, -1), None
-
- for index, item in list(cache)[i:j:step]:
- yield item
- else:
- # Advance to the stop position
- if stop is not None:
- m = stop + 1
- next(islice(it, m, m), None)
-
- # stop is positive, so if start is negative they are not comparable
- # and we need the rest of the items.
- if start < 0:
- i = start
- n = None
- # stop is None and start is positive, so we just need items up to
- # the start index.
- elif stop is None:
- i = None
- n = start + 1
- # Both stop and start are positive, so they are comparable.
- else:
- i = None
- n = start - stop
- if n <= 0:
- return
-
- cache = list(islice(it, n))
-
- yield from cache[i::step]
-
-
-def always_reversible(iterable):
- """An extension of :func:`reversed` that supports all iterables, not
- just those which implement the ``Reversible`` or ``Sequence`` protocols.
-
- >>> print(*always_reversible(x for x in range(3)))
- 2 1 0
-
- If the iterable is already reversible, this function returns the
- result of :func:`reversed()`. If the iterable is not reversible,
- this function will cache the remaining items in the iterable and
- yield them in reverse order, which may require significant storage.
- """
- try:
- return reversed(iterable)
- except TypeError:
- return reversed(list(iterable))
-
-
-def consecutive_groups(iterable, ordering=lambda x: x):
- """Yield groups of consecutive items using :func:`itertools.groupby`.
- The *ordering* function determines whether two items are adjacent by
- returning their position.
-
- By default, the ordering function is the identity function. This is
- suitable for finding runs of numbers:
-
- >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
- >>> for group in consecutive_groups(iterable):
- ... print(list(group))
- [1]
- [10, 11, 12]
- [20]
- [30, 31, 32, 33]
- [40]
-
- For finding runs of adjacent letters, try using the :meth:`index` method
- of a string of letters:
-
- >>> from string import ascii_lowercase
- >>> iterable = 'abcdfgilmnop'
- >>> ordering = ascii_lowercase.index
- >>> for group in consecutive_groups(iterable, ordering):
- ... print(list(group))
- ['a', 'b', 'c', 'd']
- ['f', 'g']
- ['i']
- ['l', 'm', 'n', 'o', 'p']
-
-    Each group of consecutive items is an iterator that shares its source
-    with *iterable*. When an output group is advanced, the previous group is
- no longer available unless its elements are copied (e.g., into a ``list``).
-
- >>> iterable = [1, 2, 11, 12, 21, 22]
- >>> saved_groups = []
- >>> for group in consecutive_groups(iterable):
- ... saved_groups.append(list(group)) # Copy group elements
- >>> saved_groups
- [[1, 2], [11, 12], [21, 22]]
-
- """
- for k, g in groupby(
- enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
- ):
- yield map(itemgetter(1), g)
-
-
-def difference(iterable, func=sub, *, initial=None):
- """This function is the inverse of :func:`itertools.accumulate`. By default
- it will compute the first difference of *iterable* using
- :func:`operator.sub`:
-
- >>> from itertools import accumulate
- >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
- >>> list(difference(iterable))
- [0, 1, 2, 3, 4]
-
- *func* defaults to :func:`operator.sub`, but other functions can be
- specified. They will be applied as follows::
-
- A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
-
- For example, to do progressive division:
-
- >>> iterable = [1, 2, 6, 24, 120]
- >>> func = lambda x, y: x // y
- >>> list(difference(iterable, func))
- [1, 2, 3, 4, 5]
-
- If the *initial* keyword is set, the first element will be skipped when
- computing successive differences.
-
- >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
- >>> list(difference(it, initial=10))
- [1, 2, 3]
-
- """
- a, b = tee(iterable)
- try:
- first = [next(b)]
- except StopIteration:
- return iter([])
-
- if initial is not None:
- first = []
-
- return chain(first, starmap(func, zip(b, a)))
-
-
-class SequenceView(Sequence):
- """Return a read-only view of the sequence object *target*.
-
- :class:`SequenceView` objects are analogous to Python's built-in
- "dictionary view" types. They provide a dynamic view of a sequence's items,
- meaning that when the sequence updates, so does the view.
-
- >>> seq = ['0', '1', '2']
- >>> view = SequenceView(seq)
- >>> view
- SequenceView(['0', '1', '2'])
- >>> seq.append('3')
- >>> view
- SequenceView(['0', '1', '2', '3'])
-
- Sequence views support indexing, slicing, and length queries. They act
- like the underlying sequence, except they don't allow assignment:
-
- >>> view[1]
- '1'
- >>> view[1:-1]
- ['1', '2']
- >>> len(view)
- 4
-
- Sequence views are useful as an alternative to copying, as they don't
- require (much) extra storage.
-
- """
-
- def __init__(self, target):
- if not isinstance(target, Sequence):
- raise TypeError
- self._target = target
-
- def __getitem__(self, index):
- return self._target[index]
-
- def __len__(self):
- return len(self._target)
-
- def __repr__(self):
- return '{}({})'.format(self.__class__.__name__, repr(self._target))
-
-
-class seekable:
- """Wrap an iterator to allow for seeking backward and forward. This
- progressively caches the items in the source iterable so they can be
- re-visited.
-
- Call :meth:`seek` with an index to seek to that position in the source
- iterable.
-
- To "reset" an iterator, seek to ``0``:
-
- >>> from itertools import count
- >>> it = seekable((str(n) for n in count()))
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> it.seek(0)
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> next(it)
- '3'
-
- You can also seek forward:
-
- >>> it = seekable((str(n) for n in range(20)))
- >>> it.seek(10)
- >>> next(it)
- '10'
- >>> it.seek(20) # Seeking past the end of the source isn't a problem
- >>> list(it)
- []
- >>> it.seek(0) # Resetting works even after hitting the end
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
-
- Call :meth:`peek` to look ahead one item without advancing the iterator:
-
- >>> it = seekable('1234')
- >>> it.peek()
- '1'
- >>> list(it)
- ['1', '2', '3', '4']
- >>> it.peek(default='empty')
- 'empty'
-
- Before the iterator is at its end, calling :func:`bool` on it will return
- ``True``. Afterward, it will return ``False``:
-
- >>> it = seekable('5678')
- >>> bool(it)
- True
- >>> list(it)
- ['5', '6', '7', '8']
- >>> bool(it)
- False
-
- You may view the contents of the cache with the :meth:`elements` method.
- That returns a :class:`SequenceView`, a view that updates automatically:
-
- >>> it = seekable((str(n) for n in range(10)))
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> elements = it.elements()
- >>> elements
- SequenceView(['0', '1', '2'])
- >>> next(it)
- '3'
- >>> elements
- SequenceView(['0', '1', '2', '3'])
-
- By default, the cache grows as the source iterable progresses, so beware of
- wrapping very large or infinite iterables. Supply *maxlen* to limit the
- size of the cache (this of course limits how far back you can seek).
-
- >>> from itertools import count
- >>> it = seekable((str(n) for n in count()), maxlen=2)
- >>> next(it), next(it), next(it), next(it)
- ('0', '1', '2', '3')
- >>> list(it.elements())
- ['2', '3']
- >>> it.seek(0)
- >>> next(it), next(it), next(it), next(it)
- ('2', '3', '4', '5')
- >>> next(it)
- '6'
-
- """
-
- def __init__(self, iterable, maxlen=None):
- self._source = iter(iterable)
- if maxlen is None:
- self._cache = []
- else:
- self._cache = deque([], maxlen)
- self._index = None
-
- def __iter__(self):
- return self
-
- def __next__(self):
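- # While a seek is in effect, replay items from the cache before
- # drawing from the source.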
- if self._index is not None:
- try:
- item = self._cache[self._index]
- except IndexError:
- self._index = None
- else:
- self._index += 1
- return item
-
- item = next(self._source)
- self._cache.append(item)
- return item
-
- def __bool__(self):
- try:
- self.peek()
- except StopIteration:
- return False
- return True
-
- def peek(self, default=_marker):
- try:
- peeked = next(self)
- except StopIteration:
- if default is _marker:
- raise
- return default
- if self._index is None:
- self._index = len(self._cache)
- self._index -= 1
- return peeked
-
- def elements(self):
- return SequenceView(self._cache)
-
- def seek(self, index):
- self._index = index
- remainder = index - len(self._cache)
- if remainder > 0:
- consume(self, remainder)
-
-
-class run_length:
- """
- :func:`run_length.encode` compresses an iterable with run-length encoding.
- It yields groups of repeated items with the count of how many times they
- were repeated:
-
- >>> uncompressed = 'abbcccdddd'
- >>> list(run_length.encode(uncompressed))
- [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
-
- :func:`run_length.decode` decompresses an iterable that was previously
- compressed with run-length encoding. It yields the items of the
- decompressed iterable:
-
- >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
- >>> list(run_length.decode(compressed))
- ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
-
- """
-
- @staticmethod
- def encode(iterable):
- return ((k, ilen(g)) for k, g in groupby(iterable))
-
- @staticmethod
- def decode(iterable):
- return chain.from_iterable(repeat(k, n) for k, n in iterable)
-
-
-def exactly_n(iterable, n, predicate=bool):
- """Return ``True`` if exactly ``n`` items in the iterable are ``True``
- according to the *predicate* function.
-
- >>> exactly_n([True, True, False], 2)
- True
- >>> exactly_n([True, True, False], 1)
- False
- >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
- True
-
- The iterable will be advanced until ``n + 1`` truthy items are encountered,
- so avoid calling it on infinite iterables.
-
- """
- return len(take(n + 1, filter(predicate, iterable))) == n
-
-
-def circular_shifts(iterable):
- """Return a list of circular shifts of *iterable*.
-
- >>> circular_shifts(range(4))
- [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
- """
- lst = list(iterable)
- return take(len(lst), windowed(cycle(lst), len(lst)))
-
-
-def make_decorator(wrapping_func, result_index=0):
- """Return a decorator version of *wrapping_func*, which is a function that
- modifies an iterable. *result_index* is the position in that function's
- signature where the iterable goes.
-
- This lets you use itertools on the "production end," i.e. at function
- definition. This can augment what the function returns without changing the
- function's code.
-
- For example, to produce a decorator version of :func:`chunked`:
-
- >>> from more_itertools import chunked
- >>> chunker = make_decorator(chunked, result_index=0)
- >>> @chunker(3)
- ... def iter_range(n):
- ... return iter(range(n))
- ...
- >>> list(iter_range(9))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
-
- To only allow truthy items to be returned:
-
- >>> truth_serum = make_decorator(filter, result_index=1)
- >>> @truth_serum(bool)
- ... def boolean_test():
- ... return [0, 1, '', ' ', False, True]
- ...
- >>> list(boolean_test())
- [1, ' ', True]
-
- The :func:`peekable` and :func:`seekable` wrappers make for practical
- decorators:
-
- >>> from more_itertools import peekable
- >>> peekable_function = make_decorator(peekable)
- >>> @peekable_function()
- ... def str_range(*args):
- ... return (str(x) for x in range(*args))
- ...
- >>> it = str_range(1, 20, 2)
- >>> next(it), next(it), next(it)
- ('1', '3', '5')
- >>> it.peek()
- '7'
- >>> next(it)
- '7'
-
- """
- # See https://sites.google.com/site/bbayles/index/decorator_factory for
- # notes on how this works.
- def decorator(*wrapping_args, **wrapping_kwargs):
- def outer_wrapper(f):
- def inner_wrapper(*args, **kwargs):
- result = f(*args, **kwargs)
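- # Splice the wrapped function's result into wrapping_func's
- # positional arguments at result_index.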
- wrapping_args_ = list(wrapping_args)
- wrapping_args_.insert(result_index, result)
- return wrapping_func(*wrapping_args_, **wrapping_kwargs)
-
- return inner_wrapper
-
- return outer_wrapper
-
- return decorator
-
-
-def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
- """Return a dictionary that maps the items in *iterable* to categories
- defined by *keyfunc*, transforms them with *valuefunc*, and
- then summarizes them by category with *reducefunc*.
-
- *valuefunc* defaults to the identity function if it is unspecified.
- If *reducefunc* is unspecified, no summarization takes place:
-
- >>> keyfunc = lambda x: x.upper()
- >>> result = map_reduce('abbccc', keyfunc)
- >>> sorted(result.items())
- [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
-
- Specifying *valuefunc* transforms the categorized items:
-
- >>> keyfunc = lambda x: x.upper()
- >>> valuefunc = lambda x: 1
- >>> result = map_reduce('abbccc', keyfunc, valuefunc)
- >>> sorted(result.items())
- [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
-
- Specifying *reducefunc* summarizes the categorized items:
-
- >>> keyfunc = lambda x: x.upper()
- >>> valuefunc = lambda x: 1
- >>> reducefunc = sum
- >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
- >>> sorted(result.items())
- [('A', 1), ('B', 2), ('C', 3)]
-
- You may want to filter the input iterable before applying the map/reduce
- procedure:
-
- >>> all_items = range(30)
- >>> items = [x for x in all_items if 10 <= x <= 20] # Filter
- >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
- >>> categories = map_reduce(items, keyfunc=keyfunc)
- >>> sorted(categories.items())
- [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
- >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
- >>> sorted(summaries.items())
- [(0, 90), (1, 75)]
-
- Note that all items in the iterable are gathered into a list before the
- summarization step, which may require significant storage.
-
- The returned object is a :obj:`collections.defaultdict` with the
- ``default_factory`` set to ``None``, such that it behaves like a normal
- dictionary.
-
- """
- valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
-
- ret = defaultdict(list)
- for item in iterable:
- key = keyfunc(item)
- value = valuefunc(item)
- ret[key].append(value)
-
- if reducefunc is not None:
- for key, value_list in ret.items():
- ret[key] = reducefunc(value_list)
-
- ret.default_factory = None
- return ret
-
-
-def rlocate(iterable, pred=bool, window_size=None):
- """Yield the index of each item in *iterable* for which *pred* returns
- ``True``, starting from the right and moving left.
-
- *pred* defaults to :func:`bool`, which will select truthy items:
-
- >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
- [4, 2, 1]
-
- Set *pred* to a custom function to, e.g., find the indexes for a particular
- item:
-
- >>> iterable = iter('abcb')
- >>> pred = lambda x: x == 'b'
- >>> list(rlocate(iterable, pred))
- [3, 1]
-
- If *window_size* is given, then the *pred* function will be called with
- that many items. This enables searching for sub-sequences:
-
- >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
- >>> pred = lambda *args: args == (1, 2, 3)
- >>> list(rlocate(iterable, pred=pred, window_size=3))
- [9, 5, 1]
-
- Beware, this function won't return anything for infinite iterables.
- If *iterable* is reversible, ``rlocate`` will reverse it and search from
- the right. Otherwise, it will search from the left and return the results
- in reverse order.
-
- See :func:`locate` for other example applications.
-
- """
- if window_size is None:
- try:
- len_iter = len(iterable)
- return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
- except TypeError:
- pass
-
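- # Fall back: search from the left and reverse the matches, which
- # requires storing every match in a list first.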
- return reversed(list(locate(iterable, pred, window_size)))
-
-
-def replace(iterable, pred, substitutes, count=None, window_size=1):
- """Yield the items from *iterable*, replacing the items for which *pred*
- returns ``True`` with the items from the iterable *substitutes*.
-
- >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
- >>> pred = lambda x: x == 0
- >>> substitutes = (2, 3)
- >>> list(replace(iterable, pred, substitutes))
- [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
-
- If *count* is given, the number of replacements will be limited:
-
- >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
- >>> pred = lambda x: x == 0
- >>> substitutes = [None]
- >>> list(replace(iterable, pred, substitutes, count=2))
- [1, 1, None, 1, 1, None, 1, 1, 0]
-
- Use *window_size* to control the number of items passed as arguments to
- *pred*. This allows for locating and replacing subsequences.
-
- >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
- >>> window_size = 3
- >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
- >>> substitutes = [3, 4] # Splice in these items
- >>> list(replace(iterable, pred, substitutes, window_size=window_size))
- [3, 4, 5, 3, 4, 5]
-
- """
- if window_size < 1:
- raise ValueError('window_size must be at least 1')
-
- # Save the substitutes iterable, since it's used more than once
- substitutes = tuple(substitutes)
-
- # Add padding such that the number of windows matches the length of the
- # iterable
- it = chain(iterable, [_marker] * (window_size - 1))
- windows = windowed(it, window_size)
-
- n = 0
- for w in windows:
- # If the current window matches our predicate (and we haven't hit
- # our maximum number of replacements), splice in the substitutes
- # and then consume the following windows that overlap with this one.
- # For example, if the iterable is (0, 1, 2, 3, 4...)
- # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
- if pred(*w):
- if (count is None) or (n < count):
- n += 1
- yield from substitutes
- consume(windows, window_size - 1)
- continue
-
- # If there was no match (or we've reached the replacement limit),
- # yield the first item from the window.
- if w and (w[0] is not _marker):
- yield w[0]
-
-
-def partitions(iterable):
- """Yield all possible order-preserving partitions of *iterable*.
-
- >>> iterable = 'abc'
- >>> for part in partitions(iterable):
- ... print([''.join(p) for p in part])
- ['abc']
- ['a', 'bc']
- ['ab', 'c']
- ['a', 'b', 'c']
-
- This is unrelated to :func:`partition`.
-
- """
- sequence = list(iterable)
- n = len(sequence)
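- # Each subset of the interior positions 1..n-1 is a set of cut points;
- # slicing between consecutive cut points yields one partition.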
- for i in powerset(range(1, n)):
- yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
-
-
-def set_partitions(iterable, k=None):
- """
- Yield the set partitions of *iterable* into *k* parts. Set partitions are
- not order-preserving.
-
- >>> iterable = 'abc'
- >>> for part in set_partitions(iterable, 2):
- ... print([''.join(p) for p in part])
- ['a', 'bc']
- ['ab', 'c']
- ['b', 'ac']
-
-
- If *k* is not given, every set partition is generated.
-
- >>> iterable = 'abc'
- >>> for part in set_partitions(iterable):
- ... print([''.join(p) for p in part])
- ['abc']
- ['a', 'bc']
- ['ab', 'c']
- ['b', 'ac']
- ['a', 'b', 'c']
-
- """
- L = list(iterable)
- n = len(L)
- if k is not None:
- if k < 1:
- raise ValueError(
- "Can't partition in a negative or zero number of groups"
- )
- elif k > n:
- return
-
- def set_partitions_helper(L, k):
- n = len(L)
- if k == 1:
- yield [L]
- elif n == k:
- yield [[s] for s in L]
- else:
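- # Either e forms its own block beside a (k - 1)-partition of the
- # rest, or e joins one block of a k-partition of the rest.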
- e, *M = L
- for p in set_partitions_helper(M, k - 1):
- yield [[e], *p]
- for p in set_partitions_helper(M, k):
- for i in range(len(p)):
- yield p[:i] + [[e] + p[i]] + p[i + 1 :]
-
- if k is None:
- for k in range(1, n + 1):
- yield from set_partitions_helper(L, k)
- else:
- yield from set_partitions_helper(L, k)
-
-
-class time_limited:
- """
- Yield items from *iterable* until *limit_seconds* have passed.
- If the time limit expires before all items have been yielded, the
- ``timed_out`` parameter will be set to ``True``.
-
- >>> from time import sleep
- >>> def generator():
- ... yield 1
- ... yield 2
- ... sleep(0.2)
- ... yield 3
- >>> iterable = time_limited(0.1, generator())
- >>> list(iterable)
- [1, 2]
- >>> iterable.timed_out
- True
-
- Note that the time is checked before each item is yielded, and iteration
- stops if the time elapsed is greater than *limit_seconds*. If your time
- limit is 1 second, but it takes 2 seconds to generate the first item from
- the iterable, the function will run for 2 seconds and not yield anything.
-
- """
-
- def __init__(self, limit_seconds, iterable):
- if limit_seconds < 0:
- raise ValueError('limit_seconds must be non-negative')
- self.limit_seconds = limit_seconds
- self._iterable = iter(iterable)
- self._start_time = monotonic()
- self.timed_out = False
-
- def __iter__(self):
- return self
-
- def __next__(self):
- item = next(self._iterable)
- if monotonic() - self._start_time > self.limit_seconds:
- self.timed_out = True
- raise StopIteration
-
- return item
-
-
-def only(iterable, default=None, too_long=None):
- """If *iterable* has only one item, return it.
- If it has zero items, return *default*.
- If it has more than one item, raise the exception given by *too_long*,
- which is ``ValueError`` by default.
-
- >>> only([], default='missing')
- 'missing'
- >>> only([1])
- 1
- >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: Expected exactly one item in iterable, but got 1, 2,
- and perhaps more.
- >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- TypeError
-
- Note that :func:`only` attempts to advance *iterable* twice to ensure there
- is only one item. See :func:`spy` or :func:`peekable` to check
- iterable contents less destructively.
- """
- it = iter(iterable)
- first_value = next(it, default)
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- 'Expected exactly one item in iterable, but got {!r}, {!r}, '
- 'and perhaps more.'.format(first_value, second_value)
- )
- raise too_long or ValueError(msg)
-
- return first_value
-
-
-def ichunked(iterable, n):
- """Break *iterable* into sub-iterables with *n* elements each.
- :func:`ichunked` is like :func:`chunked`, but it yields iterables
- instead of lists.
-
- If the sub-iterables are read in order, the elements of *iterable*
- won't be stored in memory.
- If they are read out of order, :func:`itertools.tee` is used to cache
- elements as necessary.
-
- >>> from itertools import count
- >>> all_chunks = ichunked(count(), 4)
- >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
- >>> list(c_2) # c_1's elements have been cached; c_3's haven't been
- [4, 5, 6, 7]
- >>> list(c_1)
- [0, 1, 2, 3]
- >>> list(c_3)
- [8, 9, 10, 11]
-
- """
- source = iter(iterable)
-
- while True:
- # Check to see whether we're at the end of the source iterable
- item = next(source, _marker)
- if item is _marker:
- return
-
- # Clone the source and yield an n-length slice
- source, it = tee(chain([item], source))
- yield islice(it, n)
-
- # Advance the source iterable
- consume(source, n)
-
-
-def distinct_combinations(iterable, r):
- """Yield the distinct combinations of *r* items taken from *iterable*.
-
- >>> list(distinct_combinations([0, 0, 1], 2))
- [(0, 0), (0, 1)]
-
- Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
- generated and thrown away. For larger input sequences this is much more
- efficient.
-
- """
- if r < 0:
- raise ValueError('r must be non-negative')
- elif r == 0:
- yield ()
- return
- pool = tuple(iterable)
- generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
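- # Maintain a stack of generators, one per combination slot; each
- # yields the distinct items that may fill its slot given the choices
- # in the slots before it.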
- current_combo = [None] * r
- level = 0
- while generators:
- try:
- cur_idx, p = next(generators[-1])
- except StopIteration:
- generators.pop()
- level -= 1
- continue
- current_combo[level] = p
- if level + 1 == r:
- yield tuple(current_combo)
- else:
- generators.append(
- unique_everseen(
- enumerate(pool[cur_idx + 1 :], cur_idx + 1),
- key=itemgetter(1),
- )
- )
- level += 1
-
-
-def filter_except(validator, iterable, *exceptions):
- """Yield the items from *iterable* for which the *validator* function does
- not raise one of the specified *exceptions*.
-
- *validator* is called for each item in *iterable*.
- It should be a function that accepts one argument and raises an exception
- if that item is not valid.
-
- >>> iterable = ['1', '2', 'three', '4', None]
- >>> list(filter_except(int, iterable, ValueError, TypeError))
- ['1', '2', '4']
-
- If an exception other than one given by *exceptions* is raised by
- *validator*, it is raised like normal.
- """
- for item in iterable:
- try:
- validator(item)
- except exceptions:
- pass
- else:
- yield item
-
-
-def map_except(function, iterable, *exceptions):
- """Transform each item from *iterable* with *function* and yield the
- result, unless *function* raises one of the specified *exceptions*.
-
- *function* is called to transform each item in *iterable*.
- It should accept one argument.
-
- >>> iterable = ['1', '2', 'three', '4', None]
- >>> list(map_except(int, iterable, ValueError, TypeError))
- [1, 2, 4]
-
- If an exception other than one given by *exceptions* is raised by
- *function*, it is raised like normal.
- """
- for item in iterable:
- try:
- yield function(item)
- except exceptions:
- pass
-
-
-def _sample_unweighted(iterable, k):
- # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
- # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
-
- # Fill up the reservoir (collection of samples) with the first `k` samples
- reservoir = take(k, iterable)
-
- # Generate random number that's the largest in a sample of k U(0,1) numbers
- # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
- W = exp(log(random()) / k)
-
- # The number of elements to skip before changing the reservoir is a random
- # number with a geometric distribution. Sample it using random() and logs.
- next_index = k + floor(log(random()) / log(1 - W))
-
- for index, element in enumerate(iterable, k):
-
- if index == next_index:
- reservoir[randrange(k)] = element
- # The new W is the largest in a sample of k U(0, `old_W`) numbers
- W *= exp(log(random()) / k)
- next_index += floor(log(random()) / log(1 - W)) + 1
-
- return reservoir
-
-
-def _sample_weighted(iterable, k, weights):
- # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
- # "Weighted random sampling with a reservoir".
-
- # Log-transform for numerical stability for weights that are small/large
- weight_keys = (log(random()) / weight for weight in weights)
-
- # Fill up the reservoir (collection of samples) with the first `k`
- # weight-keys and elements, then heapify the list.
- reservoir = take(k, zip(weight_keys, iterable))
- heapify(reservoir)
-
- # The number of jumps before changing the reservoir is a random variable
- # with an exponential distribution. Sample it using random() and logs.
- smallest_weight_key, _ = reservoir[0]
- weights_to_skip = log(random()) / smallest_weight_key
-
- for weight, element in zip(weights, iterable):
- if weight >= weights_to_skip:
- # The notation here is consistent with the paper, but we store
- # the weight-keys in log-space for better numerical stability.
- smallest_weight_key, _ = reservoir[0]
- t_w = exp(weight * smallest_weight_key)
- r_2 = uniform(t_w, 1) # generate U(t_w, 1)
- weight_key = log(r_2) / weight
- heapreplace(reservoir, (weight_key, element))
- smallest_weight_key, _ = reservoir[0]
- weights_to_skip = log(random()) / smallest_weight_key
- else:
- weights_to_skip -= weight
-
- # Equivalent to [element for weight_key, element in sorted(reservoir)]
- return [heappop(reservoir)[1] for _ in range(k)]
-
-
-def sample(iterable, k, weights=None):
- """Return a *k*-length list of elements chosen (without replacement)
- from the *iterable*. Like :func:`random.sample`, but works on iterables
- of unknown length.
-
- >>> iterable = range(100)
- >>> sample(iterable, 5) # doctest: +SKIP
- [81, 60, 96, 16, 4]
-
- An iterable with *weights* may also be given:
-
- >>> iterable = range(100)
- >>> weights = (i * i + 1 for i in range(100))
- >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP
- [79, 67, 74, 66, 78]
-
- The algorithm can also be used to generate weighted random permutations.
- The relative weight of each item determines the probability that it
- appears late in the permutation.
-
- >>> data = "abcdefgh"
- >>> weights = range(1, len(data) + 1)
- >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
- ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
- """
- if k == 0:
- return []
-
- iterable = iter(iterable)
- if weights is None:
- return _sample_unweighted(iterable, k)
- else:
- weights = iter(weights)
- return _sample_weighted(iterable, k, weights)
-
-
-def is_sorted(iterable, key=None, reverse=False):
- """Returns ``True`` if the items of iterable are in sorted order, and
- ``False`` otherwise. *key* and *reverse* have the same meaning that they do
- in the built-in :func:`sorted` function.
-
- >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
- True
- >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
- False
-
- The function returns ``False`` after encountering the first out-of-order
- item. If there are no out-of-order items, the iterable is exhausted.
- """
-
- compare = lt if reverse else gt
- it = iterable if (key is None) else map(key, iterable)
- return not any(starmap(compare, pairwise(it)))
-
-
-class AbortThread(BaseException):
- pass
-
-
-class callback_iter:
- """Convert a function that uses callbacks to an iterator.
-
- Let *func* be a function that takes a `callback` keyword argument.
- For example:
-
- >>> def func(callback=None):
- ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
- ... if callback:
- ... callback(i, c)
- ... return 4
-
-
- Use ``with callback_iter(func)`` to get an iterator over the parameters
- that are delivered to the callback.
-
- >>> with callback_iter(func) as it:
- ... for args, kwargs in it:
- ... print(args)
- (1, 'a')
- (2, 'b')
- (3, 'c')
-
- The function will be called in a background thread. The ``done`` property
- indicates whether it has completed execution.
-
- >>> it.done
- True
-
- If it completes successfully, its return value will be available
- in the ``result`` property.
-
- >>> it.result
- 4
-
- Notes:
-
- * If the function uses some keyword argument besides ``callback``, supply
- *callback_kwd*.
- * If it finished executing, but raised an exception, accessing the
- ``result`` property will raise the same exception.
- * If it hasn't finished executing, accessing the ``result``
- property from within the ``with`` block will raise ``RuntimeError``.
- * If it hasn't finished executing, accessing the ``result`` property from
- outside the ``with`` block will raise a
- ``more_itertools.AbortThread`` exception.
- * Provide *wait_seconds* to adjust how frequently it is polled for
- output.
-
- """
-
- def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
- self._func = func
- self._callback_kwd = callback_kwd
- self._aborted = False
- self._future = None
- self._wait_seconds = wait_seconds
- self._executor = ThreadPoolExecutor(max_workers=1)
- self._iterator = self._reader()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self._aborted = True
- self._executor.shutdown()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iterator)
-
- @property
- def done(self):
- if self._future is None:
- return False
- return self._future.done()
-
- @property
- def result(self):
- if not self.done:
- raise RuntimeError('Function has not yet completed')
-
- return self._future.result()
-
- def _reader(self):
- q = Queue()
-
- def callback(*args, **kwargs):
- if self._aborted:
- raise AbortThread('canceled by user')
-
- q.put((args, kwargs))
-
- self._future = self._executor.submit(
- self._func, **{self._callback_kwd: callback}
- )
-
- while True:
- try:
- item = q.get(timeout=self._wait_seconds)
- except Empty:
- pass
- else:
- q.task_done()
- yield item
-
- if self._future.done():
- break
-
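- # The function has finished; drain whatever is left in the queue.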
- remaining = []
- while True:
- try:
- item = q.get_nowait()
- except Empty:
- break
- else:
- q.task_done()
- remaining.append(item)
- q.join()
- yield from remaining
-
-
-def windowed_complete(iterable, n):
- """
- Yield ``(beginning, middle, end)`` tuples, where:
-
- * Each ``middle`` has *n* items from *iterable*
- * Each ``beginning`` has the items before the ones in ``middle``
- * Each ``end`` has the items after the ones in ``middle``
-
- >>> iterable = range(7)
- >>> n = 3
- >>> for beginning, middle, end in windowed_complete(iterable, n):
- ... print(beginning, middle, end)
- () (0, 1, 2) (3, 4, 5, 6)
- (0,) (1, 2, 3) (4, 5, 6)
- (0, 1) (2, 3, 4) (5, 6)
- (0, 1, 2) (3, 4, 5) (6,)
- (0, 1, 2, 3) (4, 5, 6) ()
-
- Note that *n* must be at least 0 and at most equal to the length of
- *iterable*.
-
- This function will exhaust the iterable and may require significant
- storage.
- """
- if n < 0:
- raise ValueError('n must be >= 0')
-
- seq = tuple(iterable)
- size = len(seq)
-
- if n > size:
- raise ValueError('n must be <= len(seq)')
-
- for i in range(size - n + 1):
- beginning = seq[:i]
- middle = seq[i : i + n]
- end = seq[i + n :]
- yield beginning, middle, end
-
-
-def all_unique(iterable, key=None):
- """
- Returns ``True`` if all the elements of *iterable* are unique (no two
- elements are equal).
-
- >>> all_unique('ABCB')
- False
-
- If a *key* function is specified, it will be used to make comparisons.
-
- >>> all_unique('ABCb')
- True
- >>> all_unique('ABCb', str.lower)
- False
-
- The function returns as soon as the first non-unique element is
- encountered. Iterables with a mix of hashable and unhashable items can
- be used, but the function will be slower for unhashable items.
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- for element in map(key, iterable) if key else iterable:
- try:
- if element in seenset:
- return False
- seenset_add(element)
- except TypeError:
- if element in seenlist:
- return False
- seenlist_add(element)
- return True
-
-
-def nth_product(index, *args):
- """Equivalent to ``list(product(*args))[index]``.
-
- The products of *args* can be ordered lexicographically.
- :func:`nth_product` computes the product at sort position *index* without
- computing the previous products.
-
- >>> nth_product(8, range(2), range(2), range(2), range(2))
- (1, 0, 0, 0)
-
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pools = list(map(tuple, reversed(args)))
- ns = list(map(len, pools))
-
- c = reduce(mul, ns)
-
- if index < 0:
- index += c
-
- if not 0 <= index < c:
- raise IndexError
-
- result = []
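- # Decode index in mixed radix: each pool contributes one digit, with
- # the last pool varying fastest (hence the reversals above and below).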
- for pool, n in zip(pools, ns):
- result.append(pool[index % n])
- index //= n
-
- return tuple(reversed(result))
-
-
-def nth_permutation(iterable, r, index):
- """Equivalent to ``list(permutations(iterable, r))[index]```
-
- The subsequences of *iterable* that are of length *r* where order is
- important can be ordered lexicographically. :func:`nth_permutation`
- computes the subsequence at sort position *index* directly, without
- computing the previous subsequences.
-
- >>> nth_permutation('ghijk', 2, 5)
- ('h', 'i')
-
- ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = list(iterable)
- n = len(pool)
-
- if r is None or r == n:
- r, c = n, factorial(n)
- elif not 0 <= r < n:
- raise ValueError
- else:
- c = factorial(n) // factorial(n - r)
-
- if index < 0:
- index += c
-
- if not 0 <= index < c:
- raise IndexError
-
- if c == 0:
- return tuple()
-
- result = [0] * r
- q = index * factorial(n) // c if r < n else index
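- # Treat q as a number in the factorial number system; its digits
- # select which remaining pool items to take.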
- for d in range(1, n + 1):
- q, i = divmod(q, d)
- if 0 <= n - d < r:
- result[n - d] = i
- if q == 0:
- break
-
- return tuple(map(pool.pop, result))
-
-
-def value_chain(*args):
- """Yield all arguments passed to the function in the same order in which
- they were passed. If an argument itself is iterable then iterate over its
- values.
-
- >>> list(value_chain(1, 2, 3, [4, 5, 6]))
- [1, 2, 3, 4, 5, 6]
-
- Binary and text strings are not considered iterable and are emitted
- as-is:
-
- >>> list(value_chain('12', '34', ['56', '78']))
- ['12', '34', '56', '78']
-
-
- Multiple levels of nesting are not flattened.
-
- """
- for value in args:
- if isinstance(value, (str, bytes)):
- yield value
- continue
- try:
- yield from value
- except TypeError:
- yield value
-
-
-def product_index(element, *args):
- """Equivalent to ``list(product(*args)).index(element)``
-
- The products of *args* can be ordered lexicographically.
- :func:`product_index` computes the first index of *element* without
- computing the previous products.
-
- >>> product_index([8, 2], range(10), range(5))
- 42
-
- ``ValueError`` will be raised if the given *element* isn't in the product
- of *args*.
- """
- index = 0
-
- for x, pool in zip_longest(element, args, fillvalue=_marker):
- if x is _marker or pool is _marker:
- raise ValueError('element is not a product of args')
-
- pool = tuple(pool)
- index = index * len(pool) + pool.index(x)
-
- return index
-
-
-def combination_index(element, iterable):
- """Equivalent to ``list(combinations(iterable, r)).index(element)``
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`combination_index` computes the index of the
- first *element*, without computing the previous combinations.
-
- >>> combination_index('adf', 'abcdefg')
- 10
-
- ``ValueError`` will be raised if the given *element* isn't one of the
- combinations of *iterable*.
- """
- element = enumerate(element)
- k, y = next(element, (None, None))
- if k is None:
- return 0
-
- indexes = []
- pool = enumerate(iterable)
- for n, x in pool:
- if x == y:
- indexes.append(n)
- tmp, y = next(element, (None, None))
- if tmp is None:
- break
- else:
- k = tmp
- else:
- raise ValueError('element is not a combination of iterable')
-
- n, _ = last(pool, default=(n, None))
-
- # Python versions below 3.8 don't have math.comb,
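- # so compute binomial coefficients from factorials instead.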
- index = 1
- for i, j in enumerate(reversed(indexes), start=1):
- j = n - j
- if i <= j:
- index += factorial(j) // (factorial(i) * factorial(j - i))
-
- return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
-
-
-def permutation_index(element, iterable):
- """Equivalent to ``list(permutations(iterable, r)).index(element)```
-
- The subsequences of *iterable* that are of length *r* where order is
- important can be ordered lexicographically. :func:`permutation_index`
- computes the index of the first *element* directly, without computing
- the previous permutations.
-
- >>> permutation_index([1, 3, 2], range(5))
- 19
-
- ``ValueError`` will be raised if the given *element* isn't one of the
- permutations of *iterable*.
- """
- index = 0
- pool = list(iterable)
- for i, x in zip(range(len(pool), -1, -1), element):
- r = pool.index(x)
- index = index * i + r
- del pool[r]
-
- return index
-
-
-class countable:
- """Wrap *iterable* and keep a count of how many items have been consumed.
-
- The ``items_seen`` attribute starts at ``0`` and increments as the iterable
- is consumed:
-
- >>> iterable = map(str, range(10))
- >>> it = countable(iterable)
- >>> it.items_seen
- 0
- >>> next(it), next(it)
- ('0', '1')
- >>> list(it)
- ['2', '3', '4', '5', '6', '7', '8', '9']
- >>> it.items_seen
- 10
- """
-
- def __init__(self, iterable):
- self._it = iter(iterable)
- self.items_seen = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- item = next(self._it)
- self.items_seen += 1
-
- return item
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/recipes.py b/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/recipes.py
deleted file mode 100644
index 521abd7c2ca..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/recipes.py
+++ /dev/null
@@ -1,620 +0,0 @@
-"""Imported from the recipes section of the itertools documentation.
-
-All functions taken from the recipes section of the itertools library docs
-[1]_.
-Some backward-compatible usability improvements have been made.
-
-.. [1] http://docs.python.org/library/itertools.html#recipes
-
-"""
-import warnings
-from collections import deque
-from itertools import (
- chain,
- combinations,
- count,
- cycle,
- groupby,
- islice,
- repeat,
- starmap,
- tee,
- zip_longest,
-)
-import operator
-from random import randrange, sample, choice
-
-__all__ = [
- 'all_equal',
- 'consume',
- 'convolve',
- 'dotproduct',
- 'first_true',
- 'flatten',
- 'grouper',
- 'iter_except',
- 'ncycles',
- 'nth',
- 'nth_combination',
- 'padnone',
- 'pad_none',
- 'pairwise',
- 'partition',
- 'powerset',
- 'prepend',
- 'quantify',
- 'random_combination_with_replacement',
- 'random_combination',
- 'random_permutation',
- 'random_product',
- 'repeatfunc',
- 'roundrobin',
- 'tabulate',
- 'tail',
- 'take',
- 'unique_everseen',
- 'unique_justseen',
-]
-
-
-def take(n, iterable):
- """Return first *n* items of the iterable as a list.
-
- >>> take(3, range(10))
- [0, 1, 2]
-
- If there are fewer than *n* items in the iterable, all of them are
- returned.
-
- >>> take(10, range(3))
- [0, 1, 2]
-
- """
- return list(islice(iterable, n))
-
-
-def tabulate(function, start=0):
- """Return an iterator over the results of ``func(start)``,
- ``func(start + 1)``, ``func(start + 2)``...
-
- *func* should be a function that accepts one integer argument.
-
- If *start* is not specified it defaults to 0. It will be incremented each
- time the iterator is advanced.
-
- >>> square = lambda x: x ** 2
- >>> iterator = tabulate(square, -3)
- >>> take(4, iterator)
- [9, 4, 1, 0]
-
- """
- return map(function, count(start))
-
-
-def tail(n, iterable):
- """Return an iterator over the last *n* items of *iterable*.
-
- >>> t = tail(3, 'ABCDEFG')
- >>> list(t)
- ['E', 'F', 'G']
-
- """
- return iter(deque(iterable, maxlen=n))
-
-
-def consume(iterator, n=None):
- """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
- entirely.
-
- Efficiently exhausts an iterator without returning values. Defaults to
- consuming the whole iterator, but an optional second argument may be
- provided to limit consumption.
-
- >>> i = (x for x in range(10))
- >>> next(i)
- 0
- >>> consume(i, 3)
- >>> next(i)
- 4
- >>> consume(i)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- If the iterator has fewer items remaining than the provided limit, the
- whole iterator will be consumed.
-
- >>> i = (x for x in range(3))
- >>> consume(i, 5)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- """
- # Use functions that consume iterators at C speed.
- if n is None:
- # feed the entire iterator into a zero-length deque
- deque(iterator, maxlen=0)
- else:
- # advance to the empty slice starting at position n
- next(islice(iterator, n, n), None)
-
-
-def nth(iterable, n, default=None):
- """Returns the nth item or a default value.
-
- >>> l = range(10)
- >>> nth(l, 3)
- 3
- >>> nth(l, 20, "zebra")
- 'zebra'
-
- """
- return next(islice(iterable, n, None), default)
-
-
-def all_equal(iterable):
- """
- Returns ``True`` if all the elements are equal to each other.
-
- >>> all_equal('aaaa')
- True
- >>> all_equal('aaab')
- False
-
- """
- g = groupby(iterable)
- return next(g, True) and not next(g, False)
-
-
-def quantify(iterable, pred=bool):
- """Return the how many times the predicate is true.
-
- >>> quantify([True, False, True])
- 2
-
- """
- return sum(map(pred, iterable))
-
-
-def pad_none(iterable):
- """Returns the sequence of elements and then returns ``None`` indefinitely.
-
- >>> take(5, pad_none(range(3)))
- [0, 1, 2, None, None]
-
- Useful for emulating the behavior of the built-in :func:`map` function.
-
- See also :func:`padded`.
-
- """
- return chain(iterable, repeat(None))
-
-
-padnone = pad_none
-
-
-def ncycles(iterable, n):
- """Returns the sequence elements *n* times
-
- >>> list(ncycles(["a", "b"], 3))
- ['a', 'b', 'a', 'b', 'a', 'b']
-
- """
- return chain.from_iterable(repeat(tuple(iterable), n))
-
-
-def dotproduct(vec1, vec2):
- """Returns the dot product of the two iterables.
-
- >>> dotproduct([10, 10], [20, 20])
- 400
-
- """
- return sum(map(operator.mul, vec1, vec2))
-
-
-def flatten(listOfLists):
- """Return an iterator flattening one level of nesting in a list of lists.
-
- >>> list(flatten([[0, 1], [2, 3]]))
- [0, 1, 2, 3]
-
- See also :func:`collapse`, which can flatten multiple levels of nesting.
-
- """
- return chain.from_iterable(listOfLists)
-
-
-def repeatfunc(func, times=None, *args):
- """Call *func* with *args* repeatedly, returning an iterable over the
- results.
-
- If *times* is specified, the iterable will terminate after that many
- repetitions:
-
- >>> from operator import add
- >>> times = 4
- >>> args = 3, 5
- >>> list(repeatfunc(add, times, *args))
- [8, 8, 8, 8]
-
- If *times* is ``None`` the iterable will not terminate:
-
- >>> from random import randrange
- >>> times = None
- >>> args = 1, 11
- >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
- [2, 4, 8, 1, 8, 4]
-
- """
- if times is None:
- return starmap(func, repeat(args))
- return starmap(func, repeat(args, times))
-
-
-def _pairwise(iterable):
- """Returns an iterator of paired items, overlapping, from the original
-
- >>> take(4, pairwise(count()))
- [(0, 1), (1, 2), (2, 3), (3, 4)]
-
- On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
-
- """
- a, b = tee(iterable)
- next(b, None)
- yield from zip(a, b)
-
-
-try:
- from itertools import pairwise as itertools_pairwise
-except ImportError:
- pairwise = _pairwise
-else:
-
- def pairwise(iterable):
- yield from itertools_pairwise(iterable)
-
- pairwise.__doc__ = _pairwise.__doc__
-
-
-def grouper(iterable, n, fillvalue=None):
- """Collect data into fixed-length chunks or blocks.
-
- >>> list(grouper('ABCDEFG', 3, 'x'))
- [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
-
- """
- if isinstance(iterable, int):
- warnings.warn(
- "grouper expects iterable as first parameter", DeprecationWarning
- )
- n, iterable = iterable, n
- args = [iter(iterable)] * n
- return zip_longest(fillvalue=fillvalue, *args)
-
-
-def roundrobin(*iterables):
- """Yields an item from each iterable, alternating between them.
-
- >>> list(roundrobin('ABC', 'D', 'EF'))
- ['A', 'D', 'E', 'B', 'F', 'C']
-
- This function produces the same output as :func:`interleave_longest`, but
- may perform better for some inputs (in particular when the number of
- iterables is small).
-
- """
- # Recipe credited to George Sakkis
- pending = len(iterables)
- nexts = cycle(iter(it).__next__ for it in iterables)
- while pending:
- try:
- for next in nexts:
- yield next()
- except StopIteration:
- pending -= 1
- nexts = cycle(islice(nexts, pending))
-
-
-def partition(pred, iterable):
- """
- Returns a 2-tuple of iterables derived from the input iterable.
- The first yields the items that have ``pred(item) == False``.
- The second yields the items that have ``pred(item) == True``.
-
- >>> is_odd = lambda x: x % 2 != 0
- >>> iterable = range(10)
- >>> even_items, odd_items = partition(is_odd, iterable)
- >>> list(even_items), list(odd_items)
- ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
-
- If *pred* is None, :func:`bool` is used.
-
- >>> iterable = [0, 1, False, True, '', ' ']
- >>> false_items, true_items = partition(None, iterable)
- >>> list(false_items), list(true_items)
- ([0, False, ''], [1, True, ' '])
-
- """
- if pred is None:
- pred = bool
-
- evaluations = ((pred(x), x) for x in iterable)
- t1, t2 = tee(evaluations)
- return (
- (x for (cond, x) in t1 if not cond),
- (x for (cond, x) in t2 if cond),
- )
-
-
-def powerset(iterable):
- """Yields all possible subsets of the iterable.
-
- >>> list(powerset([1, 2, 3]))
- [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
-
- :func:`powerset` will operate on iterables that aren't :class:`set`
- instances, so repeated elements in the input will produce repeated elements
- in the output. Use :func:`unique_everseen` on the input to avoid generating
- duplicates:
-
- >>> seq = [1, 1, 0]
- >>> list(powerset(seq))
- [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
- >>> from more_itertools import unique_everseen
- >>> list(powerset(unique_everseen(seq)))
- [(), (1,), (0,), (1, 0)]
-
- """
- s = list(iterable)
- return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
- """
- Yield unique elements, preserving order.
-
- >>> list(unique_everseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D']
- >>> list(unique_everseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'D']
-
- Sequences with a mix of hashable and unhashable items can be used.
- The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
- Remember that ``list`` objects are unhashable - you can use the *key*
- parameter to transform the list to a tuple (which is hashable) to
- avoid a slowdown.
-
- >>> iterable = ([1, 2], [2, 3], [1, 2])
- >>> list(unique_everseen(iterable)) # Slow
- [[1, 2], [2, 3]]
- >>> list(unique_everseen(iterable, key=tuple)) # Faster
- [[1, 2], [2, 3]]
-
- Similarly, you may want to convert unhashable ``set`` objects with
- ``key=frozenset``. For ``dict`` objects,
- ``key=lambda x: frozenset(x.items())`` can be used.
-
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- use_key = key is not None
-
- for element in iterable:
- k = key(element) if use_key else element
- try:
- if k not in seenset:
- seenset_add(k)
- yield element
- except TypeError:
- if k not in seenlist:
- seenlist_add(k)
- yield element
-
-
-def unique_justseen(iterable, key=None):
- """Yields elements in order, ignoring serial duplicates
-
- >>> list(unique_justseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D', 'A', 'B']
- >>> list(unique_justseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'A', 'D']
-
- """
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
-
-
-def iter_except(func, exception, first=None):
- """Yields results from a function repeatedly until an exception is raised.
-
- Converts a call-until-exception interface to an iterator interface.
- Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
- to end the loop.
-
- >>> l = [0, 1, 2]
- >>> list(iter_except(l.pop, IndexError))
- [2, 1, 0]
-
- """
- try:
- if first is not None:
- yield first()
- while 1:
- yield func()
- except exception:
- pass
-
-
-def first_true(iterable, default=None, pred=None):
- """
- Returns the first true value in the iterable.
-
- If no true value is found, returns *default*
-
- If *pred* is not None, returns the first item for which
- ``pred(item) == True``.
-
- >>> first_true(range(10))
- 1
- >>> first_true(range(10), pred=lambda x: x > 5)
- 6
- >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
- 'missing'
-
- """
- return next(filter(pred, iterable), default)
-
-
-def random_product(*args, repeat=1):
- """Draw an item at random from each of the input iterables.
-
- >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
- ('c', 3, 'Z')
-
- If *repeat* is provided as a keyword argument, that many items will be
- drawn from each iterable.
-
- >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
- ('a', 2, 'd', 3)
-
- This is equivalent to taking a random selection from
- ``itertools.product(*args, **kwargs)``.
-
- """
- pools = [tuple(pool) for pool in args] * repeat
- return tuple(choice(pool) for pool in pools)
-
-
-def random_permutation(iterable, r=None):
- """Return a random *r* length permutation of the elements in *iterable*.
-
- If *r* is not specified or is ``None``, then *r* defaults to the length of
- *iterable*.
-
- >>> random_permutation(range(5)) # doctest:+SKIP
- (3, 4, 0, 1, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.permutations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- r = len(pool) if r is None else r
- return tuple(sample(pool, r))
-
-
-def random_combination(iterable, r):
- """Return a random *r* length subsequence of the elements in *iterable*.
-
- >>> random_combination(range(5), 3) # doctest:+SKIP
- (2, 3, 4)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(sample(range(n), r))
- return tuple(pool[i] for i in indices)
-
-
-def random_combination_with_replacement(iterable, r):
- """Return a random *r* length subsequence of elements in *iterable*,
- allowing individual elements to be repeated.
-
- >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
- (0, 0, 1, 2, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations_with_replacement(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(randrange(n) for i in range(r))
- return tuple(pool[i] for i in indices)
-
-
-def nth_combination(iterable, r, index):
- """Equivalent to ``list(combinations(iterable, r))[index]``.
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`nth_combination` computes the subsequence at
- sort position *index* directly, without computing the previous
- subsequences.
-
- >>> nth_combination(range(5), 3, 5)
- (0, 3, 4)
-
- ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = tuple(iterable)
- n = len(pool)
- if (r < 0) or (r > n):
- raise ValueError
-
- c = 1
- k = min(r, n - r)
- for i in range(1, k + 1):
- c = c * (n - k + i) // i
-
- if index < 0:
- index += c
-
- if (index < 0) or (index >= c):
- raise IndexError
-
- result = []
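- # Decode index in the combinatorial number system, selecting one
- # element of the combination per iteration of the outer loop.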
- while r:
- c, n, r = c * r // n, n - 1, r - 1
- while index >= c:
- index -= c
- c, n = c * (n - r) // n, n - 1
- result.append(pool[-1 - n])
-
- return tuple(result)
-
-
-def prepend(value, iterator):
- """Yield *value*, followed by the elements in *iterator*.
-
- >>> value = '0'
- >>> iterator = ['1', '2', '3']
- >>> list(prepend(value, iterator))
- ['0', '1', '2', '3']
-
- To prepend multiple values, see :func:`itertools.chain`
- or :func:`value_chain`.
-
- """
- return chain([value], iterator)
-
-
-def convolve(signal, kernel):
- """Convolve the iterable *signal* with the iterable *kernel*.
-
- >>> signal = (1, 2, 3, 4, 5)
- >>> kernel = [3, 2, 1]
- >>> list(convolve(signal, kernel))
- [3, 8, 14, 20, 26, 14, 5]
-
- Note: the input arguments are not interchangeable, as the *kernel*
- is immediately consumed and stored.
-
- """
- kernel = tuple(kernel)[::-1]
- n = len(kernel)
- window = deque([0], maxlen=n) * n
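- # The deque holds the n most recent samples; padding the signal with
- # n - 1 zeros flushes the tail of the convolution.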
- for x in chain(signal, repeat(0, n - 1)):
- window.append(x)
- yield sum(map(operator.mul, kernel, window))
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/ordered_set.py b/contrib/python/setuptools/py3/setuptools/_vendor/ordered_set.py
deleted file mode 100644
index 14876000de8..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/ordered_set.py
+++ /dev/null
@@ -1,488 +0,0 @@
-"""
-An OrderedSet is a custom MutableSet that remembers its order, so that every
-entry has an index that can be looked up.
-
-Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
-and released under the MIT license.
-"""
-import itertools as it
-from collections import deque
-
-try:
- # Python 3
- from collections.abc import MutableSet, Sequence
-except ImportError:
- # Python 2.7
- from collections import MutableSet, Sequence
-
-SLICE_ALL = slice(None)
-__version__ = "3.1"
-
-
-def is_iterable(obj):
- """
- Are we being asked to look up a list of things, instead of a single thing?
- We check for the `__iter__` attribute so that this can cover types that
- don't have to be known by this module, such as NumPy arrays.
-
- Strings, however, should be considered as atomic values to look up, not
- iterables. The same goes for tuples, since they are immutable and therefore
- valid entries.
-
- We don't need to check for the Python 2 `unicode` type, because it doesn't
- have an `__iter__` attribute anyway.
- """
- return (
- hasattr(obj, "__iter__")
- and not isinstance(obj, str)
- and not isinstance(obj, tuple)
- )
-
-
-class OrderedSet(MutableSet, Sequence):
- """
- An OrderedSet is a custom MutableSet that remembers its order, so that
- every entry has an index that can be looked up.
-
- Example:
- >>> OrderedSet([1, 1, 2, 3, 2])
- OrderedSet([1, 2, 3])
- """
-
- def __init__(self, iterable=None):
- self.items = []
- self.map = {}
- if iterable is not None:
- self |= iterable
-
- def __len__(self):
- """
- Returns the number of unique elements in the ordered set
-
- Example:
- >>> len(OrderedSet([]))
- 0
- >>> len(OrderedSet([1, 2]))
- 2
- """
- return len(self.items)
-
- def __getitem__(self, index):
- """
- Get the item at a given index.
-
- If `index` is a slice, you will get back that slice of items, as a
- new OrderedSet.
-
- If `index` is a list or a similar iterable, you'll get a list of
- items corresponding to those indices. This is similar to NumPy's
- "fancy indexing". The result is not an OrderedSet because you may ask
- for duplicate indices, and the number of elements returned should be
- the number of elements asked for.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset[1]
- 2
- """
- if isinstance(index, slice) and index == SLICE_ALL:
- return self.copy()
- elif is_iterable(index):
- return [self.items[i] for i in index]
- elif hasattr(index, "__index__") or isinstance(index, slice):
- result = self.items[index]
- if isinstance(result, list):
- return self.__class__(result)
- else:
- return result
- else:
- raise TypeError("Don't know how to index an OrderedSet by %r" % index)
-
- def copy(self):
- """
- Return a shallow copy of this object.
-
- Example:
- >>> this = OrderedSet([1, 2, 3])
- >>> other = this.copy()
- >>> this == other
- True
- >>> this is other
- False
- """
- return self.__class__(self)
-
- def __getstate__(self):
- if len(self) == 0:
- # The state can't be an empty list.
- # We need to return a truthy value, or else __setstate__ won't be run.
- #
- # This could have been done more gracefully by always putting the state
- # in a tuple, but this way is backwards- and forwards- compatible with
- # previous versions of OrderedSet.
- return (None,)
- else:
- return list(self)
-
- def __setstate__(self, state):
- if state == (None,):
- self.__init__([])
- else:
- self.__init__(state)
-
- def __contains__(self, key):
- """
- Test if the item is in this ordered set
-
- Example:
- >>> 1 in OrderedSet([1, 3, 2])
- True
- >>> 5 in OrderedSet([1, 3, 2])
- False
- """
- return key in self.map
-
- def add(self, key):
- """
- Add `key` as an item to this OrderedSet, then return its index.
-
- If `key` is already in the OrderedSet, return the index it already
- had.
-
- Example:
- >>> oset = OrderedSet()
- >>> oset.append(3)
- 0
- >>> print(oset)
- OrderedSet([3])
- """
- if key not in self.map:
- self.map[key] = len(self.items)
- self.items.append(key)
- return self.map[key]
-
- append = add
-
- def update(self, sequence):
- """
- Update the set with the given iterable sequence, then return the index
- of the last element inserted.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.update([3, 1, 5, 1, 4])
- 4
- >>> print(oset)
- OrderedSet([1, 2, 3, 5, 4])
- """
- item_index = None
- try:
- for item in sequence:
- item_index = self.add(item)
- except TypeError:
- raise ValueError(
- "Argument needs to be an iterable, got %s" % type(sequence)
- )
- return item_index
-
- def index(self, key):
- """
- Get the index of a given entry, raising a KeyError if it's not
- present.
-
- `key` can be an iterable of entries that is not a string, in which case
- this returns a list of indices.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.index(2)
- 1
- """
- if is_iterable(key):
- return [self.index(subkey) for subkey in key]
- return self.map[key]
-
- # Provide some compatibility with pd.Index
- get_loc = index
- get_indexer = index
-
- def pop(self):
- """
- Remove and return the last element from the set.
-
- Raises KeyError if the set is empty.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.pop()
- 3
- """
- if not self.items:
- raise KeyError("Set is empty")
-
- elem = self.items[-1]
- del self.items[-1]
- del self.map[elem]
- return elem
-
- def discard(self, key):
- """
- Remove an element. Do not raise an exception if absent.
-
- The MutableSet mixin uses this to implement the .remove() method, which
- *does* raise an error when asked to remove a non-existent item.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.discard(2)
- >>> print(oset)
- OrderedSet([1, 3])
- >>> oset.discard(2)
- >>> print(oset)
- OrderedSet([1, 3])
- """
- if key in self:
- i = self.map[key]
- del self.items[i]
- del self.map[key]
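- # Shift down the stored index of every entry that followed the
- # removed key.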
- for k, v in self.map.items():
- if v >= i:
- self.map[k] = v - 1
-
- def clear(self):
- """
- Remove all items from this OrderedSet.
- """
- del self.items[:]
- self.map.clear()
-
- def __iter__(self):
- """
- Example:
- >>> list(iter(OrderedSet([1, 2, 3])))
- [1, 2, 3]
- """
- return iter(self.items)
-
- def __reversed__(self):
- """
- Example:
- >>> list(reversed(OrderedSet([1, 2, 3])))
- [3, 2, 1]
- """
- return reversed(self.items)
-
- def __repr__(self):
- if not self:
- return "%s()" % (self.__class__.__name__,)
- return "%s(%r)" % (self.__class__.__name__, list(self))
-
- def __eq__(self, other):
- """
- Returns true if the containers have the same items. If `other` is a
- Sequence, then order is checked, otherwise it is ignored.
-
- Example:
- >>> oset = OrderedSet([1, 3, 2])
- >>> oset == [1, 3, 2]
- True
- >>> oset == [1, 2, 3]
- False
- >>> oset == [2, 3]
- False
- >>> oset == OrderedSet([3, 2, 1])
- False
- """
- # In Python 2 deque is not a Sequence, so treat it as one for
- # consistent behavior with Python 3.
- if isinstance(other, (Sequence, deque)):
- # Check that this OrderedSet contains the same elements, in the
- # same order, as the other object.
- return list(self) == list(other)
- try:
- other_as_set = set(other)
- except TypeError:
- # If `other` can't be converted into a set, it's not equal.
- return False
- else:
- return set(self) == other_as_set
-
- def union(self, *sets):
- """
- Combines all unique items.
- Each item's order is defined by its first appearance.
-
- Example:
- >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
- >>> print(oset)
- OrderedSet([3, 1, 4, 5, 2, 0])
- >>> oset.union([8, 9])
- OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
- >>> oset | {10}
- OrderedSet([3, 1, 4, 5, 2, 0, 10])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- containers = map(list, it.chain([self], sets))
- items = it.chain.from_iterable(containers)
- return cls(items)
-
- def __and__(self, other):
- # the parent implementation of this is backwards
- return self.intersection(other)
-
- def intersection(self, *sets):
- """
- Returns elements in common between all sets. Order is defined only
- by the first set.
-
- Example:
- >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
- >>> print(oset)
- OrderedSet([1, 2, 3])
- >>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
- OrderedSet([2])
- >>> oset.intersection()
- OrderedSet([1, 2, 3])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- if sets:
- common = set.intersection(*map(set, sets))
- items = (item for item in self if item in common)
- else:
- items = self
- return cls(items)
-
- def difference(self, *sets):
- """
- Returns all elements that are in this set but not the others.
-
- Example:
- >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
- OrderedSet([1, 3])
- >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
- OrderedSet([1])
- >>> OrderedSet([1, 2, 3]) - OrderedSet([2])
- OrderedSet([1, 3])
- >>> OrderedSet([1, 2, 3]).difference()
- OrderedSet([1, 2, 3])
- """
- cls = self.__class__
- if sets:
- other = set.union(*map(set, sets))
- items = (item for item in self if item not in other)
- else:
- items = self
- return cls(items)
-
- def issubset(self, other):
- """
- Report whether another set contains this set.
-
- Example:
- >>> OrderedSet([1, 2, 3]).issubset({1, 2})
- False
- >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
- True
- >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
- False
- """
- if len(self) > len(other): # Fast check for obvious cases
- return False
- return all(item in other for item in self)
-
- def issuperset(self, other):
- """
- Report whether this set contains another set.
-
- Example:
- >>> OrderedSet([1, 2]).issuperset([1, 2, 3])
- False
- >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
- True
- >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
- False
- """
- if len(self) < len(other): # Fast check for obvious cases
- return False
- return all(item in self for item in other)
-
- def symmetric_difference(self, other):
- """
- Return the symmetric difference of two OrderedSets as a new set.
- That is, the new set will contain all elements that are in exactly
- one of the sets.
-
- Their order will be preserved, with elements from `self` preceding
- elements from `other`.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.symmetric_difference(other)
- OrderedSet([4, 5, 9, 2])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- diff1 = cls(self).difference(other)
- diff2 = cls(other).difference(self)
- return diff1.union(diff2)
-
- def _update_items(self, items):
- """
- Replace the 'items' list of this OrderedSet with a new one, updating
- self.map accordingly.
- """
- self.items = items
- self.map = {item: idx for (idx, item) in enumerate(items)}
-
- def difference_update(self, *sets):
- """
- Update this OrderedSet to remove items from one or more other sets.
-
- Example:
- >>> this = OrderedSet([1, 2, 3])
- >>> this.difference_update(OrderedSet([2, 4]))
- >>> print(this)
- OrderedSet([1, 3])
-
- >>> this = OrderedSet([1, 2, 3, 4, 5])
- >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
- >>> print(this)
- OrderedSet([3, 5])
- """
- items_to_remove = set()
- for other in sets:
- items_to_remove |= set(other)
- self._update_items([item for item in self.items if item not in items_to_remove])
-
- def intersection_update(self, other):
- """
- Update this OrderedSet to keep only items in another set, preserving
- their order in this set.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.intersection_update(other)
- >>> print(this)
- OrderedSet([1, 3, 7])
- """
- other = set(other)
- self._update_items([item for item in self.items if item in other])
-
- def symmetric_difference_update(self, other):
- """
- Update this OrderedSet to remove items from another set, then
- add items from the other set that were not present in this set.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.symmetric_difference_update(other)
- >>> print(this)
- OrderedSet([4, 5, 9, 2])
- """
- items_to_add = [item for item in other if item not in self]
- items_to_remove = set(other)
- self._update_items(
- [item for item in self.items if item not in items_to_remove] + items_to_add
- )
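
As a side note, the mutators above compose in a way that is easy to see in a short session. A minimal sketch, assuming the OrderedSet class from this file (upstream it ships as the `ordered-set` distribution's `ordered_set` module):

    from ordered_set import OrderedSet

    s = OrderedSet([3, 1, 4])
    s.add(1)               # already present: returns its existing index, 1
    s.add(5)               # new item: appended, returns index 3
    s = s.union([9, 3])    # union keeps first-seen order
    print(s)               # OrderedSet([3, 1, 4, 5, 9])
    s.discard(1)           # removes the item and re-indexes those after it
    print(s.index(4))      # 1 (was 2 before the discard)
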
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/__about__.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/__about__.py
deleted file mode 100644
index c359122f971..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/__about__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
-
-__title__ = "packaging"
-__summary__ = "Core utilities for Python packages"
-__uri__ = "https://github.com/pypa/packaging"
-
-__version__ = "21.2"
-
-__author__ = "Donald Stufft and individual contributors"
-__email__ = "[email protected]"
-
-__license__ = "BSD-2-Clause or Apache-2.0"
-__copyright__ = "2014-2019 %s" % __author__
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/__init__.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/__init__.py
deleted file mode 100644
index 3c50c5dcfee..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from .__about__ import (
- __author__,
- __copyright__,
- __email__,
- __license__,
- __summary__,
- __title__,
- __uri__,
- __version__,
-)
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_manylinux.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_manylinux.py
deleted file mode 100644
index 4c379aa6f69..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_manylinux.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import collections
-import functools
-import os
-import re
-import struct
-import sys
-import warnings
-from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
-
-
-# Python does not provide platform information at sufficient granularity to
-# identify the architecture of the running executable in some cases, so we
-# determine it dynamically by reading the information from the running
-# process. This only applies on Linux, which uses the ELF format.
-class _ELFFileHeader:
- # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
- class _InvalidELFFileHeader(ValueError):
- """
- An invalid ELF file header was found.
- """
-
- ELF_MAGIC_NUMBER = 0x7F454C46
- ELFCLASS32 = 1
- ELFCLASS64 = 2
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
- EM_386 = 3
- EM_S390 = 22
- EM_ARM = 40
- EM_X86_64 = 62
- EF_ARM_ABIMASK = 0xFF000000
- EF_ARM_ABI_VER5 = 0x05000000
- EF_ARM_ABI_FLOAT_HARD = 0x00000400
-
- def __init__(self, file: IO[bytes]) -> None:
- def unpack(fmt: str) -> int:
- try:
- data = file.read(struct.calcsize(fmt))
- result: Tuple[int, ...] = struct.unpack(fmt, data)
- except struct.error:
- raise _ELFFileHeader._InvalidELFFileHeader()
- return result[0]
-
- self.e_ident_magic = unpack(">I")
- if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_class = unpack("B")
- if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_data = unpack("B")
- if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_version = unpack("B")
- self.e_ident_osabi = unpack("B")
- self.e_ident_abiversion = unpack("B")
- self.e_ident_pad = file.read(7)
- format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
- format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
- format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
- format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
- self.e_type = unpack(format_h)
- self.e_machine = unpack(format_h)
- self.e_version = unpack(format_i)
- self.e_entry = unpack(format_p)
- self.e_phoff = unpack(format_p)
- self.e_shoff = unpack(format_p)
- self.e_flags = unpack(format_i)
- self.e_ehsize = unpack(format_h)
- self.e_phentsize = unpack(format_h)
- self.e_phnum = unpack(format_h)
- self.e_shentsize = unpack(format_h)
- self.e_shnum = unpack(format_h)
- self.e_shstrndx = unpack(format_h)
-
-
-def _get_elf_header() -> Optional[_ELFFileHeader]:
- try:
- with open(sys.executable, "rb") as f:
- elf_header = _ELFFileHeader(f)
- except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
- return None
- return elf_header
-
-
-def _is_linux_armhf() -> bool:
- # hard-float ABI can be detected from the ELF header of the running
- # process
- # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
- elf_header = _get_elf_header()
- if elf_header is None:
- return False
- result = elf_header.e_ident_class == elf_header.ELFCLASS32
- result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
- result &= elf_header.e_machine == elf_header.EM_ARM
- result &= (
- elf_header.e_flags & elf_header.EF_ARM_ABIMASK
- ) == elf_header.EF_ARM_ABI_VER5
- result &= (
- elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
- ) == elf_header.EF_ARM_ABI_FLOAT_HARD
- return result
-
-
-def _is_linux_i686() -> bool:
- elf_header = _get_elf_header()
- if elf_header is None:
- return False
- result = elf_header.e_ident_class == elf_header.ELFCLASS32
- result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
- result &= elf_header.e_machine == elf_header.EM_386
- return result
-
-
-def _have_compatible_abi(arch: str) -> bool:
- if arch == "armv7l":
- return _is_linux_armhf()
- if arch == "i686":
- return _is_linux_i686()
- return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
-
-
-# If glibc ever changes its major version, we need to know what the last
-# minor version was, so we can build the complete list of all versions.
-# For now, guess what the highest minor version might be and assume it
-# will be 50 for testing. Once this actually happens, update the dictionary
-# with the actual value.
-_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
-
-
-class _GLibCVersion(NamedTuple):
- major: int
- minor: int
-
-
-def _glibc_version_string_confstr() -> Optional[str]:
- """
- Primary implementation of glibc_version_string using os.confstr.
- """
- # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
- # to be broken or missing. This strategy is used in the standard library
- # platform module.
- # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
- try:
- # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
- version_string = os.confstr("CS_GNU_LIBC_VERSION")
- assert version_string is not None
- _, version = version_string.split()
- except (AssertionError, AttributeError, OSError, ValueError):
- # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
- return None
- return version
-
-
-def _glibc_version_string_ctypes() -> Optional[str]:
- """
- Fallback implementation of glibc_version_string using ctypes.
- """
- try:
- import ctypes
- except ImportError:
- return None
-
- # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
- # manpage says, "If filename is NULL, then the returned handle is for the
- # main program". This way we can let the linker do the work to figure out
- # which libc our process is actually using.
- #
- # We must also handle the special case where the executable is not a
- # dynamically linked executable. This can occur when using musl libc,
- # for example. In this situation, dlopen() will error, leading to an
- # OSError. Interestingly, at least in the case of musl, there is no
- # errno set on the OSError. The single string argument used to construct
- # OSError comes from libc itself and is therefore not portable to
- # hard code here. In any case, failure to call dlopen() means we
- # can't proceed, so we bail on our attempt.
- try:
- process_namespace = ctypes.CDLL(None)
- except OSError:
- return None
-
- try:
- gnu_get_libc_version = process_namespace.gnu_get_libc_version
- except AttributeError:
- # Symbol doesn't exist -> therefore, we are not linked to
- # glibc.
- return None
-
- # Call gnu_get_libc_version, which returns a string like "2.5"
- gnu_get_libc_version.restype = ctypes.c_char_p
- version_str: str = gnu_get_libc_version()
- # py2 / py3 compatibility:
- if not isinstance(version_str, str):
- version_str = version_str.decode("ascii")
-
- return version_str
-
-
-def _glibc_version_string() -> Optional[str]:
- """Returns glibc version string, or None if not using glibc."""
- return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
-
-
-def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
- """Parse glibc version.
-
- We use a regexp instead of str.split because we want to discard any
- random junk that might come after the minor version -- this might happen
- in patched/forked versions of glibc (e.g. Linaro's version of glibc
- uses version strings like "2.20-2014.11"). See gh-3588.
- """
- m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
- if not m:
- warnings.warn(
- "Expected glibc version with 2 components major.minor,"
- " got: %s" % version_str,
- RuntimeWarning,
- )
- return -1, -1
- return int(m.group("major")), int(m.group("minor"))
-
-
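
A quick illustration of the tolerance described above, on hypothetical inputs (assumes `_parse_glibc_version` from this file is in scope):

    _parse_glibc_version("2.17")          # -> (2, 17)
    _parse_glibc_version("2.20-2014.11")  # -> (2, 20); trailing junk dropped
    _parse_glibc_version("unknown")       # -> (-1, -1), after a RuntimeWarning
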
-def _get_glibc_version() -> Tuple[int, int]:
- version_str = _glibc_version_string()
- if version_str is None:
- return (-1, -1)
- return _parse_glibc_version(version_str)
-
-
-# From PEP 513, PEP 600
-def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
- sys_glibc = _get_glibc_version()
- if sys_glibc < version:
- return False
- # Check for presence of _manylinux module.
- try:
- import _manylinux # noqa
- except ImportError:
- return True
- if hasattr(_manylinux, "manylinux_compatible"):
- result = _manylinux.manylinux_compatible(version[0], version[1], arch)
- if result is not None:
- return bool(result)
- return True
- if version == _GLibCVersion(2, 5):
- if hasattr(_manylinux, "manylinux1_compatible"):
- return bool(_manylinux.manylinux1_compatible)
- if version == _GLibCVersion(2, 12):
- if hasattr(_manylinux, "manylinux2010_compatible"):
- return bool(_manylinux.manylinux2010_compatible)
- if version == _GLibCVersion(2, 17):
- if hasattr(_manylinux, "manylinux2014_compatible"):
- return bool(_manylinux.manylinux2014_compatible)
- return True
-
-
-_LEGACY_MANYLINUX_MAP = {
- # CentOS 7 w/ glibc 2.17 (PEP 599)
- (2, 17): "manylinux2014",
- # CentOS 6 w/ glibc 2.12 (PEP 571)
- (2, 12): "manylinux2010",
- # CentOS 5 w/ glibc 2.5 (PEP 513)
- (2, 5): "manylinux1",
-}
-
-
-def platform_tags(linux: str, arch: str) -> Iterator[str]:
- if not _have_compatible_abi(arch):
- return
- # Oldest glibc to be supported regardless of architecture is (2, 17).
- too_old_glibc2 = _GLibCVersion(2, 16)
- if arch in {"x86_64", "i686"}:
- # On x86/i686, the oldest supported glibc is (2, 5).
- too_old_glibc2 = _GLibCVersion(2, 4)
- current_glibc = _GLibCVersion(*_get_glibc_version())
- glibc_max_list = [current_glibc]
- # We can assume compatibility across glibc major versions.
- # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
- #
- # Build a list of maximum glibc versions so that we can
- # output the canonical list of all glibc from current_glibc
- # down to too_old_glibc2, including all intermediary versions.
- for glibc_major in range(current_glibc.major - 1, 1, -1):
- glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
- glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
- for glibc_max in glibc_max_list:
- if glibc_max.major == too_old_glibc2.major:
- min_minor = too_old_glibc2.minor
- else:
- # For other glibc major versions oldest supported is (x, 0).
- min_minor = -1
- for glibc_minor in range(glibc_max.minor, min_minor, -1):
- glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
- tag = "manylinux_{}_{}".format(*glibc_version)
- if _is_compatible(tag, arch, glibc_version):
- yield linux.replace("linux", tag)
- # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
- if glibc_version in _LEGACY_MANYLINUX_MAP:
- legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
- if _is_compatible(legacy_tag, arch, glibc_version):
- yield linux.replace("linux", legacy_tag)
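
Taken together, platform_tags walks glibc versions downward from the detected one to the per-architecture floor, yielding a PEP 600 tag for each plus the legacy alias where one exists. A minimal sketch of calling it, assuming the upstream `packaging` distribution (where this module ships as the private `packaging._manylinux`); actual output depends on the interpreter's glibc:

    from packaging._manylinux import platform_tags

    # On an x86-64 interpreter linked against glibc 2.17 this yields, in
    # order: manylinux_2_17_x86_64, manylinux2014_x86_64,
    # manylinux_2_16_x86_64, ... down to manylinux_2_5_x86_64 and
    # manylinux1_x86_64.
    for tag in platform_tags("linux_x86_64", "x86_64"):
        print(tag)
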
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_musllinux.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_musllinux.py
deleted file mode 100644
index 85450fafa34..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_musllinux.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""PEP 656 support.
-
-This module implements logic to detect if the currently running Python is
-linked against musl, and what musl version is used.
-"""
-
-import contextlib
-import functools
-import operator
-import os
-import re
-import struct
-import subprocess
-import sys
-from typing import IO, Iterator, NamedTuple, Optional, Tuple
-
-
-def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
- return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
-def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
- """Detect musl libc location by parsing the Python executable.
-
- Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
- ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
- """
- f.seek(0)
- try:
- ident = _read_unpacked(f, "16B")
- except struct.error:
- return None
- if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
- return None
- f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
-
- try:
- # e_fmt: Format for program header.
- # p_fmt: Format for section header.
- # p_idx: Indexes to find p_type, p_offset, and p_filesz.
- e_fmt, p_fmt, p_idx = {
- 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
- 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
- }[ident[4]]
- except KeyError:
- return None
- else:
- p_get = operator.itemgetter(*p_idx)
-
- # Find the interpreter section and return its content.
- try:
- _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
- except struct.error:
- return None
- for i in range(e_phnum + 1):
- f.seek(e_phoff + e_phentsize * i)
- try:
- p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
- except struct.error:
- return None
- if p_type != 3: # Not PT_INTERP.
- continue
- f.seek(p_offset)
- interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
- if "musl" not in interpreter:
- return None
- return interpreter
- return None
-
-
-class _MuslVersion(NamedTuple):
- major: int
- minor: int
-
-
-def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
- lines = [n for n in (n.strip() for n in output.splitlines()) if n]
- if len(lines) < 2 or lines[0][:4] != "musl":
- return None
- m = re.match(r"Version (\d+)\.(\d+)", lines[1])
- if not m:
- return None
- return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
-
-def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
- """Detect currently-running musl runtime version.
-
- This is done by checking the specified executable's dynamic linking
- information, and invoking the loader to parse its output for a version
- string. If the loader is musl, the output would be something like::
-
- musl libc (x86_64)
- Version 1.2.2
- Dynamic Program Loader
- """
- with contextlib.ExitStack() as stack:
- try:
- f = stack.enter_context(open(executable, "rb"))
- except IOError:
- return None
- ld = _parse_ld_musl_from_elf(f)
- if not ld:
- return None
- proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
- return _parse_musl_version(proc.stderr)
-
-
-def platform_tags(arch: str) -> Iterator[str]:
- """Generate musllinux tags compatible to the current platform.
-
- :param arch: Should be the part of platform tag after the ``linux_``
- prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
- prerequisite for the current platform to be musllinux-compatible.
-
- :returns: An iterator of compatible musllinux tags.
- """
- sys_musl = _get_musl_version(sys.executable)
- if sys_musl is None: # Python not dynamically linked against musl.
- return
- for minor in range(sys_musl.minor, -1, -1):
- yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
-
-if __name__ == "__main__": # pragma: no cover
- import sysconfig
-
- plat = sysconfig.get_platform()
- assert plat.startswith("linux-"), "not linux"
-
- print("plat:", plat)
- print("musl:", _get_musl_version(sys.executable))
- print("tags:", end=" ")
- for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
- print(t, end="\n ")
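
A minimal sketch of the resulting tags, assuming the upstream `packaging` distribution (where this module ships as the private `packaging._musllinux`) and a musl-linked interpreter; on glibc systems the generator yields nothing:

    from packaging._musllinux import platform_tags

    # With musl 1.2 detected, minor versions count down to 0:
    print(list(platform_tags("x86_64")))
    # ['musllinux_1_2_x86_64', 'musllinux_1_1_x86_64', 'musllinux_1_0_x86_64']
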
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_structures.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_structures.py
deleted file mode 100644
index 951549753af..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/_structures.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-
-class InfinityType:
- def __repr__(self) -> str:
- return "Infinity"
-
- def __hash__(self) -> int:
- return hash(repr(self))
-
- def __lt__(self, other: object) -> bool:
- return False
-
- def __le__(self, other: object) -> bool:
- return False
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, self.__class__)
-
- def __ne__(self, other: object) -> bool:
- return not isinstance(other, self.__class__)
-
- def __gt__(self, other: object) -> bool:
- return True
-
- def __ge__(self, other: object) -> bool:
- return True
-
- def __neg__(self: object) -> "NegativeInfinityType":
- return NegativeInfinity
-
-
-Infinity = InfinityType()
-
-
-class NegativeInfinityType:
- def __repr__(self) -> str:
- return "-Infinity"
-
- def __hash__(self) -> int:
- return hash(repr(self))
-
- def __lt__(self, other: object) -> bool:
- return True
-
- def __le__(self, other: object) -> bool:
- return True
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, self.__class__)
-
- def __ne__(self, other: object) -> bool:
- return not isinstance(other, self.__class__)
-
- def __gt__(self, other: object) -> bool:
- return False
-
- def __ge__(self, other: object) -> bool:
- return False
-
- def __neg__(self: object) -> InfinityType:
- return Infinity
-
-
-NegativeInfinity = NegativeInfinityType()
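
These two singletons order above and below every other value, which is what lets the version code pad comparison keys of different lengths. A minimal sketch, assuming the upstream `packaging._structures` module:

    from packaging._structures import Infinity, NegativeInfinity

    Infinity > (9, 9)               # True: compares greater than anything
    NegativeInfinity < "abc"        # True: compares less than anything
    -Infinity is NegativeInfinity   # True: negation swaps the singletons
    # Typical use: pad a missing segment so shorter keys sort predictably.
    sorted([(1, Infinity), (1, 0)]) # [(1, 0), (1, Infinity)]
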
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/markers.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/markers.py
deleted file mode 100644
index eb0541b83a7..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/markers.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import operator
-import os
-import platform
-import sys
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-from setuptools.extern.pyparsing import ( # noqa: N817
- Forward,
- Group,
- Literal as L,
- ParseException,
- ParseResults,
- QuotedString,
- ZeroOrMore,
- stringEnd,
- stringStart,
-)
-
-from .specifiers import InvalidSpecifier, Specifier
-
-__all__ = [
- "InvalidMarker",
- "UndefinedComparison",
- "UndefinedEnvironmentName",
- "Marker",
- "default_environment",
-]
-
-Operator = Callable[[str, str], bool]
-
-
-class InvalidMarker(ValueError):
- """
- An invalid marker was found; users should refer to PEP 508.
- """
-
-
-class UndefinedComparison(ValueError):
- """
- An invalid operation was attempted on a value that doesn't support it.
- """
-
-
-class UndefinedEnvironmentName(ValueError):
- """
- An attempt was made to use a name that does not exist in the
- evaluation environment.
- """
-
-
-class Node:
- def __init__(self, value: Any) -> None:
- self.value = value
-
- def __str__(self) -> str:
- return str(self.value)
-
- def __repr__(self) -> str:
- return f"<{self.__class__.__name__}('{self}')>"
-
- def serialize(self) -> str:
- raise NotImplementedError
-
-
-class Variable(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-class Value(Node):
- def serialize(self) -> str:
- return f'"{self}"'
-
-
-class Op(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-VARIABLE = (
- L("implementation_version")
- | L("platform_python_implementation")
- | L("implementation_name")
- | L("python_full_version")
- | L("platform_release")
- | L("platform_version")
- | L("platform_machine")
- | L("platform_system")
- | L("python_version")
- | L("sys_platform")
- | L("os_name")
- | L("os.name") # PEP-345
- | L("sys.platform") # PEP-345
- | L("platform.version") # PEP-345
- | L("platform.machine") # PEP-345
- | L("platform.python_implementation") # PEP-345
- | L("python_implementation") # undocumented setuptools legacy
- | L("extra") # PEP-508
-)
-ALIASES = {
- "os.name": "os_name",
- "sys.platform": "sys_platform",
- "platform.version": "platform_version",
- "platform.machine": "platform_machine",
- "platform.python_implementation": "platform_python_implementation",
- "python_implementation": "platform_python_implementation",
-}
-VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
-
-VERSION_CMP = (
- L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
-)
-
-MARKER_OP = VERSION_CMP | L("not in") | L("in")
-MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
-
-MARKER_VALUE = QuotedString("'") | QuotedString('"')
-MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
-
-BOOLOP = L("and") | L("or")
-
-MARKER_VAR = VARIABLE | MARKER_VALUE
-
-MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
-MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
-
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-
-MARKER_EXPR = Forward()
-MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
-MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
-
-MARKER = stringStart + MARKER_EXPR + stringEnd
-
-
-def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
- if isinstance(results, ParseResults):
- return [_coerce_parse_result(i) for i in results]
- else:
- return results
-
-
-def _format_marker(
- marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
-) -> str:
-
- assert isinstance(marker, (list, tuple, str))
-
- # Sometimes we have a structure like [[...]] which is a single item list
- # where the single item is itself its own list. In that case we want to skip
- # the rest of this function so that we don't get extraneous () on the
- # outside.
- if (
- isinstance(marker, list)
- and len(marker) == 1
- and isinstance(marker[0], (list, tuple))
- ):
- return _format_marker(marker[0])
-
- if isinstance(marker, list):
- inner = (_format_marker(m, first=False) for m in marker)
- if first:
- return " ".join(inner)
- else:
- return "(" + " ".join(inner) + ")"
- elif isinstance(marker, tuple):
- return " ".join([m.serialize() for m in marker])
- else:
- return marker
-
-
-_operators: Dict[str, Operator] = {
- "in": lambda lhs, rhs: lhs in rhs,
- "not in": lambda lhs, rhs: lhs not in rhs,
- "<": operator.lt,
- "<=": operator.le,
- "==": operator.eq,
- "!=": operator.ne,
- ">=": operator.ge,
- ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
- try:
- spec = Specifier("".join([op.serialize(), rhs]))
- except InvalidSpecifier:
- pass
- else:
- return spec.contains(lhs)
-
- oper: Optional[Operator] = _operators.get(op.serialize())
- if oper is None:
- raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
- return oper(lhs, rhs)
-
-
-class Undefined:
- pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
- value: Union[str, Undefined] = environment.get(name, _undefined)
-
- if isinstance(value, Undefined):
- raise UndefinedEnvironmentName(
- f"{name!r} does not exist in evaluation environment."
- )
-
- return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
- groups: List[List[bool]] = [[]]
-
- for marker in markers:
- assert isinstance(marker, (list, tuple, str))
-
- if isinstance(marker, list):
- groups[-1].append(_evaluate_markers(marker, environment))
- elif isinstance(marker, tuple):
- lhs, op, rhs = marker
-
- if isinstance(lhs, Variable):
- lhs_value = _get_env(environment, lhs.value)
- rhs_value = rhs.value
- else:
- lhs_value = lhs.value
- rhs_value = _get_env(environment, rhs.value)
-
- groups[-1].append(_eval_op(lhs_value, op, rhs_value))
- else:
- assert marker in ["and", "or"]
- if marker == "or":
- groups.append([])
-
- return any(all(item) for item in groups)
-
-
-def format_full_version(info: "sys._version_info") -> str:
- version = "{0.major}.{0.minor}.{0.micro}".format(info)
- kind = info.releaselevel
- if kind != "final":
- version += kind[0] + str(info.serial)
- return version
-
-
-def default_environment() -> Dict[str, str]:
- iver = format_full_version(sys.implementation.version)
- implementation_name = sys.implementation.name
- return {
- "implementation_name": implementation_name,
- "implementation_version": iver,
- "os_name": os.name,
- "platform_machine": platform.machine(),
- "platform_release": platform.release(),
- "platform_system": platform.system(),
- "platform_version": platform.version(),
- "python_full_version": platform.python_version(),
- "platform_python_implementation": platform.python_implementation(),
- "python_version": ".".join(platform.python_version_tuple()[:2]),
- "sys_platform": sys.platform,
- }
-
-
-class Marker:
- def __init__(self, marker: str) -> None:
- try:
- self._markers = _coerce_parse_result(MARKER.parseString(marker))
- except ParseException as e:
- raise InvalidMarker(
- f"Invalid marker: {marker!r}, parse error at "
- f"{marker[e.loc : e.loc + 8]!r}"
- )
-
- def __str__(self) -> str:
- return _format_marker(self._markers)
-
- def __repr__(self) -> str:
- return f"<Marker('{self}')>"
-
- def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
- """Evaluate a marker.
-
- Return the boolean from evaluating the given marker against the
- environment. environment is an optional argument to override all or
- part of the determined environment.
-
- The environment is determined from the current Python process.
- """
- current_environment = default_environment()
- if environment is not None:
- current_environment.update(environment)
-
- return _evaluate_markers(self._markers, current_environment)
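
In practice a Marker is built from a PEP 508 string and evaluated, optionally with overrides. A minimal sketch; the vendored copy above imports through `setuptools.extern`, but the same API is public upstream as `packaging.markers`:

    from packaging.markers import Marker

    m = Marker('python_version >= "3.6" and os_name == "posix"')
    m.evaluate()                   # evaluated against the running interpreter
    m.evaluate({"os_name": "nt"})  # partial override -> False
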
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/requirements.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/requirements.py
deleted file mode 100644
index 0d93231b461..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/requirements.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-import string
-import urllib.parse
-from typing import List, Optional as TOptional, Set
-
-from setuptools.extern.pyparsing import ( # noqa
- Combine,
- Literal as L,
- Optional,
- ParseException,
- Regex,
- Word,
- ZeroOrMore,
- originalTextFor,
- stringEnd,
- stringStart,
-)
-
-from .markers import MARKER_EXPR, Marker
-from .specifiers import LegacySpecifier, Specifier, SpecifierSet
-
-
-class InvalidRequirement(ValueError):
- """
- An invalid requirement was found; users should refer to PEP 508.
- """
-
-
-ALPHANUM = Word(string.ascii_letters + string.digits)
-
-LBRACKET = L("[").suppress()
-RBRACKET = L("]").suppress()
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-COMMA = L(",").suppress()
-SEMICOLON = L(";").suppress()
-AT = L("@").suppress()
-
-PUNCTUATION = Word("-_.")
-IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
-IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
-
-NAME = IDENTIFIER("name")
-EXTRA = IDENTIFIER
-
-URI = Regex(r"[^ ]+")("url")
-URL = AT + URI
-
-EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
-EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
-
-VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
-VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
-
-VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
-VERSION_MANY = Combine(
- VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
-)("_raw_spec")
-_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
-_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
-
-VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
-VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
-
-MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
-MARKER_EXPR.setParseAction(
- lambda s, l, t: Marker(s[t._original_start : t._original_end])
-)
-MARKER_SEPARATOR = SEMICOLON
-MARKER = MARKER_SEPARATOR + MARKER_EXPR
-
-VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
-URL_AND_MARKER = URL + Optional(MARKER)
-
-NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
-
-REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
-# setuptools.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
-# issue #104
-REQUIREMENT.parseString("x[]")
-
-
-class Requirement:
- """Parse a requirement.
-
- Parse a given requirement string into its parts, such as name, specifier,
- URL, and extras. Raises InvalidRequirement on a badly-formed requirement
- string.
- """
-
- # TODO: Can we test whether something is contained within a requirement?
- # If so how do we do that? Do we need to test against the _name_ of
- # the thing as well as the version? What about the markers?
- # TODO: Can we normalize the name and extra name?
-
- def __init__(self, requirement_string: str) -> None:
- try:
- req = REQUIREMENT.parseString(requirement_string)
- except ParseException as e:
- raise InvalidRequirement(
- f'Parse error at "{requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
- )
-
- self.name: str = req.name
- if req.url:
- parsed_url = urllib.parse.urlparse(req.url)
- if parsed_url.scheme == "file":
- if urllib.parse.urlunparse(parsed_url) != req.url:
- raise InvalidRequirement("Invalid URL given")
- elif not (parsed_url.scheme and parsed_url.netloc) or (
- not parsed_url.scheme and not parsed_url.netloc
- ):
- raise InvalidRequirement(f"Invalid URL: {req.url}")
- self.url: TOptional[str] = req.url
- else:
- self.url = None
- self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
- self.specifier: SpecifierSet = SpecifierSet(req.specifier)
- self.marker: TOptional[Marker] = req.marker if req.marker else None
-
- def __str__(self) -> str:
- parts: List[str] = [self.name]
-
- if self.extras:
- formatted_extras = ",".join(sorted(self.extras))
- parts.append(f"[{formatted_extras}]")
-
- if self.specifier:
- parts.append(str(self.specifier))
-
- if self.url:
- parts.append(f"@ {self.url}")
- if self.marker:
- parts.append(" ")
-
- if self.marker:
- parts.append(f"; {self.marker}")
-
- return "".join(parts)
-
- def __repr__(self) -> str:
- return f"<Requirement('{self}')>"
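
A minimal sketch of the parse results; the same API is public upstream as `packaging.requirements`:

    from packaging.requirements import Requirement

    req = Requirement('requests[security]>=2.8.1; python_version < "2.7"')
    req.name            # 'requests'
    req.extras          # {'security'}
    str(req.specifier)  # '>=2.8.1'
    str(req.marker)     # 'python_version < "2.7"'
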
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/specifiers.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/specifiers.py
deleted file mode 100644
index ce66bd4addb..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/specifiers.py
+++ /dev/null
@@ -1,828 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import abc
-import functools
-import itertools
-import re
-import warnings
-from typing import (
- Callable,
- Dict,
- Iterable,
- Iterator,
- List,
- Optional,
- Pattern,
- Set,
- Tuple,
- TypeVar,
- Union,
-)
-
-from .utils import canonicalize_version
-from .version import LegacyVersion, Version, parse
-
-ParsedVersion = Union[Version, LegacyVersion]
-UnparsedVersion = Union[Version, LegacyVersion, str]
-VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
-CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
-class InvalidSpecifier(ValueError):
- """
- An invalid specifier was found; users should refer to PEP 440.
- """
-
-
-class BaseSpecifier(metaclass=abc.ABCMeta):
- @abc.abstractmethod
- def __str__(self) -> str:
- """
- Returns the str representation of this Specifier like object. This
- should be representative of the Specifier itself.
- """
-
- @abc.abstractmethod
- def __hash__(self) -> int:
- """
- Returns a hash value for this Specifier like object.
- """
-
- @abc.abstractmethod
- def __eq__(self, other: object) -> bool:
- """
- Returns a boolean representing whether or not the two Specifier like
- objects are equal.
- """
-
- @abc.abstractmethod
- def __ne__(self, other: object) -> bool:
- """
- Returns a boolean representing whether or not the two Specifier like
- objects are not equal.
- """
-
- @abc.abstractproperty
- def prereleases(self) -> Optional[bool]:
- """
- Returns whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- """
- Sets whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @abc.abstractmethod
- def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
- """
- Determines if the given item is contained within this specifier.
- """
-
- @abc.abstractmethod
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
- """
- Takes an iterable of items and filters them so that only items which
- are contained within this specifier are allowed in it.
- """
-
-
-class _IndividualSpecifier(BaseSpecifier):
-
- _operators: Dict[str, str] = {}
- _regex: Pattern[str]
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- match = self._regex.search(spec)
- if not match:
- raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
- self._spec: Tuple[str, str] = (
- match.group("operator").strip(),
- match.group("version").strip(),
- )
-
- # Store whether or not this Specifier should accept prereleases
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre)
-
- def __str__(self) -> str:
- return "{}{}".format(*self._spec)
-
- @property
- def _canonical_spec(self) -> Tuple[str, str]:
- return self._spec[0], canonicalize_version(self._spec[1])
-
- def __hash__(self) -> int:
- return hash(self._canonical_spec)
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, str):
- try:
- other = self.__class__(str(other))
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
-
- return self._canonical_spec == other._canonical_spec
-
- def __ne__(self, other: object) -> bool:
- if isinstance(other, str):
- try:
- other = self.__class__(str(other))
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
-
- return self._spec != other._spec
-
- def _get_operator(self, op: str) -> CallableOperator:
- operator_callable: CallableOperator = getattr(
- self, f"_compare_{self._operators[op]}"
- )
- return operator_callable
-
- def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
- if not isinstance(version, (LegacyVersion, Version)):
- version = parse(version)
- return version
-
- @property
- def operator(self) -> str:
- return self._spec[0]
-
- @property
- def version(self) -> str:
- return self._spec[1]
-
- @property
- def prereleases(self) -> Optional[bool]:
- return self._prereleases
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: str) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Determine if prereleases are to be allowed or not.
- if prereleases is None:
- prereleases = self.prereleases
-
- # Normalize item to a Version or LegacyVersion; this allows us to have
- # a shortcut for ``"2.0" in Specifier(">=2")``.
- normalized_item = self._coerce_version(item)
-
- # Determine if we should be supporting prereleases in this specifier
- # or not. If we do not support prereleases, we can short-circuit the
- # logic when this version is a prerelease.
- if normalized_item.is_prerelease and not prereleases:
- return False
-
- # Actually do the comparison to determine if this item is contained
- # within this Specifier or not.
- operator_callable: CallableOperator = self._get_operator(self.operator)
- return operator_callable(normalized_item, self.version)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- yielded = False
- found_prereleases = []
-
- kw = {"prereleases": prereleases if prereleases is not None else True}
-
- # Attempt to iterate over all the values in the iterable and if any of
- # them match, yield them.
- for version in iterable:
- parsed_version = self._coerce_version(version)
-
- if self.contains(parsed_version, **kw):
- # If our version is a prerelease, and we were not set to allow
- # prereleases, then we'll store it for later in case nothing
- # else matches this specifier.
- if parsed_version.is_prerelease and not (
- prereleases or self.prereleases
- ):
- found_prereleases.append(version)
- # Either this is not a prerelease, or we should have been
- # accepting prereleases from the beginning.
- else:
- yielded = True
- yield version
-
- # Now that we've iterated over everything, determine if we've yielded
- # any values. If we have not, and we have prereleases stored up, then
- # we will go ahead and yield the prereleases.
- if not yielded and found_prereleases:
- for version in found_prereleases:
- yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(==|!=|<=|>=|<|>))
- \s*
- (?P<version>
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
- # string can be just about anything, we match everything
- # except for whitespace, a semi-colon for marker support,
- # a closing paren since versions can be enclosed in
- # them, and a comma since it's a version separator.
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- }
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- super().__init__(spec, prereleases)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
- if not isinstance(version, LegacyVersion):
- version = LegacyVersion(str(version))
- return version
-
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective == self._coerce_version(spec)
-
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective != self._coerce_version(spec)
-
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective <= self._coerce_version(spec)
-
- def _compare_greater_than_equal(
- self, prospective: LegacyVersion, spec: str
- ) -> bool:
- return prospective >= self._coerce_version(spec)
-
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective < self._coerce_version(spec)
-
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
- fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
- @functools.wraps(fn)
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
- if not isinstance(prospective, Version):
- return False
- return fn(self, prospective, spec)
-
- return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
- (?P<version>
- (?:
- # The identity operators allow for an escape hatch that will
- # do an exact string match of the version you wish to install.
- # This will not be parsed by PEP 440 and we cannot determine
- # any semantic meaning from it. This operator is discouraged
- # but included entirely as an escape hatch.
- (?<====) # Only match for the identity operator
- \s*
- [^\s]* # We just match everything, except for whitespace
- # since we are only testing for strict identity.
- )
- |
- (?:
- # The (non)equality operators allow for wild card and local
- # versions to be specified so we have to define these two
- # operators separately to enable that.
- (?<===|!=) # Only match for equals and not equals
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
-
- # You cannot use a wild card and a dev or local version
- # together so group them with a | and make them optional.
- (?:
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
- |
- \.\* # Wild card syntax of .*
- )?
- )
- |
- (?:
- # The compatible operator requires at least two digits in the
- # release segment.
- (?<=~=) # Only match for the compatible operator
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- |
- (?:
- # All other operators only allow a subset of what the
- # (non)equality operators do. Specifically they do not allow
- # local versions to be specified nor do they allow the prefix
- # matching wild cards.
- (?<!==|!=|~=) # We have special cases for these
- # operators so we want to make sure they
- # don't match here.
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "~=": "compatible",
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- "===": "arbitrary",
- }
-
- @_require_version_compare
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # Compatible releases have an equivalent combination of >= and ==. That
- # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
- # implement this in terms of the other specifiers instead of
- # implementing it ourselves. The only thing we need to do is construct
- # the other specifiers.
-
- # We want everything but the last item in the version, but we want to
- # ignore suffix segments.
- prefix = ".".join(
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
- )
-
- # Add the prefix notation to the end of our string
- prefix += ".*"
-
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
- prospective, prefix
- )
-
- @_require_version_compare
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # We need special logic to handle prefix matching
- if spec.endswith(".*"):
- # In the case of prefix matching we want to ignore local segment.
- prospective = Version(prospective.public)
- # Split the spec out by dots, and pretend that there is an implicit
- # dot in between a release segment and a pre-release segment.
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
-
- # Split the prospective version out by dots, and pretend that there
- # is an implicit dot in between a release segment and a pre-release
- # segment.
- split_prospective = _version_split(str(prospective))
-
- # Shorten the prospective version to be the same length as the spec
- # so that we can determine if the specifier is a prefix of the
- # prospective version or not.
- shortened_prospective = split_prospective[: len(split_spec)]
-
- # Pad out our two sides with zeros so that they both equal the same
- # length.
- padded_spec, padded_prospective = _pad_version(
- split_spec, shortened_prospective
- )
-
- return padded_prospective == padded_spec
- else:
- # Convert our spec string into a Version
- spec_version = Version(spec)
-
- # If the specifier does not have a local segment, then we want to
- # act as if the prospective version also does not have a local
- # segment.
- if not spec_version.local:
- prospective = Version(prospective.public)
-
- return prospective == spec_version
-
- @_require_version_compare
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
- return not self._compare_equal(prospective, spec)
-
- @_require_version_compare
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) <= Version(spec)
-
- @_require_version_compare
- def _compare_greater_than_equal(
- self, prospective: ParsedVersion, spec: str
- ) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) >= Version(spec)
-
- @_require_version_compare
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is less than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective < spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes a pre-release version, we do not accept pre-release
- # versions for the version mentioned in the specifier (e.g. <3.1 should
- # not match 3.1.dev0, but should match 3.0.dev0).
- if not spec.is_prerelease and prospective.is_prerelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that prospective version is both
- # less than the spec version *and* it's not a pre-release of the same
- # version in the spec.
- return True
-
- @_require_version_compare
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is greater than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective > spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes a post-release version, we do not accept
- # post-release versions for the version mentioned in the specifier
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
- if not spec.is_postrelease and prospective.is_postrelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # Ensure that we do not allow a local version of the version mentioned
- # in the specifier, which is technically greater than, to match.
- if prospective.local is not None:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that prospective version is both
- # greater than the spec version *and* it's not a post-release or local
- # version of the same version in the spec.
- return True
-
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
- return str(prospective).lower() == str(spec).lower()
-
- @property
- def prereleases(self) -> bool:
-
- # If there is an explicit prereleases set for this, then we'll just
- # blindly use that.
- if self._prereleases is not None:
- return self._prereleases
-
- # Look at all of our specifiers and determine if they are inclusive
- # operators, and if they are if they are including an explicit
- # prerelease.
- operator, version = self._spec
- if operator in ["==", ">=", "<=", "~=", "==="]:
- # The == specifier can include a trailing .*; if it does, we
- # want to remove it before parsing.
- if operator == "==" and version.endswith(".*"):
- version = version[:-2]
-
- # Parse the version, and if it is a pre-release then this
- # specifier allows pre-releases.
- if parse(version).is_prerelease:
- return True
-
- return False
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
-
-_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-
-
-def _version_split(version: str) -> List[str]:
- result: List[str] = []
- for item in version.split("."):
- match = _prefix_regex.search(item)
- if match:
- result.extend(match.groups())
- else:
- result.append(item)
- return result
-
-
-def _is_not_suffix(segment: str) -> bool:
- return not any(
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
- )
-
-
-def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
- left_split, right_split = [], []
-
- # Get the release segment of our versions
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
-
- # Get the rest of our versions
- left_split.append(left[len(left_split[0]) :])
- right_split.append(right[len(right_split[0]) :])
-
- # Insert our padding
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
-
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
-
-
-class SpecifierSet(BaseSpecifier):
- def __init__(
- self, specifiers: str = "", prereleases: Optional[bool] = None
- ) -> None:
-
- # Split on , to break each individual specifier into its own item, and
- # strip each item to remove leading/trailing whitespace.
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
- # Parse each individual specifier, attempting first to make it a
- # Specifier and falling back to a LegacySpecifier.
- parsed: Set[_IndividualSpecifier] = set()
- for specifier in split_specifiers:
- try:
- parsed.add(Specifier(specifier))
- except InvalidSpecifier:
- parsed.add(LegacySpecifier(specifier))
-
- # Turn our parsed specifiers into a frozen set and save them for later.
- self._specs = frozenset(parsed)
-
- # Store our prereleases value so we can use it later to determine if
- # we accept prereleases or not.
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return "<SpecifierSet({!r}{})>".format(str(self), pre)
-
- def __str__(self) -> str:
- return ",".join(sorted(str(s) for s in self._specs))
-
- def __hash__(self) -> int:
- return hash(self._specs)
-
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
- if isinstance(other, str):
- other = SpecifierSet(other)
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- specifier = SpecifierSet()
- specifier._specs = frozenset(self._specs | other._specs)
-
- if self._prereleases is None and other._prereleases is not None:
- specifier._prereleases = other._prereleases
- elif self._prereleases is not None and other._prereleases is None:
- specifier._prereleases = self._prereleases
- elif self._prereleases == other._prereleases:
- specifier._prereleases = self._prereleases
- else:
- raise ValueError(
- "Cannot combine SpecifierSets with True and False prerelease "
- "overrides."
- )
-
- return specifier
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, (str, _IndividualSpecifier)):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs == other._specs
-
- def __ne__(self, other: object) -> bool:
- if isinstance(other, (str, _IndividualSpecifier)):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs != other._specs
-
- def __len__(self) -> int:
- return len(self._specs)
-
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
- return iter(self._specs)
-
- @property
- def prereleases(self) -> Optional[bool]:
-
- # If we have been given an explicit prerelease modifier, then we'll
- # pass that through here.
- if self._prereleases is not None:
- return self._prereleases
-
- # If we don't have any specifiers, and we don't have a forced value,
- # then we'll just return None since we don't know if this should have
- # pre-releases or not.
- if not self._specs:
- return None
-
- # Otherwise, we'll see if any of the given specifiers accept
- # prereleases; if any of them do we'll return True, otherwise False.
- return any(s.prereleases for s in self._specs)
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: UnparsedVersion) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Ensure that our item is a Version or LegacyVersion instance.
- if not isinstance(item, (LegacyVersion, Version)):
- item = parse(item)
-
- # Determine if we're forcing a prerelease or not; if we're not forcing
- # one for this particular call, then we'll use whatever the
- # SpecifierSet thinks about whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # We can determine if we're going to allow pre-releases by looking to
- # see if any of the underlying items supports them. If none of them do
- # and this item is a pre-release then we do not allow it and we can
- # short circuit that here.
- # Note: This means that 1.0.dev1 would not be contained in something
- # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
- if not prereleases and item.is_prerelease:
- return False
-
- # We simply dispatch to the underlying specs here to make sure that the
- # given version is contained within all of them.
- # Note: This use of all() here means that an empty set of specifiers
- # will always return True; this is an explicit design decision.
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- # Determine if we're forcing a prerelease or not; if we're not forcing
- # one for this particular filter call, then we'll use whatever the
- # SpecifierSet thinks about whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # If we have any specifiers, then we want to wrap our iterable in the
- # filter method for each one; this will act as a logical AND across
- # the specifiers.
- if self._specs:
- for spec in self._specs:
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
- return iterable
- # If we do not have any specifiers, then we need to have a rough filter
- # which will filter out any pre-releases, unless there are no final
- # releases, and which will filter out LegacyVersion in general.
- else:
- filtered: List[VersionTypeVar] = []
- found_prereleases: List[VersionTypeVar] = []
-
- item: UnparsedVersion
- parsed_version: Union[Version, LegacyVersion]
-
- for item in iterable:
- # Ensure that we have some kind of Version class for this item.
- if not isinstance(item, (LegacyVersion, Version)):
- parsed_version = parse(item)
- else:
- parsed_version = item
-
- # Filter out any item which is parsed as a LegacyVersion
- if isinstance(parsed_version, LegacyVersion):
- continue
-
- # Store any item which is a pre-release for later unless we've
- # already found a final version or we are accepting prereleases
- if parsed_version.is_prerelease and not prereleases:
- if not filtered:
- found_prereleases.append(item)
- else:
- filtered.append(item)
-
- # If we've found no items except for pre-releases, then we'll go
- # ahead and use the pre-releases
- if not filtered and found_prereleases and prereleases is None:
- return found_prereleases
-
- return filtered
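
Note: the SpecifierSet class deleted above implements PEP 440 range matching,
ANDing comma-separated clauses together. As a minimal sketch of how callers
use this API, assuming the upstream packaging distribution (which this
vendored copy tracks) remains available:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">=1.0,<2.0")          # clauses are ANDed together
    print(Version("1.4") in spec)              # True
    print(spec.contains("2.0"))                # False
    # Pre-releases are excluded unless a clause mentions one or the
    # caller opts in explicitly.
    print(spec.contains("1.5rc1"))                    # False
    print(spec.contains("1.5rc1", prereleases=True))  # True
    # filter() applies the same rules across a whole iterable.
    print(list(spec.filter(["0.9", "1.0", "1.5", "2.1"])))  # ['1.0', '1.5']
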
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/tags.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/tags.py
deleted file mode 100644
index e65890a90cd..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/tags.py
+++ /dev/null
@@ -1,484 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import logging
-import platform
-import sys
-import sysconfig
-from importlib.machinery import EXTENSION_SUFFIXES
-from typing import (
- Dict,
- FrozenSet,
- Iterable,
- Iterator,
- List,
- Optional,
- Sequence,
- Tuple,
- Union,
- cast,
-)
-
-from . import _manylinux, _musllinux
-
-logger = logging.getLogger(__name__)
-
-PythonVersion = Sequence[int]
-MacVersion = Tuple[int, int]
-
-INTERPRETER_SHORT_NAMES: Dict[str, str] = {
- "python": "py", # Generic.
- "cpython": "cp",
- "pypy": "pp",
- "ironpython": "ip",
- "jython": "jy",
-}
-
-
-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
-
-
-class Tag:
- """
- A representation of the tag triple for a wheel.
-
- Instances are considered immutable and thus are hashable. Equality checking
- is also supported.
- """
-
- __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
-
- def __init__(self, interpreter: str, abi: str, platform: str) -> None:
- self._interpreter = interpreter.lower()
- self._abi = abi.lower()
- self._platform = platform.lower()
- # The __hash__ of every single element in a Set[Tag] will be evaluated each time
- # that a set calls its `.isdisjoint()` method, which may be called hundreds of
- # times when scanning a page of links for packages with tags matching that
- # Set[Tag]. Pre-computing the value here produces significant speedups for
- # downstream consumers.
- self._hash = hash((self._interpreter, self._abi, self._platform))
-
- @property
- def interpreter(self) -> str:
- return self._interpreter
-
- @property
- def abi(self) -> str:
- return self._abi
-
- @property
- def platform(self) -> str:
- return self._platform
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, Tag):
- return NotImplemented
-
- return (
- (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
- and (self._platform == other._platform)
- and (self._abi == other._abi)
- and (self._interpreter == other._interpreter)
- )
-
- def __hash__(self) -> int:
- return self._hash
-
- def __str__(self) -> str:
- return f"{self._interpreter}-{self._abi}-{self._platform}"
-
- def __repr__(self) -> str:
- return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
-
-
-def parse_tag(tag: str) -> FrozenSet[Tag]:
- """
- Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
-
- Returning a set is required due to the possibility that the tag is a
- compressed tag set.
- """
- tags = set()
- interpreters, abis, platforms = tag.split("-")
- for interpreter in interpreters.split("."):
- for abi in abis.split("."):
- for platform_ in platforms.split("."):
- tags.add(Tag(interpreter, abi, platform_))
- return frozenset(tags)
-
-
-def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
- value = sysconfig.get_config_var(name)
- if value is None and warn:
- logger.debug(
- "Config variable '%s' is unset, Python ABI tag may be incorrect", name
- )
- return value
-
-
-def _normalize_string(string: str) -> str:
- return string.replace(".", "_").replace("-", "_")
-
-
-def _abi3_applies(python_version: PythonVersion) -> bool:
- """
- Determine if the Python version supports abi3.
-
- PEP 384 was first implemented in Python 3.2.
- """
- return len(python_version) > 1 and tuple(python_version) >= (3, 2)
-
-
-def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
- py_version = tuple(py_version) # To allow for version comparison.
- abis = []
- version = _version_nodot(py_version[:2])
- debug = pymalloc = ucs4 = ""
- with_debug = _get_config_var("Py_DEBUG", warn)
- has_refcount = hasattr(sys, "gettotalrefcount")
- # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
- # extension modules is the best option.
- # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
- has_ext = "_d.pyd" in EXTENSION_SUFFIXES
- if with_debug or (with_debug is None and (has_refcount or has_ext)):
- debug = "d"
- if py_version < (3, 8):
- with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
- if with_pymalloc or with_pymalloc is None:
- pymalloc = "m"
- if py_version < (3, 3):
- unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
- if unicode_size == 4 or (
- unicode_size is None and sys.maxunicode == 0x10FFFF
- ):
- ucs4 = "u"
- elif debug:
- # Debug builds can also load "normal" extension modules.
- # We can also assume no UCS-4 or pymalloc requirement.
- abis.append(f"cp{version}")
- abis.insert(
- 0,
- "cp{version}{debug}{pymalloc}{ucs4}".format(
- version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
- ),
- )
- return abis
-
-
-def cpython_tags(
- python_version: Optional[PythonVersion] = None,
- abis: Optional[Iterable[str]] = None,
- platforms: Optional[Iterable[str]] = None,
- *,
- warn: bool = False,
-) -> Iterator[Tag]:
- """
- Yields the tags for a CPython interpreter.
-
- The tags consist of:
- - cp<python_version>-<abi>-<platform>
- - cp<python_version>-abi3-<platform>
- - cp<python_version>-none-<platform>
- - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
-
- If python_version only specifies a major version then user-provided ABIs and
- the 'none' ABI tag will be used.
-
- If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
- their normal position and not at the beginning.
- """
- if not python_version:
- python_version = sys.version_info[:2]
-
- interpreter = "cp{}".format(_version_nodot(python_version[:2]))
-
- if abis is None:
- if len(python_version) > 1:
- abis = _cpython_abis(python_version, warn)
- else:
- abis = []
- abis = list(abis)
- # 'abi3' and 'none' are explicitly handled later.
- for explicit_abi in ("abi3", "none"):
- try:
- abis.remove(explicit_abi)
- except ValueError:
- pass
-
- platforms = list(platforms or platform_tags())
- for abi in abis:
- for platform_ in platforms:
- yield Tag(interpreter, abi, platform_)
- if _abi3_applies(python_version):
- yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
- yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
- if _abi3_applies(python_version):
- for minor_version in range(python_version[1] - 1, 1, -1):
- for platform_ in platforms:
- interpreter = "cp{version}".format(
- version=_version_nodot((python_version[0], minor_version))
- )
- yield Tag(interpreter, "abi3", platform_)
-
-
-def _generic_abi() -> Iterator[str]:
- abi = sysconfig.get_config_var("SOABI")
- if abi:
- yield _normalize_string(abi)
-
-
-def generic_tags(
- interpreter: Optional[str] = None,
- abis: Optional[Iterable[str]] = None,
- platforms: Optional[Iterable[str]] = None,
- *,
- warn: bool = False,
-) -> Iterator[Tag]:
- """
- Yields the tags for a generic interpreter.
-
- The tags consist of:
- - <interpreter>-<abi>-<platform>
-
- The "none" ABI will be added if it was not explicitly provided.
- """
- if not interpreter:
- interp_name = interpreter_name()
- interp_version = interpreter_version(warn=warn)
- interpreter = "".join([interp_name, interp_version])
- if abis is None:
- abis = _generic_abi()
- platforms = list(platforms or platform_tags())
- abis = list(abis)
- if "none" not in abis:
- abis.append("none")
- for abi in abis:
- for platform_ in platforms:
- yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
- """
- Yields Python versions in descending order.
-
- After the latest version, the major-only version will be yielded, and then
- all previous versions of that major version.
- """
- if len(py_version) > 1:
- yield "py{version}".format(version=_version_nodot(py_version[:2]))
- yield "py{major}".format(major=py_version[0])
- if len(py_version) > 1:
- for minor in range(py_version[1] - 1, -1, -1):
- yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
-
-
-def compatible_tags(
- python_version: Optional[PythonVersion] = None,
- interpreter: Optional[str] = None,
- platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
- """
- Yields the sequence of tags that are compatible with a specific version of Python.
-
- The tags consist of:
- - py*-none-<platform>
- - <interpreter>-none-any # ... if `interpreter` is provided.
- - py*-none-any
- """
- if not python_version:
- python_version = sys.version_info[:2]
- platforms = list(platforms or platform_tags())
- for version in _py_interpreter_range(python_version):
- for platform_ in platforms:
- yield Tag(version, "none", platform_)
- if interpreter:
- yield Tag(interpreter, "none", "any")
- for version in _py_interpreter_range(python_version):
- yield Tag(version, "none", "any")
-
-
-def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
- if not is_32bit:
- return arch
-
- if arch.startswith("ppc"):
- return "ppc"
-
- return "i386"
-
-
-def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
- formats = [cpu_arch]
- if cpu_arch == "x86_64":
- if version < (10, 4):
- return []
- formats.extend(["intel", "fat64", "fat32"])
-
- elif cpu_arch == "i386":
- if version < (10, 4):
- return []
- formats.extend(["intel", "fat32", "fat"])
-
- elif cpu_arch == "ppc64":
- # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
- if version > (10, 5) or version < (10, 4):
- return []
- formats.append("fat64")
-
- elif cpu_arch == "ppc":
- if version > (10, 6):
- return []
- formats.extend(["fat32", "fat"])
-
- if cpu_arch in {"arm64", "x86_64"}:
- formats.append("universal2")
-
- if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
- formats.append("universal")
-
- return formats
-
-
-def mac_platforms(
- version: Optional[MacVersion] = None, arch: Optional[str] = None
-) -> Iterator[str]:
- """
- Yields the platform tags for a macOS system.
-
- The `version` parameter is a two-item tuple specifying the macOS version to
- generate platform tags for. The `arch` parameter is the CPU architecture to
- generate platform tags for. Both parameters default to the appropriate value
- for the current system.
- """
- version_str, _, cpu_arch = platform.mac_ver()
- if version is None:
- version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
- else:
- version = version
- if arch is None:
- arch = _mac_arch(cpu_arch)
- else:
- arch = arch
-
- if (10, 0) <= version and version < (11, 0):
- # Prior to Mac OS 11, each yearly release of Mac OS bumped the
- # "minor" version number. The major version was always 10.
- for minor_version in range(version[1], -1, -1):
- compat_version = 10, minor_version
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=10, minor=minor_version, binary_format=binary_format
- )
-
- if version >= (11, 0):
- # Starting with Mac OS 11, each yearly release bumps the major version
- # number. The minor versions are now the midyear updates.
- for major_version in range(version[0], 10, -1):
- compat_version = major_version, 0
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=major_version, minor=0, binary_format=binary_format
- )
-
- if version >= (11, 0):
- # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
- # Arm64 support was introduced in 11.0, so no Arm binaries from previous
- # releases exist.
- #
- # However, the "universal2" binary format can have a
- # macOS version earlier than 11.0 when the x86_64 part of the binary supports
- # that version of macOS.
- if arch == "x86_64":
- for minor_version in range(16, 3, -1):
- compat_version = 10, minor_version
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=compat_version[0],
- minor=compat_version[1],
- binary_format=binary_format,
- )
- else:
- for minor_version in range(16, 3, -1):
- compat_version = 10, minor_version
- binary_format = "universal2"
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=compat_version[0],
- minor=compat_version[1],
- binary_format=binary_format,
- )
-
-
-def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
- linux = _normalize_string(sysconfig.get_platform())
- if is_32bit:
- if linux == "linux_x86_64":
- linux = "linux_i686"
- elif linux == "linux_aarch64":
- linux = "linux_armv7l"
- _, arch = linux.split("_", 1)
- yield from _manylinux.platform_tags(linux, arch)
- yield from _musllinux.platform_tags(arch)
- yield linux
-
-
-def _generic_platforms() -> Iterator[str]:
- yield _normalize_string(sysconfig.get_platform())
-
-
-def platform_tags() -> Iterator[str]:
- """
- Provides the platform tags for this installation.
- """
- if platform.system() == "Darwin":
- return mac_platforms()
- elif platform.system() == "Linux":
- return _linux_platforms()
- else:
- return _generic_platforms()
-
-
-def interpreter_name() -> str:
- """
- Returns the name of the running interpreter.
- """
- name = sys.implementation.name
- return INTERPRETER_SHORT_NAMES.get(name) or name
-
-
-def interpreter_version(*, warn: bool = False) -> str:
- """
- Returns the version of the running interpreter.
- """
- version = _get_config_var("py_version_nodot", warn=warn)
- if version:
- version = str(version)
- else:
- version = _version_nodot(sys.version_info[:2])
- return version
-
-
-def _version_nodot(version: PythonVersion) -> str:
- return "".join(map(str, version))
-
-
-def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
- """
- Returns the sequence of tag triples for the running interpreter.
-
- The order of the sequence corresponds to priority order for the
- interpreter, from most to least important.
- """
-
- interp_name = interpreter_name()
- if interp_name == "cp":
- yield from cpython_tags(warn=warn)
- else:
- yield from generic_tags()
-
- yield from compatible_tags()
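
Note: tags.py above computes wheel compatibility tags. A short sketch of its
public entry points, assuming the same behavior as upstream packaging.tags
(the exact output of sys_tags() is environment-dependent):

    from packaging.tags import parse_tag, sys_tags

    # A compressed tag set expands to the cross product of its dotted parts.
    tags = parse_tag("cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64")
    print(sorted(str(t) for t in tags))
    # ['cp39-cp39-manylinux2014_x86_64', 'cp39-cp39-manylinux_2_17_x86_64']

    # sys_tags() yields the tags supported by the running interpreter,
    # highest priority first; installers match candidate wheels in this order.
    best = next(sys_tags())
    print(best.interpreter, best.abi, best.platform)
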
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/utils.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/utils.py
deleted file mode 100644
index bab11b80c60..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-from typing import FrozenSet, NewType, Tuple, Union, cast
-
-from .tags import Tag, parse_tag
-from .version import InvalidVersion, Version
-
-BuildTag = Union[Tuple[()], Tuple[int, str]]
-NormalizedName = NewType("NormalizedName", str)
-
-
-class InvalidWheelFilename(ValueError):
- """
- An invalid wheel filename was found; users should refer to PEP 427.
- """
-
-
-class InvalidSdistFilename(ValueError):
- """
- An invalid sdist filename was found; users should refer to the packaging user guide.
- """
-
-
-_canonicalize_regex = re.compile(r"[-_.]+")
-# PEP 427: The build number must start with a digit.
-_build_tag_regex = re.compile(r"(\d+)(.*)")
-
-
-def canonicalize_name(name: str) -> NormalizedName:
- # This is taken from PEP 503.
- value = _canonicalize_regex.sub("-", name).lower()
- return cast(NormalizedName, value)
-
-
-def canonicalize_version(version: Union[Version, str]) -> str:
- """
- This is very similar to Version.__str__, but has one subtle difference
- with the way it handles the release segment.
- """
- if isinstance(version, str):
- try:
- parsed = Version(version)
- except InvalidVersion:
- # Legacy versions cannot be normalized
- return version
- else:
- parsed = version
-
- parts = []
-
- # Epoch
- if parsed.epoch != 0:
- parts.append(f"{parsed.epoch}!")
-
- # Release segment
- # NB: This strips trailing '.0's to normalize
- parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
-
- # Pre-release
- if parsed.pre is not None:
- parts.append("".join(str(x) for x in parsed.pre))
-
- # Post-release
- if parsed.post is not None:
- parts.append(f".post{parsed.post}")
-
- # Development release
- if parsed.dev is not None:
- parts.append(f".dev{parsed.dev}")
-
- # Local version segment
- if parsed.local is not None:
- parts.append(f"+{parsed.local}")
-
- return "".join(parts)
-
-
-def parse_wheel_filename(
- filename: str,
-) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
- if not filename.endswith(".whl"):
- raise InvalidWheelFilename(
- f"Invalid wheel filename (extension must be '.whl'): {filename}"
- )
-
- filename = filename[:-4]
- dashes = filename.count("-")
- if dashes not in (4, 5):
- raise InvalidWheelFilename(
- f"Invalid wheel filename (wrong number of parts): {filename}"
- )
-
- parts = filename.split("-", dashes - 2)
- name_part = parts[0]
- # See PEP 427 for the rules on escaping the project name
- if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
- raise InvalidWheelFilename(f"Invalid project name: {filename}")
- name = canonicalize_name(name_part)
- version = Version(parts[1])
- if dashes == 5:
- build_part = parts[2]
- build_match = _build_tag_regex.match(build_part)
- if build_match is None:
- raise InvalidWheelFilename(
- f"Invalid build number: {build_part} in '{filename}'"
- )
- build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
- else:
- build = ()
- tags = parse_tag(parts[-1])
- return (name, version, build, tags)
-
-
-def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
- if filename.endswith(".tar.gz"):
- file_stem = filename[: -len(".tar.gz")]
- elif filename.endswith(".zip"):
- file_stem = filename[: -len(".zip")]
- else:
- raise InvalidSdistFilename(
- f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
- f" {filename}"
- )
-
- # We are requiring a PEP 440 version, which cannot contain dashes,
- # so we split on the last dash.
- name_part, sep, version_part = file_stem.rpartition("-")
- if not sep:
- raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
-
- name = canonicalize_name(name_part)
- version = Version(version_part)
- return (name, version)
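
Note: utils.py above handles name normalization and filename parsing. A brief
sketch, assuming it behaves like upstream packaging.utils:

    from packaging.utils import (
        canonicalize_name,
        canonicalize_version,
        parse_wheel_filename,
    )

    # PEP 503 normalization: runs of '-', '_' and '.' collapse to a single
    # '-' and the result is lowercased.
    print(canonicalize_name("Foo__Bar.baz"))   # foo-bar-baz

    # Trailing '.0' release components are stripped.
    print(canonicalize_version("1.2.0"))       # 1.2

    name, version, build, tags = parse_wheel_filename("pip-21.1.1-py3-none-any.whl")
    print(name, version, build)                # pip 21.1.1 ()
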
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/version.py b/contrib/python/setuptools/py3/setuptools/_vendor/packaging/version.py
deleted file mode 100644
index de9a09a4ed3..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/packaging/version.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import collections
-import itertools
-import re
-import warnings
-from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
-
-from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
-
-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
-
-InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[
- NegativeInfinityType,
- Tuple[
- Union[
- SubLocalType,
- Tuple[SubLocalType, str],
- Tuple[NegativeInfinityType, SubLocalType],
- ],
- ...,
- ],
-]
-CmpKey = Tuple[
- int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
-]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[
- [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-]
-
-_Version = collections.namedtuple(
- "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
-
-
-def parse(version: str) -> Union["LegacyVersion", "Version"]:
- """
- Parse the given version string and return either a :class:`Version` object
- or a :class:`LegacyVersion` object depending on whether the given version is
- a valid PEP 440 version or a legacy version.
- """
- try:
- return Version(version)
- except InvalidVersion:
- return LegacyVersion(version)
-
-
-class InvalidVersion(ValueError):
- """
- An invalid version was found; users should refer to PEP 440.
- """
-
-
-class _BaseVersion:
- _key: Union[CmpKey, LegacyCmpKey]
-
- def __hash__(self) -> int:
- return hash(self._key)
-
- # Please keep the duplicated `isinstance` check
- # in the six comparisons hereunder
- # unless you find a way to avoid adding overhead function calls.
- def __lt__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key < other._key
-
- def __le__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key <= other._key
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key == other._key
-
- def __ge__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key >= other._key
-
- def __gt__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key > other._key
-
- def __ne__(self, other: object) -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key != other._key
-
-
-class LegacyVersion(_BaseVersion):
- def __init__(self, version: str) -> None:
- self._version = str(version)
- self._key = _legacy_cmpkey(self._version)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def __str__(self) -> str:
- return self._version
-
- def __repr__(self) -> str:
- return f"<LegacyVersion('{self}')>"
-
- @property
- def public(self) -> str:
- return self._version
-
- @property
- def base_version(self) -> str:
- return self._version
-
- @property
- def epoch(self) -> int:
- return -1
-
- @property
- def release(self) -> None:
- return None
-
- @property
- def pre(self) -> None:
- return None
-
- @property
- def post(self) -> None:
- return None
-
- @property
- def dev(self) -> None:
- return None
-
- @property
- def local(self) -> None:
- return None
-
- @property
- def is_prerelease(self) -> bool:
- return False
-
- @property
- def is_postrelease(self) -> bool:
- return False
-
- @property
- def is_devrelease(self) -> bool:
- return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
- "pre": "c",
- "preview": "c",
- "-": "final-",
- "rc": "c",
- "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
- for part in _legacy_version_component_re.split(s):
- part = _legacy_version_replacement_map.get(part, part)
-
- if not part or part == ".":
- continue
-
- if part[:1] in "0123456789":
- # pad for numeric comparison
- yield part.zfill(8)
- else:
- yield "*" + part
-
- # ensure that alpha/beta/candidate are before final
- yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
- # We hardcode an epoch of -1 here. A PEP 440 version can only have an
- # epoch greater than or equal to 0. This will effectively sort the
- # LegacyVersion, which uses the de facto standard originally implemented
- # by setuptools, before all PEP 440 versions.
- epoch = -1
-
- # This scheme is taken from pkg_resources.parse_version in setuptools prior
- # to its adoption of the packaging library.
- parts: List[str] = []
- for part in _parse_version_parts(version.lower()):
- if part.startswith("*"):
- # remove "-" before a prerelease tag
- if part < "*final":
- while parts and parts[-1] == "*final-":
- parts.pop()
-
- # remove trailing zeros from each series of numeric parts
- while parts and parts[-1] == "00000000":
- parts.pop()
-
- parts.append(part)
-
- return epoch, tuple(parts)
-
-
-# Deliberately not anchored to the start and end of the string, to make it
-# easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
- v?
- (?:
- (?:(?P<epoch>[0-9]+)!)? # epoch
- (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
- (?P<pre> # pre-release
- [-_\.]?
- (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
- [-_\.]?
- (?P<pre_n>[0-9]+)?
- )?
- (?P<post> # post release
- (?:-(?P<post_n1>[0-9]+))
- |
- (?:
- [-_\.]?
- (?P<post_l>post|rev|r)
- [-_\.]?
- (?P<post_n2>[0-9]+)?
- )
- )?
- (?P<dev> # dev release
- [-_\.]?
- (?P<dev_l>dev)
- [-_\.]?
- (?P<dev_n>[0-9]+)?
- )?
- )
- (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
-"""
-
-
-class Version(_BaseVersion):
-
- _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- def __init__(self, version: str) -> None:
-
- # Validate the version and parse it into pieces
- match = self._regex.search(version)
- if not match:
- raise InvalidVersion(f"Invalid version: '{version}'")
-
- # Store the parsed out pieces of the version
- self._version = _Version(
- epoch=int(match.group("epoch")) if match.group("epoch") else 0,
- release=tuple(int(i) for i in match.group("release").split(".")),
- pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
- post=_parse_letter_version(
- match.group("post_l"), match.group("post_n1") or match.group("post_n2")
- ),
- dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
- local=_parse_local_version(match.group("local")),
- )
-
- # Generate a key which will be used for sorting
- self._key = _cmpkey(
- self._version.epoch,
- self._version.release,
- self._version.pre,
- self._version.post,
- self._version.dev,
- self._version.local,
- )
-
- def __repr__(self) -> str:
- return f"<Version('{self}')>"
-
- def __str__(self) -> str:
- parts = []
-
- # Epoch
- if self.epoch != 0:
- parts.append(f"{self.epoch}!")
-
- # Release segment
- parts.append(".".join(str(x) for x in self.release))
-
- # Pre-release
- if self.pre is not None:
- parts.append("".join(str(x) for x in self.pre))
-
- # Post-release
- if self.post is not None:
- parts.append(f".post{self.post}")
-
- # Development release
- if self.dev is not None:
- parts.append(f".dev{self.dev}")
-
- # Local version segment
- if self.local is not None:
- parts.append(f"+{self.local}")
-
- return "".join(parts)
-
- @property
- def epoch(self) -> int:
- _epoch: int = self._version.epoch
- return _epoch
-
- @property
- def release(self) -> Tuple[int, ...]:
- _release: Tuple[int, ...] = self._version.release
- return _release
-
- @property
- def pre(self) -> Optional[Tuple[str, int]]:
- _pre: Optional[Tuple[str, int]] = self._version.pre
- return _pre
-
- @property
- def post(self) -> Optional[int]:
- return self._version.post[1] if self._version.post else None
-
- @property
- def dev(self) -> Optional[int]:
- return self._version.dev[1] if self._version.dev else None
-
- @property
- def local(self) -> Optional[str]:
- if self._version.local:
- return ".".join(str(x) for x in self._version.local)
- else:
- return None
-
- @property
- def public(self) -> str:
- return str(self).split("+", 1)[0]
-
- @property
- def base_version(self) -> str:
- parts = []
-
- # Epoch
- if self.epoch != 0:
- parts.append(f"{self.epoch}!")
-
- # Release segment
- parts.append(".".join(str(x) for x in self.release))
-
- return "".join(parts)
-
- @property
- def is_prerelease(self) -> bool:
- return self.dev is not None or self.pre is not None
-
- @property
- def is_postrelease(self) -> bool:
- return self.post is not None
-
- @property
- def is_devrelease(self) -> bool:
- return self.dev is not None
-
- @property
- def major(self) -> int:
- return self.release[0] if len(self.release) >= 1 else 0
-
- @property
- def minor(self) -> int:
- return self.release[1] if len(self.release) >= 2 else 0
-
- @property
- def micro(self) -> int:
- return self.release[2] if len(self.release) >= 3 else 0
-
-
-def _parse_letter_version(
- letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
- if letter:
- # We consider there to be an implicit 0 in a pre-release if there is
- # not a numeral associated with it.
- if number is None:
- number = 0
-
- # We normalize any letters to their lower case form
- letter = letter.lower()
-
- # We consider some words to be alternate spellings of other words and
- # in those cases we want to normalize the spellings to our preferred
- # spelling.
- if letter == "alpha":
- letter = "a"
- elif letter == "beta":
- letter = "b"
- elif letter in ["c", "pre", "preview"]:
- letter = "rc"
- elif letter in ["rev", "r"]:
- letter = "post"
-
- return letter, int(number)
- if not letter and number:
- # We assume if we are given a number, but we are not given a letter
- # then this is using the implicit post release syntax (e.g. 1.0-1)
- letter = "post"
-
- return letter, int(number)
-
- return None
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
- """
- Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
- """
- if local is not None:
- return tuple(
- part.lower() if not part.isdigit() else int(part)
- for part in _local_version_separators.split(local)
- )
- return None
-
-
-def _cmpkey(
- epoch: int,
- release: Tuple[int, ...],
- pre: Optional[Tuple[str, int]],
- post: Optional[Tuple[str, int]],
- dev: Optional[Tuple[str, int]],
- local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
- # When we compare a release version, we want to compare it with all of the
- # trailing zeros removed. So we'll reverse the list, drop all of the now
- # leading zeros until we come to something non-zero, then re-reverse the
- # rest back into the correct order, and make it a tuple to use as our
- # sorting key.
- _release = tuple(
- reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
- )
-
- # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
- # We'll do this by abusing the pre segment, but we _only_ want to do this
- # if there is not a pre or a post segment. If we have one of those then
- # the normal sorting rules will handle this case correctly.
- if pre is None and post is None and dev is not None:
- _pre: PrePostDevType = NegativeInfinity
- # Versions without a pre-release (except as noted above) should sort after
- # those with one.
- elif pre is None:
- _pre = Infinity
- else:
- _pre = pre
-
- # Versions without a post segment should sort before those with one.
- if post is None:
- _post: PrePostDevType = NegativeInfinity
-
- else:
- _post = post
-
- # Versions without a development segment should sort after those with one.
- if dev is None:
- _dev: PrePostDevType = Infinity
-
- else:
- _dev = dev
-
- if local is None:
- # Versions without a local segment should sort before those with one.
- _local: LocalType = NegativeInfinity
- else:
- # Versions with a local segment need that segment parsed to implement
- # the sorting rules in PEP440.
- # - Alpha numeric segments sort before numeric segments
- # - Alpha numeric segments sort lexicographically
- # - Numeric segments sort numerically
- # - Shorter versions sort before longer versions when the prefixes
- # match exactly
- _local = tuple(
- (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
- )
-
- return epoch, _release, _pre, _post, _dev, _local
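
Note: version.py above is the PEP 440 implementation the other modules build
on. A short sketch of its ordering semantics, assuming upstream
packaging.version behavior:

    from packaging.version import Version, parse

    # For one release number: dev < pre < final < post.
    vs = [Version(s) for s in ("1.0.post1", "1.0", "1.0rc1", "1.0.dev0")]
    print([str(v) for v in sorted(vs)])
    # ['1.0.dev0', '1.0rc1', '1.0', '1.0.post1']

    # Trailing zeros are ignored in comparisons; epochs trump everything.
    print(Version("1.0") == Version("1.0.0"))   # True
    print(Version("1!0.5") > Version("2.0"))    # True

    # parse() falls back to LegacyVersion for non-PEP 440 strings
    # (deprecated here and removed in later packaging releases).
    print(type(parse("french toast")).__name__)  # LegacyVersion
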
diff --git a/contrib/python/setuptools/py3/setuptools/_vendor/pyparsing.py b/contrib/python/setuptools/py3/setuptools/_vendor/pyparsing.py
deleted file mode 100644
index cf75e1e5fcb..00000000000
--- a/contrib/python/setuptools/py3/setuptools/_vendor/pyparsing.py
+++ /dev/null
@@ -1,5742 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2018 Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = \
-"""
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and executing simple grammars,
-vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
-don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
-provides a library of classes that you use to construct the grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
-(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
-L{Literal} expressions)::
-
- from pyparsing import Word, alphas
-
- # define grammar of a greeting
- greet = Word(alphas) + "," + Word(alphas) + "!"
-
- hello = "Hello, World!"
- print (hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
- Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the self-explanatory
-class names, and the use of '+', '|' and '^' operators.
-
-The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
-object with named attributes.
-
-The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- - quoted strings
- - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
- - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- - construct character word-group expressions using the L{Word} class
- - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
- - associate names with your parsed results using L{ParserElement.setResultsName}
- - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- - find more useful common expressions in the L{pyparsing_common} namespace class
-"""
-
-__version__ = "2.2.1"
-__versionTime__ = "18 Sep 2018 00:49 UTC"
-__author__ = "Paul McGuire <[email protected]>"
-
-import string
-from weakref import ref as wkref
-import copy
-import sys
-import warnings
-import re
-import sre_constants
-import collections
-import pprint
-import traceback
-import types
-from datetime import datetime
-
-try:
- from _thread import RLock
-except ImportError:
- from threading import RLock
-
-try:
- # Python 3
- from collections.abc import Iterable
- from collections.abc import MutableMapping
-except ImportError:
- # Python 2.7
- from collections import Iterable
- from collections import MutableMapping
-
-try:
- from collections import OrderedDict as _OrderedDict
-except ImportError:
- try:
- from ordereddict import OrderedDict as _OrderedDict
- except ImportError:
- _OrderedDict = None
-
-#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
-
-__all__ = [
-'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
-'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
-'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
-'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
-'CloseMatch', 'tokenMap', 'pyparsing_common',
-]
-
-system_version = tuple(sys.version_info)[:3]
-PY_3 = system_version[0] == 3
-if PY_3:
- _MAX_INT = sys.maxsize
- basestring = str
- unichr = chr
- _ustr = str
-
- # build list of single arg builtins, that can be used as parse actions
- singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
-
-else:
- _MAX_INT = sys.maxint
- range = xrange
-
- def _ustr(obj):
- """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
- str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
- then < returns the unicode object | encodes it with the default encoding | ... >.
- """
- if isinstance(obj,unicode):
- return obj
-
- try:
- # If this works, then _ustr(obj) has the same behaviour as str(obj), so
- # it won't break any existing code.
- return str(obj)
-
- except UnicodeEncodeError:
- # Else encode it
- ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
- xmlcharref = Regex(r'&#\d+;')
- xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
- return xmlcharref.transformString(ret)
-
- # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
- singleArgBuiltins = []
- import __builtin__
- for fname in "sum len sorted reversed list tuple set any all min max".split():
- try:
- singleArgBuiltins.append(getattr(__builtin__,fname))
- except AttributeError:
- continue
-
-_generatorType = type((y for y in range(1)))
-
-def _xml_escape(data):
- """Escape &, <, >, ", ', etc. in a string of data."""
-
- # ampersand must be replaced first
- from_symbols = '&><"\''
- to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
- for from_,to_ in zip(from_symbols, to_symbols):
- data = data.replace(from_, to_)
- return data
-
-class _Constants(object):
- pass
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-_bslash = chr(92)
-printables = "".join(c for c in string.printable if c not in string.whitespace)
-
-class ParseBaseException(Exception):
- """base exception class for all parsing runtime exceptions"""
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__( self, pstr, loc=0, msg=None, elem=None ):
- self.loc = loc
- if msg is None:
- self.msg = pstr
- self.pstr = ""
- else:
- self.msg = msg
- self.pstr = pstr
- self.parserElement = elem
- self.args = (pstr, loc, msg)
-
- @classmethod
- def _from_exception(cls, pe):
- """
- internal factory method to simplify creating one type of ParseException
- from another - avoids having __init__ signature conflicts among subclasses
- """
- return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
- def __getattr__( self, aname ):
- """supported attributes by name are:
- - lineno - returns the line number of the exception text
- - col - returns the column number of the exception text
- - line - returns the line containing the exception text
- """
- if( aname == "lineno" ):
- return lineno( self.loc, self.pstr )
- elif( aname in ("col", "column") ):
- return col( self.loc, self.pstr )
- elif( aname == "line" ):
- return line( self.loc, self.pstr )
- else:
- raise AttributeError(aname)
-
- def __str__( self ):
- return "%s (at char %d), (line:%d, col:%d)" % \
- ( self.msg, self.loc, self.lineno, self.column )
- def __repr__( self ):
- return _ustr(self)
- def markInputline( self, markerString = ">!<" ):
- """Extracts the exception line from the input string, and marks
- the location of the exception with a special symbol.
- """
- line_str = self.line
- line_column = self.column - 1
- if markerString:
- line_str = "".join((line_str[:line_column],
- markerString, line_str[line_column:]))
- return line_str.strip()
- def __dir__(self):
- return "lineno col line".split() + dir(type(self))
-
-class ParseException(ParseBaseException):
- """
- Exception thrown when parse expressions don't match the input string;
- supported attributes by name are:
- - lineno - returns the line number of the exception text
- - col - returns the column number of the exception text
- - line - returns the line containing the exception text
-
- Example::
- try:
- Word(nums).setName("integer").parseString("ABC")
- except ParseException as pe:
- print(pe)
- print("column: {}".format(pe.col))
-
- prints::
- Expected integer (at char 0), (line:1, col:1)
- column: 1
- """
- pass
-
-class ParseFatalException(ParseBaseException):
- """user-throwable exception thrown when inconsistent parse content
- is found; stops all parsing immediately"""
- pass
-
-class ParseSyntaxException(ParseFatalException):
- """just like L{ParseFatalException}, but thrown internally when an
- L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
- immediately because an unbacktrackable syntax error has been found"""
- pass
-
-#~ class ReparseException(ParseBaseException):
- #~ """Experimental class - parse actions can raise this exception to cause
- #~ pyparsing to reparse the input string:
- #~ - with a modified input string, and/or
- #~ - with a modified start location
- #~ Set the values of the ReparseException in the constructor, and raise the
- #~ exception in a parse action to cause pyparsing to use the new string/location.
- #~ Setting the values as None causes no change to be made.
- #~ """
- #~ def __init_( self, newstring, restartLoc ):
- #~ self.newParseText = newstring
- #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
- """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
- def __init__( self, parseElementList ):
- self.parseElementTrace = parseElementList
-
- def __str__( self ):
- return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
- def __init__(self,p1,p2):
- self.tup = (p1,p2)
- def __getitem__(self,i):
- return self.tup[i]
- def __repr__(self):
- return repr(self.tup[0])
- def setOffset(self,i):
- self.tup = (self.tup[0],i)
-
-class ParseResults(object):
- """
- Structured parse results, to provide multiple means of access to the parsed data:
- - as a list (C{len(results)})
- - by list index (C{results[0], results[1]}, etc.)
- - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
-
- Example::
- integer = Word(nums)
- date_str = (integer.setResultsName("year") + '/'
- + integer.setResultsName("month") + '/'
- + integer.setResultsName("day"))
- # equivalent form:
- # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- # parseString returns a ParseResults object
- result = date_str.parseString("1999/12/31")
-
- def test(s, fn=repr):
- print("%s -> %s" % (s, fn(eval(s))))
- test("list(result)")
- test("result[0]")
- test("result['month']")
- test("result.day")
- test("'month' in result")
- test("'minutes' in result")
- test("result.dump()", str)
- prints::
- list(result) -> ['1999', '/', '12', '/', '31']
- result[0] -> '1999'
- result['month'] -> '12'
- result.day -> '31'
- 'month' in result -> True
- 'minutes' in result -> False
- result.dump() -> ['1999', '/', '12', '/', '31']
- - day: 31
- - month: 12
- - year: 1999
- """
- def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
- if isinstance(toklist, cls):
- return toklist
- retobj = object.__new__(cls)
- retobj.__doinit = True
- return retobj
-
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
- if self.__doinit:
- self.__doinit = False
- self.__name = None
- self.__parent = None
- self.__accumNames = {}
- self.__asList = asList
- self.__modal = modal
- if toklist is None:
- toklist = []
- if isinstance(toklist, list):
- self.__toklist = toklist[:]
- elif isinstance(toklist, _generatorType):
- self.__toklist = list(toklist)
- else:
- self.__toklist = [toklist]
- self.__tokdict = dict()
-
- if name is not None and name:
- if not modal:
- self.__accumNames[name] = 0
- if isinstance(name,int):
- name = _ustr(name) # will always return a str, but use _ustr for consistency
- self.__name = name
- if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
- if isinstance(toklist,basestring):
- toklist = [ toklist ]
- if asList:
- if isinstance(toklist,ParseResults):
- self[name] = _ParseResultsWithOffset(toklist.copy(),0)
- else:
- self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
- self[name].__name = name
- else:
- try:
- self[name] = toklist[0]
- except (KeyError,TypeError,IndexError):
- self[name] = toklist
-
- def __getitem__( self, i ):
- if isinstance( i, (int,slice) ):
- return self.__toklist[i]
- else:
- if i not in self.__accumNames:
- return self.__tokdict[i][-1][0]
- else:
- return ParseResults([ v[0] for v in self.__tokdict[i] ])
-
- def __setitem__( self, k, v, isinstance=isinstance ):
- if isinstance(v,_ParseResultsWithOffset):
- self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
- sub = v[0]
- elif isinstance(k,(int,slice)):
- self.__toklist[k] = v
- sub = v
- else:
- self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
- sub = v
- if isinstance(sub,ParseResults):
- sub.__parent = wkref(self)
-
- def __delitem__( self, i ):
- if isinstance(i,(int,slice)):
- mylen = len( self.__toklist )
- del self.__toklist[i]
-
- # convert int to slice
- if isinstance(i, int):
- if i < 0:
- i += mylen
- i = slice(i, i+1)
- # get removed indices
- removed = list(range(*i.indices(mylen)))
- removed.reverse()
- # fixup indices in token dictionary
- for name,occurrences in self.__tokdict.items():
- for j in removed:
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
- else:
- del self.__tokdict[i]
-
- def __contains__( self, k ):
- return k in self.__tokdict
-
- def __len__( self ): return len( self.__toklist )
- def __bool__(self): return ( not not self.__toklist )
- __nonzero__ = __bool__
- def __iter__( self ): return iter( self.__toklist )
- def __reversed__( self ): return iter( self.__toklist[::-1] )
- def _iterkeys( self ):
- if hasattr(self.__tokdict, "iterkeys"):
- return self.__tokdict.iterkeys()
- else:
- return iter(self.__tokdict)
-
- def _itervalues( self ):
- return (self[k] for k in self._iterkeys())
-
- def _iteritems( self ):
- return ((k, self[k]) for k in self._iterkeys())
-
- if PY_3:
- keys = _iterkeys
- """Returns an iterator of all named result keys (Python 3.x only)."""
-
- values = _itervalues
- """Returns an iterator of all named result values (Python 3.x only)."""
-
- items = _iteritems
- """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
-
- else:
- iterkeys = _iterkeys
- """Returns an iterator of all named result keys (Python 2.x only)."""
-
- itervalues = _itervalues
- """Returns an iterator of all named result values (Python 2.x only)."""
-
- iteritems = _iteritems
- """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
-
-        def keys( self ):
-            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iterkeys())
-
-        def values( self ):
-            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.itervalues())
-
-        def items( self ):
-            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iteritems())
-
- def haskeys( self ):
- """Since keys() returns an iterator, this method is helpful in bypassing
- code that looks for the existence of any defined results names."""
- return bool(self.__tokdict)
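-
-    # A small sketch of haskeys() (illustrative only; uses Word/alphas/nums
-    # defined later in this module):
-    #~ result = (Word(alphas)("word") + Word(nums)).parseString("abc 123")
-    #~ assert result.haskeys()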
-
- def pop( self, *args, **kwargs):
- """
- Removes and returns item at specified index (default=C{last}).
- Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
- argument or an integer argument, it will use C{list} semantics
- and pop tokens from the list of parsed tokens. If passed a
- non-integer argument (most likely a string), it will use C{dict}
- semantics and pop the corresponding value from any defined
- results names. A second default return value argument is
- supported, just as in C{dict.pop()}.
-
- Example::
- def remove_first(tokens):
- tokens.pop(0)
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
- print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
-
- label = Word(alphas)
- patt = label("LABEL") + OneOrMore(Word(nums))
- print(patt.parseString("AAB 123 321").dump())
-
- # Use pop() in a parse action to remove named result (note that corresponding value is not
- # removed from list form of results)
- def remove_LABEL(tokens):
- tokens.pop("LABEL")
- return tokens
- patt.addParseAction(remove_LABEL)
- print(patt.parseString("AAB 123 321").dump())
- prints::
- ['AAB', '123', '321']
- - LABEL: AAB
-
- ['AAB', '123', '321']
- """
- if not args:
- args = [-1]
- for k,v in kwargs.items():
- if k == 'default':
- args = (args[0], v)
- else:
- raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
- if (isinstance(args[0], int) or
- len(args) == 1 or
- args[0] in self):
- index = args[0]
- ret = self[index]
- del self[index]
- return ret
- else:
- defaultvalue = args[1]
- return defaultvalue
-
- def get(self, key, defaultValue=None):
- """
- Returns named result matching the given key, or if there is no
- such name, then returns the given C{defaultValue} or C{None} if no
- C{defaultValue} is specified.
-
- Similar to C{dict.get()}.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString("1999/12/31")
- print(result.get("year")) # -> '1999'
- print(result.get("hour", "not specified")) # -> 'not specified'
- print(result.get("hour")) # -> None
- """
- if key in self:
- return self[key]
- else:
- return defaultValue
-
- def insert( self, index, insStr ):
- """
- Inserts new element at location index in the list of parsed tokens.
-
- Similar to C{list.insert()}.
-
- Example::
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to insert the parse location in the front of the parsed results
- def insert_locn(locn, tokens):
- tokens.insert(0, locn)
- print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
- """
- self.__toklist.insert(index, insStr)
- # fixup indices in token dictionary
- for name,occurrences in self.__tokdict.items():
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
-
- def append( self, item ):
- """
- Add single element to end of ParseResults list of elements.
-
- Example::
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to compute the sum of the parsed integers, and add it to the end
- def append_sum(tokens):
- tokens.append(sum(map(int, tokens)))
- print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
- """
- self.__toklist.append(item)
-
- def extend( self, itemseq ):
- """
- Add sequence of elements to end of ParseResults list of elements.
-
- Example::
- patt = OneOrMore(Word(alphas))
-
- # use a parse action to append the reverse of the matched strings, to make a palindrome
- def make_palindrome(tokens):
- tokens.extend(reversed([t[::-1] for t in tokens]))
- return ''.join(tokens)
- print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
- """
- if isinstance(itemseq, ParseResults):
- self += itemseq
- else:
- self.__toklist.extend(itemseq)
-
- def clear( self ):
- """
- Clear all elements and results names.
- """
- del self.__toklist[:]
- self.__tokdict.clear()
-
- def __getattr__( self, name ):
- try:
- return self[name]
- except KeyError:
- return ""
-
- def __add__( self, other ):
- ret = self.copy()
- ret += other
- return ret
-
- def __iadd__( self, other ):
- if other.__tokdict:
- offset = len(self.__toklist)
- addoffset = lambda a: offset if a<0 else a+offset
- otheritems = other.__tokdict.items()
- otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
- for (k,vlist) in otheritems for v in vlist]
- for k,v in otherdictitems:
- self[k] = v
- if isinstance(v[0],ParseResults):
- v[0].__parent = wkref(self)
-
- self.__toklist += other.__toklist
- self.__accumNames.update( other.__accumNames )
- return self
-
- def __radd__(self, other):
- if isinstance(other,int) and other == 0:
- # useful for merging many ParseResults using sum() builtin
- return self.copy()
- else:
- # this may raise a TypeError - so be it
- return other + self
-
- def __repr__( self ):
- return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
-
- def __str__( self ):
- return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
-
- def _asStringList( self, sep='' ):
- out = []
- for item in self.__toklist:
- if out and sep:
- out.append(sep)
- if isinstance( item, ParseResults ):
- out += item._asStringList()
- else:
- out.append( _ustr(item) )
- return out
-
- def asList( self ):
- """
- Returns the parse results as a nested list of matching tokens, all converted to strings.
-
- Example::
- patt = OneOrMore(Word(alphas))
- result = patt.parseString("sldkj lsdkj sldkj")
- # even though the result prints in string-like form, it is actually a pyparsing ParseResults
- print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
- # Use asList() to create an actual list
- result_list = result.asList()
- print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
- """
- return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
-
- def asDict( self ):
- """
- Returns the named parse results as a nested dictionary.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString('12/31/1999')
- print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
- result_dict = result.asDict()
- print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
-            # even though a ParseResults supports dict-like access, sometimes you just need a dict
- import json
- print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
- print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
- """
- if PY_3:
- item_fn = self.items
- else:
- item_fn = self.iteritems
-
- def toItem(obj):
- if isinstance(obj, ParseResults):
- if obj.haskeys():
- return obj.asDict()
- else:
- return [toItem(v) for v in obj]
- else:
- return obj
-
- return dict((k,toItem(v)) for k,v in item_fn())
-
- def copy( self ):
- """
- Returns a new copy of a C{ParseResults} object.
- """
- ret = ParseResults( self.__toklist )
- ret.__tokdict = self.__tokdict.copy()
- ret.__parent = self.__parent
- ret.__accumNames.update( self.__accumNames )
- ret.__name = self.__name
- return ret
-
- def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
- """
- (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
- """
- nl = "\n"
- out = []
- namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
- for v in vlist)
- nextLevelIndent = indent + " "
-
- # collapse out indents if formatting is not desired
- if not formatted:
- indent = ""
- nextLevelIndent = ""
- nl = ""
-
- selfTag = None
- if doctag is not None:
- selfTag = doctag
- else:
- if self.__name:
- selfTag = self.__name
-
- if not selfTag:
- if namedItemsOnly:
- return ""
- else:
- selfTag = "ITEM"
-
- out += [ nl, indent, "<", selfTag, ">" ]
-
- for i,res in enumerate(self.__toklist):
- if isinstance(res,ParseResults):
- if i in namedItems:
- out += [ res.asXML(namedItems[i],
- namedItemsOnly and doctag is None,
- nextLevelIndent,
- formatted)]
- else:
- out += [ res.asXML(None,
- namedItemsOnly and doctag is None,
- nextLevelIndent,
- formatted)]
- else:
- # individual token, see if there is a name for it
- resTag = None
- if i in namedItems:
- resTag = namedItems[i]
- if not resTag:
- if namedItemsOnly:
- continue
- else:
- resTag = "ITEM"
- xmlBodyText = _xml_escape(_ustr(res))
- out += [ nl, nextLevelIndent, "<", resTag, ">",
- xmlBodyText,
- "</", resTag, ">" ]
-
- out += [ nl, indent, "</", selfTag, ">" ]
- return "".join(out)
-
- def __lookup(self,sub):
- for k,vlist in self.__tokdict.items():
- for v,loc in vlist:
- if sub is v:
- return k
- return None
-
- def getName(self):
- r"""
- Returns the results name for this token expression. Useful when several
- different expressions might match at a particular location.
-
- Example::
- integer = Word(nums)
- ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
- house_number_expr = Suppress('#') + Word(nums, alphanums)
- user_data = (Group(house_number_expr)("house_number")
- | Group(ssn_expr)("ssn")
- | Group(integer)("age"))
- user_info = OneOrMore(user_data)
-
- result = user_info.parseString("22 111-22-3333 #221B")
- for item in result:
- print(item.getName(), ':', item[0])
- prints::
- age : 22
- ssn : 111-22-3333
- house_number : 221B
- """
- if self.__name:
- return self.__name
- elif self.__parent:
- par = self.__parent()
- if par:
- return par.__lookup(self)
- else:
- return None
- elif (len(self) == 1 and
- len(self.__tokdict) == 1 and
- next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
- return next(iter(self.__tokdict.keys()))
- else:
- return None
-
- def dump(self, indent='', depth=0, full=True):
- """
- Diagnostic method for listing out the contents of a C{ParseResults}.
- Accepts an optional C{indent} argument so that this string can be embedded
- in a nested display of other data.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString('12/31/1999')
- print(result.dump())
- prints::
- ['12', '/', '31', '/', '1999']
- - day: 1999
- - month: 31
- - year: 12
- """
- out = []
- NL = '\n'
- out.append( indent+_ustr(self.asList()) )
- if full:
- if self.haskeys():
- items = sorted((str(k), v) for k,v in self.items())
- for k,v in items:
- if out:
- out.append(NL)
- out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
- if isinstance(v,ParseResults):
- if v:
- out.append( v.dump(indent,depth+1) )
- else:
- out.append(_ustr(v))
- else:
- out.append(repr(v))
- elif any(isinstance(vv,ParseResults) for vv in self):
- v = self
- for i,vv in enumerate(v):
- if isinstance(vv,ParseResults):
- out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
- else:
- out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
-
- return "".join(out)
-
- def pprint(self, *args, **kwargs):
- """
- Pretty-printer for parsed results as a list, using the C{pprint} module.
- Accepts additional positional or keyword args as defined for the
- C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
-
- Example::
- ident = Word(alphas, alphanums)
- num = Word(nums)
- func = Forward()
- term = ident | num | Group('(' + func + ')')
- func <<= ident + Group(Optional(delimitedList(term)))
- result = func.parseString("fna a,b,(fnb c,d,200),100")
- result.pprint(width=40)
- prints::
- ['fna',
- ['a',
- 'b',
- ['(', 'fnb', ['c', 'd', '200'], ')'],
- '100']]
- """
- pprint.pprint(self.asList(), *args, **kwargs)
-
- # add support for pickle protocol
- def __getstate__(self):
- return ( self.__toklist,
- ( self.__tokdict.copy(),
- self.__parent is not None and self.__parent() or None,
- self.__accumNames,
- self.__name ) )
-
- def __setstate__(self,state):
- self.__toklist = state[0]
- (self.__tokdict,
- par,
- inAccumNames,
- self.__name) = state[1]
- self.__accumNames = {}
- self.__accumNames.update(inAccumNames)
- if par is not None:
- self.__parent = wkref(par)
- else:
- self.__parent = None
-
- def __getnewargs__(self):
- return self.__toklist, self.__name, self.__asList, self.__modal
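-
-    # A minimal pickle round-trip sketch for the protocol support above
-    # (illustrative only):
-    #~ import pickle
-    #~ result = OneOrMore(Word(nums)).parseString("1 2 3")
-    #~ assert pickle.loads(pickle.dumps(result)).asList() == ['1', '2', '3']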
-
- def __dir__(self):
- return (dir(type(self)) + list(self.keys()))
-
-MutableMapping.register(ParseResults)
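-
-# A minimal sketch of what the registration above enables: ParseResults now
-# passes abstract-class checks for a mutable mapping (illustrative only):
-#~ assert isinstance(ParseResults(['a']), MutableMapping)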
-
-def col (loc,strg):
- """Returns current column within a string, counting newlines as line separators.
- The first column is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
- """
- s = strg
- return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
-
-def lineno(loc,strg):
- """Returns current line number within a string, counting newlines as line separators.
- The first line is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
- """
- return strg.count("\n",0,loc) + 1
-
-def line( loc, strg ):
- """Returns the line of text containing loc within a string, counting newlines as line separators.
- """
- lastCR = strg.rfind("\n", 0, loc)
- nextCR = strg.find("\n", loc)
- if nextCR >= 0:
- return strg[lastCR+1:nextCR]
- else:
- return strg[lastCR+1:]
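-
-# A small usage sketch of the three location helpers above (illustrative only):
-#~ s = "abc\ndef"
-#~ assert lineno(5, s) == 2      # 'e' is on line 2...
-#~ assert col(5, s) == 2         # ...in column 2
-#~ assert line(5, s) == "def"    # the full text of that line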
-
-def _defaultStartDebugAction( instring, loc, expr ):
- print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
-
-def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
- print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
-
-def _defaultExceptionDebugAction( instring, loc, expr, exc ):
- print ("Exception raised:" + _ustr(exc))
-
-def nullDebugAction(*args):
- """'Do-nothing' debug action, to suppress debugging output during parsing."""
- pass
-
-# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
-#~ 'decorator to trim function calls to match the arity of the target'
-#~ def _trim_arity(func, maxargs=3):
- #~ if func in singleArgBuiltins:
- #~ return lambda s,l,t: func(t)
- #~ limit = 0
- #~ foundArity = False
- #~ def wrapper(*args):
- #~ nonlocal limit,foundArity
- #~ while 1:
- #~ try:
- #~ ret = func(*args[limit:])
- #~ foundArity = True
- #~ return ret
- #~ except TypeError:
- #~ if limit == maxargs or foundArity:
- #~ raise
- #~ limit += 1
- #~ continue
- #~ return wrapper
-
-# this version is Python 2.x-3.x cross-compatible
-'decorator to trim function calls to match the arity of the target'
-def _trim_arity(func, maxargs=2):
- if func in singleArgBuiltins:
- return lambda s,l,t: func(t)
- limit = [0]
- foundArity = [False]
-
- # traceback return data structure changed in Py3.5 - normalize back to plain tuples
- if system_version[:2] >= (3,5):
- def extract_stack(limit=0):
- # special handling for Python 3.5.0 - extra deep call stack by 1
- offset = -3 if system_version == (3,5,0) else -2
- frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
- return [frame_summary[:2]]
- def extract_tb(tb, limit=0):
- frames = traceback.extract_tb(tb, limit=limit)
- frame_summary = frames[-1]
- return [frame_summary[:2]]
- else:
- extract_stack = traceback.extract_stack
- extract_tb = traceback.extract_tb
-
- # synthesize what would be returned by traceback.extract_stack at the call to
- # user's parse action 'func', so that we don't incur call penalty at parse time
-
- LINE_DIFF = 6
- # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
- # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
- this_line = extract_stack(limit=2)[-1]
- pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
-
- def wrapper(*args):
- while 1:
- try:
- ret = func(*args[limit[0]:])
- foundArity[0] = True
- return ret
- except TypeError:
- # re-raise TypeErrors if they did not come from our arity testing
- if foundArity[0]:
- raise
- else:
- try:
- tb = sys.exc_info()[-1]
- if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
- raise
- finally:
- del tb
-
- if limit[0] <= maxargs:
- limit[0] += 1
- continue
- raise
-
- # copy func name to wrapper for sensible debug output
- func_name = "<parse action>"
- try:
- func_name = getattr(func, '__name__',
- getattr(func, '__class__').__name__)
- except Exception:
- func_name = str(func)
- wrapper.__name__ = func_name
-
- return wrapper
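-
-# A minimal sketch of what the arity trimming above achieves (illustrative only):
-#~ pa = _trim_arity(lambda toks: toks[0])   # wrap a 1-argument parse action...
-#~ pa("instring", 0, ["tok"])               # ...and call it as fn(s, loc, toks) -> "tok"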
-
-class ParserElement(object):
- """Abstract base level parser element class."""
- DEFAULT_WHITE_CHARS = " \n\t\r"
- verbose_stacktrace = False
-
- @staticmethod
- def setDefaultWhitespaceChars( chars ):
- r"""
- Overrides the default whitespace chars
-
- Example::
- # default whitespace chars are space, <TAB> and newline
- OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
-
- # change to just treat newline as significant
- ParserElement.setDefaultWhitespaceChars(" \t")
- OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
- """
- ParserElement.DEFAULT_WHITE_CHARS = chars
-
- @staticmethod
- def inlineLiteralsUsing(cls):
- """
- Set class to be used for inclusion of string literals into a parser.
-
- Example::
- # default literal class used is Literal
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
-
- # change to Suppress
- ParserElement.inlineLiteralsUsing(Suppress)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
- """
- ParserElement._literalStringClass = cls
-
- def __init__( self, savelist=False ):
- self.parseAction = list()
- self.failAction = None
- #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
- self.strRepr = None
- self.resultsName = None
- self.saveAsList = savelist
- self.skipWhitespace = True
- self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
- self.copyDefaultWhiteChars = True
- self.mayReturnEmpty = False # used when checking for left-recursion
- self.keepTabs = False
- self.ignoreExprs = list()
- self.debug = False
- self.streamlined = False
- self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
- self.errmsg = ""
- self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
- self.debugActions = ( None, None, None ) #custom debug actions
- self.re = None
- self.callPreparse = True # used to avoid redundant calls to preParse
- self.callDuringTry = False
-
- def copy( self ):
- """
- Make a copy of this C{ParserElement}. Useful for defining different parse actions
- for the same parsing pattern, using copies of the original parse element.
-
- Example::
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
- integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-
- print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
- prints::
- [5120, 100, 655360, 268435456]
- Equivalent form of C{expr.copy()} is just C{expr()}::
- integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
- """
- cpy = copy.copy( self )
- cpy.parseAction = self.parseAction[:]
- cpy.ignoreExprs = self.ignoreExprs[:]
- if self.copyDefaultWhiteChars:
- cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
- return cpy
-
- def setName( self, name ):
- """
-        Define name for this expression, to make debugging and exception messages clearer.
-
- Example::
- Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
- Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
- """
- self.name = name
- self.errmsg = "Expected " + self.name
- if hasattr(self,"exception"):
- self.exception.msg = self.errmsg
- return self
-
- def setResultsName( self, name, listAllMatches=False ):
- """
- Define name for referencing matching tokens as a nested attribute
- of the returned parse results.
- NOTE: this returns a *copy* of the original C{ParserElement} object;
- this is so that the client can define a basic element, such as an
- integer, and reference it in multiple places with different names.
-
- You can also set results names using the abbreviated syntax,
- C{expr("name")} in place of C{expr.setResultsName("name")} -
- see L{I{__call__}<__call__>}.
-
- Example::
- date_str = (integer.setResultsName("year") + '/'
- + integer.setResultsName("month") + '/'
- + integer.setResultsName("day"))
-
- # equivalent form:
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
- """
- newself = self.copy()
- if name.endswith("*"):
- name = name[:-1]
- listAllMatches=True
- newself.resultsName = name
- newself.modalResults = not listAllMatches
- return newself
-
- def setBreak(self,breakFlag = True):
- """Method to invoke the Python pdb debugger when this element is
- about to be parsed. Set C{breakFlag} to True to enable, False to
- disable.
- """
- if breakFlag:
- _parseMethod = self._parse
- def breaker(instring, loc, doActions=True, callPreParse=True):
- import pdb
- pdb.set_trace()
- return _parseMethod( instring, loc, doActions, callPreParse )
- breaker._originalParseMethod = _parseMethod
- self._parse = breaker
- else:
- if hasattr(self._parse,"_originalParseMethod"):
- self._parse = self._parse._originalParseMethod
- return self
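-
-    # A minimal sketch of setBreak() (illustrative only; opens a pdb prompt
-    # just before each attempt to parse this element):
-    #~ integer = Word(nums).setBreak()
-    #~ integer.parseString("123")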
-
- def setParseAction( self, *fns, **kwargs ):
- """
- Define one or more actions to perform when successfully matching parse element definition.
- Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
- C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- - s = the original string being parsed (see note below)
- - loc = the location of the matching substring
- - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
- If the functions in fns modify the tokens, they can return them as the return
- value from fn, and the modified list of tokens will replace the original.
- Otherwise, fn does not need to return any value.
-
- Optional keyword arguments:
- - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{parseString}<parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
-
- Example::
- integer = Word(nums)
- date_str = integer + '/' + integer + '/' + integer
-
- date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
- # use parse action to convert to ints at parse time
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- date_str = integer + '/' + integer + '/' + integer
-
- # note that integer fields are now ints, not strings
- date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
- """
- self.parseAction = list(map(_trim_arity, list(fns)))
- self.callDuringTry = kwargs.get("callDuringTry", False)
- return self
-
- def addParseAction( self, *fns, **kwargs ):
- """
- Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
-
- See examples in L{I{copy}<copy>}.
- """
- self.parseAction += list(map(_trim_arity, list(fns)))
- self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
- return self
-
- def addCondition(self, *fns, **kwargs):
- """Add a boolean predicate function to expression's list of parse actions. See
- L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
- functions passed to C{addCondition} need to return boolean success/fail of the condition.
-
- Optional keyword arguments:
- - message = define a custom message to be used in the raised exception
- - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-
- Example::
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- year_int = integer.copy()
- year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
- date_str = year_int + '/' + integer + '/' + integer
-
- result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
- """
- msg = kwargs.get("message", "failed user-defined condition")
- exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
- for fn in fns:
- def pa(s,l,t):
- if not bool(_trim_arity(fn)(s,l,t)):
- raise exc_type(s,l,msg)
- self.parseAction.append(pa)
- self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
- return self
-
- def setFailAction( self, fn ):
- """Define action to perform if parsing fails at this expression.
-        Fail action fn is a callable function that takes the arguments
- C{fn(s,loc,expr,err)} where:
- - s = string being parsed
- - loc = location where expression match was attempted and failed
- - expr = the parse expression that failed
- - err = the exception thrown
- The function returns no value. It may throw C{L{ParseFatalException}}
- if it is desired to stop parsing immediately."""
- self.failAction = fn
- return self
-
- def _skipIgnorables( self, instring, loc ):
- exprsFound = True
- while exprsFound:
- exprsFound = False
- for e in self.ignoreExprs:
- try:
- while 1:
- loc,dummy = e._parse( instring, loc )
- exprsFound = True
- except ParseException:
- pass
- return loc
-
- def preParse( self, instring, loc ):
- if self.ignoreExprs:
- loc = self._skipIgnorables( instring, loc )
-
- if self.skipWhitespace:
- wt = self.whiteChars
- instrlen = len(instring)
- while loc < instrlen and instring[loc] in wt:
- loc += 1
-
- return loc
-
- def parseImpl( self, instring, loc, doActions=True ):
- return loc, []
-
- def postParse( self, instring, loc, tokenlist ):
- return tokenlist
-
- #~ @profile
- def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
- debugging = ( self.debug ) #and doActions )
-
- if debugging or self.failAction:
- #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
- if (self.debugActions[0] ):
- self.debugActions[0]( instring, loc, self )
- if callPreParse and self.callPreparse:
- preloc = self.preParse( instring, loc )
- else:
- preloc = loc
- tokensStart = preloc
- try:
- try:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
- except IndexError:
- raise ParseException( instring, len(instring), self.errmsg, self )
- except ParseBaseException as err:
- #~ print ("Exception raised:", err)
- if self.debugActions[2]:
- self.debugActions[2]( instring, tokensStart, self, err )
- if self.failAction:
- self.failAction( instring, tokensStart, self, err )
- raise
- else:
- if callPreParse and self.callPreparse:
- preloc = self.preParse( instring, loc )
- else:
- preloc = loc
- tokensStart = preloc
- if self.mayIndexError or preloc >= len(instring):
- try:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
- except IndexError:
- raise ParseException( instring, len(instring), self.errmsg, self )
- else:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
-
- tokens = self.postParse( instring, loc, tokens )
-
- retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
- if self.parseAction and (doActions or self.callDuringTry):
- if debugging:
- try:
- for fn in self.parseAction:
- tokens = fn( instring, tokensStart, retTokens )
- if tokens is not None:
- retTokens = ParseResults( tokens,
- self.resultsName,
- asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
- modal=self.modalResults )
- except ParseBaseException as err:
- #~ print "Exception raised in user parse action:", err
- if (self.debugActions[2] ):
- self.debugActions[2]( instring, tokensStart, self, err )
- raise
- else:
- for fn in self.parseAction:
- tokens = fn( instring, tokensStart, retTokens )
- if tokens is not None:
- retTokens = ParseResults( tokens,
- self.resultsName,
- asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
- modal=self.modalResults )
- if debugging:
- #~ print ("Matched",self,"->",retTokens.asList())
- if (self.debugActions[1] ):
- self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
-
- return loc, retTokens
-
- def tryParse( self, instring, loc ):
- try:
- return self._parse( instring, loc, doActions=False )[0]
- except ParseFatalException:
- raise ParseException( instring, loc, self.errmsg, self)
-
- def canParseNext(self, instring, loc):
- try:
- self.tryParse(instring, loc)
- except (ParseException, IndexError):
- return False
- else:
- return True
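-
-    # A small sketch of canParseNext() (illustrative only):
-    #~ assert Word(nums).canParseNext("abc 123", 4)
-    #~ assert not Word(nums).canParseNext("abc 123", 0)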
-
- class _UnboundedCache(object):
- def __init__(self):
- cache = {}
- self.not_in_cache = not_in_cache = object()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
-
- def clear(self):
- cache.clear()
-
- def cache_len(self):
- return len(cache)
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
- self.__len__ = types.MethodType(cache_len, self)
-
- if _OrderedDict is not None:
- class _FifoCache(object):
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
-
- cache = _OrderedDict()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
- while len(cache) > size:
- try:
- cache.popitem(False)
- except KeyError:
- pass
-
- def clear(self):
- cache.clear()
-
- def cache_len(self):
- return len(cache)
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
- self.__len__ = types.MethodType(cache_len, self)
-
- else:
- class _FifoCache(object):
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
-
- cache = {}
-                    # note: use an unbounded deque here; a maxlen-bounded deque
-                    # would never satisfy the eviction loop in set() below,
-                    # silently dropping keys from the fifo while their entries
-                    # remained in `cache`
-                    key_fifo = collections.deque()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
- while len(key_fifo) > size:
- cache.pop(key_fifo.popleft(), None)
- key_fifo.append(key)
-
- def clear(self):
- cache.clear()
- key_fifo.clear()
-
- def cache_len(self):
- return len(cache)
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
- self.__len__ = types.MethodType(cache_len, self)
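-
-    # A small sketch of the cache interface above (illustrative only):
-    #~ cache = ParserElement._FifoCache(size=2)
-    #~ cache.set("key", "value")
-    #~ assert cache.get("key") == "value"
-    #~ assert cache.get("missing") is cache.not_in_cache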
-
- # argument cache for optimizing repeated calls when backtracking through recursive expressions
-    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
- packrat_cache_lock = RLock()
- packrat_cache_stats = [0, 0]
-
- # this method gets repeatedly called during backtracking with the same arguments -
- # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
- def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
- HIT, MISS = 0, 1
- lookup = (self, instring, loc, callPreParse, doActions)
- with ParserElement.packrat_cache_lock:
- cache = ParserElement.packrat_cache
- value = cache.get(lookup)
- if value is cache.not_in_cache:
- ParserElement.packrat_cache_stats[MISS] += 1
- try:
- value = self._parseNoCache(instring, loc, doActions, callPreParse)
- except ParseBaseException as pe:
- # cache a copy of the exception, without the traceback
- cache.set(lookup, pe.__class__(*pe.args))
- raise
- else:
- cache.set(lookup, (value[0], value[1].copy()))
- return value
- else:
- ParserElement.packrat_cache_stats[HIT] += 1
- if isinstance(value, Exception):
- raise value
- return (value[0], value[1].copy())
-
- _parse = _parseNoCache
-
- @staticmethod
- def resetCache():
- ParserElement.packrat_cache.clear()
- ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
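-
-    # A minimal sketch of reading the packrat statistics kept above; indices
-    # 0 and 1 are the HIT and MISS counters (illustrative only):
-    #~ ParserElement.enablePackrat()
-    #~ OneOrMore(Word(alphas)).parseString("a b c")
-    #~ hits, misses = ParserElement.packrat_cache_stats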
-
- _packratEnabled = False
- @staticmethod
- def enablePackrat(cache_size_limit=128):
- """Enables "packrat" parsing, which adds memoizing to the parsing logic.
- Repeated parse attempts at the same string location (which happens
- often in many complex grammars) can immediately return a cached value,
-        instead of re-executing parsing/validating code. Memoizing is done for
- both valid results and parsing exceptions.
-
- Parameters:
-         - cache_size_limit - (default=C{128}) - if an integer value is provided,
-           it will limit the size of the packrat cache; if None is passed, then
- the cache size will be unbounded; if 0 is passed, the cache will
- be effectively disabled.
-
- This speedup may break existing programs that use parse actions that
- have side-effects. For this reason, packrat parsing is disabled when
- you first import pyparsing. To activate the packrat feature, your
- program must call the class method C{ParserElement.enablePackrat()}. If
- your program uses C{psyco} to "compile as you go", you must call
- C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
- Python will crash. For best results, call C{enablePackrat()} immediately
- after importing pyparsing.
-
- Example::
- import pyparsing
- pyparsing.ParserElement.enablePackrat()
- """
- if not ParserElement._packratEnabled:
- ParserElement._packratEnabled = True
- if cache_size_limit is None:
- ParserElement.packrat_cache = ParserElement._UnboundedCache()
- else:
- ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
- ParserElement._parse = ParserElement._parseCache
-
- def parseString( self, instring, parseAll=False ):
- """
- Execute the parse expression with the given string.
- This is the main interface to the client code, once the complete
- expression has been built.
-
- If you want the grammar to require that the entire input string be
- successfully parsed, then set C{parseAll} to True (equivalent to ending
- the grammar with C{L{StringEnd()}}).
-
- Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
- in order to report proper column numbers in parse actions.
- If the input string contains tabs and
- the grammar uses parse actions that use the C{loc} argument to index into the
- string being parsed, you can ensure you have a consistent view of the input
- string by:
- - calling C{parseWithTabs} on your grammar before calling C{parseString}
- (see L{I{parseWithTabs}<parseWithTabs>})
- - define your parse action using the full C{(s,loc,toks)} signature, and
- reference the input string using the parse action's C{s} argument
-         - explicitly expand the tabs in your input string before calling
- C{parseString}
-
- Example::
- Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
- Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
- """
- ParserElement.resetCache()
- if not self.streamlined:
- self.streamline()
- #~ self.saveAsList = True
- for e in self.ignoreExprs:
- e.streamline()
- if not self.keepTabs:
- instring = instring.expandtabs()
- try:
- loc, tokens = self._parse( instring, 0 )
- if parseAll:
- loc = self.preParse( instring, loc )
- se = Empty() + StringEnd()
- se._parse( instring, loc )
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
- else:
- return tokens
-
- def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
- """
- Scan the input string for expression matches. Each match will return the
- matching tokens, start location, and end location. May be called with optional
- C{maxMatches} argument, to clip scanning after 'n' matches are found. If
- C{overlap} is specified, then overlapping matches will be reported.
-
- Note that the start and end locations are reported relative to the string
- being parsed. See L{I{parseString}<parseString>} for more information on parsing
- strings with embedded tabs.
-
- Example::
- source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
- print(source)
- for tokens,start,end in Word(alphas).scanString(source):
- print(' '*start + '^'*(end-start))
- print(' '*start + tokens[0])
-
- prints::
-
- sldjf123lsdjjkf345sldkjf879lkjsfd987
- ^^^^^
- sldjf
- ^^^^^^^
- lsdjjkf
- ^^^^^^
- sldkjf
- ^^^^^^
- lkjsfd
- """
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
-
- if not self.keepTabs:
- instring = _ustr(instring).expandtabs()
- instrlen = len(instring)
- loc = 0
- preparseFn = self.preParse
- parseFn = self._parse
- ParserElement.resetCache()
- matches = 0
- try:
- while loc <= instrlen and matches < maxMatches:
- try:
- preloc = preparseFn( instring, loc )
- nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
- except ParseException:
- loc = preloc+1
- else:
- if nextLoc > loc:
- matches += 1
- yield tokens, preloc, nextLoc
- if overlap:
- nextloc = preparseFn( instring, loc )
- if nextloc > loc:
- loc = nextLoc
- else:
- loc += 1
- else:
- loc = nextLoc
- else:
- loc = preloc+1
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def transformString( self, instring ):
- """
- Extension to C{L{scanString}}, to modify matching text with modified tokens that may
- be returned from a parse action. To use C{transformString}, define a grammar and
- attach a parse action to it that modifies the returned token list.
- Invoking C{transformString()} on a target string will then scan for matches,
- and replace the matched text patterns according to the logic in the parse
- action. C{transformString()} returns the resulting transformed string.
-
- Example::
- wd = Word(alphas)
- wd.setParseAction(lambda toks: toks[0].title())
-
- print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
- Prints::
- Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
- """
- out = []
- lastE = 0
- # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
- # keep string locs straight between transformString and scanString
- self.keepTabs = True
- try:
- for t,s,e in self.scanString( instring ):
- out.append( instring[lastE:s] )
- if t:
- if isinstance(t,ParseResults):
- out += t.asList()
- elif isinstance(t,list):
- out += t
- else:
- out.append(t)
- lastE = e
- out.append(instring[lastE:])
- out = [o for o in out if o]
- return "".join(map(_ustr,_flatten(out)))
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def searchString( self, instring, maxMatches=_MAX_INT ):
- """
- Another extension to C{L{scanString}}, simplifying the access to the tokens found
- to match the given parse expression. May be called with optional
- C{maxMatches} argument, to clip searching after 'n' matches are found.
-
- Example::
- # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
- cap_word = Word(alphas.upper(), alphas.lower())
-
- print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
-
- # the sum() builtin can be used to merge results into a single ParseResults object
- print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
- prints::
- [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
- ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
- """
- try:
- return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
- """
- Generator method to split a string using the given expression as a separator.
-        May be called with optional C{maxsplit} argument, to limit the number of splits;
-        and the optional C{includeSeparators} argument (default=C{False}), to indicate
-        whether the separating matched text should be included in the split results.
-
- Example::
- punc = oneOf(list(".,;:/-!?"))
- print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
- prints::
- ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
- """
- splits = 0
- last = 0
- for t,s,e in self.scanString(instring, maxMatches=maxsplit):
- yield instring[last:s]
- if includeSeparators:
- yield t[0]
- last = e
- yield instring[last:]
-
- def __add__(self, other ):
- """
- Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
- converts them to L{Literal}s by default.
-
- Example::
- greet = Word(alphas) + "," + Word(alphas) + "!"
- hello = "Hello, World!"
- print (hello, "->", greet.parseString(hello))
- Prints::
- Hello, World! -> ['Hello', ',', 'World', '!']
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return And( [ self, other ] )
-
- def __radd__(self, other ):
- """
- Implementation of + operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other + self
-
- def __sub__(self, other):
- """
- Implementation of - operator, returns C{L{And}} with error stop
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return self + And._ErrorStop() + other
-
- def __rsub__(self, other ):
- """
- Implementation of - operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other - self
-
- def __mul__(self,other):
- """
- Implementation of * operator, allows use of C{expr * 3} in place of
-        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
- tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
- may also include C{None} as in:
- - C{expr*(n,None)} or C{expr*(n,)} is equivalent
- to C{expr*n + L{ZeroOrMore}(expr)}
- (read as "at least n instances of C{expr}")
- - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
- (read as "0 to n instances of C{expr}")
- - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
-
- Note that C{expr*(None,n)} does not raise an exception if
- more than n exprs exist in the input stream; that is,
- C{expr*(None,n)} does not enforce a maximum number of expr
- occurrences. If this behavior is desired, then write
- C{expr*(None,n) + ~expr}
- """
- if isinstance(other,int):
- minElements, optElements = other,0
- elif isinstance(other,tuple):
- other = (other + (None, None))[:2]
- if other[0] is None:
- other = (0, other[1])
- if isinstance(other[0],int) and other[1] is None:
- if other[0] == 0:
- return ZeroOrMore(self)
- if other[0] == 1:
- return OneOrMore(self)
- else:
- return self*other[0] + ZeroOrMore(self)
- elif isinstance(other[0],int) and isinstance(other[1],int):
- minElements, optElements = other
- optElements -= minElements
- else:
- raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
- else:
- raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
-
- if minElements < 0:
- raise ValueError("cannot multiply ParserElement by negative value")
- if optElements < 0:
- raise ValueError("second tuple value must be greater or equal to first tuple value")
- if minElements == optElements == 0:
- raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
-
- if (optElements):
- def makeOptionalList(n):
- if n>1:
- return Optional(self + makeOptionalList(n-1))
- else:
- return Optional(self)
- if minElements:
- if minElements == 1:
- ret = self + makeOptionalList(optElements)
- else:
- ret = And([self]*minElements) + makeOptionalList(optElements)
- else:
- ret = makeOptionalList(optElements)
- else:
- if minElements == 1:
- ret = self
- else:
- ret = And([self]*minElements)
- return ret
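-
-    # A short sketch of the multiplication forms described above (illustrative
-    # only; assumes `expr` is some ParserElement such as Word(nums)):
-    #~ expr * 3         # same as expr + expr + expr
-    #~ expr * (2, 4)    # between 2 and 4 occurrences
-    #~ expr * (1, None) # same as OneOrMore(expr)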
-
- def __rmul__(self, other):
- return self.__mul__(other)
-
- def __or__(self, other ):
- """
- Implementation of | operator - returns C{L{MatchFirst}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return MatchFirst( [ self, other ] )
-
- def __ror__(self, other ):
- """
- Implementation of | operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other | self
-
- def __xor__(self, other ):
- """
- Implementation of ^ operator - returns C{L{Or}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return Or( [ self, other ] )
-
- def __rxor__(self, other ):
- """
- Implementation of ^ operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other ^ self
-
- def __and__(self, other ):
- """
- Implementation of & operator - returns C{L{Each}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return Each( [ self, other ] )
-
- def __rand__(self, other ):
- """
- Implementation of & operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other & self
-
- def __invert__( self ):
- """
- Implementation of ~ operator - returns C{L{NotAny}}
- """
- return NotAny( self )
-
- def __call__(self, name=None):
- """
- Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
-
- If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
- passed as C{True}.
-
- If C{name} is omitted, same as calling C{L{copy}}.
-
- Example::
- # these are equivalent
- userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
- userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
- """
- if name is not None:
- return self.setResultsName(name)
- else:
- return self.copy()
-
- def suppress( self ):
- """
- Suppresses the output of this C{ParserElement}; useful to keep punctuation from
- cluttering up returned output.
- """
- return Suppress( self )
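-
-    # A small usage sketch (illustrative only):
-    #~ wd = Word(alphas)
-    #~ print((wd + Literal(',').suppress() + wd).parseString("Hello, World"))
-    #~ # -> ['Hello', 'World']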
-
- def leaveWhitespace( self ):
- """
- Disables the skipping of whitespace before matching the characters in the
- C{ParserElement}'s defined pattern. This is normally only used internally by
- the pyparsing module, but may be needed in some whitespace-sensitive grammars.
- """
- self.skipWhitespace = False
- return self
-
- def setWhitespaceChars( self, chars ):
- """
- Overrides the default whitespace chars
- """
- self.skipWhitespace = True
- self.whiteChars = chars
- self.copyDefaultWhiteChars = False
- return self
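-
-    # A small usage sketch (illustrative only): treat only spaces as skippable,
-    # so a newline ends the matched sequence.
-    #~ w = Word(alphas).setWhitespaceChars(" ")
-    #~ OneOrMore(w).parseString("abc def\nghi")  # -> ['abc', 'def']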
-
- def parseWithTabs( self ):
- """
- Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
- Must be called before C{parseString} when the input grammar contains elements that
- match C{<TAB>} characters.
- """
- self.keepTabs = True
- return self
-
- def ignore( self, other ):
- """
- Define expression to be ignored (e.g., comments) while doing pattern
- matching; may be called repeatedly, to define multiple comment or other
- ignorable patterns.
-
- Example::
- patt = OneOrMore(Word(alphas))
- patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
-
- patt.ignore(cStyleComment)
- patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
- """
- if isinstance(other, basestring):
- other = Suppress(other)
-
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- self.ignoreExprs.append(other)
- else:
- self.ignoreExprs.append( Suppress( other.copy() ) )
- return self
-
- def setDebugActions( self, startAction, successAction, exceptionAction ):
- """
- Enable display of debugging messages while doing pattern matching.
- """
- self.debugActions = (startAction or _defaultStartDebugAction,
- successAction or _defaultSuccessDebugAction,
- exceptionAction or _defaultExceptionDebugAction)
- self.debug = True
- return self
-
- def setDebug( self, flag=True ):
- """
- Enable display of debugging messages while doing pattern matching.
- Set C{flag} to True to enable, False to disable.
-
- Example::
- wd = Word(alphas).setName("alphaword")
- integer = Word(nums).setName("numword")
- term = wd | integer
-
- # turn on debugging for wd
- wd.setDebug()
-
- OneOrMore(term).parseString("abc 123 xyz 890")
-
- prints::
- Match alphaword at loc 0(1,1)
- Matched alphaword -> ['abc']
- Match alphaword at loc 3(1,4)
- Exception raised:Expected alphaword (at char 4), (line:1, col:5)
- Match alphaword at loc 7(1,8)
- Matched alphaword -> ['xyz']
- Match alphaword at loc 11(1,12)
- Exception raised:Expected alphaword (at char 12), (line:1, col:13)
- Match alphaword at loc 15(1,16)
- Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
- The output shown is that produced by the default debug actions - custom debug actions can be
- specified using L{setDebugActions}. Prior to attempting
- to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
- is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
- message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
- which makes debugging and exception messages easier to understand - for instance, the default
- name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
- """
- if flag:
- self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
- else:
- self.debug = False
- return self
-
- def __str__( self ):
- return self.name
-
- def __repr__( self ):
- return _ustr(self)
-
- def streamline( self ):
- self.streamlined = True
- self.strRepr = None
- return self
-
- def checkRecursion( self, parseElementList ):
- pass
-
- def validate( self, validateTrace=[] ):
- """
- Check defined expressions for valid structure, check for infinite recursive definitions.
- """
- self.checkRecursion( [] )
-
- def parseFile( self, file_or_filename, parseAll=False ):
- """
- Execute the parse expression on the given file or filename.
- If a filename is specified (instead of a file object),
- the entire file is opened, read, and closed before parsing.
- """
- try:
- file_contents = file_or_filename.read()
- except AttributeError:
- with open(file_or_filename, "r") as f:
- file_contents = f.read()
- try:
- return self.parseString(file_contents, parseAll)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
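-
-    # A minimal usage sketch (illustrative only; `grammar.txt` is a
-    # hypothetical file path):
-    #~ results = OneOrMore(Word(alphas)).parseFile("grammar.txt")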
-
- def __eq__(self,other):
- if isinstance(other, ParserElement):
- return self is other or vars(self) == vars(other)
- elif isinstance(other, basestring):
- return self.matches(other)
- else:
- return super(ParserElement,self)==other
-
- def __ne__(self,other):
- return not (self == other)
-
- def __hash__(self):
- return hash(id(self))
-
- def __req__(self,other):
- return self == other
-
- def __rne__(self,other):
- return not (self == other)
-
- def matches(self, testString, parseAll=True):
- """
- Method for quick testing of a parser against a test string. Good for simple
-        inline micro-tests of sub-expressions while building up a larger parser.
-
- Parameters:
- - testString - to test against this expression for a match
- - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
-
- Example::
- expr = Word(nums)
- assert expr.matches("100")
- """
- try:
- self.parseString(_ustr(testString), parseAll=parseAll)
- return True
- except ParseBaseException:
- return False
-
- def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
- """
- Execute the parse expression on a series of test strings, showing each
- test, the parsed results or where the parse failed. Quick and easy way to
- run a parse expression against a list of sample strings.
-
- Parameters:
- - tests - a list of separate test strings, or a multiline string of test strings
- - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
- string; pass None to disable comment filtering
- - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
- if False, only dump nested list
- - printResults - (default=C{True}) prints test output to stdout
- - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
-
- Returns: a (success, results) tuple, where success indicates that all tests succeeded
- (or all failed, if C{failureTests} is True), and results is a list of
- (test string, parse result or exception) tuples, one per test
-
- Example::
- number_expr = pyparsing_common.number.copy()
-
- result = number_expr.runTests('''
- # unsigned integer
- 100
- # negative integer
- -100
- # float with scientific notation
- 6.02e23
- # integer with scientific notation
- 1e-12
- ''')
- print("Success" if result[0] else "Failed!")
-
- result = number_expr.runTests('''
- # stray character
- 100Z
- # missing leading digit before '.'
- -.100
- # too many '.'
- 3.14.159
- ''', failureTests=True)
- print("Success" if result[0] else "Failed!")
- prints::
- # unsigned integer
- 100
- [100]
-
- # negative integer
- -100
- [-100]
-
- # float with scientific notation
- 6.02e23
- [6.02e+23]
-
- # integer with scientific notation
- 1e-12
- [1e-12]
-
- Success
-
- # stray character
- 100Z
- ^
- FAIL: Expected end of text (at char 3), (line:1, col:4)
-
- # missing leading digit before '.'
- -.100
- ^
- FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
- # too many '.'
- 3.14.159
- ^
- FAIL: Expected end of text (at char 4), (line:1, col:5)
-
- Success
-
- Each test string must be on a single line. If you want to test a string that spans multiple
- lines, create a test like this::
-
- expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
-
- (Note that this is a raw string literal; you must include the leading 'r'.)
- """
- if isinstance(tests, basestring):
- tests = list(map(str.strip, tests.rstrip().splitlines()))
- if isinstance(comment, basestring):
- comment = Literal(comment)
- allResults = []
- comments = []
- success = True
- for t in tests:
- if (comment is not None and comment.matches(t, False)) or (comments and not t):
- comments.append(t)
- continue
- if not t:
- continue
- out = ['\n'.join(comments), t]
- comments = []
- try:
- t = t.replace(r'\n','\n')
- result = self.parseString(t, parseAll=parseAll)
- out.append(result.dump(full=fullDump))
- success = success and not failureTests
- except ParseBaseException as pe:
- fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
- if '\n' in t:
- out.append(line(pe.loc, t))
- out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
- else:
- out.append(' '*pe.loc + '^' + fatal)
- out.append("FAIL: " + str(pe))
- success = success and failureTests
- result = pe
- except Exception as exc:
- out.append("FAIL-EXCEPTION: " + str(exc))
- success = success and failureTests
- result = exc
-
- if printResults:
- if fullDump:
- out.append('')
- print('\n'.join(out))
-
- allResults.append((t, result))
-
- return success, allResults
-
-
-class Token(ParserElement):
- """
- Abstract C{ParserElement} subclass, for defining atomic matching patterns.
- """
- def __init__( self ):
- super(Token,self).__init__( savelist=False )
-
-
-class Empty(Token):
- """
- An empty token that will always match.
- """
- def __init__( self ):
- super(Empty,self).__init__()
- self.name = "Empty"
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class NoMatch(Token):
- """
- A token that will never match.
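-
- Example::
- # a branch that should never succeed, e.g. as a placeholder in an alternation
- NoMatch().parseString("anything") # -> Exception: Unmatchable token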
- """
- def __init__( self ):
- super(NoMatch,self).__init__()
- self.name = "NoMatch"
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.errmsg = "Unmatchable token"
-
- def parseImpl( self, instring, loc, doActions=True ):
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
- """
- Token to exactly match a specified string.
-
- Example::
- Literal('blah').parseString('blah') # -> ['blah']
- Literal('blah').parseString('blahfooblah') # -> ['blah']
- Literal('blah').parseString('bla') # -> Exception: Expected "blah"
-
- For case-insensitive matching, use L{CaselessLiteral}.
-
- For keyword matching (force word break before and after the matched string),
- use L{Keyword} or L{CaselessKeyword}.
- """
- def __init__( self, matchString ):
- super(Literal,self).__init__()
- self.match = matchString
- self.matchLen = len(matchString)
- try:
- self.firstMatchChar = matchString[0]
- except IndexError:
- warnings.warn("null string passed to Literal; use Empty() instead",
- SyntaxWarning, stacklevel=2)
- self.__class__ = Empty
- self.name = '"%s"' % _ustr(self.match)
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
-
- # Performance tuning: this routine gets called a *lot*.
- # If this is a single-character match string and the first character matches,
- # short-circuit as quickly as possible and avoid calling startswith.
- #~ @profile
- def parseImpl( self, instring, loc, doActions=True ):
- if (instring[loc] == self.firstMatchChar and
- (self.matchLen==1 or instring.startswith(self.match,loc)) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-_L = Literal
-ParserElement._literalStringClass = Literal
-
-class Keyword(Token):
- """
- Token to exactly match a specified string as a keyword, that is, it must be
- immediately preceded and followed by non-keyword characters. Compare with C{L{Literal}}:
- - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
- Accepts two optional constructor arguments in addition to the keyword string:
- - C{identChars} is a string of characters that would be valid identifier characters,
- defaulting to all alphanumerics + "_" and "$"
- - C{caseless} allows case-insensitive matching, default is C{False}.
-
- Example::
- Keyword("start").parseString("start") # -> ['start']
- Keyword("start").parseString("starting") # -> Exception
-
- For case-insensitive matching, use L{CaselessKeyword}.
- """
- DEFAULT_KEYWORD_CHARS = alphanums+"_$"
-
- def __init__( self, matchString, identChars=None, caseless=False ):
- super(Keyword,self).__init__()
- if identChars is None:
- identChars = Keyword.DEFAULT_KEYWORD_CHARS
- self.match = matchString
- self.matchLen = len(matchString)
- try:
- self.firstMatchChar = matchString[0]
- except IndexError:
- warnings.warn("null string passed to Keyword; use Empty() instead",
- SyntaxWarning, stacklevel=2)
- self.name = '"%s"' % self.match
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
- self.caseless = caseless
- if caseless:
- self.caselessmatch = matchString.upper()
- identChars = identChars.upper()
- self.identChars = set(identChars)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.caseless:
- if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
- (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
- return loc+self.matchLen, self.match
- else:
- if (instring[loc] == self.firstMatchChar and
- (self.matchLen==1 or instring.startswith(self.match,loc)) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
- (loc == 0 or instring[loc-1] not in self.identChars) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
- def copy(self):
- c = super(Keyword,self).copy()
- c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
- return c
-
- @staticmethod
- def setDefaultKeywordChars( chars ):
- """Overrides the default Keyword chars
- """
- Keyword.DEFAULT_KEYWORD_CHARS = chars
-
-class CaselessLiteral(Literal):
- """
- Token to match a specified string, ignoring case of letters.
- Note: the matched results will always be in the case of the given
- match string, NOT the case of the input text.
-
- Example::
- OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
-
- (Contrast with example for L{CaselessKeyword}.)
- """
- def __init__( self, matchString ):
- super(CaselessLiteral,self).__init__( matchString.upper() )
- # Preserve the defining literal.
- self.returnString = matchString
- self.name = "'%s'" % self.returnString
- self.errmsg = "Expected " + self.name
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[ loc:loc+self.matchLen ].upper() == self.match:
- return loc+self.matchLen, self.returnString
- raise ParseException(instring, loc, self.errmsg, self)
-
-class CaselessKeyword(Keyword):
- """
- Caseless version of L{Keyword}.
-
- Example::
- OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
-
- (Contrast with example for L{CaselessLiteral}.)
- """
- def __init__( self, matchString, identChars=None ):
- super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
-
- def parseImpl( self, instring, loc, doActions=True ):
- if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-class CloseMatch(Token):
- """
- A variation on L{Literal} which matches "close" matches, that is,
- strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- - C{match_string} - string to be matched
- - C{maxMismatches} - (default=C{1}) maximum number of mismatches allowed to count as a match
-
- The results from a successful parse will contain the matched text from the input string and the following named results:
- - C{mismatches} - a list of the positions within the match_string where mismatches were found
- - C{original} - the original match_string used to compare against the input string
-
- If C{mismatches} is an empty list, then the match was an exact match.
-
- Example::
- patt = CloseMatch("ATCATCGAATGGA")
- patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
- patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
- # exact match
- patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
- # close match allowing up to 2 mismatches
- patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
- patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
- """
- def __init__(self, match_string, maxMismatches=1):
- super(CloseMatch,self).__init__()
- self.name = match_string
- self.match_string = match_string
- self.maxMismatches = maxMismatches
- self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
- self.mayIndexError = False
- self.mayReturnEmpty = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- start = loc
- instrlen = len(instring)
- maxloc = start + len(self.match_string)
-
- if maxloc <= instrlen:
- match_string = self.match_string
- match_stringloc = 0
- mismatches = []
- maxMismatches = self.maxMismatches
-
- for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
- src,mat = s_m
- if src != mat:
- mismatches.append(match_stringloc)
- if len(mismatches) > maxMismatches:
- break
- else:
- loc = match_stringloc + 1
- results = ParseResults([instring[start:loc]])
- results['original'] = self.match_string
- results['mismatches'] = mismatches
- return loc, results
-
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
- """
- Token for matching words composed of allowed character sets.
- Defined with a string containing all allowed initial characters, an
- optional string containing allowed body characters (if omitted,
- defaults to the initial character set), and an optional minimum,
- maximum, and/or exact length. The default value for C{min} is 1 (a
- minimum value < 1 is not valid); the default values for C{max} and C{exact}
- are 0, meaning no maximum or exact length restriction. An optional
- C{excludeChars} parameter can list characters to be excluded from the
- initial and body character sets; useful to define a word of all printables
- except for one or two characters, for instance.
-
- L{srange} is useful for defining custom character set strings for defining
- C{Word} expressions, using range notation from regular expression character sets.
-
- A common mistake is to use C{Word} to match a specific literal string, as in
- C{Word("Address")}. Remember that C{Word} uses the string argument to define
- I{sets} of matchable characters. This expression would match "Add", "AAA",
- "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
- To match an exact literal string, use L{Literal} or L{Keyword}.
-
- pyparsing includes helper strings for building Words:
- - L{alphas}
- - L{nums}
- - L{alphanums}
- - L{hexnums}
- - L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
- - L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- - L{printables} (any non-whitespace character)
-
- Example::
- # a word composed of digits
- integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
- # a word with a leading capital, and zero or more lowercase
- capital_word = Word(alphas.upper(), alphas.lower())
-
- # hostnames are alphanumeric, with leading alpha, and '-'
- hostname = Word(alphas, alphanums+'-')
-
- # roman numeral (not a strict parser, accepts invalid mix of characters)
- roman = Word("IVXLCDM")
-
- # any string of non-whitespace characters, except for ','
- csv_value = Word(printables, excludeChars=",")
- """
- def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
- super(Word,self).__init__()
- if excludeChars:
- initChars = ''.join(c for c in initChars if c not in excludeChars)
- if bodyChars:
- bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
- self.initCharsOrig = initChars
- self.initChars = set(initChars)
- if bodyChars :
- self.bodyCharsOrig = bodyChars
- self.bodyChars = set(bodyChars)
- else:
- self.bodyCharsOrig = initChars
- self.bodyChars = set(initChars)
-
- self.maxSpecified = max > 0
-
- if min < 1:
- raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asKeyword = asKeyword
-
- if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
- if self.bodyCharsOrig == self.initCharsOrig:
- self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
- elif len(self.initCharsOrig) == 1:
- self.reString = "%s[%s]*" % \
- (re.escape(self.initCharsOrig),
- _escapeRegexRangeChars(self.bodyCharsOrig),)
- else:
- self.reString = "[%s][%s]*" % \
- (_escapeRegexRangeChars(self.initCharsOrig),
- _escapeRegexRangeChars(self.bodyCharsOrig),)
- if self.asKeyword:
- self.reString = r"\b"+self.reString+r"\b"
- try:
- self.re = re.compile( self.reString )
- except Exception:
- self.re = None
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.re:
- result = self.re.match(instring,loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- return loc, result.group()
-
- if instring[loc] not in self.initChars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- instrlen = len(instring)
- bodychars = self.bodyChars
- maxloc = start + self.maxLen
- maxloc = min( maxloc, instrlen )
- while loc < maxloc and instring[loc] in bodychars:
- loc += 1
-
- throwException = False
- if loc - start < self.minLen:
- throwException = True
- if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
- throwException = True
- if self.asKeyword:
- if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
- throwException = True
-
- if throwException:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
- def __str__( self ):
- try:
- return super(Word,self).__str__()
- except Exception:
- pass
-
-
- if self.strRepr is None:
-
- def charsAsStr(s):
- if len(s)>4:
- return s[:4]+"..."
- else:
- return s
-
- if ( self.initCharsOrig != self.bodyCharsOrig ):
- self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
- else:
- self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
- return self.strRepr
-
-
-class Regex(Token):
- r"""
- Token for matching strings that match a given regular expression.
- Defined with a string specifying the regular expression, in a form recognized by the built-in Python C{re} module.
- If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
- named parse results.
-
- Example::
- realnum = Regex(r"[+-]?\d+\.\d*")
- date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
- # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
- roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
- """
- compiledREtype = type(re.compile("[A-Z]"))
- def __init__( self, pattern, flags=0):
- """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
- super(Regex,self).__init__()
-
- if isinstance(pattern, basestring):
- if not pattern:
- warnings.warn("null string passed to Regex; use Empty() instead",
- SyntaxWarning, stacklevel=2)
-
- self.pattern = pattern
- self.flags = flags
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- except sre_constants.error:
- warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
- SyntaxWarning, stacklevel=2)
- raise
-
- elif isinstance(pattern, Regex.compiledREtype):
- self.re = pattern
- self.pattern = \
- self.reString = str(pattern)
- self.flags = flags
-
- else:
- raise ValueError("Regex may only be constructed with a string or a compiled RE object")
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- result = self.re.match(instring,loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- d = result.groupdict()
- ret = ParseResults(result.group())
- if d:
- for k in d:
- ret[k] = d[k]
- return loc,ret
-
- def __str__( self ):
- try:
- return super(Regex,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "Re:(%s)" % repr(self.pattern)
-
- return self.strRepr
-
-
-class QuotedString(Token):
- r"""
- Token for matching strings that are delimited by quoting characters.
-
- Defined with the following parameters:
- - quoteChar - string of one or more characters defining the quote delimiting string
- - escChar - character to escape quotes, typically backslash (default=C{None})
- - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
-
- Example::
- qs = QuotedString('"')
- print(qs.searchString('lsjdf "This is the quote" sldjf'))
- complex_qs = QuotedString('{{', endQuoteChar='}}')
- print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
- sql_qs = QuotedString('"', escQuote='""')
- print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
- prints::
- [['This is the quote']]
- [['This is the "quote"']]
- [['This is the quote with "embedded" quotes']]
- """
- def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
- super(QuotedString,self).__init__()
-
- # remove whitespace from quote chars - whitespace quote chars won't work anyway
- quoteChar = quoteChar.strip()
- if not quoteChar:
- warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
- raise SyntaxError()
-
- if endQuoteChar is None:
- endQuoteChar = quoteChar
- else:
- endQuoteChar = endQuoteChar.strip()
- if not endQuoteChar:
- warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
- raise SyntaxError()
-
- self.quoteChar = quoteChar
- self.quoteCharLen = len(quoteChar)
- self.firstQuoteChar = quoteChar[0]
- self.endQuoteChar = endQuoteChar
- self.endQuoteCharLen = len(endQuoteChar)
- self.escChar = escChar
- self.escQuote = escQuote
- self.unquoteResults = unquoteResults
- self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
- if multiline:
- self.flags = re.MULTILINE | re.DOTALL
- self.pattern = r'%s(?:[^%s%s]' % \
- ( re.escape(self.quoteChar),
- _escapeRegexRangeChars(self.endQuoteChar[0]),
- (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
- else:
- self.flags = 0
- self.pattern = r'%s(?:[^%s\n\r%s]' % \
- ( re.escape(self.quoteChar),
- _escapeRegexRangeChars(self.endQuoteChar[0]),
- (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
- if len(self.endQuoteChar) > 1:
- self.pattern += (
- '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
- _escapeRegexRangeChars(self.endQuoteChar[i]))
- for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
- )
- if escQuote:
- self.pattern += (r'|(?:%s)' % re.escape(escQuote))
- if escChar:
- self.pattern += (r'|(?:%s.)' % re.escape(escChar))
- self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
- self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- except sre_constants.error:
- warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
- SyntaxWarning, stacklevel=2)
- raise
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- result = self.re.match(instring, loc) if instring[loc] == self.firstQuoteChar else None
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.group()
-
- if self.unquoteResults:
-
- # strip off quotes
- ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
-
- if isinstance(ret,basestring):
- # replace escaped whitespace
- if '\\' in ret and self.convertWhitespaceEscapes:
- ws_map = {
- r'\t' : '\t',
- r'\n' : '\n',
- r'\f' : '\f',
- r'\r' : '\r',
- }
- for wslit,wschar in ws_map.items():
- ret = ret.replace(wslit, wschar)
-
- # replace escaped characters
- if self.escChar:
- ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
- # replace escaped quotes
- if self.escQuote:
- ret = ret.replace(self.escQuote, self.endQuoteChar)
-
- return loc, ret
-
- def __str__( self ):
- try:
- return super(QuotedString,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
-
- return self.strRepr
-
-
-class CharsNotIn(Token):
- """
- Token for matching words composed of characters I{not} in a given set (will
- include whitespace in matched characters if not listed in the provided exclusion set - see example).
- Defined with a string containing all disallowed characters, and an optional
- minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
- minimum value < 1 is not valid); the default values for C{max} and C{exact}
- are 0, meaning no maximum or exact length restriction.
-
- Example::
- # define a comma-separated-value as anything that is not a ','
- csv_value = CharsNotIn(',')
- print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
- prints::
- ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
- """
- def __init__( self, notChars, min=1, max=0, exact=0 ):
- super(CharsNotIn,self).__init__()
- self.skipWhitespace = False
- self.notChars = notChars
-
- if min < 1:
- raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = ( self.minLen == 0 )
- self.mayIndexError = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[loc] in self.notChars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- notchars = self.notChars
- maxlen = min( start+self.maxLen, len(instring) )
- while loc < maxlen and \
- (instring[loc] not in notchars):
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
- def __str__( self ):
- try:
- return super(CharsNotIn, self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- if len(self.notChars) > 4:
- self.strRepr = "!W:(%s...)" % self.notChars[:4]
- else:
- self.strRepr = "!W:(%s)" % self.notChars
-
- return self.strRepr
-
-class White(Token):
- """
- Special matching class for matching whitespace. Normally, whitespace is ignored
- by pyparsing grammars. This class is included when some whitespace structures
- are significant. Define with a string containing the whitespace characters to be
- matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
- as defined for the C{L{Word}} class.
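-
- Example::
- # make newlines significant tokens (pyparsing normally skips them)
- NL = White("\\n").suppress()
- line = Word(alphas) + NL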
- """
- whiteStrs = {
- " " : "<SPC>",
- "\t": "<TAB>",
- "\n": "<LF>",
- "\r": "<CR>",
- "\f": "<FF>",
- }
- def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
- super(White,self).__init__()
- self.matchWhite = ws
- self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
- #~ self.leaveWhitespace()
- self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
- self.mayReturnEmpty = True
- self.errmsg = "Expected " + self.name
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[loc] not in self.matchWhite:
- raise ParseException(instring, loc, self.errmsg, self)
- start = loc
- loc += 1
- maxloc = start + self.maxLen
- maxloc = min( maxloc, len(instring) )
- while loc < maxloc and instring[loc] in self.matchWhite:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
-
-class _PositionToken(Token):
- def __init__( self ):
- super(_PositionToken,self).__init__()
- self.name=self.__class__.__name__
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-class GoToColumn(_PositionToken):
- """
- Token to advance to a specific column of input text; useful for tabular report scraping.
- """
- def __init__( self, colno ):
- super(GoToColumn,self).__init__()
- self.col = colno
-
- def preParse( self, instring, loc ):
- if col(loc,instring) != self.col:
- instrlen = len(instring)
- if self.ignoreExprs:
- loc = self._skipIgnorables( instring, loc )
- while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
- loc += 1
- return loc
-
- def parseImpl( self, instring, loc, doActions=True ):
- thiscol = col( loc, instring )
- if thiscol > self.col:
- raise ParseException( instring, loc, "Text not in expected column", self )
- newloc = loc + self.col - thiscol
- ret = instring[ loc: newloc ]
- return newloc, ret
-
-
-class LineStart(_PositionToken):
- """
- Matches if current position is at the beginning of a line within the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
- print(t)
-
- Prints::
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
- def __init__( self ):
- super(LineStart,self).__init__()
- self.errmsg = "Expected start of line"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if col(loc, instring) == 1:
- return loc, []
- raise ParseException(instring, loc, self.errmsg, self)
-
-class LineEnd(_PositionToken):
- """
- Matches if current position is at the end of a line within the parse string
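-
- Example::
- # require each key=value setting to end at the end of its line
- setting = Word(alphas) + '=' + Word(alphanums) + LineEnd().suppress()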
- """
- def __init__( self ):
- super(LineEnd,self).__init__()
- self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
- self.errmsg = "Expected end of line"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc<len(instring):
- if instring[loc] == "\n":
- return loc+1, "\n"
- else:
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc+1, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-class StringStart(_PositionToken):
- """
- Matches if current position is at the beginning of the parse string
- """
- def __init__( self ):
- super(StringStart,self).__init__()
- self.errmsg = "Expected start of text"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc != 0:
- # see if entire string up to here is just whitespace and ignorables
- if loc != self.preParse( instring, 0 ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-class StringEnd(_PositionToken):
- """
- Matches if current position is at the end of the parse string
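-
- Example::
- # the explicit equivalent of parseAll=True
- complete_number = Word(nums) + StringEnd()
- complete_number.parseString("123") # -> ['123']
- complete_number.parseString("123x") # -> Exception: Expected end of text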
- """
- def __init__( self ):
- super(StringEnd,self).__init__()
- self.errmsg = "Expected end of text"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc < len(instring):
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc+1, []
- elif loc > len(instring):
- return loc, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-class WordStart(_PositionToken):
- """
- Matches if the current position is at the beginning of a Word, and
- is not preceded by any character in a given set of C{wordChars}
- (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
- use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
- the string being parsed, or at the beginning of a line.
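-
- Example::
- # find numbers only where they begin a word
- Word(nums).searchString("abc123 456") # -> [['123'], ['456']]
- (WordStart(alphanums) + Word(nums)).searchString("abc123 456") # -> [['456']]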
- """
- def __init__(self, wordChars = printables):
- super(WordStart,self).__init__()
- self.wordChars = set(wordChars)
- self.errmsg = "Not at the start of a word"
-
- def parseImpl(self, instring, loc, doActions=True ):
- if loc != 0:
- if (instring[loc-1] in self.wordChars or
- instring[loc] not in self.wordChars):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-class WordEnd(_PositionToken):
- """
- Matches if the current position is at the end of a Word, and
- is not followed by any character in a given set of C{wordChars}
- (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
- use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
- the string being parsed, or at the end of a line.
- """
- def __init__(self, wordChars = printables):
- super(WordEnd,self).__init__()
- self.wordChars = set(wordChars)
- self.skipWhitespace = False
- self.errmsg = "Not at the end of a word"
-
- def parseImpl(self, instring, loc, doActions=True ):
- instrlen = len(instring)
- if instrlen>0 and loc<instrlen:
- if (instring[loc] in self.wordChars or
- instring[loc-1] not in self.wordChars):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class ParseExpression(ParserElement):
- """
- Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
- """
- def __init__( self, exprs, savelist = False ):
- super(ParseExpression,self).__init__(savelist)
- if isinstance( exprs, _generatorType ):
- exprs = list(exprs)
-
- if isinstance( exprs, basestring ):
- self.exprs = [ ParserElement._literalStringClass( exprs ) ]
- elif isinstance( exprs, Iterable ):
- exprs = list(exprs)
- # if sequence of strings provided, wrap with Literal
- if all(isinstance(expr, basestring) for expr in exprs):
- exprs = map(ParserElement._literalStringClass, exprs)
- self.exprs = list(exprs)
- else:
- try:
- self.exprs = list( exprs )
- except TypeError:
- self.exprs = [ exprs ]
- self.callPreparse = False
-
- def __getitem__( self, i ):
- return self.exprs[i]
-
- def append( self, other ):
- self.exprs.append( other )
- self.strRepr = None
- return self
-
- def leaveWhitespace( self ):
- """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
- all contained expressions."""
- self.skipWhitespace = False
- self.exprs = [ e.copy() for e in self.exprs ]
- for e in self.exprs:
- e.leaveWhitespace()
- return self
-
- def ignore( self, other ):
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- super( ParseExpression, self).ignore( other )
- for e in self.exprs:
- e.ignore( self.ignoreExprs[-1] )
- else:
- super( ParseExpression, self).ignore( other )
- for e in self.exprs:
- e.ignore( self.ignoreExprs[-1] )
- return self
-
- def __str__( self ):
- try:
- return super(ParseExpression,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
- return self.strRepr
-
- def streamline( self ):
- super(ParseExpression,self).streamline()
-
- for e in self.exprs:
- e.streamline()
-
- # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
- # but only if there are no parse actions or resultsNames on the nested And's
- # (likewise for Or's and MatchFirst's)
- if ( len(self.exprs) == 2 ):
- other = self.exprs[0]
- if ( isinstance( other, self.__class__ ) and
- not(other.parseAction) and
- other.resultsName is None and
- not other.debug ):
- self.exprs = other.exprs[:] + [ self.exprs[1] ]
- self.strRepr = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- other = self.exprs[-1]
- if ( isinstance( other, self.__class__ ) and
- not(other.parseAction) and
- other.resultsName is None and
- not other.debug ):
- self.exprs = self.exprs[:-1] + other.exprs[:]
- self.strRepr = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- self.errmsg = "Expected " + _ustr(self)
-
- return self
-
- def setResultsName( self, name, listAllMatches=False ):
- ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
- return ret
-
- def validate( self, validateTrace=[] ):
- tmp = validateTrace[:]+[self]
- for e in self.exprs:
- e.validate(tmp)
- self.checkRecursion( [] )
-
- def copy(self):
- ret = super(ParseExpression,self).copy()
- ret.exprs = [e.copy() for e in self.exprs]
- return ret
-
-class And(ParseExpression):
- """
- Requires all given C{ParseExpression}s to be found in the given order.
- Expressions may be separated by whitespace.
- May be constructed using the C{'+'} operator.
- May also be constructed using the C{'-'} operator, which will suppress backtracking.
-
- Example::
- integer = Word(nums)
- name_expr = OneOrMore(Word(alphas))
-
- expr = And([integer("id"),name_expr("name"),integer("age")])
- # more easily written as:
- expr = integer("id") + name_expr("name") + integer("age")
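-
- # '-' also builds an And, but suppresses backtracking once the preceding
- # elements have matched, giving more precise error reporting:
- expr = integer("id") - name_expr("name") - integer("age")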
- """
-
- class _ErrorStop(Empty):
- def __init__(self, *args, **kwargs):
- super(And._ErrorStop,self).__init__(*args, **kwargs)
- self.name = '-'
- self.leaveWhitespace()
-
- def __init__( self, exprs, savelist = True ):
- super(And,self).__init__(exprs, savelist)
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- self.setWhitespaceChars( self.exprs[0].whiteChars )
- self.skipWhitespace = self.exprs[0].skipWhitespace
- self.callPreparse = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- # pass False as last arg to _parse for first element, since we already
- # pre-parsed the string as part of our And pre-parsing
- loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
- errorStop = False
- for e in self.exprs[1:]:
- if isinstance(e, And._ErrorStop):
- errorStop = True
- continue
- if errorStop:
- try:
- loc, exprtokens = e._parse( instring, loc, doActions )
- except ParseSyntaxException:
- raise
- except ParseBaseException as pe:
- pe.__traceback__ = None
- raise ParseSyntaxException._from_exception(pe)
- except IndexError:
- raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
- else:
- loc, exprtokens = e._parse( instring, loc, doActions )
- if exprtokens or exprtokens.haskeys():
- resultlist += exprtokens
- return loc, resultlist
-
- def __iadd__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #And( [ self, other ] )
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
- if not e.mayReturnEmpty:
- break
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
-
-class Or(ParseExpression):
- """
- Requires that at least one C{ParseExpression} is found.
- If two expressions match, the expression that matches the longest string will be used.
- May be constructed using the C{'^'} operator.
-
- Example::
- # construct Or using '^' operator
-
- number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
- print(number.searchString("123 3.1416 789"))
- prints::
- [['123'], ['3.1416'], ['789']]
- """
- def __init__( self, exprs, savelist = False ):
- super(Or,self).__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- maxExcLoc = -1
- maxException = None
- matches = []
- for e in self.exprs:
- try:
- loc2 = e.tryParse( instring, loc )
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(instring,len(instring),e.errmsg,self)
- maxExcLoc = len(instring)
- else:
- # save match among all matches, to retry longest to shortest
- matches.append((loc2, e))
-
- if matches:
- matches.sort(key=lambda x: -x[0])
- for _,e in matches:
- try:
- return e._parse( instring, loc, doActions )
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-
- def __ixor__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #Or( [ self, other ] )
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class MatchFirst(ParseExpression):
- """
- Requires that at least one C{ParseExpression} is found.
- If two expressions match, the first one listed is the one that will match.
- May be constructed using the C{'|'} operator.
-
- Example::
- # construct MatchFirst using '|' operator
-
- # watch the order of expressions to match
- number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
- print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
- # put more selective expression first
- number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
- print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
- """
- def __init__( self, exprs, savelist = False ):
- super(MatchFirst,self).__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- maxExcLoc = -1
- maxException = None
- for e in self.exprs:
- try:
- ret = e._parse( instring, loc, doActions )
- return ret
- except ParseException as err:
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(instring,len(instring),e.errmsg,self)
- maxExcLoc = len(instring)
-
- # we get here only if no expression matched; raise the exception from the match that made it the furthest
- else:
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(instring, loc, "no defined alternatives to match", self)
-
- def __ior__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #MatchFirst( [ self, other ] )
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class Each(ParseExpression):
- """
- Requires all given C{ParseExpression}s to be found, but in any order.
- Expressions may be separated by whitespace.
- May be constructed using the C{'&'} operator.
-
- Example::
- color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
- shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
- integer = Word(nums)
- shape_attr = "shape:" + shape_type("shape")
- posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
- color_attr = "color:" + color("color")
- size_attr = "size:" + integer("size")
-
- # use Each (using operator '&') to accept attributes in any order
- # (shape and posn are required, color and size are optional)
- shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
-
- shape_spec.runTests('''
- shape: SQUARE color: BLACK posn: 100, 120
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- color:GREEN size:20 shape:TRIANGLE posn:20,40
- '''
- )
- prints::
- shape: SQUARE color: BLACK posn: 100, 120
- ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- - color: BLACK
- - posn: ['100', ',', '120']
- - x: 100
- - y: 120
- - shape: SQUARE
-
-
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- - color: BLUE
- - posn: ['50', ',', '80']
- - x: 50
- - y: 80
- - shape: CIRCLE
- - size: 50
-
-
- color: GREEN size: 20 shape: TRIANGLE posn: 20,40
- ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- - color: GREEN
- - posn: ['20', ',', '40']
- - x: 20
- - y: 40
- - shape: TRIANGLE
- - size: 20
- """
- def __init__( self, exprs, savelist = True ):
- super(Each,self).__init__(exprs, savelist)
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = True
- self.initExprGroups = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.initExprGroups:
- self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
- opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
- opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
- self.optionals = opt1 + opt2
- self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
- self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
- self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
- self.required += self.multirequired
- self.initExprGroups = False
- tmpLoc = loc
- tmpReqd = self.required[:]
- tmpOpt = self.optionals[:]
- matchOrder = []
-
- keepMatching = True
- while keepMatching:
- tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
- failed = []
- for e in tmpExprs:
- try:
- tmpLoc = e.tryParse( instring, tmpLoc )
- except ParseException:
- failed.append(e)
- else:
- matchOrder.append(self.opt1map.get(id(e),e))
- if e in tmpReqd:
- tmpReqd.remove(e)
- elif e in tmpOpt:
- tmpOpt.remove(e)
- if len(failed) == len(tmpExprs):
- keepMatching = False
-
- if tmpReqd:
- missing = ", ".join(_ustr(e) for e in tmpReqd)
- raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
-
- # add any unmatched Optionals, in case they have default values defined
- matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
-
- resultlist = []
- for e in matchOrder:
- loc,results = e._parse(instring,loc,doActions)
- resultlist.append(results)
-
- finalResults = sum(resultlist, ParseResults([]))
- return loc, finalResults
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class ParseElementEnhance(ParserElement):
- """
- Abstract subclass of C{ParserElement}, for parse expressions that wrap and post-process a single contained expression.
- """
- def __init__( self, expr, savelist=False ):
- super(ParseElementEnhance,self).__init__(savelist)
- if isinstance( expr, basestring ):
- if issubclass(ParserElement._literalStringClass, Token):
- expr = ParserElement._literalStringClass(expr)
- else:
- expr = ParserElement._literalStringClass(Literal(expr))
- self.expr = expr
- self.strRepr = None
- if expr is not None:
- self.mayIndexError = expr.mayIndexError
- self.mayReturnEmpty = expr.mayReturnEmpty
- self.setWhitespaceChars( expr.whiteChars )
- self.skipWhitespace = expr.skipWhitespace
- self.saveAsList = expr.saveAsList
- self.callPreparse = expr.callPreparse
- self.ignoreExprs.extend(expr.ignoreExprs)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.expr is not None:
- return self.expr._parse( instring, loc, doActions, callPreParse=False )
- else:
- raise ParseException("",loc,self.errmsg,self)
-
- def leaveWhitespace( self ):
- self.skipWhitespace = False
- if self.expr is not None:
- self.expr = self.expr.copy()
- self.expr.leaveWhitespace()
- return self
-
- def ignore( self, other ):
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- super( ParseElementEnhance, self).ignore( other )
- if self.expr is not None:
- self.expr.ignore( self.ignoreExprs[-1] )
- else:
- super( ParseElementEnhance, self).ignore( other )
- if self.expr is not None:
- self.expr.ignore( self.ignoreExprs[-1] )
- return self
-
- def streamline( self ):
- super(ParseElementEnhance,self).streamline()
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def checkRecursion( self, parseElementList ):
- if self in parseElementList:
- raise RecursiveGrammarException( parseElementList+[self] )
- subRecCheckList = parseElementList[:] + [ self ]
- if self.expr is not None:
- self.expr.checkRecursion( subRecCheckList )
-
- def validate( self, validateTrace=[] ):
- tmp = validateTrace[:]+[self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self.checkRecursion( [] )
-
- def __str__( self ):
- try:
- return super(ParseElementEnhance,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None and self.expr is not None:
- self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
- return self.strRepr
-
-
-class FollowedBy(ParseElementEnhance):
- """
- Lookahead matching of the given parse expression. C{FollowedBy}
- does I{not} advance the parsing position within the input string, it only
- verifies that the specified parse expression matches at the current
- position. C{FollowedBy} always returns a null token list.
-
- Example::
- # use FollowedBy to match a label only if it is followed by a ':'
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
- OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
- prints::
- [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
- """
- def __init__( self, expr ):
- super(FollowedBy,self).__init__(expr)
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- self.expr.tryParse( instring, loc )
- return loc, []
-
-
-class NotAny(ParseElementEnhance):
- """
- Lookahead to disallow matching with the given parse expression. C{NotAny}
- does I{not} advance the parsing position within the input string, it only
- verifies that the specified parse expression does I{not} match at the current
- position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
- always returns a null token list. May be constructed using the '~' operator.
-
- Example::
-
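- AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
-
- # take care not to mistake keywords for identifiers
- ident = ~(AND | OR | NOT) + Word(alphas)
- boolean_term = Optional(NOT) + ident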
- """
- def __init__( self, expr ):
- super(NotAny,self).__init__(expr)
- #~ self.leaveWhitespace()
- self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
- self.mayReturnEmpty = True
- self.errmsg = "Found unwanted token, "+_ustr(self.expr)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.expr.canParseNext(instring, loc):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "~{" + _ustr(self.expr) + "}"
-
- return self.strRepr
-
-class _MultipleMatch(ParseElementEnhance):
- def __init__( self, expr, stopOn=None):
- super(_MultipleMatch, self).__init__(expr)
- self.saveAsList = True
- ender = stopOn
- if isinstance(ender, basestring):
- ender = ParserElement._literalStringClass(ender)
- self.not_ender = ~ender if ender is not None else None
-
- def parseImpl( self, instring, loc, doActions=True ):
- self_expr_parse = self.expr._parse
- self_skip_ignorables = self._skipIgnorables
- check_ender = self.not_ender is not None
- if check_ender:
- try_not_ender = self.not_ender.tryParse
-
- # must be at least one (but first see if we are the stopOn sentinel;
- # if so, fail)
- if check_ender:
- try_not_ender(instring, loc)
- loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
- try:
- hasIgnoreExprs = bool(self.ignoreExprs)
- while 1:
- if check_ender:
- try_not_ender(instring, loc)
- if hasIgnoreExprs:
- preloc = self_skip_ignorables( instring, loc )
- else:
- preloc = loc
- loc, tmptokens = self_expr_parse( instring, preloc, doActions )
- if tmptokens or tmptokens.haskeys():
- tokens += tmptokens
- except (ParseException,IndexError):
- pass
-
- return loc, tokens
-
-class OneOrMore(_MultipleMatch):
- """
- Repetition of one or more of the given expression.
-
- Parameters:
- - expr - expression that must match one or more times
- - stopOn - (default=C{None}) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example::
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
- text = "shape: SQUARE posn: upper left color: BLACK"
- OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
- # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
- # could also be written as
- (attr_expr * (1,)).parseString(text).pprint()
- """
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + _ustr(self.expr) + "}..."
-
- return self.strRepr
-
-class ZeroOrMore(_MultipleMatch):
- """
- Optional repetition of zero or more of the given expression.
-
- Parameters:
- - expr - expression that may match zero or more times
- - stopOn - (default=C{None}) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example: similar to L{OneOrMore}
- """
- def __init__( self, expr, stopOn=None):
- super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- try:
- return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
- except (ParseException,IndexError):
- return loc, []
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "[" + _ustr(self.expr) + "]..."
-
- return self.strRepr
-
-class _NullToken(object):
- def __bool__(self):
- return False
- __nonzero__ = __bool__
- def __str__(self):
- return ""
-
-_optionalNotMatched = _NullToken()
-class Optional(ParseElementEnhance):
- """
- Optional matching of the given expression.
-
- Parameters:
- - expr - expression to be matched optionally (at most once)
- - default (optional) - value to be returned if the optional expression is not found.
-
- Example::
- # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
- zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
- zip.runTests('''
- # traditional ZIP code
- 12345
-
- # ZIP+4 form
- 12101-0001
-
- # invalid ZIP
- 98765-
- ''')
- prints::
- # traditional ZIP code
- 12345
- ['12345']
-
- # ZIP+4 form
- 12101-0001
- ['12101-0001']
-
- # invalid ZIP
- 98765-
- ^
- FAIL: Expected end of text (at char 5), (line:1, col:6)
- """
- def __init__( self, expr, default=_optionalNotMatched ):
- super(Optional,self).__init__( expr, savelist=False )
- self.saveAsList = self.expr.saveAsList
- self.defaultValue = default
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- try:
- loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
- except (ParseException,IndexError):
- if self.defaultValue is not _optionalNotMatched:
- if self.expr.resultsName:
- tokens = ParseResults([ self.defaultValue ])
- tokens[self.expr.resultsName] = self.defaultValue
- else:
- tokens = [ self.defaultValue ]
- else:
- tokens = []
- return loc, tokens
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "[" + _ustr(self.expr) + "]"
-
- return self.strRepr
-
-class SkipTo(ParseElementEnhance):
- """
- Token for skipping over all undefined text until the matched expression is found.
-
- Parameters:
- - expr - target expression marking the end of the data to be skipped
- - include - (default=C{False}) if True, the target expression is also parsed
- (the skipped text and target expression are returned as a 2-element list).
- - ignore - (default=C{None}) used to define grammars (typically quoted strings and
- comments) that might contain false matches to the target expression
- - failOn - (default=C{None}) define expressions that are not allowed to be
- included in the skipped text; if found before the target expression is found,
- the SkipTo is not a match
-
- Example::
- report = '''
- Outstanding Issues Report - 1 Jan 2000
-
- # | Severity | Description | Days Open
- -----+----------+-------------------------------------------+-----------
- 101 | Critical | Intermittent system crash | 6
- 94 | Cosmetic | Spelling error on Login ('log|n') | 14
- 79 | Minor | System slow when running too many reports | 47
- '''
- integer = Word(nums)
- SEP = Suppress('|')
- # use SkipTo to simply match everything up until the next SEP
- # - ignore quoted strings, so that a '|' character inside a quoted string does not match
- # - parse action will call token.strip() for each matched token, i.e., the description body
- string_data = SkipTo(SEP, ignore=quotedString)
- string_data.setParseAction(tokenMap(str.strip))
- ticket_expr = (integer("issue_num") + SEP
- + string_data("sev") + SEP
- + string_data("desc") + SEP
- + integer("days_open"))
-
- for tkt in ticket_expr.searchString(report):
-        print(tkt.dump())
- prints::
- ['101', 'Critical', 'Intermittent system crash', '6']
- - days_open: 6
- - desc: Intermittent system crash
- - issue_num: 101
- - sev: Critical
- ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- - days_open: 14
- - desc: Spelling error on Login ('log|n')
- - issue_num: 94
- - sev: Cosmetic
- ['79', 'Minor', 'System slow when running too many reports', '47']
- - days_open: 47
- - desc: System slow when running too many reports
- - issue_num: 79
- - sev: Minor
- """
- def __init__( self, other, include=False, ignore=None, failOn=None ):
- super( SkipTo, self ).__init__( other )
- self.ignoreExpr = ignore
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.includeMatch = include
- self.asList = False
- if isinstance(failOn, basestring):
- self.failOn = ParserElement._literalStringClass(failOn)
- else:
- self.failOn = failOn
- self.errmsg = "No match found for "+_ustr(self.expr)
-
- def parseImpl( self, instring, loc, doActions=True ):
- startloc = loc
- instrlen = len(instring)
- expr = self.expr
- expr_parse = self.expr._parse
- self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
- self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-
- tmploc = loc
- while tmploc <= instrlen:
- if self_failOn_canParseNext is not None:
- # break if failOn expression matches
- if self_failOn_canParseNext(instring, tmploc):
- break
-
- if self_ignoreExpr_tryParse is not None:
- # advance past ignore expressions
- while 1:
- try:
- tmploc = self_ignoreExpr_tryParse(instring, tmploc)
- except ParseBaseException:
- break
-
- try:
- expr_parse(instring, tmploc, doActions=False, callPreParse=False)
- except (ParseException, IndexError):
- # no match, advance loc in string
- tmploc += 1
- else:
- # matched skipto expr, done
- break
-
- else:
- # ran off the end of the input string without matching skipto expr, fail
- raise ParseException(instring, loc, self.errmsg, self)
-
- # build up return values
- loc = tmploc
- skiptext = instring[startloc:loc]
- skipresult = ParseResults(skiptext)
-
- if self.includeMatch:
- loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
- skipresult += mat
-
- return loc, skipresult
-
-class Forward(ParseElementEnhance):
- """
- Forward declaration of an expression to be defined later -
- used for recursive grammars, such as algebraic infix notation.
- When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
-
- Note: take care when assigning to C{Forward} not to overlook precedence of operators.
- Specifically, '|' has a lower precedence than '<<', so that::
- fwdExpr << a | b | c
- will actually be evaluated as::
- (fwdExpr << a) | b | c
- thereby leaving b and c out as parseable alternatives. It is recommended that you
- explicitly group the values inserted into the C{Forward}::
- fwdExpr << (a | b | c)
- Converting to use the '<<=' operator instead will avoid this problem.
-
- See L{ParseResults.pprint} for an example of a recursive parser created using
- C{Forward}.
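-
-    A minimal recursive sketch (illustrative only; the names are assumed)::
-        num = Word(nums)
-        nested = Forward()
-        nested <<= num | ('[' + nested + ']')
-        print(nested.parseString("[[7]]"))  # -> ['[', '[', '7', ']', ']']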
- """
- def __init__( self, other=None ):
- super(Forward,self).__init__( other, savelist=False )
-
- def __lshift__( self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass(other)
- self.expr = other
- self.strRepr = None
- self.mayIndexError = self.expr.mayIndexError
- self.mayReturnEmpty = self.expr.mayReturnEmpty
- self.setWhitespaceChars( self.expr.whiteChars )
- self.skipWhitespace = self.expr.skipWhitespace
- self.saveAsList = self.expr.saveAsList
- self.ignoreExprs.extend(self.expr.ignoreExprs)
- return self
-
- def __ilshift__(self, other):
- return self << other
-
- def leaveWhitespace( self ):
- self.skipWhitespace = False
- return self
-
- def streamline( self ):
- if not self.streamlined:
- self.streamlined = True
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def validate( self, validateTrace=[] ):
- if self not in validateTrace:
- tmp = validateTrace[:]+[self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self.checkRecursion([])
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
- return self.__class__.__name__ + ": ..."
-
- # stubbed out for now - creates awful memory and perf issues
- self._revertClass = self.__class__
- self.__class__ = _ForwardNoRecurse
- try:
- if self.expr is not None:
- retString = _ustr(self.expr)
- else:
- retString = "None"
- finally:
- self.__class__ = self._revertClass
- return self.__class__.__name__ + ": " + retString
-
- def copy(self):
- if self.expr is not None:
- return super(Forward,self).copy()
- else:
- ret = Forward()
- ret <<= self
- return ret
-
-class _ForwardNoRecurse(Forward):
- def __str__( self ):
- return "..."
-
-class TokenConverter(ParseElementEnhance):
- """
-    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
- """
- def __init__( self, expr, savelist=False ):
- super(TokenConverter,self).__init__( expr )#, savelist )
- self.saveAsList = False
-
-class Combine(TokenConverter):
- """
- Converter to concatenate all matching tokens to a single string.
- By default, the matching patterns must also be contiguous in the input string;
- this can be disabled by specifying C{'adjacent=False'} in the constructor.
-
- Example::
- real = Word(nums) + '.' + Word(nums)
- print(real.parseString('3.1416')) # -> ['3', '.', '1416']
- # will also erroneously match the following
- print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
-
- real = Combine(Word(nums) + '.' + Word(nums))
- print(real.parseString('3.1416')) # -> ['3.1416']
- # no match when there are internal spaces
- print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
- """
- def __init__( self, expr, joinString="", adjacent=True ):
- super(Combine,self).__init__( expr )
- # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
- if adjacent:
- self.leaveWhitespace()
- self.adjacent = adjacent
- self.skipWhitespace = True
- self.joinString = joinString
- self.callPreparse = True
-
- def ignore( self, other ):
- if self.adjacent:
- ParserElement.ignore(self, other)
- else:
- super( Combine, self).ignore( other )
- return self
-
- def postParse( self, instring, loc, tokenlist ):
- retToks = tokenlist.copy()
- del retToks[:]
- retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
-
- if self.resultsName and retToks.haskeys():
- return [ retToks ]
- else:
- return retToks
-
-class Group(TokenConverter):
- """
- Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
-
- Example::
- ident = Word(alphas)
- num = Word(nums)
- term = ident | num
- func = ident + Optional(delimitedList(term))
- print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
-
- func = ident + Group(Optional(delimitedList(term)))
- print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
- """
- def __init__( self, expr ):
- super(Group,self).__init__( expr )
- self.saveAsList = True
-
- def postParse( self, instring, loc, tokenlist ):
- return [ tokenlist ]
-
-class Dict(TokenConverter):
- """
- Converter to return a repetitive expression as a list, but also as a dictionary.
- Each element can also be referenced using the first token in the expression as its key.
-    Useful for tabular report scraping when the first column can be used as an item key.
-
- Example::
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
- # print attributes as plain groups
- print(OneOrMore(attr_expr).parseString(text).dump())
-
- # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
- result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
- print(result.dump())
-
- # access named fields as dict entries, or output as dict
- print(result['shape'])
- print(result.asDict())
- prints::
- ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: light blue
- - posn: upper left
- - shape: SQUARE
- - texture: burlap
- SQUARE
- {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
- See more examples at L{ParseResults} of accessing fields by results name.
- """
- def __init__( self, expr ):
- super(Dict,self).__init__( expr )
- self.saveAsList = True
-
- def postParse( self, instring, loc, tokenlist ):
- for i,tok in enumerate(tokenlist):
- if len(tok) == 0:
- continue
- ikey = tok[0]
- if isinstance(ikey,int):
- ikey = _ustr(tok[0]).strip()
- if len(tok)==1:
- tokenlist[ikey] = _ParseResultsWithOffset("",i)
- elif len(tok)==2 and not isinstance(tok[1],ParseResults):
- tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
- else:
- dictvalue = tok.copy() #ParseResults(i)
- del dictvalue[0]
- if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
- else:
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
-
- if self.resultsName:
- return [ tokenlist ]
- else:
- return tokenlist
-
-
-class Suppress(TokenConverter):
- """
- Converter for ignoring the results of a parsed expression.
-
- Example::
- source = "a, b, c,d"
- wd = Word(alphas)
- wd_list1 = wd + ZeroOrMore(',' + wd)
- print(wd_list1.parseString(source))
-
- # often, delimiters that are useful during parsing are just in the
- # way afterward - use Suppress to keep them out of the parsed output
- wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
- print(wd_list2.parseString(source))
- prints::
- ['a', ',', 'b', ',', 'c', ',', 'd']
- ['a', 'b', 'c', 'd']
- (See also L{delimitedList}.)
- """
- def postParse( self, instring, loc, tokenlist ):
- return []
-
- def suppress( self ):
- return self
-
-
-class OnlyOnce(object):
- """
- Wrapper for parse actions, to ensure they are only called once.
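-
-    Example (an illustrative sketch; C{announce} is an assumed parse action)::
-        def announce(s, l, t):
-            print("first match: %s" % t)
-        wd = Word(alphas).setParseAction(OnlyOnce(announce))
-        # the first match invokes announce; later matches raise ParseException
-        # from inside the wrapper until reset() is called to re-arm it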
- """
- def __init__(self, methodCall):
- self.callable = _trim_arity(methodCall)
- self.called = False
- def __call__(self,s,l,t):
- if not self.called:
- results = self.callable(s,l,t)
- self.called = True
- return results
- raise ParseException(s,l,"")
- def reset(self):
- self.called = False
-
-def traceParseAction(f):
- """
- Decorator for debugging parse actions.
-
-    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
- When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
-
- Example::
- wd = Word(alphas)
-
- @traceParseAction
- def remove_duplicate_chars(tokens):
- return ''.join(sorted(set(''.join(tokens))))
-
- wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
- print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
- prints::
- >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
- <<leaving remove_duplicate_chars (ret: 'dfjkls')
- ['dfjkls']
- """
- f = _trim_arity(f)
- def z(*paArgs):
- thisFunc = f.__name__
- s,l,t = paArgs[-3:]
- if len(paArgs)>3:
- thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
- sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
- try:
- ret = f(*paArgs)
- except Exception as exc:
- sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
- raise
- sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
- return ret
- try:
- z.__name__ = f.__name__
- except AttributeError:
- pass
- return z
-
-#
-# global helpers
-#
-def delimitedList( expr, delim=",", combine=False ):
- """
- Helper to define a delimited list of expressions - the delimiter defaults to ','.
-    By default, the list elements and delimiters can have intervening whitespace and
-    comments, but this can be overridden by passing C{combine=True} in the constructor.
- If C{combine} is set to C{True}, the matching tokens are returned as a single token
- string, with the delimiters included; otherwise, the matching tokens are returned
- as a list of tokens, with the delimiters suppressed.
-
- Example::
- delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
- delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
- """
- dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
- if combine:
- return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
- else:
- return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
-
-def countedArray( expr, intExpr=None ):
- """
- Helper to define a counted list of expressions.
- This helper defines a pattern of the form::
- integer expr expr expr...
- where the leading integer tells how many expr expressions follow.
-    The matched expr tokens are returned as a list - the leading count token is suppressed.
-
- If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
-
- Example::
- countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
-
- # in this parser, the leading integer value is given in binary,
- # '10' indicating that 2 values are in the array
- binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
- countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
- """
- arrayExpr = Forward()
- def countFieldParseAction(s,l,t):
- n = t[0]
- arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
- return []
- if intExpr is None:
- intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
- else:
- intExpr = intExpr.copy()
- intExpr.setName("arrayLen")
- intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
- return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
- ret = []
- for i in L:
- if isinstance(i,list):
- ret.extend(_flatten(i))
- else:
- ret.append(i)
- return ret
-
-def matchPreviousLiteral(expr):
- """
- Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks
- for a 'repeat' of a previous expression. For example::
- first = Word(nums)
- second = matchPreviousLiteral(first)
- matchExpr = first + ":" + second
- will match C{"1:1"}, but not C{"1:2"}. Because this matches a
-    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
- If this is not desired, use C{matchPreviousExpr}.
- Do I{not} use with packrat parsing enabled.
- """
- rep = Forward()
- def copyTokenToRepeater(s,l,t):
- if t:
- if len(t) == 1:
- rep << t[0]
- else:
- # flatten t tokens
- tflat = _flatten(t.asList())
- rep << And(Literal(tt) for tt in tflat)
- else:
- rep << Empty()
- expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
- rep.setName('(prev) ' + _ustr(expr))
- return rep
-
-def matchPreviousExpr(expr):
- """
- Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks
- for a 'repeat' of a previous expression. For example::
- first = Word(nums)
- second = matchPreviousExpr(first)
- matchExpr = first + ":" + second
- will match C{"1:1"}, but not C{"1:2"}. Because this matches by
-    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
- the expressions are evaluated first, and then compared, so
- C{"1"} is compared with C{"10"}.
- Do I{not} use with packrat parsing enabled.
- """
- rep = Forward()
- e2 = expr.copy()
- rep <<= e2
- def copyTokenToRepeater(s,l,t):
- matchTokens = _flatten(t.asList())
- def mustMatchTheseTokens(s,l,t):
- theseTokens = _flatten(t.asList())
- if theseTokens != matchTokens:
- raise ParseException("",0,"")
- rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
- expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
- rep.setName('(prev) ' + _ustr(expr))
- return rep
-
-def _escapeRegexRangeChars(s):
- #~ escape these chars: ^-]
- for c in r"\^-]":
- s = s.replace(c,_bslash+c)
- s = s.replace("\n",r"\n")
- s = s.replace("\t",r"\t")
- return _ustr(s)
-
-def oneOf( strs, caseless=False, useRegex=True ):
- """
-    Helper to quickly define a set of alternative Literals; it ensures
-    longest-first testing when there is a conflict, regardless of the input order,
- but returns a C{L{MatchFirst}} for best performance.
-
- Parameters:
- - strs - a string of space-delimited literals, or a collection of string literals
- - caseless - (default=C{False}) - treat all literals as caseless
- - useRegex - (default=C{True}) - as an optimization, will generate a Regex
- object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
- if creating a C{Regex} raises an exception)
-
- Example::
- comp_oper = oneOf("< = > <= >= !=")
- var = Word(alphas)
- number = Word(nums)
- term = var | number
- comparison_expr = term + comp_oper + term
- print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
- prints::
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
- """
- if caseless:
- isequal = ( lambda a,b: a.upper() == b.upper() )
- masks = ( lambda a,b: b.upper().startswith(a.upper()) )
- parseElementClass = CaselessLiteral
- else:
- isequal = ( lambda a,b: a == b )
- masks = ( lambda a,b: b.startswith(a) )
- parseElementClass = Literal
-
- symbols = []
- if isinstance(strs,basestring):
- symbols = strs.split()
- elif isinstance(strs, Iterable):
- symbols = list(strs)
- else:
- warnings.warn("Invalid argument to oneOf, expected string or iterable",
- SyntaxWarning, stacklevel=2)
- if not symbols:
- return NoMatch()
-
- i = 0
- while i < len(symbols)-1:
- cur = symbols[i]
- for j,other in enumerate(symbols[i+1:]):
- if ( isequal(other, cur) ):
- del symbols[i+j+1]
- break
- elif ( masks(cur, other) ):
- del symbols[i+j+1]
- symbols.insert(i,other)
- cur = other
- break
- else:
- i += 1
-
- if not caseless and useRegex:
- #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
- try:
- if len(symbols)==len("".join(symbols)):
- return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
- else:
- return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
- except Exception:
- warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
- SyntaxWarning, stacklevel=2)
-
-
- # last resort, just use MatchFirst
- return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
-def dictOf( key, value ):
- """
- Helper to easily and clearly define a dictionary by specifying the respective patterns
- for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
- in the proper order. The key pattern can include delimiting markers or punctuation,
- as long as they are suppressed, thereby leaving the significant key text. The value
- pattern can include named results, so that the C{Dict} results can include named token
- fields.
-
- Example::
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- print(OneOrMore(attr_expr).parseString(text).dump())
-
- attr_label = label
- attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
- # similar to Dict, but simpler call format
- result = dictOf(attr_label, attr_value).parseString(text)
- print(result.dump())
- print(result['shape'])
- print(result.shape) # object attribute access works too
- print(result.asDict())
- prints::
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: light blue
- - posn: upper left
- - shape: SQUARE
- - texture: burlap
- SQUARE
- SQUARE
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
- """
- return Dict( ZeroOrMore( Group ( key + value ) ) )
-
-def originalTextFor(expr, asString=True):
- """
- Helper to return the original, untokenized text for a given expression. Useful to
- restore the parsed fields of an HTML start tag into the raw tag text itself, or to
- revert separate tokens with intervening whitespace back to the original matching
-    input text. By default, returns a string containing the original parsed text.
-
- If the optional C{asString} argument is passed as C{False}, then the return value is a
- C{L{ParseResults}} containing any results names that were originally matched, and a
- single token containing the original matched text from the input string. So if
- the expression passed to C{L{originalTextFor}} contains expressions with defined
- results names, you must set C{asString} to C{False} if you want to preserve those
- results name values.
-
- Example::
- src = "this is test <b> bold <i>text</i> </b> normal text "
- for tag in ("b","i"):
- opener,closer = makeHTMLTags(tag)
- patt = originalTextFor(opener + SkipTo(closer) + closer)
- print(patt.searchString(src)[0])
- prints::
- ['<b> bold <i>text</i> </b>']
- ['<i>text</i>']
- """
- locMarker = Empty().setParseAction(lambda s,loc,t: loc)
- endlocMarker = locMarker.copy()
- endlocMarker.callPreparse = False
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
- if asString:
- extractText = lambda s,l,t: s[t._original_start:t._original_end]
- else:
- def extractText(s,l,t):
- t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
- matchExpr.setParseAction(extractText)
- matchExpr.ignoreExprs = expr.ignoreExprs
- return matchExpr
-
-def ungroup(expr):
- """
- Helper to undo pyparsing's default grouping of And expressions, even
- if all but one are non-empty.
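-
-    Example (an illustrative sketch)::
-        grouped = Group(Word(alphas) + Word(nums))
-        print(grouped.parseString("abc 123"))          # -> [['abc', '123']]
-        print(ungroup(grouped).parseString("abc 123")) # -> ['abc', '123']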
- """
- return TokenConverter(expr).setParseAction(lambda t:t[0])
-
-def locatedExpr(expr):
- """
- Helper to decorate a returned token with its starting and ending locations in the input string.
- This helper adds the following results names:
- - locn_start = location where matched expression begins
- - locn_end = location where matched expression ends
- - value = the actual parsed results
-
-    Be careful if the input text contains C{<TAB>} characters; you may want to call
-    C{L{ParserElement.parseWithTabs}}.
-
- Example::
- wd = Word(alphas)
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
- prints::
- [[0, 'ljsdf', 5]]
- [[8, 'lksdjjf', 15]]
- [[18, 'lkkjj', 23]]
- """
- locator = Empty().setParseAction(lambda s,l,t: l)
- return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
-
-
-# convenience constants for positional expressions
-empty = Empty().setName("empty")
-lineStart = LineStart().setName("lineStart")
-lineEnd = LineEnd().setName("lineEnd")
-stringStart = StringStart().setName("stringStart")
-stringEnd = StringEnd().setName("stringEnd")
-
-_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
-
-def srange(s):
- r"""
- Helper to easily define string ranges for use in Word construction. Borrows
- syntax from regexp '[]' string range definitions::
- srange("[0-9]") -> "0123456789"
- srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
- srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
- The input string must be enclosed in []'s, and the returned string is the expanded
- character set joined into a single string.
- The values enclosed in the []'s may be:
- - a single character
- - an escaped character with a leading backslash (such as C{\-} or C{\]})
- - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
- (C{\0x##} is also supported for backwards compatibility)
- - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
- """
- _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
- try:
- return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
- except Exception:
- return ""
-
-def matchOnlyAtCol(n):
- """
- Helper method for defining parse actions that require matching at a specific
- column in the input text.
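-
-    Example (an illustrative sketch; the column number is arbitrary)::
-        # accept only words that begin in column 1 of their line
-        left_word = Word(alphas).setParseAction(matchOnlyAtCol(1))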
- """
- def verifyCol(strg,locn,toks):
- if col(locn,strg) != n:
- raise ParseException(strg,locn,"matched token not at column %d" % n)
- return verifyCol
-
-def replaceWith(replStr):
- """
- Helper method for common parse actions that simply return a literal value. Especially
- useful when used with C{L{transformString<ParserElement.transformString>}()}.
-
- Example::
- num = Word(nums).setParseAction(lambda toks: int(toks[0]))
- na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
- term = na | num
-
- OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
- """
- return lambda s,l,t: [replStr]
-
-def removeQuotes(s,l,t):
- """
- Helper parse action for removing quotation marks from parsed quoted strings.
-
- Example::
- # by default, quotation marks are included in parsed results
- quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
- # use removeQuotes to strip quotation marks from parsed results
- quotedString.setParseAction(removeQuotes)
- quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
- """
- return t[0][1:-1]
-
-def tokenMap(func, *args):
- """
-    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
- args are passed, they are forwarded to the given function as additional arguments after
- the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
- parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in L{ParserElement.transformString})::
- hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
- hex_ints.runTests('''
- 00 11 22 aa FF 0a 0d 1a
- ''')
-
- upperword = Word(alphas).setParseAction(tokenMap(str.upper))
- OneOrMore(upperword).runTests('''
- my kingdom for a horse
- ''')
-
- wd = Word(alphas).setParseAction(tokenMap(str.title))
- OneOrMore(wd).setParseAction(' '.join).runTests('''
- now is the winter of our discontent made glorious summer by this sun of york
- ''')
- prints::
- 00 11 22 aa FF 0a 0d 1a
- [0, 17, 34, 170, 255, 10, 13, 26]
-
- my kingdom for a horse
- ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
- now is the winter of our discontent made glorious summer by this sun of york
- ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
- """
- def pa(s,l,t):
- return [func(tokn, *args) for tokn in t]
-
- try:
- func_name = getattr(func, '__name__',
- getattr(func, '__class__').__name__)
- except Exception:
- func_name = str(func)
- pa.__name__ = func_name
-
- return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
-
-def _makeTags(tagStr, xml):
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
- if isinstance(tagStr,basestring):
- resname = tagStr
- tagStr = Keyword(tagStr, caseless=not xml)
- else:
- resname = tagStr.name
-
- tagAttrName = Word(alphas,alphanums+"_-:")
- if (xml):
- tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
- openTag = Suppress("<") + tagStr("tag") + \
- Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
- Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
- else:
- printablesLessRAbrack = "".join(c for c in printables if c not in ">")
- tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
- openTag = Suppress("<") + tagStr("tag") + \
- Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
- Optional( Suppress("=") + tagAttrValue ) ))) + \
- Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
- closeTag = Combine(_L("</") + tagStr + ">")
-
- openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
- closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
- openTag.tag = resname
- closeTag.tag = resname
- return openTag, closeTag
-
-def makeHTMLTags(tagStr):
- """
- Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
- tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
-
- Example::
- text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
- # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
- a,a_end = makeHTMLTags("A")
- link_expr = a + SkipTo(a_end)("link_text") + a_end
-
- for link in link_expr.searchString(text):
- # attributes in the <A> tag (like "href" shown here) are also accessible as named results
- print(link.link_text, '->', link.href)
- prints::
- pyparsing -> http://pyparsing.wikispaces.com
- """
- return _makeTags( tagStr, False )
-
-def makeXMLTags(tagStr):
- """
- Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
- tags only in the given upper/lower case.
-
- Example: similar to L{makeHTMLTags}
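-
-    A short sketch (illustrative)::
-        body,body_end = makeXMLTags("body")
-        # unlike makeHTMLTags, matches <body>...</body> only in this exact case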
- """
- return _makeTags( tagStr, True )
-
-def withAttribute(*args,**attrDict):
- """
- Helper to create a validating parse action to be used with start tags created
- with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
- with a required attribute value, to avoid false matches on common tags such as
- C{<TD>} or C{<DIV>}.
-
- Call C{withAttribute} with a series of attribute names and values. Specify the list
- of filter attributes names and values as:
- - keyword arguments, as in C{(align="right")}, or
- - as an explicit dict with C{**} operator, when an attribute name is also a Python
- reserved word, as in C{**{"class":"Customer", "align":"right"}}
- - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
- For attribute names with a namespace prefix, you must use the second form. Attribute
- names are matched insensitive to upper/lower case.
-
- If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
-
- To verify that the attribute exists, but without specifying a value, pass
- C{withAttribute.ANY_VALUE} as the value.
-
- Example::
- html = '''
- <div>
- Some text
- <div type="grid">1 4 0 1 0</div>
- <div type="graph">1,3 2,3 1,1</div>
- <div>this has no type</div>
- </div>
-
- '''
- div,div_end = makeHTMLTags("div")
-
- # only match div tag having a type attribute with value "grid"
- div_grid = div().setParseAction(withAttribute(type="grid"))
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.searchString(html):
- print(grid_header.body)
-
- # construct a match with any div tag having a type attribute, regardless of the value
- div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.searchString(html):
- print(div_header.body)
- prints::
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- if args:
- attrs = args[:]
- else:
- attrs = attrDict.items()
- attrs = [(k,v) for k,v in attrs]
- def pa(s,l,tokens):
- for attrName,attrValue in attrs:
- if attrName not in tokens:
- raise ParseException(s,l,"no matching attribute " + attrName)
- if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
- raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
- (attrName, tokens[attrName], attrValue))
- return pa
-withAttribute.ANY_VALUE = object()
-
-def withClass(classname, namespace=''):
- """
- Simplified version of C{L{withAttribute}} when matching on a div class - made
- difficult because C{class} is a reserved word in Python.
-
- Example::
- html = '''
- <div>
- Some text
- <div class="grid">1 4 0 1 0</div>
- <div class="graph">1,3 2,3 1,1</div>
- <div>this &lt;div&gt; has no class</div>
- </div>
-
- '''
- div,div_end = makeHTMLTags("div")
- div_grid = div().setParseAction(withClass("grid"))
-
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.searchString(html):
- print(grid_header.body)
-
- div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.searchString(html):
- print(div_header.body)
- prints::
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- classattr = "%s:class" % namespace if namespace else "class"
- return withAttribute(**{classattr : classname})
-
-opAssoc = _Constants()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
- """
- Helper method for constructing grammars of expressions made up of
- operators working in a precedence hierarchy. Operators may be unary or
- binary, left- or right-associative. Parse actions can also be attached
- to operator expressions. The generated parser will also recognize the use
- of parentheses to override operator precedences (see example below).
-
- Note: if you define a deep operator list, you may see performance issues
- when using infixNotation. See L{ParserElement.enablePackrat} for a
- mechanism to potentially improve your parser performance.
-
- Parameters:
-    - baseExpr - expression representing the most basic element of the nested grammar
- - opList - list of tuples, one for each operator precedence level in the
- expression grammar; each tuple is of the form
- (opExpr, numTerms, rightLeftAssoc, parseAction), where:
- - opExpr is the pyparsing expression for the operator;
- may also be a string, which will be converted to a Literal;
- if numTerms is 3, opExpr is a tuple of two expressions, for the
- two operators separating the 3 terms
- - numTerms is the number of terms for this operator (must
- be 1, 2, or 3)
- - rightLeftAssoc is the indicator whether the operator is
- right or left associative, using the pyparsing-defined
- constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- - parseAction is the parse action to be associated with
- expressions matching this operator expression (the
- parse action tuple member may be omitted); if the parse action
- is passed a tuple or list of functions, this is equivalent to
- calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
- - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
-
- Example::
- # simple example of four-function arithmetic with ints and variable names
- integer = pyparsing_common.signed_integer
- varname = pyparsing_common.identifier
-
- arith_expr = infixNotation(integer | varname,
- [
- ('-', 1, opAssoc.RIGHT),
- (oneOf('* /'), 2, opAssoc.LEFT),
- (oneOf('+ -'), 2, opAssoc.LEFT),
- ])
-
- arith_expr.runTests('''
- 5+3*6
- (5+3)*6
- -2--11
- ''', fullDump=False)
- prints::
- 5+3*6
- [[5, '+', [3, '*', 6]]]
-
- (5+3)*6
- [[[5, '+', 3], '*', 6]]
-
- -2--11
- [[['-', 2], '-', ['-', 11]]]
- """
- ret = Forward()
- lastExpr = baseExpr | ( lpar + ret + rpar )
- for i,operDef in enumerate(opList):
- opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
- termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
- if arity == 3:
- if opExpr is None or len(opExpr) != 2:
- raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
- opExpr1, opExpr2 = opExpr
- thisExpr = Forward().setName(termName)
- if rightLeftAssoc == opAssoc.LEFT:
- if arity == 1:
- matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
- elif arity == 2:
- if opExpr is not None:
- matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
- else:
- matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
- elif arity == 3:
- matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
- Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
- else:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
- elif rightLeftAssoc == opAssoc.RIGHT:
- if arity == 1:
- # try to avoid LR with this extra test
- if not isinstance(opExpr, Optional):
- opExpr = Optional(opExpr)
- matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
- elif arity == 2:
- if opExpr is not None:
- matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
- else:
- matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
- elif arity == 3:
- matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
- Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
- else:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
- else:
- raise ValueError("operator must indicate right or left associativity")
- if pa:
- if isinstance(pa, (tuple, list)):
- matchExpr.setParseAction(*pa)
- else:
- matchExpr.setParseAction(pa)
- thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
- lastExpr = thisExpr
- ret <<= lastExpr
- return ret
-
-operatorPrecedence = infixNotation
-"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
-
-dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
-sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
-quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
- Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
-unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
-
-def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
- """
- Helper method for defining nested lists enclosed in opening and closing
- delimiters ("(" and ")" are the default).
-
- Parameters:
- - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- - content - expression for items within the nested lists (default=C{None})
- - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
-
- If an expression is not provided for the content argument, the nested
- expression will capture all whitespace-delimited content between delimiters
- as a list of separate values.
-
- Use the C{ignoreExpr} argument to define expressions that may contain
- opening or closing characters that should not be treated as opening
- or closing characters for nesting, such as quotedString or a comment
- expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
- The default is L{quotedString}, but if no expressions are to be ignored,
- then pass C{None} for this argument.
-
- Example::
- data_type = oneOf("void int short long char float double")
- decl_data_type = Combine(data_type + Optional(Word('*')))
- ident = Word(alphas+'_', alphanums+'_')
- number = pyparsing_common.number
- arg = Group(decl_data_type + ident)
- LPAR,RPAR = map(Suppress, "()")
-
- code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
-
- c_function = (decl_data_type("type")
- + ident("name")
- + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
- + code_body("body"))
- c_function.ignore(cStyleComment)
-
- source_code = '''
- int is_odd(int x) {
- return (x%2);
- }
-
- int dec_to_hex(char hchar) {
- if (hchar >= '0' && hchar <= '9') {
- return (ord(hchar)-ord('0'));
- } else {
- return (10+ord(hchar)-ord('A'));
- }
- }
- '''
- for func in c_function.searchString(source_code):
- print("%(name)s (%(type)s) args: %(args)s" % func)
-
- prints::
- is_odd (int) args: [['int', 'x']]
- dec_to_hex (int) args: [['char', 'hchar']]
- """
- if opener == closer:
- raise ValueError("opening and closing strings cannot be the same")
- if content is None:
- if isinstance(opener,basestring) and isinstance(closer,basestring):
- if len(opener) == 1 and len(closer)==1:
- if ignoreExpr is not None:
- content = (Combine(OneOrMore(~ignoreExpr +
- CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
- ).setParseAction(lambda t:t[0].strip()))
- else:
- if ignoreExpr is not None:
- content = (Combine(OneOrMore(~ignoreExpr +
- ~Literal(opener) + ~Literal(closer) +
- CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
- CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- raise ValueError("opening and closing arguments must be strings if no content expression is given")
- ret = Forward()
- if ignoreExpr is not None:
- ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
- else:
- ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
- ret.setName('nested %s%s expression' % (opener,closer))
- return ret
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True):
- """
- Helper method for defining space-delimited indentation blocks, such as
- those used to define block statements in Python source code.
-
- Parameters:
- - blockStatementExpr - expression defining syntax of statement that
- is repeated within the indented block
- - indentStack - list created by caller to manage indentation stack
- (multiple statementWithIndentedBlock expressions within a single grammar
- should share a common indentStack)
-    - indent - boolean indicating whether block must be indented beyond
-      the current level; set to False for block of left-most statements
- (default=C{True})
-
- A valid block must contain at least one C{blockStatement}.
-
- Example::
- data = '''
- def A(z):
- A1
- B = 100
- G = A2
- A2
- A3
- B
- def BB(a,b,c):
- BB1
- def BBA():
- bba1
- bba2
- bba3
- C
- D
- def spam(x,y):
- def eggs(z):
- pass
- '''
-
-
- indentStack = [1]
- stmt = Forward()
-
- identifier = Word(alphas, alphanums)
- funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
- func_body = indentedBlock(stmt, indentStack)
- funcDef = Group( funcDecl + func_body )
-
- rvalue = Forward()
- funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
- rvalue << (funcCall | identifier | Word(nums))
- assignment = Group(identifier + "=" + rvalue)
- stmt << ( funcDef | assignment | identifier )
-
- module_body = OneOrMore(stmt)
-
- parseTree = module_body.parseString(data)
- parseTree.pprint()
- prints::
- [['def',
- 'A',
- ['(', 'z', ')'],
- ':',
- [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
- 'B',
- ['def',
- 'BB',
- ['(', 'a', 'b', 'c', ')'],
- ':',
- [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
- 'C',
- 'D',
- ['def',
- 'spam',
- ['(', 'x', 'y', ')'],
- ':',
- [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
- """
- def checkPeerIndent(s,l,t):
- if l >= len(s): return
- curCol = col(l,s)
- if curCol != indentStack[-1]:
- if curCol > indentStack[-1]:
- raise ParseFatalException(s,l,"illegal nesting")
- raise ParseException(s,l,"not a peer entry")
-
- def checkSubIndent(s,l,t):
- curCol = col(l,s)
- if curCol > indentStack[-1]:
- indentStack.append( curCol )
- else:
- raise ParseException(s,l,"not a subentry")
-
- def checkUnindent(s,l,t):
- if l >= len(s): return
- curCol = col(l,s)
- if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
- raise ParseException(s,l,"not an unindent")
- indentStack.pop()
-
- NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
- INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
- PEER = Empty().setParseAction(checkPeerIndent).setName('')
- UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
- if indent:
- smExpr = Group( Optional(NL) +
- #~ FollowedBy(blockStatementExpr) +
- INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
- else:
- smExpr = Group( Optional(NL) +
- (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
- blockStatementExpr.ignore(_bslash + LineEnd())
- return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
-_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
-commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
-def replaceHTMLEntity(t):
- """Helper parser action to replace common HTML entities with their special characters"""
- return _htmlEntityMap.get(t.entity)
-
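-# illustrative usage (a sketch, using the definitions above):
-#   commonHTMLEntity.setParseAction(replaceHTMLEntity)
-#   print(commonHTMLEntity.transformString("Dr. Smith &amp; Jones"))  # -> 'Dr. Smith & Jones'
-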
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
-"Comment of the form C{/* ... */}"
-
-htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
-"Comment of the form C{<!-- ... -->}"
-
-restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
-dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
-"Comment of the form C{// ... (to end of line)}"
-
-cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
-"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
-
-javaStyleComment = cppStyleComment
-"Same as C{L{cppStyleComment}}"
-
-pythonStyleComment = Regex(r"#.*").setName("Python style comment")
-"Comment of the form C{# ... (to end of line)}"
-
-_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
- Optional( Word(" \t") +
- ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
-commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
-"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
- This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
- """
- Here are some common low-level expressions that may be useful in jump-starting parser development:
- - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- - common L{programming identifiers<identifier>}
- - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- - L{UUID<uuid>}
- - L{comma-separated list<comma_separated_list>}
- Parse actions:
- - C{L{convertToInteger}}
- - C{L{convertToFloat}}
- - C{L{convertToDate}}
- - C{L{convertToDatetime}}
- - C{L{stripHTMLTags}}
- - C{L{upcaseTokens}}
- - C{L{downcaseTokens}}
-
- Example::
- pyparsing_common.number.runTests('''
- # any int or real number, returned as the appropriate type
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.fnumber.runTests('''
- # any int or real number, returned as float
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.hex_integer.runTests('''
- # hex numbers
- 100
- FF
- ''')
-
- pyparsing_common.fraction.runTests('''
- # fractions
- 1/2
- -3/4
- ''')
-
- pyparsing_common.mixed_integer.runTests('''
- # mixed fractions
- 1
- 1/2
- -3/4
- 1-3/4
- ''')
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests('''
- # uuid
- 12345678-1234-5678-1234-567812345678
- ''')
- prints::
- # any int or real number, returned as the appropriate type
- 100
- [100]
-
- -100
- [-100]
-
- +100
- [100]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # any int or real number, returned as float
- 100
- [100.0]
-
- -100
- [-100.0]
-
- +100
- [100.0]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # hex numbers
- 100
- [256]
-
- FF
- [255]
-
- # fractions
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- # mixed fractions
- 1
- [1]
-
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- 1-3/4
- [1.75]
-
- # uuid
- 12345678-1234-5678-1234-567812345678
- [UUID('12345678-1234-5678-1234-567812345678')]
- """
-
- convertToInteger = tokenMap(int)
- """
- Parse action for converting parsed integers to Python int
- """
-
- convertToFloat = tokenMap(float)
- """
- Parse action for converting parsed numbers to Python float
- """
-
- integer = Word(nums).setName("integer").setParseAction(convertToInteger)
- """expression that parses an unsigned integer, returns an int"""
-
- hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
- """expression that parses a hexadecimal integer, returns an int"""
-
- signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
- """expression that parses an integer with optional leading sign, returns an int"""
-
- fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
- """fractional expression of an integer divided by an integer, returns a float"""
- fraction.addParseAction(lambda t: t[0]/t[-1])
-
- mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
- """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
- mixed_integer.addParseAction(sum)
-
- real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
- """expression that parses a floating point number and returns a float"""
-
- sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
- """expression that parses a floating point number with optional scientific notation and returns a float"""
-
- # streamlining this expression makes the docs nicer-looking
- number = (sci_real | real | signed_integer).streamline()
- """any numeric expression, returns the corresponding Python type"""
-
- fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
- """any int or real number, returned as float"""
-
- identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
- """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
-
- ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
- "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
-
- _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
- _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
- _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
- _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
- _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
- ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
- "IPv6 address (long, short, or mixed form)"
-
- mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
- "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
-
- @staticmethod
- def convertToDate(fmt="%Y-%m-%d"):
- """
- Helper to create a parse action for converting parsed date string to Python datetime.date
-
- Params -
- - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
-
- Example::
- date_expr = pyparsing_common.iso8601_date.copy()
- date_expr.setParseAction(pyparsing_common.convertToDate())
- print(date_expr.parseString("1999-12-31"))
- prints::
- [datetime.date(1999, 12, 31)]
- """
- def cvt_fn(s,l,t):
- try:
- return datetime.strptime(t[0], fmt).date()
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
- return cvt_fn
-
- @staticmethod
- def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
- """
- Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
-
- Params -
- - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
-
- Example::
- dt_expr = pyparsing_common.iso8601_datetime.copy()
- dt_expr.setParseAction(pyparsing_common.convertToDatetime())
- print(dt_expr.parseString("1999-12-31T23:59:59.999"))
- prints::
- [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
- """
- def cvt_fn(s,l,t):
- try:
- return datetime.strptime(t[0], fmt)
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
- return cvt_fn
-
- iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
- "ISO8601 date (C{yyyy-mm-dd})"
-
- iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
- "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
-
- uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
- "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
-
- _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
- @staticmethod
- def stripHTMLTags(s, l, tokens):
- """
- Parse action to remove HTML tags from web page HTML source
-
- Example::
- # strip HTML links from normal text
- text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
- td,td_end = makeHTMLTags("TD")
- table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-
- print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
- """
- return pyparsing_common._html_stripper.transformString(tokens[0])
-
- _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
- + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
- comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
- """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
- upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
- """Parse action to convert tokens to upper case."""
-
- downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
- """Parse action to convert tokens to lower case."""
-
-
-if __name__ == "__main__":
-
- selectToken = CaselessLiteral("select")
- fromToken = CaselessLiteral("from")
-
- ident = Word(alphas, alphanums + "_$")
-
- columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
- columnNameList = Group(delimitedList(columnName)).setName("columns")
- columnSpec = ('*' | columnNameList)
-
- tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
- tableNameList = Group(delimitedList(tableName)).setName("tables")
-
- simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
-
- # demo runTests method, including embedded comments in test string
- simpleSQL.runTests("""
- # '*' as column list and dotted table name
- select * from SYS.XYZZY
-
- # caseless match on "SELECT", and casts back to "select"
- SELECT * from XYZZY, ABC
-
- # list of column names, and mixed case SELECT keyword
- Select AA,BB,CC from Sys.dual
-
- # multiple tables
- Select A, B, C from Sys.dual, Table2
-
- # invalid SELECT keyword - should fail
- Xelect A, B, C from Sys.dual
-
- # incomplete command - should fail
- Select
-
- # invalid column name - should fail
- Select ^^^ frox Sys.dual
-
- """)
-
- pyparsing_common.number.runTests("""
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- """)
-
- # any int or real number, returned as float
- pyparsing_common.fnumber.runTests("""
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- """)
-
- pyparsing_common.hex_integer.runTests("""
- 100
- FF
- """)
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests("""
- 12345678-1234-5678-1234-567812345678
- """)
diff --git a/contrib/python/setuptools/py3/setuptools/archive_util.py b/contrib/python/setuptools/py3/setuptools/archive_util.py
deleted file mode 100644
index 0f70284822f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/archive_util.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""Utilities for extracting common archive formats"""
-
-import zipfile
-import tarfile
-import os
-import shutil
-import posixpath
-import contextlib
-from distutils.errors import DistutilsError
-
-from pkg_resources import ensure_directory
-
-__all__ = [
- "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
- "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
-]
-
-
-class UnrecognizedFormat(DistutilsError):
- """Couldn't recognize the archive type"""
-
-
-def default_filter(src, dst):
- """The default progress/filter callback; returns True for all files"""
- return dst
-
-
-def unpack_archive(
- filename, extract_dir, progress_filter=default_filter,
- drivers=None):
- """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
-
- `progress_filter` is a function taking two arguments: a source path
- internal to the archive ('/'-separated), and a filesystem path where it
- will be extracted. The callback must return the desired extract path
- (which may be the same as the one passed in), or else ``None`` to skip
- that file or directory. The callback can thus be used to report on the
- progress of the extraction, as well as to filter the items extracted or
- alter their extraction paths.
-
- `drivers`, if supplied, must be a non-empty sequence of functions with the
- same signature as this function (minus the `drivers` argument), that raise
- ``UnrecognizedFormat`` if they do not support extracting the designated
- archive type. The `drivers` are tried in sequence until one is found that
- does not raise an error, or until all are exhausted (in which case
- ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
- drivers, the module's ``extraction_drivers`` constant will be used, which
- means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
- order.
- """
- for driver in drivers or extraction_drivers:
- try:
- driver(filename, extract_dir, progress_filter)
- except UnrecognizedFormat:
- continue
- else:
- return
- else:
- raise UnrecognizedFormat(
- "Not a recognized archive type: %s" % filename
- )
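-
-# A minimal usage sketch (paths and the 'keep' helper are hypothetical):
-# skip anything under tests/ while extracting, and report progress:
-#
-#   def keep(src, dst):
-#       print("extracting", src)
-#       return None if src.startswith('tests/') else dst
-#
-#   unpack_archive('dist/pkg-1.0.zip', 'build/unpacked', progress_filter=keep)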
-
-
-def unpack_directory(filename, extract_dir, progress_filter=default_filter):
- """"Unpack" a directory, using the same interface as for archives
-
- Raises ``UnrecognizedFormat`` if `filename` is not a directory
- """
- if not os.path.isdir(filename):
- raise UnrecognizedFormat("%s is not a directory" % filename)
-
- paths = {
- filename: ('', extract_dir),
- }
- for base, dirs, files in os.walk(filename):
- src, dst = paths[base]
- for d in dirs:
- paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
- for f in files:
- target = os.path.join(dst, f)
- target = progress_filter(src + f, target)
- if not target:
- # skipped by the progress/filter callback
- continue
- ensure_directory(target)
- f = os.path.join(base, f)
- shutil.copyfile(f, target)
- shutil.copystat(f, target)
-
-
-def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
- """Unpack zip `filename` to `extract_dir`
-
- Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
- by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
- of the `progress_filter` argument.
- """
-
- if not zipfile.is_zipfile(filename):
- raise UnrecognizedFormat("%s is not a zip file" % (filename,))
-
- with zipfile.ZipFile(filename) as z:
- for info in z.infolist():
- name = info.filename
-
- # don't extract absolute paths or ones with .. in them
- if name.startswith('/') or '..' in name.split('/'):
- continue
-
- target = os.path.join(extract_dir, *name.split('/'))
- target = progress_filter(name, target)
- if not target:
- continue
- if name.endswith('/'):
- # directory
- ensure_directory(target)
- else:
- # file
- ensure_directory(target)
- data = z.read(info.filename)
- with open(target, 'wb') as f:
- f.write(data)
- unix_attributes = info.external_attr >> 16
- if unix_attributes:
- os.chmod(target, unix_attributes)
-
-
-def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
- """Resolve any links and extract link targets as normal files."""
- while tar_member_obj is not None and (
- tar_member_obj.islnk() or tar_member_obj.issym()):
- linkpath = tar_member_obj.linkname
- if tar_member_obj.issym():
- base = posixpath.dirname(tar_member_obj.name)
- linkpath = posixpath.join(base, linkpath)
- linkpath = posixpath.normpath(linkpath)
- tar_member_obj = tar_obj._getmember(linkpath)
-
- is_file_or_dir = (
- tar_member_obj is not None and
- (tar_member_obj.isfile() or tar_member_obj.isdir())
- )
- if is_file_or_dir:
- return tar_member_obj
-
- raise LookupError('Got unknown file type')
-
-
-def _iter_open_tar(tar_obj, extract_dir, progress_filter):
- """Emit member-destination pairs from a tar archive."""
- # don't do any chowning!
- tar_obj.chown = lambda *args: None
-
- with contextlib.closing(tar_obj):
- for member in tar_obj:
- name = member.name
- # don't extract absolute paths or ones with .. in them
- if name.startswith('/') or '..' in name.split('/'):
- continue
-
- prelim_dst = os.path.join(extract_dir, *name.split('/'))
-
- try:
- member = _resolve_tar_file_or_dir(tar_obj, member)
- except LookupError:
- continue
-
- final_dst = progress_filter(name, prelim_dst)
- if not final_dst:
- continue
-
- if final_dst.endswith(os.sep):
- final_dst = final_dst[:-1]
-
- yield member, final_dst
-
-
-def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
- """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
-
- Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
- by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
- of the `progress_filter` argument.
- """
- try:
- tarobj = tarfile.open(filename)
- except tarfile.TarError as e:
- raise UnrecognizedFormat(
- "%s is not a compressed or uncompressed tar file" % (filename,)
- ) from e
-
- for member, final_dst in _iter_open_tar(
- tarobj, extract_dir, progress_filter,
- ):
- try:
- # XXX Ugh
- tarobj._extract_member(member, final_dst)
- except tarfile.ExtractError:
- # chown/chmod/mkfifo/mknode/makedev failed
- pass
-
- return True
-
-
-extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
diff --git a/contrib/python/setuptools/py3/setuptools/build_meta.py b/contrib/python/setuptools/py3/setuptools/build_meta.py
deleted file mode 100644
index d0ac613ba38..00000000000
--- a/contrib/python/setuptools/py3/setuptools/build_meta.py
+++ /dev/null
@@ -1,290 +0,0 @@
-"""A PEP 517 interface to setuptools
-
-Previously, when a user or a command line tool (let's call it a "frontend")
-needed to make a request of setuptools to take a certain action, for
-example, generating a list of installation requirements, the frontend
-would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
-
-PEP 517 defines a different method of interfacing with setuptools. Rather
-than calling "setup.py" directly, the frontend should:
-
- 1. Set the current directory to the directory with a setup.py file
- 2. Import this module into a disposable python interpreter (one where it
- is acceptable for setuptools to set global variables or crash hard).
- 3. Call one of the functions defined in PEP 517.
-
-What each function does is defined in PEP 517. However, here is a "casual"
-definition of the functions (this definition should not be relied on for
-bug reports or API stability):
-
- - `build_wheel`: build a wheel in the folder and return the basename
- - `get_requires_for_build_wheel`: get the `setup_requires` to build
- - `prepare_metadata_for_build_wheel`: get the `install_requires`
- - `build_sdist`: build an sdist in the folder and return the basename
- - `get_requires_for_build_sdist`: get the `setup_requires` to build
-
-Again, this is not a formal definition! Just a "taste" of the module.
-"""
-
-import io
-import os
-import sys
-import tokenize
-import shutil
-import contextlib
-import tempfile
-import warnings
-
-import setuptools
-import distutils
-
-from pkg_resources import parse_requirements
-
-__all__ = ['get_requires_for_build_sdist',
- 'get_requires_for_build_wheel',
- 'prepare_metadata_for_build_wheel',
- 'build_wheel',
- 'build_sdist',
- '__legacy__',
- 'SetupRequirementsError']
-
-
-class SetupRequirementsError(BaseException):
- def __init__(self, specifiers):
- self.specifiers = specifiers
-
-
-class Distribution(setuptools.dist.Distribution):
- def fetch_build_eggs(self, specifiers):
- specifier_list = list(map(str, parse_requirements(specifiers)))
-
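- # Deliberate control flow: rather than installing anything, hand the
- # parsed specifiers back to _get_build_requires() via this exception.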
- raise SetupRequirementsError(specifier_list)
-
- @classmethod
- @contextlib.contextmanager
- def patch(cls):
- """
- Replace
- distutils.dist.Distribution with this class
- for the duration of this context.
- """
- orig = distutils.core.Distribution
- distutils.core.Distribution = cls
- try:
- yield
- finally:
- distutils.core.Distribution = orig
-
-
[email protected]
-def no_install_setup_requires():
- """Temporarily disable installing setup_requires
-
- Under PEP 517, the backend reports build dependencies to the frontend,
- and the frontend is responsible for ensuring they're installed.
- So setuptools (acting as a backend) should not try to install them.
- """
- orig = setuptools._install_setup_requires
- setuptools._install_setup_requires = lambda attrs: None
- try:
- yield
- finally:
- setuptools._install_setup_requires = orig
-
-
-def _get_immediate_subdirectories(a_dir):
- return [name for name in os.listdir(a_dir)
- if os.path.isdir(os.path.join(a_dir, name))]
-
-
-def _file_with_extension(directory, extension):
- matching = (
- f for f in os.listdir(directory)
- if f.endswith(extension)
- )
- try:
- file, = matching
- except ValueError:
- raise ValueError(
- 'No distribution was found. Ensure that `setup.py` '
- 'is not empty and that it calls `setup()`.')
- return file
-
-
-def _open_setup_script(setup_script):
- if not os.path.exists(setup_script):
- # Supply a default setup.py
- return io.StringIO(u"from setuptools import setup; setup()")
-
- return getattr(tokenize, 'open', open)(setup_script)
-
-
[email protected]
-def suppress_known_deprecation():
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', 'setup.py install is deprecated')
- yield
-
-
-class _BuildMetaBackend(object):
-
- def _fix_config(self, config_settings):
- config_settings = config_settings or {}
- config_settings.setdefault('--global-option', [])
- return config_settings
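- # e.g. _fix_config(None) -> {'--global-option': []}; keys already
- # present in a caller-supplied dict are left untouched.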
-
- def _get_build_requires(self, config_settings, requirements):
- config_settings = self._fix_config(config_settings)
-
- sys.argv = sys.argv[:1] + ['egg_info'] + \
- config_settings["--global-option"]
- try:
- with Distribution.patch():
- self.run_setup()
- except SetupRequirementsError as e:
- requirements += e.specifiers
-
- return requirements
-
- def run_setup(self, setup_script='setup.py'):
- # Note that we can reuse our build directory between calls
- # Correctness comes first, then optimization later
- __file__ = setup_script
- __name__ = '__main__'
-
- with _open_setup_script(__file__) as f:
- code = f.read().replace(r'\r\n', r'\n')
-
- exec(compile(code, __file__, 'exec'), locals())
-
- def get_requires_for_build_wheel(self, config_settings=None):
- config_settings = self._fix_config(config_settings)
- return self._get_build_requires(
- config_settings, requirements=['wheel'])
-
- def get_requires_for_build_sdist(self, config_settings=None):
- config_settings = self._fix_config(config_settings)
- return self._get_build_requires(config_settings, requirements=[])
-
- def prepare_metadata_for_build_wheel(self, metadata_directory,
- config_settings=None):
- sys.argv = sys.argv[:1] + [
- 'dist_info', '--egg-base', metadata_directory]
- with no_install_setup_requires():
- self.run_setup()
-
- dist_info_directory = metadata_directory
- while True:
- dist_infos = [f for f in os.listdir(dist_info_directory)
- if f.endswith('.dist-info')]
-
- if (
- len(dist_infos) == 0 and
- len(_get_immediate_subdirectories(dist_info_directory)) == 1
- ):
-
- dist_info_directory = os.path.join(
- dist_info_directory, os.listdir(dist_info_directory)[0])
- continue
-
- assert len(dist_infos) == 1
- break
-
- # PEP 517 requires that the .dist-info directory be placed in the
- # metadata_directory. To comply, we MUST copy the directory to the root
- if dist_info_directory != metadata_directory:
- shutil.move(
- os.path.join(dist_info_directory, dist_infos[0]),
- metadata_directory)
- shutil.rmtree(dist_info_directory, ignore_errors=True)
-
- return dist_infos[0]
-
- def _build_with_temp_dir(self, setup_command, result_extension,
- result_directory, config_settings):
- config_settings = self._fix_config(config_settings)
- result_directory = os.path.abspath(result_directory)
-
- # Build in a temporary directory, then copy to the target.
- os.makedirs(result_directory, exist_ok=True)
- with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
- sys.argv = (sys.argv[:1] + setup_command +
- ['--dist-dir', tmp_dist_dir] +
- config_settings["--global-option"])
- with no_install_setup_requires():
- self.run_setup()
-
- result_basename = _file_with_extension(
- tmp_dist_dir, result_extension)
- result_path = os.path.join(result_directory, result_basename)
- if os.path.exists(result_path):
- # os.rename will fail overwriting on non-Unix.
- os.remove(result_path)
- os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
-
- return result_basename
-
- def build_wheel(self, wheel_directory, config_settings=None,
- metadata_directory=None):
- with suppress_known_deprecation():
- return self._build_with_temp_dir(['bdist_wheel'], '.whl',
- wheel_directory, config_settings)
-
- def build_sdist(self, sdist_directory, config_settings=None):
- return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
- '.tar.gz', sdist_directory,
- config_settings)
-
-
-class _BuildMetaLegacyBackend(_BuildMetaBackend):
- """Compatibility backend for setuptools
-
- This is a version of setuptools.build_meta that endeavors to maintain
- backwards compatibility with pre-PEP 517 modes of invocation. It exists
- as a temporary bridge between the old packaging mechanism and the new
- packaging mechanism, and will eventually be removed.
- """
- def run_setup(self, setup_script='setup.py'):
- # In order to maintain compatibility with scripts assuming that
- # the setup.py script is in a directory on the PYTHONPATH, inject
- # '' into sys.path. (pypa/setuptools#1642)
- sys_path = list(sys.path) # Save the original path
-
- script_dir = os.path.dirname(os.path.abspath(setup_script))
- if script_dir not in sys.path:
- sys.path.insert(0, script_dir)
-
- # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
- # get the directory of the source code. They expect it to refer to the
- # setup.py script.
- sys_argv_0 = sys.argv[0]
- sys.argv[0] = setup_script
-
- try:
- super(_BuildMetaLegacyBackend,
- self).run_setup(setup_script=setup_script)
- finally:
- # While PEP 517 frontends should be calling each hook in a fresh
- # subprocess according to the standard (and thus it should not be
- # strictly necessary to restore the old sys.path), we'll restore
- # the original path so that the path manipulation does not persist
- # within the hook after run_setup is called.
- sys.path[:] = sys_path
- sys.argv[0] = sys_argv_0
-
-
-# The primary backend
-_BACKEND = _BuildMetaBackend()
-
-get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
-get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
-prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
-build_wheel = _BACKEND.build_wheel
-build_sdist = _BACKEND.build_sdist
-
-
-# The legacy backend
-__legacy__ = _BuildMetaLegacyBackend()
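-
-# A hypothetical pyproject.toml that opts into the legacy backend:
-#
-#   [build-system]
-#   requires = ["setuptools", "wheel"]
-#   build-backend = "setuptools.build_meta:__legacy__"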
diff --git a/contrib/python/setuptools/py3/setuptools/command/__init__.py b/contrib/python/setuptools/py3/setuptools/command/__init__.py
deleted file mode 100644
index b966dcea57a..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from distutils.command.bdist import bdist
-import sys
-
-if 'egg' not in bdist.format_commands:
- bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
- bdist.format_commands.append('egg')
-
-del bdist, sys
diff --git a/contrib/python/setuptools/py3/setuptools/command/alias.py b/contrib/python/setuptools/py3/setuptools/command/alias.py
deleted file mode 100644
index 452a9244ea6..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/alias.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from distutils.errors import DistutilsOptionError
-
-from setuptools.command.setopt import edit_config, option_base, config_file
-
-
-def shquote(arg):
- """Quote an argument for later parsing by shlex.split()"""
- for c in '"', "'", "\\", "#":
- if c in arg:
- return repr(arg)
- if arg.split() != [arg]:
- return repr(arg)
- return arg
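-
-# Illustrative behavior:
-#   shquote('plain')     -> 'plain'         (no quoting needed)
-#   shquote('two words') -> "'two words'"   (repr-quoted for shlex.split)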
-
-
-class alias(option_base):
- """Define a shortcut that invokes one or more commands"""
-
- description = "define a shortcut to invoke one or more commands"
- command_consumes_arguments = True
-
- user_options = [
- ('remove', 'r', 'remove (unset) the alias'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.args = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.remove and len(self.args) != 1:
- raise DistutilsOptionError(
- "Must specify exactly one argument (the alias name) when "
- "using --remove"
- )
-
- def run(self):
- aliases = self.distribution.get_option_dict('aliases')
-
- if not self.args:
- print("Command Aliases")
- print("---------------")
- for alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
-
- elif len(self.args) == 1:
- alias, = self.args
- if self.remove:
- command = None
- elif alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
- else:
- print("No alias definition found for %r" % alias)
- return
- else:
- alias = self.args[0]
- command = ' '.join(map(shquote, self.args[1:]))
-
- edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
-
-
-def format_alias(name, aliases):
- source, command = aliases[name]
- if source == config_file('global'):
- source = '--global-config '
- elif source == config_file('user'):
- source = '--user-config '
- elif source == config_file('local'):
- source = ''
- else:
- source = '--filename=%r' % source
- return source + name + ' ' + command
diff --git a/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py b/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
deleted file mode 100644
index e6b1609f7ba..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
+++ /dev/null
@@ -1,456 +0,0 @@
-"""setuptools.command.bdist_egg
-
-Build .egg distributions"""
-
-from distutils.dir_util import remove_tree, mkpath
-from distutils import log
-from types import CodeType
-import sys
-import os
-import re
-import textwrap
-import marshal
-
-from pkg_resources import get_build_platform, Distribution, ensure_directory
-from setuptools.extension import Library
-from setuptools import Command
-
-from sysconfig import get_path, get_python_version
-
-
-def _get_purelib():
- return get_path("purelib")
-
-
-def strip_module(filename):
- if '.' in filename:
- filename = os.path.splitext(filename)[0]
- if filename.endswith('module'):
- filename = filename[:-6]
- return filename
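-
-# e.g. strip_module('foomodule.so') -> 'foo'; strip_module('bar.pyd') -> 'bar'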
-
-
-def sorted_walk(dir):
- """Do os.walk in a reproducible way,
- independent of indeterministic filesystem readdir order
- """
- for base, dirs, files in os.walk(dir):
- dirs.sort()
- files.sort()
- yield base, dirs, files
-
-
-def write_stub(resource, pyfile):
- _stub_template = textwrap.dedent("""
- def __bootstrap__():
- global __bootstrap__, __loader__, __file__
- import sys, pkg_resources, importlib.util
- __file__ = pkg_resources.resource_filename(__name__, %r)
- __loader__ = None; del __bootstrap__, __loader__
- spec = importlib.util.spec_from_file_location(__name__,__file__)
- mod = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(mod)
- __bootstrap__()
- """).lstrip()
- with open(pyfile, 'w') as f:
- f.write(_stub_template % resource)
-
-
-class bdist_egg(Command):
- description = "create an \"egg\" distribution"
-
- user_options = [
- ('bdist-dir=', 'b',
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p', "platform name to embed in generated filenames "
- "(default: %s)" % get_build_platform()),
- ('exclude-source-files', None,
- "remove all .py files from the generated egg"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ]
-
- boolean_options = [
- 'keep-temp', 'skip-build', 'exclude-source-files'
- ]
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.keep_temp = 0
- self.dist_dir = None
- self.skip_build = 0
- self.egg_output = None
- self.exclude_source_files = None
-
- def finalize_options(self):
- ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
- self.egg_info = ei_cmd.egg_info
-
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'egg')
-
- if self.plat_name is None:
- self.plat_name = get_build_platform()
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- if self.egg_output is None:
-
- # Compute filename of the output egg
- basename = Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version,
- get_python_version(),
- self.distribution.has_ext_modules() and self.plat_name
- ).egg_name()
-
- self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
-
- def do_install_data(self):
- # Hack for packages that install data to install's --install-lib
- self.get_finalized_command('install').install_lib = self.bdist_dir
-
- site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
- old, self.distribution.data_files = self.distribution.data_files, []
-
- for item in old:
- if isinstance(item, tuple) and len(item) == 2:
- if os.path.isabs(item[0]):
- realpath = os.path.realpath(item[0])
- normalized = os.path.normcase(realpath)
- if normalized == site_packages or normalized.startswith(
- site_packages + os.sep
- ):
- item = realpath[len(site_packages) + 1:], item[1]
- # XXX else: raise ???
- self.distribution.data_files.append(item)
-
- try:
- log.info("installing package data to %s", self.bdist_dir)
- self.call_command('install_data', force=0, root=None)
- finally:
- self.distribution.data_files = old
-
- def get_outputs(self):
- return [self.egg_output]
-
- def call_command(self, cmdname, **kw):
- """Invoke reinitialized command `cmdname` with keyword args"""
- for dirname in INSTALL_DIRECTORY_ATTRS:
- kw.setdefault(dirname, self.bdist_dir)
- kw.setdefault('skip_build', self.skip_build)
- kw.setdefault('dry_run', self.dry_run)
- cmd = self.reinitialize_command(cmdname, **kw)
- self.run_command(cmdname)
- return cmd
-
- def run(self): # noqa: C901 # is too complex (14) # FIXME
- # Generate metadata first
- self.run_command("egg_info")
- # We run install_lib before install_data, because some data hacks
- # pull their data path from the install_lib command.
- log.info("installing library code to %s", self.bdist_dir)
- instcmd = self.get_finalized_command('install')
- old_root = instcmd.root
- instcmd.root = None
- if self.distribution.has_c_libraries() and not self.skip_build:
- self.run_command('build_clib')
- cmd = self.call_command('install_lib', warn_dir=0)
- instcmd.root = old_root
-
- all_outputs, ext_outputs = self.get_ext_outputs()
- self.stubs = []
- to_compile = []
- for (p, ext_name) in enumerate(ext_outputs):
- filename, ext = os.path.splitext(ext_name)
- pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
- '.py')
- self.stubs.append(pyfile)
- log.info("creating stub loader for %s", ext_name)
- if not self.dry_run:
- write_stub(os.path.basename(ext_name), pyfile)
- to_compile.append(pyfile)
- ext_outputs[p] = ext_name.replace(os.sep, '/')
-
- if to_compile:
- cmd.byte_compile(to_compile)
- if self.distribution.data_files:
- self.do_install_data()
-
- # Make the EGG-INFO directory
- archive_root = self.bdist_dir
- egg_info = os.path.join(archive_root, 'EGG-INFO')
- self.mkpath(egg_info)
- if self.distribution.scripts:
- script_dir = os.path.join(egg_info, 'scripts')
- log.info("installing scripts to %s", script_dir)
- self.call_command('install_scripts', install_dir=script_dir,
- no_ep=1)
-
- self.copy_metadata_to(egg_info)
- native_libs = os.path.join(egg_info, "native_libs.txt")
- if all_outputs:
- log.info("writing %s", native_libs)
- if not self.dry_run:
- ensure_directory(native_libs)
- libs_file = open(native_libs, 'wt')
- libs_file.write('\n'.join(all_outputs))
- libs_file.write('\n')
- libs_file.close()
- elif os.path.isfile(native_libs):
- log.info("removing %s", native_libs)
- if not self.dry_run:
- os.unlink(native_libs)
-
- write_safety_flag(
- os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
- )
-
- if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
- log.warn(
- "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
- if self.exclude_source_files:
- self.zap_pyfiles()
-
- # Make the archive
- make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
- dry_run=self.dry_run, mode=self.gen_header())
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- # Add to 'Distribution.dist_files' so that the "upload" command works
- getattr(self.distribution, 'dist_files', []).append(
- ('bdist_egg', get_python_version(), self.egg_output))
-
- def zap_pyfiles(self):
- log.info("Removing .py files from temporary directory")
- for base, dirs, files in walk_egg(self.bdist_dir):
- for name in files:
- path = os.path.join(base, name)
-
- if name.endswith('.py'):
- log.debug("Deleting %s", path)
- os.unlink(path)
-
- if base.endswith('__pycache__'):
- path_old = path
-
- pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
- m = re.match(pattern, name)
- path_new = os.path.join(
- base, os.pardir, m.group('name') + '.pyc')
- log.info(
- "Renaming file from [%s] to [%s]"
- % (path_old, path_new))
- try:
- os.remove(path_new)
- except OSError:
- pass
- os.rename(path_old, path_new)
-
- def zip_safe(self):
- safe = getattr(self.distribution, 'zip_safe', None)
- if safe is not None:
- return safe
- log.warn("zip_safe flag not set; analyzing archive contents...")
- return analyze_egg(self.bdist_dir, self.stubs)
-
- def gen_header(self):
- return 'w'
-
- def copy_metadata_to(self, target_dir):
- "Copy metadata (egg info) to the target_dir"
- # normalize the path (so that a forward-slash in egg_info will
- # match using startswith below)
- norm_egg_info = os.path.normpath(self.egg_info)
- prefix = os.path.join(norm_egg_info, '')
- for path in self.ei_cmd.filelist.files:
- if path.startswith(prefix):
- target = os.path.join(target_dir, path[len(prefix):])
- ensure_directory(target)
- self.copy_file(path, target)
-
- def get_ext_outputs(self):
- """Get a list of relative paths to C extensions in the output distro"""
-
- all_outputs = []
- ext_outputs = []
-
- paths = {self.bdist_dir: ''}
- for base, dirs, files in sorted_walk(self.bdist_dir):
- for filename in files:
- if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
- all_outputs.append(paths[base] + filename)
- for filename in dirs:
- paths[os.path.join(base, filename)] = (paths[base] +
- filename + '/')
-
- if self.distribution.has_ext_modules():
- build_cmd = self.get_finalized_command('build_ext')
- for ext in build_cmd.extensions:
- if isinstance(ext, Library):
- continue
- fullname = build_cmd.get_ext_fullname(ext.name)
- filename = build_cmd.get_ext_filename(fullname)
- if not os.path.basename(filename).startswith('dl-'):
- if os.path.exists(os.path.join(self.bdist_dir, filename)):
- ext_outputs.append(filename)
-
- return all_outputs, ext_outputs
-
-
-NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
-
-
-def walk_egg(egg_dir):
- """Walk an unpacked egg's contents, skipping the metadata directory"""
- walker = sorted_walk(egg_dir)
- base, dirs, files = next(walker)
- if 'EGG-INFO' in dirs:
- dirs.remove('EGG-INFO')
- yield base, dirs, files
- for bdf in walker:
- yield bdf
-
-
-def analyze_egg(egg_dir, stubs):
- # check for existing flag in EGG-INFO
- for flag, fn in safety_flags.items():
- if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
- return flag
- if not can_scan():
- return False
- safe = True
- for base, dirs, files in walk_egg(egg_dir):
- for name in files:
- if name.endswith('.py') or name.endswith('.pyw'):
- continue
- elif name.endswith('.pyc') or name.endswith('.pyo'):
- # always scan, even if we already know we're not safe
- safe = scan_module(egg_dir, base, name, stubs) and safe
- return safe
-
-
-def write_safety_flag(egg_dir, safe):
- # Write or remove zip safety flag file(s)
- for flag, fn in safety_flags.items():
- fn = os.path.join(egg_dir, fn)
- if os.path.exists(fn):
- if safe is None or bool(safe) != flag:
- os.unlink(fn)
- elif safe is not None and bool(safe) == flag:
- f = open(fn, 'wt')
- f.write('\n')
- f.close()
-
-
-safety_flags = {
- True: 'zip-safe',
- False: 'not-zip-safe',
-}
-
-
-def scan_module(egg_dir, base, name, stubs):
- """Check whether module possibly uses unsafe-for-zipfile stuff"""
-
- filename = os.path.join(base, name)
- if filename[:-1] in stubs:
- return True # Extension module
- pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
- module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
- if sys.version_info < (3, 7):
- skip = 12 # skip magic & date & file size
- else:
- skip = 16 # skip magic & reserved? & date & file size
- f = open(filename, 'rb')
- f.read(skip)
- code = marshal.load(f)
- f.close()
- safe = True
- symbols = dict.fromkeys(iter_symbols(code))
- for bad in ['__file__', '__path__']:
- if bad in symbols:
- log.warn("%s: module references %s", module, bad)
- safe = False
- if 'inspect' in symbols:
- for bad in [
- 'getsource', 'getabsfile', 'getsourcefile', 'getfile',
- 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
- 'getinnerframes', 'getouterframes', 'stack', 'trace'
- ]:
- if bad in symbols:
- log.warn("%s: module MAY be using inspect.%s", module, bad)
- safe = False
- return safe
-
-
-def iter_symbols(code):
- """Yield names and strings used by `code` and its nested code objects"""
- for name in code.co_names:
- yield name
- for const in code.co_consts:
- if isinstance(const, str):
- yield const
- elif isinstance(const, CodeType):
- for name in iter_symbols(const):
- yield name
-
-
-def can_scan():
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- # CPython, PyPy, etc.
- return True
- log.warn("Unable to analyze compiled code on this platform.")
- log.warn("Please ask the author to include a 'zip_safe'"
- " setting (either True or False) in the package's setup.py")
-
-
-# Attribute names of options for commands that might need to be convinced to
-# install to the egg build directory
-
-INSTALL_DIRECTORY_ATTRS = [
- 'install_lib', 'install_dir', 'install_data', 'install_base'
-]
-
-
-def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
- mode='w'):
- """Create a zip file from all the files under 'base_dir'. The output
- zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
- Python module (if available) or the InfoZIP "zip" utility (if installed
- and found on the default search path). If neither tool is available,
- raises DistutilsExecError. Returns the name of the output zip file.
- """
- import zipfile
-
- mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
- log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
-
- def visit(z, dirname, names):
- for name in names:
- path = os.path.normpath(os.path.join(dirname, name))
- if os.path.isfile(path):
- p = path[len(base_dir) + 1:]
- if not dry_run:
- z.write(path, p)
- log.debug("adding '%s'", p)
-
- compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
- if not dry_run:
- z = zipfile.ZipFile(zip_filename, mode, compression=compression)
- for dirname, dirs, files in sorted_walk(base_dir):
- visit(z, dirname, files)
- z.close()
- else:
- for dirname, dirs, files in sorted_walk(base_dir):
- visit(None, dirname, files)
- return zip_filename
diff --git a/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py b/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
deleted file mode 100644
index 98bf5dea846..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import distutils.command.bdist_rpm as orig
-import warnings
-
-from setuptools import SetuptoolsDeprecationWarning
-
-
-class bdist_rpm(orig.bdist_rpm):
- """
- Override the default bdist_rpm behavior to do the following:
-
- 1. Run egg_info to ensure the name and version are properly calculated.
- 2. Always run 'install' using --single-version-externally-managed to
- disable eggs in RPM distributions.
- """
-
- def run(self):
- warnings.warn(
- "bdist_rpm is deprecated and will be removed in a future "
- "version. Use bdist_wheel (wheel packages) instead.",
- SetuptoolsDeprecationWarning,
- )
-
- # ensure distro name is up-to-date
- self.run_command('egg_info')
-
- orig.bdist_rpm.run(self)
-
- def _make_spec_file(self):
- spec = orig.bdist_rpm._make_spec_file(self)
- spec = [
- line.replace(
- "setup.py install ",
- "setup.py install --single-version-externally-managed "
- ).replace(
- "%setup",
- "%setup -n %{name}-%{unmangled_version}"
- )
- for line in spec
- ]
- return spec
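- # For example, a generated "%setup" line becomes
- # "%setup -n %{name}-%{unmangled_version}", so the RPM build
- # directory matches the unmangled version string.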
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_clib.py b/contrib/python/setuptools/py3/setuptools/command/build_clib.py
deleted file mode 100644
index 67ce2444ea6..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/build_clib.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import distutils.command.build_clib as orig
-from distutils.errors import DistutilsSetupError
-from distutils import log
-from setuptools.dep_util import newer_pairwise_group
-
-
-class build_clib(orig.build_clib):
- """
- Override the default build_clib behaviour to do the following:
-
- 1. Implement a rudimentary timestamp-based dependency system
- so 'compile()' doesn't run every time.
- 2. Add more keys to the 'build_info' dictionary:
- * obj_deps - specify dependencies for each object compiled.
- this should be a dictionary mapping a key
- with the source filename to a list of
- dependencies. Use an empty string for global
- dependencies.
- * cflags - specify a list of additional flags to pass to
- the compiler.
- """
-
- def build_libraries(self, libraries):
- for (lib_name, build_info) in libraries:
- sources = build_info.get('sources')
- if sources is None or not isinstance(sources, (list, tuple)):
- raise DistutilsSetupError(
- "in 'libraries' option (library '%s'), "
- "'sources' must be present and must be "
- "a list of source filenames" % lib_name)
- sources = list(sources)
-
- log.info("building '%s' library", lib_name)
-
- # Make sure everything is the correct type.
- # obj_deps should be a dictionary of keys as sources
- # and a list/tuple of files that are its dependencies.
- obj_deps = build_info.get('obj_deps', dict())
- if not isinstance(obj_deps, dict):
- raise DistutilsSetupError(
- "in 'libraries' option (library '%s'), "
- "'obj_deps' must be a dictionary of "
- "type 'source: list'" % lib_name)
- dependencies = []
-
- # Get the global dependencies that are specified by the '' key.
- # These will go into every source's dependency list.
- global_deps = obj_deps.get('', list())
- if not isinstance(global_deps, (list, tuple)):
- raise DistutilsSetupError(
- "in 'libraries' option (library '%s'), "
- "'obj_deps' must be a dictionary of "
- "type 'source: list'" % lib_name)
-
- # Build the list to be used by newer_pairwise_group
- # each source will be auto-added to its dependencies.
- for source in sources:
- src_deps = [source]
- src_deps.extend(global_deps)
- extra_deps = obj_deps.get(source, list())
- if not isinstance(extra_deps, (list, tuple)):
- raise DistutilsSetupError(
- "in 'libraries' option (library '%s'), "
- "'obj_deps' must be a dictionary of "
- "type 'source: list'" % lib_name)
- src_deps.extend(extra_deps)
- dependencies.append(src_deps)
-
- expected_objects = self.compiler.object_filenames(
- sources,
- output_dir=self.build_temp,
- )
-
- if (
- newer_pairwise_group(dependencies, expected_objects)
- != ([], [])
- ):
- # First, compile the source code to object files in the library
- # directory. (This should probably change to putting object
- # files in a temporary build directory.)
- macros = build_info.get('macros')
- include_dirs = build_info.get('include_dirs')
- cflags = build_info.get('cflags')
- self.compiler.compile(
- sources,
- output_dir=self.build_temp,
- macros=macros,
- include_dirs=include_dirs,
- extra_postargs=cflags,
- debug=self.debug
- )
-
- # Now "link" the object files together into a static library.
- # (On Unix at least, this isn't really linking -- it just
- # builds an archive. Whatever.)
- self.compiler.create_static_lib(
- expected_objects,
- lib_name,
- output_dir=self.build_clib,
- debug=self.debug
- )
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_ext.py b/contrib/python/setuptools/py3/setuptools/command/build_ext.py
deleted file mode 100644
index c59eff8bbf7..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/build_ext.py
+++ /dev/null
@@ -1,328 +0,0 @@
-import os
-import sys
-import itertools
-from importlib.machinery import EXTENSION_SUFFIXES
-from distutils.command.build_ext import build_ext as _du_build_ext
-from distutils.file_util import copy_file
-from distutils.ccompiler import new_compiler
-from distutils.sysconfig import customize_compiler, get_config_var
-from distutils.errors import DistutilsError
-from distutils import log
-
-from setuptools.extension import Library
-
-try:
- # Attempt to use Cython for building extensions, if available
- from Cython.Distutils.build_ext import build_ext as _build_ext
- # Additionally, assert that the compiler module will load
- # also. Ref #1229.
- __import__('Cython.Compiler.Main')
-except ImportError:
- _build_ext = _du_build_ext
-
-# make sure _config_vars is initialized
-get_config_var("LDSHARED")
-from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa
-
-
-def _customize_compiler_for_shlib(compiler):
- if sys.platform == "darwin":
- # building .dylib requires additional compiler flags on OSX; here we
- # temporarily substitute the pyconfig.h variables so that distutils'
- # 'customize_compiler' uses them before we build the shared libraries.
- tmp = _CONFIG_VARS.copy()
- try:
- # XXX Help! I don't have any idea whether these are right...
- _CONFIG_VARS['LDSHARED'] = (
- "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
- _CONFIG_VARS['CCSHARED'] = " -dynamiclib"
- _CONFIG_VARS['SO'] = ".dylib"
- customize_compiler(compiler)
- finally:
- _CONFIG_VARS.clear()
- _CONFIG_VARS.update(tmp)
- else:
- customize_compiler(compiler)
-
-
-have_rtld = False
-use_stubs = False
-libtype = 'shared'
-
-if sys.platform == "darwin":
- use_stubs = True
-elif os.name != 'nt':
- try:
- import dl
- use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
- except ImportError:
- pass
-
-
-def if_dl(s):
- return s if have_rtld else ''
-
-
-def get_abi3_suffix():
- """Return the file extension for an abi3-compliant Extension()"""
- for suffix in EXTENSION_SUFFIXES:
- if '.abi3' in suffix: # Unix
- return suffix
- elif suffix == '.pyd': # Windows
- return suffix
-
-
-class build_ext(_build_ext):
- def run(self):
- """Build extensions in build directory, then copy if --inplace"""
- old_inplace, self.inplace = self.inplace, 0
- _build_ext.run(self)
- self.inplace = old_inplace
- if old_inplace:
- self.copy_extensions_to_source()
-
- def copy_extensions_to_source(self):
- build_py = self.get_finalized_command('build_py')
- for ext in self.extensions:
- fullname = self.get_ext_fullname(ext.name)
- filename = self.get_ext_filename(fullname)
- modpath = fullname.split('.')
- package = '.'.join(modpath[:-1])
- package_dir = build_py.get_package_dir(package)
- dest_filename = os.path.join(package_dir,
- os.path.basename(filename))
- src_filename = os.path.join(self.build_lib, filename)
-
- # Always copy, even if source is older than destination, to ensure
- # that the right extensions for the current Python/platform are
- # used.
- copy_file(
- src_filename, dest_filename, verbose=self.verbose,
- dry_run=self.dry_run
- )
- if ext._needs_stub:
- self.write_stub(package_dir or os.curdir, ext, True)
-
- def get_ext_filename(self, fullname):
- so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX')
- if so_ext:
- filename = os.path.join(*fullname.split('.')) + so_ext
- else:
- filename = _build_ext.get_ext_filename(self, fullname)
- so_ext = get_config_var('EXT_SUFFIX')
-
- if fullname in self.ext_map:
- ext = self.ext_map[fullname]
- use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix()
- if use_abi3:
- filename = filename[:-len(so_ext)]
- so_ext = get_abi3_suffix()
- filename = filename + so_ext
- if isinstance(ext, Library):
- fn, ext = os.path.splitext(filename)
- return self.shlib_compiler.library_filename(fn, libtype)
- elif use_stubs and ext._links_to_dynamic:
- d, fn = os.path.split(filename)
- return os.path.join(d, 'dl-' + fn)
- return filename
-
- def initialize_options(self):
- _build_ext.initialize_options(self)
- self.shlib_compiler = None
- self.shlibs = []
- self.ext_map = {}
-
- def finalize_options(self):
- _build_ext.finalize_options(self)
- self.extensions = self.extensions or []
- self.check_extensions_list(self.extensions)
- self.shlibs = [ext for ext in self.extensions
- if isinstance(ext, Library)]
- if self.shlibs:
- self.setup_shlib_compiler()
- for ext in self.extensions:
- ext._full_name = self.get_ext_fullname(ext.name)
- for ext in self.extensions:
- fullname = ext._full_name
- self.ext_map[fullname] = ext
-
- # distutils 3.1 will also ask for module names
- # XXX what to do with conflicts?
- self.ext_map[fullname.split('.')[-1]] = ext
-
- ltd = self.shlibs and self.links_to_dynamic(ext) or False
- ns = ltd and use_stubs and not isinstance(ext, Library)
- ext._links_to_dynamic = ltd
- ext._needs_stub = ns
- filename = ext._file_name = self.get_ext_filename(fullname)
- libdir = os.path.dirname(os.path.join(self.build_lib, filename))
- if ltd and libdir not in ext.library_dirs:
- ext.library_dirs.append(libdir)
- if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
- ext.runtime_library_dirs.append(os.curdir)
-
- def setup_shlib_compiler(self):
- compiler = self.shlib_compiler = new_compiler(
- compiler=self.compiler, dry_run=self.dry_run, force=self.force
- )
- _customize_compiler_for_shlib(compiler)
-
- if self.include_dirs is not None:
- compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name, value) in self.define:
- compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- compiler.undefine_macro(macro)
- if self.libraries is not None:
- compiler.set_libraries(self.libraries)
- if self.library_dirs is not None:
- compiler.set_library_dirs(self.library_dirs)
- if self.rpath is not None:
- compiler.set_runtime_library_dirs(self.rpath)
- if self.link_objects is not None:
- compiler.set_link_objects(self.link_objects)
-
- # hack so distutils' build_extension() builds a library instead
- compiler.link_shared_object = link_shared_object.__get__(compiler)
-
- def get_export_symbols(self, ext):
- if isinstance(ext, Library):
- return ext.export_symbols
- return _build_ext.get_export_symbols(self, ext)
-
- def build_extension(self, ext):
- ext._convert_pyx_sources_to_lang()
- _compiler = self.compiler
- try:
- if isinstance(ext, Library):
- self.compiler = self.shlib_compiler
- _build_ext.build_extension(self, ext)
- if ext._needs_stub:
- cmd = self.get_finalized_command('build_py').build_lib
- self.write_stub(cmd, ext)
- finally:
- self.compiler = _compiler
-
- def links_to_dynamic(self, ext):
- """Return true if 'ext' links to a dynamic lib in the same package"""
- # XXX this should check to ensure the lib is actually being built
- # XXX as dynamic, and not just using a locally-found version or a
- # XXX static-compiled version
- libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
- pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
- return any(pkg + libname in libnames for libname in ext.libraries)
-
- def get_outputs(self):
- return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
-
- def __get_stubs_outputs(self):
- # assemble the base name for each extension that needs a stub
- ns_ext_bases = (
- os.path.join(self.build_lib, *ext._full_name.split('.'))
- for ext in self.extensions
- if ext._needs_stub
- )
- # pair each base with the extension
- pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
- return list(base + fnext for base, fnext in pairs)
-
- def __get_output_extensions(self):
- yield '.py'
- yield '.pyc'
- if self.get_finalized_command('build_py').optimize:
- yield '.pyo'
-
- def write_stub(self, output_dir, ext, compile=False):
- log.info("writing stub loader for %s to %s", ext._full_name,
- output_dir)
- stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
- '.py')
- if compile and os.path.exists(stub_file):
- raise DistutilsError(stub_file + " already exists! Please delete.")
- if not self.dry_run:
- f = open(stub_file, 'w')
- f.write(
- '\n'.join([
- "def __bootstrap__():",
- " global __bootstrap__, __file__, __loader__",
- " import sys, os, pkg_resources, importlib.util" +
- if_dl(", dl"),
- " __file__ = pkg_resources.resource_filename"
- "(__name__,%r)"
- % os.path.basename(ext._file_name),
- " del __bootstrap__",
- " if '__loader__' in globals():",
- " del __loader__",
- if_dl(" old_flags = sys.getdlopenflags()"),
- " old_dir = os.getcwd()",
- " try:",
- " os.chdir(os.path.dirname(__file__))",
- if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
- " spec = importlib.util.spec_from_file_location(",
- " __name__, __file__)",
- " mod = importlib.util.module_from_spec(spec)",
- " spec.loader.exec_module(mod)",
- " finally:",
- if_dl(" sys.setdlopenflags(old_flags)"),
- " os.chdir(old_dir)",
- "__bootstrap__()",
- "" # terminal \n
- ])
- )
- f.close()
- if compile:
- from distutils.util import byte_compile
-
- byte_compile([stub_file], optimize=0,
- force=True, dry_run=self.dry_run)
- optimize = self.get_finalized_command('install_lib').optimize
- if optimize > 0:
- byte_compile([stub_file], optimize=optimize,
- force=True, dry_run=self.dry_run)
- if os.path.exists(stub_file) and not self.dry_run:
- os.unlink(stub_file)
-
-
-if use_stubs or os.name == 'nt':
- # Build shared libraries
- #
- def link_shared_object(
- self, objects, output_libname, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None, export_symbols=None,
- debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
- target_lang=None):
- self.link(
- self.SHARED_LIBRARY, objects, output_libname,
- output_dir, libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug, extra_preargs, extra_postargs,
- build_temp, target_lang
- )
-else:
- # Build static libraries everywhere else
- libtype = 'static'
-
- def link_shared_object(
- self, objects, output_libname, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None, export_symbols=None,
- debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
- target_lang=None):
- # XXX we need to either disallow these attrs on Library instances,
- # or warn/abort here if set, or something...
- # libraries=None, library_dirs=None, runtime_library_dirs=None,
- # export_symbols=None, extra_preargs=None, extra_postargs=None,
- # build_temp=None
-
- assert output_dir is None # distutils build_ext doesn't pass this
- output_dir, filename = os.path.split(output_libname)
- basename, ext = os.path.splitext(filename)
- if self.library_filename("x").startswith('lib'):
- # strip 'lib' prefix; this is kludgy if some platform uses
- # a different prefix
- basename = basename[3:]
-
- self.create_static_lib(
- objects, basename, output_dir, debug, target_lang
- )
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_py.py b/contrib/python/setuptools/py3/setuptools/command/build_py.py
deleted file mode 100644
index c3fdc0927c5..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/build_py.py
+++ /dev/null
@@ -1,242 +0,0 @@
-from glob import glob
-from distutils.util import convert_path
-import distutils.command.build_py as orig
-import os
-import fnmatch
-import textwrap
-import io
-import distutils.errors
-import itertools
-import stat
-from setuptools.extern.more_itertools import unique_everseen
-
-
-def make_writable(target):
- os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
-
-
-class build_py(orig.build_py):
- """Enhanced 'build_py' command that includes data files with packages
-
- The data files are specified via a 'package_data' argument to 'setup()'.
- See 'setuptools.dist.Distribution' for more details.
-
- Also, this version of the 'build_py' command allows you to specify both
- 'py_modules' and 'packages' in the same setup operation.
- """
-
- def finalize_options(self):
- orig.build_py.finalize_options(self)
- self.package_data = self.distribution.package_data
- self.exclude_package_data = self.distribution.exclude_package_data or {}
- if 'data_files' in self.__dict__:
- del self.__dict__['data_files']
- self.__updated_files = []
-
- def run(self):
- """Build modules, packages, and copy data files to build directory"""
- if not self.py_modules and not self.packages:
- return
-
- if self.py_modules:
- self.build_modules()
-
- if self.packages:
- self.build_packages()
- self.build_package_data()
-
- # Only compile actual .py files, using our base class' idea of what our
- # output files are.
- self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
-
- def __getattr__(self, attr):
- "lazily compute data files"
- if attr == 'data_files':
- self.data_files = self._get_data_files()
- return self.data_files
- return orig.build_py.__getattr__(self, attr)
-
- def build_module(self, module, module_file, package):
- outfile, copied = orig.build_py.build_module(self, module, module_file, package)
- if copied:
- self.__updated_files.append(outfile)
- return outfile, copied
-
- def _get_data_files(self):
- """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
- self.analyze_manifest()
- return list(map(self._get_pkg_data_files, self.packages or ()))
-
- def get_data_files_without_manifest(self):
- """
- Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,
- but without triggering any attempt to analyze or build the manifest.
- """
- # Prevent eventual errors from unset `manifest_files`
- # (that would otherwise be set by `analyze_manifest`)
- self.__dict__.setdefault('manifest_files', {})
- return list(map(self._get_pkg_data_files, self.packages or ()))
-
- def _get_pkg_data_files(self, package):
- # Locate package source directory
- src_dir = self.get_package_dir(package)
-
- # Compute package build directory
- build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
- # Strip directory from globbed filenames
- filenames = [
- os.path.relpath(file, src_dir)
- for file in self.find_data_files(package, src_dir)
- ]
- return package, src_dir, build_dir, filenames
-
- def find_data_files(self, package, src_dir):
- """Return filenames for package's data files in 'src_dir'"""
- patterns = self._get_platform_patterns(
- self.package_data,
- package,
- src_dir,
- )
- globs_expanded = map(glob, patterns)
- # flatten the expanded globs into an iterable of matches
- globs_matches = itertools.chain.from_iterable(globs_expanded)
- glob_files = filter(os.path.isfile, globs_matches)
- files = itertools.chain(
- self.manifest_files.get(package, []),
- glob_files,
- )
- return self.exclude_data_files(package, src_dir, files)
-
- def build_package_data(self):
- """Copy data files into build directory"""
- for package, src_dir, build_dir, filenames in self.data_files:
- for filename in filenames:
- target = os.path.join(build_dir, filename)
- self.mkpath(os.path.dirname(target))
- srcfile = os.path.join(src_dir, filename)
- outf, copied = self.copy_file(srcfile, target)
- make_writable(target)
- srcfile = os.path.abspath(srcfile)
-
- def analyze_manifest(self):
- self.manifest_files = mf = {}
- if not self.distribution.include_package_data:
- return
- src_dirs = {}
- for package in self.packages or ():
- # Locate package source directory
- src_dirs[assert_relative(self.get_package_dir(package))] = package
-
- self.run_command('egg_info')
- ei_cmd = self.get_finalized_command('egg_info')
- for path in ei_cmd.filelist.files:
- d, f = os.path.split(assert_relative(path))
- prev = None
- oldf = f
- while d and d != prev and d not in src_dirs:
- prev = d
- d, df = os.path.split(d)
- f = os.path.join(df, f)
- if d in src_dirs:
- if path.endswith('.py') and f == oldf:
- continue # it's a module, not data
- mf.setdefault(src_dirs[d], []).append(path)
-
- def get_data_files(self):
- pass # Lazily compute data files in _get_data_files() function.
-
- def check_package(self, package, package_dir):
- """Check namespace packages' __init__ for declare_namespace"""
- try:
- return self.packages_checked[package]
- except KeyError:
- pass
-
- init_py = orig.build_py.check_package(self, package, package_dir)
- self.packages_checked[package] = init_py
-
- if not init_py or not self.distribution.namespace_packages:
- return init_py
-
- for pkg in self.distribution.namespace_packages:
- if pkg == package or pkg.startswith(package + '.'):
- break
- else:
- return init_py
-
- with io.open(init_py, 'rb') as f:
- contents = f.read()
- if b'declare_namespace' not in contents:
- raise distutils.errors.DistutilsError(
- "Namespace package problem: %s is a namespace package, but "
- "its\n__init__.py does not call declare_namespace()! Please "
- 'fix it.\n(See the setuptools manual under '
- '"Namespace Packages" for details.)\n"' % (package,)
- )
- return init_py
-
- def initialize_options(self):
- self.packages_checked = {}
- orig.build_py.initialize_options(self)
-
- def get_package_dir(self, package):
- res = orig.build_py.get_package_dir(self, package)
- if self.distribution.src_root is not None:
- return os.path.join(self.distribution.src_root, res)
- return res
-
- def exclude_data_files(self, package, src_dir, files):
- """Filter filenames for package's data files in 'src_dir'"""
- files = list(files)
- patterns = self._get_platform_patterns(
- self.exclude_package_data,
- package,
- src_dir,
- )
- match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)
- # flatten the groups of matches into an iterable of matches
- matches = itertools.chain.from_iterable(match_groups)
- bad = set(matches)
- keepers = (fn for fn in files if fn not in bad)
- # ditch dupes
- return list(unique_everseen(keepers))
-
- @staticmethod
- def _get_platform_patterns(spec, package, src_dir):
- """
-        Yield platform-specific path patterns (suitable for glob
-        or fnmatch) from a glob-based spec (such as
- self.package_data or self.exclude_package_data)
- matching package in src_dir.
- """
- raw_patterns = itertools.chain(
- spec.get('', []),
- spec.get(package, []),
- )
- return (
- # Each pattern has to be converted to a platform-specific path
- os.path.join(src_dir, convert_path(pattern))
- for pattern in raw_patterns
- )
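-    # Illustrative sketch (not part of the original module): with a
-    # hypothetical ``package_data``-style spec such as
-    #     spec = {'': ['*.txt'], 'mypkg': ['data/*.json']}
-    # ``build_py._get_platform_patterns(spec, 'mypkg', 'src/mypkg')`` yields
-    # 'src/mypkg/*.txt' and 'src/mypkg/data/*.json' (separators adjusted
-    # by convert_path on non-POSIX platforms).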
-
-
-def assert_relative(path):
- if not os.path.isabs(path):
- return path
- from distutils.errors import DistutilsSetupError
-
- msg = (
- textwrap.dedent(
- """
- Error: setup script specifies an absolute path:
-
- %s
-
- setup() arguments must *always* be /-separated paths relative to the
- setup.py directory, *never* absolute paths.
- """
- ).lstrip()
- % path
- )
- raise DistutilsSetupError(msg)
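-
-
-# Behaviour sketch (illustrative, not in the original file): relative paths
-# pass through unchanged, absolute ones abort the build:
-#
-#     assert_relative('pkg/data.txt')   # -> 'pkg/data.txt'
-#     assert_relative('/abs/data.txt')  # raises DistutilsSetupError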
diff --git a/contrib/python/setuptools/py3/setuptools/command/develop.py b/contrib/python/setuptools/py3/setuptools/command/develop.py
deleted file mode 100644
index 24fb0a7c81b..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/develop.py
+++ /dev/null
@@ -1,193 +0,0 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsError, DistutilsOptionError
-import os
-import glob
-import io
-
-import pkg_resources
-from setuptools.command.easy_install import easy_install
-from setuptools import namespaces
-import setuptools
-
-
-class develop(namespaces.DevelopInstaller, easy_install):
- """Set up package for development"""
-
- description = "install package in 'development mode'"
-
- user_options = easy_install.user_options + [
- ("uninstall", "u", "Uninstall this source package"),
- ("egg-path=", None, "Set the path to be used in the .egg-link file"),
- ]
-
- boolean_options = easy_install.boolean_options + ['uninstall']
-
- command_consumes_arguments = False # override base
-
- def run(self):
- if self.uninstall:
- self.multi_version = True
- self.uninstall_link()
- self.uninstall_namespaces()
- else:
- self.install_for_development()
- self.warn_deprecated_options()
-
- def initialize_options(self):
- self.uninstall = None
- self.egg_path = None
- easy_install.initialize_options(self)
- self.setup_path = None
- self.always_copy_from = '.' # always copy eggs installed in curdir
-
- def finalize_options(self):
- ei = self.get_finalized_command("egg_info")
- if ei.broken_egg_info:
- template = "Please rename %r to %r before using 'develop'"
- args = ei.egg_info, ei.broken_egg_info
- raise DistutilsError(template % args)
- self.args = [ei.egg_name]
-
- easy_install.finalize_options(self)
- self.expand_basedirs()
- self.expand_dirs()
- # pick up setup-dir .egg files only: no .egg-info
- self.package_index.scan(glob.glob('*.egg'))
-
- egg_link_fn = ei.egg_name + '.egg-link'
- self.egg_link = os.path.join(self.install_dir, egg_link_fn)
- self.egg_base = ei.egg_base
- if self.egg_path is None:
- self.egg_path = os.path.abspath(ei.egg_base)
-
- target = pkg_resources.normalize_path(self.egg_base)
- egg_path = pkg_resources.normalize_path(
- os.path.join(self.install_dir, self.egg_path)
- )
- if egg_path != target:
- raise DistutilsOptionError(
- "--egg-path must be a relative path from the install"
- " directory to " + target
- )
-
- # Make a distribution for the package's source
- self.dist = pkg_resources.Distribution(
- target,
- pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
- project_name=ei.egg_name,
- )
-
- self.setup_path = self._resolve_setup_path(
- self.egg_base,
- self.install_dir,
- self.egg_path,
- )
-
- @staticmethod
- def _resolve_setup_path(egg_base, install_dir, egg_path):
- """
-        Generate a relative path from egg_base back to '.', where the
-        setup script resides, and verify that it still points at the
-        setup directory when applied from $install_dir/$egg_path.
- """
- path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
- if path_to_setup != os.curdir:
- path_to_setup = '../' * (path_to_setup.count('/') + 1)
- resolved = pkg_resources.normalize_path(
- os.path.join(install_dir, egg_path, path_to_setup)
- )
- if resolved != pkg_resources.normalize_path(os.curdir):
- raise DistutilsOptionError(
- "Can't get a consistent path to setup script from"
- " installation directory",
- resolved,
- pkg_resources.normalize_path(os.curdir),
- )
- return path_to_setup
-
- def install_for_development(self):
- self.run_command('egg_info')
-
- # Build extensions in-place
- self.reinitialize_command('build_ext', inplace=1)
- self.run_command('build_ext')
-
- if setuptools.bootstrap_install_from:
- self.easy_install(setuptools.bootstrap_install_from)
- setuptools.bootstrap_install_from = None
-
- self.install_namespaces()
-
- # create an .egg-link in the installation dir, pointing to our egg
- log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
- if not self.dry_run:
- with open(self.egg_link, "w") as f:
- f.write(self.egg_path + "\n" + self.setup_path)
- # postprocess the installed distro, fixing up .pth, installing scripts,
- # and handling requirements
- self.process_distribution(None, self.dist, not self.no_deps)
-
- def uninstall_link(self):
- if os.path.exists(self.egg_link):
- log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
- egg_link_file = open(self.egg_link)
- contents = [line.rstrip() for line in egg_link_file]
- egg_link_file.close()
- if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
- log.warn("Link points to %s: uninstall aborted", contents)
- return
- if not self.dry_run:
- os.unlink(self.egg_link)
- if not self.dry_run:
- self.update_pth(self.dist) # remove any .pth link to us
- if self.distribution.scripts:
- # XXX should also check for entry point scripts!
- log.warn("Note: you must uninstall or replace scripts manually!")
-
- def install_egg_scripts(self, dist):
- if dist is not self.dist:
- # Installing a dependency, so fall back to normal behavior
- return easy_install.install_egg_scripts(self, dist)
-
- # create wrapper scripts in the script dir, pointing to dist.scripts
-
- # new-style...
- self.install_wrapper_scripts(dist)
-
- # ...and old-style
- for script_name in self.distribution.scripts or []:
- script_path = os.path.abspath(convert_path(script_name))
- script_name = os.path.basename(script_path)
- with io.open(script_path) as strm:
- script_text = strm.read()
- self.install_script(dist, script_name, script_text, script_path)
-
- def install_wrapper_scripts(self, dist):
- dist = VersionlessRequirement(dist)
- return easy_install.install_wrapper_scripts(self, dist)
-
-
-class VersionlessRequirement:
- """
- Adapt a pkg_resources.Distribution to simply return the project
- name as the 'requirement' so that scripts will work across
- multiple versions.
-
- >>> from pkg_resources import Distribution
- >>> dist = Distribution(project_name='foo', version='1.0')
- >>> str(dist.as_requirement())
- 'foo==1.0'
- >>> adapted_dist = VersionlessRequirement(dist)
- >>> str(adapted_dist.as_requirement())
- 'foo'
- """
-
- def __init__(self, dist):
- self.__dist = dist
-
- def __getattr__(self, name):
- return getattr(self.__dist, name)
-
- def as_requirement(self):
- return self.project_name
diff --git a/contrib/python/setuptools/py3/setuptools/command/dist_info.py b/contrib/python/setuptools/py3/setuptools/command/dist_info.py
deleted file mode 100644
index c45258fa03a..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/dist_info.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Create a dist_info directory
-As defined in the wheel specification
-"""
-
-import os
-
-from distutils.core import Command
-from distutils import log
-
-
-class dist_info(Command):
-
- description = 'create a .dist-info directory'
-
- user_options = [
- ('egg-base=', 'e', "directory containing .egg-info directories"
- " (default: top of the source tree)"),
- ]
-
- def initialize_options(self):
- self.egg_base = None
-
- def finalize_options(self):
- pass
-
- def run(self):
- egg_info = self.get_finalized_command('egg_info')
- egg_info.egg_base = self.egg_base
- egg_info.finalize_options()
- egg_info.run()
- dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info'
- log.info("creating '{}'".format(os.path.abspath(dist_info_dir)))
-
- bdist_wheel = self.get_finalized_command('bdist_wheel')
- bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir)
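-
-
-# Typical invocation (sketch; assumes a project driven by setup.py):
-#
-#     python setup.py dist_info --egg-base build
-#
-# regenerates the .egg-info metadata under ``build`` and converts it into a
-# ``.dist-info`` directory via bdist_wheel's egg2dist helper.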
diff --git a/contrib/python/setuptools/py3/setuptools/command/easy_install.py b/contrib/python/setuptools/py3/setuptools/command/easy_install.py
deleted file mode 100644
index fc848d0d1c3..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/easy_install.py
+++ /dev/null
@@ -1,2299 +0,0 @@
-"""
-Easy Install
-------------
-
-A tool for doing automatic download/extract/build of distutils-based Python
-packages. For detailed documentation, see the accompanying EasyInstall.txt
-file, or visit the `EasyInstall home page`__.
-
-__ https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
-
-"""
-
-from glob import glob
-from distutils.util import get_platform
-from distutils.util import convert_path, subst_vars
-from distutils.errors import (
- DistutilsArgError, DistutilsOptionError,
- DistutilsError, DistutilsPlatformError,
-)
-from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
-from distutils import log, dir_util
-from distutils.command.build_scripts import first_line_re
-from distutils.spawn import find_executable
-import sys
-import os
-import zipimport
-import shutil
-import tempfile
-import zipfile
-import re
-import stat
-import random
-import textwrap
-import warnings
-import site
-import struct
-import contextlib
-import subprocess
-import shlex
-import io
-import configparser
-
-
-from sysconfig import get_config_vars, get_path
-
-from setuptools import SetuptoolsDeprecationWarning
-
-from setuptools import Command
-from setuptools.sandbox import run_setup
-from setuptools.command import setopt
-from setuptools.archive_util import unpack_archive
-from setuptools.package_index import (
- PackageIndex, parse_requirement_arg, URL_SCHEME,
-)
-from setuptools.command import bdist_egg, egg_info
-from setuptools.wheel import Wheel
-from pkg_resources import (
- yield_lines, normalize_path, resource_string, ensure_directory,
- get_distribution, find_distributions, Environment, Requirement,
- Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
- VersionConflict, DEVELOP_DIST,
-)
-import pkg_resources
-
-# Turn on PEP440Warnings
-warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
-
-__all__ = [
- 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
- 'get_exe_prefixes',
-]
-
-
-def is_64bit():
- return struct.calcsize("P") == 8
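-    # (struct.calcsize("P") is the size in bytes of a C pointer: 8 on
-    # 64-bit interpreters, 4 on 32-bit ones.)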
-
-
-def samefile(p1, p2):
- """
- Determine if two paths reference the same file.
-
- Augments os.path.samefile to work on Windows and
- suppresses errors if the path doesn't exist.
- """
- both_exist = os.path.exists(p1) and os.path.exists(p2)
- use_samefile = hasattr(os.path, 'samefile') and both_exist
- if use_samefile:
- return os.path.samefile(p1, p2)
- norm_p1 = os.path.normpath(os.path.normcase(p1))
- norm_p2 = os.path.normpath(os.path.normcase(p2))
- return norm_p1 == norm_p2
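-
-
-# Illustrative example (not from the original source); the fallback branch
-# treats paths differing only in case or separator style as equal:
-#
-#     samefile('C:\\Tools\\x.py', 'c:/tools/X.PY')  # True on Windows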
-
-
-def _to_bytes(s):
- return s.encode('utf8')
-
-
-def isascii(s):
- try:
- s.encode('ascii')
- return True
- except UnicodeError:
- return False
-
-
-def _one_liner(text):
- return textwrap.dedent(text).strip().replace('\n', '; ')
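-
-
-# For instance (illustrative):
-#
-#     _one_liner("""
-#         import sys
-#         sys.stdout.write('hi')
-#     """)
-#     # -> "import sys; sys.stdout.write('hi')"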
-
-
-class easy_install(Command):
- """Manage a download/build/install process"""
- description = "Find/get/install Python packages"
- command_consumes_arguments = True
-
- user_options = [
- ('prefix=', None, "installation prefix"),
- ("zip-ok", "z", "install package as a zipfile"),
- ("multi-version", "m", "make apps have to require() a version"),
- ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
- ("install-dir=", "d", "install package to DIR"),
- ("script-dir=", "s", "install scripts to DIR"),
- ("exclude-scripts", "x", "Don't install scripts"),
- ("always-copy", "a", "Copy all needed packages to install dir"),
- ("index-url=", "i", "base URL of Python Package Index"),
- ("find-links=", "f", "additional URL(s) to search for packages"),
- ("build-directory=", "b",
- "download/extract/build in DIR; keep the results"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('record=', None,
- "filename in which to record list of installed files"),
- ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
- ('site-dirs=', 'S', "list of directories where .pth files work"),
- ('editable', 'e', "Install specified packages in editable form"),
- ('no-deps', 'N', "don't install dependencies"),
- ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
- ('local-snapshots-ok', 'l',
- "allow building eggs from local checkouts"),
- ('version', None, "print version information and exit"),
- ('no-find-links', None,
- "Don't load find-links defined in packages being installed"),
- ('user', None, "install in user site-package '%s'" % site.USER_SITE)
- ]
- boolean_options = [
- 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
- 'editable',
- 'no-deps', 'local-snapshots-ok', 'version',
- 'user'
- ]
-
- negative_opt = {'always-unzip': 'zip-ok'}
- create_index = PackageIndex
-
- def initialize_options(self):
- warnings.warn(
- "easy_install command is deprecated. "
- "Use build and pip and other standards-based tools.",
- EasyInstallDeprecationWarning,
- )
-
- # the --user option seems to be an opt-in one,
- # so the default should be False.
- self.user = 0
- self.zip_ok = self.local_snapshots_ok = None
- self.install_dir = self.script_dir = self.exclude_scripts = None
- self.index_url = None
- self.find_links = None
- self.build_directory = None
- self.args = None
- self.optimize = self.record = None
- self.upgrade = self.always_copy = self.multi_version = None
- self.editable = self.no_deps = self.allow_hosts = None
- self.root = self.prefix = self.no_report = None
- self.version = None
- self.install_purelib = None # for pure module distributions
- self.install_platlib = None # non-pure (dists w/ extensions)
- self.install_headers = None # for C/C++ headers
- self.install_lib = None # set to either purelib or platlib
- self.install_scripts = None
- self.install_data = None
- self.install_base = None
- self.install_platbase = None
- if site.ENABLE_USER_SITE:
- self.install_userbase = site.USER_BASE
- self.install_usersite = site.USER_SITE
- else:
- self.install_userbase = None
- self.install_usersite = None
- self.no_find_links = None
-
- # Options not specifiable via command line
- self.package_index = None
- self.pth_file = self.always_copy_from = None
- self.site_dirs = None
- self.installed_projects = {}
- # Always read easy_install options, even if we are subclassed, or have
- # an independent instance created. This ensures that defaults will
- # always come from the standard configuration file(s)' "easy_install"
- # section, even if this is a "develop" or "install" command, or some
- # other embedding.
- self._dry_run = None
- self.verbose = self.distribution.verbose
- self.distribution._set_command_options(
- self, self.distribution.get_option_dict('easy_install')
- )
-
- def delete_blockers(self, blockers):
- extant_blockers = (
- filename for filename in blockers
- if os.path.exists(filename) or os.path.islink(filename)
- )
- list(map(self._delete_path, extant_blockers))
-
- def _delete_path(self, path):
- log.info("Deleting %s", path)
- if self.dry_run:
- return
-
- is_tree = os.path.isdir(path) and not os.path.islink(path)
- remover = rmtree if is_tree else os.unlink
- remover(path)
-
- @staticmethod
- def _render_version():
- """
- Render the Setuptools version and installation details, then exit.
- """
- ver = '{}.{}'.format(*sys.version_info)
- dist = get_distribution('setuptools')
- tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
- print(tmpl.format(**locals()))
- raise SystemExit()
-
- def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME
- self.version and self._render_version()
-
- py_version = sys.version.split()[0]
- prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
-
- self.config_vars = {
- 'dist_name': self.distribution.get_name(),
- 'dist_version': self.distribution.get_version(),
- 'dist_fullname': self.distribution.get_fullname(),
- 'py_version': py_version,
-            'py_version_short': '{}.{}'.format(*sys.version_info[:2]),
-            'py_version_nodot': '{}{}'.format(*sys.version_info[:2]),
- 'sys_prefix': prefix,
- 'prefix': prefix,
- 'sys_exec_prefix': exec_prefix,
- 'exec_prefix': exec_prefix,
- # Only python 3.2+ has abiflags
- 'abiflags': getattr(sys, 'abiflags', ''),
- }
-
- if site.ENABLE_USER_SITE:
- self.config_vars['userbase'] = self.install_userbase
- self.config_vars['usersite'] = self.install_usersite
-
- elif self.user:
- log.warn("WARNING: The user site-packages directory is disabled.")
-
- self._fix_install_dir_for_user_site()
-
- self.expand_basedirs()
- self.expand_dirs()
-
- self._expand(
- 'install_dir', 'script_dir', 'build_directory',
- 'site_dirs',
- )
- # If a non-default installation directory was specified, default the
- # script directory to match it.
- if self.script_dir is None:
- self.script_dir = self.install_dir
-
- if self.no_find_links is None:
- self.no_find_links = False
-
- # Let install_dir get set by install_lib command, which in turn
- # gets its info from the install command, and takes into account
- # --prefix and --home and all that other crud.
- self.set_undefined_options(
- 'install_lib', ('install_dir', 'install_dir')
- )
- # Likewise, set default script_dir from 'install_scripts.install_dir'
- self.set_undefined_options(
- 'install_scripts', ('install_dir', 'script_dir')
- )
-
- if self.user and self.install_purelib:
- self.install_dir = self.install_purelib
- self.script_dir = self.install_scripts
- # default --record from the install command
- self.set_undefined_options('install', ('record', 'record'))
- # Should this be moved to the if statement below? It's not used
- # elsewhere
-        normpath = set(map(normalize_path, sys.path))
- self.all_site_dirs = get_site_dirs()
- if self.site_dirs is not None:
- site_dirs = [
- os.path.expanduser(s.strip()) for s in
- self.site_dirs.split(',')
- ]
- for d in site_dirs:
- if not os.path.isdir(d):
- log.warn("%s (in --site-dirs) does not exist", d)
- elif normalize_path(d) not in normpath:
- raise DistutilsOptionError(
- d + " (in --site-dirs) is not on sys.path"
- )
- else:
- self.all_site_dirs.append(normalize_path(d))
- if not self.editable:
- self.check_site_dir()
- self.index_url = self.index_url or "https://pypi.org/simple/"
- self.shadow_path = self.all_site_dirs[:]
- for path_item in self.install_dir, normalize_path(self.script_dir):
- if path_item not in self.shadow_path:
- self.shadow_path.insert(0, path_item)
-
- if self.allow_hosts is not None:
- hosts = [s.strip() for s in self.allow_hosts.split(',')]
- else:
- hosts = ['*']
- if self.package_index is None:
- self.package_index = self.create_index(
- self.index_url, search_path=self.shadow_path, hosts=hosts,
- )
- self.local_index = Environment(self.shadow_path + sys.path)
-
- if self.find_links is not None:
- if isinstance(self.find_links, str):
- self.find_links = self.find_links.split()
- else:
- self.find_links = []
- if self.local_snapshots_ok:
- self.package_index.scan_egg_links(self.shadow_path + sys.path)
- if not self.no_find_links:
- self.package_index.add_find_links(self.find_links)
- self.set_undefined_options('install_lib', ('optimize', 'optimize'))
- if not isinstance(self.optimize, int):
- try:
- self.optimize = int(self.optimize)
- if not (0 <= self.optimize <= 2):
- raise ValueError
- except ValueError as e:
- raise DistutilsOptionError(
- "--optimize must be 0, 1, or 2"
- ) from e
-
- if self.editable and not self.build_directory:
- raise DistutilsArgError(
- "Must specify a build directory (-b) when using --editable"
- )
- if not self.args:
- raise DistutilsArgError(
- "No urls, filenames, or requirements specified (see --help)")
-
- self.outputs = []
-
- def _fix_install_dir_for_user_site(self):
- """
- Fix the install_dir if "--user" was used.
- """
- if not self.user or not site.ENABLE_USER_SITE:
- return
-
- self.create_home_path()
- if self.install_userbase is None:
- msg = "User base directory is not specified"
- raise DistutilsPlatformError(msg)
- self.install_base = self.install_platbase = self.install_userbase
- scheme_name = os.name.replace('posix', 'unix') + '_user'
- self.select_scheme(scheme_name)
-
- def _expand_attrs(self, attrs):
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- if os.name == 'posix' or os.name == 'nt':
- val = os.path.expanduser(val)
- val = subst_vars(val, self.config_vars)
- setattr(self, attr, val)
-
- def expand_basedirs(self):
- """Calls `os.path.expanduser` on install_base, install_platbase and
- root."""
- self._expand_attrs(['install_base', 'install_platbase', 'root'])
-
- def expand_dirs(self):
- """Calls `os.path.expanduser` on install dirs."""
- dirs = [
- 'install_purelib',
- 'install_platlib',
- 'install_lib',
- 'install_headers',
- 'install_scripts',
- 'install_data',
- ]
- self._expand_attrs(dirs)
-
- def run(self, show_deprecation=True):
- if show_deprecation:
- self.announce(
- "WARNING: The easy_install command is deprecated "
- "and will be removed in a future version.",
- log.WARN,
- )
- if self.verbose != self.distribution.verbose:
- log.set_verbosity(self.verbose)
- try:
- for spec in self.args:
- self.easy_install(spec, not self.no_deps)
- if self.record:
- outputs = self.outputs
- if self.root: # strip any package prefix
- root_len = len(self.root)
- for counter in range(len(outputs)):
- outputs[counter] = outputs[counter][root_len:]
- from distutils import file_util
-
- self.execute(
- file_util.write_file, (self.record, outputs),
- "writing list of installed files to '%s'" %
- self.record
- )
- self.warn_deprecated_options()
- finally:
- log.set_verbosity(self.distribution.verbose)
-
- def pseudo_tempname(self):
- """Return a pseudo-tempname base in the install directory.
- This code is intentionally naive; if a malicious party can write to
- the target directory you're already in deep doodoo.
- """
- try:
- pid = os.getpid()
- except Exception:
- pid = random.randint(0, sys.maxsize)
- return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
-
- def warn_deprecated_options(self):
- pass
-
- def check_site_dir(self): # noqa: C901 # is too complex (12) # FIXME
- """Verify that self.install_dir is .pth-capable dir, if needed"""
-
- instdir = normalize_path(self.install_dir)
- pth_file = os.path.join(instdir, 'easy-install.pth')
-
- if not os.path.exists(instdir):
- try:
- os.makedirs(instdir)
- except (OSError, IOError):
- self.cant_write_to_target()
-
- # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
- is_site_dir = instdir in self.all_site_dirs
-
- if not is_site_dir and not self.multi_version:
- # No? Then directly test whether it does .pth file processing
- is_site_dir = self.check_pth_processing()
- else:
- # make sure we can write to target dir
- testfile = self.pseudo_tempname() + '.write-test'
- test_exists = os.path.exists(testfile)
- try:
- if test_exists:
- os.unlink(testfile)
- open(testfile, 'w').close()
- os.unlink(testfile)
- except (OSError, IOError):
- self.cant_write_to_target()
-
- if not is_site_dir and not self.multi_version:
- # Can't install non-multi to non-site dir with easy_install
- pythonpath = os.environ.get('PYTHONPATH', '')
- log.warn(self.__no_default_msg, self.install_dir, pythonpath)
-
- if is_site_dir:
- if self.pth_file is None:
- self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
- else:
- self.pth_file = None
-
- if self.multi_version and not os.path.exists(pth_file):
- self.pth_file = None # don't create a .pth file
- self.install_dir = instdir
-
- __cant_write_msg = textwrap.dedent("""
- can't create or remove files in install directory
-
- The following error occurred while trying to add or remove files in the
- installation directory:
-
- %s
-
- The installation directory you specified (via --install-dir, --prefix, or
- the distutils default setting) was:
-
- %s
- """).lstrip() # noqa
-
- __not_exists_id = textwrap.dedent("""
- This directory does not currently exist. Please create it and try again, or
- choose a different installation directory (using the -d or --install-dir
- option).
- """).lstrip() # noqa
-
- __access_msg = textwrap.dedent("""
- Perhaps your account does not have write access to this directory? If the
- installation directory is a system-owned directory, you may need to sign in
- as the administrator or "root" account. If you do not have administrative
- access to this machine, you may wish to choose a different installation
- directory, preferably one that is listed in your PYTHONPATH environment
- variable.
-
- For information on other options, you may wish to consult the
- documentation at:
-
- https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
-
- Please make the appropriate changes for your system and try again.
- """).lstrip() # noqa
-
- def cant_write_to_target(self):
- msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
-
- if not os.path.exists(self.install_dir):
- msg += '\n' + self.__not_exists_id
- else:
- msg += '\n' + self.__access_msg
- raise DistutilsError(msg)
-
- def check_pth_processing(self):
- """Empirically verify whether .pth files are supported in inst. dir"""
- instdir = self.install_dir
- log.info("Checking .pth file support in %s", instdir)
- pth_file = self.pseudo_tempname() + ".pth"
- ok_file = pth_file + '.ok'
- ok_exists = os.path.exists(ok_file)
- tmpl = _one_liner("""
- import os
- f = open({ok_file!r}, 'w')
- f.write('OK')
- f.close()
- """) + '\n'
- try:
- if ok_exists:
- os.unlink(ok_file)
- dirname = os.path.dirname(ok_file)
- os.makedirs(dirname, exist_ok=True)
- f = open(pth_file, 'w')
- except (OSError, IOError):
- self.cant_write_to_target()
- else:
- try:
- f.write(tmpl.format(**locals()))
- f.close()
- f = None
- executable = sys.executable
- if os.name == 'nt':
- dirname, basename = os.path.split(executable)
- alt = os.path.join(dirname, 'pythonw.exe')
- use_alt = (
- basename.lower() == 'python.exe' and
- os.path.exists(alt)
- )
- if use_alt:
- # use pythonw.exe to avoid opening a console window
- executable = alt
-
- from distutils.spawn import spawn
-
- spawn([executable, '-E', '-c', 'pass'], 0)
-
- if os.path.exists(ok_file):
- log.info(
- "TEST PASSED: %s appears to support .pth files",
- instdir
- )
- return True
- finally:
- if f:
- f.close()
- if os.path.exists(ok_file):
- os.unlink(ok_file)
- if os.path.exists(pth_file):
- os.unlink(pth_file)
- if not self.multi_version:
- log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
- return False
-
- def install_egg_scripts(self, dist):
- """Write all the scripts for `dist`, unless scripts are excluded"""
- if not self.exclude_scripts and dist.metadata_isdir('scripts'):
- for script_name in dist.metadata_listdir('scripts'):
- if dist.metadata_isdir('scripts/' + script_name):
- # The "script" is a directory, likely a Python 3
- # __pycache__ directory, so skip it.
- continue
- self.install_script(
- dist, script_name,
- dist.get_metadata('scripts/' + script_name)
- )
- self.install_wrapper_scripts(dist)
-
- def add_output(self, path):
- if os.path.isdir(path):
- for base, dirs, files in os.walk(path):
- for filename in files:
- self.outputs.append(os.path.join(base, filename))
- else:
- self.outputs.append(path)
-
- def not_editable(self, spec):
- if self.editable:
- raise DistutilsArgError(
- "Invalid argument %r: you can't use filenames or URLs "
- "with --editable (except via the --find-links option)."
- % (spec,)
- )
-
- def check_editable(self, spec):
- if not self.editable:
- return
-
- if os.path.exists(os.path.join(self.build_directory, spec.key)):
- raise DistutilsArgError(
- "%r already exists in %s; can't do a checkout there" %
- (spec.key, self.build_directory)
- )
-
- @contextlib.contextmanager
- def _tmpdir(self):
- tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
- try:
- # cast to str as workaround for #709 and #710 and #712
- yield str(tmpdir)
- finally:
- os.path.exists(tmpdir) and rmtree(tmpdir)
-
- def easy_install(self, spec, deps=False):
- with self._tmpdir() as tmpdir:
- if not isinstance(spec, Requirement):
- if URL_SCHEME(spec):
- # It's a url, download it to tmpdir and process
- self.not_editable(spec)
- dl = self.package_index.download(spec, tmpdir)
- return self.install_item(None, dl, tmpdir, deps, True)
-
- elif os.path.exists(spec):
- # Existing file or directory, just process it directly
- self.not_editable(spec)
- return self.install_item(None, spec, tmpdir, deps, True)
- else:
- spec = parse_requirement_arg(spec)
-
- self.check_editable(spec)
- dist = self.package_index.fetch_distribution(
- spec, tmpdir, self.upgrade, self.editable,
- not self.always_copy, self.local_index
- )
- if dist is None:
- msg = "Could not find suitable distribution for %r" % spec
- if self.always_copy:
- msg += " (--always-copy skips system and development eggs)"
- raise DistutilsError(msg)
- elif dist.precedence == DEVELOP_DIST:
- # .egg-info dists don't need installing, just process deps
- self.process_distribution(spec, dist, deps, "Using")
- return dist
- else:
- return self.install_item(spec, dist.location, tmpdir, deps)
-
- def install_item(self, spec, download, tmpdir, deps, install_needed=False):
-
-        # Installation is also needed if the file is in tmpdir or is not an egg
- install_needed = install_needed or self.always_copy
- install_needed = install_needed or os.path.dirname(download) == tmpdir
- install_needed = install_needed or not download.endswith('.egg')
- install_needed = install_needed or (
- self.always_copy_from is not None and
- os.path.dirname(normalize_path(download)) ==
- normalize_path(self.always_copy_from)
- )
-
- if spec and not install_needed:
- # at this point, we know it's a local .egg, we just don't know if
- # it's already installed.
- for dist in self.local_index[spec.project_name]:
- if dist.location == download:
- break
- else:
- install_needed = True # it's not in the local index
-
- log.info("Processing %s", os.path.basename(download))
-
- if install_needed:
- dists = self.install_eggs(spec, download, tmpdir)
- for dist in dists:
- self.process_distribution(spec, dist, deps)
- else:
- dists = [self.egg_distribution(download)]
- self.process_distribution(spec, dists[0], deps, "Using")
-
- if spec is not None:
- for dist in dists:
- if dist in spec:
- return dist
-
- def select_scheme(self, name):
- """Sets the install directories by applying the install schemes."""
- # it's the caller's problem if they supply a bad name!
- scheme = INSTALL_SCHEMES[name]
- for key in SCHEME_KEYS:
- attrname = 'install_' + key
- if getattr(self, attrname) is None:
- setattr(self, attrname, scheme[key])
-
- # FIXME: 'easy_install.process_distribution' is too complex (12)
- def process_distribution( # noqa: C901
- self, requirement, dist, deps=True, *info,
- ):
- self.update_pth(dist)
- self.package_index.add(dist)
- if dist in self.local_index[dist.key]:
- self.local_index.remove(dist)
- self.local_index.add(dist)
- self.install_egg_scripts(dist)
- self.installed_projects[dist.key] = dist
- log.info(self.installation_report(requirement, dist, *info))
- if (dist.has_metadata('dependency_links.txt') and
- not self.no_find_links):
- self.package_index.add_find_links(
- dist.get_metadata_lines('dependency_links.txt')
- )
- if not deps and not self.always_copy:
- return
- elif requirement is not None and dist.key != requirement.key:
- log.warn("Skipping dependencies for %s", dist)
- return # XXX this is not the distribution we were looking for
- elif requirement is None or dist not in requirement:
- # if we wound up with a different version, resolve what we've got
- distreq = dist.as_requirement()
- requirement = Requirement(str(distreq))
- log.info("Processing dependencies for %s", requirement)
- try:
- distros = WorkingSet([]).resolve(
- [requirement], self.local_index, self.easy_install
- )
- except DistributionNotFound as e:
- raise DistutilsError(str(e)) from e
- except VersionConflict as e:
- raise DistutilsError(e.report()) from e
- if self.always_copy or self.always_copy_from:
- # Force all the relevant distros to be copied or activated
- for dist in distros:
- if dist.key not in self.installed_projects:
- self.easy_install(dist.as_requirement())
- log.info("Finished processing dependencies for %s", requirement)
-
- def should_unzip(self, dist):
- if self.zip_ok is not None:
- return not self.zip_ok
- if dist.has_metadata('not-zip-safe'):
- return True
- if not dist.has_metadata('zip-safe'):
- return True
- return False
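-    # Decision sketch: --zip-ok (-z) keeps eggs zipped and --always-unzip
-    # (-Z) forces extraction (it negates zip-ok); with neither given, an
-    # egg is extracted unless its metadata carries an explicit zip-safe flag.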
-
- def maybe_move(self, spec, dist_filename, setup_base):
- dst = os.path.join(self.build_directory, spec.key)
- if os.path.exists(dst):
- msg = (
- "%r already exists in %s; build directory %s will not be kept"
- )
- log.warn(msg, spec.key, self.build_directory, setup_base)
- return setup_base
- if os.path.isdir(dist_filename):
- setup_base = dist_filename
- else:
- if os.path.dirname(dist_filename) == setup_base:
- os.unlink(dist_filename) # get it out of the tmp dir
- contents = os.listdir(setup_base)
- if len(contents) == 1:
- dist_filename = os.path.join(setup_base, contents[0])
- if os.path.isdir(dist_filename):
- # if the only thing there is a directory, move it instead
- setup_base = dist_filename
- ensure_directory(dst)
- shutil.move(setup_base, dst)
- return dst
-
- def install_wrapper_scripts(self, dist):
- if self.exclude_scripts:
- return
- for args in ScriptWriter.best().get_args(dist):
- self.write_script(*args)
-
- def install_script(self, dist, script_name, script_text, dev_path=None):
- """Generate a legacy script wrapper and install it"""
- spec = str(dist.as_requirement())
- is_script = is_python_script(script_text, script_name)
-
- if is_script:
- body = self._load_template(dev_path) % locals()
- script_text = ScriptWriter.get_header(script_text) + body
- self.write_script(script_name, _to_bytes(script_text), 'b')
-
- @staticmethod
- def _load_template(dev_path):
- """
- There are a couple of template scripts in the package. This
- function loads one of them and prepares it for use.
- """
- # See https://github.com/pypa/setuptools/issues/134 for info
- # on script file naming and downstream issues with SVR4
- name = 'script.tmpl'
- if dev_path:
- name = name.replace('.tmpl', ' (dev).tmpl')
-
- raw_bytes = resource_string('setuptools', name)
- return raw_bytes.decode('utf-8')
-
- def write_script(self, script_name, contents, mode="t", blockers=()):
- """Write an executable file to the scripts directory"""
- self.delete_blockers( # clean up old .py/.pyw w/o a script
- [os.path.join(self.script_dir, x) for x in blockers]
- )
- log.info("Installing %s script to %s", script_name, self.script_dir)
- target = os.path.join(self.script_dir, script_name)
- self.add_output(target)
-
- if self.dry_run:
- return
-
- mask = current_umask()
- ensure_directory(target)
- if os.path.exists(target):
- os.unlink(target)
- with open(target, "w" + mode) as f:
- f.write(contents)
- chmod(target, 0o777 - mask)
-
- def install_eggs(self, spec, dist_filename, tmpdir):
- # .egg dirs or files are already built, so just return them
- installer_map = {
- '.egg': self.install_egg,
- '.exe': self.install_exe,
- '.whl': self.install_wheel,
- }
- try:
- install_dist = installer_map[
- dist_filename.lower()[-4:]
- ]
- except KeyError:
- pass
- else:
- return [install_dist(dist_filename, tmpdir)]
-
- # Anything else, try to extract and build
- setup_base = tmpdir
- if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
- unpack_archive(dist_filename, tmpdir, self.unpack_progress)
- elif os.path.isdir(dist_filename):
- setup_base = os.path.abspath(dist_filename)
-
- if (setup_base.startswith(tmpdir) # something we downloaded
- and self.build_directory and spec is not None):
- setup_base = self.maybe_move(spec, dist_filename, setup_base)
-
- # Find the setup.py file
- setup_script = os.path.join(setup_base, 'setup.py')
-
- if not os.path.exists(setup_script):
- setups = glob(os.path.join(setup_base, '*', 'setup.py'))
- if not setups:
- raise DistutilsError(
- "Couldn't find a setup script in %s" %
- os.path.abspath(dist_filename)
- )
- if len(setups) > 1:
- raise DistutilsError(
- "Multiple setup scripts in %s" %
- os.path.abspath(dist_filename)
- )
- setup_script = setups[0]
-
- # Now run it, and return the result
- if self.editable:
- log.info(self.report_editable(spec, setup_script))
- return []
- else:
- return self.build_and_install(setup_script, setup_base)
-
- def egg_distribution(self, egg_path):
- if os.path.isdir(egg_path):
- metadata = PathMetadata(egg_path, os.path.join(egg_path,
- 'EGG-INFO'))
- else:
- metadata = EggMetadata(zipimport.zipimporter(egg_path))
- return Distribution.from_filename(egg_path, metadata=metadata)
-
- # FIXME: 'easy_install.install_egg' is too complex (11)
- def install_egg(self, egg_path, tmpdir): # noqa: C901
- destination = os.path.join(
- self.install_dir,
- os.path.basename(egg_path),
- )
- destination = os.path.abspath(destination)
- if not self.dry_run:
- ensure_directory(destination)
-
- dist = self.egg_distribution(egg_path)
- if not samefile(egg_path, destination):
- if os.path.isdir(destination) and not os.path.islink(destination):
- dir_util.remove_tree(destination, dry_run=self.dry_run)
- elif os.path.exists(destination):
- self.execute(
- os.unlink,
- (destination,),
- "Removing " + destination,
- )
- try:
- new_dist_is_zipped = False
- if os.path.isdir(egg_path):
- if egg_path.startswith(tmpdir):
- f, m = shutil.move, "Moving"
- else:
- f, m = shutil.copytree, "Copying"
- elif self.should_unzip(dist):
- self.mkpath(destination)
- f, m = self.unpack_and_compile, "Extracting"
- else:
- new_dist_is_zipped = True
- if egg_path.startswith(tmpdir):
- f, m = shutil.move, "Moving"
- else:
- f, m = shutil.copy2, "Copying"
- self.execute(
- f,
- (egg_path, destination),
- (m + " %s to %s") % (
- os.path.basename(egg_path),
- os.path.dirname(destination)
- ),
- )
- update_dist_caches(
- destination,
- fix_zipimporter_caches=new_dist_is_zipped,
- )
- except Exception:
- update_dist_caches(destination, fix_zipimporter_caches=False)
- raise
-
- self.add_output(destination)
- return self.egg_distribution(destination)
-
- def install_exe(self, dist_filename, tmpdir):
- # See if it's valid, get data
- cfg = extract_wininst_cfg(dist_filename)
- if cfg is None:
- raise DistutilsError(
- "%s is not a valid distutils Windows .exe" % dist_filename
- )
- # Create a dummy distribution object until we build the real distro
- dist = Distribution(
- None,
- project_name=cfg.get('metadata', 'name'),
- version=cfg.get('metadata', 'version'), platform=get_platform(),
- )
-
- # Convert the .exe to an unpacked egg
- egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
- dist.location = egg_path
- egg_tmp = egg_path + '.tmp'
- _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
- pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
- ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
- dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
- self.exe_to_egg(dist_filename, egg_tmp)
-
- # Write EGG-INFO/PKG-INFO
- if not os.path.exists(pkg_inf):
- f = open(pkg_inf, 'w')
- f.write('Metadata-Version: 1.0\n')
- for k, v in cfg.items('metadata'):
- if k != 'target_version':
- f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
- f.close()
- script_dir = os.path.join(_egg_info, 'scripts')
- # delete entry-point scripts to avoid duping
- self.delete_blockers([
- os.path.join(script_dir, args[0])
- for args in ScriptWriter.get_args(dist)
- ])
- # Build .egg file from tmpdir
- bdist_egg.make_zipfile(
- egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
- )
- # install the .egg
- return self.install_egg(egg_path, tmpdir)
-
- # FIXME: 'easy_install.exe_to_egg' is too complex (12)
- def exe_to_egg(self, dist_filename, egg_tmp): # noqa: C901
- """Extract a bdist_wininst to the directories an egg would use"""
- # Check for .pth file and set up prefix translations
- prefixes = get_exe_prefixes(dist_filename)
- to_compile = []
- native_libs = []
- top_level = {}
-
- def process(src, dst):
- s = src.lower()
- for old, new in prefixes:
- if s.startswith(old):
- src = new + src[len(old):]
- parts = src.split('/')
- dst = os.path.join(egg_tmp, *parts)
- dl = dst.lower()
- if dl.endswith('.pyd') or dl.endswith('.dll'):
- parts[-1] = bdist_egg.strip_module(parts[-1])
- top_level[os.path.splitext(parts[0])[0]] = 1
- native_libs.append(src)
-                    elif dl.endswith('.py') and old != 'scripts/':
- top_level[os.path.splitext(parts[0])[0]] = 1
- to_compile.append(dst)
- return dst
- if not src.endswith('.pth'):
- log.warn("WARNING: can't process %s", src)
- return None
-
- # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
- unpack_archive(dist_filename, egg_tmp, process)
- stubs = []
- for res in native_libs:
- if res.lower().endswith('.pyd'): # create stubs for .pyd's
- parts = res.split('/')
- resource = parts[-1]
- parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
- pyfile = os.path.join(egg_tmp, *parts)
- to_compile.append(pyfile)
- stubs.append(pyfile)
- bdist_egg.write_stub(resource, pyfile)
- self.byte_compile(to_compile) # compile .py's
- bdist_egg.write_safety_flag(
- os.path.join(egg_tmp, 'EGG-INFO'),
- bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
-
- for name in 'top_level', 'native_libs':
- if locals()[name]:
- txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
- if not os.path.exists(txt):
- f = open(txt, 'w')
- f.write('\n'.join(locals()[name]) + '\n')
- f.close()
-
- def install_wheel(self, wheel_path, tmpdir):
- wheel = Wheel(wheel_path)
- assert wheel.is_compatible()
- destination = os.path.join(self.install_dir, wheel.egg_name())
- destination = os.path.abspath(destination)
- if not self.dry_run:
- ensure_directory(destination)
- if os.path.isdir(destination) and not os.path.islink(destination):
- dir_util.remove_tree(destination, dry_run=self.dry_run)
- elif os.path.exists(destination):
- self.execute(
- os.unlink,
- (destination,),
- "Removing " + destination,
- )
- try:
- self.execute(
- wheel.install_as_egg,
- (destination,),
- ("Installing %s to %s") % (
- os.path.basename(wheel_path),
- os.path.dirname(destination)
- ),
- )
- finally:
- update_dist_caches(destination, fix_zipimporter_caches=False)
- self.add_output(destination)
- return self.egg_distribution(destination)
-
- __mv_warning = textwrap.dedent("""
- Because this distribution was installed --multi-version, before you can
- import modules from this package in an application, you will need to
- 'import pkg_resources' and then use a 'require()' call similar to one of
- these examples, in order to select the desired version:
-
- pkg_resources.require("%(name)s") # latest installed version
- pkg_resources.require("%(name)s==%(version)s") # this exact version
- pkg_resources.require("%(name)s>=%(version)s") # this version or higher
- """).lstrip() # noqa
-
- __id_warning = textwrap.dedent("""
- Note also that the installation directory must be on sys.path at runtime for
- this to work. (e.g. by being the application's script directory, by being on
- PYTHONPATH, or by being added to sys.path by your code.)
- """) # noqa
-
- def installation_report(self, req, dist, what="Installed"):
- """Helpful installation message for display to package users"""
- msg = "\n%(what)s %(eggloc)s%(extras)s"
- if self.multi_version and not self.no_report:
- msg += '\n' + self.__mv_warning
- if self.install_dir not in map(normalize_path, sys.path):
- msg += '\n' + self.__id_warning
-
- eggloc = dist.location
- name = dist.project_name
- version = dist.version
- extras = '' # TODO: self.report_extras(req, dist)
- return msg % locals()
-
- __editable_msg = textwrap.dedent("""
- Extracted editable version of %(spec)s to %(dirname)s
-
- If it uses setuptools in its setup script, you can activate it in
- "development" mode by going to that directory and running::
-
- %(python)s setup.py develop
-
- See the setuptools documentation for the "develop" command for more info.
- """).lstrip() # noqa
-
- def report_editable(self, spec, setup_script):
- dirname = os.path.dirname(setup_script)
- python = sys.executable
- return '\n' + self.__editable_msg % locals()
-
- def run_setup(self, setup_script, setup_base, args):
- sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
- sys.modules.setdefault('distutils.command.egg_info', egg_info)
-
- args = list(args)
- if self.verbose > 2:
- v = 'v' * (self.verbose - 1)
- args.insert(0, '-' + v)
- elif self.verbose < 2:
- args.insert(0, '-q')
- if self.dry_run:
- args.insert(0, '-n')
- log.info(
- "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
- )
- try:
- run_setup(setup_script, args)
- except SystemExit as v:
- raise DistutilsError(
- "Setup script exited with %s" % (v.args[0],)
- ) from v
-
- def build_and_install(self, setup_script, setup_base):
- args = ['bdist_egg', '--dist-dir']
-
- dist_dir = tempfile.mkdtemp(
- prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
- )
- try:
- self._set_fetcher_options(os.path.dirname(setup_script))
- args.append(dist_dir)
-
- self.run_setup(setup_script, setup_base, args)
- all_eggs = Environment([dist_dir])
- eggs = []
- for key in all_eggs:
- for dist in all_eggs[key]:
- eggs.append(self.install_egg(dist.location, setup_base))
- if not eggs and not self.dry_run:
- log.warn("No eggs found in %s (setup script problem?)",
- dist_dir)
- return eggs
- finally:
- rmtree(dist_dir)
- log.set_verbosity(self.verbose) # restore our log verbosity
-
- def _set_fetcher_options(self, base):
- """
- When easy_install is about to run bdist_egg on a source dist, that
- source dist might have 'setup_requires' directives, requiring
- additional fetching. Ensure the fetcher options given to easy_install
- are available to that command as well.
- """
- # find the fetch options from easy_install and write them out
- # to the setup.cfg file.
- ei_opts = self.distribution.get_option_dict('easy_install').copy()
- fetch_directives = (
- 'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
- )
- fetch_options = {}
- for key, val in ei_opts.items():
- if key not in fetch_directives:
- continue
- fetch_options[key] = val[1]
- # create a settings dictionary suitable for `edit_config`
- settings = dict(easy_install=fetch_options)
- cfg_filename = os.path.join(base, 'setup.cfg')
- setopt.edit_config(cfg_filename, settings)
-
- def update_pth(self, dist): # noqa: C901 # is too complex (11) # FIXME
- if self.pth_file is None:
- return
-
- for d in self.pth_file[dist.key]: # drop old entries
- if not self.multi_version and d.location == dist.location:
- continue
-
- log.info("Removing %s from easy-install.pth file", d)
- self.pth_file.remove(d)
- if d.location in self.shadow_path:
- self.shadow_path.remove(d.location)
-
- if not self.multi_version:
- if dist.location in self.pth_file.paths:
- log.info(
- "%s is already the active version in easy-install.pth",
- dist,
- )
- else:
- log.info("Adding %s to easy-install.pth file", dist)
- self.pth_file.add(dist) # add new entry
- if dist.location not in self.shadow_path:
- self.shadow_path.append(dist.location)
-
- if self.dry_run:
- return
-
- self.pth_file.save()
-
- if dist.key != 'setuptools':
- return
-
- # Ensure that setuptools itself never becomes unavailable!
- # XXX should this check for latest version?
- filename = os.path.join(self.install_dir, 'setuptools.pth')
- if os.path.islink(filename):
- os.unlink(filename)
- with open(filename, 'wt') as f:
- f.write(self.pth_file.make_relative(dist.location) + '\n')
-
- def unpack_progress(self, src, dst):
- # Progress filter for unpacking
- log.debug("Unpacking %s to %s", src, dst)
- return dst # only unpack-and-compile skips files for dry run
-
- def unpack_and_compile(self, egg_path, destination):
- to_compile = []
- to_chmod = []
-
- def pf(src, dst):
- if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
- to_compile.append(dst)
- elif dst.endswith('.dll') or dst.endswith('.so'):
- to_chmod.append(dst)
- self.unpack_progress(src, dst)
- return not self.dry_run and dst or None
-
- unpack_archive(egg_path, destination, pf)
- self.byte_compile(to_compile)
- if not self.dry_run:
- for f in to_chmod:
- mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
- chmod(f, mode)
-
- def byte_compile(self, to_compile):
- if sys.dont_write_bytecode:
- return
-
- from distutils.util import byte_compile
-
- try:
- # try to make the byte compile messages quieter
- log.set_verbosity(self.verbose - 1)
-
- byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
- if self.optimize:
- byte_compile(
- to_compile, optimize=self.optimize, force=1,
- dry_run=self.dry_run,
- )
- finally:
- log.set_verbosity(self.verbose) # restore original verbosity
-
- __no_default_msg = textwrap.dedent("""
- bad install directory or PYTHONPATH
-
- You are attempting to install a package to a directory that is not
- on PYTHONPATH and which Python does not read ".pth" files from. The
- installation directory you specified (via --install-dir, --prefix, or
- the distutils default setting) was:
-
- %s
-
- and your PYTHONPATH environment variable currently contains:
-
- %r
-
- Here are some of your options for correcting the problem:
-
- * You can choose a different installation directory, i.e., one that is
- on PYTHONPATH or supports .pth files
-
- * You can add the installation directory to the PYTHONPATH environment
- variable. (It must then also be on PYTHONPATH whenever you run
- Python and want to use the package(s) you are installing.)
-
- * You can set up the installation directory to support ".pth" files by
- using one of the approaches described here:
-
- https://setuptools.pypa.io/en/latest/deprecated/easy_install.html#custom-installation-locations
-
-
- Please make the appropriate changes for your system and try again.
- """).strip()
-
- def create_home_path(self):
- """Create directories under ~."""
- if not self.user:
- return
- home = convert_path(os.path.expanduser("~"))
- for name, path in self.config_vars.items():
- if path.startswith(home) and not os.path.isdir(path):
- self.debug_print("os.makedirs('%s', 0o700)" % path)
- os.makedirs(path, 0o700)
-
- INSTALL_SCHEMES = dict(
- posix=dict(
- install_dir='$base/lib/python$py_version_short/site-packages',
- script_dir='$base/bin',
- ),
- )
-
- DEFAULT_SCHEME = dict(
- install_dir='$base/Lib/site-packages',
- script_dir='$base/Scripts',
- )
-
- def _expand(self, *attrs):
- config_vars = self.get_finalized_command('install').config_vars
-
- if self.prefix:
- # Set default install_dir/scripts from --prefix
- config_vars = config_vars.copy()
- config_vars['base'] = self.prefix
- scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
- for attr, val in scheme.items():
- if getattr(self, attr, None) is None:
- setattr(self, attr, val)
-
- from distutils.util import subst_vars
-
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- val = subst_vars(val, config_vars)
- if os.name == 'posix':
- val = os.path.expanduser(val)
- setattr(self, attr, val)
-
-
-def _pythonpath():
- items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
- return filter(None, items)
-
-
-def get_site_dirs():
- """
- Return a list of 'site' dirs
- """
-
- sitedirs = []
-
- # start with PYTHONPATH
- sitedirs.extend(_pythonpath())
-
- prefixes = [sys.prefix]
- if sys.exec_prefix != sys.prefix:
- prefixes.append(sys.exec_prefix)
- for prefix in prefixes:
- if not prefix:
- continue
-
- if sys.platform in ('os2emx', 'riscos'):
- sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
- elif os.sep == '/':
- sitedirs.extend([
- os.path.join(
- prefix,
- "lib",
- "python{}.{}".format(*sys.version_info),
- "site-packages",
- ),
- os.path.join(prefix, "lib", "site-python"),
- ])
- else:
- sitedirs.extend([
- prefix,
- os.path.join(prefix, "lib", "site-packages"),
- ])
- if sys.platform != 'darwin':
- continue
-
- # for framework builds *only* we add the standard Apple
- # locations. Currently only per-user, but /Library and
- # /Network/Library could be added too
- if 'Python.framework' not in prefix:
- continue
-
- home = os.environ.get('HOME')
- if not home:
- continue
-
- home_sp = os.path.join(
- home,
- 'Library',
- 'Python',
- '{}.{}'.format(*sys.version_info),
- 'site-packages',
- )
- sitedirs.append(home_sp)
- lib_paths = get_path('purelib'), get_path('platlib')
-
- sitedirs.extend(s for s in lib_paths if s not in sitedirs)
-
- if site.ENABLE_USER_SITE:
- sitedirs.append(site.USER_SITE)
-
- with contextlib.suppress(AttributeError):
- sitedirs.extend(site.getsitepackages())
-
- sitedirs = list(map(normalize_path, sitedirs))
-
- return sitedirs
-
-
-def expand_paths(inputs): # noqa: C901 # is too complex (11) # FIXME
- """Yield sys.path directories that might contain "old-style" packages"""
-
- seen = {}
-
- for dirname in inputs:
- dirname = normalize_path(dirname)
- if dirname in seen:
- continue
-
- seen[dirname] = 1
- if not os.path.isdir(dirname):
- continue
-
- files = os.listdir(dirname)
- yield dirname, files
-
- for name in files:
- if not name.endswith('.pth'):
- # We only care about the .pth files
- continue
- if name in ('easy-install.pth', 'setuptools.pth'):
- # Ignore .pth files that we control
- continue
-
- # Read the .pth file
- f = open(os.path.join(dirname, name))
- lines = list(yield_lines(f))
- f.close()
-
- # Yield existing non-dupe, non-import directory lines from it
- for line in lines:
- if line.startswith("import"):
- continue
-
- line = normalize_path(line.rstrip())
- if line in seen:
- continue
-
- seen[line] = 1
- if not os.path.isdir(line):
- continue
-
- yield line, os.listdir(line)
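-
-
-# Usage sketch (illustrative):
-#
-#     for dirname, files in expand_paths(sys.path):
-#         ...  # each pair is an existing directory plus its listing,
-#              # including directories named by third-party .pth files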
-
-
-def extract_wininst_cfg(dist_filename):
- """Extract configuration data from a bdist_wininst .exe
-
- Returns a configparser.RawConfigParser, or None
- """
- f = open(dist_filename, 'rb')
- try:
- endrec = zipfile._EndRecData(f)
- if endrec is None:
- return None
-
- prepended = (endrec[9] - endrec[5]) - endrec[6]
- if prepended < 12: # no wininst data here
- return None
- f.seek(prepended - 12)
-
- tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
- if tag not in (0x1234567A, 0x1234567B):
- return None # not a valid tag
-
- f.seek(prepended - (12 + cfglen))
- init = {'version': '', 'target_version': ''}
- cfg = configparser.RawConfigParser(init)
- try:
- part = f.read(cfglen)
- # Read up to the first null byte.
- config = part.split(b'\0', 1)[0]
- # Now the config is in bytes, but for RawConfigParser, it should
- # be text, so decode it.
- config = config.decode(sys.getfilesystemencoding())
- cfg.read_file(io.StringIO(config))
- except configparser.Error:
- return None
- if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
- return None
- return cfg
-
- finally:
- f.close()
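-
-
-# Minimal usage sketch (hypothetical filename; the function returns None
-# for anything that is not a bdist_wininst installer):
-#
-#     cfg = extract_wininst_cfg('example-1.0.win32.exe')
-#     if cfg is not None:
-#         print(cfg.get('metadata', 'name'), cfg.get('metadata', 'version'))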
-
-
-def get_exe_prefixes(exe_filename):
- """Get exe->egg path translations for a given .exe file"""
-
- prefixes = [
- ('PURELIB/', ''),
- ('PLATLIB/pywin32_system32', ''),
- ('PLATLIB/', ''),
- ('SCRIPTS/', 'EGG-INFO/scripts/'),
- ('DATA/lib/site-packages', ''),
- ]
- z = zipfile.ZipFile(exe_filename)
- try:
- for info in z.infolist():
- name = info.filename
- parts = name.split('/')
- if len(parts) == 3 and parts[2] == 'PKG-INFO':
- if parts[1].endswith('.egg-info'):
- prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
- break
- if len(parts) != 2 or not name.endswith('.pth'):
- continue
- if name.endswith('-nspkg.pth'):
- continue
- if parts[0].upper() in ('PURELIB', 'PLATLIB'):
- contents = z.read(name).decode()
- for pth in yield_lines(contents):
- pth = pth.strip().replace('\\', '/')
- if not pth.startswith('import'):
- prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
- finally:
- z.close()
- prefixes = [(x.lower(), y) for x, y in prefixes]
- prefixes.sort()
- prefixes.reverse()
- return prefixes
-
-
-class PthDistributions(Environment):
- """A .pth file with Distribution paths in it"""
-
- dirty = False
-
- def __init__(self, filename, sitedirs=()):
- self.filename = filename
- self.sitedirs = list(map(normalize_path, sitedirs))
- self.basedir = normalize_path(os.path.dirname(self.filename))
- self._load()
- Environment.__init__(self, [], None, None)
- for path in yield_lines(self.paths):
- list(map(self.add, find_distributions(path, True)))
-
- def _load(self):
- self.paths = []
- saw_import = False
- seen = dict.fromkeys(self.sitedirs)
- if os.path.isfile(self.filename):
- f = open(self.filename, 'rt')
- for line in f:
- if line.startswith('import'):
- saw_import = True
- continue
- path = line.rstrip()
- self.paths.append(path)
- if not path.strip() or path.strip().startswith('#'):
- continue
- # skip non-existent paths, in case somebody deleted a package
- # manually, and duplicate paths as well
- path = self.paths[-1] = normalize_path(
- os.path.join(self.basedir, path)
- )
- if not os.path.exists(path) or path in seen:
- self.paths.pop() # skip it
- self.dirty = True # we cleaned up, so we're dirty now :)
- continue
- seen[path] = 1
- f.close()
-
- if self.paths and not saw_import:
- self.dirty = True # ensure anything we touch has import wrappers
- while self.paths and not self.paths[-1].strip():
- self.paths.pop()
-
- def save(self):
- """Write changed .pth file back to disk"""
- if not self.dirty:
- return
-
- rel_paths = list(map(self.make_relative, self.paths))
- if rel_paths:
- log.debug("Saving %s", self.filename)
- lines = self._wrap_lines(rel_paths)
- data = '\n'.join(lines) + '\n'
-
- if os.path.islink(self.filename):
- os.unlink(self.filename)
- with open(self.filename, 'wt') as f:
- f.write(data)
-
- elif os.path.exists(self.filename):
- log.debug("Deleting empty %s", self.filename)
- os.unlink(self.filename)
-
- self.dirty = False
-
- @staticmethod
- def _wrap_lines(lines):
- return lines
-
- def add(self, dist):
- """Add `dist` to the distribution map"""
- new_path = (
- dist.location not in self.paths and (
- dist.location not in self.sitedirs or
- # account for '.' being in PYTHONPATH
- dist.location == os.getcwd()
- )
- )
- if new_path:
- self.paths.append(dist.location)
- self.dirty = True
- Environment.add(self, dist)
-
- def remove(self, dist):
- """Remove `dist` from the distribution map"""
- while dist.location in self.paths:
- self.paths.remove(dist.location)
- self.dirty = True
- Environment.remove(self, dist)
-
- def make_relative(self, path):
- npath, last = os.path.split(normalize_path(path))
- baselen = len(self.basedir)
- parts = [last]
-        sep = '/' if os.altsep == '/' else os.sep
- while len(npath) >= baselen:
- if npath == self.basedir:
- parts.append(os.curdir)
- parts.reverse()
- return sep.join(parts)
- npath, last = os.path.split(npath)
- parts.append(last)
- else:
- return path
-
-
-class RewritePthDistributions(PthDistributions):
- @classmethod
- def _wrap_lines(cls, lines):
- yield cls.prelude
- for line in lines:
- yield line
- yield cls.postlude
-
- prelude = _one_liner("""
- import sys
- sys.__plen = len(sys.path)
- """)
- postlude = _one_liner("""
- import sys
- new = sys.path[sys.__plen:]
- del sys.path[sys.__plen:]
- p = getattr(sys, '__egginsert', 0)
- sys.path[p:p] = new
- sys.__egginsert = p + len(new)
- """)
-
-
-if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
- PthDistributions = RewritePthDistributions
-
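Assuming _one_liner (defined earlier in this module) collapses each template
to a single line, a rewritten easy-install.pth looks roughly like this, with
one distribution path per line between the prelude and postlude (the egg name
is hypothetical):

    import sys; sys.__plen = len(sys.path)
    ./Foo-1.0-py3.9.egg
    import sys; new = sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p = getattr(sys, '__egginsert', 0); sys.path[p:p] = new; sys.__egginsert = p + len(new)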
-
-def _first_line_re():
- """
- Return a regular expression based on first_line_re suitable for matching
- strings.
- """
- if isinstance(first_line_re.pattern, str):
- return first_line_re
-
- # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
- return re.compile(first_line_re.pattern.decode())
-
-
-def auto_chmod(func, arg, exc):
- if func in [os.unlink, os.remove] and os.name == 'nt':
- chmod(arg, stat.S_IWRITE)
- return func(arg)
-    et, ev, _ = sys.exc_info()
-    # Re-raise an exception of the original type, noting which call
-    # failed on which path (raising a bare tuple is invalid on Python 3).
-    raise et("%s (%s %s)" % (ev, func, arg)) from ev
-
-
-def update_dist_caches(dist_path, fix_zipimporter_caches):
- """
- Fix any globally cached `dist_path` related data
-
- `dist_path` should be a path of a newly installed egg distribution (zipped
- or unzipped).
-
- sys.path_importer_cache contains finder objects that have been cached when
- importing data from the original distribution. Any such finders need to be
- cleared since the replacement distribution might be packaged differently,
- e.g. a zipped egg distribution might get replaced with an unzipped egg
- folder or vice versa. Having the old finders cached may then cause Python
- to attempt loading modules from the replacement distribution using an
- incorrect loader.
-
- zipimport.zipimporter objects are Python loaders charged with importing
- data packaged inside zip archives. If stale loaders referencing the
-    original distribution are left behind, they can fail to load modules from
- the replacement distribution. E.g. if an old zipimport.zipimporter instance
- is used to load data from a new zipped egg archive, it may cause the
- operation to attempt to locate the requested data in the wrong location -
- one indicated by the original distribution's zip archive directory
- information. Such an operation may then fail outright, e.g. report having
- read a 'bad local file header', or even worse, it may fail silently &
- return invalid data.
-
- zipimport._zip_directory_cache contains cached zip archive directory
- information for all existing zipimport.zipimporter instances and all such
- instances connected to the same archive share the same cached directory
- information.
-
- If asked, and the underlying Python implementation allows it, we can fix
- all existing zipimport.zipimporter instances instead of having to track
- them down and remove them one by one, by updating their shared cached zip
- archive directory information. This, of course, assumes that the
- replacement distribution is packaged as a zipped egg.
-
- If not asked to fix existing zipimport.zipimporter instances, we still do
- our best to clear any remaining zipimport.zipimporter related cached data
- that might somehow later get used when attempting to load data from the new
- distribution and thus cause such load operations to fail. Note that when
- tracking down such remaining stale data, we can not catch every conceivable
- usage from here, and we clear only those that we know of and have found to
- cause problems if left alive. Any remaining caches should be updated by
-    whoever is in charge of maintaining them, i.e. they should be ready to
- handle us replacing their zip archives with new distributions at runtime.
-
- """
- # There are several other known sources of stale zipimport.zipimporter
- # instances that we do not clear here, but might if ever given a reason to
- # do so:
- # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
- # set') may contain distributions which may in turn contain their
- # zipimport.zipimporter loaders.
- # * Several zipimport.zipimporter loaders held by local variables further
- # up the function call stack when running the setuptools installation.
- # * Already loaded modules may have their __loader__ attribute set to the
- # exact loader instance used when importing them. Python 3.4 docs state
- # that this information is intended mostly for introspection and so is
- # not expected to cause us problems.
- normalized_path = normalize_path(dist_path)
- _uncache(normalized_path, sys.path_importer_cache)
- if fix_zipimporter_caches:
- _replace_zip_directory_cache_data(normalized_path)
- else:
- # Here, even though we do not want to fix existing and now stale
- # zipimporter cache information, we still want to remove it. Related to
- # Python's zip archive directory information cache, we clear each of
- # its stale entries in two phases:
- # 1. Clear the entry so attempting to access zip archive information
- # via any existing stale zipimport.zipimporter instances fails.
- # 2. Remove the entry from the cache so any newly constructed
- # zipimport.zipimporter instances do not end up using old stale
- # zip archive directory information.
- # This whole stale data removal step does not seem strictly necessary,
- # but has been left in because it was done before we started replacing
- # the zip archive directory information cache content if possible, and
- # there are no relevant unit tests that we can depend on to tell us if
- # this is really needed.
- _remove_and_clear_zip_directory_cache_data(normalized_path)
-
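A hedged usage sketch (the egg path is hypothetical):

    # After replacing a zipped egg in place, refresh the import caches:
    update_dist_caches('/site-packages/Foo-1.0-py3.9.egg',
                       fix_zipimporter_caches=True)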
-
-def _collect_zipimporter_cache_entries(normalized_path, cache):
- """
- Return zipimporter cache entry keys related to a given normalized path.
-
- Alternative path spellings (e.g. those using different character case or
- those using alternative path separators) related to the same path are
- included. Any sub-path entries are included as well, i.e. those
- corresponding to zip archives embedded in other zip archives.
-
- """
- result = []
- prefix_len = len(normalized_path)
- for p in cache:
- np = normalize_path(p)
- if (np.startswith(normalized_path) and
- np[prefix_len:prefix_len + 1] in (os.sep, '')):
- result.append(p)
- return result
-
-
-def _update_zipimporter_cache(normalized_path, cache, updater=None):
- """
- Update zipimporter cache data for a given normalized path.
-
- Any sub-path entries are processed as well, i.e. those corresponding to zip
- archives embedded in other zip archives.
-
- Given updater is a callable taking a cache entry key and the original entry
- (after already removing the entry from the cache), and expected to update
- the entry and possibly return a new one to be inserted in its place.
- Returning None indicates that the entry should not be replaced with a new
- one. If no updater is given, the cache entries are simply removed without
- any additional processing, the same as if the updater simply returned None.
-
- """
- for p in _collect_zipimporter_cache_entries(normalized_path, cache):
- # N.B. pypy's custom zipimport._zip_directory_cache implementation does
- # not support the complete dict interface:
-        # * Does not support item assignment, thus allowing this function
-        #   to be used only for removing existing cache entries.
- # * Does not support the dict.pop() method, forcing us to use the
- # get/del patterns instead. For more detailed information see the
- # following links:
- # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
- # http://bit.ly/2h9itJX
- old_entry = cache[p]
- del cache[p]
- new_entry = updater and updater(p, old_entry)
- if new_entry is not None:
- cache[p] = new_entry
-
-
-def _uncache(normalized_path, cache):
- _update_zipimporter_cache(normalized_path, cache)
-
-
-def _remove_and_clear_zip_directory_cache_data(normalized_path):
- def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
- old_entry.clear()
-
- _update_zipimporter_cache(
- normalized_path, zipimport._zip_directory_cache,
- updater=clear_and_remove_cached_zip_archive_directory_data)
-
-
-# PyPy Python implementation does not allow directly writing to the
-# zipimport._zip_directory_cache and so prevents us from attempting to correct
-# its content. The best we can do there is clear the problematic cache content
-# and have PyPy repopulate it as needed. The downside is that if there are any
-# stale zipimport.zipimporter instances lying around, attempting to use them
-# will fail due to not having their zip archive directory information
-# available, instead of being automatically corrected to use the new correct
-# zip archive directory information.
-if '__pypy__' in sys.builtin_module_names:
- _replace_zip_directory_cache_data = \
- _remove_and_clear_zip_directory_cache_data
-else:
-
- def _replace_zip_directory_cache_data(normalized_path):
- def replace_cached_zip_archive_directory_data(path, old_entry):
- # N.B. In theory, we could load the zip directory information just
- # once for all updated path spellings, and then copy it locally and
- # update its contained path strings to contain the correct
- # spelling, but that seems like a way too invasive move (this cache
- # structure is not officially documented anywhere and could in
- # theory change with new Python releases) for no significant
- # benefit.
- old_entry.clear()
- zipimport.zipimporter(path)
- old_entry.update(zipimport._zip_directory_cache[path])
- return old_entry
-
- _update_zipimporter_cache(
- normalized_path, zipimport._zip_directory_cache,
- updater=replace_cached_zip_archive_directory_data)
-
-
-def is_python(text, filename='<string>'):
- "Is this string a valid Python script?"
- try:
- compile(text, filename, 'exec')
- except (SyntaxError, TypeError):
- return False
- else:
- return True
-
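Two quick checks of the helper above; the behavior simply follows compile():

    >>> is_python("print('hi')")
    True
    >>> is_python("echo hi")
    False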
-
-def is_sh(executable):
- """Determine if the specified executable is a .sh (contains a #! line)"""
- try:
- with io.open(executable, encoding='latin-1') as fp:
- magic = fp.read(2)
- except (OSError, IOError):
- return executable
- return magic == '#!'
-
-
-def nt_quote_arg(arg):
- """Quote a command line argument according to Windows parsing rules"""
- return subprocess.list2cmdline([arg])
-
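Because quoting is delegated to subprocess.list2cmdline, an argument
containing spaces comes back double-quoted:

    >>> nt_quote_arg('C:\\Program Files\\Python39\\python.exe')
    '"C:\\Program Files\\Python39\\python.exe"'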
-
-def is_python_script(script_text, filename):
- """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
- """
- if filename.endswith('.py') or filename.endswith('.pyw'):
- return True # extension says it's Python
- if is_python(script_text, filename):
- return True # it's syntactically valid Python
- if script_text.startswith('#!'):
- # It begins with a '#!' line, so check if 'python' is in it somewhere
- return 'python' in script_text.splitlines()[0].lower()
-
- return False # Not any Python I can recognize
-
-
-try:
- from os import chmod as _chmod
-except ImportError:
- # Jython compatibility
- def _chmod(*args):
- pass
-
-
-def chmod(path, mode):
- log.debug("changing mode of %s to %o", path, mode)
- try:
- _chmod(path, mode)
- except os.error as e:
- log.debug("chmod failed: %s", e)
-
-
-class CommandSpec(list):
- """
- A command spec for a #! header, specified as a list of arguments akin to
- those passed to Popen.
- """
-
- options = []
- split_args = dict()
-
- @classmethod
- def best(cls):
- """
- Choose the best CommandSpec class based on environmental conditions.
- """
- return cls
-
- @classmethod
- def _sys_executable(cls):
- _default = os.path.normpath(sys.executable)
- return os.environ.get('__PYVENV_LAUNCHER__', _default)
-
- @classmethod
- def from_param(cls, param):
- """
- Construct a CommandSpec from a parameter to build_scripts, which may
- be None.
- """
- if isinstance(param, cls):
- return param
- if isinstance(param, list):
- return cls(param)
- if param is None:
- return cls.from_environment()
- # otherwise, assume it's a string.
- return cls.from_string(param)
-
- @classmethod
- def from_environment(cls):
- return cls([cls._sys_executable()])
-
- @classmethod
- def from_string(cls, string):
- """
- Construct a command spec from a simple string representing a command
- line parseable by shlex.split.
- """
- items = shlex.split(string, **cls.split_args)
- return cls(items)
-
- def install_options(self, script_text):
- self.options = shlex.split(self._extract_options(script_text))
- cmdline = subprocess.list2cmdline(self)
- if not isascii(cmdline):
- self.options[:0] = ['-x']
-
- @staticmethod
- def _extract_options(orig_script):
- """
- Extract any options from the first line of the script.
- """
- first = (orig_script + '\n').splitlines()[0]
- match = _first_line_re().match(first)
-        options = (match.group(1) or '') if match else ''
- return options.strip()
-
- def as_header(self):
- return self._render(self + list(self.options))
-
- @staticmethod
- def _strip_quotes(item):
- _QUOTES = '"\''
- for q in _QUOTES:
- if item.startswith(q) and item.endswith(q):
- return item[1:-1]
- return item
-
- @staticmethod
- def _render(items):
- cmdline = subprocess.list2cmdline(
- CommandSpec._strip_quotes(item.strip()) for item in items)
- return '#!' + cmdline + '\n'
-
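A hedged sketch of CommandSpec in use (paths are hypothetical; option
extraction relies on the distutils first_line_re handled above):

    cmd = CommandSpec.from_string('"/usr/bin/python3" -E')
    cmd.install_options("#!/usr/bin/python -x\nprint('hi')\n")
    cmd.as_header()   # roughly '#!/usr/bin/python3 -E -x\n'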
-
-# For pbr compat; will be removed in a future version.
-sys_executable = CommandSpec._sys_executable()
-
-
-class WindowsCommandSpec(CommandSpec):
- split_args = dict(posix=False)
-
-
-class ScriptWriter:
- """
- Encapsulates behavior around writing entry point scripts for console and
- gui apps.
- """
-
- template = textwrap.dedent(r"""
- # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
- import re
- import sys
-
- # for compatibility with easy_install; see #2198
- __requires__ = %(spec)r
-
- try:
- from importlib.metadata import distribution
- except ImportError:
- try:
- from importlib_metadata import distribution
- except ImportError:
- from pkg_resources import load_entry_point
-
-
- def importlib_load_entry_point(spec, group, name):
- dist_name, _, _ = spec.partition('==')
- matches = (
- entry_point
- for entry_point in distribution(dist_name).entry_points
- if entry_point.group == group and entry_point.name == name
- )
- return next(matches).load()
-
-
- globals().setdefault('load_entry_point', importlib_load_entry_point)
-
-
- if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
- """).lstrip()
-
- command_spec_class = CommandSpec
-
- @classmethod
- def get_script_args(cls, dist, executable=None, wininst=False):
- # for backward compatibility
- warnings.warn("Use get_args", EasyInstallDeprecationWarning)
- writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
- header = cls.get_script_header("", executable, wininst)
- return writer.get_args(dist, header)
-
- @classmethod
- def get_script_header(cls, script_text, executable=None, wininst=False):
- # for backward compatibility
- warnings.warn(
- "Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
- if wininst:
- executable = "python.exe"
- return cls.get_header(script_text, executable)
-
- @classmethod
- def get_args(cls, dist, header=None):
- """
- Yield write_script() argument tuples for a distribution's
- console_scripts and gui_scripts entry points.
- """
- if header is None:
- header = cls.get_header()
- spec = str(dist.as_requirement())
- for type_ in 'console', 'gui':
- group = type_ + '_scripts'
- for name, ep in dist.get_entry_map(group).items():
- cls._ensure_safe_name(name)
- script_text = cls.template % locals()
- args = cls._get_script_args(type_, name, header, script_text)
- for res in args:
- yield res
-
- @staticmethod
- def _ensure_safe_name(name):
- """
- Prevent paths in *_scripts entry point names.
- """
- has_path_sep = re.search(r'[\\/]', name)
- if has_path_sep:
- raise ValueError("Path separators not allowed in script names")
-
- @classmethod
- def get_writer(cls, force_windows):
- # for backward compatibility
- warnings.warn("Use best", EasyInstallDeprecationWarning)
- return WindowsScriptWriter.best() if force_windows else cls.best()
-
- @classmethod
- def best(cls):
- """
- Select the best ScriptWriter for this environment.
- """
- if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
- return WindowsScriptWriter.best()
- else:
- return cls
-
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- # Simply write the stub with no extension.
- yield (name, header + script_text)
-
- @classmethod
- def get_header(cls, script_text="", executable=None):
- """Create a #! line, getting options (if any) from script_text"""
- cmd = cls.command_spec_class.best().from_param(executable)
- cmd.install_options(script_text)
- return cmd.as_header()
-
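A hedged sketch of driving the writer (dist stands in for a
pkg_resources.Distribution that defines console_scripts entry points):

    writer = ScriptWriter.best()
    header = writer.get_header()
    for args in writer.get_args(dist, header):
        name, text = args[:2]   # Windows writers also yield mode and blockers
        print('would write', name)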
-
-class WindowsScriptWriter(ScriptWriter):
- command_spec_class = WindowsCommandSpec
-
- @classmethod
- def get_writer(cls):
- # for backward compatibility
- warnings.warn("Use best", EasyInstallDeprecationWarning)
- return cls.best()
-
- @classmethod
- def best(cls):
- """
- Select the best ScriptWriter suitable for Windows
- """
- writer_lookup = dict(
- executable=WindowsExecutableLauncherWriter,
- natural=cls,
- )
- # for compatibility, use the executable launcher by default
- launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
- return writer_lookup[launcher]
-
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- "For Windows, add a .py extension"
- ext = dict(console='.pya', gui='.pyw')[type_]
- if ext not in os.environ['PATHEXT'].lower().split(';'):
- msg = (
- "{ext} not listed in PATHEXT; scripts will not be "
- "recognized as executables."
- ).format(**locals())
- warnings.warn(msg, UserWarning)
- old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
- old.remove(ext)
- header = cls._adjust_header(type_, header)
- blockers = [name + x for x in old]
- yield name + ext, header + script_text, 't', blockers
-
- @classmethod
- def _adjust_header(cls, type_, orig_header):
- """
- Make sure 'pythonw' is used for gui and 'python' is used for
- console (regardless of what sys.executable is).
- """
- pattern = 'pythonw.exe'
- repl = 'python.exe'
- if type_ == 'gui':
- pattern, repl = repl, pattern
- pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
- new_header = pattern_ob.sub(string=orig_header, repl=repl)
- return new_header if cls._use_header(new_header) else orig_header
-
- @staticmethod
- def _use_header(new_header):
- """
- Should _adjust_header use the replaced header?
-
- On non-windows systems, always use. On
- Windows systems, only use the replaced header if it resolves
- to an executable on the system.
- """
- clean_header = new_header[2:-1].strip('"')
- return sys.platform != 'win32' or find_executable(clean_header)
-
-
-class WindowsExecutableLauncherWriter(WindowsScriptWriter):
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- """
- For Windows, add a .py extension and an .exe launcher
- """
- if type_ == 'gui':
- launcher_type = 'gui'
- ext = '-script.pyw'
- old = ['.pyw']
- else:
- launcher_type = 'cli'
- ext = '-script.py'
- old = ['.py', '.pyc', '.pyo']
- hdr = cls._adjust_header(type_, header)
- blockers = [name + x for x in old]
- yield (name + ext, hdr + script_text, 't', blockers)
- yield (
- name + '.exe', get_win_launcher(launcher_type),
- 'b' # write in binary mode
- )
- if not is_64bit():
- # install a manifest for the launcher to prevent Windows
- # from detecting it as an installer (which it will for
- # launchers like easy_install.exe). Consider only
- # adding a manifest for launchers detected as installers.
- # See Distribute #143 for details.
- m_name = name + '.exe.manifest'
- yield (m_name, load_launcher_manifest(name), 't')
-
-
-# for backward-compatibility
-get_script_args = ScriptWriter.get_script_args
-get_script_header = ScriptWriter.get_script_header
-
-
-def get_win_launcher(type):
- """
- Load the Windows launcher (executable) suitable for launching a script.
-
- `type` should be either 'cli' or 'gui'
-
- Returns the executable as a byte string.
- """
- launcher_fn = '%s.exe' % type
- if is_64bit():
- if get_platform() == "win-arm64":
- launcher_fn = launcher_fn.replace(".", "-arm64.")
- else:
- launcher_fn = launcher_fn.replace(".", "-64.")
- else:
- launcher_fn = launcher_fn.replace(".", "-32.")
- return resource_string('setuptools', launcher_fn)
-
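Per the branches above, the resource loaded is one of cli-32.exe, cli-64.exe,
cli-arm64.exe, or the corresponding gui-* launcher:

    get_win_launcher('cli')   # e.g. the bytes of 'cli-64.exe' on 64-bit x86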
-
-def load_launcher_manifest(name):
- manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
- return manifest.decode('utf-8') % vars()
-
-
-def rmtree(path, ignore_errors=False, onerror=auto_chmod):
- return shutil.rmtree(path, ignore_errors, onerror)
-
-
-def current_umask():
- tmp = os.umask(0o022)
- os.umask(tmp)
- return tmp
-
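current_umask() feeds the permission computation used when scripts are
written out; with the common umask of 0o022:

    mode = 0o777 - current_umask()   # -> 0o755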
-
-class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
- """
- Warning for EasyInstall deprecations, bypassing suppression.
- """
diff --git a/contrib/python/setuptools/py3/setuptools/command/egg_info.py b/contrib/python/setuptools/py3/setuptools/command/egg_info.py
deleted file mode 100644
index f2210292e3e..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/egg_info.py
+++ /dev/null
@@ -1,755 +0,0 @@
-"""setuptools.command.egg_info
-
-Create a distribution's .egg-info directory and contents"""
-
-from distutils.filelist import FileList as _FileList
-from distutils.errors import DistutilsInternalError
-from distutils.util import convert_path
-from distutils import log
-import distutils.errors
-import distutils.filelist
-import functools
-import os
-import re
-import sys
-import io
-import warnings
-import time
-import collections
-
-from setuptools import Command
-from setuptools.command.sdist import sdist
-from setuptools.command.sdist import walk_revctrl
-from setuptools.command.setopt import edit_config
-from setuptools.command import bdist_egg
-from pkg_resources import (
- parse_requirements, safe_name, parse_version,
- safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
-import setuptools.unicode_utils as unicode_utils
-from setuptools.glob import glob
-
-from setuptools.extern import packaging
-from setuptools import SetuptoolsDeprecationWarning
-
-
-def translate_pattern(glob): # noqa: C901 # is too complex (14) # FIXME
- """
-    Translate a file path glob like '*.txt' into a regular expression.
-    This differs from fnmatch.translate, which allows wildcards to match
-    directory separators. It also knows about '**/', which matches any
-    number of directories.
- """
- pat = ''
-
- # This will split on '/' within [character classes]. This is deliberate.
- chunks = glob.split(os.path.sep)
-
- sep = re.escape(os.sep)
- valid_char = '[^%s]' % (sep,)
-
- for c, chunk in enumerate(chunks):
- last_chunk = c == len(chunks) - 1
-
- # Chunks that are a literal ** are globstars. They match anything.
- if chunk == '**':
- if last_chunk:
- # Match anything if this is the last component
- pat += '.*'
- else:
- # Match '(name/)*'
- pat += '(?:%s+%s)*' % (valid_char, sep)
-            continue  # Skip to the next chunk; this path component is fully handled
-
- # Find any special characters in the remainder
- i = 0
- chunk_len = len(chunk)
- while i < chunk_len:
- char = chunk[i]
- if char == '*':
- # Match any number of name characters
- pat += valid_char + '*'
- elif char == '?':
- # Match a name character
- pat += valid_char
- elif char == '[':
- # Character class
- inner_i = i + 1
- # Skip initial !/] chars
- if inner_i < chunk_len and chunk[inner_i] == '!':
- inner_i = inner_i + 1
- if inner_i < chunk_len and chunk[inner_i] == ']':
- inner_i = inner_i + 1
-
- # Loop till the closing ] is found
- while inner_i < chunk_len and chunk[inner_i] != ']':
- inner_i = inner_i + 1
-
- if inner_i >= chunk_len:
- # Got to the end of the string without finding a closing ]
- # Do not treat this as a matching group, but as a literal [
- pat += re.escape(char)
- else:
- # Grab the insides of the [brackets]
- inner = chunk[i + 1:inner_i]
- char_class = ''
-
- # Class negation
- if inner[0] == '!':
- char_class = '^'
- inner = inner[1:]
-
- char_class += re.escape(inner)
- pat += '[%s]' % (char_class,)
-
- # Skip to the end ]
- i = inner_i
- else:
- pat += re.escape(char)
- i += 1
-
- # Join each chunk with the dir separator
- if not last_chunk:
- pat += sep
-
- pat += r'\Z'
- return re.compile(pat, flags=re.MULTILINE | re.DOTALL)
-
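A quick check of the translation (POSIX path separators assumed):

    pat = translate_pattern('src/**/*.txt')
    assert pat.match('src/a/b/notes.txt')
    assert not pat.match('docs/notes.txt')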
-
-class InfoCommon:
- tag_build = None
- tag_date = None
-
- @property
- def name(self):
- return safe_name(self.distribution.get_name())
-
- def tagged_version(self):
- return safe_version(self._maybe_tag(self.distribution.get_version()))
-
- def _maybe_tag(self, version):
- """
- egg_info may be called more than once for a distribution,
- in which case the version string already contains all tags.
- """
- return (
- version if self.vtags and version.endswith(self.vtags)
- else version + self.vtags
- )
-
- def tags(self):
- version = ''
- if self.tag_build:
- version += self.tag_build
- if self.tag_date:
- version += time.strftime("-%Y%m%d")
- return version
- vtags = property(tags)
-
-
-class egg_info(InfoCommon, Command):
- description = "create a distribution's .egg-info directory"
-
- user_options = [
- ('egg-base=', 'e', "directory containing .egg-info directories"
- " (default: top of the source tree)"),
- ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
- ('tag-build=', 'b', "Specify explicit tag to add to version number"),
- ('no-date', 'D', "Don't include date stamp [default]"),
- ]
-
- boolean_options = ['tag-date']
- negative_opt = {
- 'no-date': 'tag-date',
- }
-
- def initialize_options(self):
- self.egg_base = None
- self.egg_name = None
- self.egg_info = None
- self.egg_version = None
- self.broken_egg_info = False
-
- ####################################
- # allow the 'tag_svn_revision' to be detected and
- # set, supporting sdists built on older Setuptools.
- @property
- def tag_svn_revision(self):
- pass
-
- @tag_svn_revision.setter
- def tag_svn_revision(self, value):
- pass
- ####################################
-
- def save_version_info(self, filename):
- """
- Materialize the value of date into the
- build tag. Install build keys in a deterministic order
- to avoid arbitrary reordering on subsequent builds.
- """
- egg_info = collections.OrderedDict()
- # follow the order these keys would have been added
- # when PYTHONHASHSEED=0
- egg_info['tag_build'] = self.tags()
- egg_info['tag_date'] = 0
- edit_config(filename, dict(egg_info=egg_info))
-
- def finalize_options(self):
- # Note: we need to capture the current value returned
- # by `self.tagged_version()`, so we can later update
- # `self.distribution.metadata.version` without
- # repercussions.
- self.egg_name = self.name
- self.egg_version = self.tagged_version()
- parsed_version = parse_version(self.egg_version)
-
- try:
- is_version = isinstance(parsed_version, packaging.version.Version)
- spec = (
- "%s==%s" if is_version else "%s===%s"
- )
- list(
- parse_requirements(spec % (self.egg_name, self.egg_version))
- )
- except ValueError as e:
- raise distutils.errors.DistutilsOptionError(
- "Invalid distribution name or version syntax: %s-%s" %
- (self.egg_name, self.egg_version)
- ) from e
-
- if self.egg_base is None:
- dirs = self.distribution.package_dir
- self.egg_base = (dirs or {}).get('', os.curdir)
-
- self.ensure_dirname('egg_base')
- self.egg_info = to_filename(self.egg_name) + '.egg-info'
- if self.egg_base != os.curdir:
- self.egg_info = os.path.join(self.egg_base, self.egg_info)
- if '-' in self.egg_name:
- self.check_broken_egg_info()
-
- # Set package version for the benefit of dumber commands
- # (e.g. sdist, bdist_wininst, etc.)
- #
- self.distribution.metadata.version = self.egg_version
-
- # If we bootstrapped around the lack of a PKG-INFO, as might be the
- # case in a fresh checkout, make sure that any special tags get added
- # to the version info
- #
- pd = self.distribution._patched_dist
- if pd is not None and pd.key == self.egg_name.lower():
- pd._version = self.egg_version
- pd._parsed_version = parse_version(self.egg_version)
- self.distribution._patched_dist = None
-
- def write_or_delete_file(self, what, filename, data, force=False):
- """Write `data` to `filename` or delete if empty
-
- If `data` is non-empty, this routine is the same as ``write_file()``.
- If `data` is empty but not ``None``, this is the same as calling
-        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
- unless `filename` exists, in which case a warning is issued about the
- orphaned file (if `force` is false), or deleted (if `force` is true).
- """
- if data:
- self.write_file(what, filename, data)
- elif os.path.exists(filename):
- if data is None and not force:
- log.warn(
- "%s not set in setup(), but %s exists", what, filename
- )
- return
- else:
- self.delete_file(filename)
-
- def write_file(self, what, filename, data):
- """Write `data` to `filename` (if not a dry run) after announcing it
-
- `what` is used in a log message to identify what is being written
- to the file.
- """
- log.info("writing %s to %s", what, filename)
- data = data.encode("utf-8")
- if not self.dry_run:
-            # use a context manager so the handle is closed even on error
-            with open(filename, 'wb') as f:
-                f.write(data)
-
- def delete_file(self, filename):
- """Delete `filename` (if not a dry run) after announcing it"""
- log.info("deleting %s", filename)
- if not self.dry_run:
- os.unlink(filename)
-
- def run(self):
- self.mkpath(self.egg_info)
- os.utime(self.egg_info, None)
- installer = self.distribution.fetch_build_egg
- for ep in iter_entry_points('egg_info.writers'):
- ep.require(installer=installer)
- writer = ep.resolve()
- writer(self, ep.name, os.path.join(self.egg_info, ep.name))
-
- # Get rid of native_libs.txt if it was put there by older bdist_egg
- nl = os.path.join(self.egg_info, "native_libs.txt")
- if os.path.exists(nl):
- self.delete_file(nl)
-
- self.find_sources()
-
- def find_sources(self):
- """Generate SOURCES.txt manifest file"""
- manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
- mm = manifest_maker(self.distribution)
- mm.manifest = manifest_filename
- mm.run()
- self.filelist = mm.filelist
-
- def check_broken_egg_info(self):
- bei = self.egg_name + '.egg-info'
- if self.egg_base != os.curdir:
- bei = os.path.join(self.egg_base, bei)
- if os.path.exists(bei):
- log.warn(
- "-" * 78 + '\n'
- "Note: Your current .egg-info directory has a '-' in its name;"
- '\nthis will not work correctly with "setup.py develop".\n\n'
- 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
- bei, self.egg_info
- )
- self.broken_egg_info = self.egg_info
- self.egg_info = bei # make it work for now
-
-
-class FileList(_FileList):
- # Implementations of the various MANIFEST.in commands
-
- def process_template_line(self, line):
- # Parse the line: split it up, make sure the right number of words
- # is there, and return the relevant words. 'action' is always
- # defined: it's the first word of the line. Which of the other
- # three are defined depends on the action; it'll be either
- # patterns, (dir and patterns), or (dir_pattern).
- (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
-
- action_map = {
- 'include': self.include,
- 'exclude': self.exclude,
- 'global-include': self.global_include,
- 'global-exclude': self.global_exclude,
- 'recursive-include': functools.partial(
- self.recursive_include, dir,
- ),
- 'recursive-exclude': functools.partial(
- self.recursive_exclude, dir,
- ),
- 'graft': self.graft,
- 'prune': self.prune,
- }
- log_map = {
- 'include': "warning: no files found matching '%s'",
- 'exclude': (
- "warning: no previously-included files found "
- "matching '%s'"
- ),
- 'global-include': (
- "warning: no files found matching '%s' "
- "anywhere in distribution"
- ),
- 'global-exclude': (
- "warning: no previously-included files matching "
- "'%s' found anywhere in distribution"
- ),
- 'recursive-include': (
- "warning: no files found matching '%s' "
- "under directory '%s'"
- ),
- 'recursive-exclude': (
- "warning: no previously-included files matching "
- "'%s' found under directory '%s'"
- ),
- 'graft': "warning: no directories found matching '%s'",
- 'prune': "no previously-included directories found matching '%s'",
- }
-
- try:
- process_action = action_map[action]
- except KeyError:
- raise DistutilsInternalError(
- "this cannot happen: invalid action '{action!s}'".
- format(action=action),
- )
-
- # OK, now we know that the action is valid and we have the
- # right number of words on the line for that action -- so we
- # can proceed with minimal error-checking.
-
- action_is_recursive = action.startswith('recursive-')
- if action in {'graft', 'prune'}:
- patterns = [dir_pattern]
- extra_log_args = (dir, ) if action_is_recursive else ()
- log_tmpl = log_map[action]
-
- self.debug_print(
- ' '.join(
- [action] +
- ([dir] if action_is_recursive else []) +
- patterns,
- )
- )
- for pattern in patterns:
- if not process_action(pattern):
- log.warn(log_tmpl, pattern, *extra_log_args)
-
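Each action dispatched above corresponds to one MANIFEST.in line; a
representative template (file names hypothetical):

    include README.rst
    recursive-include src *.py
    graft docs
    prune build
    global-exclude *.py[co]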
- def _remove_files(self, predicate):
- """
- Remove all files from the file list that match the predicate.
- Return True if any matching files were removed
- """
- found = False
- for i in range(len(self.files) - 1, -1, -1):
- if predicate(self.files[i]):
- self.debug_print(" removing " + self.files[i])
- del self.files[i]
- found = True
- return found
-
- def include(self, pattern):
- """Include files that match 'pattern'."""
- found = [f for f in glob(pattern) if not os.path.isdir(f)]
- self.extend(found)
- return bool(found)
-
- def exclude(self, pattern):
- """Exclude files that match 'pattern'."""
- match = translate_pattern(pattern)
- return self._remove_files(match.match)
-
- def recursive_include(self, dir, pattern):
- """
- Include all files anywhere in 'dir/' that match the pattern.
- """
- full_pattern = os.path.join(dir, '**', pattern)
- found = [f for f in glob(full_pattern, recursive=True)
- if not os.path.isdir(f)]
- self.extend(found)
- return bool(found)
-
- def recursive_exclude(self, dir, pattern):
- """
-        Exclude any file anywhere in 'dir/' that matches the pattern.
- """
- match = translate_pattern(os.path.join(dir, '**', pattern))
- return self._remove_files(match.match)
-
- def graft(self, dir):
- """Include all files from 'dir/'."""
- found = [
- item
- for match_dir in glob(dir)
- for item in distutils.filelist.findall(match_dir)
- ]
- self.extend(found)
- return bool(found)
-
- def prune(self, dir):
- """Filter out files from 'dir/'."""
- match = translate_pattern(os.path.join(dir, '**'))
- return self._remove_files(match.match)
-
- def global_include(self, pattern):
- """
- Include all files anywhere in the current directory that match the
- pattern. This is very inefficient on large file trees.
- """
- if self.allfiles is None:
- self.findall()
- match = translate_pattern(os.path.join('**', pattern))
- found = [f for f in self.allfiles if match.match(f)]
- self.extend(found)
- return bool(found)
-
- def global_exclude(self, pattern):
- """
- Exclude all files anywhere that match the pattern.
- """
- match = translate_pattern(os.path.join('**', pattern))
- return self._remove_files(match.match)
-
- def append(self, item):
- if item.endswith('\r'): # Fix older sdists built on Windows
- item = item[:-1]
- path = convert_path(item)
-
- if self._safe_path(path):
- self.files.append(path)
-
- def extend(self, paths):
- self.files.extend(filter(self._safe_path, paths))
-
- def _repair(self):
- """
- Replace self.files with only safe paths
-
- Because some owners of FileList manipulate the underlying
- ``files`` attribute directly, this method must be called to
- repair those paths.
- """
- self.files = list(filter(self._safe_path, self.files))
-
- def _safe_path(self, path):
- enc_warn = "'%s' not %s encodable -- skipping"
-
-        # To avoid accidental transcoding errors, decode to unicode first
- u_path = unicode_utils.filesys_decode(path)
- if u_path is None:
- log.warn("'%s' in unexpected encoding -- skipping" % path)
- return False
-
- # Must ensure utf-8 encodability
- utf8_path = unicode_utils.try_encode(u_path, "utf-8")
- if utf8_path is None:
- log.warn(enc_warn, path, 'utf-8')
- return False
-
- try:
-            # accept if either way checks out
- if os.path.exists(u_path) or os.path.exists(utf8_path):
- return True
-        # the except clause below catches encode errors raised while checking u_path
- except UnicodeEncodeError:
- log.warn(enc_warn, path, sys.getfilesystemencoding())
-
-
-class manifest_maker(sdist):
- template = "MANIFEST.in"
-
- def initialize_options(self):
- self.use_defaults = 1
- self.prune = 1
- self.manifest_only = 1
- self.force_manifest = 1
-
- def finalize_options(self):
- pass
-
- def run(self):
- self.filelist = FileList()
- if not os.path.exists(self.manifest):
- self.write_manifest() # it must exist so it'll get in the list
- self.add_defaults()
- if os.path.exists(self.template):
- self.read_template()
- self.add_license_files()
- self.prune_file_list()
- self.filelist.sort()
- self.filelist.remove_duplicates()
- self.write_manifest()
-
- def _manifest_normalize(self, path):
- path = unicode_utils.filesys_decode(path)
- return path.replace(os.sep, '/')
-
- def write_manifest(self):
- """
- Write the file list in 'self.filelist' to the manifest file
- named by 'self.manifest'.
- """
- self.filelist._repair()
-
-        # _repair() has ensured encodability, but not unicode normalization
- files = [self._manifest_normalize(f) for f in self.filelist.files]
- msg = "writing manifest file '%s'" % self.manifest
- self.execute(write_file, (self.manifest, files), msg)
-
- def warn(self, msg):
- if not self._should_suppress_warning(msg):
- sdist.warn(self, msg)
-
- @staticmethod
- def _should_suppress_warning(msg):
- """
- suppress missing-file warnings from sdist
- """
- return re.match(r"standard file .*not found", msg)
-
- def add_defaults(self):
- sdist.add_defaults(self)
- self.filelist.append(self.template)
- self.filelist.append(self.manifest)
- rcfiles = list(walk_revctrl())
- if rcfiles:
- self.filelist.extend(rcfiles)
- elif os.path.exists(self.manifest):
- self.read_manifest()
-
- if os.path.exists("setup.py"):
- # setup.py should be included by default, even if it's not
- # the script called to create the sdist
- self.filelist.append("setup.py")
-
- ei_cmd = self.get_finalized_command('egg_info')
- self.filelist.graft(ei_cmd.egg_info)
-
- def add_license_files(self):
- license_files = self.distribution.metadata.license_files or []
- for lf in license_files:
- log.info("adding license file '%s'", lf)
- self.filelist.extend(license_files)
-
- def prune_file_list(self):
- build = self.get_finalized_command('build')
- base_dir = self.distribution.get_fullname()
- self.filelist.prune(build.build_base)
- self.filelist.prune(base_dir)
- sep = re.escape(os.sep)
- self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
- is_regex=1)
-
- def _safe_data_files(self, build_py):
- """
- The parent class implementation of this method
- (``sdist``) will try to include data files, which
- might cause recursion problems when
- ``include_package_data=True``.
-
-        Therefore, avoid triggering any attempt to
-        analyze/build the manifest again.
- """
- if hasattr(build_py, 'get_data_files_without_manifest'):
- return build_py.get_data_files_without_manifest()
-
- warnings.warn(
- "Custom 'build_py' does not implement "
- "'get_data_files_without_manifest'.\nPlease extend command classes"
- " from setuptools instead of distutils.",
- SetuptoolsDeprecationWarning
- )
- return build_py.get_data_files()
-
-
-def write_file(filename, contents):
- """Create a file with the specified name and write 'contents' (a
- sequence of strings without line terminators) to it.
- """
- contents = "\n".join(contents)
-
-    # assuming the contents have been vetted for utf-8 encoding
- contents = contents.encode("utf-8")
-
- with open(filename, "wb") as f: # always write POSIX-style manifest
- f.write(contents)
-
-
-def write_pkg_info(cmd, basename, filename):
- log.info("writing %s", filename)
- if not cmd.dry_run:
- metadata = cmd.distribution.metadata
- metadata.version, oldver = cmd.egg_version, metadata.version
- metadata.name, oldname = cmd.egg_name, metadata.name
-
- try:
- # write unescaped data to PKG-INFO, so older pkg_resources
- # can still parse it
- metadata.write_pkg_info(cmd.egg_info)
- finally:
- metadata.name, metadata.version = oldname, oldver
-
- safe = getattr(cmd.distribution, 'zip_safe', None)
-
- bdist_egg.write_safety_flag(cmd.egg_info, safe)
-
-
-def warn_depends_obsolete(cmd, basename, filename):
- if os.path.exists(filename):
- log.warn(
- "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
-
-def _write_requirements(stream, reqs):
- lines = yield_lines(reqs or ())
-
- def append_cr(line):
- return line + '\n'
- lines = map(append_cr, lines)
- stream.writelines(lines)
-
-
-def write_requirements(cmd, basename, filename):
- dist = cmd.distribution
- data = io.StringIO()
- _write_requirements(data, dist.install_requires)
- extras_require = dist.extras_require or {}
- for extra in sorted(extras_require):
- data.write('\n[{extra}]\n'.format(**vars()))
- _write_requirements(data, extras_require[extra])
- cmd.write_or_delete_file("requirements", filename, data.getvalue())
-
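The resulting requires.txt lists install_requires first, then one bracketed
section per extra (names hypothetical):

    requests>=2.0

    [test]
    pytest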
-
-def write_setup_requirements(cmd, basename, filename):
- data = io.StringIO()
- _write_requirements(data, cmd.distribution.setup_requires)
- cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
-
-
-def write_toplevel_names(cmd, basename, filename):
- pkgs = dict.fromkeys(
- [
- k.split('.', 1)[0]
- for k in cmd.distribution.iter_distribution_names()
- ]
- )
- cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
-
-
-def overwrite_arg(cmd, basename, filename):
- write_arg(cmd, basename, filename, True)
-
-
-def write_arg(cmd, basename, filename, force=False):
- argname = os.path.splitext(basename)[0]
- value = getattr(cmd.distribution, argname, None)
- if value is not None:
- value = '\n'.join(value) + '\n'
- cmd.write_or_delete_file(argname, filename, value, force)
-
-
-def write_entries(cmd, basename, filename):
- ep = cmd.distribution.entry_points
-
- if isinstance(ep, str) or ep is None:
- data = ep
-    else:
- data = []
- for section, contents in sorted(ep.items()):
- if not isinstance(contents, str):
- contents = EntryPoint.parse_group(section, contents)
- contents = '\n'.join(sorted(map(str, contents.values())))
- data.append('[%s]\n%s\n\n' % (section, contents))
- data = ''.join(data)
-
- cmd.write_or_delete_file('entry points', filename, data, True)
-
-
-def get_pkg_info_revision():
- """
- Get a -r### off of PKG-INFO Version in case this is an sdist of
- a subversion revision.
- """
- warnings.warn(
- "get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
- if os.path.exists('PKG-INFO'):
- with io.open('PKG-INFO') as f:
- for line in f:
- match = re.match(r"Version:.*-r(\d+)\s*$", line)
- if match:
- return int(match.group(1))
- return 0
-
-
-class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
- """Deprecated behavior warning for EggInfo, bypassing suppression."""
diff --git a/contrib/python/setuptools/py3/setuptools/command/install.py b/contrib/python/setuptools/py3/setuptools/command/install.py
deleted file mode 100644
index 35e54d2043a..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/install.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from distutils.errors import DistutilsArgError
-import inspect
-import glob
-import warnings
-import platform
-import distutils.command.install as orig
-
-import setuptools
-
-# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
-# now. See https://github.com/pypa/setuptools/issues/199/
-_install = orig.install
-
-
-class install(orig.install):
- """Use easy_install to install the package, w/dependencies"""
-
- user_options = orig.install.user_options + [
- ('old-and-unmanageable', None, "Try not to use this!"),
- ('single-version-externally-managed', None,
- "used by system package builders to create 'flat' eggs"),
- ]
- boolean_options = orig.install.boolean_options + [
- 'old-and-unmanageable', 'single-version-externally-managed',
- ]
- new_commands = [
- ('install_egg_info', lambda self: True),
- ('install_scripts', lambda self: True),
- ]
- _nc = dict(new_commands)
-
- def initialize_options(self):
-
- warnings.warn(
- "setup.py install is deprecated. "
- "Use build and pip and other standards-based tools.",
- setuptools.SetuptoolsDeprecationWarning,
- )
-
- orig.install.initialize_options(self)
- self.old_and_unmanageable = None
- self.single_version_externally_managed = None
-
- def finalize_options(self):
- orig.install.finalize_options(self)
- if self.root:
- self.single_version_externally_managed = True
- elif self.single_version_externally_managed:
- if not self.root and not self.record:
- raise DistutilsArgError(
- "You must specify --record or --root when building system"
- " packages"
- )
-
- def handle_extra_path(self):
- if self.root or self.single_version_externally_managed:
- # explicit backward-compatibility mode, allow extra_path to work
- return orig.install.handle_extra_path(self)
-
- # Ignore extra_path when installing an egg (or being run by another
-        # command without --root or --single-version-externally-managed)
- self.path_file = None
- self.extra_dirs = ''
-
- def run(self):
- # Explicit request for old-style install? Just do it
- if self.old_and_unmanageable or self.single_version_externally_managed:
- return orig.install.run(self)
-
- if not self._called_from_setup(inspect.currentframe()):
- # Run in backward-compatibility mode to support bdist_* commands.
- orig.install.run(self)
- else:
- self.do_egg_install()
-
- @staticmethod
- def _called_from_setup(run_frame):
- """
- Attempt to detect whether run() was called from setup() or by another
- command. If called by setup(), the parent caller will be the
- 'run_command' method in 'distutils.dist', and *its* caller will be
- the 'run_commands' method. If called any other way, the
- immediate caller *might* be 'run_command', but it won't have been
- called by 'run_commands'. Return True in that case or if a call stack
- is unavailable. Return False otherwise.
- """
- if run_frame is None:
- msg = "Call stack not available. bdist_* commands may fail."
- warnings.warn(msg)
- if platform.python_implementation() == 'IronPython':
- msg = "For best results, pass -X:Frames to enable call stack."
- warnings.warn(msg)
- return True
- res = inspect.getouterframes(run_frame)[2]
- caller, = res[:1]
- info = inspect.getframeinfo(caller)
- caller_module = caller.f_globals.get('__name__', '')
- return (
- caller_module == 'distutils.dist'
- and info.function == 'run_commands'
- )
-
- def do_egg_install(self):
-
- easy_install = self.distribution.get_command_class('easy_install')
-
- cmd = easy_install(
- self.distribution, args="x", root=self.root, record=self.record,
- )
- cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
- cmd.always_copy_from = '.' # make sure local-dir eggs get installed
-
- # pick up setup-dir .egg files only: no .egg-info
- cmd.package_index.scan(glob.glob('*.egg'))
-
- self.run_command('bdist_egg')
- args = [self.distribution.get_command_obj('bdist_egg').egg_output]
-
- if setuptools.bootstrap_install_from:
- # Bootstrap self-installation of setuptools
- args.insert(0, setuptools.bootstrap_install_from)
-
- cmd.args = args
- cmd.run(show_deprecation=False)
- setuptools.bootstrap_install_from = None
-
-
-# XXX Python 3.1 doesn't see _nc if this is inside the class
-install.sub_commands = (
- [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
- install.new_commands
-)
diff --git a/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py b/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py
deleted file mode 100644
index edc4718b686..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from distutils import log, dir_util
-import os
-
-from setuptools import Command
-from setuptools import namespaces
-from setuptools.archive_util import unpack_archive
-import pkg_resources
-
-
-class install_egg_info(namespaces.Installer, Command):
- """Install an .egg-info directory for the package"""
-
- description = "Install an .egg-info directory for the package"
-
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ]
-
- def initialize_options(self):
- self.install_dir = None
-
- def finalize_options(self):
- self.set_undefined_options('install_lib',
- ('install_dir', 'install_dir'))
- ei_cmd = self.get_finalized_command("egg_info")
- basename = pkg_resources.Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version
- ).egg_name() + '.egg-info'
- self.source = ei_cmd.egg_info
- self.target = os.path.join(self.install_dir, basename)
- self.outputs = []
-
- def run(self):
- self.run_command('egg_info')
- if os.path.isdir(self.target) and not os.path.islink(self.target):
- dir_util.remove_tree(self.target, dry_run=self.dry_run)
- elif os.path.exists(self.target):
- self.execute(os.unlink, (self.target,), "Removing " + self.target)
- if not self.dry_run:
- pkg_resources.ensure_directory(self.target)
- self.execute(
- self.copytree, (), "Copying %s to %s" % (self.source, self.target)
- )
- self.install_namespaces()
-
- def get_outputs(self):
- return self.outputs
-
- def copytree(self):
- # Copy the .egg-info tree to site-packages
- def skimmer(src, dst):
- # filter out source-control directories; note that 'src' is always
- # a '/'-separated path, regardless of platform. 'dst' is a
- # platform-specific path.
- for skip in '.svn/', 'CVS/':
- if src.startswith(skip) or '/' + skip in src:
- return None
- self.outputs.append(dst)
- log.debug("Copying %s to %s", src, dst)
- return dst
-
- unpack_archive(self.source, self.target, skimmer)
diff --git a/contrib/python/setuptools/py3/setuptools/command/install_lib.py b/contrib/python/setuptools/py3/setuptools/command/install_lib.py
deleted file mode 100644
index 2e9d8757a58..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/install_lib.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import os
-import sys
-from itertools import product, starmap
-import distutils.command.install_lib as orig
-
-
-class install_lib(orig.install_lib):
- """Don't add compiled flags to filenames of non-Python files"""
-
- def run(self):
- self.build()
- outfiles = self.install()
- if outfiles is not None:
- # always compile, in case we have any extension stubs to deal with
- self.byte_compile(outfiles)
-
- def get_exclusions(self):
- """
-        Return a sized container (here, a set) of paths to be
-        excluded for single_version_externally_managed installations.
- """
- all_packages = (
- pkg
- for ns_pkg in self._get_SVEM_NSPs()
- for pkg in self._all_packages(ns_pkg)
- )
-
- excl_specs = product(all_packages, self._gen_exclusion_paths())
- return set(starmap(self._exclude_pkg_path, excl_specs))
-
- def _exclude_pkg_path(self, pkg, exclusion_path):
- """
- Given a package name and exclusion path within that package,
- compute the full exclusion path.
- """
- parts = pkg.split('.') + [exclusion_path]
- return os.path.join(self.install_dir, *parts)
-
- @staticmethod
- def _all_packages(pkg_name):
- """
- >>> list(install_lib._all_packages('foo.bar.baz'))
- ['foo.bar.baz', 'foo.bar', 'foo']
- """
- while pkg_name:
- yield pkg_name
- pkg_name, sep, child = pkg_name.rpartition('.')
-
- def _get_SVEM_NSPs(self):
- """
- Get namespace packages (list) but only for
- single_version_externally_managed installations and empty otherwise.
- """
- # TODO: is it necessary to short-circuit here? i.e. what's the cost
- # if get_finalized_command is called even when namespace_packages is
- # False?
- if not self.distribution.namespace_packages:
- return []
-
- install_cmd = self.get_finalized_command('install')
- svem = install_cmd.single_version_externally_managed
-
- return self.distribution.namespace_packages if svem else []
-
- @staticmethod
- def _gen_exclusion_paths():
- """
- Generate file paths to be excluded for namespace packages (bytecode
- cache files).
- """
- # always exclude the package module itself
- yield '__init__.py'
-
- yield '__init__.pyc'
- yield '__init__.pyo'
-
- if not hasattr(sys, 'implementation'):
- return
-
- base = os.path.join(
- '__pycache__', '__init__.' + sys.implementation.cache_tag)
- yield base + '.pyc'
- yield base + '.pyo'
- yield base + '.opt-1.pyc'
- yield base + '.opt-2.pyc'
-
- def copy_tree(
- self, infile, outfile,
- preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
- ):
- assert preserve_mode and preserve_times and not preserve_symlinks
- exclude = self.get_exclusions()
-
- if not exclude:
- return orig.install_lib.copy_tree(self, infile, outfile)
-
- # Exclude namespace package __init__.py* files from the output
-
- from setuptools.archive_util import unpack_directory
- from distutils import log
-
- outfiles = []
-
- def pf(src, dst):
- if dst in exclude:
- log.warn("Skipping installation of %s (namespace package)",
- dst)
- return False
-
- log.info("copying %s -> %s", src, os.path.dirname(dst))
- outfiles.append(dst)
- return dst
-
- unpack_directory(infile, outfile, pf)
- return outfiles
-
- def get_outputs(self):
- outputs = orig.install_lib.get_outputs(self)
- exclude = self.get_exclusions()
- if exclude:
- return [f for f in outputs if f not in exclude]
- return outputs
diff --git a/contrib/python/setuptools/py3/setuptools/command/install_scripts.py b/contrib/python/setuptools/py3/setuptools/command/install_scripts.py
deleted file mode 100644
index 9cd8eb06277..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/install_scripts.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from distutils import log
-import distutils.command.install_scripts as orig
-from distutils.errors import DistutilsModuleError
-import os
-import sys
-
-from pkg_resources import Distribution, PathMetadata, ensure_directory
-
-
-class install_scripts(orig.install_scripts):
- """Do normal script install, plus any egg_info wrapper scripts"""
-
- def initialize_options(self):
- orig.install_scripts.initialize_options(self)
- self.no_ep = False
-
- def run(self):
- import setuptools.command.easy_install as ei
-
- self.run_command("egg_info")
- if self.distribution.scripts:
- orig.install_scripts.run(self) # run first to set up self.outfiles
- else:
- self.outfiles = []
- if self.no_ep:
- # don't install entry point scripts into .egg file!
- return
-
- ei_cmd = self.get_finalized_command("egg_info")
- dist = Distribution(
- ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
- ei_cmd.egg_name, ei_cmd.egg_version,
- )
- bs_cmd = self.get_finalized_command('build_scripts')
- exec_param = getattr(bs_cmd, 'executable', None)
- try:
- bw_cmd = self.get_finalized_command("bdist_wininst")
- is_wininst = getattr(bw_cmd, '_is_running', False)
- except (ImportError, DistutilsModuleError):
- is_wininst = False
- writer = ei.ScriptWriter
- if is_wininst:
- exec_param = "python.exe"
- writer = ei.WindowsScriptWriter
- if exec_param == sys.executable:
- # In case the path to the Python executable contains a space, wrap
- # it so it's not split up.
- exec_param = [exec_param]
- # resolve the writer to the environment
- writer = writer.best()
- cmd = writer.command_spec_class.best().from_param(exec_param)
- for args in writer.get_args(dist, cmd.as_header()):
- self.write_script(*args)
-
- def write_script(self, script_name, contents, mode="t", *ignored):
- """Write an executable file to the scripts directory"""
- from setuptools.command.easy_install import chmod, current_umask
-
- log.info("Installing %s script to %s", script_name, self.install_dir)
- target = os.path.join(self.install_dir, script_name)
- self.outfiles.append(target)
-
- mask = current_umask()
- if not self.dry_run:
- ensure_directory(target)
- f = open(target, "w" + mode)
- f.write(contents)
- f.close()
- chmod(target, 0o777 - mask)
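
write_script above honors the process umask when marking installed scripts executable; since os exposes no read-only umask getter, the value is read by setting and immediately restoring it. A small sketch of the same pattern (chmod and current_umask come from easy_install in the real code; this version inlines them, and the target path is hypothetical):

    import os

    def current_umask():
        # os has no getter, so set-and-restore to read the value
        tmp = os.umask(0o022)
        os.umask(tmp)
        return tmp

    def write_script(target, contents):
        os.makedirs(os.path.dirname(target) or '.', exist_ok=True)
        with open(target, 'w') as f:
            f.write(contents)
        # full permissions minus the umask, e.g. 0o755 under umask 0o022
        os.chmod(target, 0o777 - current_umask())

    write_script('demo-bin/hello', '#!/usr/bin/env python\nprint("hi")\n')
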
diff --git a/contrib/python/setuptools/py3/setuptools/command/launcher manifest.xml b/contrib/python/setuptools/py3/setuptools/command/launcher manifest.xml
deleted file mode 100644
index 5972a96d8de..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/launcher manifest.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
- <assemblyIdentity version="1.0.0.0"
- processorArchitecture="X86"
- name="%(name)s"
- type="win32"/>
- <!-- Identify the application security requirements. -->
- <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
- <security>
- <requestedPrivileges>
- <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
- </requestedPrivileges>
- </security>
- </trustInfo>
-</assembly>
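
The %(name)s placeholder in the manifest above suggests it is filled with dict-style % interpolation before being attached to a generated launcher; a one-line illustration (the executable name is hypothetical):

    template = '<assemblyIdentity version="1.0.0.0" name="%(name)s" type="win32"/>'
    print(template % {'name': 'my-script.exe'})
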
diff --git a/contrib/python/setuptools/py3/setuptools/command/py36compat.py b/contrib/python/setuptools/py3/setuptools/command/py36compat.py
deleted file mode 100644
index 343547a4d31..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/py36compat.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import os
-from glob import glob
-from distutils.util import convert_path
-from distutils.command import sdist
-
-
-class sdist_add_defaults:
- """
- Mix-in providing forward-compatibility for functionality as found in
- distutils on Python 3.7.
-
- Do not edit the code in this class except to update functionality
- as implemented in distutils. Instead, override in the subclass.
- """
-
- def add_defaults(self):
- """Add all the default files to self.filelist:
- - README or README.txt
- - setup.py
- - test/test*.py
- - all pure Python modules mentioned in setup script
- - all files pointed by package_data (build_py)
- - all files defined in data_files.
- - all files defined as scripts.
- - all C sources listed as part of extensions or C libraries
- in the setup script (doesn't catch C headers!)
- Warns if (README or README.txt) or setup.py are missing; everything
- else is optional.
- """
- self._add_defaults_standards()
- self._add_defaults_optional()
- self._add_defaults_python()
- self._add_defaults_data_files()
- self._add_defaults_ext()
- self._add_defaults_c_libs()
- self._add_defaults_scripts()
-
- @staticmethod
- def _cs_path_exists(fspath):
- """
- Case-sensitive path existence check
-
- >>> sdist_add_defaults._cs_path_exists(__file__)
- True
- >>> sdist_add_defaults._cs_path_exists(__file__.upper())
- False
- """
- if not os.path.exists(fspath):
- return False
- # make absolute so we always have a directory
- abspath = os.path.abspath(fspath)
- directory, filename = os.path.split(abspath)
- return filename in os.listdir(directory)
-
- def _add_defaults_standards(self):
- standards = [self.READMES, self.distribution.script_name]
- for fn in standards:
- if isinstance(fn, tuple):
- alts = fn
- got_it = False
- for fn in alts:
- if self._cs_path_exists(fn):
- got_it = True
- self.filelist.append(fn)
- break
-
- if not got_it:
- self.warn("standard file not found: should have one of " +
- ', '.join(alts))
- else:
- if self._cs_path_exists(fn):
- self.filelist.append(fn)
- else:
- self.warn("standard file '%s' not found" % fn)
-
- def _add_defaults_optional(self):
- optional = ['test/test*.py', 'setup.cfg']
- for pattern in optional:
- files = filter(os.path.isfile, glob(pattern))
- self.filelist.extend(files)
-
- def _add_defaults_python(self):
- # build_py is used to get:
- # - python modules
- # - files defined in package_data
- build_py = self.get_finalized_command('build_py')
-
- # getting python files
- if self.distribution.has_pure_modules():
- self.filelist.extend(build_py.get_source_files())
-
- # getting package_data files
- # (computed in build_py.data_files by build_py.finalize_options)
- for pkg, src_dir, build_dir, filenames in build_py.data_files:
- for filename in filenames:
- self.filelist.append(os.path.join(src_dir, filename))
-
- def _add_defaults_data_files(self):
- # getting distribution.data_files
- if self.distribution.has_data_files():
- for item in self.distribution.data_files:
- if isinstance(item, str):
- # plain file
- item = convert_path(item)
- if os.path.isfile(item):
- self.filelist.append(item)
- else:
- # a (dirname, filenames) tuple
- dirname, filenames = item
- for f in filenames:
- f = convert_path(f)
- if os.path.isfile(f):
- self.filelist.append(f)
-
- def _add_defaults_ext(self):
- if self.distribution.has_ext_modules():
- build_ext = self.get_finalized_command('build_ext')
- self.filelist.extend(build_ext.get_source_files())
-
- def _add_defaults_c_libs(self):
- if self.distribution.has_c_libraries():
- build_clib = self.get_finalized_command('build_clib')
- self.filelist.extend(build_clib.get_source_files())
-
- def _add_defaults_scripts(self):
- if self.distribution.has_scripts():
- build_scripts = self.get_finalized_command('build_scripts')
- self.filelist.extend(build_scripts.get_source_files())
-
-
-if hasattr(sdist.sdist, '_add_defaults_standards'):
- # disable the functionality already available upstream
- class sdist_add_defaults: # noqa
- pass
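
The case-sensitive existence check in the mix-in above works by listing the parent directory rather than trusting os.path.exists, which is case-insensitive on some filesystems. The same check as a standalone sketch:

    import os

    def cs_path_exists(fspath):
        # True only if 'fspath' exists with exactly this casing
        if not os.path.exists(fspath):
            return False
        directory, filename = os.path.split(os.path.abspath(fspath))
        return filename in os.listdir(directory)

    print(cs_path_exists(__file__))          # True
    print(cs_path_exists(__file__.upper()))  # False on case-sensitive filesystems
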
diff --git a/contrib/python/setuptools/py3/setuptools/command/register.py b/contrib/python/setuptools/py3/setuptools/command/register.py
deleted file mode 100644
index b8266b9a60f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/register.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from distutils import log
-import distutils.command.register as orig
-
-from setuptools.errors import RemovedCommandError
-
-
-class register(orig.register):
- """Formerly used to register packages on PyPI."""
-
- def run(self):
- msg = (
- "The register command has been removed, use twine to upload "
- + "instead (https://pypi.org/p/twine)"
- )
-
- self.announce("ERROR: " + msg, log.ERROR)
-
- raise RemovedCommandError(msg)
diff --git a/contrib/python/setuptools/py3/setuptools/command/rotate.py b/contrib/python/setuptools/py3/setuptools/command/rotate.py
deleted file mode 100644
index 74795ba922b..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/rotate.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
-import shutil
-
-from setuptools import Command
-
-
-class rotate(Command):
- """Delete older distributions"""
-
- description = "delete older distributions, keeping N newest files"
- user_options = [
- ('match=', 'm', "patterns to match (required)"),
- ('dist-dir=', 'd', "directory where the distributions are"),
- ('keep=', 'k', "number of matching distributions to keep"),
- ]
-
- boolean_options = []
-
- def initialize_options(self):
- self.match = None
- self.dist_dir = None
- self.keep = None
-
- def finalize_options(self):
- if self.match is None:
- raise DistutilsOptionError(
- "Must specify one or more (comma-separated) match patterns "
- "(e.g. '.zip' or '.egg')"
- )
- if self.keep is None:
- raise DistutilsOptionError("Must specify number of files to keep")
- try:
- self.keep = int(self.keep)
- except ValueError as e:
- raise DistutilsOptionError("--keep must be an integer") from e
- if isinstance(self.match, str):
- self.match = [
- convert_path(p.strip()) for p in self.match.split(',')
- ]
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- def run(self):
- self.run_command("egg_info")
- from glob import glob
-
- for pattern in self.match:
- pattern = self.distribution.get_name() + '*' + pattern
- files = glob(os.path.join(self.dist_dir, pattern))
- files = [(os.path.getmtime(f), f) for f in files]
- files.sort()
- files.reverse()
-
- log.info("%d file(s) matching %s", len(files), pattern)
- files = files[self.keep:]
- for (t, f) in files:
- log.info("Deleting %s", f)
- if not self.dry_run:
- if os.path.isdir(f):
- shutil.rmtree(f)
- else:
- os.unlink(f)
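
rotate ranks matching distributions by modification time and deletes everything past the newest N. The core selection, reduced to a sketch (directory, name, and pattern are hypothetical):

    import os
    from glob import glob

    def files_to_delete(dist_dir, name, pattern, keep):
        # newest first, by mtime, as in rotate.run()
        matches = glob(os.path.join(dist_dir, name + '*' + pattern))
        ranked = sorted(matches, key=os.path.getmtime, reverse=True)
        return ranked[keep:]

    # e.g. files_to_delete('dist', 'mypkg', '.whl', keep=2)
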
diff --git a/contrib/python/setuptools/py3/setuptools/command/saveopts.py b/contrib/python/setuptools/py3/setuptools/command/saveopts.py
deleted file mode 100644
index 611cec55286..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/saveopts.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from setuptools.command.setopt import edit_config, option_base
-
-
-class saveopts(option_base):
- """Save command-line options to a file"""
-
- description = "save supplied options to setup.cfg or other config file"
-
- def run(self):
- dist = self.distribution
- settings = {}
-
- for cmd in dist.command_options:
-
- if cmd == 'saveopts':
- continue # don't save our own options!
-
- for opt, (src, val) in dist.get_option_dict(cmd).items():
- if src == "command line":
- settings.setdefault(cmd, {})[opt] = val
-
- edit_config(self.filename, settings, self.dry_run)
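
saveopts walks dist.command_options, which maps each command to {option: (source, value)} pairs, and keeps only settings that arrived on the command line. A sketch of that filter over hypothetical data:

    dist_options = {
        'bdist_wheel': {
            'universal': ('command line', '1'),
            'plat_name': ('setup.cfg', 'win32'),
        },
    }

    settings = {}
    for cmd, opts in dist_options.items():
        for opt, (src, val) in opts.items():
            if src == 'command line':
                settings.setdefault(cmd, {})[opt] = val

    print(settings)  # {'bdist_wheel': {'universal': '1'}}
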
diff --git a/contrib/python/setuptools/py3/setuptools/command/sdist.py b/contrib/python/setuptools/py3/setuptools/command/sdist.py
deleted file mode 100644
index 0285b690fc7..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/sdist.py
+++ /dev/null
@@ -1,196 +0,0 @@
-from distutils import log
-import distutils.command.sdist as orig
-import os
-import sys
-import io
-import contextlib
-
-from .py36compat import sdist_add_defaults
-
-import pkg_resources
-
-_default_revctrl = list
-
-
-def walk_revctrl(dirname=''):
- """Find all files under revision control"""
- for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
- for item in ep.load()(dirname):
- yield item
-
-
-class sdist(sdist_add_defaults, orig.sdist):
- """Smart sdist that finds anything supported by revision control"""
-
- user_options = [
- ('formats=', None,
- "formats for source distribution (comma-separated list)"),
- ('keep-temp', 'k',
- "keep the distribution tree around after creating " +
- "archive file(s)"),
- ('dist-dir=', 'd',
- "directory to put the source distribution archive(s) in "
- "[default: dist]"),
- ('owner=', 'u',
- "Owner name used when creating a tar file [default: current user]"),
- ('group=', 'g',
- "Group name used when creating a tar file [default: current group]"),
- ]
-
- negative_opt = {}
-
- README_EXTENSIONS = ['', '.rst', '.txt', '.md']
- READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
-
- def run(self):
- self.run_command('egg_info')
- ei_cmd = self.get_finalized_command('egg_info')
- self.filelist = ei_cmd.filelist
- self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
- self.check_readme()
-
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- self.make_distribution()
-
- dist_files = getattr(self.distribution, 'dist_files', [])
- for file in self.archive_files:
- data = ('sdist', '', file)
- if data not in dist_files:
- dist_files.append(data)
-
- def initialize_options(self):
- orig.sdist.initialize_options(self)
-
- self._default_to_gztar()
-
- def _default_to_gztar(self):
- # only needed on Python prior to 3.6.
- if sys.version_info >= (3, 6, 0, 'beta', 1):
- return
- self.formats = ['gztar']
-
- def make_distribution(self):
- """
- Workaround for #516
- """
- with self._remove_os_link():
- orig.sdist.make_distribution(self)
-
- @staticmethod
- @contextlib.contextmanager
- def _remove_os_link():
- """
- In a context, remove and restore os.link if it exists
- """
-
- class NoValue:
- pass
-
- orig_val = getattr(os, 'link', NoValue)
- try:
- del os.link
- except Exception:
- pass
- try:
- yield
- finally:
- if orig_val is not NoValue:
- setattr(os, 'link', orig_val)
-
- def _add_defaults_optional(self):
- super()._add_defaults_optional()
- if os.path.isfile('pyproject.toml'):
- self.filelist.append('pyproject.toml')
-
- def _add_defaults_python(self):
- """getting python files"""
- if self.distribution.has_pure_modules():
- build_py = self.get_finalized_command('build_py')
- self.filelist.extend(build_py.get_source_files())
- self._add_data_files(self._safe_data_files(build_py))
-
- def _safe_data_files(self, build_py):
- """
- Since the ``sdist`` class is also used to compute the MANIFEST
- (via :obj:`setuptools.command.egg_info.manifest_maker`),
- there might be recursion problems when trying to obtain the list of
- data_files and ``include_package_data=True`` (which in turn depends on
- the files included in the MANIFEST).
-
-        To avoid that, ``manifest_maker`` should be able to override this
- method and avoid recursive attempts to build/analyze the MANIFEST.
- """
- return build_py.data_files
-
- def _add_data_files(self, data_files):
- """
- Add data files as found in build_py.data_files.
- """
- self.filelist.extend(
- os.path.join(src_dir, name)
- for _, src_dir, _, filenames in data_files
- for name in filenames
- )
-
- def _add_defaults_data_files(self):
- try:
- super()._add_defaults_data_files()
- except TypeError:
- log.warn("data_files contains unexpected objects")
-
- def check_readme(self):
- for f in self.READMES:
- if os.path.exists(f):
- return
- else:
- self.warn(
- "standard file not found: should have one of " +
- ', '.join(self.READMES)
- )
-
- def make_release_tree(self, base_dir, files):
- orig.sdist.make_release_tree(self, base_dir, files)
-
- # Save any egg_info command line options used to create this sdist
- dest = os.path.join(base_dir, 'setup.cfg')
- if hasattr(os, 'link') and os.path.exists(dest):
- # unlink and re-copy, since it might be hard-linked, and
- # we don't want to change the source version
- os.unlink(dest)
- self.copy_file('setup.cfg', dest)
-
- self.get_finalized_command('egg_info').save_version_info(dest)
-
- def _manifest_is_not_generated(self):
- # check for special comment used in 2.7.1 and higher
- if not os.path.isfile(self.manifest):
- return False
-
- with io.open(self.manifest, 'rb') as fp:
- first_line = fp.readline()
- return (first_line !=
- '# file GENERATED by distutils, do NOT edit\n'.encode())
-
- def read_manifest(self):
- """Read the manifest file (named by 'self.manifest') and use it to
- fill in 'self.filelist', the list of files to include in the source
- distribution.
- """
- log.info("reading manifest file '%s'", self.manifest)
- manifest = open(self.manifest, 'rb')
- for line in manifest:
- # The manifest must contain UTF-8. See #303.
- try:
- line = line.decode('UTF-8')
- except UnicodeDecodeError:
- log.warn("%r not UTF-8 decodable -- skipping" % line)
- continue
- # ignore comments and blank lines
- line = line.strip()
- if line.startswith('#') or not line:
- continue
- self.filelist.append(line)
- manifest.close()
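
_remove_os_link hides os.link so that tarfile falls back to copying instead of hard-linking (the workaround for issue #516 mentioned above). The remove-and-restore pattern, as a self-contained sketch:

    import os
    import contextlib

    @contextlib.contextmanager
    def remove_os_link():
        missing = object()
        orig = getattr(os, 'link', missing)
        with contextlib.suppress(AttributeError):
            del os.link
        try:
            yield
        finally:
            if orig is not missing:
                os.link = orig

    with remove_os_link():
        print(hasattr(os, 'link'))  # False inside the context
    print(hasattr(os, 'link'))      # restored (where the platform has it)
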
diff --git a/contrib/python/setuptools/py3/setuptools/command/setopt.py b/contrib/python/setuptools/py3/setuptools/command/setopt.py
deleted file mode 100644
index 6358c0451b2..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/setopt.py
+++ /dev/null
@@ -1,149 +0,0 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import distutils
-import os
-import configparser
-
-from setuptools import Command
-
-__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
-
-
-def config_file(kind="local"):
- """Get the filename of the distutils, local, global, or per-user config
-
- `kind` must be one of "local", "global", or "user"
- """
- if kind == 'local':
- return 'setup.cfg'
- if kind == 'global':
- return os.path.join(
- os.path.dirname(distutils.__file__), 'distutils.cfg'
- )
- if kind == 'user':
- dot = os.name == 'posix' and '.' or ''
- return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
- raise ValueError(
- "config_file() type must be 'local', 'global', or 'user'", kind
- )
-
-
-def edit_config(filename, settings, dry_run=False):
- """Edit a configuration file to include `settings`
-
- `settings` is a dictionary of dictionaries or ``None`` values, keyed by
- command/section name. A ``None`` value means to delete the entire section,
- while a dictionary lists settings to be changed or deleted in that section.
- A setting of ``None`` means to delete that setting.
- """
- log.debug("Reading configuration from %s", filename)
- opts = configparser.RawConfigParser()
- opts.optionxform = lambda x: x
- opts.read([filename])
- for section, options in settings.items():
- if options is None:
- log.info("Deleting section [%s] from %s", section, filename)
- opts.remove_section(section)
- else:
- if not opts.has_section(section):
- log.debug("Adding new section [%s] to %s", section, filename)
- opts.add_section(section)
- for option, value in options.items():
- if value is None:
- log.debug(
- "Deleting %s.%s from %s",
- section, option, filename
- )
- opts.remove_option(section, option)
- if not opts.options(section):
- log.info("Deleting empty [%s] section from %s",
- section, filename)
- opts.remove_section(section)
- else:
- log.debug(
- "Setting %s.%s to %r in %s",
- section, option, value, filename
- )
- opts.set(section, option, value)
-
- log.info("Writing %s", filename)
- if not dry_run:
- with open(filename, 'w') as f:
- opts.write(f)
-
-
-class option_base(Command):
- """Abstract base class for commands that mess with config files"""
-
- user_options = [
- ('global-config', 'g',
- "save options to the site-wide distutils.cfg file"),
- ('user-config', 'u',
- "save options to the current user's pydistutils.cfg file"),
- ('filename=', 'f',
- "configuration file to use (default=setup.cfg)"),
- ]
-
- boolean_options = [
- 'global-config', 'user-config',
- ]
-
- def initialize_options(self):
- self.global_config = None
- self.user_config = None
- self.filename = None
-
- def finalize_options(self):
- filenames = []
- if self.global_config:
- filenames.append(config_file('global'))
- if self.user_config:
- filenames.append(config_file('user'))
- if self.filename is not None:
- filenames.append(self.filename)
- if not filenames:
- filenames.append(config_file('local'))
- if len(filenames) > 1:
- raise DistutilsOptionError(
- "Must specify only one configuration file option",
- filenames
- )
- self.filename, = filenames
-
-
-class setopt(option_base):
- """Save command-line options to a file"""
-
- description = "set an option in setup.cfg or another config file"
-
- user_options = [
- ('command=', 'c', 'command to set an option for'),
- ('option=', 'o', 'option to set'),
- ('set-value=', 's', 'value of the option'),
- ('remove', 'r', 'remove (unset) the value'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.command = None
- self.option = None
- self.set_value = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.command is None or self.option is None:
- raise DistutilsOptionError("Must specify --command *and* --option")
- if self.set_value is None and not self.remove:
- raise DistutilsOptionError("Must specify --set-value or --remove")
-
- def run(self):
- edit_config(
- self.filename, {
- self.command: {self.option.replace('-', '_'): self.set_value}
- },
- self.dry_run
- )
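
edit_config is a thin layer over configparser with optionxform disabled so option names keep their case. A sketch of the same round-trip on an in-memory config (section and values are hypothetical):

    import configparser
    import io

    opts = configparser.RawConfigParser()
    opts.optionxform = lambda x: x  # preserve option-name case, as edit_config does
    opts.read_string('[metadata]\nname = demo\n')

    # roughly what `setopt -c metadata -o summary -s "A demo"` would do
    if not opts.has_section('metadata'):
        opts.add_section('metadata')
    opts.set('metadata', 'summary', 'A demo')

    buf = io.StringIO()
    opts.write(buf)
    print(buf.getvalue())
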
diff --git a/contrib/python/setuptools/py3/setuptools/command/test.py b/contrib/python/setuptools/py3/setuptools/command/test.py
deleted file mode 100644
index 4a389e4d071..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/test.py
+++ /dev/null
@@ -1,252 +0,0 @@
-import os
-import operator
-import sys
-import contextlib
-import itertools
-import unittest
-from distutils.errors import DistutilsError, DistutilsOptionError
-from distutils import log
-from unittest import TestLoader
-
-from pkg_resources import (
- resource_listdir,
- resource_exists,
- normalize_path,
- working_set,
- evaluate_marker,
- add_activation_listener,
- require,
- EntryPoint,
-)
-from setuptools import Command
-from setuptools.extern.more_itertools import unique_everseen
-
-
-class ScanningLoader(TestLoader):
- def __init__(self):
- TestLoader.__init__(self)
- self._visited = set()
-
- def loadTestsFromModule(self, module, pattern=None):
- """Return a suite of all tests cases contained in the given module
-
- If the module is a package, load tests from all the modules in it.
- If the module has an ``additional_tests`` function, call it and add
- the return value to the tests.
- """
- if module in self._visited:
- return None
- self._visited.add(module)
-
- tests = []
- tests.append(TestLoader.loadTestsFromModule(self, module))
-
- if hasattr(module, "additional_tests"):
- tests.append(module.additional_tests())
-
- if hasattr(module, '__path__'):
- for file in resource_listdir(module.__name__, ''):
- if file.endswith('.py') and file != '__init__.py':
- submodule = module.__name__ + '.' + file[:-3]
- else:
- if resource_exists(module.__name__, file + '/__init__.py'):
- submodule = module.__name__ + '.' + file
- else:
- continue
- tests.append(self.loadTestsFromName(submodule))
-
- if len(tests) != 1:
- return self.suiteClass(tests)
- else:
- return tests[0] # don't create a nested suite for only one return
-
-
-# adapted from jaraco.classes.properties:NonDataProperty
-class NonDataProperty:
- def __init__(self, fget):
- self.fget = fget
-
- def __get__(self, obj, objtype=None):
- if obj is None:
- return self
- return self.fget(obj)
-
-
-class test(Command):
- """Command to run unit tests after in-place build"""
-
- description = "run unit tests after in-place build (deprecated)"
-
- user_options = [
- ('test-module=', 'm', "Run 'test_suite' in specified module"),
- (
- 'test-suite=',
- 's',
- "Run single test, case or suite (e.g. 'module.test_suite')",
- ),
- ('test-runner=', 'r', "Test runner to use"),
- ]
-
- def initialize_options(self):
- self.test_suite = None
- self.test_module = None
- self.test_loader = None
- self.test_runner = None
-
- def finalize_options(self):
-
- if self.test_suite and self.test_module:
- msg = "You may specify a module or a suite, but not both"
- raise DistutilsOptionError(msg)
-
- if self.test_suite is None:
- if self.test_module is None:
- self.test_suite = self.distribution.test_suite
- else:
- self.test_suite = self.test_module + ".test_suite"
-
- if self.test_loader is None:
- self.test_loader = getattr(self.distribution, 'test_loader', None)
- if self.test_loader is None:
- self.test_loader = "setuptools.command.test:ScanningLoader"
- if self.test_runner is None:
- self.test_runner = getattr(self.distribution, 'test_runner', None)
-
- @NonDataProperty
- def test_args(self):
- return list(self._test_args())
-
- def _test_args(self):
- if not self.test_suite and sys.version_info >= (2, 7):
- yield 'discover'
- if self.verbose:
- yield '--verbose'
- if self.test_suite:
- yield self.test_suite
-
- def with_project_on_sys_path(self, func):
- """
- Backward compatibility for project_on_sys_path context.
- """
- with self.project_on_sys_path():
- func()
-
- @contextlib.contextmanager
- def project_on_sys_path(self, include_dists=[]):
- self.run_command('egg_info')
-
- # Build extensions in-place
- self.reinitialize_command('build_ext', inplace=1)
- self.run_command('build_ext')
-
- ei_cmd = self.get_finalized_command("egg_info")
-
- old_path = sys.path[:]
- old_modules = sys.modules.copy()
-
- try:
- project_path = normalize_path(ei_cmd.egg_base)
- sys.path.insert(0, project_path)
- working_set.__init__()
- add_activation_listener(lambda dist: dist.activate())
- require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
- with self.paths_on_pythonpath([project_path]):
- yield
- finally:
- sys.path[:] = old_path
- sys.modules.clear()
- sys.modules.update(old_modules)
- working_set.__init__()
-
- @staticmethod
- @contextlib.contextmanager
- def paths_on_pythonpath(paths):
- """
- Add the indicated paths to the head of the PYTHONPATH environment
- variable so that subprocesses will also see the packages at
- these paths.
-
- Do this in a context that restores the value on exit.
- """
- nothing = object()
- orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
- current_pythonpath = os.environ.get('PYTHONPATH', '')
- try:
- prefix = os.pathsep.join(unique_everseen(paths))
- to_join = filter(None, [prefix, current_pythonpath])
- new_path = os.pathsep.join(to_join)
- if new_path:
- os.environ['PYTHONPATH'] = new_path
- yield
- finally:
- if orig_pythonpath is nothing:
- os.environ.pop('PYTHONPATH', None)
- else:
- os.environ['PYTHONPATH'] = orig_pythonpath
-
- @staticmethod
- def install_dists(dist):
- """
- Install the requirements indicated by self.distribution and
- return an iterable of the dists that were built.
- """
- ir_d = dist.fetch_build_eggs(dist.install_requires)
- tr_d = dist.fetch_build_eggs(dist.tests_require or [])
- er_d = dist.fetch_build_eggs(
- v
- for k, v in dist.extras_require.items()
- if k.startswith(':') and evaluate_marker(k[1:])
- )
- return itertools.chain(ir_d, tr_d, er_d)
-
- def run(self):
- self.announce(
- "WARNING: Testing via this command is deprecated and will be "
- "removed in a future version. Users looking for a generic test "
- "entry point independent of test runner are encouraged to use "
- "tox.",
- log.WARN,
- )
-
- installed_dists = self.install_dists(self.distribution)
-
- cmd = ' '.join(self._argv)
- if self.dry_run:
- self.announce('skipping "%s" (dry run)' % cmd)
- return
-
- self.announce('running "%s"' % cmd)
-
- paths = map(operator.attrgetter('location'), installed_dists)
- with self.paths_on_pythonpath(paths):
- with self.project_on_sys_path():
- self.run_tests()
-
- def run_tests(self):
- test = unittest.main(
- None,
- None,
- self._argv,
- testLoader=self._resolve_as_ep(self.test_loader),
- testRunner=self._resolve_as_ep(self.test_runner),
- exit=False,
- )
- if not test.result.wasSuccessful():
- msg = 'Test failed: %s' % test.result
- self.announce(msg, log.ERROR)
- raise DistutilsError(msg)
-
- @property
- def _argv(self):
- return ['unittest'] + self.test_args
-
- @staticmethod
- def _resolve_as_ep(val):
- """
-        Load the indicated attribute value and call it, as if it were
- specified as an entry point.
- """
- if val is None:
- return
- parsed = EntryPoint.parse("x=" + val)
- return parsed.resolve()()
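
paths_on_pythonpath above prepends paths to PYTHONPATH so spawned subprocesses see them, then restores the original value, distinguishing "unset" from "empty". The same pattern as a standalone sketch (the path is hypothetical):

    import os
    import contextlib

    @contextlib.contextmanager
    def paths_on_pythonpath(paths):
        missing = object()
        orig = os.environ.get('PYTHONPATH', missing)
        current = os.environ.get('PYTHONPATH', '')
        try:
            prefix = os.pathsep.join(paths)
            joined = os.pathsep.join(filter(None, [prefix, current]))
            if joined:
                os.environ['PYTHONPATH'] = joined
            yield
        finally:
            if orig is missing:
                os.environ.pop('PYTHONPATH', None)
            else:
                os.environ['PYTHONPATH'] = orig

    with paths_on_pythonpath(['/tmp/project']):
        print(os.environ['PYTHONPATH'])
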
diff --git a/contrib/python/setuptools/py3/setuptools/command/upload.py b/contrib/python/setuptools/py3/setuptools/command/upload.py
deleted file mode 100644
index ec7f81e2277..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/upload.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from distutils import log
-from distutils.command import upload as orig
-
-from setuptools.errors import RemovedCommandError
-
-
-class upload(orig.upload):
- """Formerly used to upload packages to PyPI."""
-
- def run(self):
- msg = (
- "The upload command has been removed, use twine to upload "
- + "instead (https://pypi.org/p/twine)"
- )
-
- self.announce("ERROR: " + msg, log.ERROR)
- raise RemovedCommandError(msg)
diff --git a/contrib/python/setuptools/py3/setuptools/command/upload_docs.py b/contrib/python/setuptools/py3/setuptools/command/upload_docs.py
deleted file mode 100644
index 845bff4421f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/command/upload_docs.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# -*- coding: utf-8 -*-
-"""upload_docs
-
-Implements a Distutils 'upload_docs' subcommand (upload documentation to
-sites other than PyPI, such as devpi).
-"""
-
-from base64 import standard_b64encode
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
-import socket
-import zipfile
-import tempfile
-import shutil
-import itertools
-import functools
-import http.client
-import urllib.parse
-
-from pkg_resources import iter_entry_points
-from .upload import upload
-
-
-def _encode(s):
- return s.encode('utf-8', 'surrogateescape')
-
-
-class upload_docs(upload):
- # override the default repository as upload_docs isn't
- # supported by Warehouse (and won't be).
- DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
-
-    description = 'Upload documentation to sites other than PyPI, such as devpi'
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
- ('show-response', None,
- 'display full response text from server'),
- ('upload-dir=', None, 'directory to upload'),
- ]
- boolean_options = upload.boolean_options
-
- def has_sphinx(self):
- if self.upload_dir is None:
- for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
- return True
-
- sub_commands = [('build_sphinx', has_sphinx)]
-
- def initialize_options(self):
- upload.initialize_options(self)
- self.upload_dir = None
- self.target_dir = None
-
- def finalize_options(self):
- upload.finalize_options(self)
- if self.upload_dir is None:
- if self.has_sphinx():
- build_sphinx = self.get_finalized_command('build_sphinx')
- self.target_dir = dict(build_sphinx.builder_target_dirs)['html']
- else:
- build = self.get_finalized_command('build')
- self.target_dir = os.path.join(build.build_base, 'docs')
- else:
- self.ensure_dirname('upload_dir')
- self.target_dir = self.upload_dir
- if 'pypi.python.org' in self.repository:
- log.warn("Upload_docs command is deprecated for PyPi. Use RTD instead.")
- self.announce('Using upload directory %s' % self.target_dir)
-
- def create_zipfile(self, filename):
- zip_file = zipfile.ZipFile(filename, "w")
- try:
- self.mkpath(self.target_dir) # just in case
- for root, dirs, files in os.walk(self.target_dir):
- if root == self.target_dir and not files:
- tmpl = "no files found in upload directory '%s'"
- raise DistutilsOptionError(tmpl % self.target_dir)
- for name in files:
- full = os.path.join(root, name)
- relative = root[len(self.target_dir):].lstrip(os.path.sep)
- dest = os.path.join(relative, name)
- zip_file.write(full, dest)
- finally:
- zip_file.close()
-
- def run(self):
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- tmp_dir = tempfile.mkdtemp()
- name = self.distribution.metadata.get_name()
- zip_file = os.path.join(tmp_dir, "%s.zip" % name)
- try:
- self.create_zipfile(zip_file)
- self.upload_file(zip_file)
- finally:
- shutil.rmtree(tmp_dir)
-
- @staticmethod
- def _build_part(item, sep_boundary):
- key, values = item
- title = '\nContent-Disposition: form-data; name="%s"' % key
- # handle multiple entries for the same name
- if not isinstance(values, list):
- values = [values]
- for value in values:
- if isinstance(value, tuple):
- title += '; filename="%s"' % value[0]
- value = value[1]
- else:
- value = _encode(value)
- yield sep_boundary
- yield _encode(title)
- yield b"\n\n"
- yield value
- if value and value[-1:] == b'\r':
- yield b'\n' # write an extra newline (lurve Macs)
-
- @classmethod
- def _build_multipart(cls, data):
- """
- Build up the MIME payload for the POST data
- """
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = b'\n--' + boundary.encode('ascii')
- end_boundary = sep_boundary + b'--'
- end_items = end_boundary, b"\n",
- builder = functools.partial(
- cls._build_part,
- sep_boundary=sep_boundary,
- )
- part_groups = map(builder, data.items())
- parts = itertools.chain.from_iterable(part_groups)
- body_items = itertools.chain(parts, end_items)
- content_type = 'multipart/form-data; boundary=%s' % boundary
- return b''.join(body_items), content_type
-
- def upload_file(self, filename):
- with open(filename, 'rb') as f:
- content = f.read()
- meta = self.distribution.metadata
- data = {
- ':action': 'doc_upload',
- 'name': meta.get_name(),
- 'content': (os.path.basename(filename), content),
- }
- # set up the authentication
- credentials = _encode(self.username + ':' + self.password)
- credentials = standard_b64encode(credentials).decode('ascii')
- auth = "Basic " + credentials
-
- body, ct = self._build_multipart(data)
-
- msg = "Submitting documentation to %s" % (self.repository)
- self.announce(msg, log.INFO)
-
- # build the Request
- # We can't use urllib2 since we need to send the Basic
- # auth right with the first request
- schema, netloc, url, params, query, fragments = \
- urllib.parse.urlparse(self.repository)
- assert not params and not query and not fragments
- if schema == 'http':
- conn = http.client.HTTPConnection(netloc)
- elif schema == 'https':
- conn = http.client.HTTPSConnection(netloc)
- else:
- raise AssertionError("unsupported schema " + schema)
-
- data = ''
- try:
- conn.connect()
- conn.putrequest("POST", url)
- content_type = ct
- conn.putheader('Content-type', content_type)
- conn.putheader('Content-length', str(len(body)))
- conn.putheader('Authorization', auth)
- conn.endheaders()
- conn.send(body)
- except socket.error as e:
- self.announce(str(e), log.ERROR)
- return
-
- r = conn.getresponse()
- if r.status == 200:
- msg = 'Server response (%s): %s' % (r.status, r.reason)
- self.announce(msg, log.INFO)
- elif r.status == 301:
- location = r.getheader('Location')
- if location is None:
- location = 'https://pythonhosted.org/%s/' % meta.get_name()
- msg = 'Upload successful. Visit %s' % location
- self.announce(msg, log.INFO)
- else:
- msg = 'Upload failed (%s): %s' % (r.status, r.reason)
- self.announce(msg, log.ERROR)
- if self.show_response:
- print('-' * 75, r.read(), '-' * 75)
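
_build_multipart assembles the multipart/form-data body by hand because the Basic auth header has to go out with the very first request. A reduced sketch of the same framing (boundary and field values are hypothetical):

    boundary = b'--------------BOUNDARY1234567890'
    sep = b'\n--' + boundary

    def part(name, value, filename=None):
        disposition = b'\nContent-Disposition: form-data; name="%s"' % name
        if filename:
            disposition += b'; filename="%s"' % filename
        return sep + disposition + b'\n\n' + value

    body = b''.join([
        part(b':action', b'doc_upload'),
        part(b'name', b'demo'),
        part(b'content', b'<zip bytes>', filename=b'demo.zip'),
        sep + b'--', b'\n',
    ])
    content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
    print(content_type, len(body))
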
diff --git a/contrib/python/setuptools/py3/setuptools/config.py b/contrib/python/setuptools/py3/setuptools/config.py
deleted file mode 100644
index b4e968e5cae..00000000000
--- a/contrib/python/setuptools/py3/setuptools/config.py
+++ /dev/null
@@ -1,751 +0,0 @@
-import ast
-import io
-import os
-import sys
-
-import warnings
-import functools
-import importlib
-from collections import defaultdict
-from functools import partial
-from functools import wraps
-from glob import iglob
-import contextlib
-
-from distutils.errors import DistutilsOptionError, DistutilsFileError
-from setuptools.extern.packaging.version import Version, InvalidVersion
-from setuptools.extern.packaging.specifiers import SpecifierSet
-
-
-class StaticModule:
- """
-    Attempt to parse the named module's source statically, without importing it
- """
-
- def __init__(self, name):
- spec = importlib.util.find_spec(name)
- with open(spec.origin) as strm:
- src = strm.read()
- module = ast.parse(src)
- vars(self).update(locals())
- del self.self
-
- def __getattr__(self, attr):
- try:
- return next(
- ast.literal_eval(statement.value)
- for statement in self.module.body
- if isinstance(statement, ast.Assign)
- for target in statement.targets
- if isinstance(target, ast.Name) and target.id == attr
- )
- except Exception as e:
- raise AttributeError(
- "{self.name} has no attribute {attr}".format(**locals())
- ) from e
-
-
[email protected]
-def patch_path(path):
- """
- Add path to front of sys.path for the duration of the context.
- """
- try:
- sys.path.insert(0, path)
- yield
- finally:
- sys.path.remove(path)
-
-
-def read_configuration(filepath, find_others=False, ignore_option_errors=False):
- """Read given configuration file and returns options from it as a dict.
-
- :param str|unicode filepath: Path to configuration file
- to get options from.
-
- :param bool find_others: Whether to search for other configuration files
-        which could be in various places.
-
- :param bool ignore_option_errors: Whether to silently ignore
- options, values of which could not be resolved (e.g. due to exceptions
- in directives such as file:, attr:, etc.).
- If False exceptions are propagated as expected.
-
- :rtype: dict
- """
- from setuptools.dist import Distribution, _Distribution
-
- filepath = os.path.abspath(filepath)
-
- if not os.path.isfile(filepath):
- raise DistutilsFileError('Configuration file %s does not exist.' % filepath)
-
- current_directory = os.getcwd()
- os.chdir(os.path.dirname(filepath))
-
- try:
- dist = Distribution()
-
- filenames = dist.find_config_files() if find_others else []
- if filepath not in filenames:
- filenames.append(filepath)
-
- _Distribution.parse_config_files(dist, filenames=filenames)
-
- handlers = parse_configuration(
- dist, dist.command_options, ignore_option_errors=ignore_option_errors
- )
-
- finally:
- os.chdir(current_directory)
-
- return configuration_to_dict(handlers)
-
-
-def _get_option(target_obj, key):
- """
- Given a target object and option key, get that option from
- the target object, either through a get_{key} method or
- from an attribute directly.
- """
- getter_name = 'get_{key}'.format(**locals())
- by_attribute = functools.partial(getattr, target_obj, key)
- getter = getattr(target_obj, getter_name, by_attribute)
- return getter()
-
-
-def configuration_to_dict(handlers):
- """Returns configuration data gathered by given handlers as a dict.
-
- :param list[ConfigHandler] handlers: Handlers list,
- usually from parse_configuration()
-
- :rtype: dict
- """
- config_dict = defaultdict(dict)
-
- for handler in handlers:
- for option in handler.set_options:
- value = _get_option(handler.target_obj, option)
- config_dict[handler.section_prefix][option] = value
-
- return config_dict
-
-
-def parse_configuration(distribution, command_options, ignore_option_errors=False):
- """Performs additional parsing of configuration options
- for a distribution.
-
- Returns a list of used option handlers.
-
- :param Distribution distribution:
- :param dict command_options:
- :param bool ignore_option_errors: Whether to silently ignore
- options, values of which could not be resolved (e.g. due to exceptions
- in directives such as file:, attr:, etc.).
- If False exceptions are propagated as expected.
- :rtype: list
- """
- options = ConfigOptionsHandler(distribution, command_options, ignore_option_errors)
- options.parse()
-
- meta = ConfigMetadataHandler(
- distribution.metadata,
- command_options,
- ignore_option_errors,
- distribution.package_dir,
- )
- meta.parse()
-
- return meta, options
-
-
-class ConfigHandler:
- """Handles metadata supplied in configuration files."""
-
- section_prefix = None
- """Prefix for config sections handled by this handler.
-    Must be provided by subclasses.
-
- """
-
- aliases = {}
- """Options aliases.
- For compatibility with various packages. E.g.: d2to1 and pbr.
- Note: `-` in keys is replaced with `_` by config parser.
-
- """
-
- def __init__(self, target_obj, options, ignore_option_errors=False):
- sections = {}
-
- section_prefix = self.section_prefix
- for section_name, section_options in options.items():
- if not section_name.startswith(section_prefix):
- continue
-
- section_name = section_name.replace(section_prefix, '').strip('.')
- sections[section_name] = section_options
-
- self.ignore_option_errors = ignore_option_errors
- self.target_obj = target_obj
- self.sections = sections
- self.set_options = []
-
- @property
- def parsers(self):
- """Metadata item name to parser function mapping."""
- raise NotImplementedError(
- '%s must provide .parsers property' % self.__class__.__name__
- )
-
- def __setitem__(self, option_name, value):
- unknown = tuple()
- target_obj = self.target_obj
-
- # Translate alias into real name.
- option_name = self.aliases.get(option_name, option_name)
-
- current_value = getattr(target_obj, option_name, unknown)
-
- if current_value is unknown:
- raise KeyError(option_name)
-
- if current_value:
- # Already inhabited. Skipping.
- return
-
- skip_option = False
- parser = self.parsers.get(option_name)
- if parser:
- try:
- value = parser(value)
-
- except Exception:
- skip_option = True
- if not self.ignore_option_errors:
- raise
-
- if skip_option:
- return
-
- setter = getattr(target_obj, 'set_%s' % option_name, None)
- if setter is None:
- setattr(target_obj, option_name, value)
- else:
- setter(value)
-
- self.set_options.append(option_name)
-
- @classmethod
- def _parse_list(cls, value, separator=','):
- """Represents value as a list.
-
- Value is split either by separator (defaults to comma) or by lines.
-
- :param value:
- :param separator: List items separator character.
- :rtype: list
- """
- if isinstance(value, list): # _get_parser_compound case
- return value
-
- if '\n' in value:
- value = value.splitlines()
- else:
- value = value.split(separator)
-
- return [chunk.strip() for chunk in value if chunk.strip()]
-
- @classmethod
- def _parse_list_glob(cls, value, separator=','):
- """Equivalent to _parse_list() but expands any glob patterns using glob().
-
- However, unlike with glob() calls, the results remain relative paths.
-
- :param value:
- :param separator: List items separator character.
- :rtype: list
- """
- glob_characters = ('*', '?', '[', ']', '{', '}')
- values = cls._parse_list(value, separator=separator)
- expanded_values = []
- for value in values:
-
- # Has globby characters?
- if any(char in value for char in glob_characters):
- # then expand the glob pattern while keeping paths *relative*:
- expanded_values.extend(sorted(
- os.path.relpath(path, os.getcwd())
- for path in iglob(os.path.abspath(value))))
-
- else:
- # take the value as-is:
- expanded_values.append(value)
-
- return expanded_values
-
- @classmethod
- def _parse_dict(cls, value):
- """Represents value as a dict.
-
- :param value:
- :rtype: dict
- """
- separator = '='
- result = {}
- for line in cls._parse_list(value):
- key, sep, val = line.partition(separator)
- if sep != separator:
- raise DistutilsOptionError(
- 'Unable to parse option value to dict: %s' % value
- )
- result[key.strip()] = val.strip()
-
- return result
-
- @classmethod
- def _parse_bool(cls, value):
- """Represents value as boolean.
-
- :param value:
- :rtype: bool
- """
- value = value.lower()
- return value in ('1', 'true', 'yes')
-
- @classmethod
- def _exclude_files_parser(cls, key):
- """Returns a parser function to make sure field inputs
- are not files.
-
- Parses a value after getting the key so error messages are
- more informative.
-
- :param key:
- :rtype: callable
- """
-
- def parser(value):
- exclude_directive = 'file:'
- if value.startswith(exclude_directive):
- raise ValueError(
- 'Only strings are accepted for the {0} field, '
- 'files are not accepted'.format(key)
- )
- return value
-
- return parser
-
- @classmethod
- def _parse_file(cls, value):
- """Represents value as a string, allowing including text
- from nearest files using `file:` directive.
-
- Directive is sandboxed and won't reach anything outside
- directory with setup.py.
-
- Examples:
- file: README.rst, CHANGELOG.md, src/file.txt
-
- :param str value:
- :rtype: str
- """
- include_directive = 'file:'
-
- if not isinstance(value, str):
- return value
-
- if not value.startswith(include_directive):
- return value
-
- spec = value[len(include_directive) :]
- filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
- return '\n'.join(
- cls._read_file(path)
- for path in filepaths
- if (cls._assert_local(path) or True) and os.path.isfile(path)
- )
-
- @staticmethod
- def _assert_local(filepath):
- if not filepath.startswith(os.getcwd()):
- raise DistutilsOptionError('`file:` directive can not access %s' % filepath)
-
- @staticmethod
- def _read_file(filepath):
- with io.open(filepath, encoding='utf-8') as f:
- return f.read()
-
- @classmethod
- def _parse_attr(cls, value, package_dir=None):
- """Represents value as a module attribute.
-
- Examples:
- attr: package.attr
- attr: package.module.attr
-
- :param str value:
- :rtype: str
- """
- attr_directive = 'attr:'
- if not value.startswith(attr_directive):
- return value
-
- attrs_path = value.replace(attr_directive, '').strip().split('.')
- attr_name = attrs_path.pop()
-
- module_name = '.'.join(attrs_path)
- module_name = module_name or '__init__'
-
- parent_path = os.getcwd()
- if package_dir:
- if attrs_path[0] in package_dir:
- # A custom path was specified for the module we want to import
- custom_path = package_dir[attrs_path[0]]
- parts = custom_path.rsplit('/', 1)
- if len(parts) > 1:
- parent_path = os.path.join(os.getcwd(), parts[0])
- module_name = parts[1]
- else:
- module_name = custom_path
- elif '' in package_dir:
- # A custom parent directory was specified for all root modules
- parent_path = os.path.join(os.getcwd(), package_dir[''])
-
- with patch_path(parent_path):
- try:
- # attempt to load value statically
- return getattr(StaticModule(module_name), attr_name)
- except Exception:
- # fallback to simple import
- module = importlib.import_module(module_name)
-
- return getattr(module, attr_name)
-
- @classmethod
- def _get_parser_compound(cls, *parse_methods):
- """Returns parser function to represents value as a list.
-
- Parses a value applying given methods one after another.
-
- :param parse_methods:
- :rtype: callable
- """
-
- def parse(value):
- parsed = value
-
- for method in parse_methods:
- parsed = method(parsed)
-
- return parsed
-
- return parse
-
- @classmethod
- def _parse_section_to_dict(cls, section_options, values_parser=None):
- """Parses section options into a dictionary.
-
- Optionally applies a given parser to values.
-
- :param dict section_options:
- :param callable values_parser:
- :rtype: dict
- """
- value = {}
- values_parser = values_parser or (lambda val: val)
- for key, (_, val) in section_options.items():
- value[key] = values_parser(val)
- return value
-
- def parse_section(self, section_options):
- """Parses configuration file section.
-
- :param dict section_options:
- """
- for (name, (_, value)) in section_options.items():
- try:
- self[name] = value
-
- except KeyError:
- pass # Keep silent for a new option may appear anytime.
-
- def parse(self):
- """Parses configuration file items from one
- or more related sections.
-
- """
- for section_name, section_options in self.sections.items():
-
- method_postfix = ''
- if section_name: # [section.option] variant
- method_postfix = '_%s' % section_name
-
- section_parser_method = getattr(
- self,
- # Dots in section names are translated into dunderscores.
- ('parse_section%s' % method_postfix).replace('.', '__'),
- None,
- )
-
- if section_parser_method is None:
- raise DistutilsOptionError(
- 'Unsupported distribution option section: [%s.%s]'
- % (self.section_prefix, section_name)
- )
-
- section_parser_method(section_options)
-
- def _deprecated_config_handler(self, func, msg, warning_class):
- """this function will wrap around parameters that are deprecated
-
- :param msg: deprecation message
- :param warning_class: class of warning exception to be raised
- :param func: function to be wrapped around
- """
-
- @wraps(func)
- def config_handler(*args, **kwargs):
- warnings.warn(msg, warning_class)
- return func(*args, **kwargs)
-
- return config_handler
-
-
-class ConfigMetadataHandler(ConfigHandler):
-
- section_prefix = 'metadata'
-
- aliases = {
- 'home_page': 'url',
- 'summary': 'description',
- 'classifier': 'classifiers',
- 'platform': 'platforms',
- }
-
- strict_mode = False
- """We need to keep it loose, to be partially compatible with
-    `pbr` and `d2to1` packages, which also use the `metadata` section.
-
- """
-
- def __init__(
- self, target_obj, options, ignore_option_errors=False, package_dir=None
- ):
- super(ConfigMetadataHandler, self).__init__(
- target_obj, options, ignore_option_errors
- )
- self.package_dir = package_dir
-
- @property
- def parsers(self):
- """Metadata item name to parser function mapping."""
- parse_list = self._parse_list
- parse_file = self._parse_file
- parse_dict = self._parse_dict
- exclude_files_parser = self._exclude_files_parser
-
- return {
- 'platforms': parse_list,
- 'keywords': parse_list,
- 'provides': parse_list,
- 'requires': self._deprecated_config_handler(
- parse_list,
- "The requires parameter is deprecated, please use "
- "install_requires for runtime dependencies.",
- DeprecationWarning,
- ),
- 'obsoletes': parse_list,
- 'classifiers': self._get_parser_compound(parse_file, parse_list),
- 'license': exclude_files_parser('license'),
- 'license_file': self._deprecated_config_handler(
- exclude_files_parser('license_file'),
- "The license_file parameter is deprecated, "
- "use license_files instead.",
- DeprecationWarning,
- ),
- 'license_files': parse_list,
- 'description': parse_file,
- 'long_description': parse_file,
- 'version': self._parse_version,
- 'project_urls': parse_dict,
- }
-
- def _parse_version(self, value):
- """Parses `version` option value.
-
- :param value:
- :rtype: str
-
- """
- version = self._parse_file(value)
-
- if version != value:
- version = version.strip()
- # Be strict about versions loaded from file because it's easy to
- # accidentally include newlines and other unintended content
- try:
- Version(version)
- except InvalidVersion:
- tmpl = (
- 'Version loaded from {value} does not '
- 'comply with PEP 440: {version}'
- )
- raise DistutilsOptionError(tmpl.format(**locals()))
-
- return version
-
- version = self._parse_attr(value, self.package_dir)
-
- if callable(version):
- version = version()
-
- if not isinstance(version, str):
- if hasattr(version, '__iter__'):
- version = '.'.join(map(str, version))
- else:
- version = '%s' % version
-
- return version
-
-
-class ConfigOptionsHandler(ConfigHandler):
-
- section_prefix = 'options'
-
- @property
- def parsers(self):
- """Metadata item name to parser function mapping."""
- parse_list = self._parse_list
- parse_list_semicolon = partial(self._parse_list, separator=';')
- parse_bool = self._parse_bool
- parse_dict = self._parse_dict
- parse_cmdclass = self._parse_cmdclass
-
- return {
- 'zip_safe': parse_bool,
- 'include_package_data': parse_bool,
- 'package_dir': parse_dict,
- 'scripts': parse_list,
- 'eager_resources': parse_list,
- 'dependency_links': parse_list,
- 'namespace_packages': parse_list,
- 'install_requires': parse_list_semicolon,
- 'setup_requires': parse_list_semicolon,
- 'tests_require': parse_list_semicolon,
- 'packages': self._parse_packages,
- 'entry_points': self._parse_file,
- 'py_modules': parse_list,
- 'python_requires': SpecifierSet,
- 'cmdclass': parse_cmdclass,
- }
-
- def _parse_cmdclass(self, value):
- def resolve_class(qualified_class_name):
- idx = qualified_class_name.rfind('.')
- class_name = qualified_class_name[idx + 1 :]
- pkg_name = qualified_class_name[:idx]
-
- module = __import__(pkg_name)
-
- return getattr(module, class_name)
-
- return {k: resolve_class(v) for k, v in self._parse_dict(value).items()}
-
- def _parse_packages(self, value):
- """Parses `packages` option value.
-
- :param value:
- :rtype: list
- """
- find_directives = ['find:', 'find_namespace:']
- trimmed_value = value.strip()
-
- if trimmed_value not in find_directives:
- return self._parse_list(value)
-
- findns = trimmed_value == find_directives[1]
-
- # Read function arguments from a dedicated section.
- find_kwargs = self.parse_section_packages__find(
- self.sections.get('packages.find', {})
- )
-
- if findns:
- from setuptools import find_namespace_packages as find_packages
- else:
- from setuptools import find_packages
-
- return find_packages(**find_kwargs)
-
- def parse_section_packages__find(self, section_options):
- """Parses `packages.find` configuration file section.
-
- To be used in conjunction with _parse_packages().
-
- :param dict section_options:
- """
- section_data = self._parse_section_to_dict(section_options, self._parse_list)
-
- valid_keys = ['where', 'include', 'exclude']
-
- find_kwargs = dict(
- [(k, v) for k, v in section_data.items() if k in valid_keys and v]
- )
-
- where = find_kwargs.get('where')
- if where is not None:
- find_kwargs['where'] = where[0] # cast list to single val
-
- return find_kwargs
-
- def parse_section_entry_points(self, section_options):
- """Parses `entry_points` configuration file section.
-
- :param dict section_options:
- """
- parsed = self._parse_section_to_dict(section_options, self._parse_list)
- self['entry_points'] = parsed
-
- def _parse_package_data(self, section_options):
- parsed = self._parse_section_to_dict(section_options, self._parse_list)
-
- root = parsed.get('*')
- if root:
- parsed[''] = root
- del parsed['*']
-
- return parsed
-
- def parse_section_package_data(self, section_options):
- """Parses `package_data` configuration file section.
-
- :param dict section_options:
- """
- self['package_data'] = self._parse_package_data(section_options)
-
- def parse_section_exclude_package_data(self, section_options):
- """Parses `exclude_package_data` configuration file section.
-
- :param dict section_options:
- """
- self['exclude_package_data'] = self._parse_package_data(section_options)
-
- def parse_section_extras_require(self, section_options):
- """Parses `extras_require` configuration file section.
-
- :param dict section_options:
- """
- parse_list = partial(self._parse_list, separator=';')
- self['extras_require'] = self._parse_section_to_dict(
- section_options, parse_list
- )
-
- def parse_section_data_files(self, section_options):
- """Parses `data_files` configuration file section.
-
- :param dict section_options:
- """
- parsed = self._parse_section_to_dict(section_options, self._parse_list_glob)
- self['data_files'] = [(k, v) for k, v in parsed.items()]
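
The ConfigHandler parsers above give setup.cfg its value mini-language: newline- or comma-separated lists, key=value dicts, and loose booleans. Their behavior, condensed into standalone functions with sample inputs:

    def parse_list(value, separator=','):
        lines = value.splitlines() if '\n' in value else value.split(separator)
        return [chunk.strip() for chunk in lines if chunk.strip()]

    def parse_dict(value):
        result = {}
        for line in parse_list(value):
            key, sep, val = line.partition('=')
            if sep != '=':
                raise ValueError('not a key=value pair: %r' % line)
            result[key.strip()] = val.strip()
        return result

    def parse_bool(value):
        return value.lower() in ('1', 'true', 'yes')

    print(parse_list('\n  requests\n  attrs\n'))  # ['requests', 'attrs']
    print(parse_dict('\n  docs = docs_dir\n'))    # {'docs': 'docs_dir'}
    print(parse_bool('Yes'))                      # True
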
diff --git a/contrib/python/setuptools/py3/setuptools/dep_util.py b/contrib/python/setuptools/py3/setuptools/dep_util.py
deleted file mode 100644
index 521eb716a5e..00000000000
--- a/contrib/python/setuptools/py3/setuptools/dep_util.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from distutils.dep_util import newer_group
-
-
-# yes, this was almost entirely copy-pasted from
-# 'newer_pairwise()', this is just another convenience
-# function.
-def newer_pairwise_group(sources_groups, targets):
- """Walk both arguments in parallel, testing if each source group is newer
-    than its corresponding target. Returns a pair of lists (sources_groups,
-    targets) containing only the pairs whose source group is newer than its
-    target, according to the semantics of 'newer_group()'.
- """
- if len(sources_groups) != len(targets):
- raise ValueError(
- "'sources_group' and 'targets' must be the same length")
-
- # build a pair of lists (sources_groups, targets) where source is newer
- n_sources = []
- n_targets = []
- for i in range(len(sources_groups)):
- if newer_group(sources_groups[i], targets[i]):
- n_sources.append(sources_groups[i])
- n_targets.append(targets[i])
-
- return n_sources, n_targets
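
newer_pairwise_group keeps only the pairs where the source group is newer than its target, i.e. the target needs rebuilding. A runnable sketch of the same semantics (it skips the length check and builds throwaway files whose mtimes are forced apart):

    import os
    import tempfile
    from distutils.dep_util import newer_group

    def newer_pairwise_group(sources_groups, targets):
        kept = [(s, t) for s, t in zip(sources_groups, targets)
                if newer_group(s, t)]
        return [s for s, _ in kept], [t for _, t in kept]

    with tempfile.TemporaryDirectory() as d:
        src, tgt = os.path.join(d, 'a.c'), os.path.join(d, 'a.o')
        for path in (src, tgt):
            open(path, 'w').close()
        os.utime(src, (0, 2_000_000_000))  # push the source's mtime into the future
        print(newer_pairwise_group([[src]], [tgt]))
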
diff --git a/contrib/python/setuptools/py3/setuptools/depends.py b/contrib/python/setuptools/py3/setuptools/depends.py
deleted file mode 100644
index adffd12db8c..00000000000
--- a/contrib/python/setuptools/py3/setuptools/depends.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import sys
-import marshal
-import contextlib
-import dis
-
-from setuptools.extern.packaging import version
-
-from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
-from . import _imp
-
-
-__all__ = [
- 'Require', 'find_module', 'get_module_constant', 'extract_constant'
-]
-
-
-class Require:
- """A prerequisite to building or installing a distribution"""
-
- def __init__(
- self, name, requested_version, module, homepage='',
- attribute=None, format=None):
-
- if format is None and requested_version is not None:
- format = version.Version
-
- if format is not None:
- requested_version = format(requested_version)
- if attribute is None:
- attribute = '__version__'
-
- self.__dict__.update(locals())
- del self.self
-
- def full_name(self):
- """Return full package/distribution name, w/version"""
- if self.requested_version is not None:
- return '%s-%s' % (self.name, self.requested_version)
- return self.name
-
- def version_ok(self, version):
- """Is 'version' sufficiently up-to-date?"""
- return self.attribute is None or self.format is None or \
- (str(version) != "unknown" and self.format(version) >= self.requested_version)
-
- def get_version(self, paths=None, default="unknown"):
- """Get version number of installed module, 'None', or 'default'
-
- Search 'paths' for module. If not found, return 'None'. If found,
- return the extracted version attribute, or 'default' if no version
- attribute was specified, or the value cannot be determined without
- importing the module. The version is formatted according to the
- requirement's version format (if any), unless it is 'None' or the
- supplied 'default'.
- """
-
- if self.attribute is None:
- try:
- f, p, i = find_module(self.module, paths)
- if f:
- f.close()
- return default
- except ImportError:
- return None
-
- v = get_module_constant(self.module, self.attribute, default, paths)
-
- if v is not None and v is not default and self.format is not None:
- return self.format(v)
-
- return v
-
- def is_present(self, paths=None):
- """Return true if dependency is present on 'paths'"""
- return self.get_version(paths) is not None
-
- def is_current(self, paths=None):
- """Return true if dependency is present and up-to-date on 'paths'"""
- version = self.get_version(paths)
- if version is None:
- return False
- return self.version_ok(str(version))
-
-
-def maybe_close(f):
- @contextlib.contextmanager
- def empty():
- yield
- return
- if not f:
- return empty()
-
- return contextlib.closing(f)
-
-
-def get_module_constant(module, symbol, default=-1, paths=None):
- """Find 'module' by searching 'paths', and extract 'symbol'
-
- Return 'None' if 'module' does not exist on 'paths', or if it does not
- define 'symbol'. If the module defines 'symbol' as a constant, return the
- constant. Otherwise, return 'default'."""
-
- try:
- f, path, (suffix, mode, kind) = info = find_module(module, paths)
- except ImportError:
- # Module doesn't exist
- return None
-
- with maybe_close(f):
- if kind == PY_COMPILED:
- f.read(16) # skip the pyc header (magic, flags, date, size per PEP 552)
- code = marshal.load(f)
- elif kind == PY_FROZEN:
- code = _imp.get_frozen_object(module, paths)
- elif kind == PY_SOURCE:
- code = compile(f.read(), path, 'exec')
- else:
- # Not something we can parse; we'll have to import it. :(
- imported = _imp.get_module(module, paths, info)
- return getattr(imported, symbol, None)
-
- return extract_constant(code, symbol, default)
-
-
-def extract_constant(code, symbol, default=-1):
- """Extract the constant value of 'symbol' from 'code'
-
- If the name 'symbol' is bound to a constant value by the Python code
- object 'code', return that value. If 'symbol' is bound to an expression,
- return 'default'. Otherwise, return 'None'.
-
- Return value is based on the first assignment to 'symbol'. 'symbol' must
- be a global, or at least a non-"fast" local in the code block. That is,
- only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
- must be present in 'code.co_names'.
- """
- if symbol not in code.co_names:
- # name's not there, can't possibly be an assignment
- return None
-
- name_idx = list(code.co_names).index(symbol)
-
- STORE_NAME = 90
- STORE_GLOBAL = 97
- LOAD_CONST = 100
-
- const = default
-
- for byte_code in dis.Bytecode(code):
- op = byte_code.opcode
- arg = byte_code.arg
-
- if op == LOAD_CONST:
- const = code.co_consts[arg]
- elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
- return const
- else:
- const = default
-
-
-def _update_globals():
- """
- Patch the globals to remove the objects not available on some platforms.
-
- XXX it'd be better to test assertions about bytecode instead.
- """
-
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- return
- incompatible = 'extract_constant', 'get_module_constant'
- for name in incompatible:
- del globals()[name]
- __all__.remove(name)
-
-
-_update_globals()
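
A hedged sketch of the Require API removed above; the module name and version are examples only:

    from setuptools.depends import Require

    req = Require('Json', '1.0', 'json', attribute='__version__')
    req.full_name()    # 'Json-1.0'
    req.is_present()   # True if a 'json' module can be located
    req.is_current()   # True if its __version__ compares >= 1.0
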
diff --git a/contrib/python/setuptools/py3/setuptools/dist.py b/contrib/python/setuptools/py3/setuptools/dist.py
deleted file mode 100644
index 37a10d1dcd9..00000000000
--- a/contrib/python/setuptools/py3/setuptools/dist.py
+++ /dev/null
@@ -1,1156 +0,0 @@
-# -*- coding: utf-8 -*-
-__all__ = ['Distribution']
-
-import io
-import sys
-import re
-import os
-import warnings
-import numbers
-import distutils.log
-import distutils.core
-import distutils.cmd
-import distutils.dist
-import distutils.command
-from distutils.util import strtobool
-from distutils.debug import DEBUG
-from distutils.fancy_getopt import translate_longopt
-from glob import iglob
-import itertools
-import textwrap
-from typing import List, Optional, TYPE_CHECKING
-
-from collections import defaultdict
-from email import message_from_file
-
-from distutils.errors import DistutilsOptionError, DistutilsSetupError
-from distutils.util import rfc822_escape
-
-from setuptools.extern import packaging
-from setuptools.extern import ordered_set
-from setuptools.extern.more_itertools import unique_everseen
-
-from . import SetuptoolsDeprecationWarning
-
-import setuptools
-import setuptools.command
-from setuptools import windows_support
-from setuptools.monkey import get_unpatched
-from setuptools.config import parse_configuration
-import pkg_resources
-from setuptools.extern.packaging import version
-
-if TYPE_CHECKING:
- from email.message import Message
-
-__import__('setuptools.extern.packaging.specifiers')
-__import__('setuptools.extern.packaging.version')
-
-
-def _get_unpatched(cls):
- warnings.warn("Do not call this function", DistDeprecationWarning)
- return get_unpatched(cls)
-
-
-def get_metadata_version(self):
- mv = getattr(self, 'metadata_version', None)
- if mv is None:
- mv = version.Version('2.1')
- self.metadata_version = mv
- return mv
-
-
-def rfc822_unescape(content: str) -> str:
- """Reverse RFC-822 escaping by removing leading whitespaces from content."""
- lines = content.splitlines()
- if len(lines) == 1:
- return lines[0].lstrip()
- return '\n'.join((lines[0].lstrip(), textwrap.dedent('\n'.join(lines[1:]))))
-
-
-def _read_field_from_msg(msg: "Message", field: str) -> Optional[str]:
- """Read Message header field."""
- value = msg[field]
- if value == 'UNKNOWN':
- return None
- return value
-
-
-def _read_field_unescaped_from_msg(msg: "Message", field: str) -> Optional[str]:
- """Read Message header field and apply rfc822_unescape."""
- value = _read_field_from_msg(msg, field)
- if value is None:
- return value
- return rfc822_unescape(value)
-
-
-def _read_list_from_msg(msg: "Message", field: str) -> Optional[List[str]]:
- """Read Message header field and return all results as list."""
- values = msg.get_all(field, None)
- if values == []:
- return None
- return values
-
-
-def _read_payload_from_msg(msg: "Message") -> Optional[str]:
- value = msg.get_payload().strip()
- if value == 'UNKNOWN':
- return None
- return value
-
-
-def read_pkg_file(self, file):
- """Reads the metadata values from a file object."""
- msg = message_from_file(file)
-
- self.metadata_version = version.Version(msg['metadata-version'])
- self.name = _read_field_from_msg(msg, 'name')
- self.version = _read_field_from_msg(msg, 'version')
- self.description = _read_field_from_msg(msg, 'summary')
- # we are filling author only.
- self.author = _read_field_from_msg(msg, 'author')
- self.maintainer = None
- self.author_email = _read_field_from_msg(msg, 'author-email')
- self.maintainer_email = None
- self.url = _read_field_from_msg(msg, 'home-page')
- self.license = _read_field_unescaped_from_msg(msg, 'license')
-
- if 'download-url' in msg:
- self.download_url = _read_field_from_msg(msg, 'download-url')
- else:
- self.download_url = None
-
- self.long_description = _read_field_unescaped_from_msg(msg, 'description')
- if (
- self.long_description is None and
- self.metadata_version >= version.Version('2.1')
- ):
- self.long_description = _read_payload_from_msg(msg)
- self.description = _read_field_from_msg(msg, 'summary')
-
- if 'keywords' in msg:
- self.keywords = _read_field_from_msg(msg, 'keywords').split(',')
-
- self.platforms = _read_list_from_msg(msg, 'platform')
- self.classifiers = _read_list_from_msg(msg, 'classifier')
-
- # PEP 314 - these fields only exist in 1.1
- if self.metadata_version == version.Version('1.1'):
- self.requires = _read_list_from_msg(msg, 'requires')
- self.provides = _read_list_from_msg(msg, 'provides')
- self.obsoletes = _read_list_from_msg(msg, 'obsoletes')
- else:
- self.requires = None
- self.provides = None
- self.obsoletes = None
-
- self.license_files = _read_list_from_msg(msg, 'license-file')
-
-
-def single_line(val):
- """
- Quick and dirty validation for the Summary field. See pypa/setuptools#1390.
- """
- if '\n' in val:
- # TODO: Replace with `raise ValueError("newlines not allowed")`
- # after reviewing #2893.
- warnings.warn("newlines not allowed and will break in the future")
- val = val.strip().split('\n')[0]
- return val
-
-
-# Based on Python 3.5 version
-def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME
- """Write the PKG-INFO format data to a file object."""
- version = self.get_metadata_version()
-
- def write_field(key, value):
- file.write("%s: %s\n" % (key, value))
-
- write_field('Metadata-Version', str(version))
- write_field('Name', self.get_name())
- write_field('Version', self.get_version())
- write_field('Summary', single_line(self.get_description()))
- write_field('Home-page', self.get_url())
-
- optional_fields = (
- ('Author', 'author'),
- ('Author-email', 'author_email'),
- ('Maintainer', 'maintainer'),
- ('Maintainer-email', 'maintainer_email'),
- )
-
- for field, attr in optional_fields:
- attr_val = getattr(self, attr, None)
- if attr_val is not None:
- write_field(field, attr_val)
-
- license = rfc822_escape(self.get_license())
- write_field('License', license)
- if self.download_url:
- write_field('Download-URL', self.download_url)
- for project_url in self.project_urls.items():
- write_field('Project-URL', '%s, %s' % project_url)
-
- keywords = ','.join(self.get_keywords())
- if keywords:
- write_field('Keywords', keywords)
-
- for platform in self.get_platforms():
- write_field('Platform', platform)
-
- self._write_list(file, 'Classifier', self.get_classifiers())
-
- # PEP 314
- self._write_list(file, 'Requires', self.get_requires())
- self._write_list(file, 'Provides', self.get_provides())
- self._write_list(file, 'Obsoletes', self.get_obsoletes())
-
- # Setuptools specific for PEP 345
- if hasattr(self, 'python_requires'):
- write_field('Requires-Python', self.python_requires)
-
- # PEP 566
- if self.long_description_content_type:
- write_field('Description-Content-Type', self.long_description_content_type)
- if self.provides_extras:
- for extra in self.provides_extras:
- write_field('Provides-Extra', extra)
-
- self._write_list(file, 'License-File', self.license_files or [])
-
- file.write("\n%s\n\n" % self.get_long_description())
-
-
-sequence = tuple, list
-
-
-def check_importable(dist, attr, value):
- try:
- ep = pkg_resources.EntryPoint.parse('x=' + value)
- assert not ep.extras
- except (TypeError, ValueError, AttributeError, AssertionError) as e:
- raise DistutilsSetupError(
- "%r must be importable 'module:attrs' string (got %r)" % (attr, value)
- ) from e
-
-
-def assert_string_list(dist, attr, value):
- """Verify that value is a string list"""
- try:
- # verify that value is a list or tuple to exclude unordered
- # or single-use iterables
- assert isinstance(value, (list, tuple))
- # verify that elements of value are strings
- assert ''.join(value) != value
- except (TypeError, ValueError, AttributeError, AssertionError) as e:
- raise DistutilsSetupError(
- "%r must be a list of strings (got %r)" % (attr, value)
- ) from e
-
-
-def check_nsp(dist, attr, value):
- """Verify that namespace packages are valid"""
- ns_packages = value
- assert_string_list(dist, attr, ns_packages)
- for nsp in ns_packages:
- if not dist.has_contents_for(nsp):
- raise DistutilsSetupError(
- "Distribution contains no modules or packages for "
- + "namespace package %r" % nsp
- )
- parent, sep, child = nsp.rpartition('.')
- if parent and parent not in ns_packages:
- distutils.log.warn(
- "WARNING: %r is declared as a package namespace, but %r"
- " is not: please correct this in setup.py",
- nsp,
- parent,
- )
-
-
-def check_extras(dist, attr, value):
- """Verify that extras_require mapping is valid"""
- try:
- list(itertools.starmap(_check_extra, value.items()))
- except (TypeError, ValueError, AttributeError) as e:
- raise DistutilsSetupError(
- "'extras_require' must be a dictionary whose values are "
- "strings or lists of strings containing valid project/version "
- "requirement specifiers."
- ) from e
-
-
-def _check_extra(extra, reqs):
- name, sep, marker = extra.partition(':')
- if marker and pkg_resources.invalid_marker(marker):
- raise DistutilsSetupError("Invalid environment marker: " + marker)
- list(pkg_resources.parse_requirements(reqs))
-
-
-def assert_bool(dist, attr, value):
- """Verify that value is True, False, 0, or 1"""
- if bool(value) != value:
- tmpl = "{attr!r} must be a boolean value (got {value!r})"
- raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
-
-
-def invalid_unless_false(dist, attr, value):
- if not value:
- warnings.warn(f"{attr} is ignored.", DistDeprecationWarning)
- return
- raise DistutilsSetupError(f"{attr} is invalid.")
-
-
-def check_requirements(dist, attr, value):
- """Verify that install_requires is a valid requirements list"""
- try:
- list(pkg_resources.parse_requirements(value))
- if isinstance(value, (dict, set)):
- raise TypeError("Unordered types are not allowed")
- except (TypeError, ValueError) as error:
- tmpl = (
- "{attr!r} must be a string or list of strings "
- "containing valid project/version requirement specifiers; {error}"
- )
- raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error
-
-
-def check_specifier(dist, attr, value):
- """Verify that value is a valid version specifier"""
- try:
- packaging.specifiers.SpecifierSet(value)
- except (packaging.specifiers.InvalidSpecifier, AttributeError) as error:
- tmpl = (
- "{attr!r} must be a string " "containing valid version specifiers; {error}"
- )
- raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error
-
-
-def check_entry_points(dist, attr, value):
- """Verify that entry_points map is parseable"""
- try:
- pkg_resources.EntryPoint.parse_map(value)
- except ValueError as e:
- raise DistutilsSetupError(e) from e
-
-
-def check_test_suite(dist, attr, value):
- if not isinstance(value, str):
- raise DistutilsSetupError("test_suite must be a string")
-
-
-def check_package_data(dist, attr, value):
- """Verify that value is a dictionary of package names to glob lists"""
- if not isinstance(value, dict):
- raise DistutilsSetupError(
- "{!r} must be a dictionary mapping package names to lists of "
- "string wildcard patterns".format(attr)
- )
- for k, v in value.items():
- if not isinstance(k, str):
- raise DistutilsSetupError(
- "keys of {!r} dict must be strings (got {!r})".format(attr, k)
- )
- assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
-
-
-def check_packages(dist, attr, value):
- for pkgname in value:
- if not re.match(r'\w+(\.\w+)*', pkgname):
- distutils.log.warn(
- "WARNING: %r not a valid package name; please use only "
- ".-separated package names in setup.py",
- pkgname,
- )
-
-
-_Distribution = get_unpatched(distutils.core.Distribution)
-
-
-class Distribution(_Distribution):
- """Distribution with support for tests and package data
-
- This is an enhanced version of 'distutils.dist.Distribution' that
- effectively adds the following new optional keyword arguments to 'setup()':
-
- 'install_requires' -- a string or sequence of strings specifying project
- versions that the distribution requires when installed, in the format
- used by 'pkg_resources.require()'. They will be installed
- automatically when the package is installed. If you wish to use
- packages that are not available in PyPI, or want to give your users an
- alternate download location, you can add a 'find_links' option to the
- '[easy_install]' section of your project's 'setup.cfg' file, and then
- setuptools will scan the listed web pages for links that satisfy the
- requirements.
-
- 'extras_require' -- a dictionary mapping names of optional "extras" to the
- additional requirement(s) that using those extras incurs. For example,
- this::
-
- extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
-
- indicates that the distribution can optionally provide an extra
- capability called "reST", but it can only be used if docutils and
- reSTedit are installed. If the user installs your package using
- EasyInstall and requests one of your extras, the corresponding
- additional requirements will be installed if needed.
-
- 'test_suite' -- the name of a test suite to run for the 'test' command.
- If the user runs 'python setup.py test', the package will be installed,
- and the named test suite will be run. The format is the same as
- would be used on a 'unittest.py' command line. That is, it is the
- dotted name of an object to import and call to generate a test suite.
-
- 'package_data' -- a dictionary mapping package names to lists of filenames
- or globs to use to find data files contained in the named packages.
- If the dictionary has filenames or globs listed under '""' (the empty
- string), those names will be searched for in every package, in addition
- to any names for the specific package. Data files found using these
- names/globs will be installed along with the package, in the same
- location as the package. Note that globs are allowed to reference
- the contents of non-package subdirectories, as long as you use '/' as
- a path separator. (Globs are automatically converted to
- platform-specific paths at runtime.)
-
- In addition to these new keywords, this class also has several new methods
- for manipulating the distribution's contents. For example, the 'include()'
- and 'exclude()' methods can be thought of as in-place add and subtract
- commands that add or remove packages, modules, extensions, and so on from
- the distribution.
- """
-
- _DISTUTILS_UNSUPPORTED_METADATA = {
- 'long_description_content_type': lambda: None,
- 'project_urls': dict,
- 'provides_extras': ordered_set.OrderedSet,
- 'license_file': lambda: None,
- 'license_files': lambda: None,
- }
-
- _patched_dist = None
-
- def patch_missing_pkg_info(self, attrs):
- # Fake up a replacement for the data that would normally come from
- # PKG-INFO, but which might not yet be built if this is a fresh
- # checkout.
- #
- if not attrs or 'name' not in attrs or 'version' not in attrs:
- return
- key = pkg_resources.safe_name(str(attrs['name'])).lower()
- dist = pkg_resources.working_set.by_key.get(key)
- if dist is not None and not dist.has_metadata('PKG-INFO'):
- dist._version = pkg_resources.safe_version(str(attrs['version']))
- self._patched_dist = dist
-
- def __init__(self, attrs=None):
- have_package_data = hasattr(self, "package_data")
- if not have_package_data:
- self.package_data = {}
- attrs = attrs or {}
- self.dist_files = []
- # Filter out setuptools-specific options.
- self.src_root = attrs.pop("src_root", None)
- self.patch_missing_pkg_info(attrs)
- self.dependency_links = attrs.pop('dependency_links', [])
- self.setup_requires = attrs.pop('setup_requires', [])
- for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
- vars(self).setdefault(ep.name, None)
- _Distribution.__init__(
- self,
- {
- k: v
- for k, v in attrs.items()
- if k not in self._DISTUTILS_UNSUPPORTED_METADATA
- },
- )
-
- self._set_metadata_defaults(attrs)
-
- self.metadata.version = self._normalize_version(
- self._validate_version(self.metadata.version)
- )
- self._finalize_requires()
-
- def _set_metadata_defaults(self, attrs):
- """
- Fill in missing metadata fields not supported by distutils.
- Some fields may have been set by other tools (e.g. pbr).
- Those fields (vars(self.metadata)) take precedence over
- supplied attrs.
- """
- for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
- vars(self.metadata).setdefault(option, attrs.get(option, default()))
-
- @staticmethod
- def _normalize_version(version):
- if isinstance(version, setuptools.sic) or version is None:
- return version
-
- normalized = str(packaging.version.Version(version))
- if version != normalized:
- tmpl = "Normalizing '{version}' to '{normalized}'"
- warnings.warn(tmpl.format(**locals()))
- return normalized
- return version
-
- @staticmethod
- def _validate_version(version):
- if isinstance(version, numbers.Number):
- # Some people apparently take "version number" too literally :)
- version = str(version)
-
- if version is not None:
- try:
- packaging.version.Version(version)
- except (packaging.version.InvalidVersion, TypeError):
- warnings.warn(
- "The version specified (%r) is an invalid version, this "
- "may not work as expected with newer versions of "
- "setuptools, pip, and PyPI. Please see PEP 440 for more "
- "details." % version
- )
- return setuptools.sic(version)
- return version
-
- def _finalize_requires(self):
- """
- Set `metadata.python_requires` and fix environment markers
- in `install_requires` and `extras_require`.
- """
- if getattr(self, 'python_requires', None):
- self.metadata.python_requires = self.python_requires
-
- if getattr(self, 'extras_require', None):
- for extra in self.extras_require.keys():
- # Since this gets called multiple times at points where the
- # keys have become 'converted' extras, ensure that we are only
- # truly adding extras we haven't seen before here.
- extra = extra.split(':')[0]
- if extra:
- self.metadata.provides_extras.add(extra)
-
- self._convert_extras_requirements()
- self._move_install_requirements_markers()
-
- def _convert_extras_requirements(self):
- """
- Convert requirements in `extras_require` of the form
- `"extra": ["barbazquux; {marker}"]` to
- `"extra:{marker}": ["barbazquux"]`.
- """
- spec_ext_reqs = getattr(self, 'extras_require', None) or {}
- self._tmp_extras_require = defaultdict(list)
- for section, v in spec_ext_reqs.items():
- # Do not strip empty sections.
- self._tmp_extras_require[section]
- for r in pkg_resources.parse_requirements(v):
- suffix = self._suffix_for(r)
- self._tmp_extras_require[section + suffix].append(r)
-
- @staticmethod
- def _suffix_for(req):
- """
- For a requirement, return the 'extras_require' suffix for
- that requirement.
- """
- return ':' + str(req.marker) if req.marker else ''
-
- def _move_install_requirements_markers(self):
- """
- Move requirements in `install_requires` that are using environment
- markers to `extras_require`.
- """
-
- # divide the install_requires into two sets, simple ones still
- # handled by install_requires and more complex ones handled
- # by extras_require.
-
- def is_simple_req(req):
- return not req.marker
-
- spec_inst_reqs = getattr(self, 'install_requires', None) or ()
- inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
- simple_reqs = filter(is_simple_req, inst_reqs)
- complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)
- self.install_requires = list(map(str, simple_reqs))
-
- for r in complex_reqs:
- self._tmp_extras_require[':' + str(r.marker)].append(r)
- self.extras_require = dict(
- (k, [str(r) for r in map(self._clean_req, v)])
- for k, v in self._tmp_extras_require.items()
- )
-
- def _clean_req(self, req):
- """
- Given a Requirement, remove environment markers and return it.
- """
- req.marker = None
- return req
-
- def _finalize_license_files(self):
- """Compute names of all license files which should be included."""
- license_files: Optional[List[str]] = self.metadata.license_files
- patterns: List[str] = license_files if license_files else []
-
- license_file: Optional[str] = self.metadata.license_file
- if license_file and license_file not in patterns:
- patterns.append(license_file)
-
- if license_files is None and license_file is None:
- # Default patterns match the ones wheel uses
- # See https://wheel.readthedocs.io/en/stable/user_guide.html
- # -> 'Including license files in the generated wheel file'
- patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
-
- self.metadata.license_files = list(
- unique_everseen(self._expand_patterns(patterns))
- )
-
- @staticmethod
- def _expand_patterns(patterns):
- """
- >>> list(Distribution._expand_patterns(['LICENSE']))
- ['LICENSE']
- >>> list(Distribution._expand_patterns(['setup.cfg', 'LIC*']))
- ['setup.cfg', 'LICENSE']
- """
- return (
- path
- for pattern in patterns
- for path in sorted(iglob(pattern))
- if not path.endswith('~') and os.path.isfile(path)
- )
-
- # FIXME: 'Distribution._parse_config_files' is too complex (14)
- def _parse_config_files(self, filenames=None): # noqa: C901
- """
- Adapted from distutils.dist.Distribution.parse_config_files,
- this method provides the same functionality in subtly-improved
- ways.
- """
- from configparser import ConfigParser
-
- # Ignore install directory options if we have a venv
- ignore_options = (
- []
- if sys.prefix == sys.base_prefix
- else [
- 'install-base',
- 'install-platbase',
- 'install-lib',
- 'install-platlib',
- 'install-purelib',
- 'install-headers',
- 'install-scripts',
- 'install-data',
- 'prefix',
- 'exec-prefix',
- 'home',
- 'user',
- 'root',
- ]
- )
-
- ignore_options = frozenset(ignore_options)
-
- if filenames is None:
- filenames = self.find_config_files()
-
- if DEBUG:
- self.announce("Distribution.parse_config_files():")
-
- parser = ConfigParser()
- parser.optionxform = str
- for filename in filenames:
- with io.open(filename, encoding='utf-8') as reader:
- if DEBUG:
- self.announce(" reading {filename}".format(**locals()))
- parser.read_file(reader)
- for section in parser.sections():
- options = parser.options(section)
- opt_dict = self.get_option_dict(section)
-
- for opt in options:
- if opt == '__name__' or opt in ignore_options:
- continue
-
- val = parser.get(section, opt)
- opt = self.warn_dash_deprecation(opt, section)
- opt = self.make_option_lowercase(opt, section)
- opt_dict[opt] = (filename, val)
-
- # Make the ConfigParser forget everything (so we retain
- # the original filenames that options come from)
- parser.__init__()
-
- if 'global' not in self.command_options:
- return
-
- # If there was a "global" section in the config file, use it
- # to set Distribution options.
-
- for (opt, (src, val)) in self.command_options['global'].items():
- alias = self.negative_opt.get(opt)
- if alias:
- val = not strtobool(val)
- elif opt in ('verbose', 'dry_run'): # ugh!
- val = strtobool(val)
-
- try:
- setattr(self, alias or opt, val)
- except ValueError as e:
- raise DistutilsOptionError(e) from e
-
- def warn_dash_deprecation(self, opt, section):
- if section in (
- 'options.extras_require',
- 'options.data_files',
- ):
- return opt
-
- underscore_opt = opt.replace('-', '_')
- commands = distutils.command.__all__ + self._setuptools_commands()
- if (
- not section.startswith('options')
- and section != 'metadata'
- and section not in commands
- ):
- return underscore_opt
-
- if '-' in opt:
- warnings.warn(
- "Usage of dash-separated '%s' will not be supported in future "
- "versions. Please use the underscore name '%s' instead"
- % (opt, underscore_opt)
- )
- return underscore_opt
-
- def _setuptools_commands(self):
- try:
- dist = pkg_resources.get_distribution('setuptools')
- return list(dist.get_entry_map('distutils.commands'))
- except pkg_resources.DistributionNotFound:
- # during bootstrapping, distribution doesn't exist
- return []
-
- def make_option_lowercase(self, opt, section):
- if section != 'metadata' or opt.islower():
- return opt
-
- lowercase_opt = opt.lower()
- warnings.warn(
- "Usage of uppercase key '%s' in '%s' will be deprecated in future "
- "versions. Please use lowercase '%s' instead"
- % (opt, section, lowercase_opt)
- )
- return lowercase_opt
-
- # FIXME: 'Distribution._set_command_options' is too complex (14)
- def _set_command_options(self, command_obj, option_dict=None): # noqa: C901
- """
- Set the options for 'command_obj' from 'option_dict'. Basically
- this means copying elements of a dictionary ('option_dict') to
- attributes of an instance ('command').
-
- 'command_obj' must be a Command instance. If 'option_dict' is not
- supplied, uses the standard option dictionary for this command
- (from 'self.command_options').
-
- (Adapted from distutils.dist.Distribution._set_command_options)
- """
- command_name = command_obj.get_command_name()
- if option_dict is None:
- option_dict = self.get_option_dict(command_name)
-
- if DEBUG:
- self.announce(" setting options for '%s' command:" % command_name)
- for (option, (source, value)) in option_dict.items():
- if DEBUG:
- self.announce(" %s = %s (from %s)" % (option, value, source))
- try:
- bool_opts = [translate_longopt(o) for o in command_obj.boolean_options]
- except AttributeError:
- bool_opts = []
- try:
- neg_opt = command_obj.negative_opt
- except AttributeError:
- neg_opt = {}
-
- try:
- is_string = isinstance(value, str)
- if option in neg_opt and is_string:
- setattr(command_obj, neg_opt[option], not strtobool(value))
- elif option in bool_opts and is_string:
- setattr(command_obj, option, strtobool(value))
- elif hasattr(command_obj, option):
- setattr(command_obj, option, value)
- else:
- raise DistutilsOptionError(
- "error in %s: command '%s' has no such option '%s'"
- % (source, command_name, option)
- )
- except ValueError as e:
- raise DistutilsOptionError(e) from e
-
- def parse_config_files(self, filenames=None, ignore_option_errors=False):
- """Parses configuration files from various levels
- and loads configuration.
-
- """
- self._parse_config_files(filenames=filenames)
-
- parse_configuration(
- self, self.command_options, ignore_option_errors=ignore_option_errors
- )
- self._finalize_requires()
- self._finalize_license_files()
-
- def fetch_build_eggs(self, requires):
- """Resolve pre-setup requirements"""
- resolved_dists = pkg_resources.working_set.resolve(
- pkg_resources.parse_requirements(requires),
- installer=self.fetch_build_egg,
- replace_conflicting=True,
- )
- for dist in resolved_dists:
- pkg_resources.working_set.add(dist, replace=True)
- return resolved_dists
-
- def finalize_options(self):
- """
- Allow plugins to apply arbitrary operations to the
- distribution. Each hook may optionally define an 'order'
- to influence the order of execution. Smaller numbers
- go first and the default is 0.
- """
- group = 'setuptools.finalize_distribution_options'
-
- def by_order(hook):
- return getattr(hook, 'order', 0)
-
- defined = pkg_resources.iter_entry_points(group)
- filtered = itertools.filterfalse(self._removed, defined)
- loaded = map(lambda e: e.load(), filtered)
- for ep in sorted(loaded, key=by_order):
- ep(self)
-
- @staticmethod
- def _removed(ep):
- """
- When an entry point has been removed but metadata from an
- older version of Setuptools is loaded, the removed entry
- point will still be listed, and loading it will fail.
- See #2765 for more details.
- """
- removed = {
- # removed 2021-09-05
- '2to3_doctests',
- }
- return ep.name in removed
-
- def _finalize_setup_keywords(self):
- for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
- value = getattr(self, ep.name, None)
- if value is not None:
- ep.require(installer=self.fetch_build_egg)
- ep.load()(self, ep.name, value)
-
- def get_egg_cache_dir(self):
- egg_cache_dir = os.path.join(os.curdir, '.eggs')
- if not os.path.exists(egg_cache_dir):
- os.mkdir(egg_cache_dir)
- windows_support.hide_file(egg_cache_dir)
- readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
- with open(readme_txt_filename, 'w') as f:
- f.write(
- 'This directory contains eggs that were downloaded '
- 'by setuptools to build, test, and run plug-ins.\n\n'
- )
- f.write(
- 'This directory caches those eggs to prevent '
- 'repeated downloads.\n\n'
- )
- f.write('However, it is safe to delete this directory.\n\n')
-
- return egg_cache_dir
-
- def fetch_build_egg(self, req):
- """Fetch an egg needed for building"""
- from setuptools.installer import fetch_build_egg
-
- return fetch_build_egg(self, req)
-
- def get_command_class(self, command):
- """Pluggable version of get_command_class()"""
- if command in self.cmdclass:
- return self.cmdclass[command]
-
- eps = pkg_resources.iter_entry_points('distutils.commands', command)
- for ep in eps:
- ep.require(installer=self.fetch_build_egg)
- self.cmdclass[command] = cmdclass = ep.load()
- return cmdclass
- else:
- return _Distribution.get_command_class(self, command)
-
- def print_commands(self):
- for ep in pkg_resources.iter_entry_points('distutils.commands'):
- if ep.name not in self.cmdclass:
- # don't require extras as the commands won't be invoked
- cmdclass = ep.resolve()
- self.cmdclass[ep.name] = cmdclass
- return _Distribution.print_commands(self)
-
- def get_command_list(self):
- for ep in pkg_resources.iter_entry_points('distutils.commands'):
- if ep.name not in self.cmdclass:
- # don't require extras as the commands won't be invoked
- cmdclass = ep.resolve()
- self.cmdclass[ep.name] = cmdclass
- return _Distribution.get_command_list(self)
-
- def include(self, **attrs):
- """Add items to distribution that are named in keyword arguments
-
- For example, 'dist.include(py_modules=["x"])' would add 'x' to
- the distribution's 'py_modules' attribute, if it was not already
- there.
-
- Currently, this method only supports inclusion for attributes that are
- lists or tuples. If you need to add support for adding to other
- attributes in this or a subclass, you can add an '_include_X' method,
- where 'X' is the name of the attribute. The method will be called with
- the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
- will try to call 'dist._include_foo({"bar":"baz"})', which can then
- handle whatever special inclusion logic is needed.
- """
- for k, v in attrs.items():
- include = getattr(self, '_include_' + k, None)
- if include:
- include(v)
- else:
- self._include_misc(k, v)
-
- def exclude_package(self, package):
- """Remove packages, modules, and extensions in named package"""
-
- pfx = package + '.'
- if self.packages:
- self.packages = [
- p for p in self.packages if p != package and not p.startswith(pfx)
- ]
-
- if self.py_modules:
- self.py_modules = [
- p for p in self.py_modules if p != package and not p.startswith(pfx)
- ]
-
- if self.ext_modules:
- self.ext_modules = [
- p
- for p in self.ext_modules
- if p.name != package and not p.name.startswith(pfx)
- ]
-
- def has_contents_for(self, package):
- """Return true if 'exclude_package(package)' would do something"""
-
- pfx = package + '.'
-
- for p in self.iter_distribution_names():
- if p == package or p.startswith(pfx):
- return True
-
- def _exclude_misc(self, name, value):
- """Handle 'exclude()' for list/tuple attrs without a special handler"""
- if not isinstance(value, sequence):
- raise DistutilsSetupError(
- "%s: setting must be a list or tuple (%r)" % (name, value)
- )
- try:
- old = getattr(self, name)
- except AttributeError as e:
- raise DistutilsSetupError("%s: No such distribution setting" % name) from e
- if old is not None and not isinstance(old, sequence):
- raise DistutilsSetupError(
- name + ": this setting cannot be changed via include/exclude"
- )
- elif old:
- setattr(self, name, [item for item in old if item not in value])
-
- def _include_misc(self, name, value):
- """Handle 'include()' for list/tuple attrs without a special handler"""
-
- if not isinstance(value, sequence):
- raise DistutilsSetupError("%s: setting must be a list (%r)" % (name, value))
- try:
- old = getattr(self, name)
- except AttributeError as e:
- raise DistutilsSetupError("%s: No such distribution setting" % name) from e
- if old is None:
- setattr(self, name, value)
- elif not isinstance(old, sequence):
- raise DistutilsSetupError(
- name + ": this setting cannot be changed via include/exclude"
- )
- else:
- new = [item for item in value if item not in old]
- setattr(self, name, old + new)
-
- def exclude(self, **attrs):
- """Remove items from distribution that are named in keyword arguments
-
- For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
- the distribution's 'py_modules' attribute. Excluding packages uses
- the 'exclude_package()' method, so all of the package's contained
- packages, modules, and extensions are also excluded.
-
- Currently, this method only supports exclusion from attributes that are
- lists or tuples. If you need to add support for excluding from other
- attributes in this or a subclass, you can add an '_exclude_X' method,
- where 'X' is the name of the attribute. The method will be called with
- the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
- will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
- handle whatever special exclusion logic is needed.
- """
- for k, v in attrs.items():
- exclude = getattr(self, '_exclude_' + k, None)
- if exclude:
- exclude(v)
- else:
- self._exclude_misc(k, v)
-
- def _exclude_packages(self, packages):
- if not isinstance(packages, sequence):
- raise DistutilsSetupError(
- "packages: setting must be a list or tuple (%r)" % (packages,)
- )
- list(map(self.exclude_package, packages))
-
- def _parse_command_opts(self, parser, args):
- # Remove --with-X/--without-X options when processing command args
- self.global_options = self.__class__.global_options
- self.negative_opt = self.__class__.negative_opt
-
- # First, expand any aliases
- command = args[0]
- aliases = self.get_option_dict('aliases')
- while command in aliases:
- src, alias = aliases[command]
- del aliases[command] # ensure each alias can expand only once!
- import shlex
-
- args[:1] = shlex.split(alias, True)
- command = args[0]
-
- nargs = _Distribution._parse_command_opts(self, parser, args)
-
- # Handle commands that want to consume all remaining arguments
- cmd_class = self.get_command_class(command)
- if getattr(cmd_class, 'command_consumes_arguments', None):
- self.get_option_dict(command)['args'] = ("command line", nargs)
- if nargs is not None:
- return []
-
- return nargs
-
- def get_cmdline_options(self):
- """Return a '{cmd: {opt:val}}' map of all command-line options
-
- Option names are all long, but do not include the leading '--', and
- contain dashes rather than underscores. If the option doesn't take
- an argument (e.g. '--quiet'), the 'val' is 'None'.
-
- Note that options provided by config files are intentionally excluded.
- """
-
- d = {}
-
- for cmd, opts in self.command_options.items():
-
- for opt, (src, val) in opts.items():
-
- if src != "command line":
- continue
-
- opt = opt.replace('_', '-')
-
- if val == 0:
- cmdobj = self.get_command_obj(cmd)
- neg_opt = self.negative_opt.copy()
- neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
- for neg, pos in neg_opt.items():
- if pos == opt:
- opt = neg
- val = None
- break
- else:
- raise AssertionError("Shouldn't be able to get here")
-
- elif val == 1:
- val = None
-
- d.setdefault(cmd, {})[opt] = val
-
- return d
-
- def iter_distribution_names(self):
- """Yield all packages, modules, and extension names in distribution"""
-
- for pkg in self.packages or ():
- yield pkg
-
- for module in self.py_modules or ():
- yield module
-
- for ext in self.ext_modules or ():
- if isinstance(ext, tuple):
- name, buildinfo = ext
- else:
- name = ext.name
- if name.endswith('module'):
- name = name[:-6]
- yield name
-
- def handle_display_options(self, option_order):
- """If there were any non-global "display-only" options
- (--help-commands or the metadata display options) on the command
- line, display the requested info and return true; else return
- false.
- """
- import sys
-
- if self.help_commands:
- return _Distribution.handle_display_options(self, option_order)
-
- # Stdout may be StringIO (e.g. in tests)
- if not isinstance(sys.stdout, io.TextIOWrapper):
- return _Distribution.handle_display_options(self, option_order)
-
- # Don't wrap stdout if utf-8 is already the encoding. Provides
- # workaround for #334.
- if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
- return _Distribution.handle_display_options(self, option_order)
-
- # Print metadata in UTF-8 no matter the platform
- encoding = sys.stdout.encoding
- errors = sys.stdout.errors
- newline = '\n' if sys.platform != 'win32' else None
- line_buffering = sys.stdout.line_buffering
-
- sys.stdout = io.TextIOWrapper(
- sys.stdout.detach(), 'utf-8', errors, newline, line_buffering
- )
- try:
- return _Distribution.handle_display_options(self, option_order)
- finally:
- sys.stdout = io.TextIOWrapper(
- sys.stdout.detach(), encoding, errors, newline, line_buffering
- )
-
-
-class DistDeprecationWarning(SetuptoolsDeprecationWarning):
- """Class for warning about deprecations in dist in
- setuptools. Not ignored by default, unlike DeprecationWarning."""
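
To make the marker handling in _move_install_requirements_markers concrete, a small illustrative sketch using pkg_resources directly (not the Distribution machinery itself):

    import pkg_resources

    reqs = list(pkg_resources.parse_requirements([
        'packaging>=20.0',
        'importlib-metadata; python_version < "3.8"',
    ]))
    simple = [str(r) for r in reqs if not r.marker]
    extras = {':%s' % r.marker: [r.name] for r in reqs if r.marker}
    # simple -> ['packaging>=20.0']
    # extras -> {':python_version < "3.8"': ['importlib-metadata']}
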
diff --git a/contrib/python/setuptools/py3/setuptools/errors.py b/contrib/python/setuptools/py3/setuptools/errors.py
deleted file mode 100644
index f4d35a630a0..00000000000
--- a/contrib/python/setuptools/py3/setuptools/errors.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""setuptools.errors
-
-Provides exceptions used by setuptools modules.
-"""
-
-from distutils import errors as _distutils_errors
-from distutils.errors import DistutilsError
-
-
-class RemovedCommandError(DistutilsError, RuntimeError):
- """Error used for commands that have been removed in setuptools.
-
- Since ``setuptools`` is built on ``distutils``, simply removing a command
- from ``setuptools`` will make the behavior fall back to ``distutils``; this
- error is raised if a command exists in ``distutils`` but has been actively
- removed in ``setuptools``.
- """
-
-
-# Re-export errors from distutils to facilitate the migration to PEP 632
-
-ByteCompileError = _distutils_errors.DistutilsByteCompileError
-CCompilerError = _distutils_errors.CCompilerError
-ClassError = _distutils_errors.DistutilsClassError
-CompileError = _distutils_errors.CompileError
-ExecError = _distutils_errors.DistutilsExecError
-FileError = _distutils_errors.DistutilsFileError
-InternalError = _distutils_errors.DistutilsInternalError
-LibError = _distutils_errors.LibError
-LinkError = _distutils_errors.LinkError
-ModuleError = _distutils_errors.DistutilsModuleError
-OptionError = _distutils_errors.DistutilsOptionError
-PlatformError = _distutils_errors.DistutilsPlatformError
-PreprocessError = _distutils_errors.PreprocessError
-SetupError = _distutils_errors.DistutilsSetupError
-TemplateError = _distutils_errors.DistutilsTemplateError
-UnknownFileError = _distutils_errors.UnknownFileError
-
-# The root error class in the hierarchy
-BaseError = _distutils_errors.DistutilsError
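
A brief sketch of the intended migration path: code can catch the setuptools aliases rather than the distutils names:

    from setuptools import errors

    try:
        raise errors.OptionError('bad option')  # DistutilsOptionError alias
    except errors.BaseError as exc:
        # BaseError is the distutils root class, so any alias above lands here
        print(type(exc).__name__)
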
diff --git a/contrib/python/setuptools/py3/setuptools/extension.py b/contrib/python/setuptools/py3/setuptools/extension.py
deleted file mode 100644
index 1820722a494..00000000000
--- a/contrib/python/setuptools/py3/setuptools/extension.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import re
-import functools
-import distutils.core
-import distutils.errors
-import distutils.extension
-
-from .monkey import get_unpatched
-
-
-def _have_cython():
- """
- Return True if Cython can be imported.
- """
- cython_impl = 'Cython.Distutils.build_ext'
- try:
- # from (cython_impl) import build_ext
- __import__(cython_impl, fromlist=['build_ext']).build_ext
- return True
- except Exception:
- pass
- return False
-
-
-# for compatibility
-have_pyrex = _have_cython
-
-_Extension = get_unpatched(distutils.core.Extension)
-
-
-class Extension(_Extension):
- """Extension that uses '.c' files in place of '.pyx' files"""
-
- def __init__(self, name, sources, *args, **kw):
- # The *args is needed for compatibility as calls may use positional
- # arguments. py_limited_api may be set only via keyword.
- self.py_limited_api = kw.pop("py_limited_api", False)
- _Extension.__init__(self, name, sources, *args, **kw)
-
- def _convert_pyx_sources_to_lang(self):
- """
- Replace sources with .pyx extensions to sources with the target
- language extension. This mechanism allows language authors to supply
- pre-converted sources but to prefer the .pyx sources.
- """
- if _have_cython():
- # the build has Cython, so allow it to compile the .pyx files
- return
- lang = self.language or ''
- target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
- sub = functools.partial(re.sub, '.pyx$', target_ext)
- self.sources = list(map(sub, self.sources))
-
-
-class Library(Extension):
- """Just like a regular Extension, but built as a library instead"""
diff --git a/contrib/python/setuptools/py3/setuptools/extern/__init__.py b/contrib/python/setuptools/py3/setuptools/extern/__init__.py
deleted file mode 100644
index baca1afabe9..00000000000
--- a/contrib/python/setuptools/py3/setuptools/extern/__init__.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import importlib.util
-import sys
-
-
-class VendorImporter:
- """
- A PEP 302 meta path importer for finding optionally-vendored
- or otherwise naturally-installed packages from root_name.
- """
-
- def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
- self.root_name = root_name
- self.vendored_names = set(vendored_names)
- self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
- @property
- def search_path(self):
- """
- Search the vendor package first, then fall back to the natural package.
- """
- yield self.vendor_pkg + '.'
- yield ''
-
- def _module_matches_namespace(self, fullname):
- """Figure out if the target module is vendored."""
- root, base, target = fullname.partition(self.root_name + '.')
- return not root and any(map(target.startswith, self.vendored_names))
-
- def load_module(self, fullname):
- """
- Iterate over the search path to locate and load fullname.
- """
- root, base, target = fullname.partition(self.root_name + '.')
- for prefix in self.search_path:
- try:
- extant = prefix + target
- __import__(extant)
- mod = sys.modules[extant]
- sys.modules[fullname] = mod
- return mod
- except ImportError:
- pass
- else:
- raise ImportError(
- "The '{target}' package is required; "
- "normally this is bundled with this package so if you get "
- "this warning, consult the packager of your "
- "distribution.".format(**locals())
- )
-
- def create_module(self, spec):
- return self.load_module(spec.name)
-
- def exec_module(self, module):
- pass
-
- def find_spec(self, fullname, path=None, target=None):
- """Return a module spec for vendored names."""
- return (
- importlib.util.spec_from_loader(fullname, self)
- if self._module_matches_namespace(fullname) else None
- )
-
- def install(self):
- """
- Install this importer into sys.meta_path if not already present.
- """
- if self not in sys.meta_path:
- sys.meta_path.append(self)
-
-
-names = 'packaging', 'pyparsing', 'ordered_set', 'more_itertools',
-VendorImporter(__name__, names, 'setuptools._vendor').install()
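
Importing the package is enough to activate the importer; a short sketch:

    import setuptools.extern  # installing the VendorImporter happens at import
    from setuptools.extern import packaging

    # served from setuptools._vendor when bundled, otherwise from a
    # naturally installed 'packaging'
    print(packaging.__name__)
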
diff --git a/contrib/python/setuptools/py3/setuptools/glob.py b/contrib/python/setuptools/py3/setuptools/glob.py
deleted file mode 100644
index 87062b8187f..00000000000
--- a/contrib/python/setuptools/py3/setuptools/glob.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
-
-Changes include:
- * PEP 3102 keyword-only `*` markers removed.
- * Hidden files are not ignored.
-"""
-
-import os
-import re
-import fnmatch
-
-__all__ = ["glob", "iglob", "escape"]
-
-
-def glob(pathname, recursive=False):
- """Return a list of paths matching a pathname pattern.
-
- The pattern may contain simple shell-style wildcards a la
- fnmatch. However, unlike fnmatch, filenames starting with a
- dot are special cases that are not matched by '*' and '?'
- patterns.
-
- If recursive is true, the pattern '**' will match any files and
- zero or more directories and subdirectories.
- """
- return list(iglob(pathname, recursive=recursive))
-
-
-def iglob(pathname, recursive=False):
- """Return an iterator which yields the paths matching a pathname pattern.
-
- The pattern may contain simple shell-style wildcards a la
- fnmatch. However, unlike fnmatch, filenames starting with a
- dot are special cases that are not matched by '*' and '?'
- patterns.
-
- If recursive is true, the pattern '**' will match any files and
- zero or more directories and subdirectories.
- """
- it = _iglob(pathname, recursive)
- if recursive and _isrecursive(pathname):
- s = next(it) # skip empty string
- assert not s
- return it
-
-
-def _iglob(pathname, recursive):
- dirname, basename = os.path.split(pathname)
- glob_in_dir = glob2 if recursive and _isrecursive(basename) else glob1
-
- if not has_magic(pathname):
- if basename:
- if os.path.lexists(pathname):
- yield pathname
- else:
- # Patterns ending with a slash should match only directories
- if os.path.isdir(dirname):
- yield pathname
- return
-
- if not dirname:
- yield from glob_in_dir(dirname, basename)
- return
- # `os.path.split()` returns the argument itself as a dirname if it is a
- # drive or UNC path. Prevent an infinite recursion if a drive or UNC path
- contains magic characters (e.g. r'\\?\C:').
- if dirname != pathname and has_magic(dirname):
- dirs = _iglob(dirname, recursive)
- else:
- dirs = [dirname]
- if not has_magic(basename):
- glob_in_dir = glob0
- for dirname in dirs:
- for name in glob_in_dir(dirname, basename):
- yield os.path.join(dirname, name)
-
-
-# These 2 helper functions non-recursively glob inside a literal directory.
-# They return a list of basenames. `glob1` accepts a pattern while `glob0`
-# takes a literal basename (so it only has to check for its existence).
-
-
-def glob1(dirname, pattern):
- if not dirname:
- if isinstance(pattern, bytes):
- dirname = os.curdir.encode('ASCII')
- else:
- dirname = os.curdir
- try:
- names = os.listdir(dirname)
- except OSError:
- return []
- return fnmatch.filter(names, pattern)
-
-
-def glob0(dirname, basename):
- if not basename:
- # `os.path.split()` returns an empty basename for paths ending with a
- # directory separator. 'q*x/' should match only directories.
- if os.path.isdir(dirname):
- return [basename]
- else:
- if os.path.lexists(os.path.join(dirname, basename)):
- return [basename]
- return []
-
-
-# This helper function recursively yields relative pathnames inside a literal
-# directory.
-
-
-def glob2(dirname, pattern):
- assert _isrecursive(pattern)
- yield pattern[:0]
- for x in _rlistdir(dirname):
- yield x
-
-
-# Recursively yields relative pathnames inside a literal directory.
-def _rlistdir(dirname):
- if not dirname:
- if isinstance(dirname, bytes):
- dirname = os.curdir.encode('ASCII')
- else:
- dirname = os.curdir
- try:
- names = os.listdir(dirname)
- except OSError:
- return
- for x in names:
- yield x
- path = os.path.join(dirname, x) if dirname else x
- for y in _rlistdir(path):
- yield os.path.join(x, y)
-
-
-magic_check = re.compile('([*?[])')
-magic_check_bytes = re.compile(b'([*?[])')
-
-
-def has_magic(s):
- if isinstance(s, bytes):
- match = magic_check_bytes.search(s)
- else:
- match = magic_check.search(s)
- return match is not None
-
-
-def _isrecursive(pattern):
- if isinstance(pattern, bytes):
- return pattern == b'**'
- else:
- return pattern == '**'
-
-
-def escape(pathname):
- """Escape all special characters.
- """
- # Escaping is done by wrapping any of "*?[" between square brackets.
- # Metacharacters do not work in the drive part and shouldn't be escaped.
- drive, pathname = os.path.splitdrive(pathname)
- if isinstance(pathname, bytes):
- pathname = magic_check_bytes.sub(br'[\1]', pathname)
- else:
- pathname = magic_check.sub(r'[\1]', pathname)
- return drive + pathname
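
A short usage sketch of the deleted module (results depend on the working directory):

    from setuptools.glob import glob

    # unlike the stdlib glob, hidden files are matched by '*', and
    # '**' recurses only when recursive=True
    print(glob('**/*.py', recursive=True))
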
diff --git a/contrib/python/setuptools/py3/setuptools/installer.py b/contrib/python/setuptools/py3/setuptools/installer.py
deleted file mode 100644
index b7096df14b4..00000000000
--- a/contrib/python/setuptools/py3/setuptools/installer.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import glob
-import os
-import subprocess
-import sys
-import tempfile
-import warnings
-from distutils import log
-from distutils.errors import DistutilsError
-
-import pkg_resources
-from setuptools.wheel import Wheel
-from ._deprecation_warning import SetuptoolsDeprecationWarning
-
-
-def _fixup_find_links(find_links):
- """Ensure find-links option end-up being a list of strings."""
- if isinstance(find_links, str):
- return find_links.split()
- assert isinstance(find_links, (tuple, list))
- return find_links
-
-
-def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME
- """Fetch an egg needed for building.
-
- Use pip/wheel to fetch/build a wheel."""
- warnings.warn(
- "setuptools.installer is deprecated. Requirements should "
- "be satisfied by a PEP 517 installer.",
- SetuptoolsDeprecationWarning,
- )
- # Warn if wheel is not available
- try:
- pkg_resources.get_distribution('wheel')
- except pkg_resources.DistributionNotFound:
- dist.announce('WARNING: The wheel package is not available.', log.WARN)
- # Ignore environment markers; if the requirement gets this far, it is required.
- req = strip_marker(req)
- # Take easy_install options into account, but do not override relevant
- # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
- # take precedence.
- opts = dist.get_option_dict('easy_install')
- if 'allow_hosts' in opts:
- raise DistutilsError('the `allow-hosts` option is not supported '
- 'when using pip to install requirements.')
- quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ
- if 'PIP_INDEX_URL' in os.environ:
- index_url = None
- elif 'index_url' in opts:
- index_url = opts['index_url'][1]
- else:
- index_url = None
- find_links = (
- _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts
- else []
- )
- if dist.dependency_links:
- find_links.extend(dist.dependency_links)
- eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
- environment = pkg_resources.Environment()
- for egg_dist in pkg_resources.find_distributions(eggs_dir):
- if egg_dist in req and environment.can_add(egg_dist):
- return egg_dist
- with tempfile.TemporaryDirectory() as tmpdir:
- cmd = [
- sys.executable, '-m', 'pip',
- '--disable-pip-version-check',
- 'wheel', '--no-deps',
- '-w', tmpdir,
- ]
- if quiet:
- cmd.append('--quiet')
- if index_url is not None:
- cmd.extend(('--index-url', index_url))
- for link in find_links or []:
- cmd.extend(('--find-links', link))
- # If requirement is a PEP 508 direct URL, directly pass
- # the URL to pip, as `req @ url` does not work on the
- # command line.
- cmd.append(req.url or str(req))
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError as e:
- raise DistutilsError(str(e)) from e
- wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
- dist_location = os.path.join(eggs_dir, wheel.egg_name())
- wheel.install_as_egg(dist_location)
- dist_metadata = pkg_resources.PathMetadata(
- dist_location, os.path.join(dist_location, 'EGG-INFO'))
- dist = pkg_resources.Distribution.from_filename(
- dist_location, metadata=dist_metadata)
- return dist
-
-
-def strip_marker(req):
- """
- Return a new requirement without the environment marker to avoid
- calling pip with something like `babel; extra == "i18n"`, which
- would always be ignored.
- """
- # create a copy to avoid mutating the input
- req = pkg_resources.Requirement.parse(str(req))
- req.marker = None
- return req
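
strip_marker is the easiest piece to show in isolation; a minimal sketch:

    import pkg_resources
    from setuptools.installer import strip_marker

    req = pkg_resources.Requirement.parse('babel; extra == "i18n"')
    print(strip_marker(req))  # marker dropped; the input is not mutated
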
diff --git a/contrib/python/setuptools/py3/setuptools/launch.py b/contrib/python/setuptools/py3/setuptools/launch.py
deleted file mode 100644
index 0208fdf33b6..00000000000
--- a/contrib/python/setuptools/py3/setuptools/launch.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Launch the Python script on the command line after
-setuptools is bootstrapped via import.
-"""
-
-# Note that setuptools gets imported implicitly by the
-# invocation of this script using python -m setuptools.launch
-
-import tokenize
-import sys
-
-
-def run():
- """
- Run the script in sys.argv[1] as if it had
- been invoked naturally.
- """
- __builtins__
- script_name = sys.argv[1]
- namespace = dict(
- __file__=script_name,
- __name__='__main__',
- __doc__=None,
- )
- sys.argv[:] = sys.argv[1:]
-
- open_ = getattr(tokenize, 'open', open)
- with open_(script_name) as fid:
- script = fid.read()
- norm_script = script.replace('\\r\\n', '\\n')
- code = compile(norm_script, script_name, 'exec')
- exec(code, namespace)
-
-
-if __name__ == '__main__':
- run()
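-
-
-# Usage sketch (assumed, not part of the original file):
-#
-#   python -m setuptools.launch setup.py sdist
-#
-# setuptools (and its distutils patches) are imported first, then setup.py
-# runs as if it had been invoked directly.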
diff --git a/contrib/python/setuptools/py3/setuptools/monkey.py b/contrib/python/setuptools/py3/setuptools/monkey.py
deleted file mode 100644
index fb36dc1a97a..00000000000
--- a/contrib/python/setuptools/py3/setuptools/monkey.py
+++ /dev/null
@@ -1,177 +0,0 @@
-"""
-Monkey patching of distutils.
-"""
-
-import sys
-import distutils.filelist
-import platform
-import types
-import functools
-from importlib import import_module
-import inspect
-
-import setuptools
-
-__all__ = []
-"""
-Everything is private. Contact the project team
-if you think you need this functionality.
-"""
-
-
-def _get_mro(cls):
- """
-    Return the base classes for cls sorted by the MRO.
-
- Works around an issue on Jython where inspect.getmro will not return all
- base classes if multiple classes share the same name. Instead, this
- function will return a tuple containing the class itself, and the contents
- of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
- """
- if platform.python_implementation() == "Jython":
- return (cls,) + cls.__bases__
- return inspect.getmro(cls)
-
-
-def get_unpatched(item):
- lookup = (
- get_unpatched_class if isinstance(item, type) else
- get_unpatched_function if isinstance(item, types.FunctionType) else
- lambda item: None
- )
- return lookup(item)
-
-
-def get_unpatched_class(cls):
- """Protect against re-patching the distutils if reloaded
-
- Also ensures that no other distutils extension monkeypatched the distutils
- first.
- """
- external_bases = (
- cls
- for cls in _get_mro(cls)
- if not cls.__module__.startswith('setuptools')
- )
- base = next(external_bases)
- if not base.__module__.startswith('distutils'):
- msg = "distutils has already been patched by %r" % cls
- raise AssertionError(msg)
- return base
-
-
-def patch_all():
- # we can't patch distutils.cmd, alas
- distutils.core.Command = setuptools.Command
-
- has_issue_12885 = sys.version_info <= (3, 5, 3)
-
- if has_issue_12885:
- # fix findall bug in distutils (http://bugs.python.org/issue12885)
- distutils.filelist.findall = setuptools.findall
-
- needs_warehouse = (
- sys.version_info < (2, 7, 13)
- or
- (3, 4) < sys.version_info < (3, 4, 6)
- or
- (3, 5) < sys.version_info <= (3, 5, 3)
- )
-
- if needs_warehouse:
- warehouse = 'https://upload.pypi.org/legacy/'
- distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
-
- _patch_distribution_metadata()
-
- # Install Distribution throughout the distutils
- for module in distutils.dist, distutils.core, distutils.cmd:
- module.Distribution = setuptools.dist.Distribution
-
- # Install the patched Extension
- distutils.core.Extension = setuptools.extension.Extension
- distutils.extension.Extension = setuptools.extension.Extension
- if 'distutils.command.build_ext' in sys.modules:
- sys.modules['distutils.command.build_ext'].Extension = (
- setuptools.extension.Extension
- )
-
- patch_for_msvc_specialized_compiler()
-
-
-def _patch_distribution_metadata():
- """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
- for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
- new_val = getattr(setuptools.dist, attr)
- setattr(distutils.dist.DistributionMetadata, attr, new_val)
-
-
-def patch_func(replacement, target_mod, func_name):
- """
- Patch func_name in target_mod with replacement
-
- Important - original must be resolved by name to avoid
- patching an already patched function.
- """
- original = getattr(target_mod, func_name)
-
- # set the 'unpatched' attribute on the replacement to
- # point to the original.
- vars(replacement).setdefault('unpatched', original)
-
- # replace the function in the original module
- setattr(target_mod, func_name, replacement)
-
-
-def get_unpatched_function(candidate):
- return getattr(candidate, 'unpatched')
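-
-
-def _demo_patch_func():  # pragma: no cover
-    # Hypothetical sketch (not in the original module): after patching, the
-    # replacement keeps the original under its 'unpatched' attribute.
-    original = distutils.filelist.findall
-    patch_func(setuptools.findall, distutils.filelist, 'findall')
-    assert get_unpatched_function(setuptools.findall) is original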
-
-
-def patch_for_msvc_specialized_compiler():
- """
- Patch functions in distutils to use standalone Microsoft Visual C++
- compilers.
- """
- # import late to avoid circular imports on Python < 3.5
- msvc = import_module('setuptools.msvc')
-
- if platform.system() != 'Windows':
- # Compilers only available on Microsoft Windows
- return
-
- def patch_params(mod_name, func_name):
- """
- Prepare the parameters for patch_func to patch indicated function.
- """
- repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
- repl_name = repl_prefix + func_name.lstrip('_')
- repl = getattr(msvc, repl_name)
- mod = import_module(mod_name)
- if not hasattr(mod, func_name):
- raise ImportError(func_name)
- return repl, mod, func_name
-
- # Python 2.7 to 3.4
- msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
-
- # Python 3.5+
- msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
-
- try:
- # Patch distutils.msvc9compiler
- patch_func(*msvc9('find_vcvarsall'))
- patch_func(*msvc9('query_vcvarsall'))
- except ImportError:
- pass
-
- try:
- # Patch distutils._msvccompiler._get_vc_env
- patch_func(*msvc14('_get_vc_env'))
- except ImportError:
- pass
-
- try:
- # Patch distutils._msvccompiler.gen_lib_options for Numpy
- patch_func(*msvc14('gen_lib_options'))
- except ImportError:
- pass
diff --git a/contrib/python/setuptools/py3/setuptools/msvc.py b/contrib/python/setuptools/py3/setuptools/msvc.py
deleted file mode 100644
index 281ea1c2af6..00000000000
--- a/contrib/python/setuptools/py3/setuptools/msvc.py
+++ /dev/null
@@ -1,1805 +0,0 @@
-"""
-Improved support for Microsoft Visual C++ compilers.
-
-Known supported compilers:
---------------------------
-Microsoft Visual C++ 9.0:
- Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
- Microsoft Windows SDK 6.1 (x86, x64, ia64)
- Microsoft Windows SDK 7.0 (x86, x64, ia64)
-
-Microsoft Visual C++ 10.0:
- Microsoft Windows SDK 7.1 (x86, x64, ia64)
-
-Microsoft Visual C++ 14.X:
- Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
- Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
- Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)
-
-This may also support compilers shipped with compatible Visual Studio versions.
-"""
-
-import json
-from io import open
-from os import listdir, pathsep
-from os.path import join, isfile, isdir, dirname
-import sys
-import contextlib
-import platform
-import itertools
-import subprocess
-import distutils.errors
-from setuptools.extern.packaging.version import LegacyVersion
-from setuptools.extern.more_itertools import unique_everseen
-
-from .monkey import get_unpatched
-
-if platform.system() == 'Windows':
- import winreg
- from os import environ
-else:
- # Mock winreg and environ so the module can be imported on this platform.
-
- class winreg:
- HKEY_USERS = None
- HKEY_CURRENT_USER = None
- HKEY_LOCAL_MACHINE = None
- HKEY_CLASSES_ROOT = None
-
- environ = dict()
-
-_msvc9_suppress_errors = (
- # msvc9compiler isn't available on some platforms
- ImportError,
-
- # msvc9compiler raises DistutilsPlatformError in some
- # environments. See #1118.
- distutils.errors.DistutilsPlatformError,
-)
-
-try:
- from distutils.msvc9compiler import Reg
-except _msvc9_suppress_errors:
- pass
-
-
-def msvc9_find_vcvarsall(version):
- """
- Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
- compiler build for Python
- (VCForPython / Microsoft Visual C++ Compiler for Python 2.7).
-
- Fall back to original behavior when the standalone compiler is not
- available.
-
- Redirect the path of "vcvarsall.bat".
-
- Parameters
- ----------
- version: float
- Required Microsoft Visual C++ version.
-
- Return
- ------
- str
- vcvarsall.bat path
- """
- vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
- key = vc_base % ('', version)
- try:
- # Per-user installs register the compiler path here
- productdir = Reg.get_value(key, "installdir")
- except KeyError:
- try:
- # All-user installs on a 64-bit system register here
- key = vc_base % ('Wow6432Node\\', version)
- productdir = Reg.get_value(key, "installdir")
- except KeyError:
- productdir = None
-
- if productdir:
- vcvarsall = join(productdir, "vcvarsall.bat")
- if isfile(vcvarsall):
- return vcvarsall
-
- return get_unpatched(msvc9_find_vcvarsall)(version)
-
-
-def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
- """
- Patched "distutils.msvc9compiler.query_vcvarsall" for support extra
- Microsoft Visual C++ 9.0 and 10.0 compilers.
-
- Set environment without use of "vcvarsall.bat".
-
- Parameters
- ----------
- ver: float
- Required Microsoft Visual C++ version.
- arch: str
- Target architecture.
-
- Return
- ------
- dict
- environment
- """
-    # Try to get the environment from vcvarsall.bat (classical way)
-    try:
-        orig = get_unpatched(msvc9_query_vcvarsall)
-        return orig(ver, arch, *args, **kwargs)
-    except distutils.errors.DistutilsPlatformError:
-        # Ignore the error if vcvarsall.bat is missing
-        pass
-    except ValueError:
-        # Ignore the error if the environment was not set by vcvarsall.bat
-        pass
-
- # If error, try to set environment directly
- try:
- return EnvironmentInfo(arch, ver).return_env()
- except distutils.errors.DistutilsPlatformError as exc:
- _augment_exception(exc, ver, arch)
- raise
-
-
-def _msvc14_find_vc2015():
- """Python 3.8 "distutils/_msvccompiler.py" backport"""
- try:
- key = winreg.OpenKey(
- winreg.HKEY_LOCAL_MACHINE,
- r"Software\Microsoft\VisualStudio\SxS\VC7",
- 0,
- winreg.KEY_READ | winreg.KEY_WOW64_32KEY
- )
- except OSError:
- return None, None
-
- best_version = 0
- best_dir = None
- with key:
- for i in itertools.count():
- try:
- v, vc_dir, vt = winreg.EnumValue(key, i)
- except OSError:
- break
- if v and vt == winreg.REG_SZ and isdir(vc_dir):
- try:
- version = int(float(v))
- except (ValueError, TypeError):
- continue
- if version >= 14 and version > best_version:
- best_version, best_dir = version, vc_dir
- return best_version, best_dir
-
-
-def _msvc14_find_vc2017():
- """Python 3.8 "distutils/_msvccompiler.py" backport
-
- Returns "15, path" based on the result of invoking vswhere.exe
- If no install is found, returns "None, None"
-
- The version is returned to avoid unnecessarily changing the function
- result. It may be ignored when the path is not None.
-
- If vswhere.exe is not available, by definition, VS 2017 is not
- installed.
- """
- root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles")
- if not root:
- return None, None
-
- try:
- path = subprocess.check_output([
- join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
- "-latest",
- "-prerelease",
- "-requiresAny",
- "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
- "-requires", "Microsoft.VisualStudio.Workload.WDExpress",
- "-property", "installationPath",
- "-products", "*",
- ]).decode(encoding="mbcs", errors="strict").strip()
- except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
- return None, None
-
- path = join(path, "VC", "Auxiliary", "Build")
- if isdir(path):
- return 15, path
-
- return None, None
-
-
-PLAT_SPEC_TO_RUNTIME = {
- 'x86': 'x86',
- 'x86_amd64': 'x64',
- 'x86_arm': 'arm',
- 'x86_arm64': 'arm64'
-}
-
-
-def _msvc14_find_vcvarsall(plat_spec):
- """Python 3.8 "distutils/_msvccompiler.py" backport"""
- _, best_dir = _msvc14_find_vc2017()
- vcruntime = None
-
- if plat_spec in PLAT_SPEC_TO_RUNTIME:
- vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
- else:
- vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
-
- if best_dir:
- vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**",
- vcruntime_plat, "Microsoft.VC14*.CRT",
- "vcruntime140.dll")
- try:
- import glob
- vcruntime = glob.glob(vcredist, recursive=True)[-1]
- except (ImportError, OSError, LookupError):
- vcruntime = None
-
- if not best_dir:
- best_version, best_dir = _msvc14_find_vc2015()
- if best_version:
- vcruntime = join(best_dir, 'redist', vcruntime_plat,
- "Microsoft.VC140.CRT", "vcruntime140.dll")
-
- if not best_dir:
- return None, None
-
- vcvarsall = join(best_dir, "vcvarsall.bat")
- if not isfile(vcvarsall):
- return None, None
-
- if not vcruntime or not isfile(vcruntime):
- vcruntime = None
-
- return vcvarsall, vcruntime
-
-
-def _msvc14_get_vc_env(plat_spec):
- """Python 3.8 "distutils/_msvccompiler.py" backport"""
- if "DISTUTILS_USE_SDK" in environ:
- return {
- key.lower(): value
- for key, value in environ.items()
- }
-
- vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)
- if not vcvarsall:
- raise distutils.errors.DistutilsPlatformError(
- "Unable to find vcvarsall.bat"
- )
-
- try:
- out = subprocess.check_output(
- 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
- stderr=subprocess.STDOUT,
- ).decode('utf-16le', errors='replace')
- except subprocess.CalledProcessError as exc:
- raise distutils.errors.DistutilsPlatformError(
- "Error executing {}".format(exc.cmd)
- ) from exc
-
- env = {
- key.lower(): value
- for key, _, value in
- (line.partition('=') for line in out.splitlines())
- if key and value
- }
-
- if vcruntime:
- env['py_vcruntime_redist'] = vcruntime
- return env
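-
-
-def _demo_vc_env_parsing():  # pragma: no cover
-    # Illustrative sketch (not in the original module): `cmd /u` makes the
-    # `set` output UTF-16-LE; each line is then split on its first '='.
-    out = 'PATH=C:\\VC\\bin\nINCLUDE=C:\\VC\\include\n'
-    env = {
-        key.lower(): value
-        for key, _, value in
-        (line.partition('=') for line in out.splitlines())
-        if key and value
-    }
-    assert env == {'path': 'C:\\VC\\bin', 'include': 'C:\\VC\\include'}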
-
-
-def msvc14_get_vc_env(plat_spec):
- """
- Patched "distutils._msvccompiler._get_vc_env" for support extra
- Microsoft Visual C++ 14.X compilers.
-
- Set environment without use of "vcvarsall.bat".
-
- Parameters
- ----------
- plat_spec: str
- Target architecture.
-
- Return
- ------
- dict
- environment
- """
-
- # Always use backport from CPython 3.8
- try:
- return _msvc14_get_vc_env(plat_spec)
- except distutils.errors.DistutilsPlatformError as exc:
- _augment_exception(exc, 14.0)
- raise
-
-
-def msvc14_gen_lib_options(*args, **kwargs):
- """
- Patched "distutils._msvccompiler.gen_lib_options" for fix
- compatibility between "numpy.distutils" and "distutils._msvccompiler"
- (for Numpy < 1.11.2)
- """
- if "numpy.distutils" in sys.modules:
- import numpy as np
- if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
- return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
- return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
-
-
-def _augment_exception(exc, version, arch=''):
- """
- Add details to the exception message to help guide the user
- as to what action will resolve it.
- """
- # Error if MSVC++ directory not found or environment not set
- message = exc.args[0]
-
- if "vcvarsall" in message.lower() or "visual c" in message.lower():
- # Special error message if MSVC++ not installed
- tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'
- message = tmpl.format(**locals())
- msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
- if version == 9.0:
-            if 'ia64' in arch.lower():
- # For VC++ 9.0, if IA64 support is needed, redirect user
- # to Windows SDK 7.0.
- # Note: No download link available from Microsoft.
- message += ' Get it with "Microsoft Windows SDK 7.0"'
- else:
-                # For VC++ 9.0, redirect user to VC++ for Python 2.7:
- # This redirection link is maintained by Microsoft.
- # Contact [email protected] if it needs updating.
- message += ' Get it from http://aka.ms/vcpython27'
- elif version == 10.0:
- # For VC++ 10.0 Redirect user to Windows SDK 7.1
- message += ' Get it with "Microsoft Windows SDK 7.1": '
- message += msdownload % 8279
- elif version >= 14.0:
- # For VC++ 14.X Redirect user to latest Visual C++ Build Tools
- message += (' Get it with "Microsoft C++ Build Tools": '
- r'https://visualstudio.microsoft.com'
- r'/visual-cpp-build-tools/')
-
- exc.args = (message, )
-
-
-class PlatformInfo:
- """
- Current and Target Architectures information.
-
- Parameters
- ----------
- arch: str
- Target architecture.
- """
- current_cpu = environ.get('processor_architecture', '').lower()
-
- def __init__(self, arch):
- self.arch = arch.lower().replace('x64', 'amd64')
-
- @property
- def target_cpu(self):
- """
- Return Target CPU architecture.
-
- Return
- ------
- str
- Target CPU
- """
- return self.arch[self.arch.find('_') + 1:]
-
- def target_is_x86(self):
- """
-        Return True if target CPU is x86 32 bits.
-
- Return
- ------
- bool
- CPU is x86 32 bits
- """
- return self.target_cpu == 'x86'
-
- def current_is_x86(self):
- """
-        Return True if current CPU is x86 32 bits.
-
- Return
- ------
- bool
- CPU is x86 32 bits
- """
- return self.current_cpu == 'x86'
-
-    def current_dir(self, hidex86=False, x64=False):
-        r"""
-        Current platform specific subfolder.
-
-        Parameters
-        ----------
-        hidex86: bool
-            return '' and not '\x86' if architecture is x86.
-        x64: bool
-            return '\x64' and not '\amd64' if architecture is amd64.
-
-        Return
-        ------
-        str
-            subfolder: '\current', or '' (see hidex86 parameter)
-        """
- return (
- '' if (self.current_cpu == 'x86' and hidex86) else
- r'\x64' if (self.current_cpu == 'amd64' and x64) else
- r'\%s' % self.current_cpu
- )
-
- def target_dir(self, hidex86=False, x64=False):
- r"""
- Target platform specific subfolder.
-
- Parameters
- ----------
- hidex86: bool
- return '' and not '\x86' if architecture is x86.
- x64: bool
- return '\x64' and not '\amd64' if architecture is amd64.
-
- Return
- ------
- str
-            subfolder: '\target', or '' (see hidex86 parameter)
- """
- return (
- '' if (self.target_cpu == 'x86' and hidex86) else
- r'\x64' if (self.target_cpu == 'amd64' and x64) else
- r'\%s' % self.target_cpu
- )
-
- def cross_dir(self, forcex86=False):
- r"""
- Cross platform specific subfolder.
-
- Parameters
- ----------
- forcex86: bool
- Use 'x86' as current architecture even if current architecture is
- not x86.
-
- Return
- ------
- str
- subfolder: '' if target architecture is current architecture,
- '\current_target' if not.
- """
- current = 'x86' if forcex86 else self.current_cpu
- return (
- '' if self.target_cpu == current else
- self.target_dir().replace('\\', '\\%s_' % current)
- )
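-
-
-def _demo_platform_info():  # pragma: no cover
-    # Hypothetical examples (not in the original module): 'x64' is
-    # normalized to 'amd64' and the target CPU is the part after '_'.
-    pi = PlatformInfo('x86_amd64')
-    assert pi.target_cpu == 'amd64'
-    assert pi.target_dir() == r'\amd64'
-    assert pi.target_dir(x64=True) == r'\x64'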
-
-
-class RegistryInfo:
- """
- Microsoft Visual Studio related registry information.
-
- Parameters
- ----------
- platform_info: PlatformInfo
- "PlatformInfo" instance.
- """
- HKEYS = (winreg.HKEY_USERS,
- winreg.HKEY_CURRENT_USER,
- winreg.HKEY_LOCAL_MACHINE,
- winreg.HKEY_CLASSES_ROOT)
-
- def __init__(self, platform_info):
- self.pi = platform_info
-
- @property
- def visualstudio(self):
- """
- Microsoft Visual Studio root registry key.
-
- Return
- ------
- str
- Registry key
- """
- return 'VisualStudio'
-
- @property
- def sxs(self):
- """
- Microsoft Visual Studio SxS registry key.
-
- Return
- ------
- str
- Registry key
- """
- return join(self.visualstudio, 'SxS')
-
- @property
- def vc(self):
- """
- Microsoft Visual C++ VC7 registry key.
-
- Return
- ------
- str
- Registry key
- """
- return join(self.sxs, 'VC7')
-
- @property
- def vs(self):
- """
- Microsoft Visual Studio VS7 registry key.
-
- Return
- ------
- str
- Registry key
- """
- return join(self.sxs, 'VS7')
-
- @property
- def vc_for_python(self):
- """
- Microsoft Visual C++ for Python registry key.
-
- Return
- ------
- str
- Registry key
- """
- return r'DevDiv\VCForPython'
-
- @property
- def microsoft_sdk(self):
- """
- Microsoft SDK registry key.
-
- Return
- ------
- str
- Registry key
- """
- return 'Microsoft SDKs'
-
- @property
- def windows_sdk(self):
- """
- Microsoft Windows/Platform SDK registry key.
-
- Return
- ------
- str
- Registry key
- """
- return join(self.microsoft_sdk, 'Windows')
-
- @property
- def netfx_sdk(self):
- """
- Microsoft .NET Framework SDK registry key.
-
- Return
- ------
- str
- Registry key
- """
- return join(self.microsoft_sdk, 'NETFXSDK')
-
- @property
- def windows_kits_roots(self):
- """
- Microsoft Windows Kits Roots registry key.
-
- Return
- ------
- str
- Registry key
- """
- return r'Windows Kits\Installed Roots'
-
- def microsoft(self, key, x86=False):
- """
- Return key in Microsoft software registry.
-
- Parameters
- ----------
-        key: str
-            Registry key path to look in.
-        x86: bool
-            Force x86 software registry.
-
- Return
- ------
- str
- Registry key
- """
- node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
- return join('Software', node64, 'Microsoft', key)
-
- def lookup(self, key, name):
- """
-        Look up a value in the Microsoft software registry.
-
-        Parameters
-        ----------
-        key: str
-            Registry key path to look in.
- name: str
- Value name to find.
-
- Return
- ------
- str
- value
- """
- key_read = winreg.KEY_READ
- openkey = winreg.OpenKey
- closekey = winreg.CloseKey
- ms = self.microsoft
- for hkey in self.HKEYS:
- bkey = None
- try:
- bkey = openkey(hkey, ms(key), 0, key_read)
- except (OSError, IOError):
- if not self.pi.current_is_x86():
- try:
- bkey = openkey(hkey, ms(key, True), 0, key_read)
- except (OSError, IOError):
- continue
- else:
- continue
- try:
- return winreg.QueryValueEx(bkey, name)[0]
- except (OSError, IOError):
- pass
- finally:
- if bkey:
- closekey(bkey)
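-
-
-# Illustrative usage sketch (not in the original module); the result depends
-# on the local registry, hence comments rather than executable code:
-#
-#   ri = RegistryInfo(PlatformInfo('x86'))
-#   ri.lookup(ri.vs, '14.0')   # e.g. a Visual Studio 14.0 install path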
-
-
-class SystemInfo:
- """
- Microsoft Windows and Visual Studio related system information.
-
- Parameters
- ----------
- registry_info: RegistryInfo
- "RegistryInfo" instance.
- vc_ver: float
- Required Microsoft Visual C++ version.
- """
-
-    # Variables and properties in this class use the original CamelCase
-    # names from Microsoft source files for easier comparison.
- WinDir = environ.get('WinDir', '')
- ProgramFiles = environ.get('ProgramFiles', '')
- ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
-
- def __init__(self, registry_info, vc_ver=None):
- self.ri = registry_info
- self.pi = self.ri.pi
-
- self.known_vs_paths = self.find_programdata_vs_vers()
-
- # Except for VS15+, VC version is aligned with VS version
- self.vs_ver = self.vc_ver = (
- vc_ver or self._find_latest_available_vs_ver())
-
- def _find_latest_available_vs_ver(self):
- """
- Find the latest VC version
-
- Return
- ------
- float
- version
- """
- reg_vc_vers = self.find_reg_vs_vers()
-
- if not (reg_vc_vers or self.known_vs_paths):
- raise distutils.errors.DistutilsPlatformError(
- 'No Microsoft Visual C++ version found')
-
- vc_vers = set(reg_vc_vers)
- vc_vers.update(self.known_vs_paths)
- return sorted(vc_vers)[-1]
-
- def find_reg_vs_vers(self):
- """
- Find Microsoft Visual Studio versions available in registry.
-
- Return
- ------
- list of float
- Versions
- """
- ms = self.ri.microsoft
- vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
- vs_vers = []
- for hkey, key in itertools.product(self.ri.HKEYS, vckeys):
- try:
- bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
- except (OSError, IOError):
- continue
- with bkey:
- subkeys, values, _ = winreg.QueryInfoKey(bkey)
- for i in range(values):
- with contextlib.suppress(ValueError):
- ver = float(winreg.EnumValue(bkey, i)[0])
- if ver not in vs_vers:
- vs_vers.append(ver)
- for i in range(subkeys):
- with contextlib.suppress(ValueError):
- ver = float(winreg.EnumKey(bkey, i))
- if ver not in vs_vers:
- vs_vers.append(ver)
- return sorted(vs_vers)
-
- def find_programdata_vs_vers(self):
- r"""
-        Find Visual Studio 2017+ versions from information in
- "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
-
- Return
- ------
- dict
- float version as key, path as value.
- """
- vs_versions = {}
- instances_dir = \
- r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
-
- try:
- hashed_names = listdir(instances_dir)
-
- except (OSError, IOError):
-            # The directory does not exist for all Visual Studio versions
- return vs_versions
-
- for name in hashed_names:
- try:
- # Get VS installation path from "state.json" file
- state_path = join(instances_dir, name, 'state.json')
- with open(state_path, 'rt', encoding='utf-8') as state_file:
- state = json.load(state_file)
- vs_path = state['installationPath']
-
- # Raises OSError if this VS installation does not contain VC
- listdir(join(vs_path, r'VC\Tools\MSVC'))
-
- # Store version and path
- vs_versions[self._as_float_version(
- state['installationVersion'])] = vs_path
-
- except (OSError, IOError, KeyError):
- # Skip if "state.json" file is missing or bad format
- continue
-
- return vs_versions
-
- @staticmethod
- def _as_float_version(version):
- """
- Return a string version as a simplified float version (major.minor)
-
- Parameters
- ----------
- version: str
- Version.
-
- Return
- ------
- float
- version
- """
- return float('.'.join(version.split('.')[:2]))
-
- @property
- def VSInstallDir(self):
- """
- Microsoft Visual Studio directory.
-
- Return
- ------
- str
- path
- """
- # Default path
- default = join(self.ProgramFilesx86,
- 'Microsoft Visual Studio %0.1f' % self.vs_ver)
-
-        # Try to get the path from the registry; if that fails, use the default path
- return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default
-
- @property
- def VCInstallDir(self):
- """
- Microsoft Visual C++ directory.
-
- Return
- ------
- str
- path
- """
- path = self._guess_vc() or self._guess_vc_legacy()
-
- if not isdir(path):
- msg = 'Microsoft Visual C++ directory not found'
- raise distutils.errors.DistutilsPlatformError(msg)
-
- return path
-
- def _guess_vc(self):
- """
- Locate Visual C++ for VS2017+.
-
- Return
- ------
- str
- path
- """
- if self.vs_ver <= 14.0:
- return ''
-
- try:
- # First search in known VS paths
- vs_dir = self.known_vs_paths[self.vs_ver]
- except KeyError:
- # Else, search with path from registry
- vs_dir = self.VSInstallDir
-
- guess_vc = join(vs_dir, r'VC\Tools\MSVC')
-
- # Subdir with VC exact version as name
- try:
- # Update the VC version with real one instead of VS version
- vc_ver = listdir(guess_vc)[-1]
- self.vc_ver = self._as_float_version(vc_ver)
- return join(guess_vc, vc_ver)
- except (OSError, IOError, IndexError):
- return ''
-
- def _guess_vc_legacy(self):
- """
- Locate Visual C++ for versions prior to 2017.
-
- Return
- ------
- str
- path
- """
- default = join(self.ProgramFilesx86,
- r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver)
-
- # Try to get "VC++ for Python" path from registry as default path
- reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)
- python_vc = self.ri.lookup(reg_path, 'installdir')
- default_vc = join(python_vc, 'VC') if python_vc else default
-
-        # Try to get the path from the registry; if that fails, use the default path
- return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc
-
- @property
- def WindowsSdkVersion(self):
- """
- Microsoft Windows SDK versions for specified MSVC++ version.
-
- Return
- ------
- tuple of str
- versions
- """
- if self.vs_ver <= 9.0:
- return '7.0', '6.1', '6.0a'
- elif self.vs_ver == 10.0:
- return '7.1', '7.0a'
- elif self.vs_ver == 11.0:
- return '8.0', '8.0a'
- elif self.vs_ver == 12.0:
- return '8.1', '8.1a'
- elif self.vs_ver >= 14.0:
- return '10.0', '8.1'
-
- @property
- def WindowsSdkLastVersion(self):
- """
- Microsoft Windows SDK last version.
-
- Return
- ------
- str
- version
- """
- return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))
-
- @property # noqa: C901
- def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME
- """
- Microsoft Windows SDK directory.
-
- Return
- ------
- str
- path
- """
- sdkdir = ''
- for ver in self.WindowsSdkVersion:
- # Try to get it from registry
- loc = join(self.ri.windows_sdk, 'v%s' % ver)
- sdkdir = self.ri.lookup(loc, 'installationfolder')
- if sdkdir:
- break
- if not sdkdir or not isdir(sdkdir):
- # Try to get "VC++ for Python" version from registry
- path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
- install_base = self.ri.lookup(path, 'installdir')
- if install_base:
- sdkdir = join(install_base, 'WinSDK')
- if not sdkdir or not isdir(sdkdir):
-            # If that fails, use the default new path
- for ver in self.WindowsSdkVersion:
- intver = ver[:ver.rfind('.')]
- path = r'Microsoft SDKs\Windows Kits\%s' % intver
- d = join(self.ProgramFiles, path)
- if isdir(d):
- sdkdir = d
- if not sdkdir or not isdir(sdkdir):
-            # If that fails, use the default old path
- for ver in self.WindowsSdkVersion:
- path = r'Microsoft SDKs\Windows\v%s' % ver
- d = join(self.ProgramFiles, path)
- if isdir(d):
- sdkdir = d
- if not sdkdir:
-            # If that fails, use the Platform SDK
- sdkdir = join(self.VCInstallDir, 'PlatformSDK')
- return sdkdir
-
- @property
- def WindowsSDKExecutablePath(self):
- """
- Microsoft Windows SDK executable directory.
-
- Return
- ------
- str
- path
- """
- # Find WinSDK NetFx Tools registry dir name
- if self.vs_ver <= 11.0:
- netfxver = 35
- arch = ''
- else:
- netfxver = 40
-            hidex86 = self.vs_ver <= 12.0
- arch = self.pi.current_dir(x64=True, hidex86=hidex86)
- fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
-
-        # List all possible registry paths
- regpaths = []
- if self.vs_ver >= 14.0:
- for ver in self.NetFxSdkVersion:
- regpaths += [join(self.ri.netfx_sdk, ver, fx)]
-
- for ver in self.WindowsSdkVersion:
- regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
-
-        # Return the installation folder from the most recent path
- for path in regpaths:
- execpath = self.ri.lookup(path, 'installationfolder')
- if execpath:
- return execpath
-
- @property
- def FSharpInstallDir(self):
- """
- Microsoft Visual F# directory.
-
- Return
- ------
- str
- path
- """
- path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver)
- return self.ri.lookup(path, 'productdir') or ''
-
- @property
- def UniversalCRTSdkDir(self):
- """
- Microsoft Universal CRT SDK directory.
-
- Return
- ------
- str
- path
- """
- # Set Kit Roots versions for specified MSVC++ version
- vers = ('10', '81') if self.vs_ver >= 14.0 else ()
-
-        # Find the path of the most recent Kit
- for ver in vers:
- sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
- 'kitsroot%s' % ver)
- if sdkdir:
-                return sdkdir
-
- @property
- def UniversalCRTSdkLastVersion(self):
- """
- Microsoft Universal C Runtime SDK last version.
-
- Return
- ------
- str
- version
- """
- return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))
-
- @property
- def NetFxSdkVersion(self):
- """
- Microsoft .NET Framework SDK versions.
-
- Return
- ------
- tuple of str
- versions
- """
- # Set FxSdk versions for specified VS version
- return (('4.7.2', '4.7.1', '4.7',
- '4.6.2', '4.6.1', '4.6',
- '4.5.2', '4.5.1', '4.5')
- if self.vs_ver >= 14.0 else ())
-
- @property
- def NetFxSdkDir(self):
- """
- Microsoft .NET Framework SDK directory.
-
- Return
- ------
- str
- path
- """
- sdkdir = ''
- for ver in self.NetFxSdkVersion:
- loc = join(self.ri.netfx_sdk, ver)
- sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
- if sdkdir:
- break
- return sdkdir
-
- @property
- def FrameworkDir32(self):
- """
- Microsoft .NET Framework 32bit directory.
-
- Return
- ------
- str
- path
- """
- # Default path
- guess_fw = join(self.WinDir, r'Microsoft.NET\Framework')
-
-        # Try to get the path from the registry; if that fails, use the default path
- return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
-
- @property
- def FrameworkDir64(self):
- """
- Microsoft .NET Framework 64bit directory.
-
- Return
- ------
- str
- path
- """
- # Default path
- guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64')
-
-        # Try to get the path from the registry; if that fails, use the default path
- return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
-
- @property
- def FrameworkVersion32(self):
- """
- Microsoft .NET Framework 32bit versions.
-
- Return
- ------
- tuple of str
- versions
- """
- return self._find_dot_net_versions(32)
-
- @property
- def FrameworkVersion64(self):
- """
- Microsoft .NET Framework 64bit versions.
-
- Return
- ------
- tuple of str
- versions
- """
- return self._find_dot_net_versions(64)
-
- def _find_dot_net_versions(self, bits):
- """
- Find Microsoft .NET Framework versions.
-
- Parameters
- ----------
- bits: int
- Platform number of bits: 32 or 64.
-
- Return
- ------
- tuple of str
- versions
- """
- # Find actual .NET version in registry
- reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
- dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
- ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
-
- # Set .NET versions for specified MSVC++ version
- if self.vs_ver >= 12.0:
- return ver, 'v4.0'
- elif self.vs_ver >= 10.0:
- return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'
- elif self.vs_ver == 9.0:
- return 'v3.5', 'v2.0.50727'
- elif self.vs_ver == 8.0:
- return 'v3.0', 'v2.0.50727'
-
- @staticmethod
- def _use_last_dir_name(path, prefix=''):
- """
- Return name of the last dir in path or '' if no dir found.
-
- Parameters
- ----------
- path: str
- Use dirs in this path
- prefix: str
- Use only dirs starting by this prefix
-
- Return
- ------
- str
- name
- """
- matching_dirs = (
- dir_name
- for dir_name in reversed(listdir(path))
- if isdir(join(path, dir_name)) and
- dir_name.startswith(prefix)
- )
- return next(matching_dirs, None) or ''
-
-
-class EnvironmentInfo:
- """
-    Return environment variables for the specified Microsoft Visual C++
-    version and platform: Lib, Include, Path and libpath.
-
-    This class is compatible with Microsoft Visual C++ 9.0 to 14.X.
-
-    Created by analysing Microsoft environment configuration files like
-    "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
-
- Parameters
- ----------
- arch: str
- Target architecture.
- vc_ver: float
- Required Microsoft Visual C++ version. If not set, autodetect the last
- version.
- vc_min_ver: float
- Minimum Microsoft Visual C++ version.
- """
-
-    # Variables and properties in this class use the original CamelCase
-    # names from Microsoft source files for easier comparison.
-
- def __init__(self, arch, vc_ver=None, vc_min_ver=0):
- self.pi = PlatformInfo(arch)
- self.ri = RegistryInfo(self.pi)
- self.si = SystemInfo(self.ri, vc_ver)
-
- if self.vc_ver < vc_min_ver:
- err = 'No suitable Microsoft Visual C++ version found'
- raise distutils.errors.DistutilsPlatformError(err)
-
- @property
- def vs_ver(self):
- """
-        Microsoft Visual Studio version.
-
- Return
- ------
- float
- version
- """
- return self.si.vs_ver
-
- @property
- def vc_ver(self):
- """
- Microsoft Visual C++ version.
-
- Return
- ------
- float
- version
- """
- return self.si.vc_ver
-
- @property
- def VSTools(self):
- """
- Microsoft Visual Studio Tools.
-
- Return
- ------
- list of str
- paths
- """
- paths = [r'Common7\IDE', r'Common7\Tools']
-
- if self.vs_ver >= 14.0:
- arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
- paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
- paths += [r'Team Tools\Performance Tools']
- paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
-
- return [join(self.si.VSInstallDir, path) for path in paths]
-
- @property
- def VCIncludes(self):
- """
- Microsoft Visual C++ & Microsoft Foundation Class Includes.
-
- Return
- ------
- list of str
- paths
- """
- return [join(self.si.VCInstallDir, 'Include'),
- join(self.si.VCInstallDir, r'ATLMFC\Include')]
-
- @property
- def VCLibraries(self):
- """
- Microsoft Visual C++ & Microsoft Foundation Class Libraries.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver >= 15.0:
- arch_subdir = self.pi.target_dir(x64=True)
- else:
- arch_subdir = self.pi.target_dir(hidex86=True)
- paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
-
- if self.vs_ver >= 14.0:
- paths += [r'Lib\store%s' % arch_subdir]
-
- return [join(self.si.VCInstallDir, path) for path in paths]
-
- @property
- def VCStoreRefs(self):
- """
- Microsoft Visual C++ store references Libraries.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 14.0:
- return []
- return [join(self.si.VCInstallDir, r'Lib\store\references')]
-
- @property
- def VCTools(self):
- """
- Microsoft Visual C++ Tools.
-
- Return
- ------
- list of str
- paths
- """
- si = self.si
- tools = [join(si.VCInstallDir, 'VCPackages')]
-
-        forcex86 = self.vs_ver <= 10.0
- arch_subdir = self.pi.cross_dir(forcex86)
- if arch_subdir:
- tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
-
- if self.vs_ver == 14.0:
- path = 'Bin%s' % self.pi.current_dir(hidex86=True)
- tools += [join(si.VCInstallDir, path)]
-
- elif self.vs_ver >= 15.0:
- host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
- r'bin\HostX64%s')
- tools += [join(
- si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
-
- if self.pi.current_cpu != self.pi.target_cpu:
- tools += [join(
- si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
-
- else:
- tools += [join(si.VCInstallDir, 'Bin')]
-
- return tools
-
- @property
- def OSLibraries(self):
- """
- Microsoft Windows SDK Libraries.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver <= 10.0:
- arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
- return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
-
- else:
- arch_subdir = self.pi.target_dir(x64=True)
- lib = join(self.si.WindowsSdkDir, 'lib')
- libver = self._sdk_subdir
- return [join(lib, '%sum%s' % (libver, arch_subdir))]
-
- @property
- def OSIncludes(self):
- """
- Microsoft Windows SDK Include.
-
- Return
- ------
- list of str
- paths
- """
- include = join(self.si.WindowsSdkDir, 'include')
-
- if self.vs_ver <= 10.0:
- return [include, join(include, 'gl')]
-
- else:
- if self.vs_ver >= 14.0:
- sdkver = self._sdk_subdir
- else:
- sdkver = ''
- return [join(include, '%sshared' % sdkver),
- join(include, '%sum' % sdkver),
- join(include, '%swinrt' % sdkver)]
-
- @property
- def OSLibpath(self):
- """
- Microsoft Windows SDK Libraries Paths.
-
- Return
- ------
- list of str
- paths
- """
- ref = join(self.si.WindowsSdkDir, 'References')
- libpath = []
-
- if self.vs_ver <= 9.0:
- libpath += self.OSLibraries
-
- if self.vs_ver >= 11.0:
- libpath += [join(ref, r'CommonConfiguration\Neutral')]
-
- if self.vs_ver >= 14.0:
- libpath += [
- ref,
- join(self.si.WindowsSdkDir, 'UnionMetadata'),
- join(
- ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
- join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
- join(
- ref, 'Windows.Networking.Connectivity.WwanContract',
- '1.0.0.0'),
- join(
- self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs',
- '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration',
- 'neutral'),
- ]
- return libpath
-
- @property
- def SdkTools(self):
- """
- Microsoft Windows SDK Tools.
-
- Return
- ------
- list of str
- paths
- """
- return list(self._sdk_tools())
-
- def _sdk_tools(self):
- """
- Microsoft Windows SDK Tools paths generator.
-
- Return
- ------
- generator of str
- paths
- """
- if self.vs_ver < 15.0:
- bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
- yield join(self.si.WindowsSdkDir, bin_dir)
-
- if not self.pi.current_is_x86():
- arch_subdir = self.pi.current_dir(x64=True)
- path = 'Bin%s' % arch_subdir
- yield join(self.si.WindowsSdkDir, path)
-
- if self.vs_ver in (10.0, 11.0):
- if self.pi.target_is_x86():
- arch_subdir = ''
- else:
- arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
- path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
- yield join(self.si.WindowsSdkDir, path)
-
- elif self.vs_ver >= 15.0:
- path = join(self.si.WindowsSdkDir, 'Bin')
- arch_subdir = self.pi.current_dir(x64=True)
- sdkver = self.si.WindowsSdkLastVersion
- yield join(path, '%s%s' % (sdkver, arch_subdir))
-
- if self.si.WindowsSDKExecutablePath:
- yield self.si.WindowsSDKExecutablePath
-
- @property
- def _sdk_subdir(self):
- """
- Microsoft Windows SDK version subdir.
-
- Return
- ------
- str
- subdir
- """
- ucrtver = self.si.WindowsSdkLastVersion
- return ('%s\\' % ucrtver) if ucrtver else ''
-
- @property
- def SdkSetup(self):
- """
- Microsoft Windows SDK Setup.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver > 9.0:
- return []
-
- return [join(self.si.WindowsSdkDir, 'Setup')]
-
- @property
- def FxTools(self):
- """
- Microsoft .NET Framework Tools.
-
- Return
- ------
- list of str
- paths
- """
- pi = self.pi
- si = self.si
-
- if self.vs_ver <= 10.0:
- include32 = True
- include64 = not pi.target_is_x86() and not pi.current_is_x86()
- else:
- include32 = pi.target_is_x86() or pi.current_is_x86()
- include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
-
- tools = []
- if include32:
- tools += [join(si.FrameworkDir32, ver)
- for ver in si.FrameworkVersion32]
- if include64:
- tools += [join(si.FrameworkDir64, ver)
- for ver in si.FrameworkVersion64]
- return tools
-
- @property
- def NetFxSDKLibraries(self):
- """
-        Microsoft .NET Framework SDK Libraries.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
- return []
-
- arch_subdir = self.pi.target_dir(x64=True)
- return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
-
- @property
- def NetFxSDKIncludes(self):
- """
-        Microsoft .NET Framework SDK Includes.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
- return []
-
- return [join(self.si.NetFxSdkDir, r'include\um')]
-
- @property
- def VsTDb(self):
- """
- Microsoft Visual Studio Team System Database.
-
- Return
- ------
- list of str
- paths
- """
- return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
-
- @property
- def MSBuild(self):
- """
- Microsoft Build Engine.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 12.0:
- return []
- elif self.vs_ver < 15.0:
- base_path = self.si.ProgramFilesx86
- arch_subdir = self.pi.current_dir(hidex86=True)
- else:
- base_path = self.si.VSInstallDir
- arch_subdir = ''
-
- path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
- build = [join(base_path, path)]
-
- if self.vs_ver >= 15.0:
- # Add Roslyn C# & Visual Basic Compiler
- build += [join(base_path, path, 'Roslyn')]
-
- return build
-
- @property
- def HTMLHelpWorkshop(self):
- """
- Microsoft HTML Help Workshop.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 11.0:
- return []
-
- return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
-
- @property
- def UCRTLibraries(self):
- """
- Microsoft Universal C Runtime SDK Libraries.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 14.0:
- return []
-
- arch_subdir = self.pi.target_dir(x64=True)
- lib = join(self.si.UniversalCRTSdkDir, 'lib')
- ucrtver = self._ucrt_subdir
- return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
-
- @property
- def UCRTIncludes(self):
- """
- Microsoft Universal C Runtime SDK Include.
-
- Return
- ------
- list of str
- paths
- """
- if self.vs_ver < 14.0:
- return []
-
- include = join(self.si.UniversalCRTSdkDir, 'include')
- return [join(include, '%sucrt' % self._ucrt_subdir)]
-
- @property
- def _ucrt_subdir(self):
- """
- Microsoft Universal C Runtime SDK version subdir.
-
- Return
- ------
- str
- subdir
- """
- ucrtver = self.si.UniversalCRTSdkLastVersion
- return ('%s\\' % ucrtver) if ucrtver else ''
-
- @property
- def FSharp(self):
- """
- Microsoft Visual F#.
-
- Return
- ------
- list of str
- paths
- """
-        # Intended to limit F# support to VS 11.0-12.0; the original
-        # expression (11.0 > self.vs_ver > 12.0) could never be true.
-        if not (11.0 <= self.vs_ver <= 12.0):
-            return []
-
- return [self.si.FSharpInstallDir]
-
- @property
- def VCRuntimeRedist(self):
- """
- Microsoft Visual C++ runtime redistributable dll.
-
- Return
- ------
- str
- path
- """
- vcruntime = 'vcruntime%d0.dll' % self.vc_ver
- arch_subdir = self.pi.target_dir(x64=True).strip('\\')
-
- # Installation prefixes candidates
- prefixes = []
- tools_path = self.si.VCInstallDir
- redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
- if isdir(redist_path):
- # Redist version may not be exactly the same as tools
- redist_path = join(redist_path, listdir(redist_path)[-1])
- prefixes += [redist_path, join(redist_path, 'onecore')]
-
- prefixes += [join(tools_path, 'redist')] # VS14 legacy path
-
- # CRT directory
- crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),
-                    # Sometimes stored in a directory with the VS version instead of VC
- 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))
-
- # vcruntime path
- for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
- path = join(prefix, arch_subdir, crt_dir, vcruntime)
- if isfile(path):
- return path
-
- def return_env(self, exists=True):
- """
- Return environment dict.
-
- Parameters
- ----------
- exists: bool
-            If True, only return existing paths.
-
- Return
- ------
- dict
- environment
- """
- env = dict(
- include=self._build_paths('include',
- [self.VCIncludes,
- self.OSIncludes,
- self.UCRTIncludes,
- self.NetFxSDKIncludes],
- exists),
- lib=self._build_paths('lib',
- [self.VCLibraries,
- self.OSLibraries,
- self.FxTools,
- self.UCRTLibraries,
- self.NetFxSDKLibraries],
- exists),
- libpath=self._build_paths('libpath',
- [self.VCLibraries,
- self.FxTools,
- self.VCStoreRefs,
- self.OSLibpath],
- exists),
- path=self._build_paths('path',
- [self.VCTools,
- self.VSTools,
- self.VsTDb,
- self.SdkTools,
- self.SdkSetup,
- self.FxTools,
- self.MSBuild,
- self.HTMLHelpWorkshop,
- self.FSharp],
- exists),
- )
-        # VCRuntimeRedist may be None when no redistributable DLL was found
-        vcruntime = self.VCRuntimeRedist
-        if self.vs_ver >= 14 and vcruntime and isfile(vcruntime):
-            env['py_vcruntime_redist'] = vcruntime
- return env
-
- def _build_paths(self, name, spec_path_lists, exists):
- """
- Given an environment variable name and specified paths,
- return a pathsep-separated string of paths containing
- unique, extant, directories from those paths and from
- the environment variable. Raise an error if no paths
- are resolved.
-
- Parameters
- ----------
- name: str
- Environment variable name
- spec_path_lists: list of str
- Paths
- exists: bool
-            If True, only return existing paths.
-
- Return
- ------
- str
- Pathsep-separated paths
- """
- # flatten spec_path_lists
- spec_paths = itertools.chain.from_iterable(spec_path_lists)
- env_paths = environ.get(name, '').split(pathsep)
- paths = itertools.chain(spec_paths, env_paths)
- extant_paths = list(filter(isdir, paths)) if exists else paths
- if not extant_paths:
- msg = "%s environment variable is empty" % name.upper()
- raise distutils.errors.DistutilsPlatformError(msg)
- unique_paths = unique_everseen(extant_paths)
- return pathsep.join(unique_paths)
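-
-
-# Illustrative usage sketch (not in the original module); requires a Windows
-# host with MSVC installed, hence comments rather than executable code:
-#
-#   env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
-#   env['path']     # pathsep-joined compiler and tool directories
-#   env['include']  # pathsep-joined header directories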
diff --git a/contrib/python/setuptools/py3/setuptools/namespaces.py b/contrib/python/setuptools/py3/setuptools/namespaces.py
deleted file mode 100644
index 44939e1c6d4..00000000000
--- a/contrib/python/setuptools/py3/setuptools/namespaces.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import os
-from distutils import log
-import itertools
-
-
-flatten = itertools.chain.from_iterable
-
-
-class Installer:
-
- nspkg_ext = '-nspkg.pth'
-
- def install_namespaces(self):
- nsp = self._get_all_ns_packages()
- if not nsp:
- return
- filename, ext = os.path.splitext(self._get_target())
- filename += self.nspkg_ext
- self.outputs.append(filename)
- log.info("Installing %s", filename)
- lines = map(self._gen_nspkg_line, nsp)
-
- if self.dry_run:
- # always generate the lines, even in dry run
- list(lines)
- return
-
- with open(filename, 'wt') as f:
- f.writelines(lines)
-
- def uninstall_namespaces(self):
- filename, ext = os.path.splitext(self._get_target())
- filename += self.nspkg_ext
- if not os.path.exists(filename):
- return
- log.info("Removing %s", filename)
- os.remove(filename)
-
- def _get_target(self):
- return self.target
-
- _nspkg_tmpl = (
- "import sys, types, os",
- "has_mfs = sys.version_info > (3, 5)",
- "p = os.path.join(%(root)s, *%(pth)r)",
- "importlib = has_mfs and __import__('importlib.util')",
- "has_mfs and __import__('importlib.machinery')",
- (
- "m = has_mfs and "
- "sys.modules.setdefault(%(pkg)r, "
- "importlib.util.module_from_spec("
- "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
- "[os.path.dirname(p)])))"
- ),
- (
- "m = m or "
- "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))"
- ),
- "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
- "(p not in mp) and mp.append(p)",
- )
- "lines for the namespace installer"
-
- _nspkg_tmpl_multi = (
- 'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
- )
- "additional line(s) when a parent package is indicated"
-
- def _get_root(self):
- return "sys._getframe(1).f_locals['sitedir']"
-
- def _gen_nspkg_line(self, pkg):
- pth = tuple(pkg.split('.'))
- root = self._get_root()
- tmpl_lines = self._nspkg_tmpl
- parent, sep, child = pkg.rpartition('.')
- if parent:
- tmpl_lines += self._nspkg_tmpl_multi
- return ';'.join(tmpl_lines) % locals() + '\n'
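-
-    # Illustrative note (not in the original file): for a pkg like 'a.b',
-    # the method returns one ';'-joined line; with a parent package it ends
-    # with "m and setattr(sys.modules['a'], 'b', m)". site.py executes the
-    # line when it processes the generated -nspkg.pth file.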
-
- def _get_all_ns_packages(self):
- """Return sorted list of all package namespaces"""
- pkgs = self.distribution.namespace_packages or []
- return sorted(flatten(map(self._pkg_names, pkgs)))
-
- @staticmethod
- def _pkg_names(pkg):
- """
- Given a namespace package, yield the components of that
- package.
-
- >>> names = Installer._pkg_names('a.b.c')
- >>> set(names) == set(['a', 'a.b', 'a.b.c'])
- True
- """
- parts = pkg.split('.')
- while parts:
- yield '.'.join(parts)
- parts.pop()
-
-
-class DevelopInstaller(Installer):
- def _get_root(self):
- return repr(str(self.egg_path))
-
- def _get_target(self):
- return self.egg_link
diff --git a/contrib/python/setuptools/py3/setuptools/package_index.py b/contrib/python/setuptools/py3/setuptools/package_index.py
deleted file mode 100644
index 270e7f3c91b..00000000000
--- a/contrib/python/setuptools/py3/setuptools/package_index.py
+++ /dev/null
@@ -1,1127 +0,0 @@
-"""PyPI and direct package downloading"""
-import sys
-import os
-import re
-import io
-import shutil
-import socket
-import base64
-import hashlib
-import itertools
-import warnings
-import configparser
-import html
-import http.client
-import urllib.parse
-import urllib.request
-import urllib.error
-from functools import wraps
-
-import setuptools
-from pkg_resources import (
- CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
- Environment, find_distributions, safe_name, safe_version,
- to_filename, Requirement, DEVELOP_DIST, EGG_DIST, parse_version,
-)
-from distutils import log
-from distutils.errors import DistutilsError
-from fnmatch import translate
-from setuptools.wheel import Wheel
-from setuptools.extern.more_itertools import unique_everseen
-
-
-EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
-HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
-PYPI_MD5 = re.compile(
- r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
- r'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\)'
-)
-URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
-EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
-
-__all__ = [
- 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
- 'interpret_distro_name',
-]
-
-_SOCKET_TIMEOUT = 15
-
-_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
-user_agent = _tmpl.format(
- py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
-
-
-def parse_requirement_arg(spec):
- try:
- return Requirement.parse(spec)
- except ValueError as e:
- raise DistutilsError(
- "Not a URL, existing file, or requirement spec: %r" % (spec,)
- ) from e
-
-
-def parse_bdist_wininst(name):
- """Return (base,pyversion) or (None,None) for possible .exe name"""
-
- lower = name.lower()
- base, py_ver, plat = None, None, None
-
- if lower.endswith('.exe'):
- if lower.endswith('.win32.exe'):
- base = name[:-10]
- plat = 'win32'
- elif lower.startswith('.win32-py', -16):
- py_ver = name[-7:-4]
- base = name[:-16]
- plat = 'win32'
- elif lower.endswith('.win-amd64.exe'):
- base = name[:-14]
- plat = 'win-amd64'
- elif lower.startswith('.win-amd64-py', -20):
- py_ver = name[-7:-4]
- base = name[:-20]
- plat = 'win-amd64'
- return base, py_ver, plat
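-
-
-def _demo_parse_bdist_wininst():  # pragma: no cover
-    # Hypothetical examples (not in the original module):
-    assert parse_bdist_wininst('foo-1.0.win32.exe') == (
-        'foo-1.0', None, 'win32')
-    assert parse_bdist_wininst('foo-1.0.win32-py3.7.exe') == (
-        'foo-1.0', '3.7', 'win32')
-    assert parse_bdist_wininst('foo-1.0.tar.gz') == (None, None, None)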
-
-
-def egg_info_for_url(url):
- parts = urllib.parse.urlparse(url)
- scheme, server, path, parameters, query, fragment = parts
- base = urllib.parse.unquote(path.split('/')[-1])
- if server == 'sourceforge.net' and base == 'download': # XXX Yuck
- base = urllib.parse.unquote(path.split('/')[-2])
- if '#' in base:
- base, fragment = base.split('#', 1)
- return base, fragment
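-
-
-def _demo_egg_info_for_url():  # pragma: no cover
-    # Hypothetical example (not in the original module): the basename and
-    # fragment are split out of the URL.
-    base, fragment = egg_info_for_url('http://host/dir/foo-1.0.tar.gz#egg=foo')
-    assert (base, fragment) == ('foo-1.0.tar.gz', 'egg=foo')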
-
-
-def distros_for_url(url, metadata=None):
- """Yield egg or source distribution objects that might be found at a URL"""
- base, fragment = egg_info_for_url(url)
- for dist in distros_for_location(url, base, metadata):
- yield dist
- if fragment:
- match = EGG_FRAGMENT.match(fragment)
- if match:
- for dist in interpret_distro_name(
- url, match.group(1), metadata, precedence=CHECKOUT_DIST
- ):
- yield dist
-
-
-def distros_for_location(location, basename, metadata=None):
- """Yield egg or source distribution objects based on basename"""
- if basename.endswith('.egg.zip'):
- basename = basename[:-4] # strip the .zip
- if basename.endswith('.egg') and '-' in basename:
- # only one, unambiguous interpretation
- return [Distribution.from_location(location, basename, metadata)]
- if basename.endswith('.whl') and '-' in basename:
- wheel = Wheel(basename)
- if not wheel.is_compatible():
- return []
- return [Distribution(
- location=location,
- project_name=wheel.project_name,
- version=wheel.version,
- # Increase priority over eggs.
- precedence=EGG_DIST + 1,
- )]
- if basename.endswith('.exe'):
- win_base, py_ver, platform = parse_bdist_wininst(basename)
- if win_base is not None:
- return interpret_distro_name(
- location, win_base, metadata, py_ver, BINARY_DIST, platform
- )
- # Try source distro extensions (.zip, .tgz, etc.)
- #
- for ext in EXTENSIONS:
- if basename.endswith(ext):
- basename = basename[:-len(ext)]
- return interpret_distro_name(location, basename, metadata)
- return [] # no extension matched
-
-
-def distros_for_filename(filename, metadata=None):
- """Yield possible egg or source distribution objects based on a filename"""
- return distros_for_location(
- normalize_path(filename), os.path.basename(filename), metadata
- )
-
-
-def interpret_distro_name(
- location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
- platform=None
-):
- """Generate alternative interpretations of a source distro name
-
- Note: if `location` is a filesystem filename, you should call
- ``pkg_resources.normalize_path()`` on it before passing it to this
- routine!
- """
- # Generate alternative interpretations of a source distro name
- # Because some packages are ambiguous as to name/versions split
- # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
- # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
- # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
- # the spurious interpretations should be ignored, because in the event
- # there's also an "adns" package, the spurious "python-1.1.0" version will
- # compare lower than any numeric version number, and is therefore unlikely
- # to match a request for it. It's still a potential problem, though, and
- # in the long run PyPI and the distutils should go for "safe" names and
- # versions in distribution archive names (sdist and bdist).
-
- parts = basename.split('-')
- if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
- # it is a bdist_dumb, not an sdist -- bail out
- return
-
- for p in range(1, len(parts) + 1):
- yield Distribution(
- location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
- py_version=py_version, precedence=precedence,
- platform=platform
- )
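-
-
-def _demo_interpret_distro_name():  # pragma: no cover
-    # Hypothetical example (not in the original module): every plausible
-    # name/version split is yielded; spurious ones sort below real versions.
-    dists = interpret_distro_name('http://host/x', 'adns-python-1.1.0', None)
-    assert [d.project_name for d in dists] == [
-        'adns', 'adns-python', 'adns-python-1.1.0']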
-
-
-def unique_values(func):
- """
- Wrap a function returning an iterable such that the resulting iterable
- only ever yields unique items.
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- return unique_everseen(func(*args, **kwargs))
-
- return wrapper
-
-
-REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
-# this line is here to fix emacs' cruddy broken syntax highlighting
-
-
-@unique_values
-def find_external_links(url, page):
- """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
-
- for match in REL.finditer(page):
- tag, rel = match.groups()
- rels = set(map(str.strip, rel.lower().split(',')))
- if 'homepage' in rels or 'download' in rels:
- for match in HREF.finditer(tag):
- yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
- for tag in ("<th>Home Page", "<th>Download URL"):
- pos = page.find(tag)
- if pos != -1:
- match = HREF.search(page, pos)
- if match:
- yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
-
-class ContentChecker:
- """
- A null content checker that defines the interface for checking content
- """
-
- def feed(self, block):
- """
- Feed a block of data to the hash.
- """
- return
-
- def is_valid(self):
- """
- Check the hash. Return False if validation fails.
- """
- return True
-
- def report(self, reporter, template):
- """
- Call reporter with information about the checker (hash name)
- substituted into the template.
- """
- return
-
-
-class HashChecker(ContentChecker):
- pattern = re.compile(
- r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
- r'(?P<expected>[a-f0-9]+)'
- )
-
- def __init__(self, hash_name, expected):
- self.hash_name = hash_name
- self.hash = hashlib.new(hash_name)
- self.expected = expected
-
- @classmethod
- def from_url(cls, url):
- "Construct a (possibly null) ContentChecker from a URL"
- fragment = urllib.parse.urlparse(url)[-1]
- if not fragment:
- return ContentChecker()
- match = cls.pattern.search(fragment)
- if not match:
- return ContentChecker()
- return cls(**match.groupdict())
-
- def feed(self, block):
- self.hash.update(block)
-
- def is_valid(self):
- return self.hash.hexdigest() == self.expected
-
- def report(self, reporter, template):
- msg = template % self.hash_name
- return reporter(msg)
-
-
-class PackageIndex(Environment):
- """A distribution index that scans web pages for download URLs"""
-
- def __init__(
- self, index_url="https://pypi.org/simple/", hosts=('*',),
- ca_bundle=None, verify_ssl=True, *args, **kw
- ):
- Environment.__init__(self, *args, **kw)
-        # ensure the index URL ends with exactly one trailing slash
-        self.index_url = index_url if index_url.endswith('/') else index_url + '/'
- self.scanned_urls = {}
- self.fetched_urls = {}
- self.package_pages = {}
- self.allows = re.compile('|'.join(map(translate, hosts))).match
- self.to_scan = []
- self.opener = urllib.request.urlopen
-
- def add(self, dist):
- # ignore invalid versions
- try:
- parse_version(dist.version)
- except Exception:
- return
- return super().add(dist)
-
- # FIXME: 'PackageIndex.process_url' is too complex (14)
- def process_url(self, url, retrieve=False): # noqa: C901
- """Evaluate a URL as a possible download, and maybe retrieve it"""
- if url in self.scanned_urls and not retrieve:
- return
- self.scanned_urls[url] = True
- if not URL_SCHEME(url):
- self.process_filename(url)
- return
- else:
- dists = list(distros_for_url(url))
- if dists:
- if not self.url_ok(url):
- return
- self.debug("Found link: %s", url)
-
- if dists or not retrieve or url in self.fetched_urls:
- list(map(self.add, dists))
- return # don't need the actual page
-
- if not self.url_ok(url):
- self.fetched_urls[url] = True
- return
-
- self.info("Reading %s", url)
- self.fetched_urls[url] = True # prevent multiple fetch attempts
- tmpl = "Download error on %s: %%s -- Some packages may not be found!"
- f = self.open_url(url, tmpl % url)
- if f is None:
- return
- if isinstance(f, urllib.error.HTTPError) and f.code == 401:
- self.info("Authentication error: %s" % f.msg)
- self.fetched_urls[f.url] = True
- if 'html' not in f.headers.get('content-type', '').lower():
- f.close() # not html, we can't process it
- return
-
- base = f.url # handle redirects
- page = f.read()
- if not isinstance(page, str):
-            # We got bytes but want str; decode using the response charset.
- if isinstance(f, urllib.error.HTTPError):
- # Errors have no charset, assume latin1:
- charset = 'latin-1'
- else:
- charset = f.headers.get_param('charset') or 'latin-1'
- page = page.decode(charset, "ignore")
- f.close()
- for match in HREF.finditer(page):
- link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
- self.process_url(link)
- if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
- page = self.process_index(url, page)
-
- def process_filename(self, fn, nested=False):
- # process filenames or directories
- if not os.path.exists(fn):
- self.warn("Not found: %s", fn)
- return
-
- if os.path.isdir(fn) and not nested:
- path = os.path.realpath(fn)
- for item in os.listdir(path):
- self.process_filename(os.path.join(path, item), True)
-
- dists = distros_for_filename(fn)
- if dists:
- self.debug("Found: %s", fn)
- list(map(self.add, dists))
-
- def url_ok(self, url, fatal=False):
- s = URL_SCHEME(url)
- is_file = s and s.group(1).lower() == 'file'
- if is_file or self.allows(urllib.parse.urlparse(url)[1]):
- return True
- msg = (
- "\nNote: Bypassing %s (disallowed host; see "
- "http://bit.ly/2hrImnY for details).\n")
- if fatal:
- raise DistutilsError(msg % url)
- else:
- self.warn(msg, url)
-
- def scan_egg_links(self, search_path):
- dirs = filter(os.path.isdir, search_path)
- egg_links = (
- (path, entry)
- for path in dirs
- for entry in os.listdir(path)
- if entry.endswith('.egg-link')
- )
- list(itertools.starmap(self.scan_egg_link, egg_links))
-
- def scan_egg_link(self, path, entry):
- with open(os.path.join(path, entry)) as raw_lines:
- # filter non-empty lines
- lines = list(filter(None, map(str.strip, raw_lines)))
-
- if len(lines) != 2:
- # format is not recognized; punt
- return
-
- egg_path, setup_path = lines
-
- for dist in find_distributions(os.path.join(path, egg_path)):
- dist.location = os.path.join(path, *lines)
- dist.precedence = SOURCE_DIST
- self.add(dist)
-
- def _scan(self, link):
- # Process a URL to see if it's for a package page
- NO_MATCH_SENTINEL = None, None
- if not link.startswith(self.index_url):
- return NO_MATCH_SENTINEL
-
- parts = list(map(
- urllib.parse.unquote, link[len(self.index_url):].split('/')
- ))
- if len(parts) != 2 or '#' in parts[1]:
- return NO_MATCH_SENTINEL
-
- # it's a package page, sanitize and index it
- pkg = safe_name(parts[0])
- ver = safe_version(parts[1])
- self.package_pages.setdefault(pkg.lower(), {})[link] = True
- return to_filename(pkg), to_filename(ver)
-
- def process_index(self, url, page):
- """Process the contents of a PyPI page"""
-
- # process an index page into the package-page index
- for match in HREF.finditer(page):
- try:
- self._scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
- except ValueError:
- pass
-
- pkg, ver = self._scan(url) # ensure this page is in the page index
- if not pkg:
- return "" # no sense double-scanning non-package pages
-
- # process individual package page
- for new_url in find_external_links(url, page):
- # Process the found URL
- base, frag = egg_info_for_url(new_url)
- if base.endswith('.py') and not frag:
- if ver:
- new_url += '#egg=%s-%s' % (pkg, ver)
- else:
- self.need_version_info(url)
- self.scan_url(new_url)
-
- return PYPI_MD5.sub(
- lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
- )
-
- def need_version_info(self, url):
- self.scan_all(
- "Page at %s links to .py file(s) without version info; an index "
- "scan is required.", url
- )
-
- def scan_all(self, msg=None, *args):
- if self.index_url not in self.fetched_urls:
- if msg:
- self.warn(msg, *args)
- self.info(
- "Scanning index of all packages (this may take a while)"
- )
- self.scan_url(self.index_url)
-
- def find_packages(self, requirement):
- self.scan_url(self.index_url + requirement.unsafe_name + '/')
-
- if not self.package_pages.get(requirement.key):
- # Fall back to safe version of the name
- self.scan_url(self.index_url + requirement.project_name + '/')
-
- if not self.package_pages.get(requirement.key):
- # We couldn't find the target package, so search the index page too
- self.not_found_in_index(requirement)
-
- for url in list(self.package_pages.get(requirement.key, ())):
- # scan each page that might be related to the desired package
- self.scan_url(url)
-
- def obtain(self, requirement, installer=None):
- self.prescan()
- self.find_packages(requirement)
- for dist in self[requirement.key]:
- if dist in requirement:
- return dist
- self.debug("%s does not match %s", requirement, dist)
-        return super().obtain(requirement, installer)
-
- def check_hash(self, checker, filename, tfp):
- """
- checker is a ContentChecker
- """
- checker.report(
- self.debug,
- "Validating %%s checksum for %s" % filename)
- if not checker.is_valid():
- tfp.close()
- os.unlink(filename)
- raise DistutilsError(
- "%s validation failed for %s; "
- "possible download problem?"
- % (checker.hash.name, os.path.basename(filename))
- )
-
- def add_find_links(self, urls):
- """Add `urls` to the list that will be prescanned for searches"""
- for url in urls:
- if (
- self.to_scan is None # if we have already "gone online"
- or not URL_SCHEME(url) # or it's a local file/directory
- or url.startswith('file:')
- or list(distros_for_url(url)) # or a direct package link
- ):
- # then go ahead and process it now
- self.scan_url(url)
- else:
- # otherwise, defer retrieval till later
- self.to_scan.append(url)
-
- def prescan(self):
- """Scan urls scheduled for prescanning (e.g. --find-links)"""
- if self.to_scan:
- list(map(self.scan_url, self.to_scan))
- self.to_scan = None # from now on, go ahead and process immediately
-
- def not_found_in_index(self, requirement):
- if self[requirement.key]: # we've seen at least one distro
- meth, msg = self.info, "Couldn't retrieve index page for %r"
- else: # no distros seen for this name, might be misspelled
- meth, msg = (
- self.warn,
- "Couldn't find index page for %r (maybe misspelled?)")
- meth(msg, requirement.unsafe_name)
- self.scan_all()
-
- def download(self, spec, tmpdir):
- """Locate and/or download `spec` to `tmpdir`, returning a local path
-
- `spec` may be a ``Requirement`` object, or a string containing a URL,
- an existing local filename, or a project/version requirement spec
- (i.e. the string form of a ``Requirement`` object). If it is the URL
- of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
- that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
- automatically created alongside the downloaded file.
-
- If `spec` is a ``Requirement`` object or a string containing a
- project/version requirement spec, this method returns the location of
- a matching distribution (possibly after downloading it to `tmpdir`).
- If `spec` is a locally existing file or directory name, it is simply
- returned unchanged. If `spec` is a URL, it is downloaded to a subpath
- of `tmpdir`, and the local filename is returned. Various errors may be
- raised if a problem occurs during downloading.
- """
- if not isinstance(spec, Requirement):
- scheme = URL_SCHEME(spec)
- if scheme:
- # It's a url, download it to tmpdir
- found = self._download_url(scheme.group(1), spec, tmpdir)
- base, fragment = egg_info_for_url(spec)
- if base.endswith('.py'):
- found = self.gen_setup(found, fragment, tmpdir)
- return found
- elif os.path.exists(spec):
- # Existing file or directory, just return it
- return spec
- else:
- spec = parse_requirement_arg(spec)
- return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
-
- def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME
- self, requirement, tmpdir, force_scan=False, source=False,
- develop_ok=False, local_index=None):
- """Obtain a distribution suitable for fulfilling `requirement`
-
- `requirement` must be a ``pkg_resources.Requirement`` instance.
- If necessary, or if the `force_scan` flag is set, the requirement is
- searched for in the (online) package index as well as the locally
- installed packages. If a distribution matching `requirement` is found,
- the returned distribution's ``location`` is the value you would have
- gotten from calling the ``download()`` method with the matching
- distribution's URL or filename. If no matching distribution is found,
- ``None`` is returned.
-
- If the `source` flag is set, only source distributions and source
- checkout links will be considered. Unless the `develop_ok` flag is
- set, development and system eggs (i.e., those using the ``.egg-info``
- format) will be ignored.
- """
- # process a Requirement
- self.info("Searching for %s", requirement)
- skipped = {}
- dist = None
-
- def find(req, env=None):
- if env is None:
- env = self
- # Find a matching distribution; may be called more than once
-
- for dist in env[req.key]:
-
- if dist.precedence == DEVELOP_DIST and not develop_ok:
- if dist not in skipped:
- self.warn(
- "Skipping development or system egg: %s", dist,
- )
- skipped[dist] = 1
- continue
-
- test = (
- dist in req
- and (dist.precedence <= SOURCE_DIST or not source)
- )
- if test:
- loc = self.download(dist.location, tmpdir)
- dist.download_location = loc
- if os.path.exists(dist.download_location):
- return dist
-
- if force_scan:
- self.prescan()
- self.find_packages(requirement)
- dist = find(requirement)
-
- if not dist and local_index is not None:
- dist = find(requirement, local_index)
-
- if dist is None:
- if self.to_scan is not None:
- self.prescan()
- dist = find(requirement)
-
- if dist is None and not force_scan:
- self.find_packages(requirement)
- dist = find(requirement)
-
- if dist is None:
- self.warn(
- "No local packages or working download links found for %s%s",
- (source and "a source distribution of " or ""),
- requirement,
- )
- else:
- self.info("Best match: %s", dist)
- return dist.clone(location=dist.download_location)
-
- def fetch(self, requirement, tmpdir, force_scan=False, source=False):
- """Obtain a file suitable for fulfilling `requirement`
-
- DEPRECATED; use the ``fetch_distribution()`` method now instead. For
- backward compatibility, this routine is identical but returns the
- ``location`` of the downloaded distribution instead of a distribution
- object.
- """
- dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
- if dist is not None:
- return dist.location
- return None
-
- def gen_setup(self, filename, fragment, tmpdir):
- match = EGG_FRAGMENT.match(fragment)
- dists = match and [
- d for d in
- interpret_distro_name(filename, match.group(1), None) if d.version
- ] or []
-
- if len(dists) == 1: # unambiguous ``#egg`` fragment
- basename = os.path.basename(filename)
-
- # Make sure the file has been downloaded to the temp dir.
- if os.path.dirname(filename) != tmpdir:
- dst = os.path.join(tmpdir, basename)
- from setuptools.command.easy_install import samefile
- if not samefile(filename, dst):
- shutil.copy2(filename, dst)
- filename = dst
-
- with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
- file.write(
- "from setuptools import setup\n"
- "setup(name=%r, version=%r, py_modules=[%r])\n"
- % (
- dists[0].project_name, dists[0].version,
- os.path.splitext(basename)[0]
- )
- )
- return filename
-
- elif match:
- raise DistutilsError(
- "Can't unambiguously interpret project/version identifier %r; "
- "any dashes in the name or version should be escaped using "
- "underscores. %r" % (fragment, dists)
- )
- else:
- raise DistutilsError(
- "Can't process plain .py files without an '#egg=name-version'"
- " suffix to enable automatic setup script generation."
- )
-
- dl_blocksize = 8192
-
- def _download_to(self, url, filename):
- self.info("Downloading %s", url)
- # Download the file
- fp = None
- try:
- checker = HashChecker.from_url(url)
- fp = self.open_url(url)
- if isinstance(fp, urllib.error.HTTPError):
- raise DistutilsError(
- "Can't download %s: %s %s" % (url, fp.code, fp.msg)
- )
- headers = fp.info()
- blocknum = 0
- bs = self.dl_blocksize
- size = -1
- if "content-length" in headers:
- # Some servers return multiple Content-Length headers :(
- sizes = headers.get_all('Content-Length')
- size = max(map(int, sizes))
- self.reporthook(url, filename, blocknum, bs, size)
- with open(filename, 'wb') as tfp:
- while True:
- block = fp.read(bs)
- if block:
- checker.feed(block)
- tfp.write(block)
- blocknum += 1
- self.reporthook(url, filename, blocknum, bs, size)
- else:
- break
- self.check_hash(checker, filename, tfp)
- return headers
- finally:
- if fp:
- fp.close()
-
- def reporthook(self, url, filename, blocknum, blksize, size):
- pass # no-op
-
- # FIXME:
- def open_url(self, url, warning=None): # noqa: C901 # is too complex (12)
- if url.startswith('file:'):
- return local_open(url)
- try:
- return open_with_auth(url, self.opener)
- except (ValueError, http.client.InvalidURL) as v:
- msg = ' '.join([str(arg) for arg in v.args])
- if warning:
- self.warn(warning, msg)
- else:
- raise DistutilsError('%s %s' % (url, msg)) from v
- except urllib.error.HTTPError as v:
- return v
- except urllib.error.URLError as v:
- if warning:
- self.warn(warning, v.reason)
- else:
- raise DistutilsError("Download error for %s: %s"
- % (url, v.reason)) from v
- except http.client.BadStatusLine as v:
- if warning:
- self.warn(warning, v.line)
- else:
- raise DistutilsError(
- '%s returned a bad status line. The server might be '
- 'down, %s' %
- (url, v.line)
- ) from v
- except (http.client.HTTPException, socket.error) as v:
- if warning:
- self.warn(warning, v)
- else:
- raise DistutilsError("Download error for %s: %s"
- % (url, v)) from v
-
- def _download_url(self, scheme, url, tmpdir):
- # Determine download filename
- #
- name, fragment = egg_info_for_url(url)
- if name:
- while '..' in name:
- name = name.replace('..', '.').replace('\\', '_')
- else:
- name = "__downloaded__" # default if URL has no path contents
-
- if name.endswith('.egg.zip'):
- name = name[:-4] # strip the extra .zip before download
-
- filename = os.path.join(tmpdir, name)
-
- # Download the file
- #
- if scheme == 'svn' or scheme.startswith('svn+'):
- return self._download_svn(url, filename)
- elif scheme == 'git' or scheme.startswith('git+'):
- return self._download_git(url, filename)
- elif scheme.startswith('hg+'):
- return self._download_hg(url, filename)
- elif scheme == 'file':
- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
- else:
- self.url_ok(url, True) # raises error if not allowed
- return self._attempt_download(url, filename)
-
- def scan_url(self, url):
- self.process_url(url, True)
-
- def _attempt_download(self, url, filename):
- headers = self._download_to(url, filename)
- if 'html' in headers.get('content-type', '').lower():
- return self._download_html(url, headers, filename)
- else:
- return filename
-
- def _download_html(self, url, headers, filename):
- file = open(filename)
- for line in file:
- if line.strip():
- # Check for a subversion index page
- if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
- # it's a subversion index page:
- file.close()
- os.unlink(filename)
- return self._download_svn(url, filename)
- break # not an index page
- file.close()
- os.unlink(filename)
- raise DistutilsError("Unexpected HTML page found at " + url)
-
- def _download_svn(self, url, filename):
- warnings.warn("SVN download support is deprecated", UserWarning)
- url = url.split('#', 1)[0] # remove any fragment for svn's sake
- creds = ''
- if url.lower().startswith('svn:') and '@' in url:
- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
- if not netloc and path.startswith('//') and '/' in path[2:]:
- netloc, path = path[2:].split('/', 1)
- auth, host = _splituser(netloc)
- if auth:
- if ':' in auth:
- user, pw = auth.split(':', 1)
- creds = " --username=%s --password=%s" % (user, pw)
- else:
- creds = " --username=" + auth
- netloc = host
- parts = scheme, netloc, url, p, q, f
- url = urllib.parse.urlunparse(parts)
- self.info("Doing subversion checkout from %s to %s", url, filename)
- os.system("svn checkout%s -q %s %s" % (creds, url, filename))
- return filename
-
- @staticmethod
- def _vcs_split_rev_from_url(url, pop_prefix=False):
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
-
- scheme = scheme.split('+', 1)[-1]
-
-        # fragment identification sometimes fails; strip any '#...' left in the path
- path = path.split('#', 1)[0]
-
- rev = None
- if '@' in path:
- path, rev = path.rsplit('@', 1)
-
- # Also, discard fragment
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
-
- return url, rev
-
- def _download_git(self, url, filename):
- filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing git clone from %s to %s", url, filename)
- os.system("git clone --quiet %s %s" % (url, filename))
-
- if rev is not None:
- self.info("Checking out %s", rev)
- os.system("git -C %s checkout --quiet %s" % (
- filename,
- rev,
- ))
-
- return filename
-
- def _download_hg(self, url, filename):
- filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing hg clone from %s to %s", url, filename)
- os.system("hg clone --quiet %s %s" % (url, filename))
-
- if rev is not None:
- self.info("Updating to %s", rev)
- os.system("hg --cwd %s up -C -r %s -q" % (
- filename,
- rev,
- ))
-
- return filename
-
- def debug(self, msg, *args):
- log.debug(msg, *args)
-
- def info(self, msg, *args):
- log.info(msg, *args)
-
- def warn(self, msg, *args):
- log.warn(msg, *args)
-
-
-# This pattern matches a character entity reference (a decimal numeric
-# reference, a hexadecimal numeric reference, or a named reference).
-entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
-
-
-def decode_entity(match):
- what = match.group(0)
- return html.unescape(what)
-
-
-def htmldecode(text):
- """
- Decode HTML entities in the given text.
-
- >>> htmldecode(
- ... 'https://../package_name-0.1.2.tar.gz'
- ... '?tokena=A&amp;tokenb=B">package_name-0.1.2.tar.gz')
- 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
- """
- return entity_sub(decode_entity, text)
-
-
-def socket_timeout(timeout=15):
- def _socket_timeout(func):
- def _socket_timeout(*args, **kwargs):
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return func(*args, **kwargs)
- finally:
- socket.setdefaulttimeout(old_timeout)
-
- return _socket_timeout
-
- return _socket_timeout
-
-
-def _encode_auth(auth):
- """
- Encode auth from a URL suitable for an HTTP header.
- >>> str(_encode_auth('username%3Apassword'))
- 'dXNlcm5hbWU6cGFzc3dvcmQ='
-
- Long auth strings should not cause a newline to be inserted.
- >>> long_auth = 'username:' + 'password'*10
- >>> chr(10) in str(_encode_auth(long_auth))
- False
- """
- auth_s = urllib.parse.unquote(auth)
- # convert to bytes
- auth_bytes = auth_s.encode()
- encoded_bytes = base64.b64encode(auth_bytes)
- # convert back to a string
- encoded = encoded_bytes.decode()
- # strip the trailing carriage return
- return encoded.replace('\n', '')
-
-
-class Credential:
- """
- A username/password pair. Use like a namedtuple.
- """
-
- def __init__(self, username, password):
- self.username = username
- self.password = password
-
- def __iter__(self):
- yield self.username
- yield self.password
-
- def __str__(self):
- return '%(username)s:%(password)s' % vars(self)
-
-
-class PyPIConfig(configparser.RawConfigParser):
- def __init__(self):
- """
- Load from ~/.pypirc
- """
- defaults = dict.fromkeys(['username', 'password', 'repository'], '')
- configparser.RawConfigParser.__init__(self, defaults)
-
- rc = os.path.join(os.path.expanduser('~'), '.pypirc')
- if os.path.exists(rc):
- self.read(rc)
-
- @property
- def creds_by_repository(self):
- sections_with_repositories = [
- section for section in self.sections()
- if self.get(section, 'repository').strip()
- ]
-
- return dict(map(self._get_repo_cred, sections_with_repositories))
-
- def _get_repo_cred(self, section):
- repo = self.get(section, 'repository').strip()
- return repo, Credential(
- self.get(section, 'username').strip(),
- self.get(section, 'password').strip(),
- )
-
- def find_credential(self, url):
- """
- If the URL indicated appears to be a repository defined in this
- config, return the credential for that repository.
- """
- for repository, cred in self.creds_by_repository.items():
- if url.startswith(repository):
- return cred
-
-
-def open_with_auth(url, opener=urllib.request.urlopen):
- """Open a urllib2 request, handling HTTP authentication"""
-
- parsed = urllib.parse.urlparse(url)
- scheme, netloc, path, params, query, frag = parsed
-
- # Double scheme does not raise on macOS as revealed by a
- # failing test. We would expect "nonnumeric port". Refs #20.
- if netloc.endswith(':'):
- raise http.client.InvalidURL("nonnumeric port: ''")
-
- if scheme in ('http', 'https'):
- auth, address = _splituser(netloc)
- else:
- auth = None
-
- if not auth:
- cred = PyPIConfig().find_credential(url)
- if cred:
- auth = str(cred)
- info = cred.username, url
- log.info('Authenticating as %s for %s (from .pypirc)', *info)
-
- if auth:
- auth = "Basic " + _encode_auth(auth)
- parts = scheme, address, path, params, query, frag
- new_url = urllib.parse.urlunparse(parts)
- request = urllib.request.Request(new_url)
- request.add_header("Authorization", auth)
- else:
- request = urllib.request.Request(url)
-
- request.add_header('User-Agent', user_agent)
- fp = opener(request)
-
- if auth:
- # Put authentication info back into request URL if same host,
- # so that links found on the page will work
- s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
- if s2 == scheme and h2 == address:
- parts = s2, netloc, path2, param2, query2, frag2
- fp.url = urllib.parse.urlunparse(parts)
-
- return fp
-
-
-# copy of urllib.parse._splituser from Python 3.8
-def _splituser(host):
- """splituser('user[:passwd]@host[:port]')
- --> 'user[:passwd]', 'host[:port]'."""
- user, delim, host = host.rpartition('@')
- return (user if delim else None), host
-
-
-# adding a timeout to avoid freezing package_index
-open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
-
-
-def fix_sf_url(url):
- return url # backward compatibility
-
-
-def local_open(url):
- """Read a local path, with special support for directories"""
- scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
- filename = urllib.request.url2pathname(path)
- if os.path.isfile(filename):
- return urllib.request.urlopen(url)
- elif path.endswith('/') and os.path.isdir(filename):
- files = []
- for f in os.listdir(filename):
- filepath = os.path.join(filename, f)
- if f == 'index.html':
- with open(filepath, 'r') as fp:
- body = fp.read()
- break
- elif os.path.isdir(filepath):
- f += '/'
- files.append('<a href="{name}">{name}</a>'.format(name=f))
- else:
- tmpl = (
- "<html><head><title>{url}</title>"
- "</head><body>{files}</body></html>")
- body = tmpl.format(url=url, files='\n'.join(files))
- status, message = 200, "OK"
- else:
- status, message, body = 404, "Path not found", "Not found"
-
- headers = {'content-type': 'text/html'}
- body_stream = io.StringIO(body)
- return urllib.error.HTTPError(url, status, message, headers, body_stream)
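
A minimal standalone sketch of two of the helpers deleted above, assuming this
historical module is still importable as setuptools.package_index; the distro
basename, URL, and payload are made-up illustrations:

import hashlib
import itertools

from setuptools.package_index import HashChecker, interpret_distro_name

# interpret_distro_name() yields every plausible name/version split of an
# ambiguous sdist basename; take the two splits that carry a version.
for d in itertools.islice(
        interpret_distro_name(None, 'adns-python-1.1.0', None), 2):
    print(d.project_name, d.version)
# -> "adns python-1.1.0", then "adns-python 1.1.0"

# HashChecker.from_url() extracts the algorithm and expected digest from a
# '#sha256=...' style URL fragment, then validates blocks fed to it.
payload = b'example payload'
digest = hashlib.sha256(payload).hexdigest()
checker = HashChecker.from_url(
    'https://example.com/pkg-1.0.tar.gz#sha256=%s' % digest)
checker.feed(payload)
assert checker.is_valid()
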
diff --git a/contrib/python/setuptools/py3/setuptools/py34compat.py b/contrib/python/setuptools/py3/setuptools/py34compat.py
deleted file mode 100644
index 3ad917222a4..00000000000
--- a/contrib/python/setuptools/py3/setuptools/py34compat.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import importlib
-
-try:
- import importlib.util
-except ImportError:
- pass
-
-
-try:
- module_from_spec = importlib.util.module_from_spec
-except AttributeError:
- def module_from_spec(spec):
- return spec.loader.load_module(spec.name)
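
A minimal sketch of what the deleted shim provided, assuming
setuptools.py34compat is importable; 'json' is an arbitrary example module. On
Python 3.5+ the shim simply re-exports importlib.util.module_from_spec:

import importlib.util

from setuptools.py34compat import module_from_spec

spec = importlib.util.find_spec('json')
mod = module_from_spec(spec)   # creates the module object from its spec
spec.loader.exec_module(mod)   # executes the module body
print(mod.__name__)            # json
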
diff --git a/contrib/python/setuptools/py3/setuptools/sandbox.py b/contrib/python/setuptools/py3/setuptools/sandbox.py
deleted file mode 100644
index 034fc80d20e..00000000000
--- a/contrib/python/setuptools/py3/setuptools/sandbox.py
+++ /dev/null
@@ -1,530 +0,0 @@
-import os
-import sys
-import tempfile
-import operator
-import functools
-import itertools
-import re
-import contextlib
-import pickle
-import textwrap
-import builtins
-
-import pkg_resources
-from distutils.errors import DistutilsError
-from pkg_resources import working_set
-
-if sys.platform.startswith('java'):
- import org.python.modules.posix.PosixModule as _os
-else:
- _os = sys.modules[os.name]
-try:
- _file = file
-except NameError:
- _file = None
-_open = open
-
-
-__all__ = [
- "AbstractSandbox",
- "DirectorySandbox",
- "SandboxViolation",
- "run_setup",
-]
-
-
-def _execfile(filename, globals, locals=None):
- """
- Python 3 implementation of execfile.
- """
- mode = 'rb'
- with open(filename, mode) as stream:
- script = stream.read()
- if locals is None:
- locals = globals
- code = compile(script, filename, 'exec')
- exec(code, globals, locals)
-
-
[email protected]
-def save_argv(repl=None):
- saved = sys.argv[:]
- if repl is not None:
- sys.argv[:] = repl
- try:
- yield saved
- finally:
- sys.argv[:] = saved
-
-
[email protected]
-def save_path():
- saved = sys.path[:]
- try:
- yield saved
- finally:
- sys.path[:] = saved
-
-
[email protected]
-def override_temp(replacement):
- """
- Monkey-patch tempfile.tempdir with replacement, ensuring it exists
- """
- os.makedirs(replacement, exist_ok=True)
-
- saved = tempfile.tempdir
-
- tempfile.tempdir = replacement
-
- try:
- yield
- finally:
- tempfile.tempdir = saved
-
-
[email protected]
-def pushd(target):
- saved = os.getcwd()
- os.chdir(target)
- try:
- yield saved
- finally:
- os.chdir(saved)
-
-
-class UnpickleableException(Exception):
- """
- An exception representing another Exception that could not be pickled.
- """
-
- @staticmethod
- def dump(type, exc):
- """
- Always return a dumped (pickled) type and exc. If exc can't be pickled,
- wrap it in UnpickleableException first.
- """
- try:
- return pickle.dumps(type), pickle.dumps(exc)
- except Exception:
- # get UnpickleableException inside the sandbox
- from setuptools.sandbox import UnpickleableException as cls
-
- return cls.dump(cls, cls(repr(exc)))
-
-
-class ExceptionSaver:
- """
- A Context Manager that will save an exception, serialized, and restore it
- later.
- """
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, exc, tb):
- if not exc:
- return
-
- # dump the exception
- self._saved = UnpickleableException.dump(type, exc)
- self._tb = tb
-
- # suppress the exception
- return True
-
- def resume(self):
- "restore and re-raise any exception"
-
- if '_saved' not in vars(self):
- return
-
- type, exc = map(pickle.loads, self._saved)
- raise exc.with_traceback(self._tb)
-
-
[email protected]
-def save_modules():
- """
- Context in which imported modules are saved.
-
- Translates exceptions internal to the context into the equivalent exception
- outside the context.
- """
- saved = sys.modules.copy()
- with ExceptionSaver() as saved_exc:
- yield saved
-
- sys.modules.update(saved)
- # remove any modules imported since
- del_modules = (
- mod_name
- for mod_name in sys.modules
- if mod_name not in saved
- # exclude any encodings modules. See #285
- and not mod_name.startswith('encodings.')
- )
- _clear_modules(del_modules)
-
- saved_exc.resume()
-
-
-def _clear_modules(module_names):
- for mod_name in list(module_names):
- del sys.modules[mod_name]
-
-
[email protected]
-def save_pkg_resources_state():
- saved = pkg_resources.__getstate__()
- try:
- yield saved
- finally:
- pkg_resources.__setstate__(saved)
-
-
[email protected]
-def setup_context(setup_dir):
- temp_dir = os.path.join(setup_dir, 'temp')
- with save_pkg_resources_state():
- with save_modules():
- with save_path():
- hide_setuptools()
- with save_argv():
- with override_temp(temp_dir):
- with pushd(setup_dir):
- # ensure setuptools commands are available
- __import__('setuptools')
- yield
-
-
-_MODULES_TO_HIDE = {
- 'setuptools',
- 'distutils',
- 'pkg_resources',
- 'Cython',
- '_distutils_hack',
-}
-
-
-def _needs_hiding(mod_name):
- """
- >>> _needs_hiding('setuptools')
- True
- >>> _needs_hiding('pkg_resources')
- True
- >>> _needs_hiding('setuptools_plugin')
- False
- >>> _needs_hiding('setuptools.__init__')
- True
- >>> _needs_hiding('distutils')
- True
- >>> _needs_hiding('os')
- False
- >>> _needs_hiding('Cython')
- True
- """
- base_module = mod_name.split('.', 1)[0]
- return base_module in _MODULES_TO_HIDE
-
-
-def hide_setuptools():
- """
- Remove references to setuptools' modules from sys.modules to allow the
- invocation to import the most appropriate setuptools. This technique is
- necessary to avoid issues such as #315 where setuptools upgrading itself
- would fail to find a function declared in the metadata.
- """
- _distutils_hack = sys.modules.get('_distutils_hack', None)
- if _distutils_hack is not None:
- _distutils_hack.remove_shim()
-
- modules = filter(_needs_hiding, sys.modules)
- _clear_modules(modules)
-
-
-def run_setup(setup_script, args):
- """Run a distutils setup script, sandboxed in its directory"""
- setup_dir = os.path.abspath(os.path.dirname(setup_script))
- with setup_context(setup_dir):
- try:
- sys.argv[:] = [setup_script] + list(args)
- sys.path.insert(0, setup_dir)
- # reset to include setup dir, w/clean callback list
- working_set.__init__()
- working_set.callbacks.append(lambda dist: dist.activate())
-
- with DirectorySandbox(setup_dir):
- ns = dict(__file__=setup_script, __name__='__main__')
- _execfile(setup_script, ns)
- except SystemExit as v:
- if v.args and v.args[0]:
- raise
- # Normal exit, just return
-
-
-class AbstractSandbox:
- """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
-
- _active = False
-
- def __init__(self):
- self._attrs = [
- name
- for name in dir(_os)
- if not name.startswith('_') and hasattr(self, name)
- ]
-
- def _copy(self, source):
- for name in self._attrs:
- setattr(os, name, getattr(source, name))
-
- def __enter__(self):
- self._copy(self)
- if _file:
- builtins.file = self._file
- builtins.open = self._open
- self._active = True
-
- def __exit__(self, exc_type, exc_value, traceback):
- self._active = False
- if _file:
- builtins.file = _file
- builtins.open = _open
- self._copy(_os)
-
- def run(self, func):
- """Run 'func' under os sandboxing"""
- with self:
- return func()
-
- def _mk_dual_path_wrapper(name):
- original = getattr(_os, name)
-
- def wrap(self, src, dst, *args, **kw):
- if self._active:
- src, dst = self._remap_pair(name, src, dst, *args, **kw)
- return original(src, dst, *args, **kw)
-
- return wrap
-
- for name in ["rename", "link", "symlink"]:
- if hasattr(_os, name):
- locals()[name] = _mk_dual_path_wrapper(name)
-
- def _mk_single_path_wrapper(name, original=None):
- original = original or getattr(_os, name)
-
- def wrap(self, path, *args, **kw):
- if self._active:
- path = self._remap_input(name, path, *args, **kw)
- return original(path, *args, **kw)
-
- return wrap
-
- if _file:
- _file = _mk_single_path_wrapper('file', _file)
- _open = _mk_single_path_wrapper('open', _open)
- for name in [
- "stat",
- "listdir",
- "chdir",
- "open",
- "chmod",
- "chown",
- "mkdir",
- "remove",
- "unlink",
- "rmdir",
- "utime",
- "lchown",
- "chroot",
- "lstat",
- "startfile",
- "mkfifo",
- "mknod",
- "pathconf",
- "access",
- ]:
- if hasattr(_os, name):
- locals()[name] = _mk_single_path_wrapper(name)
-
- def _mk_single_with_return(name):
- original = getattr(_os, name)
-
- def wrap(self, path, *args, **kw):
- if self._active:
- path = self._remap_input(name, path, *args, **kw)
- return self._remap_output(name, original(path, *args, **kw))
- return original(path, *args, **kw)
-
- return wrap
-
- for name in ['readlink', 'tempnam']:
- if hasattr(_os, name):
- locals()[name] = _mk_single_with_return(name)
-
- def _mk_query(name):
- original = getattr(_os, name)
-
- def wrap(self, *args, **kw):
- retval = original(*args, **kw)
- if self._active:
- return self._remap_output(name, retval)
- return retval
-
- return wrap
-
- for name in ['getcwd', 'tmpnam']:
- if hasattr(_os, name):
- locals()[name] = _mk_query(name)
-
- def _validate_path(self, path):
- """Called to remap or validate any path, whether input or output"""
- return path
-
- def _remap_input(self, operation, path, *args, **kw):
- """Called for path inputs"""
- return self._validate_path(path)
-
- def _remap_output(self, operation, path):
- """Called for path outputs"""
- return self._validate_path(path)
-
- def _remap_pair(self, operation, src, dst, *args, **kw):
- """Called for path pairs like rename, link, and symlink operations"""
- return (
- self._remap_input(operation + '-from', src, *args, **kw),
- self._remap_input(operation + '-to', dst, *args, **kw),
- )
-
-
-if hasattr(os, 'devnull'):
- _EXCEPTIONS = [os.devnull]
-else:
- _EXCEPTIONS = []
-
-
-class DirectorySandbox(AbstractSandbox):
- """Restrict operations to a single subdirectory - pseudo-chroot"""
-
- write_ops = dict.fromkeys(
- [
- "open",
- "chmod",
- "chown",
- "mkdir",
- "remove",
- "unlink",
- "rmdir",
- "utime",
- "lchown",
- "chroot",
- "mkfifo",
- "mknod",
- "tempnam",
- ]
- )
-
- _exception_patterns = []
- "exempt writing to paths that match the pattern"
-
- def __init__(self, sandbox, exceptions=_EXCEPTIONS):
- self._sandbox = os.path.normcase(os.path.realpath(sandbox))
- self._prefix = os.path.join(self._sandbox, '')
- self._exceptions = [
- os.path.normcase(os.path.realpath(path)) for path in exceptions
- ]
- AbstractSandbox.__init__(self)
-
- def _violation(self, operation, *args, **kw):
- from setuptools.sandbox import SandboxViolation
-
- raise SandboxViolation(operation, args, kw)
-
- if _file:
-
- def _file(self, path, mode='r', *args, **kw):
- if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
- self._violation("file", path, mode, *args, **kw)
- return _file(path, mode, *args, **kw)
-
- def _open(self, path, mode='r', *args, **kw):
- if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
- self._violation("open", path, mode, *args, **kw)
- return _open(path, mode, *args, **kw)
-
- def tmpnam(self):
- self._violation("tmpnam")
-
- def _ok(self, path):
- active = self._active
- try:
- self._active = False
- realpath = os.path.normcase(os.path.realpath(path))
- return (
- self._exempted(realpath)
- or realpath == self._sandbox
- or realpath.startswith(self._prefix)
- )
- finally:
- self._active = active
-
- def _exempted(self, filepath):
- start_matches = (
- filepath.startswith(exception) for exception in self._exceptions
- )
- pattern_matches = (
- re.match(pattern, filepath) for pattern in self._exception_patterns
- )
- candidates = itertools.chain(start_matches, pattern_matches)
- return any(candidates)
-
- def _remap_input(self, operation, path, *args, **kw):
- """Called for path inputs"""
- if operation in self.write_ops and not self._ok(path):
- self._violation(operation, os.path.realpath(path), *args, **kw)
- return path
-
- def _remap_pair(self, operation, src, dst, *args, **kw):
- """Called for path pairs like rename, link, and symlink operations"""
- if not self._ok(src) or not self._ok(dst):
- self._violation(operation, src, dst, *args, **kw)
- return (src, dst)
-
- def open(self, file, flags, mode=0o777, *args, **kw):
- """Called for low-level os.open()"""
- if flags & WRITE_FLAGS and not self._ok(file):
- self._violation("os.open", file, flags, mode, *args, **kw)
- return _os.open(file, flags, mode, *args, **kw)
-
-
-WRITE_FLAGS = functools.reduce(
- operator.or_,
- [
- getattr(_os, a, 0)
- for a in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()
- ],
-)
-
-
-class SandboxViolation(DistutilsError):
- """A setup script attempted to modify the filesystem outside the sandbox"""
-
- tmpl = textwrap.dedent(
- """
- SandboxViolation: {cmd}{args!r} {kwargs}
-
- The package setup script has attempted to modify files on your system
- that are not within the EasyInstall build area, and has been aborted.
-
- This package cannot be safely installed by EasyInstall, and may not
- support alternate installation locations even if you run its setup
- script by hand. Please inform the package's author and the EasyInstall
- maintainers to find out if a fix or workaround is available.
- """
- ).lstrip()
-
- def __str__(self):
- cmd, args, kwargs = self.args
- return self.tmpl.format(**locals())
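
A minimal usage sketch for the sandbox deleted above, assuming
setuptools.sandbox is importable: DirectorySandbox permits writes under its
root and raises SandboxViolation for writes anywhere else.

import os
import tempfile

from setuptools.sandbox import DirectorySandbox, SandboxViolation

root = tempfile.mkdtemp()
box = DirectorySandbox(root)

def write_files():
    # writing under the sandbox root is permitted
    with open(os.path.join(root, 'ok.txt'), 'w') as f:
        f.write('fine')
    # writing outside the root triggers a violation
    open(os.path.join(root, os.pardir, 'nope.txt'), 'w')

try:
    box.run(write_files)          # enters the sandbox, runs the callable
except SandboxViolation as exc:
    print(exc)                    # reports the offending 'open' call
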
diff --git a/contrib/python/setuptools/py3/setuptools/script (dev).tmpl b/contrib/python/setuptools/py3/setuptools/script (dev).tmpl
deleted file mode 100644
index 39a24b04888..00000000000
--- a/contrib/python/setuptools/py3/setuptools/script (dev).tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
-__requires__ = %(spec)r
-__import__('pkg_resources').require(%(spec)r)
-__file__ = %(dev_path)r
-with open(__file__) as f:
- exec(compile(f.read(), __file__, 'exec'))
diff --git a/contrib/python/setuptools/py3/setuptools/script.tmpl b/contrib/python/setuptools/py3/setuptools/script.tmpl
deleted file mode 100644
index ff5efbcab3b..00000000000
--- a/contrib/python/setuptools/py3/setuptools/script.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
-__requires__ = %(spec)r
-__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
diff --git a/contrib/python/setuptools/py3/setuptools/unicode_utils.py b/contrib/python/setuptools/py3/setuptools/unicode_utils.py
deleted file mode 100644
index e84e65e3e14..00000000000
--- a/contrib/python/setuptools/py3/setuptools/unicode_utils.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import unicodedata
-import sys
-
-
-# HFS Plus uses decomposed UTF-8
-def decompose(path):
- if isinstance(path, str):
- return unicodedata.normalize('NFD', path)
- try:
- path = path.decode('utf-8')
- path = unicodedata.normalize('NFD', path)
- path = path.encode('utf-8')
- except UnicodeError:
- pass # Not UTF-8
- return path
-
-
-def filesys_decode(path):
- """
-    Ensure that the given path is decoded;
-    return None when no expected encoding works.
- """
-
- if isinstance(path, str):
- return path
-
- fs_enc = sys.getfilesystemencoding() or 'utf-8'
- candidates = fs_enc, 'utf-8'
-
- for enc in candidates:
- try:
- return path.decode(enc)
- except UnicodeDecodeError:
- continue
-
-
-def try_encode(string, enc):
- "turn unicode encoding into a functional routine"
- try:
- return string.encode(enc)
- except UnicodeEncodeError:
- return None
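
A minimal sketch of the three helpers above, assuming setuptools.unicode_utils
is importable and a UTF-8 filesystem encoding:

from setuptools.unicode_utils import decompose, filesys_decode, try_encode

print(decompose('caf\u00e9'))           # 'cafe\u0301' (NFD: e + combining acute)
print(filesys_decode(b'caf\xc3\xa9'))   # 'café', decoded via UTF-8
print(filesys_decode(b'\xff\xfe'))      # None: no candidate encoding works
print(try_encode('caf\u00e9', 'ascii')) # None: not representable in ASCII
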
diff --git a/contrib/python/setuptools/py3/setuptools/version.py b/contrib/python/setuptools/py3/setuptools/version.py
deleted file mode 100644
index 95e18696585..00000000000
--- a/contrib/python/setuptools/py3/setuptools/version.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import pkg_resources
-
-try:
- __version__ = pkg_resources.get_distribution('setuptools').version
-except Exception:
- __version__ = 'unknown'
diff --git a/contrib/python/setuptools/py3/setuptools/wheel.py b/contrib/python/setuptools/py3/setuptools/wheel.py
deleted file mode 100644
index 0be811af2c2..00000000000
--- a/contrib/python/setuptools/py3/setuptools/wheel.py
+++ /dev/null
@@ -1,213 +0,0 @@
-"""Wheels support."""
-
-from distutils.util import get_platform
-from distutils import log
-import email
-import itertools
-import os
-import posixpath
-import re
-import zipfile
-
-import pkg_resources
-import setuptools
-from pkg_resources import parse_version
-from setuptools.extern.packaging.tags import sys_tags
-from setuptools.extern.packaging.utils import canonicalize_name
-from setuptools.command.egg_info import write_requirements
-
-
-WHEEL_NAME = re.compile(
- r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
- ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
- )\.whl$""",
- re.VERBOSE).match
-
-NAMESPACE_PACKAGE_INIT = \
- "__import__('pkg_resources').declare_namespace(__name__)\n"
-
-
-def unpack(src_dir, dst_dir):
- '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
- for dirpath, dirnames, filenames in os.walk(src_dir):
- subdir = os.path.relpath(dirpath, src_dir)
- for f in filenames:
- src = os.path.join(dirpath, f)
- dst = os.path.join(dst_dir, subdir, f)
- os.renames(src, dst)
- for n, d in reversed(list(enumerate(dirnames))):
- src = os.path.join(dirpath, d)
- dst = os.path.join(dst_dir, subdir, d)
- if not os.path.exists(dst):
- # Directory does not exist in destination,
- # rename it and prune it from os.walk list.
- os.renames(src, dst)
- del dirnames[n]
- # Cleanup.
- for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
- assert not filenames
- os.rmdir(dirpath)
-
-
-class Wheel:
-
- def __init__(self, filename):
- match = WHEEL_NAME(os.path.basename(filename))
- if match is None:
- raise ValueError('invalid wheel name: %r' % filename)
- self.filename = filename
- for k, v in match.groupdict().items():
- setattr(self, k, v)
-
- def tags(self):
- '''List tags (py_version, abi, platform) supported by this wheel.'''
- return itertools.product(
- self.py_version.split('.'),
- self.abi.split('.'),
- self.platform.split('.'),
- )
-
- def is_compatible(self):
-        '''Is the wheel compatible with the current platform?'''
- supported_tags = set(
- (t.interpreter, t.abi, t.platform) for t in sys_tags())
- return next((True for t in self.tags() if t in supported_tags), False)
-
- def egg_name(self):
- return pkg_resources.Distribution(
- project_name=self.project_name, version=self.version,
- platform=(None if self.platform == 'any' else get_platform()),
- ).egg_name() + '.egg'
-
- def get_dist_info(self, zf):
- # find the correct name of the .dist-info dir in the wheel file
- for member in zf.namelist():
- dirname = posixpath.dirname(member)
- if (dirname.endswith('.dist-info') and
- canonicalize_name(dirname).startswith(
- canonicalize_name(self.project_name))):
- return dirname
- raise ValueError("unsupported wheel format. .dist-info not found")
-
- def install_as_egg(self, destination_eggdir):
- '''Install wheel as an egg directory.'''
- with zipfile.ZipFile(self.filename) as zf:
- self._install_as_egg(destination_eggdir, zf)
-
- def _install_as_egg(self, destination_eggdir, zf):
- dist_basename = '%s-%s' % (self.project_name, self.version)
- dist_info = self.get_dist_info(zf)
- dist_data = '%s.data' % dist_basename
- egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
-
- self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
- self._move_data_entries(destination_eggdir, dist_data)
- self._fix_namespace_packages(egg_info, destination_eggdir)
-
- @staticmethod
- def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
- def get_metadata(name):
- with zf.open(posixpath.join(dist_info, name)) as fp:
- value = fp.read().decode('utf-8')
- return email.parser.Parser().parsestr(value)
-
- wheel_metadata = get_metadata('WHEEL')
- # Check wheel format version is supported.
- wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
- wheel_v1 = (
- parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
- )
- if not wheel_v1:
- raise ValueError(
- 'unsupported wheel format version: %s' % wheel_version)
- # Extract to target directory.
- os.mkdir(destination_eggdir)
- zf.extractall(destination_eggdir)
- # Convert metadata.
- dist_info = os.path.join(destination_eggdir, dist_info)
- dist = pkg_resources.Distribution.from_location(
- destination_eggdir, dist_info,
- metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
- )
-
- # Note: Evaluate and strip markers now,
- # as it's difficult to convert back from the syntax:
- # foobar; "linux" in sys_platform and extra == 'test'
- def raw_req(req):
- req.marker = None
- return str(req)
- install_requires = list(sorted(map(raw_req, dist.requires())))
- extras_require = {
- extra: sorted(
- req
- for req in map(raw_req, dist.requires((extra,)))
- if req not in install_requires
- )
- for extra in dist.extras
- }
- os.rename(dist_info, egg_info)
- os.rename(
- os.path.join(egg_info, 'METADATA'),
- os.path.join(egg_info, 'PKG-INFO'),
- )
- setup_dist = setuptools.Distribution(
- attrs=dict(
- install_requires=install_requires,
- extras_require=extras_require,
- ),
- )
- # Temporarily disable info traces.
- log_threshold = log._global_log.threshold
- log.set_threshold(log.WARN)
- try:
- write_requirements(
- setup_dist.get_command_obj('egg_info'),
- None,
- os.path.join(egg_info, 'requires.txt'),
- )
- finally:
- log.set_threshold(log_threshold)
-
- @staticmethod
- def _move_data_entries(destination_eggdir, dist_data):
- """Move data entries to their correct location."""
- dist_data = os.path.join(destination_eggdir, dist_data)
- dist_data_scripts = os.path.join(dist_data, 'scripts')
- if os.path.exists(dist_data_scripts):
- egg_info_scripts = os.path.join(
- destination_eggdir, 'EGG-INFO', 'scripts')
- os.mkdir(egg_info_scripts)
- for entry in os.listdir(dist_data_scripts):
- # Remove bytecode, as it's not properly handled
- # during easy_install scripts install phase.
- if entry.endswith('.pyc'):
- os.unlink(os.path.join(dist_data_scripts, entry))
- else:
- os.rename(
- os.path.join(dist_data_scripts, entry),
- os.path.join(egg_info_scripts, entry),
- )
- os.rmdir(dist_data_scripts)
- for subdir in filter(os.path.exists, (
- os.path.join(dist_data, d)
- for d in ('data', 'headers', 'purelib', 'platlib')
- )):
- unpack(subdir, destination_eggdir)
- if os.path.exists(dist_data):
- os.rmdir(dist_data)
-
- @staticmethod
- def _fix_namespace_packages(egg_info, destination_eggdir):
- namespace_packages = os.path.join(
- egg_info, 'namespace_packages.txt')
- if os.path.exists(namespace_packages):
- with open(namespace_packages) as fp:
- namespace_packages = fp.read().split()
- for mod in namespace_packages:
- mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
- mod_init = os.path.join(mod_dir, '__init__.py')
- if not os.path.exists(mod_dir):
- os.mkdir(mod_dir)
- if not os.path.exists(mod_init):
- with open(mod_init, 'w') as fp:
- fp.write(NAMESPACE_PACKAGE_INIT)
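
A minimal sketch of the Wheel helper above, assuming setuptools.wheel is
importable; the filename is a made-up universal (py2.py3-none-any) wheel:

from setuptools.wheel import Wheel

w = Wheel('demo_pkg-1.0-py2.py3-none-any.whl')
print(w.project_name, w.version)  # demo_pkg 1.0
print(sorted(w.tags()))           # [('py2', 'none', 'any'), ('py3', 'none', 'any')]
print(w.is_compatible())          # True: a pure-Python 'any' wheel fits everywhere
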
diff --git a/contrib/python/setuptools/py3/setuptools/windows_support.py b/contrib/python/setuptools/py3/setuptools/windows_support.py
deleted file mode 100644
index cb977cff954..00000000000
--- a/contrib/python/setuptools/py3/setuptools/windows_support.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import platform
-import ctypes
-
-
-def windows_only(func):
- if platform.system() != 'Windows':
- return lambda *args, **kwargs: None
- return func
-
-
-@windows_only
-def hide_file(path):
- """
- Set the hidden attribute on a file or directory.
-
- From http://stackoverflow.com/questions/19622133/
-
- `path` must be text.
- """
- __import__('ctypes.wintypes')
- SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
- SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
- SetFileAttributes.restype = ctypes.wintypes.BOOL
-
- FILE_ATTRIBUTE_HIDDEN = 0x02
-
- ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
- if not ret:
- raise ctypes.WinError()
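
A minimal sketch of the decorator's effect, assuming setuptools.windows_support
is importable; the path is made up:

from setuptools.windows_support import hide_file

# Off Windows, @windows_only replaces hide_file with a no-op returning None;
# on Windows this call would set the hidden attribute (or raise WinError for
# a nonexistent path).
print(hide_file('C:\\some\\path'))  # None unless running on Windows
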