author     AlexSm <alex@ydb.tech>  2024-03-05 10:40:59 +0100
committer  GitHub <noreply@github.com>  2024-03-05 12:40:59 +0300
commit     1ac13c847b5358faba44dbb638a828e24369467b (patch)
tree       07672b4dd3604ad3dee540a02c6494cb7d10dc3d /contrib/python
parent     ffcca3e7f7958ddc6487b91d3df8c01054bd0638 (diff)
download   ydb-1ac13c847b5358faba44dbb638a828e24369467b.tar.gz
Library import 16 (#2433)
Co-authored-by: robot-piglet <robot-piglet@yandex-team.com>
Co-authored-by: deshevoy <deshevoy@yandex-team.com>
Co-authored-by: robot-contrib <robot-contrib@yandex-team.com>
Co-authored-by: thegeorg <thegeorg@yandex-team.com>
Co-authored-by: robot-ya-builder <robot-ya-builder@yandex-team.com>
Co-authored-by: svidyuk <svidyuk@yandex-team.com>
Co-authored-by: shadchin <shadchin@yandex-team.com>
Co-authored-by: robot-ratatosk <robot-ratatosk@yandex-team.com>
Co-authored-by: innokentii <innokentii@yandex-team.com>
Co-authored-by: arkady-e1ppa <arkady-e1ppa@yandex-team.com>
Co-authored-by: snermolaev <snermolaev@yandex-team.com>
Co-authored-by: dimdim11 <dimdim11@yandex-team.com>
Co-authored-by: kickbutt <kickbutt@yandex-team.com>
Co-authored-by: abdullinsaid <abdullinsaid@yandex-team.com>
Co-authored-by: korsunandrei <korsunandrei@yandex-team.com>
Co-authored-by: petrk <petrk@yandex-team.com>
Co-authored-by: miroslav2 <miroslav2@yandex-team.com>
Co-authored-by: serjflint <serjflint@yandex-team.com>
Co-authored-by: akhropov <akhropov@yandex-team.com>
Co-authored-by: prettyboy <prettyboy@yandex-team.com>
Co-authored-by: ilikepugs <ilikepugs@yandex-team.com>
Co-authored-by: hiddenpath <hiddenpath@yandex-team.com>
Co-authored-by: mikhnenko <mikhnenko@yandex-team.com>
Co-authored-by: spreis <spreis@yandex-team.com>
Co-authored-by: andreyshspb <andreyshspb@yandex-team.com>
Co-authored-by: dimaandreev <dimaandreev@yandex-team.com>
Co-authored-by: rashid <rashid@yandex-team.com>
Co-authored-by: robot-ydb-importer <robot-ydb-importer@yandex-team.com>
Co-authored-by: r-vetrov <r-vetrov@yandex-team.com>
Co-authored-by: ypodlesov <ypodlesov@yandex-team.com>
Co-authored-by: zaverden <zaverden@yandex-team.com>
Co-authored-by: vpozdyayev <vpozdyayev@yandex-team.com>
Co-authored-by: robot-cozmo <robot-cozmo@yandex-team.com>
Co-authored-by: v-korovin <v-korovin@yandex-team.com>
Co-authored-by: arikon <arikon@yandex-team.com>
Co-authored-by: khoden <khoden@yandex-team.com>
Co-authored-by: psydmm <psydmm@yandex-team.com>
Co-authored-by: robot-javacom <robot-javacom@yandex-team.com>
Co-authored-by: dtorilov <dtorilov@yandex-team.com>
Co-authored-by: sennikovmv <sennikovmv@yandex-team.com>
Co-authored-by: hcpp <hcpp@ydb.tech>
Diffstat (limited to 'contrib/python')
-rw-r--r-- contrib/python/MarkupSafe/py3/.dist-info/METADATA | 2
-rw-r--r-- contrib/python/MarkupSafe/py3/markupsafe/__init__.py | 7
-rw-r--r-- contrib/python/MarkupSafe/py3/tests/test_markupsafe.py | 2
-rw-r--r-- contrib/python/MarkupSafe/py3/ya.make | 2
-rw-r--r-- contrib/python/appnope/py3/.dist-info/METADATA | 11
-rw-r--r-- contrib/python/appnope/py3/appnope/__init__.py | 10
-rw-r--r-- contrib/python/appnope/py3/appnope/_dummy.py | 16
-rw-r--r-- contrib/python/appnope/py3/appnope/_nope.py | 72
-rw-r--r-- contrib/python/appnope/py3/ya.make | 2
-rw-r--r-- contrib/python/fonttools/.dist-info/METADATA | 45
-rw-r--r-- contrib/python/fonttools/README.rst | 2
-rw-r--r-- contrib/python/fonttools/fontTools/__init__.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/afmLib.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/cffLib/__init__.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/colorLib/builder.py | 15
-rw-r--r-- contrib/python/fonttools/fontTools/config/__init__.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/designspaceLib/__init__.py | 92
-rw-r--r-- contrib/python/fonttools/fontTools/designspaceLib/__main__.py | 6
-rw-r--r-- contrib/python/fonttools/fontTools/designspaceLib/statNames.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/feaLib/builder.py | 32
-rw-r--r-- contrib/python/fonttools/fontTools/feaLib/lexer.py | 4
-rw-r--r-- contrib/python/fonttools/fontTools/feaLib/parser.py | 8
-rw-r--r-- contrib/python/fonttools/fontTools/merge/layout.py | 16
-rw-r--r-- contrib/python/fonttools/fontTools/misc/classifyTools.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/cliTools.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/configTools.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/dictTools.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/etree.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/filenames.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/textTools.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/transform.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/misc/vector.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/otlLib/builder.py | 346
-rw-r--r-- contrib/python/fonttools/fontTools/pens/basePen.py | 3
-rw-r--r-- contrib/python/fonttools/fontTools/pens/boundsPen.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/pens/filterPen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/pens/hashPointPen.py | 14
-rw-r--r-- contrib/python/fonttools/fontTools/pens/pointInsidePen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/pens/quartzPen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/pens/recordingPen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/pens/reportLabPen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/pens/roundingPen.py | 46
-rw-r--r-- contrib/python/fonttools/fontTools/pens/statisticsPen.py | 3
-rw-r--r-- contrib/python/fonttools/fontTools/pens/svgPathPen.py | 20
-rw-r--r-- contrib/python/fonttools/fontTools/pens/teePen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/pens/transformPen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/subset/__init__.py | 3
-rw-r--r-- contrib/python/fonttools/fontTools/svgLib/path/arc.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/t1Lib/__init__.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/macUtils.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/removeOverlaps.py | 8
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/scaleUpem.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/C_O_L_R_.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/O_S_2f_2.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__0.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__1.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__2.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__3.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__5.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/TupleVariation.py | 20
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/V_O_R_G_.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/_k_e_r_n.py | 6
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/otBase.py | 6
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/otTables.py | 35
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/otTraverse.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/sbixGlyph.py | 14
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/tables/ttProgram.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/ttCollection.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/ttFont.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/ttLib/ttGlyphSet.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/ttx.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/__init__.py | 2
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/converters.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/etree.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/glifLib.py | 9
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/plistlib.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/pointPen.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/ufoLib/utils.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/unicodedata/__init__.py | 6
-rw-r--r-- contrib/python/fonttools/fontTools/varLib/__init__.py | 17
-rw-r--r-- contrib/python/fonttools/fontTools/varLib/featureVars.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/varLib/instancer/__init__.py | 11
-rw-r--r-- contrib/python/fonttools/fontTools/varLib/interpolate_layout.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/varLib/merger.py | 1
-rw-r--r-- contrib/python/fonttools/fontTools/varLib/mutator.py | 15
-rw-r--r-- contrib/python/fonttools/ya.make | 3
-rw-r--r-- contrib/python/google-auth/py3/.dist-info/METADATA | 2
-rw-r--r-- contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py | 2
-rw-r--r-- contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py | 7
-rw-r--r-- contrib/python/google-auth/py3/google/auth/credentials.py | 4
-rw-r--r-- contrib/python/google-auth/py3/google/auth/downscoped.py | 14
-rw-r--r-- contrib/python/google-auth/py3/google/auth/external_account.py | 10
-rw-r--r-- contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py | 9
-rw-r--r-- contrib/python/google-auth/py3/google/auth/version.py | 2
-rw-r--r-- contrib/python/google-auth/py3/google/oauth2/credentials.py | 11
-rw-r--r-- contrib/python/google-auth/py3/google/oauth2/service_account.py | 31
-rw-r--r-- contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py | 13
-rw-r--r-- contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py | 8
-rw-r--r-- contrib/python/google-auth/py3/tests/oauth2/test_service_account.py | 11
-rw-r--r-- contrib/python/google-auth/py3/tests/test_aws.py | 3
-rw-r--r-- contrib/python/google-auth/py3/tests/test_downscoped.py | 88
-rw-r--r-- contrib/python/google-auth/py3/tests/test_external_account.py | 14
-rw-r--r-- contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py | 2
-rw-r--r-- contrib/python/google-auth/py3/tests/test_identity_pool.py | 4
-rw-r--r-- contrib/python/google-auth/py3/tests/test_pluggable.py | 2
-rw-r--r-- contrib/python/google-auth/py3/ya.make | 2
-rw-r--r-- contrib/python/httpcore/.dist-info/METADATA | 12
-rw-r--r-- contrib/python/httpcore/httpcore/__init__.py | 2
-rw-r--r-- contrib/python/httpcore/httpcore/_async/connection.py | 18
-rw-r--r-- contrib/python/httpcore/httpcore/_async/connection_pool.py | 364
-rw-r--r-- contrib/python/httpcore/httpcore/_async/http11.py | 3
-rw-r--r-- contrib/python/httpcore/httpcore/_async/socks_proxy.py | 4
-rw-r--r-- contrib/python/httpcore/httpcore/_sync/connection.py | 18
-rw-r--r-- contrib/python/httpcore/httpcore/_sync/connection_pool.py | 364
-rw-r--r-- contrib/python/httpcore/httpcore/_sync/http11.py | 3
-rw-r--r-- contrib/python/httpcore/httpcore/_synchronization.py | 58
-rw-r--r-- contrib/python/httpcore/ya.make | 2
-rw-r--r-- contrib/python/hypothesis/py3/.dist-info/METADATA | 6
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/control.py | 62
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/core.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/extra/_patching.py | 37
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/extra/codemods.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/extra/ghostwriter.py | 158
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/extra/pandas/impl.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py | 204
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py | 801
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/conjecture/engine.py | 23
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/conjecture/junkdrawer.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/conjecture/shrinker.py | 12
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/conjecture/utils.py | 22
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/escalation.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/observability.py | 18
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/internal/reflection.py | 3
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/provisional.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/stateful.py | 45
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/strategies/_internal/core.py | 37
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/strategies/_internal/misc.py | 8
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/strategies/_internal/utils.py | 18
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/vendor/pretty.py | 2
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/vendor/tlds-alpha-by-domain.txt | 6
-rw-r--r-- contrib/python/hypothesis/py3/hypothesis/version.py | 2
-rw-r--r-- contrib/python/hypothesis/py3/ya.make | 2
-rw-r--r-- contrib/python/numpy/py2/numpy/core/include/numpy/npy_math.h | 2
-rw-r--r-- contrib/python/psutil/py2/psutil/_psaix.py | 552
-rw-r--r-- contrib/python/psutil/py2/psutil/_psbsd.py | 917
-rw-r--r-- contrib/python/psutil/py2/psutil/_pssunos.py | 727
-rw-r--r-- contrib/python/psutil/py2/psutil/_psutil_posix.c | 4
-rw-r--r-- contrib/python/psutil/py2/psutil/arch/aix/ifaddrs.h | 34
-rw-r--r-- contrib/python/psutil/py2/psutil/arch/osx/ya.make | 9
-rw-r--r-- contrib/python/psutil/py2/psutil/arch/solaris/v10/ifaddrs.h | 26
-rw-r--r-- contrib/python/psutil/py2/test/test.py | 4
-rw-r--r-- contrib/python/psutil/py2/test/ya.make | 8
-rw-r--r-- contrib/python/psutil/py2/ya.make | 147
-rw-r--r-- contrib/python/psutil/py3/psutil/_psaix.py | 552
-rw-r--r-- contrib/python/psutil/py3/psutil/_psbsd.py | 917
-rw-r--r-- contrib/python/psutil/py3/psutil/_pssunos.py | 727
-rw-r--r-- contrib/python/psutil/py3/psutil/_psutil_posix.c | 4
-rw-r--r-- contrib/python/psutil/py3/psutil/arch/aix/ifaddrs.h | 34
-rw-r--r-- contrib/python/psutil/py3/psutil/arch/osx/ya.make | 9
-rw-r--r-- contrib/python/psutil/py3/psutil/arch/solaris/v10/ifaddrs.h | 26
-rw-r--r-- contrib/python/psutil/py3/test/test.py | 4
-rw-r--r-- contrib/python/psutil/py3/test/ya.make | 8
-rw-r--r-- contrib/python/psutil/py3/ya.make | 147
-rw-r--r-- contrib/python/responses/py3/.dist-info/METADATA | 4
-rw-r--r-- contrib/python/responses/py3/README.rst | 1
-rw-r--r-- contrib/python/responses/py3/responses/matchers.py | 27
-rw-r--r-- contrib/python/responses/py3/ya.make | 2
-rw-r--r-- contrib/python/setuptools/py3/.dist-info/METADATA | 19
-rw-r--r-- contrib/python/setuptools/py3/README.rst | 13
-rw-r--r-- contrib/python/setuptools/py3/_distutils_hack/__init__.py | 13
-rw-r--r-- contrib/python/setuptools/py3/pkg_resources/__init__.py | 113
-rw-r--r-- contrib/python/setuptools/py3/setuptools/_core_metadata.py | 1
-rw-r--r-- contrib/python/setuptools/py3/setuptools/_normalization.py | 16
-rw-r--r-- contrib/python/setuptools/py3/setuptools/_reqs.py | 6
-rw-r--r-- contrib/python/setuptools/py3/setuptools/build_meta.py | 8
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/_requirestxt.py | 1
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/bdist_egg.py | 23
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py | 3
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/build.py | 11
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/build_ext.py | 60
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/build_py.py | 2
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/develop.py | 7
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/dist_info.py | 9
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/easy_install.py | 104
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/editable_wheel.py | 28
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/egg_info.py | 11
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/install.py | 5
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/sdist.py | 16
-rw-r--r-- contrib/python/setuptools/py3/setuptools/command/test.py | 2
-rw-r--r-- contrib/python/setuptools/py3/setuptools/compat/__init__.py | 0
-rw-r--r-- contrib/python/setuptools/py3/setuptools/compat/py310.py | 10
-rw-r--r-- contrib/python/setuptools/py3/setuptools/compat/py311.py (renamed from contrib/python/setuptools/py3/setuptools/py312compat.py) | 0
-rw-r--r-- contrib/python/setuptools/py3/setuptools/config/__init__.py | 1
-rw-r--r-- contrib/python/setuptools/py3/setuptools/config/_apply_pyprojecttoml.py | 7
-rw-r--r-- contrib/python/setuptools/py3/setuptools/config/_validate_pyproject/error_reporting.py | 2
-rw-r--r-- contrib/python/setuptools/py3/setuptools/config/expand.py | 1
-rw-r--r-- contrib/python/setuptools/py3/setuptools/config/pyprojecttoml.py | 5
-rw-r--r-- contrib/python/setuptools/py3/setuptools/config/setupcfg.py | 15
-rw-r--r-- contrib/python/setuptools/py3/setuptools/depends.py | 2
-rw-r--r-- contrib/python/setuptools/py3/setuptools/discovery.py | 2
-rw-r--r-- contrib/python/setuptools/py3/setuptools/dist.py | 10
-rw-r--r-- contrib/python/setuptools/py3/setuptools/glob.py | 7
-rw-r--r-- contrib/python/setuptools/py3/setuptools/installer.py | 3
-rw-r--r-- contrib/python/setuptools/py3/setuptools/monkey.py | 20
-rw-r--r-- contrib/python/setuptools/py3/setuptools/msvc.py | 39
-rw-r--r-- contrib/python/setuptools/py3/setuptools/namespaces.py | 7
-rw-r--r-- contrib/python/setuptools/py3/setuptools/package_index.py | 17
-rw-r--r-- contrib/python/setuptools/py3/setuptools/sandbox.py | 36
-rw-r--r-- contrib/python/setuptools/py3/setuptools/unicode_utils.py | 4
-rw-r--r-- contrib/python/setuptools/py3/setuptools/wheel.py | 8
-rw-r--r-- contrib/python/setuptools/py3/ya.make | 6
-rw-r--r-- contrib/python/types-protobuf/ya.make | 12
213 files changed, 7394 insertions, 1770 deletions
diff --git a/contrib/python/MarkupSafe/py3/.dist-info/METADATA b/contrib/python/MarkupSafe/py3/.dist-info/METADATA
index c221b8e50e..dfe37d52df 100644
--- a/contrib/python/MarkupSafe/py3/.dist-info/METADATA
+++ b/contrib/python/MarkupSafe/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: MarkupSafe
-Version: 2.1.4
+Version: 2.1.5
Summary: Safely add untrusted strings to HTML/XML markup.
Home-page: https://palletsprojects.com/p/markupsafe/
Maintainer: Pallets
diff --git a/contrib/python/MarkupSafe/py3/markupsafe/__init__.py b/contrib/python/MarkupSafe/py3/markupsafe/__init__.py
index 2f401a8153..b40f24c66d 100644
--- a/contrib/python/MarkupSafe/py3/markupsafe/__init__.py
+++ b/contrib/python/MarkupSafe/py3/markupsafe/__init__.py
@@ -13,7 +13,7 @@ if t.TYPE_CHECKING:
_P = te.ParamSpec("_P")
-__version__ = "2.1.4"
+__version__ = "2.1.5"
def _simple_escaping_wrapper(func: "t.Callable[_P, str]") -> "t.Callable[_P, Markup]":
@@ -158,8 +158,7 @@ class Markup(str):
>>> Markup("Main &raquo;\t<em>About</em>").striptags()
'Main » About'
"""
- # collapse spaces
- value = " ".join(self.split())
+ value = str(self)
# Look for comments then tags separately. Otherwise, a comment that
# contains a tag would end early, leaving some of the comment behind.
@@ -193,6 +192,8 @@ class Markup(str):
value = f"{value[:start]}{value[end + 1:]}"
+ # collapse spaces
+ value = " ".join(value.split())
return self.__class__(value).unescape()
@classmethod
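
The ``striptags()`` change above fixes the order of operations: comments and tags are now removed first, and whitespace is collapsed only afterwards, so newlines inside a comment or tag can no longer split the surrounding text. A minimal sketch of the new behavior (the markup string is illustrative)::

    from markupsafe import Markup

    # Comments and tags are stripped before whitespace is collapsed, so
    # the newlines inside the comment do not leak into the output.
    html = Markup("<em>Foo &amp; Bar <!-- note\nwith\nnewlines -->\n</em>")
    print(html.striptags())  # Foo & Bar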
diff --git a/contrib/python/MarkupSafe/py3/tests/test_markupsafe.py b/contrib/python/MarkupSafe/py3/tests/test_markupsafe.py
index ea9a91873c..94bea38795 100644
--- a/contrib/python/MarkupSafe/py3/tests/test_markupsafe.py
+++ b/contrib/python/MarkupSafe/py3/tests/test_markupsafe.py
@@ -73,7 +73,7 @@ def test_escaping(escape):
Markup(
"<!-- outer comment -->"
"<em>Foo &amp; Bar"
- "<!-- inner comment about <em> -->"
+ " <!-- inner comment about <em> -->\n "
"</em>"
"<!-- comment\nwith\nnewlines\n-->"
"<meta content='tag\nwith\nnewlines'>"
diff --git a/contrib/python/MarkupSafe/py3/ya.make b/contrib/python/MarkupSafe/py3/ya.make
index 3583a8035e..f1664c93e8 100644
--- a/contrib/python/MarkupSafe/py3/ya.make
+++ b/contrib/python/MarkupSafe/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(2.1.4)
+VERSION(2.1.5)
LICENSE(BSD-3-Clause)
diff --git a/contrib/python/appnope/py3/.dist-info/METADATA b/contrib/python/appnope/py3/.dist-info/METADATA
index 9c7b757060..76ef35d8e7 100644
--- a/contrib/python/appnope/py3/.dist-info/METADATA
+++ b/contrib/python/appnope/py3/.dist-info/METADATA
@@ -1,20 +1,15 @@
Metadata-Version: 2.1
Name: appnope
-Version: 0.1.3
+Version: 0.1.4
Summary: Disable App Nap on macOS >= 10.9
Home-page: http://github.com/minrk/appnope
Author: Min Ragan-Kelley
Author-email: benjaminrk@gmail.com
License: BSD
-Platform: UNKNOWN
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.2
-Classifier: Programming Language :: Python :: 3.3
+Requires-Python: >=3.6
Description-Content-Type: text/markdown
License-File: LICENSE
@@ -48,5 +43,3 @@ It uses ctypes to wrap a `[NSProcessInfo beginActivityWithOptions]` call to disa
To install:
pip install appnope
-
-
diff --git a/contrib/python/appnope/py3/appnope/__init__.py b/contrib/python/appnope/py3/appnope/__init__.py
index bcf87f4917..c447f24185 100644
--- a/contrib/python/appnope/py3/appnope/__init__.py
+++ b/contrib/python/appnope/py3/appnope/__init__.py
@@ -1,13 +1,15 @@
-__version__ = '0.1.3'
+__version__ = "0.1.4"
import re
import sys
import platform
+
def _v(version_s):
- return tuple(int(s) for s in re.findall("\d+", version_s))
+ return tuple(int(s) for s in re.findall(r"\d+", version_s))
+
if sys.platform != "darwin" or _v(platform.mac_ver()[0]) < _v("10.9"):
- from ._dummy import *
+ from ._dummy import * # noqa
else:
- from ._nope import *
+ from ._nope import * # noqa
diff --git a/contrib/python/appnope/py3/appnope/_dummy.py b/contrib/python/appnope/py3/appnope/_dummy.py
index a55ec5bfcd..a4ee06f89e 100644
--- a/contrib/python/appnope/py3/appnope/_dummy.py
+++ b/contrib/python/appnope/py3/appnope/_dummy.py
@@ -1,30 +1,32 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Copyright (C) 2013 Min RK
#
# Distributed under the terms of the 2-clause BSD License.
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
from contextlib import contextmanager
+
def beginActivityWithOptions(options, reason=""):
return
+
def endActivity(activity):
return
+
def nope():
return
+
def nap():
return
@contextmanager
-def nope_scope(
- options=0,
- reason="Because Reasons"
- ):
+def nope_scope(options=0, reason="Because Reasons"):
yield
+
def napping_allowed():
- return True
\ No newline at end of file
+ return True
diff --git a/contrib/python/appnope/py3/appnope/_nope.py b/contrib/python/appnope/py3/appnope/_nope.py
index d83e826797..10d1c056f4 100644
--- a/contrib/python/appnope/py3/appnope/_nope.py
+++ b/contrib/python/appnope/py3/appnope/_nope.py
@@ -1,16 +1,16 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Copyright (C) 2013 Min RK
#
# Distributed under the terms of the 2-clause BSD License.
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
from contextlib import contextmanager
import ctypes
import ctypes.util
-objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))
-_ = ctypes.cdll.LoadLibrary(ctypes.util.find_library('Foundation'))
+objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("objc"))
+_ = ctypes.cdll.LoadLibrary(ctypes.util.find_library("Foundation"))
void_p = ctypes.c_void_p
ull = ctypes.c_uint64
@@ -22,74 +22,82 @@ objc.objc_msgSend.argtypes = [void_p, void_p]
msg = objc.objc_msgSend
+
def _utf8(s):
"""ensure utf8 bytes"""
if not isinstance(s, bytes):
- s = s.encode('utf8')
+ s = s.encode("utf8")
return s
+
def n(name):
"""create a selector name (for methods)"""
return objc.sel_registerName(_utf8(name))
+
def C(classname):
"""get an ObjC Class by name"""
ret = objc.objc_getClass(_utf8(classname))
assert ret is not None, "Couldn't find Class %s" % classname
return ret
+
# constants from Foundation
-NSActivityIdleDisplaySleepDisabled = (1 << 40)
-NSActivityIdleSystemSleepDisabled = (1 << 20)
-NSActivitySuddenTerminationDisabled = (1 << 14)
-NSActivityAutomaticTerminationDisabled = (1 << 15)
-NSActivityUserInitiated = (0x00FFFFFF | NSActivityIdleSystemSleepDisabled)
-NSActivityUserInitiatedAllowingIdleSystemSleep = (NSActivityUserInitiated & ~NSActivityIdleSystemSleepDisabled)
-NSActivityBackground = 0x000000FF
-NSActivityLatencyCritical = 0xFF00000000
+NSActivityIdleDisplaySleepDisabled = 1 << 40
+NSActivityIdleSystemSleepDisabled = 1 << 20
+NSActivitySuddenTerminationDisabled = 1 << 14
+NSActivityAutomaticTerminationDisabled = 1 << 15
+NSActivityUserInitiated = 0x00FFFFFF | NSActivityIdleSystemSleepDisabled
+NSActivityUserInitiatedAllowingIdleSystemSleep = (
+ NSActivityUserInitiated & ~NSActivityIdleSystemSleepDisabled
+)
+NSActivityBackground = 0x000000FF
+NSActivityLatencyCritical = 0xFF00000000
+
def beginActivityWithOptions(options, reason=""):
"""Wrapper for:
-
- [ [ NSProcessInfo processInfo]
+
+ [ [ NSProcessInfo processInfo]
beginActivityWithOptions: (uint64)options
reason: (str)reason
]
"""
- NSProcessInfo = C('NSProcessInfo')
- NSString = C('NSString')
-
+ NSProcessInfo = C("NSProcessInfo")
+ NSString = C("NSString")
+
objc.objc_msgSend.argtypes = [void_p, void_p, void_p]
reason = msg(NSString, n("stringWithUTF8String:"), _utf8(reason))
objc.objc_msgSend.argtypes = [void_p, void_p]
- info = msg(NSProcessInfo, n('processInfo'))
+ info = msg(NSProcessInfo, n("processInfo"))
objc.objc_msgSend.argtypes = [void_p, void_p, ull, void_p]
- activity = msg(info,
- n('beginActivityWithOptions:reason:'),
- ull(options),
- void_p(reason)
+ activity = msg(
+ info, n("beginActivityWithOptions:reason:"), ull(options), void_p(reason)
)
return activity
+
def endActivity(activity):
"""end a process activity assertion"""
- NSProcessInfo = C('NSProcessInfo')
+ NSProcessInfo = C("NSProcessInfo")
objc.objc_msgSend.argtypes = [void_p, void_p]
- info = msg(NSProcessInfo, n('processInfo'))
+ info = msg(NSProcessInfo, n("processInfo"))
objc.objc_msgSend.argtypes = [void_p, void_p, void_p]
msg(info, n("endActivity:"), void_p(activity))
+
_theactivity = None
+
def nope():
"""disable App Nap by setting NSActivityUserInitiatedAllowingIdleSystemSleep"""
global _theactivity
_theactivity = beginActivityWithOptions(
- NSActivityUserInitiatedAllowingIdleSystemSleep,
- "Because Reasons"
+ NSActivityUserInitiatedAllowingIdleSystemSleep, "Because Reasons"
)
+
def nap():
"""end the caffeinated state started by `nope`"""
global _theactivity
@@ -97,17 +105,18 @@ def nap():
endActivity(_theactivity)
_theactivity = None
+
def napping_allowed():
"""is napping allowed?"""
return _theactivity is None
+
@contextmanager
def nope_scope(
- options=NSActivityUserInitiatedAllowingIdleSystemSleep,
- reason="Because Reasons"
- ):
+ options=NSActivityUserInitiatedAllowingIdleSystemSleep, reason="Because Reasons"
+):
"""context manager for beginActivityWithOptions.
-
+
Within this context, App Nap will be disabled.
"""
activity = beginActivityWithOptions(options, reason)
@@ -116,6 +125,7 @@ def nope_scope(
finally:
endActivity(activity)
+
__all__ = [
"NSActivityIdleDisplaySleepDisabled",
"NSActivityIdleSystemSleepDisabled",
diff --git a/contrib/python/appnope/py3/ya.make b/contrib/python/appnope/py3/ya.make
index bacfa9a91b..a1901177d1 100644
--- a/contrib/python/appnope/py3/ya.make
+++ b/contrib/python/appnope/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(0.1.3)
+VERSION(0.1.4)
LICENSE(BSD-2-Clause)
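
The reformatted ``_nope`` module above does the real work on macOS >= 10.9 via ctypes calls into ``NSProcessInfo``, while ``_dummy`` supplies no-op fallbacks for every other platform, so callers can use one API unconditionally. A minimal usage sketch (the workload function is hypothetical)::

    import appnope

    def run_benchmark():  # hypothetical long-running workload
        ...

    # Disable App Nap for the duration of the block; endActivity() is
    # called automatically when the context exits.
    with appnope.nope_scope(reason="long benchmark"):
        run_benchmark()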
diff --git a/contrib/python/fonttools/.dist-info/METADATA b/contrib/python/fonttools/.dist-info/METADATA
index f9e01c388f..a711f82b4e 100644
--- a/contrib/python/fonttools/.dist-info/METADATA
+++ b/contrib/python/fonttools/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: fonttools
-Version: 4.47.2
+Version: 4.49.0
Summary: Tools to manipulate font files
Home-page: http://github.com/fonttools/fonttools
Author: Just van Rossum
@@ -31,7 +31,7 @@ Requires-Python: >=3.8
License-File: LICENSE
Provides-Extra: all
Requires-Dist: fs <3,>=2.2.0 ; extra == 'all'
-Requires-Dist: lxml <5,>=4.0 ; extra == 'all'
+Requires-Dist: lxml >=4.0 ; extra == 'all'
Requires-Dist: zopfli >=0.1.4 ; extra == 'all'
Requires-Dist: lz4 >=1.7.4.2 ; extra == 'all'
Requires-Dist: pycairo ; extra == 'all'
@@ -52,7 +52,7 @@ Requires-Dist: pycairo ; extra == 'interpolatable'
Requires-Dist: scipy ; (platform_python_implementation != "PyPy") and extra == 'interpolatable'
Requires-Dist: munkres ; (platform_python_implementation == "PyPy") and extra == 'interpolatable'
Provides-Extra: lxml
-Requires-Dist: lxml <5,>=4.0 ; extra == 'lxml'
+Requires-Dist: lxml >=4.0 ; extra == 'lxml'
Provides-Extra: pathops
Requires-Dist: skia-pathops >=0.5.0 ; extra == 'pathops'
Provides-Extra: plot
@@ -118,7 +118,7 @@ Python 3 `venv <https://docs.python.org/3/library/venv.html>`__ module.
# create new virtual environment called e.g. 'fonttools-venv', or anything you like
python -m virtualenv fonttools-venv
- # source the `activate` shell script to enter the environment (Un*x); to exit, just type `deactivate`
+ # source the `activate` shell script to enter the environment (Unix-like); to exit, just type `deactivate`
. fonttools-venv/bin/activate
# to activate the virtual environment in Windows `cmd.exe`, do
@@ -375,6 +375,37 @@ Have fun!
Changelog
~~~~~~~~~
+4.49.0 (released 2024-02-15)
+----------------------------
+
+- [otlLib] Add API for building ``MATH`` table (#3446)
+
+4.48.1 (released 2024-02-06)
+----------------------------
+
+- Fixed uploading wheels to PyPI, no code changes since v4.48.0.
+
+4.48.0 (released 2024-02-06)
+----------------------------
+
+- [varLib] Do not log when there are no OTL tables to be merged.
+- [setup.py] Do not restrict lxml<5 any more, tests pass just fine with lxml>=5.
+- [feaLib] Remove glyph and class names length restrictions in FEA (#3424).
+- [roundingPens] Added ``transformRoundFunc`` parameter to the rounding pens to allow
+ for custom rounding of the components' transforms (#3426).
+- [feaLib] Keep declaration order of ligature components within a ligature set, instead
+ of sorting by glyph name (#3429).
+- [feaLib] Fixed ordering of alternates in ``aalt`` lookups, following the declaration
+ order of feature references within the ``aalt`` feature block (#3430).
+- [varLib.instancer] Fixed a bug in the instancer's IUP optimization (#3432).
+- [sbix] Support sbix glyphs with new graphicType "flip" (#3433).
+- [svgPathPen] Added ``--glyphs`` option to dump the SVG paths for the named glyphs
+ in the font (0572f78).
+- [designspaceLib] Added "description" attribute to ``<mappings>`` and ``<mapping>``
+ elements, and allow multiple ``<mappings>`` elements to group ``<mapping>`` elements
+ that are logically related (#3435, #3437).
+- [otlLib] Correctly choose the most compact GSUB contextual lookup format (#3439).
+
4.47.2 (released 2024-01-11)
----------------------------
@@ -697,10 +728,10 @@ Minor release to fix uploading wheels to PyPI.
----------------------------
- [varLib.instancer] Added support for L4 instancing, i.e. moving the default value of
- an axis while keeping it variable. Thanks Behdad! (#2728, #2861).
+ an axis while keeping it variable. Thanks Behdad! (#2728, #2861).
It's now also possible to restrict an axis min/max values beyond the current default
value, e.g. a font wght has min=100, def=400, max=900 and you want a partial VF that
- only varies between 500 and 700, you can now do that.
+ only varies between 500 and 700, you can now do that.
You can either specify two min/max values (wght=500:700), and the new default will be
set to either the minimum or maximum, depending on which one is closer to the current
default (e.g. 500 in this case). Or you can specify three values (e.g. wght=500:600:700)
@@ -708,7 +739,7 @@ Minor release to fix uploading wheels to PyPI.
- [otlLib/featureVars] Set a few Count values so one doesn't need to compile the font
to update them (#2860).
- [varLib.models] Make extrapolation work for 2-master models as well where one master
- is at the default location (#2843, #2846).
+ is at the default location (#2843, #2846).
Add optional extrapolate=False to normalizeLocation() (#2847, #2849).
- [varLib.cff] Fixed sub-optimal packing of CFF2 deltas by no longer rounding them to
integer (#2838).
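
The L4-instancing changelog entry above describes limiting an axis while keeping it variable; the same limits can be passed to the Python API as (min, max) or (min, default, max) tuples. A minimal sketch, assuming a variable font with a ``wght`` axis (file names are hypothetical, and the triple form mirrors the changelog's ``wght=500:600:700`` syntax)::

    from fontTools import ttLib
    from fontTools.varLib import instancer

    varfont = ttLib.TTFont("MyFont-VF.ttf")  # hypothetical input
    # Restrict wght to 500..700 but keep it variable; the middle value
    # pins the new default explicitly.
    partial = instancer.instantiateVariableFont(
        varfont, {"wght": (500, 600, 700)}
    )
    partial.save("MyFont-VF.partial.ttf")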
diff --git a/contrib/python/fonttools/README.rst b/contrib/python/fonttools/README.rst
index d84282fc76..2274fbdc69 100644
--- a/contrib/python/fonttools/README.rst
+++ b/contrib/python/fonttools/README.rst
@@ -44,7 +44,7 @@ Python 3 `venv <https://docs.python.org/3/library/venv.html>`__ module.
# create new virtual environment called e.g. 'fonttools-venv', or anything you like
python -m virtualenv fonttools-venv
- # source the `activate` shell script to enter the environment (Un*x); to exit, just type `deactivate`
+ # source the `activate` shell script to enter the environment (Unix-like); to exit, just type `deactivate`
. fonttools-venv/bin/activate
# to activate the virtual environment in Windows `cmd.exe`, do
diff --git a/contrib/python/fonttools/fontTools/__init__.py b/contrib/python/fonttools/fontTools/__init__.py
index 7410d3c7fe..e6a745bd52 100644
--- a/contrib/python/fonttools/fontTools/__init__.py
+++ b/contrib/python/fonttools/fontTools/__init__.py
@@ -3,6 +3,6 @@ from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
-version = __version__ = "4.47.2"
+version = __version__ = "4.49.0"
__all__ = ["version", "log", "configLogger"]
diff --git a/contrib/python/fonttools/fontTools/afmLib.py b/contrib/python/fonttools/fontTools/afmLib.py
index e89646951c..0aabf7f635 100644
--- a/contrib/python/fonttools/fontTools/afmLib.py
+++ b/contrib/python/fonttools/fontTools/afmLib.py
@@ -45,7 +45,6 @@ Here is an example of using `afmLib` to read, modify and write an AFM file:
"""
-
import re
# every single line starts with a "word"
diff --git a/contrib/python/fonttools/fontTools/cffLib/__init__.py b/contrib/python/fonttools/fontTools/cffLib/__init__.py
index 644508c155..0ad41c5674 100644
--- a/contrib/python/fonttools/fontTools/cffLib/__init__.py
+++ b/contrib/python/fonttools/fontTools/cffLib/__init__.py
@@ -2880,7 +2880,6 @@ class PrivateDict(BaseDict):
class IndexedStrings(object):
-
"""SID -> string mapping."""
def __init__(self, file=None):
diff --git a/contrib/python/fonttools/fontTools/colorLib/builder.py b/contrib/python/fonttools/fontTools/colorLib/builder.py
index 442bc20e42..6e45e7a885 100644
--- a/contrib/python/fonttools/fontTools/colorLib/builder.py
+++ b/contrib/python/fonttools/fontTools/colorLib/builder.py
@@ -2,6 +2,7 @@
colorLib.builder: Build COLR/CPAL tables from scratch
"""
+
import collections
import copy
import enum
@@ -298,11 +299,15 @@ def buildPaletteLabels(
labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
return [
- nameTable.addMultilingualName(l, mac=False)
- if isinstance(l, dict)
- else C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
- if l is None
- else nameTable.addMultilingualName({"en": l}, mac=False)
+ (
+ nameTable.addMultilingualName(l, mac=False)
+ if isinstance(l, dict)
+ else (
+ C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
+ if l is None
+ else nameTable.addMultilingualName({"en": l}, mac=False)
+ )
+ )
for l in labels
]
diff --git a/contrib/python/fonttools/fontTools/config/__init__.py b/contrib/python/fonttools/fontTools/config/__init__.py
index c106fe51fc..41ab8f7581 100644
--- a/contrib/python/fonttools/fontTools/config/__init__.py
+++ b/contrib/python/fonttools/fontTools/config/__init__.py
@@ -6,6 +6,7 @@ etc. If this file gets too big, split it into smaller files per-module.
An instance of the Config class can be attached to a TTFont object, so that
the various modules can access their configuration options from it.
"""
+
from textwrap import dedent
from fontTools.misc.configTools import *
diff --git a/contrib/python/fonttools/fontTools/designspaceLib/__init__.py b/contrib/python/fonttools/fontTools/designspaceLib/__init__.py
index 69d4912c09..342f1decd5 100644
--- a/contrib/python/fonttools/fontTools/designspaceLib/__init__.py
+++ b/contrib/python/fonttools/fontTools/designspaceLib/__init__.py
@@ -476,7 +476,14 @@ class AxisMappingDescriptor(SimpleDescriptor):
_attrs = ["inputLocation", "outputLocation"]
- def __init__(self, *, inputLocation=None, outputLocation=None):
+ def __init__(
+ self,
+ *,
+ inputLocation=None,
+ outputLocation=None,
+ description=None,
+ groupDescription=None,
+ ):
self.inputLocation: SimpleLocationDict = inputLocation or {}
"""dict. Axis values for the input of the mapping, in design space coordinates.
@@ -491,6 +498,20 @@ class AxisMappingDescriptor(SimpleDescriptor):
.. versionadded:: 5.1
"""
+ self.description = description
+ """string. A description of the mapping.
+
+ varLib.
+
+ .. versionadded:: 5.2
+ """
+ self.groupDescription = groupDescription
+ """string. A description of the group of mappings.
+
+ varLib.
+
+ .. versionadded:: 5.2
+ """
class InstanceDescriptor(SimpleDescriptor):
@@ -1413,18 +1434,27 @@ class BaseDocWriter(object):
):
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
- axesElement.attrib[
- "elidedfallbackname"
- ] = self.documentObject.elidedFallbackName
+ axesElement.attrib["elidedfallbackname"] = (
+ self.documentObject.elidedFallbackName
+ )
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
if self.documentObject.axisMappings:
- mappingsElement = ET.Element("mappings")
- self.root.findall(".axes")[0].append(mappingsElement)
+ mappingsElement = None
+ lastGroup = object()
for mappingObject in self.documentObject.axisMappings:
+ if getattr(mappingObject, "groupDescription", None) != lastGroup:
+ if mappingsElement is not None:
+ self.root.findall(".axes")[0].append(mappingsElement)
+ lastGroup = getattr(mappingObject, "groupDescription", None)
+ mappingsElement = ET.Element("mappings")
+ if lastGroup is not None:
+ mappingsElement.attrib["description"] = lastGroup
self._addAxisMapping(mappingsElement, mappingObject)
+ if mappingsElement is not None:
+ self.root.findall(".axes")[0].append(mappingsElement)
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
@@ -1586,6 +1616,8 @@ class BaseDocWriter(object):
def _addAxisMapping(self, mappingsElement, mappingObject):
mappingElement = ET.Element("mapping")
+ if getattr(mappingObject, "description", None) is not None:
+ mappingElement.attrib["description"] = mappingObject.description
for what in ("inputLocation", "outputLocation"):
whatObject = getattr(mappingObject, what, None)
if whatObject is None:
@@ -1744,17 +1776,17 @@ class BaseDocWriter(object):
if instanceObject.filename is not None:
instanceElement.attrib["filename"] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
- instanceElement.attrib[
- "postscriptfontname"
- ] = instanceObject.postScriptFontName
+ instanceElement.attrib["postscriptfontname"] = (
+ instanceObject.postScriptFontName
+ )
if instanceObject.styleMapFamilyName is not None:
- instanceElement.attrib[
- "stylemapfamilyname"
- ] = instanceObject.styleMapFamilyName
+ instanceElement.attrib["stylemapfamilyname"] = (
+ instanceObject.styleMapFamilyName
+ )
if instanceObject.styleMapStyleName is not None:
- instanceElement.attrib[
- "stylemapstylename"
- ] = instanceObject.styleMapStyleName
+ instanceElement.attrib["stylemapstylename"] = (
+ instanceObject.styleMapStyleName
+ )
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
@@ -2081,10 +2113,11 @@ class BaseDocReader(LogMixin):
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
- mappingsElement = self.root.find(".axes/mappings")
self.documentObject.axisMappings = []
- if mappingsElement is not None:
+ for mappingsElement in self.root.findall(".axes/mappings"):
+ groupDescription = mappingsElement.attrib.get("description")
for mappingElement in mappingsElement.findall("mapping"):
+ description = mappingElement.attrib.get("description")
inputElement = mappingElement.find("input")
outputElement = mappingElement.find("output")
inputLoc = {}
@@ -2098,7 +2131,10 @@ class BaseDocReader(LogMixin):
value = float(dimElement.attrib["xvalue"])
outputLoc[name] = value
axisMappingObject = self.axisMappingDescriptorClass(
- inputLocation=inputLoc, outputLocation=outputLoc
+ inputLocation=inputLoc,
+ outputLocation=outputLoc,
+ description=description,
+ groupDescription=groupDescription,
)
self.documentObject.axisMappings.append(axisMappingObject)
@@ -3279,3 +3315,23 @@ class DesignSpaceDocument(LogMixin, AsDictMixin):
finally:
for source, font in zip(self.sources, fonts):
source.font = font
+
+
+def main(args=None):
+ """Roundtrip .designspace file through the DesignSpaceDocument class"""
+
+ if args is None:
+ import sys
+
+ args = sys.argv[1:]
+
+ from argparse import ArgumentParser
+
+ parser = ArgumentParser(prog="designspaceLib", description=main.__doc__)
+ parser.add_argument("input")
+ parser.add_argument("output")
+
+ options = parser.parse_args(args)
+
+ ds = DesignSpaceDocument.fromfile(options.input)
+ ds.write(options.output)
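
With the new ``description`` and ``groupDescription`` attributes above, consecutive mappings that share a group description are serialized into a single ``<mappings>`` element, and each ``<mapping>`` may carry its own description. A minimal sketch (axis name and values are illustrative)::

    from fontTools.designspaceLib import (
        AxisMappingDescriptor,
        DesignSpaceDocument,
    )

    doc = DesignSpaceDocument()
    # ... axes would be defined here ...
    doc.axisMappings.append(
        AxisMappingDescriptor(
            inputLocation={"Weight": 900},
            outputLocation={"Weight": 780},
            description="pull back the heaviest weight",
            groupDescription="weight corrections",
        )
    )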
diff --git a/contrib/python/fonttools/fontTools/designspaceLib/__main__.py b/contrib/python/fonttools/fontTools/designspaceLib/__main__.py
new file mode 100644
index 0000000000..8f5e44ea9e
--- /dev/null
+++ b/contrib/python/fonttools/fontTools/designspaceLib/__main__.py
@@ -0,0 +1,6 @@
+import sys
+from fontTools.designspaceLib import main
+
+
+if __name__ == "__main__":
+ sys.exit(main())
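
The new ``__main__`` module makes the round-trip helper runnable directly, e.g. ``python -m fontTools.designspaceLib input.designspace output.designspace``, which parses the document through ``DesignSpaceDocument`` and writes it back out.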
diff --git a/contrib/python/fonttools/fontTools/designspaceLib/statNames.py b/contrib/python/fonttools/fontTools/designspaceLib/statNames.py
index a164169da6..1474e5fcf5 100644
--- a/contrib/python/fonttools/fontTools/designspaceLib/statNames.py
+++ b/contrib/python/fonttools/fontTools/designspaceLib/statNames.py
@@ -8,6 +8,7 @@ instance:
names = getStatNames(doc, instance.getFullUserLocation(doc))
print(names.styleNames)
"""
+
from __future__ import annotations
from dataclasses import dataclass
diff --git a/contrib/python/fonttools/fontTools/feaLib/builder.py b/contrib/python/fonttools/fontTools/feaLib/builder.py
index 36eed95148..7921a3f179 100644
--- a/contrib/python/fonttools/fontTools/feaLib/builder.py
+++ b/contrib/python/fonttools/fontTools/feaLib/builder.py
@@ -285,7 +285,11 @@ class Builder(object):
def build_feature_aalt_(self):
if not self.aalt_features_ and not self.aalt_alternates_:
return
- alternates = {g: set(a) for g, a in self.aalt_alternates_.items()}
+ # > alternate glyphs will be sorted in the order that the source features
+ # > are named in the aalt definition, not the order of the feature definitions
+ # > in the file. Alternates defined explicitly ... will precede all others.
+ # https://github.com/fonttools/fonttools/issues/836
+ alternates = {g: list(a) for g, a in self.aalt_alternates_.items()}
for location, name in self.aalt_features_ + [(None, "aalt")]:
feature = [
(script, lang, feature, lookups)
@@ -302,17 +306,14 @@ class Builder(object):
lookuplist = [lookuplist]
for lookup in lookuplist:
for glyph, alts in lookup.getAlternateGlyphs().items():
- alternates.setdefault(glyph, set()).update(alts)
+ alts_for_glyph = alternates.setdefault(glyph, [])
+ alts_for_glyph.extend(
+ g for g in alts if g not in alts_for_glyph
+ )
single = {
- glyph: list(repl)[0] for glyph, repl in alternates.items() if len(repl) == 1
- }
- # TODO: Figure out the glyph alternate ordering used by makeotf.
- # https://github.com/fonttools/fonttools/issues/836
- multi = {
- glyph: sorted(repl, key=self.font.getGlyphID)
- for glyph, repl in alternates.items()
- if len(repl) > 1
+ glyph: repl[0] for glyph, repl in alternates.items() if len(repl) == 1
}
+ multi = {glyph: repl for glyph, repl in alternates.items() if len(repl) > 1}
if not single and not multi:
return
self.features_ = {
@@ -1249,8 +1250,9 @@ class Builder(object):
def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
if self.cur_feature_name_ == "aalt":
for from_glyph, to_glyph in mapping.items():
- alts = self.aalt_alternates_.setdefault(from_glyph, set())
- alts.add(to_glyph)
+ alts = self.aalt_alternates_.setdefault(from_glyph, [])
+ if to_glyph not in alts:
+ alts.append(to_glyph)
return
if prefix or suffix or forceChain:
self.add_single_subst_chained_(location, prefix, suffix, mapping)
@@ -1303,8 +1305,8 @@ class Builder(object):
# GSUB 3
def add_alternate_subst(self, location, prefix, glyph, suffix, replacement):
if self.cur_feature_name_ == "aalt":
- alts = self.aalt_alternates_.setdefault(glyph, set())
- alts.update(replacement)
+ alts = self.aalt_alternates_.setdefault(glyph, [])
+ alts.extend(g for g in replacement if g not in alts)
return
if prefix or suffix:
chain = self.get_lookup_(location, ChainContextSubstBuilder)
@@ -1338,7 +1340,7 @@ class Builder(object):
# substitutions to be specified on target sequences that contain
# glyph classes, the implementation software will enumerate
# all specific glyph sequences if glyph classes are detected"
- for g in sorted(itertools.product(*glyphs)):
+ for g in itertools.product(*glyphs):
lookup.ligatures[g] = replacement
# GSUB 5/6
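
The ``aalt`` changes above replace sets with lists so alternates keep their declaration order (instead of being sorted by glyph ID), de-duplicating with an order-preserving extend. The same idiom in isolation (glyph names are illustrative)::

    alternates = {}
    lookups = [("a", ["a.salt", "a.swsh"]), ("a", ["a.swsh", "a.hist"])]
    for glyph, alts in lookups:
        alts_for_glyph = alternates.setdefault(glyph, [])
        # Extend without re-adding glyphs already present, keeping order.
        alts_for_glyph.extend(g for g in alts if g not in alts_for_glyph)
    print(alternates)  # {'a': ['a.salt', 'a.swsh', 'a.hist']}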
diff --git a/contrib/python/fonttools/fontTools/feaLib/lexer.py b/contrib/python/fonttools/fontTools/feaLib/lexer.py
index e0ae0aefee..5867f70b38 100644
--- a/contrib/python/fonttools/fontTools/feaLib/lexer.py
+++ b/contrib/python/fonttools/fontTools/feaLib/lexer.py
@@ -111,10 +111,6 @@ class Lexer(object):
glyphclass = text[start + 1 : self.pos_]
if len(glyphclass) < 1:
raise FeatureLibError("Expected glyph class name", location)
- if len(glyphclass) > 63:
- raise FeatureLibError(
- "Glyph class names must not be longer than 63 characters", location
- )
if not Lexer.RE_GLYPHCLASS.match(glyphclass):
raise FeatureLibError(
"Glyph class names must consist of letters, digits, "
diff --git a/contrib/python/fonttools/fontTools/feaLib/parser.py b/contrib/python/fonttools/fontTools/feaLib/parser.py
index 8ffdf644c3..8cbe79592b 100644
--- a/contrib/python/fonttools/fontTools/feaLib/parser.py
+++ b/contrib/python/fonttools/fontTools/feaLib/parser.py
@@ -2071,13 +2071,7 @@ class Parser(object):
def expect_glyph_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME:
- self.cur_token_ = self.cur_token_.lstrip("\\")
- if len(self.cur_token_) > 63:
- raise FeatureLibError(
- "Glyph names must not be longer than 63 characters",
- self.cur_token_location_,
- )
- return self.cur_token_
+ return self.cur_token_.lstrip("\\")
elif self.cur_token_type_ is Lexer.CID:
return "cid%05d" % self.cur_token_
raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_)
diff --git a/contrib/python/fonttools/fontTools/merge/layout.py b/contrib/python/fonttools/fontTools/merge/layout.py
index 6b85cd5033..e1b504e619 100644
--- a/contrib/python/fonttools/fontTools/merge/layout.py
+++ b/contrib/python/fonttools/fontTools/merge/layout.py
@@ -169,20 +169,16 @@ otTables.BaseTagList.mergeMap = {
"BaselineTag": sumLists,
}
-otTables.GDEF.mergeMap = (
- otTables.GSUB.mergeMap
-) = (
- otTables.GPOS.mergeMap
-) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
+otTables.GDEF.mergeMap = otTables.GSUB.mergeMap = otTables.GPOS.mergeMap = (
+ otTables.BASE.mergeMap
+) = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
"*": mergeObjects,
"Version": max,
}
-ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass(
- "GSUB"
-).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass(
- "BASE"
-).mergeMap = ttLib.getTableClass(
+ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass("GSUB").mergeMap = (
+ ttLib.getTableClass("GPOS").mergeMap
+) = ttLib.getTableClass("BASE").mergeMap = ttLib.getTableClass(
"JSTF"
).mergeMap = ttLib.getTableClass(
"MATH"
diff --git a/contrib/python/fonttools/fontTools/misc/classifyTools.py b/contrib/python/fonttools/fontTools/misc/classifyTools.py
index 2235bbd7f8..aed7ca68c4 100644
--- a/contrib/python/fonttools/fontTools/misc/classifyTools.py
+++ b/contrib/python/fonttools/fontTools/misc/classifyTools.py
@@ -3,7 +3,6 @@
class Classifier(object):
-
"""
Main Classifier object, used to classify things into similar sets.
"""
diff --git a/contrib/python/fonttools/fontTools/misc/cliTools.py b/contrib/python/fonttools/fontTools/misc/cliTools.py
index 8322ea9ebb..8a64235bf0 100644
--- a/contrib/python/fonttools/fontTools/misc/cliTools.py
+++ b/contrib/python/fonttools/fontTools/misc/cliTools.py
@@ -1,4 +1,5 @@
"""Collection of utilities for command-line interfaces and console scripts."""
+
import os
import re
diff --git a/contrib/python/fonttools/fontTools/misc/configTools.py b/contrib/python/fonttools/fontTools/misc/configTools.py
index 38bbada24a..7eb1854fdf 100644
--- a/contrib/python/fonttools/fontTools/misc/configTools.py
+++ b/contrib/python/fonttools/fontTools/misc/configTools.py
@@ -8,6 +8,7 @@ To create your own config system, you need to create an instance of
``options`` class variable set to your instance of Options.
"""
+
from __future__ import annotations
import logging
diff --git a/contrib/python/fonttools/fontTools/misc/dictTools.py b/contrib/python/fonttools/fontTools/misc/dictTools.py
index e3c0df7355..cd3d394c25 100644
--- a/contrib/python/fonttools/fontTools/misc/dictTools.py
+++ b/contrib/python/fonttools/fontTools/misc/dictTools.py
@@ -1,6 +1,5 @@
"""Misc dict tools."""
-
__all__ = ["hashdict"]
diff --git a/contrib/python/fonttools/fontTools/misc/etree.py b/contrib/python/fonttools/fontTools/misc/etree.py
index 9d4a65c360..d0967b5f52 100644
--- a/contrib/python/fonttools/fontTools/misc/etree.py
+++ b/contrib/python/fonttools/fontTools/misc/etree.py
@@ -11,6 +11,7 @@ or subclasses built-in ElementTree classes to add features that are
only availble in lxml, like OrderedDict for attributes, pretty_print and
iterwalk.
"""
+
from fontTools.misc.textTools import tostr
diff --git a/contrib/python/fonttools/fontTools/misc/filenames.py b/contrib/python/fonttools/fontTools/misc/filenames.py
index d279f89cc8..ddedc5210f 100644
--- a/contrib/python/fonttools/fontTools/misc/filenames.py
+++ b/contrib/python/fonttools/fontTools/misc/filenames.py
@@ -17,7 +17,6 @@ by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers:
- Just van Rossum
"""
-
illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]
illegalCharacters += [chr(0x7F)]
diff --git a/contrib/python/fonttools/fontTools/misc/textTools.py b/contrib/python/fonttools/fontTools/misc/textTools.py
index f7ca1acc9b..f5484a83aa 100644
--- a/contrib/python/fonttools/fontTools/misc/textTools.py
+++ b/contrib/python/fonttools/fontTools/misc/textTools.py
@@ -1,6 +1,5 @@
"""fontTools.misc.textTools.py -- miscellaneous routines."""
-
import ast
import string
diff --git a/contrib/python/fonttools/fontTools/misc/transform.py b/contrib/python/fonttools/fontTools/misc/transform.py
index f85b54b731..0f9f3a5d8b 100644
--- a/contrib/python/fonttools/fontTools/misc/transform.py
+++ b/contrib/python/fonttools/fontTools/misc/transform.py
@@ -76,7 +76,6 @@ def _normSinCos(v):
class Transform(NamedTuple):
-
"""2x2 transformation matrix plus offset, a.k.a. Affine transform.
Transform instances are immutable: all transforming methods, eg.
rotate(), return a new Transform instance.
diff --git a/contrib/python/fonttools/fontTools/misc/vector.py b/contrib/python/fonttools/fontTools/misc/vector.py
index 666ff15cf8..02c62e6512 100644
--- a/contrib/python/fonttools/fontTools/misc/vector.py
+++ b/contrib/python/fonttools/fontTools/misc/vector.py
@@ -8,7 +8,6 @@ __all__ = ["Vector"]
class Vector(tuple):
-
"""A math-like vector.
Represents an n-dimensional numeric vector. ``Vector`` objects support
diff --git a/contrib/python/fonttools/fontTools/otlLib/builder.py b/contrib/python/fonttools/fontTools/otlLib/builder.py
index 4b457f4d9f..70fd87ab57 100644
--- a/contrib/python/fonttools/fontTools/otlLib/builder.py
+++ b/contrib/python/fonttools/fontTools/otlLib/builder.py
@@ -1,11 +1,13 @@
from collections import namedtuple, OrderedDict
import os
from fontTools.misc.fixedTools import fixedToFloat
+from fontTools.misc.roundTools import otRound
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
+ OTLOffsetOverflowError,
OTTableWriter,
CountReference,
)
@@ -350,16 +352,14 @@ class ChainContextualBuilder(LookupBuilder):
return [x for x in ruleset if len(x.rules) > 0]
def getCompiledSize_(self, subtables):
- size = 0
- for st in subtables:
- w = OTTableWriter()
- w["LookupType"] = CountReference(
- {"LookupType": st.LookupType}, "LookupType"
- )
- # We need to make a copy here because compiling
- # modifies the subtable (finalizing formats etc.)
- copy.deepcopy(st).compile(w, self.font)
- size += len(w.getAllData())
+ if not subtables:
+ return 0
+ # We need to make a copy here because compiling
+ # modifies the subtable (finalizing formats etc.)
+ table = self.buildLookup_(copy.deepcopy(subtables))
+ w = OTTableWriter()
+ table.compile(w, self.font)
+ size = len(w.getAllData())
return size
def build(self):
@@ -410,22 +410,23 @@ class ChainContextualBuilder(LookupBuilder):
if not ruleset.hasAnyGlyphClasses:
candidates[1] = [self.buildFormat1Subtable(ruleset, chaining)]
+ candidates_by_size = []
for i in [1, 2, 3]:
if candidates[i]:
try:
- self.getCompiledSize_(candidates[i])
- except Exception as e:
+ size = self.getCompiledSize_(candidates[i])
+ except OTLOffsetOverflowError as e:
log.warning(
"Contextual format %i at %s overflowed (%s)"
% (i, str(self.location), e)
)
- candidates[i] = None
+ else:
+ candidates_by_size.append((size, candidates[i]))
- candidates = [x for x in candidates if x is not None]
- if not candidates:
+ if not candidates_by_size:
raise OpenTypeLibError("All candidates overflowed", self.location)
- winner = min(candidates, key=self.getCompiledSize_)
+ _min_size, winner = min(candidates_by_size, key=lambda x: x[0])
subtables.extend(winner)
# If we are not chaining, lookup type will be automatically fixed by
@@ -774,7 +775,10 @@ class ChainContextSubstBuilder(ChainContextualBuilder):
if lookup is not None:
alts = lookup.getAlternateGlyphs()
for glyph, replacements in alts.items():
- result.setdefault(glyph, set()).update(replacements)
+ alts_for_glyph = result.setdefault(glyph, [])
+ alts_for_glyph.extend(
+ g for g in replacements if g not in alts_for_glyph
+ )
return result
def find_chainable_single_subst(self, mapping):
@@ -1238,7 +1242,7 @@ class SingleSubstBuilder(LookupBuilder):
return self.buildLookup_(subtables)
def getAlternateGlyphs(self):
- return {glyph: set([repl]) for glyph, repl in self.mapping.items()}
+ return {glyph: [repl] for glyph, repl in self.mapping.items()}
def add_subtable_break(self, location):
self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
@@ -1567,19 +1571,6 @@ def buildAlternateSubstSubtable(mapping):
return self
-def _getLigatureKey(components):
- # Computes a key for ordering ligatures in a GSUB Type-4 lookup.
-
- # When building the OpenType lookup, we need to make sure that
- # the longest sequence of components is listed first, so we
- # use the negative length as the primary key for sorting.
- # To make buildLigatureSubstSubtable() deterministic, we use the
- # component sequence as the secondary key.
-
- # For example, this will sort (f,f,f) < (f,f,i) < (f,f) < (f,i) < (f,l).
- return (-len(components), components)
-
-
def buildLigatureSubstSubtable(mapping):
"""Builds a ligature substitution (GSUB4) subtable.
@@ -1613,7 +1604,7 @@ def buildLigatureSubstSubtable(mapping):
# with fontTools >= 3.1:
# self.ligatures = dict(mapping)
self.ligatures = {}
- for components in sorted(mapping.keys(), key=_getLigatureKey):
+ for components in sorted(mapping.keys(), key=self._getLigatureSortKey):
ligature = ot.Ligature()
ligature.Component = components[1:]
ligature.CompCount = len(ligature.Component) + 1
@@ -2916,3 +2907,294 @@ def _addName(ttFont, value, minNameID=0, windows=True, mac=True):
return nameTable.addMultilingualName(
names, ttFont=ttFont, windows=windows, mac=mac, minNameID=minNameID
)
+
+
+def buildMathTable(
+ ttFont,
+ constants=None,
+ italicsCorrections=None,
+ topAccentAttachments=None,
+ extendedShapes=None,
+ mathKerns=None,
+ minConnectorOverlap=0,
+ vertGlyphVariants=None,
+ horizGlyphVariants=None,
+ vertGlyphAssembly=None,
+ horizGlyphAssembly=None,
+):
+ """
+ Add a 'MATH' table to 'ttFont'.
+
+ 'constants' is a dictionary of math constants. The keys are the constant
+ names from the MATH table specification (with capital first letter), and the
+ values are the constant values as numbers.
+
+ 'italicsCorrections' is a dictionary of italic corrections. The keys are the
+ glyph names, and the values are the italic corrections as numbers.
+
+ 'topAccentAttachments' is a dictionary of top accent attachments. The keys
+ are the glyph names, and the values are the top accent horizontal positions
+ as numbers.
+
+ 'extendedShapes' is a set of extended shape glyphs.
+
+ 'mathKerns' is a dictionary of math kerns. The keys are the glyph names, and
+ the values are dictionaries. The keys of these dictionaries are the side
+ names ('TopRight', 'TopLeft', 'BottomRight', 'BottomLeft'), and the values
+ are tuples of two lists. The first list contains the correction heights as
+ numbers, and the second list contains the kern values as numbers.
+
+ 'minConnectorOverlap' is the minimum connector overlap as a number.
+
+ 'vertGlyphVariants' is a dictionary of vertical glyph variants. The keys are
+ the glyph names, and the values are tuples of glyph name and full advance height.
+
+ 'horizGlyphVariants' is a dictionary of horizontal glyph variants. The keys
+ are the glyph names, and the values are tuples of glyph name and full
+ advance width.
+
+ 'vertGlyphAssembly' is a dictionary of vertical glyph assemblies. The keys
+ are the glyph names, and the values are tuples of assembly parts and italics
+ correction. The assembly parts are tuples of glyph name, flags, start
+ connector length, end connector length, and full advance height.
+
+ 'horizGlyphAssembly' is a dictionary of horizontal glyph assemblies. The
+ keys are the glyph names, and the values are tuples of assembly parts
+ and italics correction. The assembly parts are tuples of glyph name, flags,
+ start connector length, end connector length, and full advance width.
+
+ Where a number is expected, an integer or a float can be used. The floats
+ will be rounded.
+
+ Example::
+
+ constants = {
+ "ScriptPercentScaleDown": 70,
+ "ScriptScriptPercentScaleDown": 50,
+ "DelimitedSubFormulaMinHeight": 24,
+ "DisplayOperatorMinHeight": 60,
+ ...
+ }
+ italicsCorrections = {
+ "fitalic-math": 100,
+ "fbolditalic-math": 120,
+ ...
+ }
+ topAccentAttachments = {
+ "circumflexcomb": 500,
+ "acutecomb": 400,
+ "A": 300,
+ "B": 340,
+ ...
+ }
+ extendedShapes = {"parenleft", "parenright", ...}
+ mathKerns = {
+ "A": {
+ "TopRight": ([-50, -100], [10, 20, 30]),
+ "TopLeft": ([50, 100], [10, 20, 30]),
+ ...
+ },
+ ...
+ }
+ vertGlyphVariants = {
+ "parenleft": [("parenleft", 700), ("parenleft.size1", 1000), ...],
+ "parenright": [("parenright", 700), ("parenright.size1", 1000), ...],
+ ...
+ }
+ vertGlyphAssembly = {
+ "braceleft": [
+ (
+ ("braceleft.bottom", 0, 0, 200, 500),
+                ("braceleft.extender", 1, 200, 200, 200),
+ ("braceleft.middle", 0, 100, 100, 700),
+ ("braceleft.extender", 1, 200, 200, 200),
+ ("braceleft.top", 0, 200, 0, 500),
+ ),
+ 100,
+ ],
+ ...
+ }
+ """
+ glyphMap = ttFont.getReverseGlyphMap()
+
+ ttFont["MATH"] = math = ttLib.newTable("MATH")
+ math.table = table = ot.MATH()
+ table.Version = 0x00010000
+ table.populateDefaults()
+
+ table.MathConstants = _buildMathConstants(constants)
+ table.MathGlyphInfo = _buildMathGlyphInfo(
+ glyphMap,
+ italicsCorrections,
+ topAccentAttachments,
+ extendedShapes,
+ mathKerns,
+ )
+ table.MathVariants = _buildMathVariants(
+ glyphMap,
+ minConnectorOverlap,
+ vertGlyphVariants,
+ horizGlyphVariants,
+ vertGlyphAssembly,
+ horizGlyphAssembly,
+ )
+
+
+def _buildMathConstants(constants):
+ if not constants:
+ return None
+
+ mathConstants = ot.MathConstants()
+ for conv in mathConstants.getConverters():
+ value = otRound(constants.get(conv.name, 0))
+ if conv.tableClass:
+ assert issubclass(conv.tableClass, ot.MathValueRecord)
+ value = _mathValueRecord(value)
+ setattr(mathConstants, conv.name, value)
+ return mathConstants
+
+
+def _buildMathGlyphInfo(
+ glyphMap,
+ italicsCorrections,
+ topAccentAttachments,
+ extendedShapes,
+ mathKerns,
+):
+ if not any([extendedShapes, italicsCorrections, topAccentAttachments, mathKerns]):
+ return None
+
+ info = ot.MathGlyphInfo()
+ info.populateDefaults()
+
+ if italicsCorrections:
+ coverage = buildCoverage(italicsCorrections.keys(), glyphMap)
+ info.MathItalicsCorrectionInfo = ot.MathItalicsCorrectionInfo()
+ info.MathItalicsCorrectionInfo.Coverage = coverage
+ info.MathItalicsCorrectionInfo.ItalicsCorrectionCount = len(coverage.glyphs)
+ info.MathItalicsCorrectionInfo.ItalicsCorrection = [
+ _mathValueRecord(italicsCorrections[n]) for n in coverage.glyphs
+ ]
+
+ if topAccentAttachments:
+ coverage = buildCoverage(topAccentAttachments.keys(), glyphMap)
+ info.MathTopAccentAttachment = ot.MathTopAccentAttachment()
+ info.MathTopAccentAttachment.TopAccentCoverage = coverage
+ info.MathTopAccentAttachment.TopAccentAttachmentCount = len(coverage.glyphs)
+ info.MathTopAccentAttachment.TopAccentAttachment = [
+ _mathValueRecord(topAccentAttachments[n]) for n in coverage.glyphs
+ ]
+
+ if extendedShapes:
+ info.ExtendedShapeCoverage = buildCoverage(extendedShapes, glyphMap)
+
+ if mathKerns:
+ coverage = buildCoverage(mathKerns.keys(), glyphMap)
+ info.MathKernInfo = ot.MathKernInfo()
+ info.MathKernInfo.MathKernCoverage = coverage
+ info.MathKernInfo.MathKernCount = len(coverage.glyphs)
+ info.MathKernInfo.MathKernInfoRecords = []
+ for glyph in coverage.glyphs:
+ record = ot.MathKernInfoRecord()
+ for side in {"TopRight", "TopLeft", "BottomRight", "BottomLeft"}:
+ if side in mathKerns[glyph]:
+ correctionHeights, kernValues = mathKerns[glyph][side]
+ assert len(correctionHeights) == len(kernValues) - 1
+ kern = ot.MathKern()
+ kern.HeightCount = len(correctionHeights)
+ kern.CorrectionHeight = [
+ _mathValueRecord(h) for h in correctionHeights
+ ]
+ kern.KernValue = [_mathValueRecord(v) for v in kernValues]
+ setattr(record, f"{side}MathKern", kern)
+ info.MathKernInfo.MathKernInfoRecords.append(record)
+
+ return info
+
+
+def _buildMathVariants(
+ glyphMap,
+ minConnectorOverlap,
+ vertGlyphVariants,
+ horizGlyphVariants,
+ vertGlyphAssembly,
+ horizGlyphAssembly,
+):
+ if not any(
+ [vertGlyphVariants, horizGlyphVariants, vertGlyphAssembly, horizGlyphAssembly]
+ ):
+ return None
+
+ variants = ot.MathVariants()
+ variants.populateDefaults()
+
+ variants.MinConnectorOverlap = minConnectorOverlap
+
+ if vertGlyphVariants or vertGlyphAssembly:
+ variants.VertGlyphCoverage, variants.VertGlyphConstruction = (
+ _buildMathGlyphConstruction(
+ glyphMap,
+ vertGlyphVariants,
+ vertGlyphAssembly,
+ )
+ )
+
+ if horizGlyphVariants or horizGlyphAssembly:
+ variants.HorizGlyphCoverage, variants.HorizGlyphConstruction = (
+ _buildMathGlyphConstruction(
+ glyphMap,
+ horizGlyphVariants,
+ horizGlyphAssembly,
+ )
+ )
+
+ return variants
+
+
+def _buildMathGlyphConstruction(glyphMap, variants, assemblies):
+ glyphs = set()
+ if variants:
+ glyphs.update(variants.keys())
+ if assemblies:
+ glyphs.update(assemblies.keys())
+ coverage = buildCoverage(glyphs, glyphMap)
+ constructions = []
+
+ for glyphName in coverage.glyphs:
+ construction = ot.MathGlyphConstruction()
+ construction.populateDefaults()
+
+ if variants and glyphName in variants:
+ construction.VariantCount = len(variants[glyphName])
+ construction.MathGlyphVariantRecord = []
+ for variantName, advance in variants[glyphName]:
+ record = ot.MathGlyphVariantRecord()
+ record.VariantGlyph = variantName
+ record.AdvanceMeasurement = otRound(advance)
+ construction.MathGlyphVariantRecord.append(record)
+
+ if assemblies and glyphName in assemblies:
+ parts, ic = assemblies[glyphName]
+ construction.GlyphAssembly = ot.GlyphAssembly()
+ construction.GlyphAssembly.ItalicsCorrection = _mathValueRecord(ic)
+ construction.GlyphAssembly.PartCount = len(parts)
+ construction.GlyphAssembly.PartRecords = []
+ for part in parts:
+ part_name, flags, start, end, advance = part
+ record = ot.GlyphPartRecord()
+ record.glyph = part_name
+ record.PartFlags = int(flags)
+ record.StartConnectorLength = otRound(start)
+ record.EndConnectorLength = otRound(end)
+ record.FullAdvance = otRound(advance)
+ construction.GlyphAssembly.PartRecords.append(record)
+
+ constructions.append(construction)
+
+ return coverage, constructions
+
+
+def _mathValueRecord(value):
+ value_record = ot.MathValueRecord()
+ value_record.Value = otRound(value)
+ return value_record
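A minimal usage sketch for the new buildMathTable API, assuming the import path fontTools.otlLib.builder (where this builder code lives upstream), a placeholder font path, and glyph names that actually exist in the font's glyph order:

    from fontTools.ttLib import TTFont
    from fontTools.otlLib.builder import buildMathTable

    font = TTFont("MyFont.ttf")  # placeholder path
    buildMathTable(
        font,
        constants={"ScriptPercentScaleDown": 70, "DisplayOperatorMinHeight": 60},
        italicsCorrections={"f": 100},  # numbers are rounded via otRound
        extendedShapes={"parenleft", "parenright"},
        minConnectorOverlap=20,
    )
    font.save("MyFont-MATH.ttf")

Subtables for which no data is passed are simply omitted: the _buildMathConstants, _buildMathGlyphInfo, and _buildMathVariants helpers all return None on empty input.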
diff --git a/contrib/python/fonttools/fontTools/pens/basePen.py b/contrib/python/fonttools/fontTools/pens/basePen.py
index ac8abd40ce..5d2cf5032c 100644
--- a/contrib/python/fonttools/fontTools/pens/basePen.py
+++ b/contrib/python/fonttools/fontTools/pens/basePen.py
@@ -148,7 +148,6 @@ class AbstractPen:
class NullPen(AbstractPen):
-
"""A pen that does nothing."""
def moveTo(self, pt):
@@ -187,7 +186,6 @@ class MissingComponentError(KeyError):
class DecomposingPen(LoggingPen):
-
"""Implements a 'addComponent' method that decomposes components
(i.e. draws them onto self as simple contours).
It can also be used as a mixin class (e.g. see ContourRecordingPen).
@@ -229,7 +227,6 @@ class DecomposingPen(LoggingPen):
class BasePen(DecomposingPen):
-
"""Base class for drawing pens. You must override _moveTo, _lineTo and
_curveToOne. You may additionally override _closePath, _endPath,
addComponent, addVarComponent, and/or _qCurveToOne. You should not
diff --git a/contrib/python/fonttools/fontTools/pens/boundsPen.py b/contrib/python/fonttools/fontTools/pens/boundsPen.py
index d833cc89b9..c92184413e 100644
--- a/contrib/python/fonttools/fontTools/pens/boundsPen.py
+++ b/contrib/python/fonttools/fontTools/pens/boundsPen.py
@@ -7,7 +7,6 @@ __all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
-
"""Pen to calculate the "control bounds" of a shape. This is the
bounding box of all control points, so may be larger than the
actual bounding box if there are curves that don't have points
@@ -67,7 +66,6 @@ class ControlBoundsPen(BasePen):
class BoundsPen(ControlBoundsPen):
-
"""Pen to calculate the bounds of a shape. It calculates the
correct bounds even when the shape contains curves that don't
have points on their extremes. This is somewhat slower to compute
diff --git a/contrib/python/fonttools/fontTools/pens/filterPen.py b/contrib/python/fonttools/fontTools/pens/filterPen.py
index 81423109ae..6c8712c261 100644
--- a/contrib/python/fonttools/fontTools/pens/filterPen.py
+++ b/contrib/python/fonttools/fontTools/pens/filterPen.py
@@ -9,7 +9,6 @@ class _PassThruComponentsMixin(object):
class FilterPen(_PassThruComponentsMixin, AbstractPen):
-
"""Base class for pens that apply some transformation to the coordinates
they receive and pass them to another pen.
diff --git a/contrib/python/fonttools/fontTools/pens/hashPointPen.py b/contrib/python/fonttools/fontTools/pens/hashPointPen.py
index b82468ec9c..f15dcabbfd 100644
--- a/contrib/python/fonttools/fontTools/pens/hashPointPen.py
+++ b/contrib/python/fonttools/fontTools/pens/hashPointPen.py
@@ -31,6 +31,20 @@ class HashPointPen(AbstractPointPen):
> # The hash values are identical, the outline has not changed.
> # Compile the hinting code ...
> pass
+
+ If you want to compare a glyph from a source format which supports floating point
+ coordinates and transformations against a glyph from a format which has restrictions
+ on the precision of floats, e.g. UFO vs. TTF, you must use an appropriate rounding
+ function to make the values comparable. For TTF fonts with composites, this
+ construct can be used to make the transform values conform to F2Dot14:
+
+ > ttf_hash_pen = HashPointPen(ttf_glyph_width, ttFont.getGlyphSet())
+ > ttf_round_pen = RoundingPointPen(ttf_hash_pen, transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14))
+ > ufo_hash_pen = HashPointPen(ufo_glyph.width, ufo)
+ > ttf_glyph.drawPoints(ttf_round_pen, ttFont["glyf"])
+ > ufo_round_pen = RoundingPointPen(ufo_hash_pen, transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14))
+ > ufo_glyph.drawPoints(ufo_round_pen)
+ > assert ttf_hash_pen.hash == ufo_hash_pen.hash
"""
def __init__(self, glyphWidth=0, glyphSet=None):
diff --git a/contrib/python/fonttools/fontTools/pens/pointInsidePen.py b/contrib/python/fonttools/fontTools/pens/pointInsidePen.py
index 8a579ae4c9..e1fbbbcb1d 100644
--- a/contrib/python/fonttools/fontTools/pens/pointInsidePen.py
+++ b/contrib/python/fonttools/fontTools/pens/pointInsidePen.py
@@ -10,7 +10,6 @@ __all__ = ["PointInsidePen"]
class PointInsidePen(BasePen):
-
"""This pen implements "point inside" testing: to test whether
a given point lies inside the shape (black) or outside (white).
Instances of this class can be recycled, as long as the
diff --git a/contrib/python/fonttools/fontTools/pens/quartzPen.py b/contrib/python/fonttools/fontTools/pens/quartzPen.py
index 6e1228d6f2..2b8a927dc4 100644
--- a/contrib/python/fonttools/fontTools/pens/quartzPen.py
+++ b/contrib/python/fonttools/fontTools/pens/quartzPen.py
@@ -9,7 +9,6 @@ __all__ = ["QuartzPen"]
class QuartzPen(BasePen):
-
"""A pen that creates a CGPath
Parameters
diff --git a/contrib/python/fonttools/fontTools/pens/recordingPen.py b/contrib/python/fonttools/fontTools/pens/recordingPen.py
index e24b65265a..4f44a4d59f 100644
--- a/contrib/python/fonttools/fontTools/pens/recordingPen.py
+++ b/contrib/python/fonttools/fontTools/pens/recordingPen.py
@@ -1,4 +1,5 @@
"""Pen recording operations that can be accessed or replayed."""
+
from fontTools.pens.basePen import AbstractPen, DecomposingPen
from fontTools.pens.pointPen import AbstractPointPen
diff --git a/contrib/python/fonttools/fontTools/pens/reportLabPen.py b/contrib/python/fonttools/fontTools/pens/reportLabPen.py
index 2cb89c8bf4..20c9065c71 100644
--- a/contrib/python/fonttools/fontTools/pens/reportLabPen.py
+++ b/contrib/python/fonttools/fontTools/pens/reportLabPen.py
@@ -6,7 +6,6 @@ __all__ = ["ReportLabPen"]
class ReportLabPen(BasePen):
-
"""A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
def __init__(self, glyphSet, path=None):
diff --git a/contrib/python/fonttools/fontTools/pens/roundingPen.py b/contrib/python/fonttools/fontTools/pens/roundingPen.py
index 2a7c476c36..176bcc7a55 100644
--- a/contrib/python/fonttools/fontTools/pens/roundingPen.py
+++ b/contrib/python/fonttools/fontTools/pens/roundingPen.py
@@ -1,4 +1,4 @@
-from fontTools.misc.roundTools import otRound
+from fontTools.misc.roundTools import noRound, otRound
from fontTools.misc.transform import Transform
from fontTools.pens.filterPen import FilterPen, FilterPointPen
@@ -8,7 +8,9 @@ __all__ = ["RoundingPen", "RoundingPointPen"]
class RoundingPen(FilterPen):
"""
- Filter pen that rounds point coordinates and component XY offsets to integer.
+ Filter pen that rounds point coordinates and component XY offsets to integer. For
+ rounding the component transform values, a separate round function can be passed to
+ the pen.
>>> from fontTools.pens.recordingPen import RecordingPen
>>> recpen = RecordingPen()
@@ -28,9 +30,10 @@ class RoundingPen(FilterPen):
True
"""
- def __init__(self, outPen, roundFunc=otRound):
+ def __init__(self, outPen, roundFunc=otRound, transformRoundFunc=noRound):
super().__init__(outPen)
self.roundFunc = roundFunc
+ self.transformRoundFunc = transformRoundFunc
def moveTo(self, pt):
self._outPen.moveTo((self.roundFunc(pt[0]), self.roundFunc(pt[1])))
@@ -49,12 +52,16 @@ class RoundingPen(FilterPen):
)
def addComponent(self, glyphName, transformation):
+ xx, xy, yx, yy, dx, dy = transformation
self._outPen.addComponent(
glyphName,
Transform(
- *transformation[:4],
- self.roundFunc(transformation[4]),
- self.roundFunc(transformation[5]),
+ self.transformRoundFunc(xx),
+ self.transformRoundFunc(xy),
+ self.transformRoundFunc(yx),
+ self.transformRoundFunc(yy),
+ self.roundFunc(dx),
+ self.roundFunc(dy),
),
)
@@ -62,6 +69,8 @@ class RoundingPen(FilterPen):
class RoundingPointPen(FilterPointPen):
"""
Filter point pen that rounds point coordinates and component XY offsets to integer.
+ For rounding the component scale values, a separate round function can be passed to
+ the pen.
>>> from fontTools.pens.recordingPen import RecordingPointPen
>>> recpen = RecordingPointPen()
@@ -87,26 +96,35 @@ class RoundingPointPen(FilterPointPen):
True
"""
- def __init__(self, outPen, roundFunc=otRound):
+ def __init__(self, outPen, roundFunc=otRound, transformRoundFunc=noRound):
super().__init__(outPen)
self.roundFunc = roundFunc
+ self.transformRoundFunc = transformRoundFunc
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
+ def addPoint(
+ self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
+ ):
self._outPen.addPoint(
(self.roundFunc(pt[0]), self.roundFunc(pt[1])),
segmentType=segmentType,
smooth=smooth,
name=name,
+ identifier=identifier,
**kwargs,
)
- def addComponent(self, baseGlyphName, transformation, **kwargs):
+ def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
+ xx, xy, yx, yy, dx, dy = transformation
self._outPen.addComponent(
- baseGlyphName,
- Transform(
- *transformation[:4],
- self.roundFunc(transformation[4]),
- self.roundFunc(transformation[5]),
+ baseGlyphName=baseGlyphName,
+ transformation=Transform(
+ self.transformRoundFunc(xx),
+ self.transformRoundFunc(xy),
+ self.transformRoundFunc(yx),
+ self.transformRoundFunc(yy),
+ self.roundFunc(dx),
+ self.roundFunc(dy),
),
+ identifier=identifier,
**kwargs,
)
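A short sketch of the new transformRoundFunc hook, recording the rounded component with a RecordingPointPen; the expected values follow from F2Dot14 rounding of 0.35 and otRound of the offsets:

    from functools import partial
    from fontTools.misc.fixedTools import floatToFixedToFloat
    from fontTools.pens.recordingPen import RecordingPointPen
    from fontTools.pens.roundingPen import RoundingPointPen

    recpen = RecordingPointPen()
    roundpen = RoundingPointPen(
        recpen,
        # offsets still go through otRound (the default roundFunc),
        # while the 2x2 transform values snap to F2Dot14 precision
        transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14),
    )
    roundpen.addComponent("a", (0.35, 0, 0, 0.35, 12.7, 0.4))
    # recpen.value now holds the component with transform
    # (0.3499755859375, 0, 0, 0.3499755859375, 13, 0)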
diff --git a/contrib/python/fonttools/fontTools/pens/statisticsPen.py b/contrib/python/fonttools/fontTools/pens/statisticsPen.py
index 403ef39f9e..699b14ca79 100644
--- a/contrib/python/fonttools/fontTools/pens/statisticsPen.py
+++ b/contrib/python/fonttools/fontTools/pens/statisticsPen.py
@@ -1,5 +1,6 @@
"""Pen calculating area, center of mass, variance and standard-deviation,
covariance and correlation, and slant, of glyph shapes."""
+
from math import sqrt, degrees, atan
from fontTools.pens.basePen import BasePen, OpenContourError
from fontTools.pens.momentsPen import MomentsPen
@@ -52,7 +53,6 @@ class StatisticsBase:
class StatisticsPen(StatisticsBase, MomentsPen):
-
"""Pen calculating area, center of mass, variance and
standard-deviation, covariance and correlation, and slant,
of glyph shapes.
@@ -91,7 +91,6 @@ class StatisticsPen(StatisticsBase, MomentsPen):
class StatisticsControlPen(StatisticsBase, BasePen):
-
"""Pen calculating area, center of mass, variance and
standard-deviation, covariance and correlation, and slant,
of glyph shapes, using the control polygon only.
diff --git a/contrib/python/fonttools/fontTools/pens/svgPathPen.py b/contrib/python/fonttools/fontTools/pens/svgPathPen.py
index 53b3683f2d..29d41a8029 100644
--- a/contrib/python/fonttools/fontTools/pens/svgPathPen.py
+++ b/contrib/python/fonttools/fontTools/pens/svgPathPen.py
@@ -220,13 +220,19 @@ def main(args=None):
"fonttools pens.svgPathPen", description="Generate SVG from text"
)
parser.add_argument("font", metavar="font.ttf", help="Font file.")
- parser.add_argument("text", metavar="text", help="Text string.")
+ parser.add_argument("text", metavar="text", nargs="?", help="Text string.")
parser.add_argument(
"-y",
metavar="<number>",
help="Face index into a collection to open. Zero based.",
)
parser.add_argument(
+ "--glyphs",
+ metavar="whitespace-separated list of glyph names",
+ type=str,
+ help="Glyphs to show. Exclusive with text option",
+ )
+ parser.add_argument(
"--variations",
metavar="AXIS=LOC",
default="",
@@ -241,6 +247,7 @@ def main(args=None):
font = TTFont(options.font, fontNumber=fontNumber)
text = options.text
+ glyphs = options.glyphs
location = {}
for tag_v in options.variations.split():
@@ -255,10 +262,17 @@ def main(args=None):
glyphset = font.getGlyphSet(location=location)
cmap = font["cmap"].getBestCmap()
+ if glyphs is not None and text is not None:
+ raise ValueError("Options --glyphs and --text are exclusive")
+
+ if glyphs is None:
+ glyphs = " ".join(cmap[ord(u)] for u in text)
+
+ glyphs = glyphs.split()
+
s = ""
width = 0
- for u in text:
- g = cmap[ord(u)]
+ for g in glyphs:
glyph = glyphset[g]
pen = SVGPathPen(glyphset)
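With the new --glyphs option the demo can be driven by glyph names instead of text. A usage sketch via the module's main() entry point, with the font path as a placeholder; passing both text and --glyphs now raises ValueError:

    from fontTools.pens.svgPathPen import main

    main(["MyFont.ttf", "Hello"])  # text, as before
    main(["MyFont.ttf", "--glyphs", "parenleft parenright"])  # glyph names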
diff --git a/contrib/python/fonttools/fontTools/pens/teePen.py b/contrib/python/fonttools/fontTools/pens/teePen.py
index 2828175a7c..939f049b9f 100644
--- a/contrib/python/fonttools/fontTools/pens/teePen.py
+++ b/contrib/python/fonttools/fontTools/pens/teePen.py
@@ -1,4 +1,5 @@
"""Pen multiplexing drawing to one or more pens."""
+
from fontTools.pens.basePen import AbstractPen
diff --git a/contrib/python/fonttools/fontTools/pens/transformPen.py b/contrib/python/fonttools/fontTools/pens/transformPen.py
index 2e572f612e..ff98dbddb0 100644
--- a/contrib/python/fonttools/fontTools/pens/transformPen.py
+++ b/contrib/python/fonttools/fontTools/pens/transformPen.py
@@ -5,7 +5,6 @@ __all__ = ["TransformPen", "TransformPointPen"]
class TransformPen(FilterPen):
-
"""Pen that transforms all coordinates using a Affine transformation,
and passes them to another pen.
"""
diff --git a/contrib/python/fonttools/fontTools/subset/__init__.py b/contrib/python/fonttools/fontTools/subset/__init__.py
index 9b1758435c..250a07ef1a 100644
--- a/contrib/python/fonttools/fontTools/subset/__init__.py
+++ b/contrib/python/fonttools/fontTools/subset/__init__.py
@@ -3733,6 +3733,3 @@ __all__ = [
"parse_unicodes",
"main",
]
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/contrib/python/fonttools/fontTools/svgLib/path/arc.py b/contrib/python/fonttools/fontTools/svgLib/path/arc.py
index 3e0a211e04..4b2aa5c97b 100644
--- a/contrib/python/fonttools/fontTools/svgLib/path/arc.py
+++ b/contrib/python/fonttools/fontTools/svgLib/path/arc.py
@@ -4,6 +4,7 @@ The code is mostly adapted from Blink's SVGPathNormalizer::DecomposeArcToCubic
https://github.com/chromium/chromium/blob/93831f2/third_party/
blink/renderer/core/svg/svg_path_parser.cc#L169-L278
"""
+
from fontTools.misc.transform import Identity, Scale
from math import atan2, ceil, cos, fabs, isfinite, pi, radians, sin, sqrt, tan
diff --git a/contrib/python/fonttools/fontTools/t1Lib/__init__.py b/contrib/python/fonttools/fontTools/t1Lib/__init__.py
index a64f78097c..0475881e91 100644
--- a/contrib/python/fonttools/fontTools/t1Lib/__init__.py
+++ b/contrib/python/fonttools/fontTools/t1Lib/__init__.py
@@ -15,6 +15,7 @@ write(path, data, kind='OTHER', dohex=False)
part should be written as hexadecimal or binary, but only if kind
is 'OTHER'.
"""
+
import fontTools
from fontTools.misc import eexec
from fontTools.misc.macCreatorType import getMacCreatorAndType
@@ -49,7 +50,6 @@ class T1Error(Exception):
class T1Font(object):
-
"""Type 1 font class.
Uses a minimal interpreter that supports just about enough PS to parse
diff --git a/contrib/python/fonttools/fontTools/ttLib/macUtils.py b/contrib/python/fonttools/fontTools/ttLib/macUtils.py
index 468a75ad6d..0959a6fc27 100644
--- a/contrib/python/fonttools/fontTools/ttLib/macUtils.py
+++ b/contrib/python/fonttools/fontTools/ttLib/macUtils.py
@@ -1,4 +1,5 @@
"""ttLib.macUtils.py -- Various Mac-specific stuff."""
+
from io import BytesIO
from fontTools.misc.macRes import ResourceReader, ResourceError
@@ -35,7 +36,6 @@ def openTTFonts(path):
class SFNTResourceReader(BytesIO):
-
"""Simple read-only file wrapper for 'sfnt' resources."""
def __init__(self, path, res_name_or_index):
diff --git a/contrib/python/fonttools/fontTools/ttLib/removeOverlaps.py b/contrib/python/fonttools/fontTools/ttLib/removeOverlaps.py
index 624cd47b40..4795320669 100644
--- a/contrib/python/fonttools/fontTools/ttLib/removeOverlaps.py
+++ b/contrib/python/fonttools/fontTools/ttLib/removeOverlaps.py
@@ -202,9 +202,11 @@ def removeOverlaps(
glyphNames = sorted(
glyphNames,
key=lambda name: (
- glyfTable[name].getCompositeMaxpValues(glyfTable).maxComponentDepth
- if glyfTable[name].isComposite()
- else 0,
+ (
+ glyfTable[name].getCompositeMaxpValues(glyfTable).maxComponentDepth
+ if glyfTable[name].isComposite()
+ else 0
+ ),
name,
),
)
diff --git a/contrib/python/fonttools/fontTools/ttLib/scaleUpem.py b/contrib/python/fonttools/fontTools/ttLib/scaleUpem.py
index 3f9b22af8f..2909bfcb2c 100644
--- a/contrib/python/fonttools/fontTools/ttLib/scaleUpem.py
+++ b/contrib/python/fonttools/fontTools/ttLib/scaleUpem.py
@@ -3,7 +3,6 @@
AAT and Graphite tables are not supported. CFF/CFF2 fonts
are de-subroutinized."""
-
from fontTools.ttLib.ttVisitor import TTVisitor
import fontTools.ttLib as ttLib
import fontTools.ttLib.tables.otBase as otBase
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/C_O_L_R_.py b/contrib/python/fonttools/fontTools/ttLib/tables/C_O_L_R_.py
index 2f03ec054f..df857842cc 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/C_O_L_R_.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/C_O_L_R_.py
@@ -7,7 +7,6 @@ from . import DefaultTable
class table_C_O_L_R_(DefaultTable.DefaultTable):
-
"""This table is structured so that you can treat it like a dictionary keyed by glyph name.
``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/O_S_2f_2.py b/contrib/python/fonttools/fontTools/ttLib/tables/O_S_2f_2.py
index edff91f58d..0c739bcc44 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/O_S_2f_2.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/O_S_2f_2.py
@@ -113,7 +113,6 @@ OS2_format_5_addition = bigendian + OS2_format_5_addition
class table_O_S_2f_2(DefaultTable.DefaultTable):
-
"""the OS/2 table"""
dependencies = ["head"]
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__0.py b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__0.py
index f15fc67bce..77905822a8 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__0.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__0.py
@@ -5,6 +5,7 @@ TSI0 is the index table containing the lengths and offsets for the glyph
programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained
in the TSI1 table.
"""
+
from . import DefaultTable
import struct
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__1.py b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__1.py
index 55aca33991..a9d04a09b0 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__1.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__1.py
@@ -4,6 +4,7 @@ tool to store its hinting source data.
TSI1 contains the text of the glyph programs in the form of low-level assembly
code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
"""
+
from . import DefaultTable
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.textTools import strjoin, tobytes, tostr
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__2.py b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__2.py
index 4278be1556..163ef45226 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__2.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__2.py
@@ -5,6 +5,7 @@ TSI2 is the index table containing the lengths and offsets for the glyph
programs that are contained in the TSI3 table. It uses the same format as
the TSI0 table.
"""
+
from fontTools import ttLib
superclass = ttLib.getTableClass("TSI0")
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__3.py b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__3.py
index 785ca23152..604a7f0beb 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__3.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__3.py
@@ -3,6 +3,7 @@ tool to store its hinting source data.
TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code.
"""
+
from fontTools import ttLib
superclass = ttLib.getTableClass("TSI1")
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__5.py b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__5.py
index 5edc86a9cb..d86798695c 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__5.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/T_S_I__5.py
@@ -3,6 +3,7 @@ tool to store its hinting source data.
TSI5 contains the VTT character groups.
"""
+
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import sys
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/TupleVariation.py b/contrib/python/fonttools/fontTools/ttLib/tables/TupleVariation.py
index 30d009906d..027ac15342 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/TupleVariation.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/TupleVariation.py
@@ -517,22 +517,22 @@ class TupleVariation(object):
return # no change
coordWidth = self.getCoordWidth()
self.coordinates = [
- None
- if d is None
- else d * scalar
- if coordWidth == 1
- else (d[0] * scalar, d[1] * scalar)
+ (
+ None
+ if d is None
+ else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar)
+ )
for d in self.coordinates
]
def roundDeltas(self):
coordWidth = self.getCoordWidth()
self.coordinates = [
- None
- if d is None
- else otRound(d)
- if coordWidth == 1
- else (otRound(d[0]), otRound(d[1]))
+ (
+ None
+ if d is None
+ else otRound(d) if coordWidth == 1 else (otRound(d[0]), otRound(d[1]))
+ )
for d in self.coordinates
]
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/V_O_R_G_.py b/contrib/python/fonttools/fontTools/ttLib/tables/V_O_R_G_.py
index 4508c137d6..b08737b224 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/V_O_R_G_.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/V_O_R_G_.py
@@ -4,7 +4,6 @@ import struct
class table_V_O_R_G_(DefaultTable.DefaultTable):
-
"""This table is structured so that you can treat it like a dictionary keyed by glyph name.
``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/_k_e_r_n.py b/contrib/python/fonttools/fontTools/ttLib/tables/_k_e_r_n.py
index 8f55a311cd..270b3b7e44 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/_k_e_r_n.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/_k_e_r_n.py
@@ -147,9 +147,9 @@ class KernTable_format_0(object):
except IndexError:
# Slower, but will not throw an IndexError on an invalid
# glyph id.
- kernTable[
- (ttFont.getGlyphName(left), ttFont.getGlyphName(right))
- ] = value
+ kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = (
+ value
+ )
if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
log.warning(
"excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/otBase.py b/contrib/python/fonttools/fontTools/ttLib/tables/otBase.py
index d565603b1f..53abd13b48 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/otBase.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/otBase.py
@@ -79,7 +79,6 @@ class RepackerState(IntEnum):
class BaseTTXConverter(DefaultTable):
-
"""Generic base class for TTX table converters. It functions as an
adapter between the TTX (ttLib actually) table model and the model
we use for OpenType tables, which is necessarily subtly different.
@@ -260,7 +259,6 @@ assert array.array("i").itemsize == 4, "Oops, file a bug against fonttools."
class OTTableReader(object):
-
"""Helper class to retrieve data from an OpenType table."""
__slots__ = ("data", "offset", "pos", "localState", "tableTag")
@@ -392,7 +390,6 @@ class OffsetToWriter(object):
class OTTableWriter(object):
-
"""Helper class to gather and assemble data for OpenType tables."""
def __init__(self, localState=None, tableTag=None):
@@ -882,7 +879,6 @@ def packUInt24(value):
class BaseTable(object):
-
"""Generic base class for all OpenType (sub)tables."""
def __getattr__(self, attr):
@@ -1210,7 +1206,6 @@ class BaseTable(object):
class FormatSwitchingBaseTable(BaseTable):
-
"""Minor specialization of BaseTable, for tables that have multiple
formats, eg. CoverageFormat1 vs. CoverageFormat2."""
@@ -1335,7 +1330,6 @@ valueRecordFormatDict = _buildDict()
class ValueRecordFactory(object):
-
"""Given a format code, this object convert ValueRecords."""
def __init__(self, valueFormat):
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py b/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py
index 390f1660e8..afe4e538f4 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py
@@ -146,7 +146,6 @@ class _LazyList(UserList):
class BaseConverter(object):
-
"""Base class for converter objects. Apart from the constructor, this
is an abstract class."""
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/otTables.py b/contrib/python/fonttools/fontTools/ttLib/tables/otTables.py
index 262f8d4187..3505f42337 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/otTables.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/otTables.py
@@ -1123,6 +1123,35 @@ class LigatureSubst(FormatSwitchingBaseTable):
self.ligatures = ligatures
del self.Format # Don't need this anymore
+ @staticmethod
+ def _getLigatureSortKey(components):
+ # Computes a key for ordering ligatures in a GSUB Type-4 lookup.
+
+ # When building the OpenType lookup, we need to make sure that
+ # the longest sequence of components is listed first, so we
+ # use the negative length as the key for sorting.
+ # Note, we no longer need to worry about deterministic order because the
+ # ligature mapping `dict` remembers the insertion order, and this in
+ # turn depends on the order in which the ligatures are written in the FEA.
+        # Since Python's sort algorithm is stable, the ligatures of equal length
+ # will keep the relative order in which they appear in the feature file.
+ # For example, given the following ligatures (all starting with 'f' and
+ # thus belonging to the same LigatureSet):
+ #
+ # feature liga {
+ # sub f i by f_i;
+ # sub f f f by f_f_f;
+ # sub f f by f_f;
+ # sub f f i by f_f_i;
+ # } liga;
+ #
+ # this should sort to: f_f_f, f_f_i, f_i, f_f
+ # This is also what fea-rs does, see:
+ # https://github.com/adobe-type-tools/afdko/issues/1727
+ # https://github.com/fonttools/fonttools/issues/3428
+ # https://github.com/googlefonts/fontc/pull/680
+ return -len(components)
+
def preWrite(self, font):
self.Format = 1
ligatures = getattr(self, "ligatures", None)
@@ -1135,13 +1164,11 @@ class LigatureSubst(FormatSwitchingBaseTable):
# ligatures is map from components-sequence to lig-glyph
newLigatures = dict()
- for comps, lig in sorted(
- ligatures.items(), key=lambda item: (-len(item[0]), item[0])
- ):
+ for comps in sorted(ligatures.keys(), key=self._getLigatureSortKey):
ligature = Ligature()
ligature.Component = comps[1:]
ligature.CompCount = len(comps)
- ligature.LigGlyph = lig
+ ligature.LigGlyph = ligatures[comps]
newLigatures.setdefault(comps[0], []).append(ligature)
ligatures = newLigatures
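A quick check of the new sort key against the FEA example in the comment above, with component tuples standing in for the parsed ligature components:

    ligatures = {  # insertion order mirrors the feature file
        ("f", "i"): "f_i",
        ("f", "f", "f"): "f_f_f",
        ("f", "f"): "f_f",
        ("f", "f", "i"): "f_f_i",
    }
    order = sorted(ligatures, key=lambda comps: -len(comps))
    assert order == [("f", "f", "f"), ("f", "f", "i"), ("f", "i"), ("f", "f")]

Because Python's sort is stable, the two length-3 ligatures keep their feature-file order, giving exactly the f_f_f, f_f_i, f_i, f_f ordering quoted in the comment.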
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/otTraverse.py b/contrib/python/fonttools/fontTools/ttLib/tables/otTraverse.py
index bf22dcfdb5..ac94218723 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/otTraverse.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/otTraverse.py
@@ -1,4 +1,5 @@
"""Methods for traversing trees of otData-driven OpenType tables."""
+
from collections import deque
from typing import Callable, Deque, Iterable, List, Optional, Tuple
from .otBase import BaseTable
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/sbixGlyph.py b/contrib/python/fonttools/fontTools/ttLib/tables/sbixGlyph.py
index fd687a1880..b744a2a3bc 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/sbixGlyph.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/sbixGlyph.py
@@ -54,6 +54,10 @@ class Glyph(object):
# pad with spaces
self.graphicType += "    "[: (4 - len(self.graphicType))]
+ def is_reference_type(self):
+ """Returns True if this glyph is a reference to another glyph's image data."""
+ return self.graphicType == "dupe" or self.graphicType == "flip"
+
def decompile(self, ttFont):
self.glyphName = ttFont.getGlyphName(self.gid)
if self.rawdata is None:
@@ -71,7 +75,7 @@ class Glyph(object):
sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self
)
- if self.graphicType == "dupe":
+ if self.is_reference_type():
# this glyph is a reference to another glyph's image data
(gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
self.referenceGlyphName = ttFont.getGlyphName(gid)
@@ -94,7 +98,7 @@ class Glyph(object):
rawdata = b""
else:
rawdata = sstruct.pack(sbixGlyphHeaderFormat, self)
- if self.graphicType == "dupe":
+ if self.is_reference_type():
rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName))
else:
assert self.imageData is not None
@@ -117,8 +121,8 @@ class Glyph(object):
originOffsetY=self.originOffsetY,
)
xmlWriter.newline()
- if self.graphicType == "dupe":
- # graphicType == "dupe" is a reference to another glyph id.
+ if self.is_reference_type():
+ # this glyph is a reference to another glyph id.
xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
else:
xmlWriter.begintag("hexdata")
@@ -131,7 +135,7 @@ class Glyph(object):
def fromXML(self, name, attrs, content, ttFont):
if name == "ref":
- # glyph is a "dupe", i.e. a reference to another glyph's image data.
+            # this glyph is a reference to another glyph's image data.
# in this case imageData contains the glyph id of the reference glyph
# get glyph id from glyphname
glyphname = safeEval("'''" + attrs["glyphname"] + "'''")
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/ttProgram.py b/contrib/python/fonttools/fontTools/ttLib/tables/ttProgram.py
index 84aa63f363..32a4ec8b20 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/ttProgram.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/ttProgram.py
@@ -1,4 +1,5 @@
"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
+
from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
diff --git a/contrib/python/fonttools/fontTools/ttLib/ttCollection.py b/contrib/python/fonttools/fontTools/ttLib/ttCollection.py
index 70ed4b7a0d..f01bc42be3 100644
--- a/contrib/python/fonttools/fontTools/ttLib/ttCollection.py
+++ b/contrib/python/fonttools/fontTools/ttLib/ttCollection.py
@@ -8,7 +8,6 @@ log = logging.getLogger(__name__)
class TTCollection(object):
-
"""Object representing a TrueType Collection / OpenType Collection.
The main API is self.fonts being a list of TTFont instances.
diff --git a/contrib/python/fonttools/fontTools/ttLib/ttFont.py b/contrib/python/fonttools/fontTools/ttLib/ttFont.py
index c8c74fecfe..ad62a187de 100644
--- a/contrib/python/fonttools/fontTools/ttLib/ttFont.py
+++ b/contrib/python/fonttools/fontTools/ttLib/ttFont.py
@@ -15,7 +15,6 @@ log = logging.getLogger(__name__)
class TTFont(object):
-
"""Represents a TrueType font.
The object manages file input and output, and offers a convenient way of
@@ -843,7 +842,6 @@ class TTFont(object):
class GlyphOrder(object):
-
"""A pseudo table. The glyph order isn't in the font as a separate
table, but it's nice to present it as such in the TTX format.
"""
diff --git a/contrib/python/fonttools/fontTools/ttLib/ttGlyphSet.py b/contrib/python/fonttools/fontTools/ttLib/ttGlyphSet.py
index 5d188d6a10..b4beb3e766 100644
--- a/contrib/python/fonttools/fontTools/ttLib/ttGlyphSet.py
+++ b/contrib/python/fonttools/fontTools/ttLib/ttGlyphSet.py
@@ -17,7 +17,6 @@ from fontTools.pens.recordingPen import (
class _TTGlyphSet(Mapping):
-
"""Generic dict-like GlyphSet class that pulls metrics from hmtx and
glyph shape from TrueType or CFF.
"""
@@ -125,7 +124,6 @@ class _TTGlyphSetCFF(_TTGlyphSet):
class _TTGlyph(ABC):
-
"""Glyph object that supports the Pen protocol, meaning that it has
.draw() and .drawPoints() methods that take a pen object as their only
argument. Additionally there are 'width' and 'lsb' attributes, read from
diff --git a/contrib/python/fonttools/fontTools/ttx.py b/contrib/python/fonttools/fontTools/ttx.py
index d8c2a3a758..e7a068748b 100644
--- a/contrib/python/fonttools/fontTools/ttx.py
+++ b/contrib/python/fonttools/fontTools/ttx.py
@@ -103,7 +103,6 @@ Compile options
extension is available at https://pypi.python.org/pypi/zopfli
"""
-
from fontTools.ttLib import TTFont, TTLibError
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.unicode import setUnicodeData
diff --git a/contrib/python/fonttools/fontTools/ufoLib/__init__.py b/contrib/python/fonttools/fontTools/ufoLib/__init__.py
index 1a456a206f..c2d2b0b266 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/__init__.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/__init__.py
@@ -197,7 +197,6 @@ class _UFOBaseIO:
class UFOReader(_UFOBaseIO):
-
"""
Read the various components of the .ufo.
@@ -881,7 +880,6 @@ class UFOReader(_UFOBaseIO):
class UFOWriter(UFOReader):
-
"""
Write the various components of the .ufo.
diff --git a/contrib/python/fonttools/fontTools/ufoLib/converters.py b/contrib/python/fonttools/fontTools/ufoLib/converters.py
index daccf78272..88a26c616a 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/converters.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/converters.py
@@ -2,7 +2,6 @@
Conversion functions.
"""
-
# adapted from the UFO spec
diff --git a/contrib/python/fonttools/fontTools/ufoLib/etree.py b/contrib/python/fonttools/fontTools/ufoLib/etree.py
index 5054f8169a..77e3c16e2b 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/etree.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/etree.py
@@ -2,4 +2,5 @@
for the old ufoLib.etree module, which was moved to fontTools.misc.etree.
Please use the latter instead.
"""
+
from fontTools.misc.etree import *
diff --git a/contrib/python/fonttools/fontTools/ufoLib/glifLib.py b/contrib/python/fonttools/fontTools/ufoLib/glifLib.py
index 6dee9db302..62e87db0df 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/glifLib.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/glifLib.py
@@ -91,7 +91,6 @@ GLIFFormatVersion.__str__ = _VersionTupleEnumMixin.__str__
class Glyph:
-
"""
Minimal glyph object. It has no glyph attributes until either
the draw() or the drawPoints() method has been called.
@@ -123,7 +122,6 @@ class Glyph:
class GlyphSet(_UFOBaseIO):
-
"""
GlyphSet manages a set of .glif files inside one directory.
@@ -1228,9 +1226,9 @@ def _readGlyphFromTreeFormat2(
unicodes = []
guidelines = []
anchors = []
- haveSeenAdvance = (
- haveSeenImage
- ) = haveSeenOutline = haveSeenLib = haveSeenNote = False
+ haveSeenAdvance = haveSeenImage = haveSeenOutline = haveSeenLib = haveSeenNote = (
+ False
+ )
identifiers = set()
for element in tree:
if element.tag == "outline":
@@ -1883,7 +1881,6 @@ _transformationInfo = [
class GLIFPointPen(AbstractPointPen):
-
"""
Helper class using the PointPen protocol to write the <outline>
part of .glif files.
diff --git a/contrib/python/fonttools/fontTools/ufoLib/plistlib.py b/contrib/python/fonttools/fontTools/ufoLib/plistlib.py
index 1f52f20a2b..38bb266b21 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/plistlib.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/plistlib.py
@@ -2,6 +2,7 @@
for the old ufoLib.plistlib module, which was moved to fontTools.misc.plistlib.
Please use the latter instead.
"""
+
from fontTools.misc.plistlib import dump, dumps, load, loads
from fontTools.misc.textTools import tobytes
diff --git a/contrib/python/fonttools/fontTools/ufoLib/pointPen.py b/contrib/python/fonttools/fontTools/ufoLib/pointPen.py
index 3433fdbc96..baef9a583e 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/pointPen.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/pointPen.py
@@ -2,4 +2,5 @@
for the old ufoLib.pointPen module, which was moved to fontTools.pens.pointPen.
Please use the latter instead.
"""
+
from fontTools.pens.pointPen import *
diff --git a/contrib/python/fonttools/fontTools/ufoLib/utils.py b/contrib/python/fonttools/fontTools/ufoLib/utils.py
index 85878b47a1..45ec1c564b 100644
--- a/contrib/python/fonttools/fontTools/ufoLib/utils.py
+++ b/contrib/python/fonttools/fontTools/ufoLib/utils.py
@@ -1,6 +1,7 @@
"""The module contains miscellaneous helpers.
It's not considered part of the public ufoLib API.
"""
+
import warnings
import functools
diff --git a/contrib/python/fonttools/fontTools/unicodedata/__init__.py b/contrib/python/fonttools/fontTools/unicodedata/__init__.py
index 808c9c722e..06eb4619a9 100644
--- a/contrib/python/fonttools/fontTools/unicodedata/__init__.py
+++ b/contrib/python/fonttools/fontTools/unicodedata/__init__.py
@@ -201,15 +201,13 @@ T = TypeVar("T")
@overload
-def script_horizontal_direction(script_code: str, default: T) -> HorizDirection | T:
- ...
+def script_horizontal_direction(script_code: str, default: T) -> HorizDirection | T: ...
@overload
def script_horizontal_direction(
script_code: str, default: type[KeyError] = KeyError
-) -> HorizDirection:
- ...
+) -> HorizDirection: ...
def script_horizontal_direction(
diff --git a/contrib/python/fonttools/fontTools/varLib/__init__.py b/contrib/python/fonttools/fontTools/varLib/__init__.py
index 46834f6433..1e0f2ec2f4 100644
--- a/contrib/python/fonttools/fontTools/varLib/__init__.py
+++ b/contrib/python/fonttools/fontTools/varLib/__init__.py
@@ -18,6 +18,7 @@ Then you can make a variable-font this way:
API *will* change in near future.
"""
+
from typing import List
from fontTools.misc.vector import Vector
from fontTools.misc.roundTools import noRound, otRound
@@ -216,8 +217,6 @@ def _add_avar(font, axes, mappings, axisTags):
if mappings:
interesting = True
- hiddenAxes = [axis for axis in axes.values() if axis.hidden]
-
inputLocations = [
{
axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])
@@ -571,9 +570,11 @@ def _get_advance_metrics(
sparse_advance = 0xFFFF
for glyph in glyphOrder:
vhAdvances = [
- metrics[glyph][0]
- if glyph in metrics and metrics[glyph][0] != sparse_advance
- else None
+ (
+ metrics[glyph][0]
+ if glyph in metrics and metrics[glyph][0] != sparse_advance
+ else None
+ )
for metrics in advMetricses
]
vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(
@@ -752,10 +753,14 @@ def _add_BASE(font, masterModel, master_ttfs, axisTags):
def _merge_OTL(font, model, master_fonts, axisTags):
+ otl_tags = ["GSUB", "GDEF", "GPOS"]
+ if not any(tag in font for tag in otl_tags):
+ return
+
log.info("Merging OpenType Layout tables")
merger = VariationMerger(model, axisTags, font)
- merger.mergeTables(font, master_fonts, ["GSUB", "GDEF", "GPOS"])
+ merger.mergeTables(font, master_fonts, otl_tags)
store = merger.store_builder.finish()
if not store:
return
diff --git a/contrib/python/fonttools/fontTools/varLib/featureVars.py b/contrib/python/fonttools/fontTools/varLib/featureVars.py
index 828b843594..2e957f5585 100644
--- a/contrib/python/fonttools/fontTools/varLib/featureVars.py
+++ b/contrib/python/fonttools/fontTools/varLib/featureVars.py
@@ -3,6 +3,7 @@ https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#featurevariat
NOTE: The API is experimental and subject to change.
"""
+
from fontTools.misc.dictTools import hashdict
from fontTools.misc.intTools import bit_count
from fontTools.ttLib import newTable
diff --git a/contrib/python/fonttools/fontTools/varLib/instancer/__init__.py b/contrib/python/fonttools/fontTools/varLib/instancer/__init__.py
index d1cde0df7a..89427dc534 100644
--- a/contrib/python/fonttools/fontTools/varLib/instancer/__init__.py
+++ b/contrib/python/fonttools/fontTools/varLib/instancer/__init__.py
@@ -82,6 +82,7 @@ are supported, but support for CFF2 variable fonts will be added soon.
The discussion and implementation of these features are tracked at
https://github.com/fonttools/fonttools/issues/1537
"""
+
from fontTools.misc.fixedTools import (
floatToFixedToFloat,
strToFixedToFloat,
@@ -614,7 +615,7 @@ def _instantiateGvarGlyph(
if optimize:
isComposite = glyf[glyphname].isComposite()
for var in tupleVarStore:
- var.optimize(coordinates, endPts, isComposite)
+ var.optimize(coordinates, endPts, isComposite=isComposite)
def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
@@ -643,9 +644,11 @@ def instantiateGvar(varfont, axisLimits, optimize=True):
glyphnames = sorted(
glyf.glyphOrder,
key=lambda name: (
- glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
- if glyf[name].isComposite() or glyf[name].isVarComposite()
- else 0,
+ (
+ glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
+ if glyf[name].isComposite() or glyf[name].isVarComposite()
+ else 0
+ ),
name,
),
)
diff --git a/contrib/python/fonttools/fontTools/varLib/interpolate_layout.py b/contrib/python/fonttools/fontTools/varLib/interpolate_layout.py
index aa3f49c6ed..798b29590d 100644
--- a/contrib/python/fonttools/fontTools/varLib/interpolate_layout.py
+++ b/contrib/python/fonttools/fontTools/varLib/interpolate_layout.py
@@ -1,6 +1,7 @@
"""
Interpolate OpenType Layout tables (GDEF / GPOS / GSUB).
"""
+
from fontTools.ttLib import TTFont
from fontTools.varLib import models, VarLibError, load_designspace, load_masters
from fontTools.varLib.merger import InstancerMerger
diff --git a/contrib/python/fonttools/fontTools/varLib/merger.py b/contrib/python/fonttools/fontTools/varLib/merger.py
index 96029166a7..61122f4c67 100644
--- a/contrib/python/fonttools/fontTools/varLib/merger.py
+++ b/contrib/python/fonttools/fontTools/varLib/merger.py
@@ -1,6 +1,7 @@
"""
Merge OpenType Layout tables (GDEF / GPOS / GSUB).
"""
+
import os
import copy
import enum
diff --git a/contrib/python/fonttools/fontTools/varLib/mutator.py b/contrib/python/fonttools/fontTools/varLib/mutator.py
index d1d123ab69..c7c37dabca 100644
--- a/contrib/python/fonttools/fontTools/varLib/mutator.py
+++ b/contrib/python/fonttools/fontTools/varLib/mutator.py
@@ -3,6 +3,7 @@ Instantiate a variation font. Run, eg:
$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
"""
+
from fontTools.misc.fixedTools import floatToFixedToFloat, floatToFixed
from fontTools.misc.roundTools import otRound
from fontTools.pens.boundsPen import BoundsPen
@@ -198,9 +199,11 @@ def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
glyphnames = sorted(
gvar.variations.keys(),
key=lambda name: (
- glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
- if glyf[name].isComposite() or glyf[name].isVarComposite()
- else 0,
+ (
+ glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
+ if glyf[name].isComposite() or glyf[name].isVarComposite()
+ else 0
+ ),
name,
),
)
@@ -304,9 +307,9 @@ def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
if applies:
assert record.FeatureTableSubstitution.Version == 0x00010000
for rec in record.FeatureTableSubstitution.SubstitutionRecord:
- table.FeatureList.FeatureRecord[
- rec.FeatureIndex
- ].Feature = rec.Feature
+ table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = (
+ rec.Feature
+ )
break
del table.FeatureVariations
diff --git a/contrib/python/fonttools/ya.make b/contrib/python/fonttools/ya.make
index 4d380b556c..e5986f9d03 100644
--- a/contrib/python/fonttools/ya.make
+++ b/contrib/python/fonttools/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(4.47.2)
+VERSION(4.49.0)
LICENSE(MIT)
@@ -41,6 +41,7 @@ PY_SRCS(
fontTools/cu2qu/errors.py
fontTools/cu2qu/ufo.py
fontTools/designspaceLib/__init__.py
+ fontTools/designspaceLib/__main__.py
fontTools/designspaceLib/split.py
fontTools/designspaceLib/statNames.py
fontTools/designspaceLib/types.py
diff --git a/contrib/python/google-auth/py3/.dist-info/METADATA b/contrib/python/google-auth/py3/.dist-info/METADATA
index 48bac82c9d..c8e96994cf 100644
--- a/contrib/python/google-auth/py3/.dist-info/METADATA
+++ b/contrib/python/google-auth/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: google-auth
-Version: 2.27.0
+Version: 2.28.0
Summary: Google Authentication Library
Home-page: https://github.com/googleapis/google-auth-library-python
Author: Google Cloud Platform
diff --git a/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py b/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py
index 1c884c3c43..108cbfe932 100644
--- a/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py
+++ b/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py
@@ -222,7 +222,7 @@ def get(
content = _helpers.from_bytes(response.data)
if response.status == http_client.NOT_FOUND and return_none_for_not_found_error:
- _LOGGER.info(
+ _LOGGER.debug(
"Compute Engine Metadata server call to %s returned 404, reason: %s",
path,
content,
diff --git a/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py b/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py
index 7541c1d8cf..008b991bb9 100644
--- a/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py
+++ b/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py
@@ -28,7 +28,6 @@ from google.auth import iam
from google.auth import jwt
from google.auth import metrics
from google.auth.compute_engine import _metadata
-from google.auth.transport import requests as google_auth_requests
from google.oauth2 import _client
@@ -84,7 +83,6 @@ class Credentials(
self._scopes = scopes
self._default_scopes = default_scopes
self._universe_domain_cached = False
- self._universe_domain_request = google_auth_requests.Request()
if universe_domain:
self._universe_domain = universe_domain
self._universe_domain_cached = True
@@ -150,8 +148,11 @@ class Credentials(
def universe_domain(self):
if self._universe_domain_cached:
return self._universe_domain
+
+ from google.auth.transport import requests as google_auth_requests
+
self._universe_domain = _metadata.get_universe_domain(
- self._universe_domain_request
+ google_auth_requests.Request()
)
self._universe_domain_cached = True
return self._universe_domain
diff --git a/contrib/python/google-auth/py3/google/auth/credentials.py b/contrib/python/google-auth/py3/google/auth/credentials.py
index a4fa1829c7..27abd443dc 100644
--- a/contrib/python/google-auth/py3/google/auth/credentials.py
+++ b/contrib/python/google-auth/py3/google/auth/credentials.py
@@ -24,6 +24,8 @@ from google.auth import exceptions
from google.auth import metrics
from google.auth._refresh_worker import RefreshThreadManager
+DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
+
class Credentials(metaclass=abc.ABCMeta):
"""Base class for all credentials.
@@ -57,7 +59,7 @@ class Credentials(metaclass=abc.ABCMeta):
"""Optional[dict]: Cache of a trust boundary response which has a list
of allowed regions and an encoded string representation of credentials
trust boundary."""
- self._universe_domain = "googleapis.com"
+ self._universe_domain = DEFAULT_UNIVERSE_DOMAIN
"""Optional[str]: The universe domain value, default is googleapis.com
"""
diff --git a/contrib/python/google-auth/py3/google/auth/downscoped.py b/contrib/python/google-auth/py3/google/auth/downscoped.py
index b4d9d386e5..ea75be90fe 100644
--- a/contrib/python/google-auth/py3/google/auth/downscoped.py
+++ b/contrib/python/google-auth/py3/google/auth/downscoped.py
@@ -63,7 +63,7 @@ _STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
# The token exchange requested_token_type. This is always an access_token.
_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
# The STS token URL used to exchanged a short lived access token for a downscoped one.
-_STS_TOKEN_URL = "https://sts.googleapis.com/v1/token"
+_STS_TOKEN_URL_PATTERN = "https://sts.{}/v1/token"
# The subject token type to use when exchanging a short lived access token for a
# downscoped token.
_STS_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
@@ -437,7 +437,11 @@ class Credentials(credentials.CredentialsWithQuotaProject):
"""
def __init__(
- self, source_credentials, credential_access_boundary, quota_project_id=None
+ self,
+ source_credentials,
+ credential_access_boundary,
+ quota_project_id=None,
+ universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
):
"""Instantiates a downscoped credentials object using the provided source
credentials and credential access boundary rules.
@@ -456,6 +460,7 @@ class Credentials(credentials.CredentialsWithQuotaProject):
the upper bound of the permissions that are available on that resource and an
optional condition to further restrict permissions.
quota_project_id (Optional[str]): The optional quota project ID.
+ universe_domain (Optional[str]): The universe domain value, default is googleapis.com
Raises:
google.auth.exceptions.RefreshError: If the source credentials
return an error on token refresh.
@@ -467,7 +472,10 @@ class Credentials(credentials.CredentialsWithQuotaProject):
self._source_credentials = source_credentials
self._credential_access_boundary = credential_access_boundary
self._quota_project_id = quota_project_id
- self._sts_client = sts.Client(_STS_TOKEN_URL)
+ self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN
+ self._sts_client = sts.Client(
+ _STS_TOKEN_URL_PATTERN.format(self.universe_domain)
+ )
@_helpers.copy_docstring(credentials.Credentials)
def refresh(self, request):
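A sketch of constructing downscoped credentials under a non-default universe, with the bucket name and domain as placeholders; with the change above, the STS endpoint becomes https://sts.my-universe.example/v1/token per _STS_TOKEN_URL_PATTERN:

    import google.auth
    from google.auth import downscoped

    source_credentials, _ = google.auth.default()
    rule = downscoped.AccessBoundaryRule(
        available_resource="//storage.googleapis.com/projects/_/buckets/my-bucket",
        available_permissions=["inRole:roles/storage.objectViewer"],
    )
    creds = downscoped.Credentials(
        source_credentials=source_credentials,
        credential_access_boundary=downscoped.CredentialAccessBoundary(rules=[rule]),
        universe_domain="my-universe.example",  # defaults to googleapis.com
    )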
diff --git a/contrib/python/google-auth/py3/google/auth/external_account.py b/contrib/python/google-auth/py3/google/auth/external_account.py
index c314ea799e..0420883f86 100644
--- a/contrib/python/google-auth/py3/google/auth/external_account.py
+++ b/contrib/python/google-auth/py3/google/auth/external_account.py
@@ -51,8 +51,6 @@ _STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
# Cloud resource manager URL used to retrieve project information.
_CLOUD_RESOURCE_MANAGER = "https://cloudresourcemanager.googleapis.com/v1/projects/"
-_DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
-
class Credentials(
credentials.Scoped,
@@ -83,7 +81,7 @@ class Credentials(
scopes=None,
default_scopes=None,
workforce_pool_user_project=None,
- universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
trust_boundary=None,
):
"""Instantiates an external account credentials object.
@@ -131,7 +129,7 @@ class Credentials(
self._scopes = scopes
self._default_scopes = default_scopes
self._workforce_pool_user_project = workforce_pool_user_project
- self._universe_domain = universe_domain or _DEFAULT_UNIVERSE_DOMAIN
+ self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN
self._trust_boundary = {
"locations": [],
"encoded_locations": "0x0",
@@ -513,7 +511,9 @@ class Credentials(
credential_source=info.get("credential_source"),
quota_project_id=info.get("quota_project_id"),
workforce_pool_user_project=info.get("workforce_pool_user_project"),
- universe_domain=info.get("universe_domain", _DEFAULT_UNIVERSE_DOMAIN),
+ universe_domain=info.get(
+ "universe_domain", credentials.DEFAULT_UNIVERSE_DOMAIN
+ ),
**kwargs
)
diff --git a/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py b/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py
index 526588f7e8..f73387172c 100644
--- a/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py
+++ b/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py
@@ -43,7 +43,6 @@ from google.auth import exceptions
from google.oauth2 import sts
from google.oauth2 import utils
-_DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
_EXTERNAL_ACCOUNT_AUTHORIZED_USER_JSON_TYPE = "external_account_authorized_user"
@@ -76,7 +75,7 @@ class Credentials(
revoke_url=None,
scopes=None,
quota_project_id=None,
- universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
):
"""Instantiates a external account authorized user credentials object.
@@ -120,7 +119,7 @@ class Credentials(
self._revoke_url = revoke_url
self._quota_project_id = quota_project_id
self._scopes = scopes
- self._universe_domain = universe_domain or _DEFAULT_UNIVERSE_DOMAIN
+ self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN
if not self.valid and not self.can_refresh:
raise exceptions.InvalidOperation(
@@ -342,7 +341,9 @@ class Credentials(
revoke_url=info.get("revoke_url"),
quota_project_id=info.get("quota_project_id"),
scopes=info.get("scopes"),
- universe_domain=info.get("universe_domain", _DEFAULT_UNIVERSE_DOMAIN),
+ universe_domain=info.get(
+ "universe_domain", credentials.DEFAULT_UNIVERSE_DOMAIN
+ ),
**kwargs
)
diff --git a/contrib/python/google-auth/py3/google/auth/version.py b/contrib/python/google-auth/py3/google/auth/version.py
index e1fa722c81..9672a6c412 100644
--- a/contrib/python/google-auth/py3/google/auth/version.py
+++ b/contrib/python/google-auth/py3/google/auth/version.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-__version__ = "2.27.0"
+__version__ = "2.28.0"
diff --git a/contrib/python/google-auth/py3/google/oauth2/credentials.py b/contrib/python/google-auth/py3/google/oauth2/credentials.py
index c239beed13..5ca00d4c5a 100644
--- a/contrib/python/google-auth/py3/google/oauth2/credentials.py
+++ b/contrib/python/google-auth/py3/google/oauth2/credentials.py
@@ -49,7 +49,6 @@ _LOGGER = logging.getLogger(__name__)
# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
-_DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaProject):
@@ -86,7 +85,7 @@ class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaPr
enable_reauth_refresh=False,
granted_scopes=None,
trust_boundary=None,
- universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
account=None,
):
"""
@@ -150,7 +149,7 @@ class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaPr
self.refresh_handler = refresh_handler
self._enable_reauth_refresh = enable_reauth_refresh
self._trust_boundary = trust_boundary
- self._universe_domain = universe_domain or _DEFAULT_UNIVERSE_DOMAIN
+ self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN
self._account = account or ""
def __getstate__(self):
@@ -187,7 +186,9 @@ class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaPr
self._rapt_token = d.get("_rapt_token")
self._enable_reauth_refresh = d.get("_enable_reauth_refresh")
self._trust_boundary = d.get("_trust_boundary")
- self._universe_domain = d.get("_universe_domain") or _DEFAULT_UNIVERSE_DOMAIN
+ self._universe_domain = (
+ d.get("_universe_domain") or credentials.DEFAULT_UNIVERSE_DOMAIN
+ )
# The refresh_handler setter should be used to repopulate this.
self._refresh_handler = None
self._refresh_worker = None
@@ -373,7 +374,7 @@ class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaPr
@_helpers.copy_docstring(credentials.Credentials)
def refresh(self, request):
- if self._universe_domain != _DEFAULT_UNIVERSE_DOMAIN:
+ if self._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN:
raise exceptions.RefreshError(
"User credential refresh is only supported in the default "
"googleapis.com universe domain, but the current universe "
diff --git a/contrib/python/google-auth/py3/google/oauth2/service_account.py b/contrib/python/google-auth/py3/google/oauth2/service_account.py
index 4502c6f68c..04fd7797ad 100644
--- a/contrib/python/google-auth/py3/google/oauth2/service_account.py
+++ b/contrib/python/google-auth/py3/google/oauth2/service_account.py
@@ -82,7 +82,6 @@ from google.auth import metrics
from google.oauth2 import _client
_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
-_DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
@@ -139,7 +138,7 @@ class Credentials(
quota_project_id=None,
additional_claims=None,
always_use_jwt_access=False,
- universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
trust_boundary=None,
):
"""
@@ -182,9 +181,9 @@ class Credentials(
self._quota_project_id = quota_project_id
self._token_uri = token_uri
self._always_use_jwt_access = always_use_jwt_access
- self._universe_domain = universe_domain or _DEFAULT_UNIVERSE_DOMAIN
+ self._universe_domain = universe_domain or credentials.DEFAULT_UNIVERSE_DOMAIN
- if universe_domain != _DEFAULT_UNIVERSE_DOMAIN:
+ if universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN:
self._always_use_jwt_access = True
self._jwt_credentials = None
@@ -216,7 +215,9 @@ class Credentials(
service_account_email=info["client_email"],
token_uri=info["token_uri"],
project_id=info.get("project_id"),
- universe_domain=info.get("universe_domain", _DEFAULT_UNIVERSE_DOMAIN),
+ universe_domain=info.get(
+ "universe_domain", credentials.DEFAULT_UNIVERSE_DOMAIN
+ ),
trust_boundary=info.get("trust_boundary"),
**kwargs
)
@@ -316,7 +317,7 @@ class Credentials(
"""
cred = self._make_copy()
if (
- cred._universe_domain != _DEFAULT_UNIVERSE_DOMAIN
+ cred._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN
and not always_use_jwt_access
):
raise exceptions.InvalidValue(
@@ -329,7 +330,7 @@ class Credentials(
def with_universe_domain(self, universe_domain):
cred = self._make_copy()
cred._universe_domain = universe_domain
- if universe_domain != _DEFAULT_UNIVERSE_DOMAIN:
+ if universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN:
cred._always_use_jwt_access = True
return cred
@@ -427,7 +428,10 @@ class Credentials(
# created, try to create one with scopes
self._create_self_signed_jwt(None)
- if self._universe_domain != _DEFAULT_UNIVERSE_DOMAIN and self._subject:
+ if (
+ self._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN
+ and self._subject
+ ):
raise exceptions.RefreshError(
"domain wide delegation is not supported for non-default universe domain"
)
@@ -556,7 +560,7 @@ class IDTokenCredentials(
target_audience,
additional_claims=None,
quota_project_id=None,
- universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=credentials.DEFAULT_UNIVERSE_DOMAIN,
):
"""
Args:
@@ -588,11 +592,11 @@ class IDTokenCredentials(
self._use_iam_endpoint = False
if not universe_domain:
- self._universe_domain = _DEFAULT_UNIVERSE_DOMAIN
+ self._universe_domain = credentials.DEFAULT_UNIVERSE_DOMAIN
else:
self._universe_domain = universe_domain
- if universe_domain != _DEFAULT_UNIVERSE_DOMAIN:
+ if universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN:
self._use_iam_endpoint = True
if additional_claims is not None:
@@ -708,7 +712,10 @@ class IDTokenCredentials(
default and use_iam_endpoint is False.
"""
cred = self._make_copy()
- if cred._universe_domain != _DEFAULT_UNIVERSE_DOMAIN and not use_iam_endpoint:
+ if (
+ cred._universe_domain != credentials.DEFAULT_UNIVERSE_DOMAIN
+ and not use_iam_endpoint
+ ):
raise exceptions.InvalidValue(
"use_iam_endpoint should be True for non-default universe domain"
)
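The service account hunks above encode two rules: a non-default universe domain forces self-signed JWT access (and, for ID tokens, the IAM endpoint), and domain-wide delegation is rejected outside the default universe. A self-contained sketch that mirrors the constructor logic (the helper is illustrative, not the library API):

```python
DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"

def resolve_jwt_mode(universe_domain, always_use_jwt_access=False):
    # Mirrors Credentials.__init__ above: non-default universes
    # always use self-signed JWT access.
    universe_domain = universe_domain or DEFAULT_UNIVERSE_DOMAIN
    if universe_domain != DEFAULT_UNIVERSE_DOMAIN:
        always_use_jwt_access = True
    return universe_domain, always_use_jwt_access

assert resolve_jwt_mode(None) == ("googleapis.com", False)
assert resolve_jwt_mode("foo.com") == ("foo.com", True)
```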
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py b/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py
index 5e037a940b..35e3c089f9 100644
--- a/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py
+++ b/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py
@@ -400,6 +400,19 @@ def test_get_universe_domain_success():
assert universe_domain == "fake_universe_domain"
+def test_get_universe_domain_success_empty_response():
+ request = make_request("", headers={"content-type": "text/plain"})
+
+ universe_domain = _metadata.get_universe_domain(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + "universe/universe_domain",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert universe_domain == "googleapis.com"
+
+
def test_get_universe_domain_not_found():
# Test that if the universe domain endpoint returns 404 error, we should
# use googleapis.com as the universe domain
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py b/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py
index f04bb1304a..9cca317924 100644
--- a/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py
+++ b/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py
@@ -257,16 +257,12 @@ class TestCredentials(object):
assert self.credentials.universe_domain == "fake_universe_domain"
assert self.credentials._universe_domain == "fake_universe_domain"
assert self.credentials._universe_domain_cached
- get_universe_domain.assert_called_once_with(
- self.credentials._universe_domain_request
- )
+ get_universe_domain.assert_called_once()
# calling the universe_domain property the second time should use the
# cached value instead of calling get_universe_domain
assert self.credentials.universe_domain == "fake_universe_domain"
- get_universe_domain.assert_called_once_with(
- self.credentials._universe_domain_request
- )
+ get_universe_domain.assert_called_once()
@mock.patch("google.auth.compute_engine._metadata.get_universe_domain")
def test_user_provided_universe_domain(self, get_universe_domain):
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py b/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py
index 8dd5f219be..ce0c72fa0a 100644
--- a/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py
@@ -24,6 +24,7 @@ from google.auth import crypt
from google.auth import exceptions
from google.auth import jwt
from google.auth import transport
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
from google.oauth2 import service_account
@@ -59,7 +60,7 @@ class TestCredentials(object):
TOKEN_URI = "https://example.com/oauth2/token"
@classmethod
- def make_credentials(cls, universe_domain=service_account._DEFAULT_UNIVERSE_DOMAIN):
+ def make_credentials(cls, universe_domain=DEFAULT_UNIVERSE_DOMAIN):
return service_account.Credentials(
SIGNER,
cls.SERVICE_ACCOUNT_EMAIL,
@@ -71,7 +72,7 @@ class TestCredentials(object):
credentials = service_account.Credentials(
SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI, universe_domain=None
)
- assert credentials.universe_domain == service_account._DEFAULT_UNIVERSE_DOMAIN
+ assert credentials.universe_domain == DEFAULT_UNIVERSE_DOMAIN
def test_from_service_account_info(self):
credentials = service_account.Credentials.from_service_account_info(
@@ -81,7 +82,7 @@ class TestCredentials(object):
assert credentials._signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
assert credentials.service_account_email == SERVICE_ACCOUNT_INFO["client_email"]
assert credentials._token_uri == SERVICE_ACCOUNT_INFO["token_uri"]
- assert credentials._universe_domain == service_account._DEFAULT_UNIVERSE_DOMAIN
+ assert credentials._universe_domain == DEFAULT_UNIVERSE_DOMAIN
assert not credentials._always_use_jwt_access
def test_from_service_account_info_non_gdu(self):
@@ -596,7 +597,7 @@ class TestIDTokenCredentials(object):
TARGET_AUDIENCE = "https://example.com"
@classmethod
- def make_credentials(cls, universe_domain=service_account._DEFAULT_UNIVERSE_DOMAIN):
+ def make_credentials(cls, universe_domain=DEFAULT_UNIVERSE_DOMAIN):
return service_account.IDTokenCredentials(
SIGNER,
cls.SERVICE_ACCOUNT_EMAIL,
@@ -613,7 +614,7 @@ class TestIDTokenCredentials(object):
self.TARGET_AUDIENCE,
universe_domain=None,
)
- assert credentials._universe_domain == service_account._DEFAULT_UNIVERSE_DOMAIN
+ assert credentials._universe_domain == DEFAULT_UNIVERSE_DOMAIN
def test_from_service_account_info(self):
credentials = service_account.IDTokenCredentials.from_service_account_info(
diff --git a/contrib/python/google-auth/py3/tests/test_aws.py b/contrib/python/google-auth/py3/tests/test_aws.py
index db2e984100..3f358d52b0 100644
--- a/contrib/python/google-auth/py3/tests/test_aws.py
+++ b/contrib/python/google-auth/py3/tests/test_aws.py
@@ -26,7 +26,7 @@ from google.auth import aws
from google.auth import environment_vars
from google.auth import exceptions
from google.auth import transport
-
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
"gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
@@ -75,7 +75,6 @@ REQUEST_PARAMS = '{"KeySchema":[{"KeyType":"HASH","AttributeName":"Id"}],"TableN
# Each tuple contains the following entries:
# region, time, credentials, original_request, signed_request
-DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
VALID_TOKEN_URLS = [
"https://sts.googleapis.com",
"https://us-east-1.sts.googleapis.com",
diff --git a/contrib/python/google-auth/py3/tests/test_downscoped.py b/contrib/python/google-auth/py3/tests/test_downscoped.py
index 8cc2a30d16..fe6e291c75 100644
--- a/contrib/python/google-auth/py3/tests/test_downscoped.py
+++ b/contrib/python/google-auth/py3/tests/test_downscoped.py
@@ -25,6 +25,7 @@ from google.auth import credentials
from google.auth import downscoped
from google.auth import exceptions
from google.auth import transport
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
from google.auth.credentials import TokenState
@@ -447,7 +448,11 @@ class TestCredentialAccessBoundary(object):
class TestCredentials(object):
@staticmethod
- def make_credentials(source_credentials=SourceCredentials(), quota_project_id=None):
+ def make_credentials(
+ source_credentials=SourceCredentials(),
+ quota_project_id=None,
+ universe_domain=None,
+ ):
availability_condition = make_availability_condition(
EXPRESSION, TITLE, DESCRIPTION
)
@@ -458,7 +463,10 @@ class TestCredentials(object):
credential_access_boundary = make_credential_access_boundary(rules)
return downscoped.Credentials(
- source_credentials, credential_access_boundary, quota_project_id
+ source_credentials,
+ credential_access_boundary,
+ quota_project_id,
+ universe_domain,
)
@staticmethod
@@ -473,10 +481,12 @@ class TestCredentials(object):
return request
@staticmethod
- def assert_request_kwargs(request_kwargs, headers, request_data):
+ def assert_request_kwargs(
+ request_kwargs, headers, request_data, token_endpoint=TOKEN_EXCHANGE_ENDPOINT
+ ):
"""Asserts the request was called with the expected parameters.
"""
- assert request_kwargs["url"] == TOKEN_EXCHANGE_ENDPOINT
+ assert request_kwargs["url"] == token_endpoint
assert request_kwargs["method"] == "POST"
assert request_kwargs["headers"] == headers
assert request_kwargs["body"] is not None
@@ -496,6 +506,33 @@ class TestCredentials(object):
assert not credentials.expired
# No quota project ID set.
assert not credentials.quota_project_id
+ assert credentials.universe_domain == DEFAULT_UNIVERSE_DOMAIN
+
+ def test_default_state_with_explicit_none_value(self):
+ credentials = self.make_credentials(universe_domain=None)
+
+ # No token acquired yet.
+ assert not credentials.token
+ assert not credentials.valid
+ # Expiration hasn't been set yet.
+ assert not credentials.expiry
+ assert not credentials.expired
+ # No quota project ID set.
+ assert not credentials.quota_project_id
+ assert credentials.universe_domain == DEFAULT_UNIVERSE_DOMAIN
+
+ def test_create_with_customized_universe_domain(self):
+ test_universe_domain = "foo.com"
+ credentials = self.make_credentials(universe_domain=test_universe_domain)
+ # No token acquired yet.
+ assert not credentials.token
+ assert not credentials.valid
+ # Expiration hasn't been set yet.
+ assert not credentials.expiry
+ assert not credentials.expired
+ # No quota project ID set.
+ assert not credentials.quota_project_id
+ assert credentials.universe_domain == test_universe_domain
def test_with_quota_project(self):
credentials = self.make_credentials()
@@ -507,6 +544,49 @@ class TestCredentials(object):
assert quota_project_creds.quota_project_id == "project-foo"
@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_on_custom_universe(self, unused_utcnow):
+ test_universe_domain = "foo.com"
+ response = SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": GRANT_TYPE,
+ "subject_token": "ACCESS_TOKEN_1",
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "requested_token_type": REQUESTED_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(CREDENTIAL_ACCESS_BOUNDARY_JSON)),
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ source_credentials = SourceCredentials()
+ credentials = self.make_credentials(
+ source_credentials=source_credentials, universe_domain=test_universe_domain
+ )
+ token_exchange_endpoint = downscoped._STS_TOKEN_URL_PATTERN.format(
+ test_universe_domain
+ )
+
+ # Spy on calls to source credentials refresh to confirm the expected request
+ # instance is used.
+ with mock.patch.object(
+ source_credentials, "refresh", wraps=source_credentials.refresh
+ ) as wrapped_source_cred_refresh:
+ credentials.refresh(request)
+
+ self.assert_request_kwargs(
+ request.call_args[1], headers, request_data, token_exchange_endpoint
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+ # Confirm the source credentials refresh was called with the same request instance.
+ wrapped_source_cred_refresh.assert_called_with(request)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
def test_refresh(self, unused_utcnow):
response = SUCCESS_RESPONSE.copy()
# Test custom expiration to confirm expiry is set correctly.
diff --git a/contrib/python/google-auth/py3/tests/test_external_account.py b/contrib/python/google-auth/py3/tests/test_external_account.py
index 7f33b1dfa2..03a5014ce5 100644
--- a/contrib/python/google-auth/py3/tests/test_external_account.py
+++ b/contrib/python/google-auth/py3/tests/test_external_account.py
@@ -24,9 +24,9 @@ from google.auth import _helpers
from google.auth import exceptions
from google.auth import external_account
from google.auth import transport
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
from google.auth.credentials import TokenState
-
IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
"gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
)
@@ -150,7 +150,7 @@ class TestCredentials(object):
default_scopes=None,
service_account_impersonation_url=None,
service_account_impersonation_options={},
- universe_domain=external_account._DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
):
return CredentialsImpl(
audience=cls.AUDIENCE,
@@ -386,7 +386,7 @@ class TestCredentials(object):
quota_project_id=self.QUOTA_PROJECT_ID,
scopes=["email"],
default_scopes=["default2"],
- universe_domain=external_account._DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
)
def test_with_token_uri(self):
@@ -474,7 +474,7 @@ class TestCredentials(object):
quota_project_id="project-foo",
scopes=self.SCOPES,
default_scopes=["default1"],
- universe_domain=external_account._DEFAULT_UNIVERSE_DOMAIN,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
)
def test_with_invalid_impersonation_target_principal(self):
@@ -504,7 +504,7 @@ class TestCredentials(object):
assert credentials.universe_domain == "dummy_universe.com"
credentials = self.make_credentials()
- assert credentials.universe_domain == external_account._DEFAULT_UNIVERSE_DOMAIN
+ assert credentials.universe_domain == DEFAULT_UNIVERSE_DOMAIN
def test_with_universe_domain(self):
credentials = self.make_credentials()
@@ -523,7 +523,7 @@ class TestCredentials(object):
"token_url": self.TOKEN_URL,
"credential_source": self.CREDENTIAL_SOURCE.copy(),
"workforce_pool_user_project": self.WORKFORCE_POOL_USER_PROJECT,
- "universe_domain": external_account._DEFAULT_UNIVERSE_DOMAIN,
+ "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
}
def test_info_with_full_options(self):
@@ -548,7 +548,7 @@ class TestCredentials(object):
"quota_project_id": self.QUOTA_PROJECT_ID,
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET,
- "universe_domain": external_account._DEFAULT_UNIVERSE_DOMAIN,
+ "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
}
def test_service_account_email_without_impersonation(self):
diff --git a/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py b/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py
index 7213a23486..743ee9c848 100644
--- a/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py
+++ b/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py
@@ -22,6 +22,7 @@ import pytest # type: ignore
from google.auth import exceptions
from google.auth import external_account_authorized_user
from google.auth import transport
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
TOKEN_URL = "https://sts.googleapis.com/v1/token"
TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
@@ -45,7 +46,6 @@ BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
SCOPES = ["email", "profile"]
NOW = datetime.datetime(1990, 8, 27, 6, 54, 30)
FAKE_UNIVERSE_DOMAIN = "fake-universe-domain"
-DEFAULT_UNIVERSE_DOMAIN = external_account_authorized_user._DEFAULT_UNIVERSE_DOMAIN
class TestCredentials(object):
diff --git a/contrib/python/google-auth/py3/tests/test_identity_pool.py b/contrib/python/google-auth/py3/tests/test_identity_pool.py
index 2d10a5d268..96be1d61c2 100644
--- a/contrib/python/google-auth/py3/tests/test_identity_pool.py
+++ b/contrib/python/google-auth/py3/tests/test_identity_pool.py
@@ -26,7 +26,7 @@ from google.auth import exceptions
from google.auth import identity_pool
from google.auth import metrics
from google.auth import transport
-
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
CLIENT_ID = "username"
CLIENT_SECRET = "password"
@@ -68,8 +68,6 @@ WORKFORCE_AUDIENCE = (
WORKFORCE_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:id_token"
WORKFORCE_POOL_USER_PROJECT = "WORKFORCE_POOL_USER_PROJECT_NUMBER"
-DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
-
VALID_TOKEN_URLS = [
"https://sts.googleapis.com",
"https://us-east-1.sts.googleapis.com",
diff --git a/contrib/python/google-auth/py3/tests/test_pluggable.py b/contrib/python/google-auth/py3/tests/test_pluggable.py
index 783bbcaec0..24cd0e2ec9 100644
--- a/contrib/python/google-auth/py3/tests/test_pluggable.py
+++ b/contrib/python/google-auth/py3/tests/test_pluggable.py
@@ -21,6 +21,7 @@ import pytest # type: ignore
from google.auth import exceptions
from google.auth import pluggable
+from google.auth.credentials import DEFAULT_UNIVERSE_DOMAIN
from .test__default import WORKFORCE_AUDIENCE
CLIENT_ID = "username"
@@ -45,7 +46,6 @@ TOKEN_URL = "https://sts.googleapis.com/v1/token"
TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
-DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
VALID_TOKEN_URLS = [
"https://sts.googleapis.com",
diff --git a/contrib/python/google-auth/py3/ya.make b/contrib/python/google-auth/py3/ya.make
index 5ece69bc98..7863862fdc 100644
--- a/contrib/python/google-auth/py3/ya.make
+++ b/contrib/python/google-auth/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(2.27.0)
+VERSION(2.28.0)
LICENSE(Apache-2.0)
diff --git a/contrib/python/httpcore/.dist-info/METADATA b/contrib/python/httpcore/.dist-info/METADATA
index 07eab9de21..51de714c58 100644
--- a/contrib/python/httpcore/.dist-info/METADATA
+++ b/contrib/python/httpcore/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: httpcore
-Version: 1.0.2
+Version: 1.0.3
Summary: A minimal low-level HTTP client.
Project-URL: Documentation, https://www.encode.io/httpcore
Project-URL: Homepage, https://www.encode.io/httpcore/
@@ -33,7 +33,7 @@ Requires-Dist: h2<5,>=3; extra == 'http2'
Provides-Extra: socks
Requires-Dist: socksio==1.*; extra == 'socks'
Provides-Extra: trio
-Requires-Dist: trio<0.23.0,>=0.22.0; extra == 'trio'
+Requires-Dist: trio<0.24.0,>=0.22.0; extra == 'trio'
Description-Content-Type: text/markdown
# HTTP Core
@@ -153,7 +153,13 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
-## 1.0.2 (November 10th, 2023)
+## 1.0.3 (February 13th, 2024)
+
+- Fix support for async cancellations. (#880)
+- Fix trace extension when used with socks proxy. (#849)
+- Fix SSL context for connections using the "wss" scheme. (#869)
+
+## 1.0.2 (November 10th, 2023)
- Fix `float("inf")` timeouts in `Event.wait` function. (#846)
diff --git a/contrib/python/httpcore/httpcore/__init__.py b/contrib/python/httpcore/httpcore/__init__.py
index eb3e577186..3709fc4080 100644
--- a/contrib/python/httpcore/httpcore/__init__.py
+++ b/contrib/python/httpcore/httpcore/__init__.py
@@ -130,7 +130,7 @@ __all__ = [
"WriteError",
]
-__version__ = "1.0.2"
+__version__ = "1.0.3"
__locals = locals()
diff --git a/contrib/python/httpcore/httpcore/_async/connection.py b/contrib/python/httpcore/httpcore/_async/connection.py
index 45ee22a63d..2f439cf09c 100644
--- a/contrib/python/httpcore/httpcore/_async/connection.py
+++ b/contrib/python/httpcore/httpcore/_async/connection.py
@@ -6,7 +6,7 @@ from typing import Iterable, Iterator, Optional, Type
from .._backends.auto import AutoBackend
from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
-from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout
+from .._exceptions import ConnectError, ConnectTimeout
from .._models import Origin, Request, Response
from .._ssl import default_ssl_context
from .._synchronization import AsyncLock
@@ -70,9 +70,9 @@ class AsyncHTTPConnection(AsyncConnectionInterface):
f"Attempted to send request to {request.url.origin} on connection to {self._origin}"
)
- async with self._request_lock:
- if self._connection is None:
- try:
+ try:
+ async with self._request_lock:
+ if self._connection is None:
stream = await self._connect(request)
ssl_object = stream.get_extra_info("ssl_object")
@@ -94,11 +94,9 @@ class AsyncHTTPConnection(AsyncConnectionInterface):
stream=stream,
keepalive_expiry=self._keepalive_expiry,
)
- except Exception as exc:
- self._connect_failed = True
- raise exc
- elif not self._connection.is_available():
- raise ConnectionNotAvailable()
+ except BaseException as exc:
+ self._connect_failed = True
+ raise exc
return await self._connection.handle_async_request(request)
@@ -137,7 +135,7 @@ class AsyncHTTPConnection(AsyncConnectionInterface):
)
trace.return_value = stream
- if self._origin.scheme == b"https":
+ if self._origin.scheme in (b"https", b"wss"):
ssl_context = (
default_ssl_context()
if self._ssl_context is None
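The scheme check above now treats `wss` origins like `https`, so secure WebSocket connections also get an SSL context. Reduced to its essence (`needs_tls` is an illustrative helper, not httpcore API):

```python
def needs_tls(scheme: bytes) -> bool:
    # Mirrors the condition in _connect(): secure HTTP and secure
    # WebSocket origins are both wrapped in TLS.
    return scheme in (b"https", b"wss")

assert needs_tls(b"https") and needs_tls(b"wss")
assert not needs_tls(b"http")
```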
diff --git a/contrib/python/httpcore/httpcore/_async/connection_pool.py b/contrib/python/httpcore/httpcore/_async/connection_pool.py
index 0320c6d80e..018b0ba234 100644
--- a/contrib/python/httpcore/httpcore/_async/connection_pool.py
+++ b/contrib/python/httpcore/httpcore/_async/connection_pool.py
@@ -1,31 +1,30 @@
import ssl
import sys
-import time
from types import TracebackType
from typing import AsyncIterable, AsyncIterator, Iterable, List, Optional, Type
from .._backends.auto import AutoBackend
from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
-from .._exceptions import ConnectionNotAvailable, PoolTimeout, UnsupportedProtocol
+from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol
from .._models import Origin, Request, Response
-from .._synchronization import AsyncEvent, AsyncLock, AsyncShieldCancellation
+from .._synchronization import AsyncEvent, AsyncShieldCancellation, AsyncThreadLock
from .connection import AsyncHTTPConnection
from .interfaces import AsyncConnectionInterface, AsyncRequestInterface
-class RequestStatus:
- def __init__(self, request: Request):
+class AsyncPoolRequest:
+ def __init__(self, request: Request) -> None:
self.request = request
self.connection: Optional[AsyncConnectionInterface] = None
self._connection_acquired = AsyncEvent()
- def set_connection(self, connection: AsyncConnectionInterface) -> None:
- assert self.connection is None
+ def assign_to_connection(
+ self, connection: Optional[AsyncConnectionInterface]
+ ) -> None:
self.connection = connection
self._connection_acquired.set()
- def unset_connection(self) -> None:
- assert self.connection is not None
+ def clear_connection(self) -> None:
self.connection = None
self._connection_acquired = AsyncEvent()
@@ -37,6 +36,9 @@ class RequestStatus:
assert self.connection is not None
return self.connection
+ def is_queued(self) -> bool:
+ return self.connection is None
+
class AsyncConnectionPool(AsyncRequestInterface):
"""
@@ -107,14 +109,21 @@ class AsyncConnectionPool(AsyncRequestInterface):
self._local_address = local_address
self._uds = uds
- self._pool: List[AsyncConnectionInterface] = []
- self._requests: List[RequestStatus] = []
- self._pool_lock = AsyncLock()
self._network_backend = (
AutoBackend() if network_backend is None else network_backend
)
self._socket_options = socket_options
+ # The mutable state on a connection pool is the queue of incoming requests,
+ # and the set of connections that are servicing those requests.
+ self._connections: List[AsyncConnectionInterface] = []
+ self._requests: List[AsyncPoolRequest] = []
+
+ # We only mutate the state of the connection pool within an 'optional_thread_lock'
+ # context. This holds a threading lock unless we're running in async mode,
+ # in which case it is a no-op.
+ self._optional_thread_lock = AsyncThreadLock()
+
def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
return AsyncHTTPConnection(
origin=origin,
@@ -145,64 +154,7 @@ class AsyncConnectionPool(AsyncRequestInterface):
]
```
"""
- return list(self._pool)
-
- async def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool:
- """
- Attempt to provide a connection that can handle the given origin.
- """
- origin = status.request.url.origin
-
- # If there are queued requests in front of us, then don't acquire a
- # connection. We handle requests strictly in order.
- waiting = [s for s in self._requests if s.connection is None]
- if waiting and waiting[0] is not status:
- return False
-
- # Reuse an existing connection if one is currently available.
- for idx, connection in enumerate(self._pool):
- if connection.can_handle_request(origin) and connection.is_available():
- self._pool.pop(idx)
- self._pool.insert(0, connection)
- status.set_connection(connection)
- return True
-
- # If the pool is currently full, attempt to close one idle connection.
- if len(self._pool) >= self._max_connections:
- for idx, connection in reversed(list(enumerate(self._pool))):
- if connection.is_idle():
- await connection.aclose()
- self._pool.pop(idx)
- break
-
- # If the pool is still full, then we cannot acquire a connection.
- if len(self._pool) >= self._max_connections:
- return False
-
- # Otherwise create a new connection.
- connection = self.create_connection(origin)
- self._pool.insert(0, connection)
- status.set_connection(connection)
- return True
-
- async def _close_expired_connections(self) -> None:
- """
- Clean up the connection pool by closing off any connections that have expired.
- """
- # Close any connections that have expired their keep-alive time.
- for idx, connection in reversed(list(enumerate(self._pool))):
- if connection.has_expired():
- await connection.aclose()
- self._pool.pop(idx)
-
- # If the pool size exceeds the maximum number of allowed keep-alive connections,
- # then close off idle connections as required.
- pool_size = len(self._pool)
- for idx, connection in reversed(list(enumerate(self._pool))):
- if connection.is_idle() and pool_size > self._max_keepalive_connections:
- await connection.aclose()
- self._pool.pop(idx)
- pool_size -= 1
+ return list(self._connections)
async def handle_async_request(self, request: Request) -> Response:
"""
@@ -220,116 +172,147 @@ class AsyncConnectionPool(AsyncRequestInterface):
f"Request URL has an unsupported protocol '{scheme}://'."
)
- status = RequestStatus(request)
timeouts = request.extensions.get("timeout", {})
timeout = timeouts.get("pool", None)
- if timeout is not None:
- deadline = time.monotonic() + timeout
- else:
- deadline = float("inf")
-
- async with self._pool_lock:
- self._requests.append(status)
- await self._close_expired_connections()
- await self._attempt_to_acquire_connection(status)
-
- while True:
- try:
- connection = await status.wait_for_connection(timeout=timeout)
- except BaseException as exc:
- # If we timeout here, or if the task is cancelled, then make
- # sure to remove the request from the queue before bubbling
- # up the exception.
- async with self._pool_lock:
- # Ensure only remove when task exists.
- if status in self._requests:
- self._requests.remove(status)
- raise exc
-
- try:
- response = await connection.handle_async_request(request)
- except ConnectionNotAvailable:
- # The ConnectionNotAvailable exception is a special case, that
- # indicates we need to retry the request on a new connection.
- #
- # The most common case where this can occur is when multiple
- # requests are queued waiting for a single connection, which
- # might end up as an HTTP/2 connection, but which actually ends
- # up as HTTP/1.1.
- async with self._pool_lock:
- # Maintain our position in the request queue, but reset the
- # status so that the request becomes queued again.
- status.unset_connection()
- await self._attempt_to_acquire_connection(status)
- except BaseException as exc:
- with AsyncShieldCancellation():
- await self.response_closed(status)
- raise exc
- else:
- break
-
- timeout = deadline - time.monotonic()
- if timeout < 0:
- raise PoolTimeout # pragma: nocover
-
- # When we return the response, we wrap the stream in a special class
- # that handles notifying the connection pool once the response
- # has been released.
+ with self._optional_thread_lock:
+ # Add the incoming request to our request queue.
+ pool_request = AsyncPoolRequest(request)
+ self._requests.append(pool_request)
+
+ try:
+ while True:
+ with self._optional_thread_lock:
+ # Assign incoming requests to available connections,
+ # closing or creating new connections as required.
+ closing = self._assign_requests_to_connections()
+ await self._close_connections(closing)
+
+ # Wait until this request has an assigned connection.
+ connection = await pool_request.wait_for_connection(timeout=timeout)
+
+ try:
+ # Send the request on the assigned connection.
+ response = await connection.handle_async_request(
+ pool_request.request
+ )
+ except ConnectionNotAvailable:
+ # In some cases a connection may initially be available to
+ # handle a request, but then become unavailable.
+ #
+ # In this case we clear the connection and try again.
+ pool_request.clear_connection()
+ else:
+ break # pragma: nocover
+
+ except BaseException as exc:
+ with self._optional_thread_lock:
+ # For any exception or cancellation we remove the request from
+ # the queue, and then re-assign requests to connections.
+ self._requests.remove(pool_request)
+ closing = self._assign_requests_to_connections()
+
+ await self._close_connections(closing)
+ raise exc from None
+
+ # Return the response. Note that in this case we still have to manage
+ # the point at which the response is closed.
assert isinstance(response.stream, AsyncIterable)
return Response(
status=response.status,
headers=response.headers,
- content=ConnectionPoolByteStream(response.stream, self, status),
+ content=PoolByteStream(
+ stream=response.stream, pool_request=pool_request, pool=self
+ ),
extensions=response.extensions,
)
- async def response_closed(self, status: RequestStatus) -> None:
+ def _assign_requests_to_connections(self) -> List[AsyncConnectionInterface]:
"""
- This method acts as a callback once the request/response cycle is complete.
+ Manage the state of the connection pool, assigning incoming
+ requests to connections as available.
- It is called into from the `ConnectionPoolByteStream.aclose()` method.
- """
- assert status.connection is not None
- connection = status.connection
-
- async with self._pool_lock:
- # Update the state of the connection pool.
- if status in self._requests:
- self._requests.remove(status)
-
- if connection.is_closed() and connection in self._pool:
- self._pool.remove(connection)
-
- # Since we've had a response closed, it's possible we'll now be able
- # to service one or more requests that are currently pending.
- for status in self._requests:
- if status.connection is None:
- acquired = await self._attempt_to_acquire_connection(status)
- # If we could not acquire a connection for a queued request
- # then we don't need to check anymore requests that are
- # queued later behind it.
- if not acquired:
- break
-
- # Housekeeping.
- await self._close_expired_connections()
+ Called whenever a new request is added to or removed from the pool.
- async def aclose(self) -> None:
+ Any closing connections are returned, allowing the I/O for closing
+ those connections to be handled separately.
"""
- Close any connections in the pool.
- """
- async with self._pool_lock:
- for connection in self._pool:
+ closing_connections = []
+
+ # First we handle cleaning up any connections that are closed,
+ # have expired their keep-alive, or surplus idle connections.
+ for connection in list(self._connections):
+ if connection.is_closed():
+ # log: "removing closed connection"
+ self._connections.remove(connection)
+ elif connection.has_expired():
+ # log: "closing expired connection"
+ self._connections.remove(connection)
+ closing_connections.append(connection)
+ elif (
+ connection.is_idle()
+ and len(self._connections) > self._max_keepalive_connections
+ ):
+ # log: "closing idle connection"
+ self._connections.remove(connection)
+ closing_connections.append(connection)
+
+ # Assign queued requests to connections.
+ queued_requests = [request for request in self._requests if request.is_queued()]
+ for pool_request in queued_requests:
+ origin = pool_request.request.url.origin
+ available_connections = [
+ connection
+ for connection in self._connections
+ if connection.can_handle_request(origin) and connection.is_available()
+ ]
+ idle_connections = [
+ connection for connection in self._connections if connection.is_idle()
+ ]
+
+ # There are three cases for how we may be able to handle the request:
+ #
+ # 1. There is an existing connection that can handle the request.
+ # 2. We can create a new connection to handle the request.
+ # 3. We can close an idle connection and then create a new connection
+ # to handle the request.
+ if available_connections:
+ # log: "reusing existing connection"
+ connection = available_connections[0]
+ pool_request.assign_to_connection(connection)
+ elif len(self._connections) < self._max_connections:
+ # log: "creating new connection"
+ connection = self.create_connection(origin)
+ self._connections.append(connection)
+ pool_request.assign_to_connection(connection)
+ elif idle_connections:
+ # log: "closing idle connection"
+ connection = idle_connections[0]
+ self._connections.remove(connection)
+ closing_connections.append(connection)
+ # log: "creating new connection"
+ connection = self.create_connection(origin)
+ self._connections.append(connection)
+ pool_request.assign_to_connection(connection)
+
+ return closing_connections
+
+ async def _close_connections(self, closing: List[AsyncConnectionInterface]) -> None:
+ # Close connections which have been removed from the pool.
+ with AsyncShieldCancellation():
+ for connection in closing:
await connection.aclose()
- self._pool = []
- self._requests = []
+
+ async def aclose(self) -> None:
+ # Explicitly close the connection pool.
+ # Clears all existing requests and connections.
+ with self._optional_thread_lock:
+ closing_connections = list(self._connections)
+ self._connections = []
+ await self._close_connections(closing_connections)
async def __aenter__(self) -> "AsyncConnectionPool":
- # Acquiring the pool lock here ensures that we have the
- # correct dependencies installed as early as possible.
- async with self._pool_lock:
- pass
return self
async def __aexit__(
@@ -340,31 +323,58 @@ class AsyncConnectionPool(AsyncRequestInterface):
) -> None:
await self.aclose()
+ def __repr__(self) -> str:
+ class_name = self.__class__.__name__
+ with self._optional_thread_lock:
+ request_is_queued = [request.is_queued() for request in self._requests]
+ connection_is_idle = [
+ connection.is_idle() for connection in self._connections
+ ]
+
+ num_active_requests = request_is_queued.count(False)
+ num_queued_requests = request_is_queued.count(True)
+ num_active_connections = connection_is_idle.count(False)
+ num_idle_connections = connection_is_idle.count(True)
+
+ requests_info = (
+ f"Requests: {num_active_requests} active, {num_queued_requests} queued"
+ )
+ connection_info = (
+ f"Connections: {num_active_connections} active, {num_idle_connections} idle"
+ )
+
+ return f"<{class_name} [{requests_info} | {connection_info}]>"
-class ConnectionPoolByteStream:
- """
- A wrapper around the response byte stream, that additionally handles
- notifying the connection pool when the response has been closed.
- """
+class PoolByteStream:
def __init__(
self,
stream: AsyncIterable[bytes],
+ pool_request: AsyncPoolRequest,
pool: AsyncConnectionPool,
- status: RequestStatus,
) -> None:
self._stream = stream
+ self._pool_request = pool_request
self._pool = pool
- self._status = status
+ self._closed = False
async def __aiter__(self) -> AsyncIterator[bytes]:
- async for part in self._stream:
- yield part
+ try:
+ async for part in self._stream:
+ yield part
+ except BaseException as exc:
+ await self.aclose()
+ raise exc from None
async def aclose(self) -> None:
- try:
- if hasattr(self._stream, "aclose"):
- await self._stream.aclose()
- finally:
+ if not self._closed:
+ self._closed = True
with AsyncShieldCancellation():
- await self._pool.response_closed(self._status)
+ if hasattr(self._stream, "aclose"):
+ await self._stream.aclose()
+
+ with self._pool._optional_thread_lock:
+ self._pool._requests.remove(self._pool_request)
+ closing = self._pool._assign_requests_to_connections()
+
+ await self._pool._close_connections(closing)
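After this rework all pool bookkeeping funnels through `_assign_requests_to_connections()` under the optional thread lock, and the new `__repr__` summarizes queue and connection state. A short usage sketch against the public API (the printed repr is an example, not a guaranteed value):

```python
import asyncio
import httpcore

async def main() -> None:
    async with httpcore.AsyncConnectionPool() as pool:
        response = await pool.request("GET", "https://www.example.com/")
        print(response.status)
        # e.g. <AsyncConnectionPool [Requests: 0 active, 0 queued | Connections: 0 active, 1 idle]>
        print(pool)

asyncio.run(main())
```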
diff --git a/contrib/python/httpcore/httpcore/_async/http11.py b/contrib/python/httpcore/httpcore/_async/http11.py
index 32fa3a6f23..a5eb480840 100644
--- a/contrib/python/httpcore/httpcore/_async/http11.py
+++ b/contrib/python/httpcore/httpcore/_async/http11.py
@@ -10,7 +10,6 @@ from typing import (
Tuple,
Type,
Union,
- cast,
)
import h11
@@ -228,7 +227,7 @@ class AsyncHTTP11Connection(AsyncConnectionInterface):
self._h11_state.receive_data(data)
else:
# mypy fails to narrow the type in the if statement above
- return cast(Union[h11.Event, Type[h11.PAUSED]], event)
+ return event # type: ignore[return-value]
async def _response_closed(self) -> None:
async with self._state_lock:
diff --git a/contrib/python/httpcore/httpcore/_async/socks_proxy.py b/contrib/python/httpcore/httpcore/_async/socks_proxy.py
index 08a065d6d1..f839603fe5 100644
--- a/contrib/python/httpcore/httpcore/_async/socks_proxy.py
+++ b/contrib/python/httpcore/httpcore/_async/socks_proxy.py
@@ -228,7 +228,7 @@ class AsyncSocks5Connection(AsyncConnectionInterface):
"port": self._proxy_origin.port,
"timeout": timeout,
}
- with Trace("connect_tcp", logger, request, kwargs) as trace:
+ async with Trace("connect_tcp", logger, request, kwargs) as trace:
stream = await self._network_backend.connect_tcp(**kwargs)
trace.return_value = stream
@@ -239,7 +239,7 @@ class AsyncSocks5Connection(AsyncConnectionInterface):
"port": self._remote_origin.port,
"auth": self._proxy_auth,
}
- with Trace(
+ async with Trace(
"setup_socks5_connection", logger, request, kwargs
) as trace:
await _init_socks5_connection(**kwargs)
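These two fixes matter because `Trace` supports both the sync and async context manager protocols; inside `async def` code the `async with` form is required for the async hooks to run. A generic illustration of such a dual-protocol object (not the actual `Trace` implementation):

```python
import asyncio

class DualContext:
    def __enter__(self):  # used by `with`
        return self

    def __exit__(self, *exc_info):
        return None

    async def __aenter__(self):  # used by `async with`
        return self

    async def __aexit__(self, *exc_info):
        return None

async def main() -> None:
    async with DualContext():  # the form the patch switches to
        pass

asyncio.run(main())
```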
diff --git a/contrib/python/httpcore/httpcore/_sync/connection.py b/contrib/python/httpcore/httpcore/_sync/connection.py
index 81e4172a21..c3890f340c 100644
--- a/contrib/python/httpcore/httpcore/_sync/connection.py
+++ b/contrib/python/httpcore/httpcore/_sync/connection.py
@@ -6,7 +6,7 @@ from typing import Iterable, Iterator, Optional, Type
from .._backends.sync import SyncBackend
from .._backends.base import SOCKET_OPTION, NetworkBackend, NetworkStream
-from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout
+from .._exceptions import ConnectError, ConnectTimeout
from .._models import Origin, Request, Response
from .._ssl import default_ssl_context
from .._synchronization import Lock
@@ -70,9 +70,9 @@ class HTTPConnection(ConnectionInterface):
f"Attempted to send request to {request.url.origin} on connection to {self._origin}"
)
- with self._request_lock:
- if self._connection is None:
- try:
+ try:
+ with self._request_lock:
+ if self._connection is None:
stream = self._connect(request)
ssl_object = stream.get_extra_info("ssl_object")
@@ -94,11 +94,9 @@ class HTTPConnection(ConnectionInterface):
stream=stream,
keepalive_expiry=self._keepalive_expiry,
)
- except Exception as exc:
- self._connect_failed = True
- raise exc
- elif not self._connection.is_available():
- raise ConnectionNotAvailable()
+ except BaseException as exc:
+ self._connect_failed = True
+ raise exc
return self._connection.handle_request(request)
@@ -137,7 +135,7 @@ class HTTPConnection(ConnectionInterface):
)
trace.return_value = stream
- if self._origin.scheme == b"https":
+ if self._origin.scheme in (b"https", b"wss"):
ssl_context = (
default_ssl_context()
if self._ssl_context is None
diff --git a/contrib/python/httpcore/httpcore/_sync/connection_pool.py b/contrib/python/httpcore/httpcore/_sync/connection_pool.py
index ccfb8d2220..8dcf348cac 100644
--- a/contrib/python/httpcore/httpcore/_sync/connection_pool.py
+++ b/contrib/python/httpcore/httpcore/_sync/connection_pool.py
@@ -1,31 +1,30 @@
import ssl
import sys
-import time
from types import TracebackType
from typing import Iterable, Iterator, List, Optional, Type
from .._backends.sync import SyncBackend
from .._backends.base import SOCKET_OPTION, NetworkBackend
-from .._exceptions import ConnectionNotAvailable, PoolTimeout, UnsupportedProtocol
+from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol
from .._models import Origin, Request, Response
-from .._synchronization import Event, Lock, ShieldCancellation
+from .._synchronization import Event, ShieldCancellation, ThreadLock
from .connection import HTTPConnection
from .interfaces import ConnectionInterface, RequestInterface
-class RequestStatus:
- def __init__(self, request: Request):
+class PoolRequest:
+ def __init__(self, request: Request) -> None:
self.request = request
self.connection: Optional[ConnectionInterface] = None
self._connection_acquired = Event()
- def set_connection(self, connection: ConnectionInterface) -> None:
- assert self.connection is None
+ def assign_to_connection(
+ self, connection: Optional[ConnectionInterface]
+ ) -> None:
self.connection = connection
self._connection_acquired.set()
- def unset_connection(self) -> None:
- assert self.connection is not None
+ def clear_connection(self) -> None:
self.connection = None
self._connection_acquired = Event()
@@ -37,6 +36,9 @@ class RequestStatus:
assert self.connection is not None
return self.connection
+ def is_queued(self) -> bool:
+ return self.connection is None
+
class ConnectionPool(RequestInterface):
"""
@@ -107,14 +109,21 @@ class ConnectionPool(RequestInterface):
self._local_address = local_address
self._uds = uds
- self._pool: List[ConnectionInterface] = []
- self._requests: List[RequestStatus] = []
- self._pool_lock = Lock()
self._network_backend = (
SyncBackend() if network_backend is None else network_backend
)
self._socket_options = socket_options
+ # The mutable state on a connection pool is the queue of incoming requests,
+ # and the set of connections that are servicing those requests.
+ self._connections: List[ConnectionInterface] = []
+ self._requests: List[PoolRequest] = []
+
+ # We only mutate the state of the connection pool within an 'optional_thread_lock'
+ # context. This holds a threading lock unless we're running in async mode,
+ # in which case it is a no-op.
+ self._optional_thread_lock = ThreadLock()
+
def create_connection(self, origin: Origin) -> ConnectionInterface:
return HTTPConnection(
origin=origin,
@@ -145,64 +154,7 @@ class ConnectionPool(RequestInterface):
]
```
"""
- return list(self._pool)
-
- def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool:
- """
- Attempt to provide a connection that can handle the given origin.
- """
- origin = status.request.url.origin
-
- # If there are queued requests in front of us, then don't acquire a
- # connection. We handle requests strictly in order.
- waiting = [s for s in self._requests if s.connection is None]
- if waiting and waiting[0] is not status:
- return False
-
- # Reuse an existing connection if one is currently available.
- for idx, connection in enumerate(self._pool):
- if connection.can_handle_request(origin) and connection.is_available():
- self._pool.pop(idx)
- self._pool.insert(0, connection)
- status.set_connection(connection)
- return True
-
- # If the pool is currently full, attempt to close one idle connection.
- if len(self._pool) >= self._max_connections:
- for idx, connection in reversed(list(enumerate(self._pool))):
- if connection.is_idle():
- connection.close()
- self._pool.pop(idx)
- break
-
- # If the pool is still full, then we cannot acquire a connection.
- if len(self._pool) >= self._max_connections:
- return False
-
- # Otherwise create a new connection.
- connection = self.create_connection(origin)
- self._pool.insert(0, connection)
- status.set_connection(connection)
- return True
-
- def _close_expired_connections(self) -> None:
- """
- Clean up the connection pool by closing off any connections that have expired.
- """
- # Close any connections that have expired their keep-alive time.
- for idx, connection in reversed(list(enumerate(self._pool))):
- if connection.has_expired():
- connection.close()
- self._pool.pop(idx)
-
- # If the pool size exceeds the maximum number of allowed keep-alive connections,
- # then close off idle connections as required.
- pool_size = len(self._pool)
- for idx, connection in reversed(list(enumerate(self._pool))):
- if connection.is_idle() and pool_size > self._max_keepalive_connections:
- connection.close()
- self._pool.pop(idx)
- pool_size -= 1
+ return list(self._connections)
def handle_request(self, request: Request) -> Response:
"""
@@ -220,116 +172,147 @@ class ConnectionPool(RequestInterface):
f"Request URL has an unsupported protocol '{scheme}://'."
)
- status = RequestStatus(request)
timeouts = request.extensions.get("timeout", {})
timeout = timeouts.get("pool", None)
- if timeout is not None:
- deadline = time.monotonic() + timeout
- else:
- deadline = float("inf")
-
- with self._pool_lock:
- self._requests.append(status)
- self._close_expired_connections()
- self._attempt_to_acquire_connection(status)
-
- while True:
- try:
- connection = status.wait_for_connection(timeout=timeout)
- except BaseException as exc:
- # If we timeout here, or if the task is cancelled, then make
- # sure to remove the request from the queue before bubbling
- # up the exception.
- with self._pool_lock:
- # Ensure only remove when task exists.
- if status in self._requests:
- self._requests.remove(status)
- raise exc
-
- try:
- response = connection.handle_request(request)
- except ConnectionNotAvailable:
- # The ConnectionNotAvailable exception is a special case, that
- # indicates we need to retry the request on a new connection.
- #
- # The most common case where this can occur is when multiple
- # requests are queued waiting for a single connection, which
- # might end up as an HTTP/2 connection, but which actually ends
- # up as HTTP/1.1.
- with self._pool_lock:
- # Maintain our position in the request queue, but reset the
- # status so that the request becomes queued again.
- status.unset_connection()
- self._attempt_to_acquire_connection(status)
- except BaseException as exc:
- with ShieldCancellation():
- self.response_closed(status)
- raise exc
- else:
- break
-
- timeout = deadline - time.monotonic()
- if timeout < 0:
- raise PoolTimeout # pragma: nocover
-
- # When we return the response, we wrap the stream in a special class
- # that handles notifying the connection pool once the response
- # has been released.
+ with self._optional_thread_lock:
+ # Add the incoming request to our request queue.
+ pool_request = PoolRequest(request)
+ self._requests.append(pool_request)
+
+ try:
+ while True:
+ with self._optional_thread_lock:
+ # Assign incoming requests to available connections,
+ # closing or creating new connections as required.
+ closing = self._assign_requests_to_connections()
+ self._close_connections(closing)
+
+ # Wait until this request has an assigned connection.
+ connection = pool_request.wait_for_connection(timeout=timeout)
+
+ try:
+ # Send the request on the assigned connection.
+ response = connection.handle_request(
+ pool_request.request
+ )
+ except ConnectionNotAvailable:
+ # In some cases a connection may initially be available to
+ # handle a request, but then become unavailable.
+ #
+ # In this case we clear the connection and try again.
+ pool_request.clear_connection()
+ else:
+ break # pragma: nocover
+
+ except BaseException as exc:
+ with self._optional_thread_lock:
+ # For any exception or cancellation we remove the request from
+ # the queue, and then re-assign requests to connections.
+ self._requests.remove(pool_request)
+ closing = self._assign_requests_to_connections()
+
+ self._close_connections(closing)
+ raise exc from None
+
+ # Return the response. Note that in this case we still have to manage
+ # the point at which the response is closed.
assert isinstance(response.stream, Iterable)
return Response(
status=response.status,
headers=response.headers,
- content=ConnectionPoolByteStream(response.stream, self, status),
+ content=PoolByteStream(
+ stream=response.stream, pool_request=pool_request, pool=self
+ ),
extensions=response.extensions,
)
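+ # Usage sketch (illustrative, not part of this change; assumes a
+ # pre-built httpcore Request):
+ #
+ #     pool = ConnectionPool()
+ #     response = pool.handle_request(request)
+ #     body = b"".join(response.stream)  # iterating drives PoolByteStream
+ #     response.stream.close()           # releases the connection
+ #     pool.close()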
- def response_closed(self, status: RequestStatus) -> None:
+ def _assign_requests_to_connections(self) -> List[ConnectionInterface]:
"""
- This method acts as a callback once the request/response cycle is complete.
+ Manage the state of the connection pool, assigning incoming
+ requests to connections as they become available.
- It is called into from the `ConnectionPoolByteStream.close()` method.
- """
- assert status.connection is not None
- connection = status.connection
-
- with self._pool_lock:
- # Update the state of the connection pool.
- if status in self._requests:
- self._requests.remove(status)
-
- if connection.is_closed() and connection in self._pool:
- self._pool.remove(connection)
-
- # Since we've had a response closed, it's possible we'll now be able
- # to service one or more requests that are currently pending.
- for status in self._requests:
- if status.connection is None:
- acquired = self._attempt_to_acquire_connection(status)
- # If we could not acquire a connection for a queued request
- # then we don't need to check anymore requests that are
- # queued later behind it.
- if not acquired:
- break
-
- # Housekeeping.
- self._close_expired_connections()
+ Called whenever a new request is added or removed from the pool.
- def close(self) -> None:
+ Any closing connections are returned, allowing the I/O for closing
+ those connections to be handled separately.
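+
+ As an illustrative call pattern (mirroring handle_request above), the
+ caller holds the no-I/O lock while reassigning and performs the close
+ I/O outside it:
+
+     with self._optional_thread_lock:
+         closing = self._assign_requests_to_connections()
+     self._close_connections(closing)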
"""
- Close any connections in the pool.
- """
- with self._pool_lock:
- for connection in self._pool:
+ closing_connections = []
+
+ # First we handle cleaning up any connections that are closed,
+ # have expired their keep-alive, or surplus idle connections.
+ for connection in list(self._connections):
+ if connection.is_closed():
+ # log: "removing closed connection"
+ self._connections.remove(connection)
+ elif connection.has_expired():
+ # log: "closing expired connection"
+ self._connections.remove(connection)
+ closing_connections.append(connection)
+ elif (
+ connection.is_idle()
+ and sum(1 for c in self._connections if c.is_idle())
+ > self._max_keepalive_connections
+ ):
+ # log: "closing idle connection"
+ self._connections.remove(connection)
+ closing_connections.append(connection)
+
+ # Assign queued requests to connections.
+ queued_requests = [request for request in self._requests if request.is_queued()]
+ for pool_request in queued_requests:
+ origin = pool_request.request.url.origin
+ available_connections = [
+ connection
+ for connection in self._connections
+ if connection.can_handle_request(origin) and connection.is_available()
+ ]
+ idle_connections = [
+ connection for connection in self._connections if connection.is_idle()
+ ]
+
+ # There are three cases for how we may be able to handle the request:
+ #
+ # 1. There is an existing connection that can handle the request.
+ # 2. We can create a new connection to handle the request.
+ # 3. We can close an idle connection and then create a new connection
+ # to handle the request.
+ if available_connections:
+ # log: "reusing existing connection"
+ connection = available_connections[0]
+ pool_request.assign_to_connection(connection)
+ elif len(self._connections) < self._max_connections:
+ # log: "creating new connection"
+ connection = self.create_connection(origin)
+ self._connections.append(connection)
+ pool_request.assign_to_connection(connection)
+ elif idle_connections:
+ # log: "closing idle connection"
+ connection = idle_connections[0]
+ self._connections.remove(connection)
+ closing_connections.append(connection)
+ # log: "creating new connection"
+ connection = self.create_connection(origin)
+ self._connections.append(connection)
+ pool_request.assign_to_connection(connection)
+
+ return closing_connections
+
+ def _close_connections(self, closing: List[ConnectionInterface]) -> None:
+ # Close connections which have been removed from the pool.
+ with ShieldCancellation():
+ for connection in closing:
connection.close()
- self._pool = []
- self._requests = []
+
+ def close(self) -> None:
+ # Explicitly close the connection pool.
+ # Clears all existing requests and connections.
+ with self._optional_thread_lock:
+ closing_connections = list(self._connections)
+ self._connections = []
+ self._close_connections(closing_connections)
def __enter__(self) -> "ConnectionPool":
- # Acquiring the pool lock here ensures that we have the
- # correct dependencies installed as early as possible.
- with self._pool_lock:
- pass
return self
def __exit__(
@@ -340,31 +323,58 @@ class ConnectionPool(RequestInterface):
) -> None:
self.close()
+ def __repr__(self) -> str:
+ class_name = self.__class__.__name__
+ with self._optional_thread_lock:
+ request_is_queued = [request.is_queued() for request in self._requests]
+ connection_is_idle = [
+ connection.is_idle() for connection in self._connections
+ ]
+
+ num_active_requests = request_is_queued.count(False)
+ num_queued_requests = request_is_queued.count(True)
+ num_active_connections = connection_is_idle.count(False)
+ num_idle_connections = connection_is_idle.count(True)
+
+ requests_info = (
+ f"Requests: {num_active_requests} active, {num_queued_requests} queued"
+ )
+ connection_info = (
+ f"Connections: {num_active_connections} active, {num_idle_connections} idle"
+ )
+
+ return f"<{class_name} [{requests_info} | {connection_info}]>"
-class ConnectionPoolByteStream:
- """
- A wrapper around the response byte stream, that additionally handles
- notifying the connection pool when the response has been closed.
- """
+class PoolByteStream:
def __init__(
self,
stream: Iterable[bytes],
+ pool_request: PoolRequest,
pool: ConnectionPool,
- status: RequestStatus,
) -> None:
self._stream = stream
+ self._pool_request = pool_request
self._pool = pool
- self._status = status
+ self._closed = False
def __iter__(self) -> Iterator[bytes]:
- for part in self._stream:
- yield part
+ try:
+ for part in self._stream:
+ yield part
+ except BaseException as exc:
+ self.close()
+ raise exc from None
def close(self) -> None:
- try:
- if hasattr(self._stream, "close"):
- self._stream.close()
- finally:
+ if not self._closed:
+ self._closed = True
with ShieldCancellation():
- self._pool.response_closed(self._status)
+ if hasattr(self._stream, "close"):
+ self._stream.close()
+
+ with self._pool._optional_thread_lock:
+ self._pool._requests.remove(self._pool_request)
+ closing = self._pool._assign_requests_to_connections()
+
+ self._pool._close_connections(closing)
diff --git a/contrib/python/httpcore/httpcore/_sync/http11.py b/contrib/python/httpcore/httpcore/_sync/http11.py
index 0cc100e3ff..e108f88b12 100644
--- a/contrib/python/httpcore/httpcore/_sync/http11.py
+++ b/contrib/python/httpcore/httpcore/_sync/http11.py
@@ -10,7 +10,6 @@ from typing import (
Tuple,
Type,
Union,
- cast,
)
import h11
@@ -228,7 +227,7 @@ class HTTP11Connection(ConnectionInterface):
self._h11_state.receive_data(data)
else:
# mypy fails to narrow the type in the if statement above
- return cast(Union[h11.Event, Type[h11.PAUSED]], event)
+ return event # type: ignore[return-value]
def _response_closed(self) -> None:
with self._state_lock:
diff --git a/contrib/python/httpcore/httpcore/_synchronization.py b/contrib/python/httpcore/httpcore/_synchronization.py
index 119d89fc0d..9619a39835 100644
--- a/contrib/python/httpcore/httpcore/_synchronization.py
+++ b/contrib/python/httpcore/httpcore/_synchronization.py
@@ -45,6 +45,13 @@ def current_async_library() -> str:
class AsyncLock:
+ """
+ This is a standard lock.
+
+ In the sync case `Lock` provides thread locking.
+ In the async case `AsyncLock` provides async locking.
+ """
+
def __init__(self) -> None:
self._backend = ""
@@ -82,6 +89,26 @@ class AsyncLock:
self._anyio_lock.release()
+class AsyncThreadLock:
+ """
+ This is a threading-only lock for no-I/O contexts.
+
+ In the sync case `ThreadLock` provides thread locking.
+ In the async case `AsyncThreadLock` is a no-op.
+ """
+
+ def __enter__(self) -> "AsyncThreadLock":
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]] = None,
+ exc_value: Optional[BaseException] = None,
+ traceback: Optional[TracebackType] = None,
+ ) -> None:
+ pass
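+
+ # Minimal sketch (illustrative): callers take this lock unconditionally;
+ # under async it is a no-op because the guarded bookkeeping is
+ # non-blocking and runs on the event loop's single thread.
+ #
+ #     lock = AsyncThreadLock()
+ #     with lock:
+ #         shared_list.append(item)  # hypothetical shared state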
+
+
class AsyncEvent:
def __init__(self) -> None:
self._backend = ""
@@ -202,6 +229,13 @@ class AsyncShieldCancellation:
class Lock:
+ """
+ This is a standard lock.
+
+ In the sync case `Lock` provides thread locking.
+ In the async case `AsyncLock` provides async locking.
+ """
+
def __init__(self) -> None:
self._lock = threading.Lock()
@@ -218,6 +252,30 @@ class Lock:
self._lock.release()
+class ThreadLock:
+ """
+ This is a threading-only lock for no-I/O contexts.
+
+ In the sync case `ThreadLock` provides thread locking.
+ In the async case `AsyncThreadLock` is a no-op.
+ """
+
+ def __init__(self) -> None:
+ self._lock = threading.Lock()
+
+ def __enter__(self) -> "ThreadLock":
+ self._lock.acquire()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]] = None,
+ exc_value: Optional[BaseException] = None,
+ traceback: Optional[TracebackType] = None,
+ ) -> None:
+ self._lock.release()
+
+
class Event:
def __init__(self) -> None:
self._event = threading.Event()
diff --git a/contrib/python/httpcore/ya.make b/contrib/python/httpcore/ya.make
index de7a3ac5a2..e8408ed893 100644
--- a/contrib/python/httpcore/ya.make
+++ b/contrib/python/httpcore/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(1.0.2)
+VERSION(1.0.3)
LICENSE(BSD-3-Clause)
diff --git a/contrib/python/hypothesis/py3/.dist-info/METADATA b/contrib/python/hypothesis/py3/.dist-info/METADATA
index 04454aaec7..26ae8b8481 100644
--- a/contrib/python/hypothesis/py3/.dist-info/METADATA
+++ b/contrib/python/hypothesis/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: hypothesis
-Version: 6.97.4
+Version: 6.98.8
Summary: A library for property-based testing
Home-page: https://hypothesis.works
Author: David R. MacIver and Zac Hatfield-Dodds
@@ -53,7 +53,7 @@ Requires-Dist: pytz >=2014.1 ; extra == 'all'
Requires-Dist: redis >=3.0.0 ; extra == 'all'
Requires-Dist: rich >=9.0.0 ; extra == 'all'
Requires-Dist: backports.zoneinfo >=0.2.1 ; (python_version < "3.9") and extra == 'all'
-Requires-Dist: tzdata >=2023.4 ; (sys_platform == "win32") and extra == 'all'
+Requires-Dist: tzdata >=2024.1 ; (sys_platform == "win32" or sys_platform == "emscripten") and extra == 'all'
Provides-Extra: cli
Requires-Dist: click >=7.0 ; extra == 'cli'
Requires-Dist: black >=19.10b0 ; extra == 'cli'
@@ -82,7 +82,7 @@ Provides-Extra: redis
Requires-Dist: redis >=3.0.0 ; extra == 'redis'
Provides-Extra: zoneinfo
Requires-Dist: backports.zoneinfo >=0.2.1 ; (python_version < "3.9") and extra == 'zoneinfo'
-Requires-Dist: tzdata >=2023.4 ; (sys_platform == "win32") and extra == 'zoneinfo'
+Requires-Dist: tzdata >=2024.1 ; (sys_platform == "win32" or sys_platform == "emscripten") and extra == 'zoneinfo'
==========
Hypothesis
diff --git a/contrib/python/hypothesis/py3/hypothesis/control.py b/contrib/python/hypothesis/py3/hypothesis/control.py
index 92b26069c5..ea085b0b55 100644
--- a/contrib/python/hypothesis/py3/hypothesis/control.py
+++ b/contrib/python/hypothesis/py3/hypothesis/control.py
@@ -10,8 +10,10 @@
import inspect
import math
+import random
from collections import defaultdict
-from typing import NoReturn, Union
+from contextlib import contextmanager
+from typing import Any, NoReturn, Union
from weakref import WeakKeyDictionary
from hypothesis import Verbosity, settings
@@ -19,6 +21,7 @@ from hypothesis._settings import note_deprecation
from hypothesis.errors import InvalidArgument, UnsatisfiedAssumption
from hypothesis.internal.compat import BaseExceptionGroup
from hypothesis.internal.conjecture.data import ConjectureData
+from hypothesis.internal.observability import TESTCASE_CALLBACKS
from hypothesis.internal.reflection import get_pretty_function_description
from hypothesis.internal.validation import check_type
from hypothesis.reporting import report, verbose_report
@@ -26,8 +29,9 @@ from hypothesis.utils.dynamicvariables import DynamicVariable
from hypothesis.vendor.pretty import IDKey, pretty
-def _calling_function_name(frame):
- return frame.f_back.f_code.co_name
+def _calling_function_location(what: str, frame: Any) -> str:
+ where = frame.f_back
+ return f"{what}() in {where.f_code.co_name} (line {where.f_lineno})"
def reject() -> NoReturn:
@@ -37,8 +41,11 @@ def reject() -> NoReturn:
since="2023-09-25",
has_codemod=False,
)
- f = _calling_function_name(inspect.currentframe())
- raise UnsatisfiedAssumption(f"reject() in {f}")
+ where = _calling_function_location("reject", inspect.currentframe())
+ if currently_in_test_context():
+ count = current_build_context().data._observability_predicates[where]
+ count["unsatisfied"] += 1
+ raise UnsatisfiedAssumption(where)
def assume(condition: object) -> bool:
@@ -54,9 +61,13 @@ def assume(condition: object) -> bool:
since="2023-09-25",
has_codemod=False,
)
- if not condition:
- f = _calling_function_name(inspect.currentframe())
- raise UnsatisfiedAssumption(f"failed to satisfy assume() in {f}")
+ if TESTCASE_CALLBACKS or not condition:
+ where = _calling_function_location("assume", inspect.currentframe())
+ if TESTCASE_CALLBACKS and currently_in_test_context():
+ predicates = current_build_context().data._observability_predicates
+ predicates[where]["satisfied" if condition else "unsatisfied"] += 1
+ if not condition:
+ raise UnsatisfiedAssumption(f"failed to satisfy {where}")
return True
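+
+# Illustrative use of the observability counters above: when TESTCASE_CALLBACKS
+# is active, each assume() call site accumulates satisfied/unsatisfied counts.
+#
+#     @given(st.integers())
+#     def test_halves(x):
+#         assume(x % 2 == 0)      # recorded per call site
+#         assert (x // 2) * 2 == x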
@@ -82,6 +93,38 @@ def current_build_context() -> "BuildContext":
return context
+class RandomSeeder:
+ def __init__(self, seed):
+ self.seed = seed
+
+ def __repr__(self):
+ return f"RandomSeeder({self.seed!r})"
+
+
+class _Checker:
+ def __init__(self) -> None:
+ self.saw_global_random = False
+
+ def __call__(self, x):
+ self.saw_global_random |= isinstance(x, RandomSeeder)
+ return x
+
+
+@contextmanager
+def deprecate_random_in_strategy(fmt, *args):
+ _global_rand_state = random.getstate()
+ yield (checker := _Checker())
+ if _global_rand_state != random.getstate() and not checker.saw_global_random:
+ # raise InvalidDefinition
+ note_deprecation(
+ "Do not use the `random` module inside strategies; instead "
+ "consider `st.randoms()`, `st.sampled_from()`, etc. " + fmt.format(*args),
+ since="2024-02-05",
+ has_codemod=False,
+ stacklevel=1,
+ )
+
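+# Usage sketch (illustrative; mirrors the kwargs draw loop in BuildContext
+# below):
+#
+#     with deprecate_random_in_strategy("from {}={!r}", k, s) as check:
+#         obj = check(data.draw(s, observe_as=f"generate:{k}"))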
+
class BuildContext:
def __init__(self, data, *, is_final=False, close_on_capture=True):
assert isinstance(data, ConjectureData)
@@ -110,7 +153,8 @@ class BuildContext:
kwargs = {}
for k, s in kwarg_strategies.items():
start_idx = self.data.index
- obj = self.data.draw(s, observe_as=f"generate:{k}")
+ with deprecate_random_in_strategy("from {}={!r}", k, s) as check:
+ obj = check(self.data.draw(s, observe_as=f"generate:{k}"))
end_idx = self.data.index
kwargs[k] = obj
diff --git a/contrib/python/hypothesis/py3/hypothesis/core.py b/contrib/python/hypothesis/py3/hypothesis/core.py
index 601c33e176..e000da174f 100644
--- a/contrib/python/hypothesis/py3/hypothesis/core.py
+++ b/contrib/python/hypothesis/py3/hypothesis/core.py
@@ -91,6 +91,7 @@ from hypothesis.internal.healthcheck import fail_health_check
from hypothesis.internal.observability import (
OBSERVABILITY_COLLECT_COVERAGE,
TESTCASE_CALLBACKS,
+ _system_metadata,
deliver_json_blob,
make_testcase,
)
@@ -1066,7 +1067,6 @@ class StateForActualGivenExecution:
string_repr=self._string_repr,
arguments={**self._jsonable_arguments, **data._observability_args},
timing=self._timing_features,
- metadata={},
coverage=tractable_coverage_report(trace) or None,
)
deliver_json_blob(tc)
@@ -1195,7 +1195,11 @@ class StateForActualGivenExecution:
},
"timing": self._timing_features,
"coverage": None, # Not recorded when we're replaying the MFE
- "metadata": {"traceback": tb},
+ "metadata": {
+ "traceback": tb,
+ "predicates": ran_example._observability_predicates,
+ **_system_metadata(),
+ },
}
deliver_json_blob(tc)
# Whether or not replay actually raised the exception again, we want
diff --git a/contrib/python/hypothesis/py3/hypothesis/extra/_patching.py b/contrib/python/hypothesis/py3/hypothesis/extra/_patching.py
index 49b15ebc37..8f53076d72 100644
--- a/contrib/python/hypothesis/py3/hypothesis/extra/_patching.py
+++ b/contrib/python/hypothesis/py3/hypothesis/extra/_patching.py
@@ -86,7 +86,7 @@ class AddExamplesCodemod(VisitorBasedCodemodCommand):
# Codemod the failing examples to Call nodes usable as decorators
self.fn_examples = {
- k: tuple(self.__call_node_to_example_dec(ex, via) for ex, via in nodes)
+ k: tuple(d for x in nodes if (d := self.__call_node_to_example_dec(*x)))
for k, nodes in fn_examples.items()
}
@@ -94,16 +94,20 @@ class AddExamplesCodemod(VisitorBasedCodemodCommand):
# If we have black installed, remove trailing comma, _unless_ there's a comment
node = node.with_changes(
func=self.decorator_func,
- args=[
- a.with_changes(
- comma=a.comma
- if m.findall(a.comma, m.Comment())
- else cst.MaybeSentinel.DEFAULT
- )
- for a in node.args
- ]
- if black
- else node.args,
+ args=(
+ [
+ a.with_changes(
+ comma=(
+ a.comma
+ if m.findall(a.comma, m.Comment())
+ else cst.MaybeSentinel.DEFAULT
+ )
+ )
+ for a in node.args
+ ]
+ if black
+ else node.args
+ ),
)
# Note: calling a method on a decorator requires PEP-614, i.e. Python 3.9+,
# but plumbing two cases through doesn't seem worth the trouble :-/
@@ -112,10 +116,13 @@ class AddExamplesCodemod(VisitorBasedCodemodCommand):
args=[cst.Arg(cst.SimpleString(repr(via)))],
)
if black: # pragma: no branch
- pretty = black.format_str(
- cst.Module([]).code_for_node(via),
- mode=black.FileMode(line_length=self.line_length),
- )
+ try:
+ pretty = black.format_str(
+ cst.Module([]).code_for_node(via),
+ mode=black.FileMode(line_length=self.line_length),
+ )
+ except ImportError:
+ return None # See https://github.com/psf/black/pull/4224
via = cst.parse_expression(pretty.strip())
return cst.Decorator(via)
diff --git a/contrib/python/hypothesis/py3/hypothesis/extra/codemods.py b/contrib/python/hypothesis/py3/hypothesis/extra/codemods.py
index b2828c31c3..3de0580ada 100644
--- a/contrib/python/hypothesis/py3/hypothesis/extra/codemods.py
+++ b/contrib/python/hypothesis/py3/hypothesis/extra/codemods.py
@@ -218,9 +218,11 @@ class HypothesisFixPositionalKeywonlyArgs(VisitorBasedCodemodCommand):
whitespace_after=cst.SimpleWhitespace(""),
)
newargs = [
- arg
- if arg.keyword or arg.star or p.kind is not Parameter.KEYWORD_ONLY
- else arg.with_changes(keyword=cst.Name(p.name), equal=assign_nospace)
+ (
+ arg
+ if arg.keyword or arg.star or p.kind is not Parameter.KEYWORD_ONLY
+ else arg.with_changes(keyword=cst.Name(p.name), equal=assign_nospace)
+ )
for p, arg in zip(params, updated_node.args)
]
return updated_node.with_changes(args=newargs)
diff --git a/contrib/python/hypothesis/py3/hypothesis/extra/ghostwriter.py b/contrib/python/hypothesis/py3/hypothesis/extra/ghostwriter.py
index 68e6a85c29..8917d5bd87 100644
--- a/contrib/python/hypothesis/py3/hypothesis/extra/ghostwriter.py
+++ b/contrib/python/hypothesis/py3/hypothesis/extra/ghostwriter.py
@@ -482,7 +482,6 @@ def _get_params(func: Callable) -> Dict[str, inspect.Parameter]:
kind = inspect.Parameter.KEYWORD_ONLY
continue # we omit *varargs, if there are any
if _iskeyword(arg.lstrip("*")) or not arg.lstrip("*").isidentifier():
- print(repr(args))
break # skip all subsequent params if this name is invalid
params.append(inspect.Parameter(name=arg, kind=kind))
@@ -588,6 +587,8 @@ def _imports_for_object(obj):
"""Return the imports for `obj`, which may be empty for e.g. lambdas"""
if isinstance(obj, (re.Pattern, re.Match)):
return {"re"}
+ if isinstance(obj, st.SearchStrategy):
+ return _imports_for_strategy(obj)
try:
if is_generic_type(obj):
if isinstance(obj, TypeVar):
@@ -606,19 +607,19 @@ def _imports_for_strategy(strategy):
# If we have a lazy from_type strategy, because unwrapping it gives us an
# error or invalid syntax, import that type and we're done.
if isinstance(strategy, LazyStrategy):
- if strategy.function.__name__ in (
- st.from_type.__name__,
- st.from_regex.__name__,
- ):
- return {
- imp
- for arg in set(strategy._LazyStrategy__args)
- | set(strategy._LazyStrategy__kwargs.values())
- for imp in _imports_for_object(arg)
- }
+ imports = {
+ imp
+ for arg in set(strategy._LazyStrategy__args)
+ | set(strategy._LazyStrategy__kwargs.values())
+ for imp in _imports_for_object(_strip_typevars(arg))
+ }
+ if re.match(r"from_(type|regex)\(", repr(strategy)):
+ if repr(strategy).startswith("from_type("):
+ return {module for module, _ in imports}
+ return imports
elif _get_module(strategy.function).startswith("hypothesis.extra."):
module = _get_module(strategy.function).replace("._array_helpers", ".numpy")
- return {(module, strategy.function.__name__)}
+ return {(module, strategy.function.__name__)} | imports
imports = set()
with warnings.catch_warnings():
@@ -672,6 +673,9 @@ def _valid_syntax_repr(strategy):
if isinstance(strategy, OneOfStrategy):
seen = set()
elems = []
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", SmallSearchSpaceWarning)
+ strategy.element_strategies # might warn on first access
for s in strategy.element_strategies:
if isinstance(s, SampledFromStrategy) and s.elements == (os.environ,):
continue
@@ -694,7 +698,11 @@ def _valid_syntax_repr(strategy):
# Return a syntactically-valid strategy repr, including fixing some
# strategy reprs and replacing invalid syntax reprs with `"nothing()"`.
# String-replace to hide the special case in from_type() for Decimal('snan')
- r = repr(strategy).replace(".filter(_can_hash)", "")
+ r = (
+ repr(strategy)
+ .replace(".filter(_can_hash)", "")
+ .replace("hypothesis.strategies.", "")
+ )
# Replace <unknown> with ... in confusing lambdas
r = re.sub(r"(lambda.*?: )(<unknown>)([,)])", r"\1...\3", r)
compile(r, "<string>", "eval")
@@ -724,9 +732,10 @@ def _get_module_helper(obj):
dots = [i for i, c in enumerate(module_name) if c == "."] + [None]
for idx in dots:
- if getattr(sys.modules.get(module_name[:idx]), obj.__name__, None) is obj:
- KNOWN_FUNCTION_LOCATIONS[obj] = module_name[:idx]
- return module_name[:idx]
+ for candidate in (module_name[:idx].lstrip("_"), module_name[:idx]):
+ if getattr(sys.modules.get(candidate), obj.__name__, None) is obj:
+ KNOWN_FUNCTION_LOCATIONS[obj] = candidate
+ return candidate
return module_name
@@ -755,7 +764,7 @@ def _get_qualname(obj, *, include_module=False):
def _write_call(
- func: Callable, *pass_variables: str, except_: Except, assign: str = ""
+ func: Callable, *pass_variables: str, except_: Except = Exception, assign: str = ""
) -> str:
"""Write a call to `func` with explicit and implicit arguments.
@@ -770,9 +779,11 @@ def _write_call(
subtypes of `except_`, which will be handled in an outer try-except block.
"""
args = ", ".join(
- (v or p.name)
- if p.kind is inspect.Parameter.POSITIONAL_ONLY
- else f"{p.name}={v or p.name}"
+ (
+ (v or p.name)
+ if p.kind is inspect.Parameter.POSITIONAL_ONLY
+ else f"{p.name}={v or p.name}"
+ )
for v, p in zip_longest(pass_variables, _get_params(func).values())
)
call = f"{_get_qualname(func, include_module=True)}({args})"
@@ -998,6 +1009,9 @@ def _parameter_to_annotation(parameter: Any) -> Optional[_AnnotationData]:
else:
type_name = str(parameter)
+ if type_name.startswith("hypothesis.strategies."):
+ return _AnnotationData(type_name.replace("hypothesis.strategies", "st"), set())
+
origin_type = get_origin(parameter)
# if not generic or no generic arguments
@@ -1043,9 +1057,6 @@ def _make_test(imports: ImportSet, body: str) -> str:
# Discarding "builtins." and "__main__" probably isn't particularly useful
# for user code, but important for making a good impression in demos.
body = body.replace("builtins.", "").replace("__main__.", "")
- body = body.replace("hypothesis.strategies.", "st.")
- if "st.from_type(typing." in body:
- imports.add("typing")
imports |= {("hypothesis", "given"), ("hypothesis", "strategies as st")}
if " reject()\n" in body:
imports.add(("hypothesis", "reject"))
@@ -1258,11 +1269,29 @@ def magic(
hints = get_type_hints(func)
hints.pop("return", None)
params = _get_params(func)
- if len(hints) == len(params) == 2:
- a, b = hints.values()
+ if (len(hints) == len(params) == 2) or (
+ _get_module(func) == "operator"
+ and "item" not in func.__name__
+ and tuple(params) in [("a", "b"), ("x", "y")]
+ ):
+ a, b = hints.values() or [Any, Any]
arg1, arg2 = params
if a == b and len(arg1) == len(arg2) <= 3:
- make_(_make_binop_body, func, annotate=annotate)
+ # https://en.wikipedia.org/wiki/Distributive_property#Other_examples
+ known = {
+ "mul": "add",
+ "matmul": "add",
+ "or_": "and_",
+ "and_": "or_",
+ }.get(func.__name__, "")
+ distributes_over = getattr(sys.modules[_get_module(func)], known, None)
+ make_(
+ _make_binop_body,
+ func,
+ commutative=func.__name__ != "matmul",
+ distributes_over=distributes_over,
+ annotate=annotate,
+ )
del by_name[name]
# Look for Numpy ufuncs or gufuncs, and write array-oriented tests for them.
@@ -1467,10 +1496,17 @@ def roundtrip(
return _make_test(*_make_roundtrip_body(funcs, except_, style, annotate))
-def _make_equiv_body(funcs, except_, style, annotate):
+def _get_varnames(funcs):
var_names = [f"result_{f.__name__}" for f in funcs]
if len(set(var_names)) < len(var_names):
- var_names = [f"result_{i}_{ f.__name__}" for i, f in enumerate(funcs)]
+ var_names = [f"result_{f.__name__}_{_get_module(f)}" for f in funcs]
+ if len(set(var_names)) < len(var_names):
+ var_names = [f"result_{i}_{f.__name__}" for i, f in enumerate(funcs)]
+ return var_names
+
+
+def _make_equiv_body(funcs, except_, style, annotate):
+ var_names = _get_varnames(funcs)
test_lines = [
_write_call(f, assign=vname, except_=except_)
for vname, f in zip(var_names, funcs)
@@ -1510,10 +1546,7 @@ else:
def _make_equiv_errors_body(funcs, except_, style, annotate):
- var_names = [f"result_{f.__name__}" for f in funcs]
- if len(set(var_names)) < len(var_names):
- var_names = [f"result_{i}_{ f.__name__}" for i, f in enumerate(funcs)]
-
+ var_names = _get_varnames(funcs)
first, *rest = funcs
first_call = _write_call(first, assign=var_names[0], except_=except_)
extra_imports, suppress = _exception_string(except_)
@@ -1713,18 +1746,11 @@ def _make_binop_body(
maker(
"associative",
"abc",
+ _write_call(func, "a", _write_call(func, "b", "c"), assign="left"),
_write_call(
func,
- "a",
- _write_call(func, "b", "c", except_=Exception),
- except_=Exception,
- assign="left",
- ),
- _write_call(
- func,
- _write_call(func, "a", "b", except_=Exception),
+ _write_call(func, "a", "b"),
"c",
- except_=Exception,
assign="right",
),
)
@@ -1732,8 +1758,8 @@ def _make_binop_body(
maker(
"commutative",
"ab",
- _write_call(func, "a", "b", except_=Exception, assign="left"),
- _write_call(func, "b", "a", except_=Exception, assign="right"),
+ _write_call(func, "a", "b", assign="left"),
+ _write_call(func, "b", "a", assign="right"),
)
if identity is not None:
# Guess that the identity element is the minimal example from our operands
@@ -1755,34 +1781,42 @@ def _make_binop_body(
compile(repr(identity), "<string>", "exec")
except SyntaxError:
identity = repr(identity) # type: ignore
- maker(
- "identity",
- "a",
+ identity_parts = [
+ f"{identity = }",
_assert_eq(
style,
"a",
- _write_call(func, "a", repr(identity), except_=Exception),
+ _write_call(func, "a", "identity"),
),
- )
+ _assert_eq(
+ style,
+ "a",
+ _write_call(func, "identity", "a"),
+ ),
+ ]
+ maker("identity", "a", "\n".join(identity_parts))
if distributes_over:
- maker(
- distributes_over.__name__ + "_distributes_over",
- "abc",
+ do = distributes_over
+ dist_parts = [
+ _write_call(func, "a", _write_call(do, "b", "c"), assign="left"),
_write_call(
- distributes_over,
- _write_call(func, "a", "b", except_=Exception),
- _write_call(func, "a", "c", except_=Exception),
- except_=Exception,
- assign="left",
+ do,
+ _write_call(func, "a", "b"),
+ _write_call(func, "a", "c"),
+ assign="ldist",
),
+ _assert_eq(style, "ldist", "left"),
+ "\n",
+ _write_call(func, _write_call(do, "a", "b"), "c", assign="right"),
_write_call(
- func,
- "a",
- _write_call(distributes_over, "b", "c", except_=Exception),
- except_=Exception,
- assign="right",
+ do,
+ _write_call(func, "a", "c"),
+ _write_call(func, "b", "c"),
+ assign="rdist",
),
- )
+ _assert_eq(style, "rdist", "right"),
+ ]
+ maker(do.__name__ + "_distributes_over", "abc", "\n".join(dist_parts))
_, operands_repr = _valid_syntax_repr(operands)
operands_repr = _st_strategy_names(operands_repr)
diff --git a/contrib/python/hypothesis/py3/hypothesis/extra/pandas/impl.py b/contrib/python/hypothesis/py3/hypothesis/extra/pandas/impl.py
index 4801b1ad46..7b53a8be42 100644
--- a/contrib/python/hypothesis/py3/hypothesis/extra/pandas/impl.py
+++ b/contrib/python/hypothesis/py3/hypothesis/extra/pandas/impl.py
@@ -346,9 +346,11 @@ def series(
return pandas.Series(
(),
index=index,
- dtype=dtype
- if dtype is not None
- else draw(dtype_for_elements_strategy(elements)),
+ dtype=(
+ dtype
+ if dtype is not None
+ else draw(dtype_for_elements_strategy(elements))
+ ),
name=draw(name),
)
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py
index 8b8462d24d..cea40823be 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py
@@ -30,6 +30,7 @@ from typing import (
Set,
Tuple,
Type,
+ TypedDict,
TypeVar,
Union,
)
@@ -163,6 +164,8 @@ NASTY_FLOATS.extend([-x for x in NASTY_FLOATS])
FLOAT_INIT_LOGIC_CACHE = LRUReusedCache(4096)
+DRAW_STRING_DEFAULT_MAX_SIZE = 10**10 # "arbitrarily large"
+
class Example:
"""Examples track the hierarchical structure of draws from the byte stream,
@@ -794,6 +797,34 @@ global_test_counter = 0
MAX_DEPTH = 100
+class IntegerKWargs(TypedDict):
+ min_value: Optional[int]
+ max_value: Optional[int]
+ weights: Optional[Sequence[float]]
+ shrink_towards: int
+
+
+class FloatKWargs(TypedDict):
+ min_value: float
+ max_value: float
+ allow_nan: bool
+ smallest_nonzero_magnitude: float
+
+
+class StringKWargs(TypedDict):
+ intervals: IntervalSet
+ min_size: int
+ max_size: Optional[int]
+
+
+class BytesKWargs(TypedDict):
+ size: int
+
+
+class BooleanKWargs(TypedDict):
+ p: float
+
+
class DataObserver:
"""Observer class for recording the behaviour of a
ConjectureData object, primarily used for tracking
@@ -810,18 +841,34 @@ class DataObserver:
Note that this is called after ``freeze`` has completed.
"""
- def draw_bits(self, n_bits: int, *, forced: bool, value: int) -> None:
- """Called when ``draw_bits`` is called on on the
- observed ``ConjectureData``.
- * ``n_bits`` is the number of bits drawn.
- * ``forced`` is True if the corresponding
- draw was forced or ``False`` otherwise.
- * ``value`` is the result that ``draw_bits`` returned.
- """
-
def kill_branch(self) -> None:
"""Mark this part of the tree as not worth re-exploring."""
+ def draw_integer(
+ self, value: int, *, was_forced: bool, kwargs: IntegerKWargs
+ ) -> None:
+ pass
+
+ def draw_float(
+ self, value: float, *, was_forced: bool, kwargs: FloatKWargs
+ ) -> None:
+ pass
+
+ def draw_string(
+ self, value: str, *, was_forced: bool, kwargs: StringKWargs
+ ) -> None:
+ pass
+
+ def draw_bytes(
+ self, value: bytes, *, was_forced: bool, kwargs: BytesKWargs
+ ) -> None:
+ pass
+
+ def draw_boolean(
+ self, value: bool, *, was_forced: bool, kwargs: BooleanKWargs
+ ) -> None:
+ pass
+
@dataclass_transform()
@attr.s(slots=True)
@@ -995,7 +1042,7 @@ class PrimitiveProvider:
assert min_value is not None
assert max_value is not None
- sampler = Sampler(weights)
+ sampler = Sampler(weights, observe=False)
gap = max_value - shrink_towards
forced_idx = None
@@ -1023,7 +1070,7 @@ class PrimitiveProvider:
probe = shrink_towards + self._draw_unbounded_integer(
forced=None if forced is None else forced - shrink_towards
)
- self._cd.stop_example(discard=max_value < probe)
+ self._cd.stop_example()
return probe
if max_value is None:
@@ -1034,7 +1081,7 @@ class PrimitiveProvider:
probe = shrink_towards + self._draw_unbounded_integer(
forced=None if forced is None else forced - shrink_towards
)
- self._cd.stop_example(discard=probe < min_value)
+ self._cd.stop_example()
return probe
return self._draw_bounded_integer(
@@ -1091,7 +1138,7 @@ class PrimitiveProvider:
assert pos_clamper is not None
clamped = pos_clamper(result)
if clamped != result and not (math.isnan(result) and allow_nan):
- self._cd.stop_example(discard=True)
+ self._cd.stop_example()
self._cd.start_example(DRAW_FLOAT_LABEL)
self._draw_float(forced=clamped)
result = clamped
@@ -1113,7 +1160,7 @@ class PrimitiveProvider:
forced: Optional[str] = None,
) -> str:
if max_size is None:
- max_size = 10**10 # "arbitrarily large"
+ max_size = DRAW_STRING_DEFAULT_MAX_SIZE
assert forced is None or min_size <= len(forced) <= max_size
@@ -1129,6 +1176,7 @@ class PrimitiveProvider:
max_size=max_size,
average_size=average_size,
forced=None if forced is None else len(forced),
+ observe=False,
)
while elements.more():
forced_i: Optional[int] = None
@@ -1264,7 +1312,7 @@ class PrimitiveProvider:
probe = self._cd.draw_bits(
bits, forced=None if forced is None else abs(forced - center)
)
- self._cd.stop_example(discard=probe > gap)
+ self._cd.stop_example()
if above:
result = center + probe
@@ -1356,7 +1404,7 @@ class PrimitiveProvider:
]
nasty_floats = [f for f in NASTY_FLOATS + boundary_values if permitted(f)]
weights = [0.2 * len(nasty_floats)] + [0.8] * len(nasty_floats)
- sampler = Sampler(weights) if nasty_floats else None
+ sampler = Sampler(weights, observe=False) if nasty_floats else None
pos_clamper = neg_clamper = None
if sign_aware_lte(0.0, max_value):
@@ -1450,6 +1498,9 @@ class ConjectureData:
self.arg_slices: Set[Tuple[int, int]] = set()
self.slice_comments: Dict[Tuple[int, int], str] = {}
self._observability_args: Dict[str, Any] = {}
+ self._observability_predicates: defaultdict = defaultdict(
+ lambda: {"satisfied": 0, "unsatisfied": 0}
+ )
self.extra_information = ExtraInformation()
@@ -1462,6 +1513,19 @@ class ConjectureData:
", frozen" if self.frozen else "",
)
+ # A bit of explanation of the `observe` argument in our draw_* functions.
+ #
+ # There are two types of draws: sub-ir and super-ir. For instance, some ir
+ # nodes use `many`, which in turn calls draw_boolean. But some strategies
+ # also use many, at the super-ir level. We don't want to write sub-ir draws
+ # to the DataTree (and consequently use them when computing novel prefixes),
+ # since they are fully recorded by writing the ir node itself.
+ # But super-ir draws are not included in the ir node, so we do want to write
+ # these to the tree.
+ #
+ # `observe` formalizes this distinction. The draw will only be written to
+ # the DataTree if observe is True.
+
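+ # For example (illustrative):
+ #
+ #     data.draw_boolean(0.5)                 # super-ir: written to the DataTree
+ #     data.draw_boolean(0.5, observe=False)  # sub-ir: not written
+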
def draw_integer(
self,
min_value: Optional[int] = None,
@@ -1471,6 +1535,7 @@ class ConjectureData:
weights: Optional[Sequence[float]] = None,
shrink_towards: int = 0,
forced: Optional[int] = None,
+ observe: bool = True,
) -> int:
# Validate arguments
if weights is not None:
@@ -1491,13 +1556,18 @@ class ConjectureData:
if forced is not None and max_value is not None:
assert forced <= max_value
- return self.provider.draw_integer(
- min_value=min_value,
- max_value=max_value,
- weights=weights,
- shrink_towards=shrink_towards,
- forced=forced,
- )
+ kwargs: IntegerKWargs = {
+ "min_value": min_value,
+ "max_value": max_value,
+ "weights": weights,
+ "shrink_towards": shrink_towards,
+ }
+ value = self.provider.draw_integer(**kwargs, forced=forced)
+ if observe:
+ self.observer.draw_integer(
+ value, was_forced=forced is not None, kwargs=kwargs
+ )
+ return value
def draw_float(
self,
@@ -1511,6 +1581,7 @@ class ConjectureData:
# width: Literal[16, 32, 64] = 64,
# exclude_min and exclude_max handled higher up,
forced: Optional[float] = None,
+ observe: bool = True,
) -> float:
assert smallest_nonzero_magnitude > 0
assert not math.isnan(min_value)
@@ -1518,15 +1589,22 @@ class ConjectureData:
if forced is not None:
assert allow_nan or not math.isnan(forced)
- assert math.isnan(forced) or min_value <= forced <= max_value
+ assert math.isnan(forced) or (
+ sign_aware_lte(min_value, forced) and sign_aware_lte(forced, max_value)
+ )
- return self.provider.draw_float(
- min_value=min_value,
- max_value=max_value,
- allow_nan=allow_nan,
- smallest_nonzero_magnitude=smallest_nonzero_magnitude,
- forced=forced,
- )
+ kwargs: FloatKWargs = {
+ "min_value": min_value,
+ "max_value": max_value,
+ "allow_nan": allow_nan,
+ "smallest_nonzero_magnitude": smallest_nonzero_magnitude,
+ }
+ value = self.provider.draw_float(**kwargs, forced=forced)
+ if observe:
+ self.observer.draw_float(
+ value, kwargs=kwargs, was_forced=forced is not None
+ )
+ return value
def draw_string(
self,
@@ -1535,19 +1613,44 @@ class ConjectureData:
min_size: int = 0,
max_size: Optional[int] = None,
forced: Optional[str] = None,
+ observe: bool = True,
) -> str:
assert forced is None or min_size <= len(forced)
- return self.provider.draw_string(
- intervals, min_size=min_size, max_size=max_size, forced=forced
- )
- def draw_bytes(self, size: int, *, forced: Optional[bytes] = None) -> bytes:
+ kwargs: StringKWargs = {
+ "intervals": intervals,
+ "min_size": min_size,
+ "max_size": max_size,
+ }
+ value = self.provider.draw_string(**kwargs, forced=forced)
+ if observe:
+ self.observer.draw_string(
+ value, kwargs=kwargs, was_forced=forced is not None
+ )
+ return value
+
+ def draw_bytes(
+ self,
+ # TODO move to min_size and max_size here.
+ size: int,
+ *,
+ forced: Optional[bytes] = None,
+ observe: bool = True,
+ ) -> bytes:
assert forced is None or len(forced) == size
assert size >= 0
- return self.provider.draw_bytes(size, forced=forced)
+ kwargs: BytesKWargs = {"size": size}
+ value = self.provider.draw_bytes(**kwargs, forced=forced)
+ if observe:
+ self.observer.draw_bytes(
+ value, kwargs=kwargs, was_forced=forced is not None
+ )
+ return value
- def draw_boolean(self, p: float = 0.5, *, forced: Optional[bool] = None) -> bool:
+ def draw_boolean(
+ self, p: float = 0.5, *, forced: Optional[bool] = None, observe: bool = True
+ ) -> bool:
# Internally, we treat probabilities lower than 1 / 2**64 as
# unconditionally false.
#
@@ -1558,7 +1661,13 @@ class ConjectureData:
if forced is False:
assert p < (1 - 2 ** (-64))
- return self.provider.draw_boolean(p, forced=forced)
+ kwargs: BooleanKWargs = {"p": p}
+ value = self.provider.draw_boolean(**kwargs, forced=forced)
+ if observe:
+ self.observer.draw_boolean(
+ value, kwargs=kwargs, was_forced=forced is not None
+ )
+ return value
def as_result(self) -> Union[ConjectureResult, _Overrun]:
"""Convert the result of running this test into
@@ -1575,9 +1684,11 @@ class ConjectureData:
examples=self.examples,
blocks=self.blocks,
output=self.output,
- extra_information=self.extra_information
- if self.extra_information.has_information()
- else None,
+ extra_information=(
+ self.extra_information
+ if self.extra_information.has_information()
+ else None
+ ),
has_discards=self.has_discards,
target_observations=self.target_observations,
tags=frozenset(self.tags),
@@ -1730,9 +1841,15 @@ class ConjectureData:
self.buffer = bytes(self.buffer)
self.observer.conclude_test(self.status, self.interesting_origin)
- def choice(self, values: Sequence[T], *, forced: Optional[T] = None) -> T:
+ def choice(
+ self,
+ values: Sequence[T],
+ *,
+ forced: Optional[T] = None,
+ observe: bool = True,
+ ) -> T:
forced_i = None if forced is None else values.index(forced)
- i = self.draw_integer(0, len(values) - 1, forced=forced_i)
+ i = self.draw_integer(0, len(values) - 1, forced=forced_i, observe=observe)
return values[i]
def draw_bits(self, n: int, *, forced: Optional[int] = None) -> int:
@@ -1769,7 +1886,6 @@ class ConjectureData:
buf = bytes(buf)
result = int_from_bytes(buf)
- self.observer.draw_bits(n, forced=forced is not None, value=result)
self.__example_record.draw_bits(n, forced)
initial = self.index
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py
index d82ed3ca67..a9a6e5b196 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py
@@ -8,17 +8,38 @@
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
+import itertools
+import math
+from typing import TYPE_CHECKING, List, Literal, Optional, Union
+
import attr
from hypothesis.errors import Flaky, HypothesisException, StopTest
+from hypothesis.internal import floats as flt
from hypothesis.internal.compat import int_to_bytes
from hypothesis.internal.conjecture.data import (
+ BooleanKWargs,
+ BytesKWargs,
ConjectureData,
DataObserver,
+ FloatKWargs,
+ IntegerKWargs,
Status,
- bits_to_bytes,
+ StringKWargs,
)
-from hypothesis.internal.conjecture.junkdrawer import IntList
+from hypothesis.internal.floats import count_between_floats, float_to_int, int_to_float
+
+if TYPE_CHECKING:
+ from typing import TypeAlias
+else:
+ TypeAlias = object
+
+IRType: TypeAlias = Union[int, str, bool, float, bytes]
+IRKWargsType: TypeAlias = Union[
+ IntegerKWargs, FloatKWargs, StringKWargs, BytesKWargs, BooleanKWargs
+]
+# this would be "IRTypeType", but that's just confusing.
+IRLiteralType: TypeAlias = Literal["integer", "string", "boolean", "float", "bytes"]
class PreviouslyUnseenBehaviour(HypothesisException):
@@ -51,12 +72,15 @@ class Branch:
"""Represents a transition where multiple choices can be made as to what
to draw."""
- bit_length = attr.ib()
+ kwargs = attr.ib()
+ ir_type = attr.ib()
children = attr.ib(repr=False)
@property
def max_children(self):
- return 1 << self.bit_length
+ max_children = compute_max_children(self.ir_type, self.kwargs)
+ assert max_children > 0
+ return max_children
@attr.s(slots=True, frozen=True)
@@ -67,61 +91,276 @@ class Conclusion:
interesting_origin = attr.ib()
+# The number of max children where, beyond this, it is practically impossible
+# for hypothesis to saturate / explore all children nodes in a reasonable time
+# frame. We use this to bail out of expensive max children computations early,
+# where the numbers involved are so large that we know they will be larger than
+# this number.
+#
+# Note that it's ok for us to underestimate the number of max children of a node
+# by using this. We just may think the node is exhausted when in fact it has more
+# possible children to be explored. This has the potential to finish generation
+# early due to exhausting the entire tree, but that is quite unlikely: (1) the
+# number of examples would have to be quite high, and (2) the tree would have to
+# contain only one or two nodes, or generate_novel_prefix would simply switch to
+# exploring another non-exhausted node.
+#
+# Also note that we may sometimes compute max children above this value. In other
+# words, this is *not* a hard maximum on the computed max children. It's the point
+# where further computation is not beneficial - but sometimes doing that computation
+# unconditionally is cheaper than estimating against this value.
+#
+# The one case where this may be detrimental is fuzzing, where the throughput of
+# examples is so high that it really may saturate important nodes. We'll cross
+# that bridge when we come to it.
+MAX_CHILDREN_EFFECTIVELY_INFINITE = 100_000
+
+
+def compute_max_children(ir_type, kwargs):
+ from hypothesis.internal.conjecture.data import DRAW_STRING_DEFAULT_MAX_SIZE
+
+ if ir_type == "integer":
+ min_value = kwargs["min_value"]
+ max_value = kwargs["max_value"]
+ weights = kwargs["weights"]
+
+ if min_value is None and max_value is None:
+ # full 128 bit range.
+ return 2**128 - 1
+ if min_value is not None and max_value is not None:
+ # count between min/max value.
+ n = max_value - min_value + 1
+ # remove any values with a zero probability of being drawn (weight=0).
+ if weights is not None:
+ n -= sum(weight == 0 for weight in weights)
+ return n
+
+ # hard case: only one bound was specified. Here we probe either upwards
+ # or downwards with our full 128 bit generation, but only half of these
+ # (plus one for the case of generating zero) result in a probe in the
+ # direction we want. ((2**128 - 1) // 2) + 1 == 2 ** 127
+ assert (min_value is None) ^ (max_value is None)
+ return 2**127
+ elif ir_type == "boolean":
+ p = kwargs["p"]
+ # probabilities of 0 or 1 (or effectively 0 or 1) only have one choice.
+ if p <= 2 ** (-64) or p >= (1 - 2 ** (-64)):
+ return 1
+ return 2
+ elif ir_type == "bytes":
+ return 2 ** (8 * kwargs["size"])
+ elif ir_type == "string":
+ min_size = kwargs["min_size"]
+ max_size = kwargs["max_size"]
+ intervals = kwargs["intervals"]
+
+ if max_size is None:
+ max_size = DRAW_STRING_DEFAULT_MAX_SIZE
+
+ if len(intervals) == 0:
+ # Special-case the empty alphabet to avoid an error in math.log(0).
+ # Only possibility is the empty string.
+ return 1
+
+ # We want to estimate if we're going to have more children than
+ # MAX_CHILDREN_EFFECTIVELY_INFINITE, without computing a potentially
+ # extremely expensive pow. We'll check if the number of strings in
+ # the largest string size alone is enough to put us over this limit.
+ # We'll also employ a trick of estimating against log, which is cheaper
+ # than computing a pow.
+ #
+ # x = max_size
+ # y = len(intervals)
+ # n = MAX_CHILDREN_EFFECTIVELY_INFINITE
+ #
+ # x**y > n
+ # <=> log(x**y) > log(n)
+ # <=> y * log(x) > log(n)
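+ #
+ # For example (illustrative): with max_size = 100 and len(intervals) = 1000,
+ # 100 * log(1000) ~= 690.8 > log(100_000) ~= 11.5, so this node is treated
+ # as having effectively infinitely many children.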
+
+ # avoid math.log(1) == 0 and incorrectly failing the below estimate,
+ # even when we definitely are too large.
+ if len(intervals) == 1:
+ definitely_too_large = max_size > MAX_CHILDREN_EFFECTIVELY_INFINITE
+ else:
+ definitely_too_large = max_size * math.log(len(intervals)) > math.log(
+ MAX_CHILDREN_EFFECTIVELY_INFINITE
+ )
+
+ if definitely_too_large:
+ return MAX_CHILDREN_EFFECTIVELY_INFINITE
+
+ # number of strings of length k, for each k in [min_size, max_size].
+ return sum(len(intervals) ** k for k in range(min_size, max_size + 1))
+
+ elif ir_type == "float":
+ return count_between_floats(kwargs["min_value"], kwargs["max_value"])
+
+ raise NotImplementedError(f"unhandled ir_type {ir_type}")
+
+
+# In theory, this is a strict superset of the functionality of compute_max_children;
+#
+# assert len(all_children(ir_type, kwargs)) == compute_max_children(ir_type, kwargs)
+#
+# In practice, we maintain two distinct implementations for efficiency and space
+# reasons. If you just need the number of children, it is cheaper to use
+# compute_max_children than to reify the list of children (only to immediately
+# throw it away).
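+#
+# For example (illustrative), for a small bounded integer node:
+#
+#     kwargs = {"min_value": 1, "max_value": 3,
+#               "weights": None, "shrink_towards": 0}
+#     compute_max_children("integer", kwargs)    # == 3
+#     list(all_children("integer", kwargs))      # == [1, 2, 3]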
+def all_children(ir_type, kwargs):
+ if ir_type == "integer":
+ min_value = kwargs["min_value"]
+ max_value = kwargs["max_value"]
+ weights = kwargs["weights"]
+ # it's a bit annoying (but completely feasible) to implement the cases
+ # other than "both sides bounded" here. We haven't needed to yet because
+ # in practice we don't struggle with unbounded integer generation.
+ assert min_value is not None
+ assert max_value is not None
+
+ if weights is None:
+ yield from range(min_value, max_value + 1)
+ else:
+ # skip any values with a corresponding weight of 0 (can never be drawn).
+ for weight, n in zip(weights, range(min_value, max_value + 1)):
+ if weight == 0:
+ continue
+ yield n
+
+ if ir_type == "boolean":
+ p = kwargs["p"]
+ if p <= 2 ** (-64):
+ yield False
+ elif p >= (1 - 2 ** (-64)):
+ yield True
+ else:
+ yield from [False, True]
+ if ir_type == "bytes":
+ size = kwargs["size"]
+ yield from (int_to_bytes(i, size) for i in range(2 ** (8 * size)))
+ if ir_type == "string":
+ min_size = kwargs["min_size"]
+ max_size = kwargs["max_size"]
+ intervals = kwargs["intervals"]
+
+ # written unidiomatically in order to handle the case of max_size=inf.
+ size = min_size
+ while size <= max_size:
+ for ords in itertools.product(intervals, repeat=size):
+ yield "".join(chr(n) for n in ords)
+ size += 1
+ if ir_type == "float":
+
+ def floats_between(a, b):
+ for n in range(float_to_int(a), float_to_int(b) + 1):
+ yield int_to_float(n)
+
+ min_value = kwargs["min_value"]
+ max_value = kwargs["max_value"]
+
+ if flt.is_negative(min_value):
+ if flt.is_negative(max_value):
+ # if both are negative, have to invert order
+ yield from floats_between(max_value, min_value)
+ else:
+ yield from floats_between(-0.0, min_value)
+ yield from floats_between(0.0, max_value)
+ else:
+ yield from floats_between(min_value, max_value)
+
+
@attr.s(slots=True)
class TreeNode:
- """Node in a tree that corresponds to previous interactions with
- a ``ConjectureData`` object according to some fixed test function.
-
- This is functionally a variant patricia trie.
- See https://en.wikipedia.org/wiki/Radix_tree for the general idea,
- but what this means in particular here is that we have a very deep
- but very lightly branching tree and rather than store this as a fully
- recursive structure we flatten prefixes and long branches into
- lists. This significantly compacts the storage requirements.
-
- A single ``TreeNode`` corresponds to a previously seen sequence
- of calls to ``ConjectureData`` which we have never seen branch,
- followed by a ``transition`` which describes what happens next.
"""
+ A node, or collection of directly descended nodes, in a DataTree.
+
+ We store the DataTree as a radix tree (https://en.wikipedia.org/wiki/Radix_tree),
+ which means that nodes that are the only child of their parent are collapsed
+ into their parent to save space.
+
+ Conceptually, you can unfold a single TreeNode storing n values in its lists
+ into a sequence of n nodes, each a child of the last. In other words,
+ (kwargs[i], values[i], ir_types[i]) corresponds to the single node at index
+ i.
+
+ Note that if a TreeNode represents a choice (i.e. the nodes cannot be compacted
+ via the radix tree definition), then its lists will be empty and it will
+ store a `Branch` representing that choice in its `transition`.
+
+ Examples
+ --------
+
+ Consider sequentially drawing a boolean, then an integer.
+
+ data.draw_boolean()
+ data.draw_integer(1, 3)
+
+ If we draw True and then 2, the tree may conceptually look like this.
+
+ ┌──────┐
+ │ root │
+ └──┬───┘
+ ┌──┴───┐
+ │ True │
+ └──┬───┘
+ ┌──┴───┐
+ │ 2 │
+ └──────┘
+
+ But since 2 is the only child of True, we will compact these nodes and store
+ them as a single TreeNode.
+
+ ┌──────┐
+ │ root │
+ └──┬───┘
+ ┌────┴──────┐
+ │ [True, 2] │
+ └───────────┘
+
+ If we then draw True and then 3, True will have multiple children and we
+ can no longer store this compacted representation. We would call split_at(0)
+ on the [True, 2] node to indicate that we need to add a choice at 0-index
+ node (True).
- # Records the previous sequence of calls to ``data.draw_bits``,
- # with the ``n_bits`` argument going in ``bit_lengths`` and the
- # values seen in ``values``. These should always have the same
- # length.
- bit_lengths = attr.ib(factory=IntList)
- values = attr.ib(factory=IntList)
-
- # The indices of of the calls to ``draw_bits`` that we have stored
- # where ``forced`` is not None. Stored as None if no indices
- # have been forced, purely for space saving reasons (we force
- # quite rarely).
- __forced = attr.ib(default=None, init=False)
-
- # What happens next after observing this sequence of calls.
- # Either:
+ ┌──────┐
+ │ root │
+ └──┬───┘
+ ┌──┴───┐
+ ┌─┤ True ├─┐
+ │ └──────┘ │
+ ┌─┴─┐ ┌─┴─┐
+ │ 2 │ │ 3 │
+ └───┘ └───┘
+ """
+
+ # The kwargs, value, and ir_types of the nodes stored here. These always
+ # have the same length. The values at index i belong to node i.
+ kwargs: List[IRKWargsType] = attr.ib(factory=list)
+ values: List[IRType] = attr.ib(factory=list)
+ ir_types: List[IRLiteralType] = attr.ib(factory=list)
+
+ # The indices of nodes which had forced values.
#
- # * ``None``, indicating we don't know yet.
- # * A ``Branch`` object indicating that there is a ``draw_bits``
- # call that we have seen take multiple outcomes there.
- # * A ``Conclusion`` object indicating that ``conclude_test``
- # was called here.
- transition = attr.ib(default=None)
-
- # A tree node is exhausted if every possible sequence of
- # draws below it has been explored. We store this information
- # on a field and update it when performing operations that
- # could change the answer.
+ # Stored as None if no indices have been forced, purely for space saving
+ # reasons (we force quite rarely).
+ __forced: Optional[set] = attr.ib(default=None, init=False)
+
+ # What happens next after drawing these nodes. (conceptually, "what is the
+ # child/children of the last node stored here").
#
- # A node may start exhausted, e.g. because it it leads
- # immediately to a conclusion, but can only go from
- # non-exhausted to exhausted when one of its children
- # becomes exhausted or it is marked as a conclusion.
+ # One of:
+ # - None (we don't know yet)
+ # - Branch (we have seen multiple possible outcomes here)
+ # - Conclusion (ConjectureData.conclude_test was called here)
+ # - Killed (this branch is valid and may even have children, but should not
+ # be explored when generating novel prefixes)
+ transition: Union[None, Branch, Conclusion, Killed] = attr.ib(default=None)
+
+ # A tree node is exhausted if every possible sequence of draws below it has
+ # been explored. We only update this when performing operations that could
+ # change the answer.
#
- # Therefore we only need to check whether we need to update
- # this field when the node is first created in ``split_at``
- # or when we have walked a path through this node to a
- # conclusion in ``TreeRecordingObserver``.
- is_exhausted = attr.ib(default=False, init=False)
+ # See also TreeNode.check_exhausted.
+ is_exhausted: bool = attr.ib(default=False, init=False)
@property
def forced(self):
@@ -130,17 +369,21 @@ class TreeNode:
return self.__forced
def mark_forced(self, i):
- """Note that the value at index ``i`` was forced."""
+ """
+ Note that the draw at node i was forced.
+ """
assert 0 <= i < len(self.values)
if self.__forced is None:
self.__forced = set()
self.__forced.add(i)
def split_at(self, i):
- """Splits the tree so that it can incorporate
- a decision at the ``draw_bits`` call corresponding
- to position ``i``, or raises ``Flaky`` if that was
- meant to be a forced node."""
+ """
+ Splits the tree so that it can incorporate a decision at the draw call
+ corresponding to the node at position i.
+
+ Raises Flaky if node i was forced.
+ """
if i in self.forced:
inconsistent_generation()
@@ -150,26 +393,58 @@ class TreeNode:
key = self.values[i]
child = TreeNode(
- bit_lengths=self.bit_lengths[i + 1 :],
+ ir_types=self.ir_types[i + 1 :],
+ kwargs=self.kwargs[i + 1 :],
values=self.values[i + 1 :],
transition=self.transition,
)
- self.transition = Branch(bit_length=self.bit_lengths[i], children={key: child})
+ self.transition = Branch(
+ kwargs=self.kwargs[i], ir_type=self.ir_types[i], children={key: child}
+ )
if self.__forced is not None:
child.__forced = {j - i - 1 for j in self.__forced if j > i}
self.__forced = {j for j in self.__forced if j < i}
child.check_exhausted()
+ del self.ir_types[i:]
del self.values[i:]
- del self.bit_lengths[i:]
- assert len(self.values) == len(self.bit_lengths) == i
+ del self.kwargs[i:]
+ assert len(self.values) == len(self.kwargs) == len(self.ir_types) == i
def check_exhausted(self):
- """Recalculates ``self.is_exhausted`` if necessary then returns
- it."""
+ """
+ Recalculates is_exhausted if necessary, and then returns it.
+
+ A node is exhausted if:
+ - Its transition is Conclusion or Killed
+ - It has the maximum number of children (i.e. we have found all of its
+ possible children), and all its children are exhausted
+
+ Therefore, we only need to compute this for a node when:
+ - We first create it in split_at
+ - We set its transition to either Conclusion or Killed
+ (TreeRecordingObserver.conclude_test or TreeRecordingObserver.kill_branch)
+ - We exhaust any of its children
+ """
+
if (
+ # a node cannot go from is_exhausted -> not is_exhausted.
not self.is_exhausted
- and len(self.forced) == len(self.values)
+ # if we don't know what happens after this node, we don't have
+ # enough information to tell if it's exhausted.
and self.transition is not None
+ # if there are still any nodes left which are the only child of their
+ # parent (len(self.values) > 0), then this TreeNode must be not
+ # exhausted, unless all of those nodes were forced.
+ #
+ # This is because we maintain an invariant of only adding nodes to
+ # DataTree which have at least 2 possible values, so we know that if
+ # they do not have any siblings that we still have more choices to
+ # discover.
+ #
+ # (We actually *do* currently add single-valued nodes to the tree,
+ # but immediately split them into a transition to avoid falsifying
+            # this check. This is a bit of a hack.)
+ and len(self.forced) == len(self.values)
):
if isinstance(self.transition, (Conclusion, Killed)):
self.is_exhausted = True
@@ -181,16 +456,159 @@ class TreeNode:
class DataTree:
- """Tracks the tree structure of a collection of ConjectureData
- objects, for use in ConjectureRunner."""
+ """
+ A DataTree tracks the structured history of draws in some test function,
+ across multiple ConjectureData objects.
+
+ This information is used by ConjectureRunner to generate novel prefixes of
+ this tree (see generate_novel_prefix). A novel prefix is a sequence of draws
+ which the tree has not seen before, and therefore the ConjectureRunner has
+ not generated as an input to the test function before.
+
+ DataTree tracks the following:
+
+ - Draws, at the ir level (with some ir_type, e.g. "integer")
+ - ConjectureData.draw_integer()
+ - ConjectureData.draw_float()
+ - ConjectureData.draw_string()
+ - ConjectureData.draw_boolean()
+ - ConjectureData.draw_bytes()
+ - Test conclusions (with some Status, e.g. Status.VALID)
+ - ConjectureData.conclude_test()
+
+ A DataTree is — surprise — a *tree*. A node in this tree is either a draw with
+ some value, a test conclusion with some Status, or a special `Killed` value,
+ which denotes that further draws may exist beyond this node but should not be
+ considered worth exploring when generating novel prefixes. A node is a leaf
+ iff it is a conclusion or Killed.
+
+ A branch from node A to node B indicates that we have previously seen some
+ sequence (a, b) of draws, where a and b are the values in nodes A and B.
+ Similar intuition holds for conclusion and Killed nodes.
+
+ Examples
+ --------
+
+ To see how a DataTree gets built through successive sets of draws, consider
+    the following code that calls through to some ConjectureData object `data`.
+ The first call can be either True or False, and the second call can be any
+ integer in the range [1, 3].
+
+ data.draw_boolean()
+ data.draw_integer(1, 3)
+
+ To start, the corresponding DataTree object is completely empty.
+
+ ┌──────┐
+ │ root │
+ └──────┘
+
+ We happen to draw True and then 2 in the above code. The tree tracks this.
+ (2 also connects to a child Conclusion node with Status.VALID since it's the
+ final draw in the code. I'll omit Conclusion nodes in diagrams for brevity.)
+
+ ┌──────┐
+ │ root │
+ └──┬───┘
+ ┌──┴───┐
+ │ True │
+ └──┬───┘
+ ┌──┴───┐
+ │ 2 │
+ └──────┘
+
+ This is a very boring tree so far! But now we happen to draw False and
+ then 1. This causes a split in the tree. Remember, DataTree tracks history
+ over all invocations of a function, not just one. The end goal is to know
+ what invocations haven't been tried yet, after all.
+
+ ┌──────┐
+ ┌───┤ root ├───┐
+ │ └──────┘ │
+ ┌──┴───┐ ┌─┴─────┐
+ │ True │ │ False │
+ └──┬───┘ └──┬────┘
+ ┌─┴─┐ ┌─┴─┐
+ │ 2 │ │ 1 │
+ └───┘ └───┘
+
+ If we were to ask DataTree for a novel prefix at this point, it might
+ generate any of (True, 1), (True, 3), (False, 2), or (False, 3).
+
+ Note that the novel prefix stops as soon as it generates a novel node. For
+ instance, if we had generated a novel prefix back when the tree was only
+ root -> True -> 2, we could have gotten any of (True, 1), (True, 3), or
+ (False). But we could *not* have gotten (False, n), because both False and
+ n were novel at that point, and we stop at the first novel node — False.
+
+ I won't belabor this example. Here's what the tree looks like when fully
+ explored:
+
+ ┌──────┐
+ ┌──────┤ root ├──────┐
+ │ └──────┘ │
+ ┌──┴───┐ ┌─┴─────┐
+ ┌──┤ True ├──┐ ┌───┤ False ├──┐
+ │ └──┬───┘ │ │ └──┬────┘ │
+ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐
+ │ 1 │ │ 2 │ │ 3 │ │ 1 │ │ 2 │ │ 3 │
+ └───┘ └───┘ └───┘ └───┘ └───┘ └───┘
+
+ You could imagine much more complicated trees than this arising in practice,
+ and indeed they do. In particular, the tree need not be balanced or 'nice'
+ like the tree above. For instance,
+
+ b = data.draw_boolean()
+ if b:
+ data.draw_integer(1, 3)
+
+ results in a tree with the entire right part lopped off, and False leading
+ straight to a conclusion node with Status.VALID. As another example,
+
+      n = data.draw_integer()
+ assume(n >= 3)
+ data.draw_string()
+
+ results in a tree with the 0, 1, and 2 nodes leading straight to a
+ conclusion node with Status.INVALID, and the rest branching off into all
+ the possibilities of draw_string.
+
+ Notes
+ -----
+
+ The above examples are slightly simplified and are intended to convey
+ intuition. In practice, there are some implementation details to be aware
+ of.
+
+ - In draw nodes, we store the kwargs used in addition to the value drawn.
+ E.g. the node corresponding to data.draw_float(min_value=1.0, max_value=1.5)
+ would store {"min_value": 1.0, "max_value": 1.5, ...} (default values for
+ other kwargs omitted).
+
+ The kwargs parameters have the potential to change both the range of
+ possible outputs of a node, and the probability distribution within that
+ range, so we need to use these when drawing in DataTree as well. We draw
+ values using these kwargs when (1) generating a novel value for a node
+ and (2) choosing a random child when traversing the tree.
+
+ - For space efficiency, rather than tracking the full tree structure, we
+ store DataTree as a radix tree. This is conceptually equivalent (radix
+ trees can always be "unfolded" to the full tree) but it means the internal
+ representation may differ in practice.
+
+ See TreeNode for more information.
+ """
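For intuition, the fully-explored example in the docstring above can be modeled with plain dicts: the six root-to-leaf paths are exactly the draw sequences the tree has seen. A minimal sketch, independent of the real TreeNode/Branch classes:

    tree = {True: {1: "VALID", 2: "VALID", 3: "VALID"},
            False: {1: "VALID", 2: "VALID", 3: "VALID"}}

    def paths(node, prefix=()):
        if not isinstance(node, dict):  # leaf, i.e. a Conclusion
            yield prefix
            return
        for value, child in node.items():
            yield from paths(child, prefix + (value,))

    assert sorted(paths(tree)) == [
        (False, 1), (False, 2), (False, 3),
        (True, 1), (True, 2), (True, 3),
    ]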
def __init__(self):
self.root = TreeNode()
+ self._children_cache = {}
@property
def is_exhausted(self):
- """Returns True if every possible node is dead and thus the language
- described must have been fully explored."""
+ """
+ Returns True if every node is exhausted, and therefore the tree has
+ been fully explored.
+ """
return self.root.is_exhausted
def generate_novel_prefix(self, random):
@@ -201,26 +619,43 @@ class DataTree:
for it to be uniform at random, but previous attempts to do that
have proven too expensive.
"""
+
assert not self.is_exhausted
novel_prefix = bytearray()
- def append_int(n_bits, value):
- novel_prefix.extend(int_to_bytes(value, bits_to_bytes(n_bits)))
+ def append_buf(buf):
+ novel_prefix.extend(buf)
current_node = self.root
while True:
assert not current_node.is_exhausted
- for i, (n_bits, value) in enumerate(
- zip(current_node.bit_lengths, current_node.values)
+ for i, (ir_type, kwargs, value) in enumerate(
+ zip(current_node.ir_types, current_node.kwargs, current_node.values)
):
if i in current_node.forced:
- append_int(n_bits, value)
+ if ir_type == "float":
+ value = int_to_float(value)
+ (_value, buf) = self._draw(
+ ir_type, kwargs, forced=value, random=random
+ )
+ append_buf(buf)
else:
+ attempts = 0
while True:
- k = random.getrandbits(n_bits)
- if k != value:
- append_int(n_bits, k)
+ if attempts <= 10:
+ (v, buf) = self._draw(ir_type, kwargs, random=random)
+ else:
+ (v, buf) = self._draw_from_cache(
+ ir_type, kwargs, key=id(current_node), random=random
+ )
+
+ if v != value:
+ append_buf(buf)
break
+ attempts += 1
+ self._reject_child(
+ ir_type, kwargs, child=v, key=id(current_node)
+ )
# We've now found a value that is allowed to
# vary, so what follows is not fixed.
return bytes(novel_prefix)
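The loop above captures a two-phase strategy: try cheap unconstrained draws a few times, then fall back to enumerating the remaining child space so we cannot spin forever on a densely-explored node. A hedged sketch of the same pattern in isolation (draw and all_children are illustrative stand-ins, not the hypothesis API):

    import random

    def draw_novel(seen, draw, all_children, max_random_attempts=10):
        for _ in range(max_random_attempts):
            v = draw()
            if v not in seen:
                return v
        for v in all_children():        # deterministic fallback
            if v not in seen:
                return v
        raise AssertionError("no novel child exists")

    rng = random.Random(0)
    seen = {0, 1, 2}
    v = draw_novel(seen, lambda: rng.randint(0, 5), lambda: range(6))
    assert v not in seen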
@@ -230,27 +665,37 @@ class DataTree:
return bytes(novel_prefix)
branch = current_node.transition
assert isinstance(branch, Branch)
- n_bits = branch.bit_length
- check_counter = 0
+ attempts = 0
while True:
- k = random.getrandbits(n_bits)
+ if attempts <= 10:
+ (v, buf) = self._draw(
+ branch.ir_type, branch.kwargs, random=random
+ )
+ else:
+ (v, buf) = self._draw_from_cache(
+ branch.ir_type, branch.kwargs, key=id(branch), random=random
+ )
try:
- child = branch.children[k]
+ child = branch.children[v]
except KeyError:
- append_int(n_bits, k)
+ append_buf(buf)
return bytes(novel_prefix)
if not child.is_exhausted:
- append_int(n_bits, k)
+ append_buf(buf)
current_node = child
break
- check_counter += 1
+ attempts += 1
+ self._reject_child(
+ branch.ir_type, branch.kwargs, child=v, key=id(branch)
+ )
+
# We don't expect this assertion to ever fire, but coverage
# wants the loop inside to run if you have branch checking
# on, hence the pragma.
assert ( # pragma: no cover
- check_counter != 1000
- or len(branch.children) < (2**n_bits)
+ attempts != 1000
+ or len(branch.children) < branch.max_children
or any(not v.is_exhausted for v in branch.children.values())
)
@@ -274,13 +719,22 @@ class DataTree:
or ``start_example`` as these are not currently recorded in the
tree. This will likely change in future."""
node = self.root
+
+ def draw(ir_type, kwargs, *, forced=None):
+ draw_func = getattr(data, f"draw_{ir_type}")
+ value = draw_func(**kwargs, forced=forced)
+
+ if ir_type == "float":
+ value = float_to_int(value)
+ return value
+
try:
while True:
- for i, (n_bits, previous) in enumerate(
- zip(node.bit_lengths, node.values)
+ for i, (ir_type, kwargs, previous) in enumerate(
+ zip(node.ir_types, node.kwargs, node.values)
):
- v = data.draw_bits(
- n_bits, forced=node.values[i] if i in node.forced else None
+ v = draw(
+ ir_type, kwargs, forced=previous if i in node.forced else None
)
if v != previous:
raise PreviouslyUnseenBehaviour
@@ -290,7 +744,7 @@ class DataTree:
elif node.transition is None:
raise PreviouslyUnseenBehaviour
elif isinstance(node.transition, Branch):
- v = data.draw_bits(node.transition.bit_length)
+ v = draw(node.transition.ir_type, node.transition.kwargs)
try:
node = node.transition.children[v]
except KeyError as err:
@@ -305,6 +759,97 @@ class DataTree:
def new_observer(self):
return TreeRecordingObserver(self)
+ def _draw(self, ir_type, kwargs, *, random, forced=None):
+ # we should possibly pull out BUFFER_SIZE to a common file to avoid this
+ # circular import.
+ from hypothesis.internal.conjecture.engine import BUFFER_SIZE
+
+ cd = ConjectureData(max_length=BUFFER_SIZE, prefix=b"", random=random)
+ draw_func = getattr(cd, f"draw_{ir_type}")
+
+ value = draw_func(**kwargs, forced=forced)
+ buf = cd.buffer
+
+ # using floats as keys into branch.children breaks things, because
+ # e.g. hash(0.0) == hash(-0.0) would collide as keys when they are
+ # in fact distinct child branches.
+ # To distinguish floats here we'll use their bits representation. This
+ # entails some bookkeeping such that we're careful about when the
+ # float key is in its bits form (as a key into branch.children) and
+ # when it is in its float form (as a value we want to write to the
+ # buffer), and converting between the two forms as appropriate.
+ if ir_type == "float":
+ value = float_to_int(value)
+ return (value, buf)
+
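The 0.0/-0.0 hazard described above is easy to demonstrate, and converting to the bit representation (as float_to_int does) keeps the keys distinct. A sketch using the standard struct module rather than hypothesis's own helper:

    import struct

    def float_bits(x: float) -> int:
        return struct.unpack("<Q", struct.pack("<d", x))[0]

    assert 0.0 == -0.0 and hash(0.0) == hash(-0.0)  # would collide as dict keys
    assert float_bits(0.0) != float_bits(-0.0)      # bit patterns stay distinct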
+ def _get_children_cache(self, ir_type, kwargs, *, key):
+ # cache the state of the children generator per node/branch (passed as
+ # `key` here), such that we track which children we've already tried
+ # for this branch across draws.
+ # We take advantage of python generators here as one-way iterables,
+ # so each time we iterate we implicitly store our position in the
+ # children generator and don't re-draw children. `children` is the
+ # concrete list of children draw from the generator that we will work
+        # concrete list of children drawn from the generator that we will work
+ # from the generator.
+ if key not in self._children_cache:
+ generator = all_children(ir_type, kwargs)
+ children = []
+ rejected = set()
+ self._children_cache[key] = (generator, children, rejected)
+
+ return self._children_cache[key]
+
+ def _draw_from_cache(self, ir_type, kwargs, *, key, random):
+ (generator, children, rejected) = self._get_children_cache(
+ ir_type, kwargs, key=key
+ )
+ # Keep a stock of 100 potentially-valid children at all times.
+ # This number is chosen to balance memory/speed vs randomness. Ideally
+ # we would sample uniformly from all not-yet-rejected children, but
+ # computing and storing said children is not free.
+ # no-branch because coverage of the fall-through case here is a bit
+ # annoying.
+ if len(children) < 100: # pragma: no branch
+ for v in generator:
+ if ir_type == "float":
+ v = float_to_int(v)
+ if v in rejected:
+ continue
+ children.append(v)
+ if len(children) >= 100:
+ break
+
+ forced = random.choice(children)
+ if ir_type == "float":
+ forced = int_to_float(forced)
+ (value, buf) = self._draw(ir_type, kwargs, forced=forced, random=random)
+ return (value, buf)
+
+ def _reject_child(self, ir_type, kwargs, *, child, key):
+ (_generator, children, rejected) = self._get_children_cache(
+ ir_type, kwargs, key=key
+ )
+ rejected.add(child)
+ # we remove a child from the list of possible children *only* when it is
+ # rejected, and not when it is initially drawn in _draw_from_cache. The
+ # reason is that a child being drawn does not guarantee that child will
+ # be used in a way such that it is written back to the tree, so it needs
+ # to be available for future draws until we are certain it has been
+ # used.
+ #
+ # For instance, if we generated novel prefixes in a loop (but never used
+ # those prefixes to generate new values!) then we don't want to remove
+ # the drawn children from the available pool until they are actually
+ # used.
+ #
+ # This does result in a small inefficiency: we may draw a child,
+ # immediately use it (so we know it cannot be drawn again), but still
+ # wait to draw and reject it here, because DataTree cannot guarantee
+ # the drawn child has been used.
+ if child in children:
+ children.remove(child)
+
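The generator-backed pool in _get_children_cache/_draw_from_cache boils down to the following top-up pattern; because the generator remembers its position, repeated calls never re-draw earlier children. A minimal sketch of the bookkeeping, not the real cache:

    def top_up(generator, children, rejected, target=100):
        # Pull candidates until the pool holds `target` unrejected values
        # or the generator runs dry.
        for v in generator:
            if v in rejected:
                continue
            children.append(v)
            if len(children) >= target:
                break

    gen = iter(range(10))
    children, rejected = [], {3}
    top_up(gen, children, rejected, target=5)
    assert children == [0, 1, 2, 4, 5]
    top_up(gen, children, rejected, target=7)   # resumes at 6, not at 0
    assert children == [0, 1, 2, 4, 5, 6, 7]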
class TreeRecordingObserver(DataObserver):
def __init__(self, tree):
@@ -313,13 +858,49 @@ class TreeRecordingObserver(DataObserver):
self.__trail = [self.__current_node]
self.killed = False
- def draw_bits(self, n_bits, forced, value):
+ def draw_integer(
+ self, value: int, *, was_forced: bool, kwargs: IntegerKWargs
+ ) -> None:
+ self.draw_value("integer", value, was_forced=was_forced, kwargs=kwargs)
+
+ def draw_float(
+ self, value: float, *, was_forced: bool, kwargs: FloatKWargs
+ ) -> None:
+ self.draw_value("float", value, was_forced=was_forced, kwargs=kwargs)
+
+ def draw_string(
+ self, value: str, *, was_forced: bool, kwargs: StringKWargs
+ ) -> None:
+ self.draw_value("string", value, was_forced=was_forced, kwargs=kwargs)
+
+ def draw_bytes(
+ self, value: bytes, *, was_forced: bool, kwargs: BytesKWargs
+ ) -> None:
+ self.draw_value("bytes", value, was_forced=was_forced, kwargs=kwargs)
+
+ def draw_boolean(
+ self, value: bool, *, was_forced: bool, kwargs: BooleanKWargs
+ ) -> None:
+ self.draw_value("boolean", value, was_forced=was_forced, kwargs=kwargs)
+
+ def draw_value(
+ self,
+ ir_type: IRLiteralType,
+ value: IRType,
+ *,
+ was_forced: bool,
+ kwargs: IRKWargsType,
+ ) -> None:
i = self.__index_in_current_node
self.__index_in_current_node += 1
node = self.__current_node
- assert len(node.bit_lengths) == len(node.values)
- if i < len(node.bit_lengths):
- if n_bits != node.bit_lengths[i]:
+
+ if isinstance(value, float):
+ value = float_to_int(value)
+
+ assert len(node.kwargs) == len(node.values) == len(node.ir_types)
+ if i < len(node.values):
+ if ir_type != node.ir_types[i] or kwargs != node.kwargs[i]:
inconsistent_generation()
# Note that we don't check whether a previously
# forced value is now free. That will be caught
@@ -327,23 +908,43 @@ class TreeRecordingObserver(DataObserver):
# may pass silently. This is acceptable because it
# means we skip a hash set lookup on every
# draw and that's a pretty niche failure mode.
- if forced and i not in node.forced:
+ if was_forced and i not in node.forced:
inconsistent_generation()
if value != node.values[i]:
node.split_at(i)
assert i == len(node.values)
new_node = TreeNode()
- branch = node.transition
- branch.children[value] = new_node
+ node.transition.children[value] = new_node
self.__current_node = new_node
self.__index_in_current_node = 0
else:
trans = node.transition
if trans is None:
- node.bit_lengths.append(n_bits)
+ node.ir_types.append(ir_type)
+ node.kwargs.append(kwargs)
node.values.append(value)
- if forced:
+ if was_forced:
node.mark_forced(i)
+ # generate_novel_prefix assumes the following invariant: any one
+ # of the series of draws in a particular node can vary, i.e. the
+ # max number of children is at least 2. However, some draws are
+ # pseudo-choices and only have a single value, such as
+ # integers(0, 0).
+ #
+ # Currently, we address this by forcefully splitting such
+ # single-valued nodes into a transition when we see them. An
+ # exception to this is if it was forced: forced pseudo-choices
+ # do not cause the above issue because they inherently cannot
+ # vary, and moreover they trip other invariants about never
+ # splitting forced nodes.
+ #
+ # An alternative is not writing such choices to the tree at
+ # all, and thus guaranteeing that each node has at least 2 max
+ # children.
+ if compute_max_children(ir_type, kwargs) == 1 and not was_forced:
+ node.split_at(i)
+ self.__current_node = node.transition.children[value]
+ self.__index_in_current_node = 0
elif isinstance(trans, Conclusion):
assert trans.status != Status.OVERRUN
# We tried to draw where history says we should have
@@ -351,7 +952,7 @@ class TreeRecordingObserver(DataObserver):
inconsistent_generation()
else:
assert isinstance(trans, Branch), trans
- if n_bits != trans.bit_length:
+ if ir_type != trans.ir_type or kwargs != trans.kwargs:
inconsistent_generation()
try:
self.__current_node = trans.children[value]
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/engine.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/engine.py
index 99a170ca64..2a011a8b11 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/engine.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/engine.py
@@ -47,6 +47,13 @@ MUTATION_POOL_SIZE = 100
MIN_TEST_CALLS = 10
BUFFER_SIZE = 8 * 1024
+# If the shrinking phase takes more than five minutes, abort it early and print
+# a warning. Many CI systems will kill a build after around ten minutes with
+# no output, and appearing to hang isn't great for interactive use either -
+# showing partially-shrunk examples is better than quitting with no examples!
+# (but make it monkeypatchable, for the rare users who need to keep on shrinking)
+MAX_SHRINKING_SECONDS = 300
+
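Since the deadline now lives in a module-level constant, the rare users mentioned above can opt out with a one-line monkeypatch (hypothetical user code, using the module path from this diff):

    import hypothesis.internal.conjecture.engine as engine

    engine.MAX_SHRINKING_SECONDS = 24 * 60 * 60   # keep shrinking for up to a day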
@attr.s
class HealthCheckState:
@@ -811,9 +818,8 @@ class ConjectureRunner:
)
assert ex1.end <= ex2.start
- replacements = [data.buffer[e.start : e.end] for e in [ex1, ex2]]
-
- replacement = self.random.choice(replacements)
+ e = self.random.choice([ex1, ex2])
+ replacement = data.buffer[e.start : e.end]
try:
            # We attempt to replace both the examples with
@@ -822,7 +828,7 @@ class ConjectureRunner:
# wrong - labels matching are only a best guess as to
# whether the two are equivalent - but it doesn't
# really matter. It may not achieve the desired result
- # but it's still a perfectly acceptable choice sequence.
+ # but it's still a perfectly acceptable choice sequence
# to try.
new_data = self.cached_test_function(
data.buffer[: ex1.start]
@@ -922,7 +928,7 @@ class ConjectureRunner:
)
def new_conjecture_data_for_buffer(self, buffer):
- return ConjectureData.for_buffer(buffer, observer=self.tree.new_observer())
+ return self.new_conjecture_data(buffer, max_length=len(buffer))
def shrink_interesting_examples(self):
"""If we've found interesting examples, try to replace each of them
@@ -935,12 +941,7 @@ class ConjectureRunner:
return
self.debug("Shrinking interesting examples")
-
- # If the shrinking phase takes more than five minutes, abort it early and print
- # a warning. Many CI systems will kill a build after around ten minutes with
- # no output, and appearing to hang isn't great for interactive use either -
- # showing partially-shrunk examples is better than quitting with no examples!
- self.finish_shrinking_deadline = time.perf_counter() + 300
+ self.finish_shrinking_deadline = time.perf_counter() + MAX_SHRINKING_SECONDS
for prev_data in sorted(
self.interesting_examples.values(), key=lambda d: sort_key(d.buffer)
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/junkdrawer.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/junkdrawer.py
index ec12b028b8..4a2140eccd 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/junkdrawer.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/junkdrawer.py
@@ -110,12 +110,10 @@ class IntList(Sequence[int]):
return len(self.__underlying)
@overload
- def __getitem__(self, i: int) -> int:
- ... # pragma: no cover
+ def __getitem__(self, i: int) -> int: ... # pragma: no cover
@overload
- def __getitem__(self, i: slice) -> "IntList":
- ... # pragma: no cover
+ def __getitem__(self, i: slice) -> "IntList": ... # pragma: no cover
def __getitem__(self, i: Union[int, slice]) -> "Union[int, IntList]":
if isinstance(i, slice):
@@ -245,7 +243,7 @@ class LazySequenceCopy:
return i
-def clamp(lower: int, value: int, upper: int) -> int:
+def clamp(lower: float, value: float, upper: float) -> float:
"""Given a value and lower/upper bounds, 'clamp' the value so that
it satisfies lower <= value <= upper."""
return max(lower, min(value, upper))
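Widening the annotations from int to float matches how the helper is actually used elsewhere in this change (see the to_jsonable clamp below); a quick check of the semantics:

    def clamp(lower: float, value: float, upper: float) -> float:
        return max(lower, min(value, upper))

    assert clamp(0.0, 2.5, 1.0) == 1.0    # clipped to the upper bound
    assert clamp(-1.0, 0.5, 1.0) == 0.5   # already in range
    assert clamp(1, 10**30, 10) == 10     # ints still work fine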
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/shrinker.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/shrinker.py
index 39a515d296..b762b89c96 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/shrinker.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/shrinker.py
@@ -627,16 +627,16 @@ class Shrinker:
# This *can't* be a shrink because none of the components were.
assert shrink_target is self.shrink_target
if result.status == Status.VALID:
- self.shrink_target.slice_comments[
- (0, 0)
- ] = "The test sometimes passed when commented parts were varied together."
+ self.shrink_target.slice_comments[(0, 0)] = (
+ "The test sometimes passed when commented parts were varied together."
+ )
break # Test passed, this param can't vary freely.
elif self.__predicate(result): # pragma: no branch
n_same_failures_together += 1
if n_same_failures_together >= 100:
- self.shrink_target.slice_comments[
- (0, 0)
- ] = "The test always failed when commented parts were varied together."
+ self.shrink_target.slice_comments[(0, 0)] = (
+ "The test always failed when commented parts were varied together."
+ )
break
def greedy_shrink(self):
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/utils.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/utils.py
index 61f9d742bb..5e77437a78 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/utils.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/utils.py
@@ -101,13 +101,12 @@ class Sampler:
table: List[Tuple[int, int, float]] # (base_idx, alt_idx, alt_chance)
- def __init__(self, weights: Sequence[float]):
- n = len(weights)
+ def __init__(self, weights: Sequence[float], *, observe: bool = True):
+ self.observe = observe
+ n = len(weights)
table: "list[list[int | float | None]]" = [[i, None, None] for i in range(n)]
-
total = sum(weights)
-
num_type = type(total)
zero = num_type(0) # type: ignore
@@ -179,7 +178,7 @@ class Sampler:
)
)
base, alternate, alternate_chance = data.choice(
- self.table, forced=forced_choice
+ self.table, forced=forced_choice, observe=self.observe
)
forced_use_alternate = None
if forced is not None:
@@ -189,7 +188,9 @@ class Sampler:
forced_use_alternate = forced == alternate and alternate_chance > 0
assert forced == base or forced_use_alternate
- use_alternate = data.draw_boolean(alternate_chance, forced=forced_use_alternate)
+ use_alternate = data.draw_boolean(
+ alternate_chance, forced=forced_use_alternate, observe=self.observe
+ )
data.stop_example()
if use_alternate:
assert forced is None or alternate == forced, (forced, alternate)
@@ -200,7 +201,7 @@ class Sampler:
INT_SIZES = (8, 16, 32, 64, 128)
-INT_SIZES_SAMPLER = Sampler((4.0, 8.0, 1.0, 1.0, 0.5))
+INT_SIZES_SAMPLER = Sampler((4.0, 8.0, 1.0, 1.0, 0.5), observe=False)
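Sampler draws from a weighted distribution via the alias method: pick a table row uniformly, then flip a biased coin between that row's base and alternate index, exactly the (base_idx, alt_idx, alt_chance) shape annotated above. A self-contained sketch of building and using such a table (illustrative only, not the hypothesis implementation):

    import random

    def alias_table(weights):
        n, total = len(weights), sum(weights)
        scaled = [w * n / total for w in weights]
        small = [i for i, w in enumerate(scaled) if w < 1.0]
        large = [i for i, w in enumerate(scaled) if w >= 1.0]
        rows = [None] * n
        while small and large:
            lo, hi = small.pop(), large.pop()
            rows[lo] = (lo, hi, 1.0 - scaled[lo])  # (base_idx, alt_idx, alt_chance)
            scaled[hi] -= 1.0 - scaled[lo]
            (small if scaled[hi] < 1.0 else large).append(hi)
        for i in small + large:
            rows[i] = (i, i, 0.0)                  # exactly weight 1.0 remains
        return rows

    def sample(rows, rng):
        base, alternate, alt_chance = rng.choice(rows)
        return alternate if rng.random() < alt_chance else base

    rows = alias_table((4.0, 8.0, 1.0, 1.0, 0.5))  # the INT_SIZES weights above
    rng = random.Random(0)
    draws = [sample(rows, rng) for _ in range(10_000)]
    assert draws.count(1) > draws.count(4)         # weight 8.0 dominates 0.5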
class many:
@@ -223,6 +224,7 @@ class many:
average_size: Union[int, float],
*,
forced: Optional[int] = None,
+ observe: bool = True,
) -> None:
assert 0 <= min_size <= average_size <= max_size
assert forced is None or min_size <= forced <= max_size
@@ -236,17 +238,17 @@ class many:
self.drawn = False
self.force_stop = False
self.rejected = False
+ self.observe = observe
def more(self) -> bool:
"""Should I draw another element to add to the collection?"""
if self.drawn:
- self.data.stop_example(discard=self.rejected)
+ self.data.stop_example()
self.drawn = True
self.rejected = False
self.data.start_example(ONE_FROM_MANY_LABEL)
-
if self.min_size == self.max_size:
# if we have to hit an exact size, draw unconditionally until that
# point, and no further.
@@ -265,7 +267,7 @@ class many:
elif self.forced_size is not None:
forced_result = self.count < self.forced_size
should_continue = self.data.draw_boolean(
- self.p_continue, forced=forced_result
+ self.p_continue, forced=forced_result, observe=self.observe
)
if should_continue:
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/escalation.py b/contrib/python/hypothesis/py3/hypothesis/internal/escalation.py
index 9261d2aefc..c3c678d239 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/escalation.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/escalation.py
@@ -142,9 +142,11 @@ class InterestingOrigin(NamedTuple):
# to support introspection when debugging, so we can use that unconditionally.
cls.from_exception(exception.__context__) if exception.__context__ else (),
# We distinguish exception groups by the inner exceptions, as for __context__
- tuple(map(cls.from_exception, exception.exceptions))
- if isinstance(exception, BaseExceptionGroup)
- else (),
+ (
+ tuple(map(cls.from_exception, exception.exceptions))
+ if isinstance(exception, BaseExceptionGroup)
+ else ()
+ ),
)
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/observability.py b/contrib/python/hypothesis/py3/hypothesis/internal/observability.py
index eb083f8be1..98753985f1 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/observability.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/observability.py
@@ -13,8 +13,10 @@
import json
import os
import sys
+import time
import warnings
from datetime import date, timedelta
+from functools import lru_cache
from typing import Callable, Dict, List, Optional
from hypothesis.configuration import storage_directory
@@ -38,7 +40,6 @@ def make_testcase(
string_repr: str = "<unknown>",
arguments: Optional[dict] = None,
timing: Dict[str, float],
- metadata: Optional[dict] = None,
coverage: Optional[Dict[str, List[int]]] = None,
) -> dict:
if data.interesting_origin:
@@ -68,8 +69,9 @@ def make_testcase(
},
"timing": timing,
"metadata": {
- **(metadata or {}),
"traceback": getattr(data.extra_information, "_expected_traceback", None),
+ "predicates": data._observability_predicates,
+ **_system_metadata(),
},
"coverage": coverage,
}
@@ -87,6 +89,18 @@ def _deliver_to_file(value): # pragma: no cover
f.write(json.dumps(value) + "\n")
+_imported_at = time.time()
+
+
+@lru_cache
+def _system_metadata():
+ return {
+ "sys.argv": sys.argv,
+ "os.getpid()": os.getpid(),
+ "imported_at": _imported_at,
+ }
+
+
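Thanks to @lru_cache, the metadata dict is computed once per process and shared by every test-case record afterwards; a quick demonstration of the pattern:

    from functools import lru_cache

    @lru_cache
    def system_metadata():
        return {"pid": 12345}   # stands in for the real sys/os lookups

    assert system_metadata() is system_metadata()  # the same cached object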
OBSERVABILITY_COLLECT_COVERAGE = (
"HYPOTHESIS_EXPERIMENTAL_OBSERVABILITY_NOCOVER" not in os.environ
)
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/reflection.py b/contrib/python/hypothesis/py3/hypothesis/internal/reflection.py
index e62ab28f36..a829f097be 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/reflection.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/reflection.py
@@ -23,6 +23,7 @@ import warnings
from functools import partial, wraps
from io import StringIO
from keyword import iskeyword
+from random import _inst as global_random_instance
from tokenize import COMMENT, detect_encoding, generate_tokens, untokenize
from types import ModuleType
from typing import TYPE_CHECKING, Any, Callable
@@ -446,6 +447,8 @@ def get_pretty_function_description(f):
# Some objects, like `builtins.abs` are of BuiltinMethodType but have
# their module as __self__. This might include c-extensions generally?
if not (self is None or inspect.isclass(self) or inspect.ismodule(self)):
+ if self is global_random_instance:
+ return f"random.{name}"
return f"{self!r}.{name}"
elif isinstance(name, str) and getattr(dict, name, object()) is f:
# special case for keys/values views in from_type() / ghostwriter output
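The effect of the new check: bound methods of the random module's hidden global Random instance now pretty-print as e.g. random.random instead of leaking the instance repr. The underlying identity it relies on:

    from random import _inst as global_random_instance, random

    assert random.__self__ is global_random_instance
    # so get_pretty_function_description(random) can return "random.random"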
diff --git a/contrib/python/hypothesis/py3/hypothesis/provisional.py b/contrib/python/hypothesis/py3/hypothesis/provisional.py
index dec1abfc61..a6f1c4afc5 100644
--- a/contrib/python/hypothesis/py3/hypothesis/provisional.py
+++ b/contrib/python/hypothesis/py3/hypothesis/provisional.py
@@ -146,9 +146,11 @@ def domains(
_url_fragments_strategy = (
st.lists(
st.builds(
- lambda char, encode: f"%{ord(char):02X}"
- if (encode or char not in FRAGMENT_SAFE_CHARACTERS)
- else char,
+ lambda char, encode: (
+ f"%{ord(char):02X}"
+ if (encode or char not in FRAGMENT_SAFE_CHARACTERS)
+ else char
+ ),
st.characters(min_codepoint=0, max_codepoint=255),
st.booleans(),
),
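The reformatted lambda implements ordinary percent-encoding: a character outside the safe set (or one randomly chosen for encoding) becomes %XX. The same logic in isolation, with a stand-in safe set since FRAGMENT_SAFE_CHARACTERS is defined elsewhere in the module:

    SAFE = set("abcdefghijklmnopqrstuvwxyz-._~")   # illustrative stand-in

    def encode_char(char, encode):
        return f"%{ord(char):02X}" if (encode or char not in SAFE) else char

    assert encode_char("a", encode=False) == "a"
    assert encode_char("a", encode=True) == "%61"
    assert encode_char("/", encode=False) == "%2F"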
diff --git a/contrib/python/hypothesis/py3/hypothesis/stateful.py b/contrib/python/hypothesis/py3/hypothesis/stateful.py
index 39ce653981..2ae5815161 100644
--- a/contrib/python/hypothesis/py3/hypothesis/stateful.py
+++ b/contrib/python/hypothesis/py3/hypothesis/stateful.py
@@ -116,6 +116,7 @@ def run_state_machine_as_test(state_machine_factory, *, settings=None, _min_step
machine = factory()
check_type(RuleBasedStateMachine, machine, "state_machine_factory()")
cd.hypothesis_runner = machine
+ machine._observability_predicates = cd._observability_predicates # alias
print_steps = (
current_build_context().is_final or current_verbosity() >= Verbosity.debug
@@ -232,11 +233,12 @@ class StateMachineMeta(type):
class RuleBasedStateMachine(metaclass=StateMachineMeta):
"""A RuleBasedStateMachine gives you a structured way to define state machines.
- The idea is that a state machine carries a bunch of types of data
- divided into Bundles, and has a set of rules which may read data
- from bundles (or just from normal strategies) and push data onto
- bundles. At any given point a random applicable rule will be
- executed.
+ The idea is that a state machine carries the system under test and some supporting
+ data. This data can be stored in instance variables or
+ divided into Bundles. The state machine has a set of rules which may read data
+ from bundles (or just from normal strategies), push data onto
+ bundles, change the state of the machine, or verify properties.
+ At any given point a random applicable rule will be executed.
"""
_rules_per_class: ClassVar[Dict[type, List[classmethod]]] = {}
@@ -458,6 +460,27 @@ class BundleReferenceStrategy(SearchStrategy):
class Bundle(SearchStrategy[Ex]):
+ """A collection of values for use in stateful testing.
+
+ Bundles are a kind of strategy where values can be added by rules,
+ and (like any strategy) used as inputs to future rules.
+
+    The ``name`` argument they are passed is the name they are referred to
+ internally by the state machine; no two bundles may have
+ the same name. It is idiomatic to use the attribute
+ being assigned to as the name of the Bundle::
+
+ class MyStateMachine(RuleBasedStateMachine):
+ keys = Bundle("keys")
+
+ Bundles can contain the same value more than once; this becomes
+ relevant when using :func:`~hypothesis.stateful.consumes` to remove
+ values again.
+
+ If the ``consume`` argument is set to True, then all values that are
+ drawn from this bundle will be consumed (as above) when requested.
+ """
+
def __init__(self, name: str, *, consume: bool = False) -> None:
self.name = name
self.__reference_strategy = BundleReferenceStrategy(name, consume=consume)
@@ -637,7 +660,7 @@ def rule(
``targets`` will define where the end result of this function should go. If
both are empty then the end result will be discarded.
- ``target`` must be a Bundle, or if the result should go to multiple
+ ``target`` must be a Bundle, or if the result should be replicated to multiple
bundles you can pass a tuple of them as the ``targets`` argument.
It is invalid to use both arguments for a single rule. If the result
should go to exactly one of several bundles, define a separate rule for
@@ -941,8 +964,14 @@ class RuleStrategy(SearchStrategy):
return (rule, data.draw(rule.arguments_strategy))
def is_valid(self, rule):
- if not all(precond(self.machine) for precond in rule.preconditions):
- return False
+ predicates = self.machine._observability_predicates
+ desc = f"{self.machine.__class__.__qualname__}, rule {rule.function.__name__},"
+ for pred in rule.preconditions:
+ meets_precond = pred(self.machine)
+ where = f"{desc} precondition {get_pretty_function_description(pred)}"
+ predicates[where]["satisfied" if meets_precond else "unsatisfied"] += 1
+ if not meets_precond:
+ return False
for b in rule.bundles:
bundle = self.machine.bundle(b.name)
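is_valid now feeds per-precondition satisfied/unsatisfied counts into the machine's observability predicates; the bookkeeping amounts to this pattern (a sketch assuming a Counter-valued defaultdict, which is not shown in this hunk):

    from collections import Counter, defaultdict

    predicates = defaultdict(Counter)

    def record(where, meets_precond):
        predicates[where]["satisfied" if meets_precond else "unsatisfied"] += 1
        return meets_precond

    where = "MyMachine, rule add, precondition <lambda>"
    record(where, True)
    record(where, False)
    assert predicates[where] == Counter(satisfied=1, unsatisfied=1)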
diff --git a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/core.py b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/core.py
index 234b8de822..1efe75caec 100644
--- a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/core.py
+++ b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/core.py
@@ -53,7 +53,13 @@ from uuid import UUID
import attr
from hypothesis._settings import note_deprecation
-from hypothesis.control import cleanup, current_build_context, note
+from hypothesis.control import (
+ RandomSeeder,
+ cleanup,
+ current_build_context,
+ deprecate_random_in_strategy,
+ note,
+)
from hypothesis.errors import (
HypothesisSideeffectWarning,
HypothesisWarning,
@@ -111,7 +117,7 @@ from hypothesis.strategies._internal.collections import (
from hypothesis.strategies._internal.deferred import DeferredStrategy
from hypothesis.strategies._internal.functions import FunctionStrategy
from hypothesis.strategies._internal.lazy import LazyStrategy, unwrap_strategies
-from hypothesis.strategies._internal.misc import just, none, nothing
+from hypothesis.strategies._internal.misc import BooleansStrategy, just, none, nothing
from hypothesis.strategies._internal.numbers import (
IntegersStrategy,
Real,
@@ -152,14 +158,14 @@ else:
@cacheable
-@defines_strategy()
+@defines_strategy(force_reusable_values=True)
def booleans() -> SearchStrategy[bool]:
"""Returns a strategy which generates instances of :class:`python:bool`.
Examples from this strategy will shrink towards ``False`` (i.e.
shrinking will replace ``True`` with ``False`` where possible).
"""
- return SampledFromStrategy([False, True], repr_="booleans()")
+ return BooleansStrategy()
@overload
@@ -998,14 +1004,6 @@ def randoms(
)
-class RandomSeeder:
- def __init__(self, seed):
- self.seed = seed
-
- def __repr__(self):
- return f"RandomSeeder({self.seed!r})"
-
-
class RandomModule(SearchStrategy):
def do_draw(self, data):
# It would be unsafe to do run this method more than once per test case,
@@ -1835,9 +1833,11 @@ def _composite(f):
params = params[1:]
newsig = sig.replace(
parameters=params,
- return_annotation=SearchStrategy
- if sig.return_annotation is sig.empty
- else SearchStrategy[sig.return_annotation],
+ return_annotation=(
+ SearchStrategy
+ if sig.return_annotation is sig.empty
+ else SearchStrategy[sig.return_annotation]
+ ),
)
@defines_strategy()
@@ -2104,6 +2104,10 @@ def runner(*, default: Any = not_set) -> SearchStrategy[Any]:
The exact meaning depends on the entry point, but it will usually be the
associated 'self' value for it.
+ If you are using this in a rule for stateful testing, this strategy
+ will return the instance of the :class:`~hypothesis.stateful.RuleBasedStateMachine`
+ that the rule is running for.
+
If there is no current test runner and a default is provided, return
that default. If no default is provided, raises InvalidArgument.
@@ -2134,7 +2138,8 @@ class DataObject:
self.count += 1
printer = RepresentationPrinter(context=current_build_context())
desc = f"Draw {self.count}{'' if label is None else f' ({label})'}: "
- result = self.conjecture_data.draw(strategy, observe_as=f"generate:{desc}")
+ with deprecate_random_in_strategy("{}from {!r}", desc, strategy):
+ result = self.conjecture_data.draw(strategy, observe_as=f"generate:{desc}")
if TESTCASE_CALLBACKS:
self.conjecture_data._observability_args[desc] = to_jsonable(result)
diff --git a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/misc.py b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/misc.py
index ad37107f73..3d0b0c97e0 100644
--- a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/misc.py
+++ b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/misc.py
@@ -116,3 +116,11 @@ def nothing() -> SearchStrategy:
Examples from this strategy do not shrink (because there are none).
"""
return NOTHING
+
+
+class BooleansStrategy(SearchStrategy):
+ def do_draw(self, data):
+ return data.draw_boolean()
+
+ def __repr__(self):
+ return "booleans()"
diff --git a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/utils.py b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/utils.py
index bd56d2287e..066c2b6581 100644
--- a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/utils.py
+++ b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/utils.py
@@ -17,6 +17,7 @@ import attr
from hypothesis.internal.cache import LRUReusedCache
from hypothesis.internal.compat import dataclass_asdict
+from hypothesis.internal.conjecture.junkdrawer import clamp
from hypothesis.internal.floats import float_to_int
from hypothesis.internal.reflection import proxies
from hypothesis.vendor.pretty import pretty
@@ -160,6 +161,9 @@ def to_jsonable(obj: object) -> object:
"""
if isinstance(obj, (str, int, float, bool, type(None))):
if isinstance(obj, int) and abs(obj) >= 2**63:
+ # Silently clamp very large ints to max_float, to avoid
+ # OverflowError when casting to float.
+ obj = clamp(-sys.float_info.max, obj, sys.float_info.max)
return float(obj)
return obj
if isinstance(obj, (list, tuple, set, frozenset)):
@@ -172,6 +176,14 @@ def to_jsonable(obj: object) -> object:
for k, v in obj.items()
}
+ # Hey, might as well try calling a .to_json() method - it works for Pandas!
+ # We try this before the below general-purpose handlers to give folks a
+ # chance to control this behavior on their custom classes.
+ try:
+ return to_jsonable(obj.to_json()) # type: ignore
+ except Exception:
+ pass
+
# Special handling for dataclasses, attrs, and pydantic classes
if (
(dcs := sys.modules.get("dataclasses"))
@@ -184,11 +196,5 @@ def to_jsonable(obj: object) -> object:
if (pyd := sys.modules.get("pydantic")) and isinstance(obj, pyd.BaseModel):
return to_jsonable(obj.model_dump())
- # Hey, might as well try calling a .to_json() method - it works for Pandas!
- try:
- return to_jsonable(obj.to_json()) # type: ignore
- except Exception:
- pass
-
# If all else fails, we'll just pretty-print as a string.
return pretty(obj)
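Moving the to_json() attempt ahead of the dataclass/attrs/pydantic handlers changes precedence for classes that are both, which is the point of the comment above. A hypothetical example:

    import dataclasses

    @dataclasses.dataclass
    class Point:
        x: int
        y: int

        def to_json(self):
            return {"kind": "point", "x": self.x, "y": self.y}

    # Previously: serialized via dataclass_asdict -> {"x": 1, "y": 2}
    # After this change: the to_json() hook wins -> {"kind": "point", "x": 1, "y": 2}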
diff --git a/contrib/python/hypothesis/py3/hypothesis/vendor/pretty.py b/contrib/python/hypothesis/py3/hypothesis/vendor/pretty.py
index 056f1795d3..35451b9961 100644
--- a/contrib/python/hypothesis/py3/hypothesis/vendor/pretty.py
+++ b/contrib/python/hypothesis/py3/hypothesis/vendor/pretty.py
@@ -764,7 +764,7 @@ def for_type_by_name(type_module, type_name, func):
"""Add a pretty printer for a type specified by the module and name of a
type rather than the type object itself."""
key = (type_module, type_name)
- oldfunc = _deferred_type_pprinters.get(key, None)
+ oldfunc = _deferred_type_pprinters.get(key)
_deferred_type_pprinters[key] = func
return oldfunc
diff --git a/contrib/python/hypothesis/py3/hypothesis/vendor/tlds-alpha-by-domain.txt b/contrib/python/hypothesis/py3/hypothesis/vendor/tlds-alpha-by-domain.txt
index 589b768abd..167e07e0e7 100644
--- a/contrib/python/hypothesis/py3/hypothesis/vendor/tlds-alpha-by-domain.txt
+++ b/contrib/python/hypothesis/py3/hypothesis/vendor/tlds-alpha-by-domain.txt
@@ -1,4 +1,4 @@
-# Version 2023122300, Last Updated Sat Dec 23 07:07:01 2023 UTC
+# Version 2024021000, Last Updated Sat Feb 10 07:07:02 2024 UTC
AAA
AARP
ABB
@@ -96,7 +96,6 @@ BA
BABY
BAIDU
BANAMEX
-BANANAREPUBLIC
BAND
BANK
BAR
@@ -261,7 +260,6 @@ COFFEE
COLLEGE
COLOGNE
COM
-COMCAST
COMMBANK
COMMUNITY
COMPANY
@@ -855,7 +853,6 @@ OFFICE
OKINAWA
OLAYAN
OLAYANGROUP
-OLDNAVY
OLLO
OM
OMEGA
@@ -1276,7 +1273,6 @@ WTC
WTF
XBOX
XEROX
-XFINITY
XIHUAN
XIN
XN--11B4C3D
diff --git a/contrib/python/hypothesis/py3/hypothesis/version.py b/contrib/python/hypothesis/py3/hypothesis/version.py
index 10be563628..6aafd5421c 100644
--- a/contrib/python/hypothesis/py3/hypothesis/version.py
+++ b/contrib/python/hypothesis/py3/hypothesis/version.py
@@ -8,5 +8,5 @@
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
-__version_info__ = (6, 97, 4)
+__version_info__ = (6, 98, 8)
__version__ = ".".join(map(str, __version_info__))
diff --git a/contrib/python/hypothesis/py3/ya.make b/contrib/python/hypothesis/py3/ya.make
index e0530c7a9e..1678e159c9 100644
--- a/contrib/python/hypothesis/py3/ya.make
+++ b/contrib/python/hypothesis/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(6.97.4)
+VERSION(6.98.8)
LICENSE(MPL-2.0)
diff --git a/contrib/python/numpy/py2/numpy/core/include/numpy/npy_math.h b/contrib/python/numpy/py2/numpy/core/include/numpy/npy_math.h
index 582390cdcb..71951264fb 100644
--- a/contrib/python/numpy/py2/numpy/core/include/numpy/npy_math.h
+++ b/contrib/python/numpy/py2/numpy/core/include/numpy/npy_math.h
@@ -7,7 +7,7 @@ extern "C" {
#include <math.h>
#ifdef __SUNPRO_CC
-#include <sunmath.h>
+#error #include <sunmath.h>
#endif
#ifdef HAVE_NPY_CONFIG_H
#include <npy_config.h>
diff --git a/contrib/python/psutil/py2/psutil/_psaix.py b/contrib/python/psutil/py2/psutil/_psaix.py
new file mode 100644
index 0000000000..7160ecd63a
--- /dev/null
+++ b/contrib/python/psutil/py2/psutil/_psaix.py
@@ -0,0 +1,552 @@
+# Copyright (c) 2009, Giampaolo Rodola'
+# Copyright (c) 2017, Arnon Yaari
+# All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""AIX platform implementation."""
+
+import functools
+import glob
+import os
+import re
+import subprocess
+import sys
+from collections import namedtuple
+
+from . import _common
+from . import _psposix
+from . import _psutil_aix as cext
+from . import _psutil_posix as cext_posix
+from ._common import AccessDenied
+from ._common import conn_to_ntuple
+from ._common import get_procfs_path
+from ._common import memoize_when_activated
+from ._common import NIC_DUPLEX_FULL
+from ._common import NIC_DUPLEX_HALF
+from ._common import NIC_DUPLEX_UNKNOWN
+from ._common import NoSuchProcess
+from ._common import usage_percent
+from ._common import ZombieProcess
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import PY3
+
+
+__extra__all__ = ["PROCFS_PATH"]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+HAS_THREADS = hasattr(cext, "proc_threads")
+HAS_NET_IO_COUNTERS = hasattr(cext, "net_io_counters")
+HAS_PROC_IO_COUNTERS = hasattr(cext, "proc_io_counters")
+
+PAGE_SIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SACTIVE: _common.STATUS_RUNNING,
+ cext.SSWAP: _common.STATUS_RUNNING, # TODO what status is this?
+ cext.SSTOP: _common.STATUS_STOPPED,
+}
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+proc_info_map = dict(
+ ppid=0,
+ rss=1,
+ vms=2,
+ create_time=3,
+ nice=4,
+ num_threads=5,
+ status=6,
+ ttynr=7)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+# psutil.Process.memory_full_info()
+pfullmem = pmem
+# psutil.Process.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ total, avail, free, pinned, inuse = cext.virtual_mem()
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, inuse, free)
+
+
+def swap_memory():
+ """Swap system memory as a (total, used, free, sin, sout) tuple."""
+ total, free, sin, sout = cext.swap_mem()
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system-wide CPU times as a named tuple"""
+ ret = cext.per_cpu_times()
+ return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples"""
+ ret = cext.per_cpu_times()
+ return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # mimic os.cpu_count() behavior
+ return None
+
+
+def cpu_count_physical():
+ cmd = "lsdev -Cc processor"
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode != 0:
+ raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+ processors = stdout.strip().splitlines()
+ return len(processors) or None
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ ctx_switches, interrupts, soft_interrupts, syscalls = cext.cpu_stats()
+ return _common.scpustats(
+ ctx_switches, interrupts, soft_interrupts, syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def disk_partitions(all=False):
+ """Return system disk partitions."""
+ # TODO - the filtering logic should be better checked so that
+ # it tries to reflect 'df' as much as possible
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+            # Unlike, say, Linux, we don't have a list of
+ # common fs types so the best we can do, AFAIK, is to
+ # filter by filesystem having a total size > 0.
+ if not disk_usage(mountpoint).total:
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_if_addrs = cext_posix.net_if_addrs
+
+if HAS_NET_IO_COUNTERS:
+ net_io_counters = cext.net_io_counters
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ """
+ cmap = _common.conn_tmap
+ if kind not in cmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in cmap])))
+ families, types = _common.conn_tmap[kind]
+ rawlist = cext.net_connections(_pid)
+ ret = []
+ for item in rawlist:
+ fd, fam, type_, laddr, raddr, status, pid = item
+ if fam not in families:
+ continue
+ if type_ not in types:
+ continue
+ nt = conn_to_ntuple(fd, fam, type_, laddr, raddr, status,
+ TCP_STATUSES, pid=pid if _pid == -1 else None)
+ ret.append(nt)
+ return ret
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ duplex_map = {"Full": NIC_DUPLEX_FULL,
+ "Half": NIC_DUPLEX_HALF}
+ names = set([x[0] for x in net_if_addrs()])
+ ret = {}
+ for name in names:
+ isup, mtu = cext.net_if_stats(name)
+
+ # try to get speed and duplex
+ # TODO: rewrite this in C (entstat forks, so use truss -f to follow.
+ # looks like it is using an undocumented ioctl?)
+ duplex = ""
+ speed = 0
+ p = subprocess.Popen(["/usr/bin/entstat", "-d", name],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode == 0:
+ re_result = re.search(
+ r"Running: (\d+) Mbps.*?(\w+) Duplex", stdout)
+ if re_result is not None:
+ speed = int(re_result.group(1))
+ duplex = re_result.group(2)
+
+ duplex = duplex_map.get(duplex, NIC_DUPLEX_UNKNOWN)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu)
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ localhost = (':0.0', ':0')
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in localhost:
+ hostname = 'localhost'
+ nt = _common.suser(user, tty, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(get_procfs_path()) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix pid."""
+ return os.path.exists(os.path.join(get_procfs_path(), str(pid), "psinfo"))
+
+
+def wrap_exceptions(fun):
+    """Call the callable inside a try/except clause, translating ENOENT,
+    EACCES and EPERM into NoSuchProcess or AccessDenied exceptions.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
+ def oneshot_enter(self):
+ self._proc_basic_info.cache_activate(self)
+ self._proc_cred.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_basic_info.cache_deactivate(self)
+ self._proc_cred.cache_deactivate(self)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_basic_info(self):
+ return cext.proc_basic_info(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_cred(self):
+ return cext.proc_cred(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def name(self):
+ if self.pid == 0:
+ return "swapper"
+ # note: max 16 characters
+ return cext.proc_name(self.pid, self._procfs_path).rstrip("\x00")
+
+ @wrap_exceptions
+ def exe(self):
+ # there is no way to get executable path in AIX other than to guess,
+ # and guessing is more complex than what's in the wrapping class
+ cmdline = self.cmdline()
+ if not cmdline:
+ return ''
+ exe = cmdline[0]
+ if os.path.sep in exe:
+ # relative or absolute path
+ if not os.path.isabs(exe):
+ # if cwd has changed, we're out of luck - this may be wrong!
+ exe = os.path.abspath(os.path.join(self.cwd(), exe))
+ if (os.path.isabs(exe) and
+ os.path.isfile(exe) and
+ os.access(exe, os.X_OK)):
+ return exe
+ # not found, move to search in PATH using basename only
+ exe = os.path.basename(exe)
+        # search for exe name in PATH
+ for path in os.environ["PATH"].split(":"):
+ possible_exe = os.path.abspath(os.path.join(path, exe))
+ if (os.path.isfile(possible_exe) and
+ os.access(possible_exe, os.X_OK)):
+ return possible_exe
+ return ''
+
+ @wrap_exceptions
+ def cmdline(self):
+ return cext.proc_args(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._proc_basic_info()[proc_info_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_basic_info()[proc_info_map['num_threads']]
+
+ if HAS_THREADS:
+ @wrap_exceptions
+ def threads(self):
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ # The underlying C implementation retrieves all OS threads
+ # and filters them by PID. At this point we can't tell whether
+            # an empty list means there were no threads for the process, or
+            # the process is no longer active, so we force NSP in case the PID
+ # is no longer there.
+ if not retlist:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = net_connections(kind, _pid=self.pid)
+ # The underlying C implementation retrieves all OS connections
+ # and filters them by PID. At this point we can't tell whether
+        # an empty list means there were no connections for the process, or
+        # the process is no longer active, so we force NSP in case the PID
+ # is no longer there.
+ if not ret:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+ return ret
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ real, effective, saved, _, _, _ = self._proc_cred()
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def gids(self):
+ _, _, _, real, effective, saved = self._proc_cred()
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def cpu_times(self):
+ cpu_times = cext.proc_cpu_times(self.pid, self._procfs_path)
+ return _common.pcputimes(*cpu_times)
+
+ @wrap_exceptions
+ def terminal(self):
+ ttydev = self._proc_basic_info()[proc_info_map['ttynr']]
+ # convert from 64-bit dev_t to 32-bit dev_t and then map the device
+ ttydev = (((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF))
+ # try to match ttydev against the st_rdev of /dev/**/* device files
+ for dev in glob.glob("/dev/**/*"):
+ if os.stat(dev).st_rdev == ttydev:
+ return dev
+ return None
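+
+ # Worked example of the conversion above, assuming AIX packs the
+ # major number in bits 32-47 and the minor in bits 0-15:
+ #
+ # ttydev = (26 << 32) | 5 # major 26, minor 5
+ # ((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF)
+ # # -> (26 << 16) | 5 == 0x1A0005, a 32-bit dev_t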
+
+ @wrap_exceptions
+ def cwd(self):
+ procfs_path = self._procfs_path
+ try:
+ result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid))
+ return result.rstrip('/')
+ except FileNotFoundError:
+ os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
+ ret = self._proc_basic_info()
+ rss = ret[proc_info_map['rss']] * 1024
+ vms = ret[proc_info_map['vms']] * 1024
+ return pmem(rss, vms)
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def status(self):
+ code = self._proc_basic_info()[proc_info_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ def open_files(self):
+ # TODO rewrite without using procfiles (stat /proc/pid/fd/* and then
+ # find matching name of the inode)
+ p = subprocess.Popen(["/usr/bin/procfiles", "-n", str(self.pid)],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if "no such process" in stderr.lower():
+ raise NoSuchProcess(self.pid, self._name)
+ procfiles = re.findall(r"(\d+): S_IFREG.*\s*.*name:(.*)\n", stdout)
+ retlist = []
+ for fd, path in procfiles:
+ path = path.strip()
+ if path.startswith("//"):
+ path = path[1:]
+ if path.lower() == "cannot be retrieved":
+ continue
+ retlist.append(_common.popenfile(path, int(fd)))
+ return retlist
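+
+ # The regex above matches 'procfiles -n' entries shaped like the
+ # (made-up) sample below, capturing the fd number and the name:
+ #
+ # 0: S_IFREG mode:0444 dev:10,5 ino:12345 uid:0 gid:0 rdev:0,0
+ # size:1234 name:/usr/lib/example.so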
+
+ @wrap_exceptions
+ def num_fds(self):
+ if self.pid == 0: # no /proc/0/fd
+ return 0
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ return _common.pctxsw(
+ *cext.proc_num_ctx_switches(self.pid))
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ if HAS_PROC_IO_COUNTERS:
+ @wrap_exceptions
+ def io_counters(self):
+ try:
+ rc, wc, rb, wb = cext.proc_io_counters(self.pid)
+ except OSError:
+ # if process is terminated, proc_io_counters returns OSError
+ # instead of NSP
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ raise
+ return _common.pio(rc, wc, rb, wb)
diff --git a/contrib/python/psutil/py2/psutil/_psbsd.py b/contrib/python/psutil/py2/psutil/_psbsd.py
new file mode 100644
index 0000000000..764463e980
--- /dev/null
+++ b/contrib/python/psutil/py2/psutil/_psbsd.py
@@ -0,0 +1,917 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD, OpenBSD and NetBSD platforms implementation."""
+
+import contextlib
+import errno
+import functools
+import os
+import xml.etree.ElementTree as ET
+from collections import namedtuple
+from collections import defaultdict
+
+from . import _common
+from . import _psposix
+from . import _psutil_bsd as cext
+from . import _psutil_posix as cext_posix
+from ._common import AccessDenied
+from ._common import conn_tmap
+from ._common import conn_to_ntuple
+from ._common import FREEBSD
+from ._common import memoize
+from ._common import memoize_when_activated
+from ._common import NETBSD
+from ._common import NoSuchProcess
+from ._common import OPENBSD
+from ._common import usage_percent
+from ._common import ZombieProcess
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import which
+
+
+__extra__all__ = []
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+if FREEBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SWAIT: _common.STATUS_WAITING,
+ cext.SLOCK: _common.STATUS_LOCKED,
+ }
+elif OPENBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ # According to /usr/include/sys/proc.h SZOMB is unused.
+ # test_zombie_process() shows that SDEAD is the right
+ # equivalent. Also it appears there's no equivalent of
+ # psutil.STATUS_DEAD. SDEAD really means STATUS_ZOMBIE.
+ # cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SDEAD: _common.STATUS_ZOMBIE,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ # From http://www.eecs.harvard.edu/~margo/cs161/videos/proc.h.txt
+ # OpenBSD has SRUN and SONPROC: SRUN indicates that a process
+ # is runnable but *not* yet running, i.e. is on a run queue.
+ # SONPROC indicates that the process is actually executing on
+ # a CPU, i.e. it is no longer on a run queue.
+ # As such we'll map SRUN to STATUS_WAKING and SONPROC to
+ # STATUS_RUNNING
+ cext.SRUN: _common.STATUS_WAKING,
+ cext.SONPROC: _common.STATUS_RUNNING,
+ }
+elif NETBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SRUN: _common.STATUS_WAKING,
+ cext.SONPROC: _common.STATUS_RUNNING,
+ }
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PAGESIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+HAS_PER_CPU_TIMES = hasattr(cext, "per_cpu_times")
+HAS_PROC_NUM_THREADS = hasattr(cext, "proc_num_threads")
+HAS_PROC_OPEN_FILES = hasattr(cext, 'proc_open_files')
+HAS_PROC_NUM_FDS = hasattr(cext, 'proc_num_fds')
+
+kinfo_proc_map = dict(
+ ppid=0,
+ status=1,
+ real_uid=2,
+ effective_uid=3,
+ saved_uid=4,
+ real_gid=5,
+ effective_gid=6,
+ saved_gid=7,
+ ttynr=8,
+ create_time=9,
+ ctx_switches_vol=10,
+ ctx_switches_unvol=11,
+ read_io_count=12,
+ write_io_count=13,
+ user_time=14,
+ sys_time=15,
+ ch_user_time=16,
+ ch_sys_time=17,
+ rss=18,
+ vms=19,
+ memtext=20,
+ memdata=21,
+ memstack=22,
+ cpunum=23,
+ name=24,
+)
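+
+# Illustrative sketch (not part of the module): kinfo_proc_map holds the
+# offsets into the raw tuple returned by cext.proc_oneshot_info(), so a
+# field is read as (hypothetical pid):
+#
+#     raw = cext.proc_oneshot_info(1)
+#     ppid = raw[kinfo_proc_map['ppid']]
+#     rss = raw[kinfo_proc_map['rss']]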
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.virtual_memory()
+svmem = namedtuple(
+ 'svmem', ['total', 'available', 'percent', 'used', 'free',
+ 'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
+# psutil.cpu_times()
+scputimes = namedtuple(
+ 'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms', 'text', 'data', 'stack'])
+# psutil.Process.memory_full_info()
+pfullmem = pmem
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple(
+ 'pmmap_grouped', 'path rss private ref_count shadow_count')
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr perms path rss private ref_count shadow_count')
+# psutil.disk_io_counters()
+if FREEBSD:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time',
+ 'busy_time'])
+else:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes'])
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """System virtual memory as a namedtuple."""
+ mem = cext.virtual_mem()
+ total, free, active, inactive, wired, cached, buffers, shared = mem
+ if NETBSD:
+ # On NetBSD, buffers and shared memory are determined via /proc.
+ # The C ext sets them to 0.
+ with open('/proc/meminfo', 'rb') as f:
+ for line in f:
+ if line.startswith(b'Buffers:'):
+ buffers = int(line.split()[1]) * 1024
+ elif line.startswith(b'MemShared:'):
+ shared = int(line.split()[1]) * 1024
+ avail = inactive + cached + free
+ used = active + wired + cached
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, used, free,
+ active, inactive, buffers, cached, shared, wired)
+
+
+def swap_memory():
+ """System swap memory as (total, used, free, sin, sout) namedtuple."""
+ total, used, free, sin, sout = cext.swap_mem()
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system per-CPU times as a namedtuple"""
+ user, nice, system, idle, irq = cext.cpu_times()
+ return scputimes(user, nice, system, idle, irq)
+
+
+if HAS_PER_CPU_TIMES:
+ def per_cpu_times():
+ """Return system CPU times as a namedtuple"""
+ ret = []
+ for cpu_t in cext.per_cpu_times():
+ user, nice, system, idle, irq = cpu_t
+ item = scputimes(user, nice, system, idle, irq)
+ ret.append(item)
+ return ret
+else:
+ # XXX
+ # Ok, this is very dirty.
+ # On FreeBSD < 8 we cannot gather per-cpu information, see:
+ # https://github.com/giampaolo/psutil/issues/226
+ # If num cpus > 1, on first call we return single cpu times to avoid a
+ # crash at psutil import time.
+ # Subsequent calls will fail with NotImplementedError.
+ def per_cpu_times():
+ """Return system CPU times as a namedtuple"""
+ if cpu_count_logical() == 1:
+ return [cpu_times()]
+ if per_cpu_times.__called__:
+ raise NotImplementedError("supported only starting from FreeBSD 8")
+ per_cpu_times.__called__ = True
+ return [cpu_times()]
+
+ per_cpu_times.__called__ = False
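+
+ # Behavior sketch on a hypothetical multi-core FreeBSD < 8 box:
+ #
+ # per_cpu_times() # first call: [cpu_times()] (single summary entry)
+ # per_cpu_times() # second call: raises NotImplementedError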
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ return cext.cpu_count_logical()
+
+
+if OPENBSD or NETBSD:
+ def cpu_count_physical():
+ # OpenBSD and NetBSD do not implement this.
+ return 1 if cpu_count_logical() == 1 else None
+else:
+ def cpu_count_physical():
+ """Return the number of physical CPUs in the system."""
+ # From the C module we'll get an XML string similar to this:
+ # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
+ # We may get None in case "sysctl kern.sched.topology_spec"
+ # is not supported on this BSD version, in which case we'll mimic
+ # os.cpu_count() and return None.
+ ret = None
+ s = cext.cpu_count_phys()
+ if s is not None:
+ # get rid of padding chars appended at the end of the string
+ index = s.rfind("</groups>")
+ if index != -1:
+ s = s[:index + 9]
+ root = ET.fromstring(s)
+ try:
+ ret = len(root.findall('group/children/group/cpu')) or None
+ finally:
+ # needed otherwise it will memleak
+ root.clear()
+ if not ret:
+ # If logical CPUs are 1 it's obvious we'll have only 1
+ # physical CPU.
+ if cpu_count_logical() == 1:
+ return 1
+ return ret
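+
+ # For reference, kern.sched.topology_spec XML is shaped roughly like
+ # the (made-up) sample below; findall('group/children/group/cpu')
+ # then matches one <cpu> node per physical package:
+ #
+ # <groups>
+ # <group level="1" cache-level="0">
+ # <cpu count="4" mask="f">0, 1, 2, 3</cpu>
+ # <children>
+ # <group level="2" cache-level="2">
+ # <cpu count="2" mask="3">0, 1</cpu>
+ # </group>
+ # <group level="2" cache-level="2">
+ # <cpu count="2" mask="c">2, 3</cpu>
+ # </group>
+ # </children>
+ # </group>
+ # </groups>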
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ if FREEBSD:
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps.
+ ctxsw, intrs, soft_intrs, syscalls, traps = cext.cpu_stats()
+ elif NETBSD:
+ # XXX
+ # Note about intrs: the C extension returns 0. intrs
+ # can be determined via /proc/stat; it has the same value as
+ # soft_intrs though, so the kernel may be faking it (?).
+ #
+ # Note about syscalls: the C extension always sets it to 0 (?).
+ #
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps, faults and forks.
+ ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \
+ cext.cpu_stats()
+ with open('/proc/stat', 'rb') as f:
+ for line in f:
+ if line.startswith(b'intr'):
+ intrs = int(line.split()[1])
+ elif OPENBSD:
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps, faults and forks.
+ ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \
+ cext.cpu_stats()
+ return _common.scpustats(ctxsw, intrs, soft_intrs, syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+def disk_partitions(all=False):
+ """Return mounted disk partitions as a list of namedtuples.
+ 'all' argument is ignored, see:
+ https://github.com/giampaolo/psutil/issues/906
+ """
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+disk_usage = _psposix.disk_usage
+disk_io_counters = cext.disk_io_counters
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ names = net_io_counters().keys()
+ ret = {}
+ for name in names:
+ try:
+ mtu = cext_posix.net_if_mtu(name)
+ isup = cext_posix.net_if_is_running(name)
+ duplex, speed = cext_posix.net_if_duplex_speed(name)
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1279
+ if err.errno != errno.ENODEV:
+ raise
+ else:
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu)
+ return ret
+
+
+def net_connections(kind):
+ """System-wide network connections."""
+ if OPENBSD:
+ ret = []
+ for pid in pids():
+ try:
+ cons = Process(pid).connections(kind)
+ except (NoSuchProcess, ZombieProcess):
+ continue
+ else:
+ for conn in cons:
+ conn = list(conn)
+ conn.append(pid)
+ ret.append(_common.sconn(*conn))
+ return ret
+
+ if kind not in _common.conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+ families, types = conn_tmap[kind]
+ ret = set()
+ if NETBSD:
+ rawlist = cext.net_connections(-1)
+ else:
+ rawlist = cext.net_connections()
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ # TODO: apply filter at C level
+ if fam in families and type in types:
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES, pid)
+ ret.add(nt)
+ return list(ret)
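+
+# Usage sketch (hypothetical): listening TCP sockets, system-wide:
+#
+#     conns = net_connections(kind='tcp')
+#     listening = [c for c in conns if c.status == _common.CONN_LISTEN]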
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+if FREEBSD:
+
+ def sensors_battery():
+ """Return battery info."""
+ try:
+ percent, minsleft, power_plugged = cext.sensors_battery()
+ except NotImplementedError:
+ # See: https://github.com/giampaolo/psutil/issues/1074
+ return None
+ power_plugged = power_plugged == 1
+ if power_plugged:
+ secsleft = _common.POWER_TIME_UNLIMITED
+ elif minsleft == -1:
+ secsleft = _common.POWER_TIME_UNKNOWN
+ else:
+ secsleft = minsleft * 60
+ return _common.sbattery(percent, secsleft, power_plugged)
+
+ def sensors_temperatures():
+ "Return CPU cores temperatures if available, else an empty dict."
+ ret = defaultdict(list)
+ num_cpus = cpu_count_logical()
+ for cpu in range(num_cpus):
+ try:
+ current, high = cext.sensors_cpu_temperature(cpu)
+ if high <= 0:
+ high = None
+ name = "Core %s" % cpu
+ ret["coretemp"].append(
+ _common.shwtemp(name, current, high, high))
+ except NotImplementedError:
+ pass
+
+ return ret
+
+ def cpu_freq():
+ """Return frequency metrics for CPUs. As of Dec 2018 only
+ CPU 0 appears to be supported by FreeBSD and all other cores
+ match the frequency of CPU 0.
+ """
+ ret = []
+ num_cpus = cpu_count_logical()
+ for cpu in range(num_cpus):
+ try:
+ current, available_freq = cext.cpu_frequency(cpu)
+ except NotImplementedError:
+ continue
+ if available_freq:
+ try:
+ min_freq = int(available_freq.split(" ")[-1].split("/")[0])
+ except (IndexError, ValueError):
+ min_freq = None
+ try:
+ max_freq = int(available_freq.split(" ")[0].split("/")[0])
+ except (IndexError, ValueError):
+ max_freq = None
+ ret.append(_common.scpufreq(current, min_freq, max_freq))
+ return ret
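+
+ # The 'available_freq' string parsed above holds "MHz/mW" pairs
+ # ordered highest first (as in FreeBSD's dev.cpu.N.freq_levels
+ # sysctl; sample values made up):
+ #
+ # s = "2400/25000 1800/18000 600/6000"
+ # int(s.split(" ")[0].split("/")[0]) # max -> 2400
+ # int(s.split(" ")[-1].split("/")[0]) # min -> 600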
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ for item in rawlist:
+ user, tty, hostname, tstamp, pid = item
+ if pid == -1:
+ assert OPENBSD
+ pid = None
+ if tty == '~':
+ continue # reboot or shutdown
+ nt = _common.suser(user, tty or None, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+@memoize
+def _pid_0_exists():
+ try:
+ Process(0).name()
+ except NoSuchProcess:
+ return False
+ except AccessDenied:
+ return True
+ else:
+ return True
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ ret = cext.pids()
+ if OPENBSD and (0 not in ret) and _pid_0_exists():
+ # On OpenBSD the kernel does not return PID 0 (neither does
+ # ps) but it's actually queryable (Process(0) will succeed).
+ ret.insert(0, 0)
+ return ret
+
+
+if OPENBSD or NETBSD:
+ def pid_exists(pid):
+ """Return True if pid exists."""
+ exists = _psposix.pid_exists(pid)
+ if not exists:
+ # We do this because _psposix.pid_exists() lies in case of
+ # zombie processes.
+ return pid in pids()
+ else:
+ return True
+else:
+ pid_exists = _psposix.pid_exists
+
+
+def is_zombie(pid):
+ try:
+ st = cext.proc_oneshot_info(pid)[kinfo_proc_map['status']]
+ return st == cext.SZOMB
+ except Exception:
+ return False
+
+
+def wrap_exceptions(fun):
+ """Decorator which translates bare OSError exceptions into
+ NoSuchProcess and AccessDenied.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except ProcessLookupError:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ else:
+ raise NoSuchProcess(self.pid, self._name)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except OSError:
+ if self.pid == 0:
+ if 0 in pids():
+ raise AccessDenied(self.pid, self._name)
+ else:
+ raise
+ raise
+ return wrapper
+
+
+@contextlib.contextmanager
+def wrap_exceptions_procfs(inst):
+ """Same as above, for routines relying on reading /proc fs."""
+ try:
+ yield
+ except (ProcessLookupError, FileNotFoundError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if is_zombie(inst.pid):
+ raise ZombieProcess(inst.pid, inst._name, inst._ppid)
+ else:
+ raise NoSuchProcess(inst.pid, inst._name)
+ except PermissionError:
+ raise AccessDenied(inst.pid, inst._name)
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+ # For those C functions which do not raise NSP and may instead
+ # return an incorrect or incomplete result.
+ cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def oneshot(self):
+ """Retrieves multiple process info in one shot as a raw tuple."""
+ ret = cext.proc_oneshot_info(self.pid)
+ assert len(ret) == len(kinfo_proc_map)
+ return ret
+
+ def oneshot_enter(self):
+ self.oneshot.cache_activate(self)
+
+ def oneshot_exit(self):
+ self.oneshot.cache_deactivate(self)
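+
+ # Usage sketch (hypothetical pid): between oneshot_enter() and
+ # oneshot_exit() every oneshot() call is served from the memoized
+ # raw tuple, so e.g. name() and ppid() cost a single sysctl:
+ #
+ # p = Process(1)
+ # p.oneshot_enter()
+ # try:
+ # info = p.name(), p.ppid(), p.status()
+ # finally:
+ # p.oneshot_exit()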
+
+ @wrap_exceptions
+ def name(self):
+ name = self.oneshot()[kinfo_proc_map['name']]
+ return name if name is not None else cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ def exe(self):
+ if FREEBSD:
+ if self.pid == 0:
+ return '' # else NSP
+ return cext.proc_exe(self.pid)
+ elif NETBSD:
+ if self.pid == 0:
+ # /proc/0 dir exists but /proc/0/exe doesn't
+ return ""
+ with wrap_exceptions_procfs(self):
+ return os.readlink("/proc/%s/exe" % self.pid)
+ else:
+ # OpenBSD: exe cannot be determined; references:
+ # https://chromium.googlesource.com/chromium/src/base/+/
+ # master/base_paths_posix.cc
+ # We try our best guess by using which against the first
+ # cmdline arg (may return None).
+ cmdline = self.cmdline()
+ if cmdline:
+ return which(cmdline[0]) or ""
+ else:
+ return ""
+
+ @wrap_exceptions
+ def cmdline(self):
+ if OPENBSD and self.pid == 0:
+ return [] # ...else it crashes
+ elif NETBSD:
+ # XXX - most of the time the underlying sysctl() call on NetBSD
+ # and OpenBSD returns a truncated string.
+ # Also /proc/pid/cmdline behaves the same so it looks
+ # like this is a kernel bug.
+ try:
+ return cext.proc_cmdline(self.pid)
+ except OSError as err:
+ if err.errno == errno.EINVAL:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ elif not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name, self._ppid)
+ else:
+ # XXX: this happens with unicode tests. It means the C
+ # routine is unable to decode invalid unicode chars.
+ return []
+ else:
+ raise
+ else:
+ return cext.proc_cmdline(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid)
+
+ @wrap_exceptions
+ def terminal(self):
+ tty_nr = self.oneshot()[kinfo_proc_map['ttynr']]
+ tmap = _psposix.get_terminal_map()
+ try:
+ return tmap[tty_nr]
+ except KeyError:
+ return None
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self.oneshot()[kinfo_proc_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ rawtuple = self.oneshot()
+ return _common.puids(
+ rawtuple[kinfo_proc_map['real_uid']],
+ rawtuple[kinfo_proc_map['effective_uid']],
+ rawtuple[kinfo_proc_map['saved_uid']])
+
+ @wrap_exceptions
+ def gids(self):
+ rawtuple = self.oneshot()
+ return _common.pgids(
+ rawtuple[kinfo_proc_map['real_gid']],
+ rawtuple[kinfo_proc_map['effective_gid']],
+ rawtuple[kinfo_proc_map['saved_gid']])
+
+ @wrap_exceptions
+ def cpu_times(self):
+ rawtuple = self.oneshot()
+ return _common.pcputimes(
+ rawtuple[kinfo_proc_map['user_time']],
+ rawtuple[kinfo_proc_map['sys_time']],
+ rawtuple[kinfo_proc_map['ch_user_time']],
+ rawtuple[kinfo_proc_map['ch_sys_time']])
+
+ if FREEBSD:
+ @wrap_exceptions
+ def cpu_num(self):
+ return self.oneshot()[kinfo_proc_map['cpunum']]
+
+ @wrap_exceptions
+ def memory_info(self):
+ rawtuple = self.oneshot()
+ return pmem(
+ rawtuple[kinfo_proc_map['rss']],
+ rawtuple[kinfo_proc_map['vms']],
+ rawtuple[kinfo_proc_map['memtext']],
+ rawtuple[kinfo_proc_map['memdata']],
+ rawtuple[kinfo_proc_map['memstack']])
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def create_time(self):
+ return self.oneshot()[kinfo_proc_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ if HAS_PROC_NUM_THREADS:
+ # FreeBSD
+ return cext.proc_num_threads(self.pid)
+ else:
+ return len(self.threads())
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ rawtuple = self.oneshot()
+ return _common.pctxsw(
+ rawtuple[kinfo_proc_map['ctx_switches_vol']],
+ rawtuple[kinfo_proc_map['ctx_switches_unvol']])
+
+ @wrap_exceptions
+ def threads(self):
+ # Note: on OpenBSD this (/dev/mem) requires root access.
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ if OPENBSD:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ if kind not in conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+
+ if NETBSD:
+ families, types = conn_tmap[kind]
+ ret = []
+ rawlist = cext.net_connections(self.pid)
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ assert pid == self.pid
+ if fam in families and type in types:
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+ self._assert_alive()
+ return list(ret)
+
+ families, types = conn_tmap[kind]
+ rawlist = cext.proc_connections(self.pid, families, types)
+ ret = []
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status = item
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+
+ if OPENBSD:
+ self._assert_alive()
+
+ return ret
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def status(self):
+ code = self.oneshot()[kinfo_proc_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def io_counters(self):
+ rawtuple = self.oneshot()
+ return _common.pio(
+ rawtuple[kinfo_proc_map['read_io_count']],
+ rawtuple[kinfo_proc_map['write_io_count']],
+ -1,
+ -1)
+
+ @wrap_exceptions
+ def cwd(self):
+ """Return process current working directory."""
+ # sometimes we get an empty string, in which case we turn
+ # it into None
+ if OPENBSD and self.pid == 0:
+ return None # ...else it would raise EINVAL
+ elif NETBSD or HAS_PROC_OPEN_FILES:
+ # FreeBSD < 8 does not support functions based on
+ # kinfo_getfile() and kinfo_getvmmap()
+ return cext.proc_cwd(self.pid) or None
+ else:
+ raise NotImplementedError(
+ "supported only starting from FreeBSD 8" if
+ FREEBSD else "")
+
+ nt_mmap_grouped = namedtuple(
+ 'mmap', 'path rss private ref_count shadow_count')
+ nt_mmap_ext = namedtuple(
+ 'mmap', 'addr perms path rss private ref_count shadow_count')
+
+ def _not_implemented(self):
+ raise NotImplementedError
+
+ # FreeBSD < 8 does not support functions based on kinfo_getfile()
+ # and kinfo_getvmmap()
+ if HAS_PROC_OPEN_FILES:
+ @wrap_exceptions
+ def open_files(self):
+ """Return files opened by process as a list of namedtuples."""
+ rawlist = cext.proc_open_files(self.pid)
+ return [_common.popenfile(path, fd) for path, fd in rawlist]
+ else:
+ open_files = _not_implemented
+
+ # FreeBSD < 8 does not support functions based on kinfo_getfile()
+ # and kinfo_getvmmap()
+ if HAS_PROC_NUM_FDS:
+ @wrap_exceptions
+ def num_fds(self):
+ """Return the number of file descriptors opened by this process."""
+ ret = cext.proc_num_fds(self.pid)
+ if NETBSD:
+ self._assert_alive()
+ return ret
+ else:
+ num_fds = _not_implemented
+
+ # --- FreeBSD only APIs
+
+ if FREEBSD:
+
+ @wrap_exceptions
+ def cpu_affinity_get(self):
+ return cext.proc_cpu_affinity_get(self.pid)
+
+ @wrap_exceptions
+ def cpu_affinity_set(self, cpus):
+ # Pre-emptively check if CPUs are valid because the C
+ # function has a weird behavior in case of invalid CPUs,
+ # see: https://github.com/giampaolo/psutil/issues/586
+ allcpus = tuple(range(len(per_cpu_times())))
+ for cpu in cpus:
+ if cpu not in allcpus:
+ raise ValueError("invalid CPU #%i (choose between %s)"
+ % (cpu, allcpus))
+ try:
+ cext.proc_cpu_affinity_set(self.pid, cpus)
+ except OSError as err:
+ # 'man cpuset_setaffinity' about EDEADLK:
+ # <<the call would leave a thread without a valid CPU to run
+ # on because the set does not overlap with the thread's
+ # anonymous mask>>
+ if err.errno in (errno.EINVAL, errno.EDEADLK):
+ for cpu in cpus:
+ if cpu not in allcpus:
+ raise ValueError(
+ "invalid CPU #%i (choose between %s)" % (
+ cpu, allcpus))
+ raise
+
+ @wrap_exceptions
+ def memory_maps(self):
+ return cext.proc_memory_maps(self.pid)
+
+ @wrap_exceptions
+ def rlimit(self, resource, limits=None):
+ if limits is None:
+ return cext.proc_getrlimit(self.pid, resource)
+ else:
+ if len(limits) != 2:
+ raise ValueError(
+ "second argument must be a (soft, hard) tuple, "
+ "got %s" % repr(limits))
+ soft, hard = limits
+ return cext.proc_setrlimit(self.pid, resource, soft, hard)
diff --git a/contrib/python/psutil/py2/psutil/_pssunos.py b/contrib/python/psutil/py2/psutil/_pssunos.py
new file mode 100644
index 0000000000..5618bd4460
--- /dev/null
+++ b/contrib/python/psutil/py2/psutil/_pssunos.py
@@ -0,0 +1,727 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS Solaris platform implementation."""
+
+import errno
+import functools
+import os
+import socket
+import subprocess
+import sys
+from collections import namedtuple
+from socket import AF_INET
+
+from . import _common
+from . import _psposix
+from . import _psutil_posix as cext_posix
+from . import _psutil_sunos as cext
+from ._common import AccessDenied
+from ._common import AF_INET6
+from ._common import debug
+from ._common import get_procfs_path
+from ._common import isfile_strict
+from ._common import memoize_when_activated
+from ._common import NoSuchProcess
+from ._common import sockfam_to_enum
+from ._common import socktype_to_enum
+from ._common import usage_percent
+from ._common import ZombieProcess
+from ._compat import b
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import PY3
+
+
+__extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+PAGE_SIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+IS_64_BIT = sys.maxsize > 2**32
+
+CONN_IDLE = "IDLE"
+CONN_BOUND = "BOUND"
+
+PROC_STATUSES = {
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SONPROC: _common.STATUS_RUNNING, # same as run
+ cext.SWAIT: _common.STATUS_WAITING,
+}
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+ cext.TCPS_IDLE: CONN_IDLE, # sunos specific
+ cext.TCPS_BOUND: CONN_BOUND, # sunos specific
+}
+
+proc_info_map = dict(
+ ppid=0,
+ rss=1,
+ vms=2,
+ create_time=3,
+ nice=4,
+ num_threads=5,
+ status=6,
+ ttynr=7,
+ uid=8,
+ euid=9,
+ gid=10,
+ egid=11)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+# psutil.cpu_times(percpu=True)
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+pfullmem = pmem
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple('pmmap_grouped',
+ ['path', 'rss', 'anonymous', 'locked'])
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """Report virtual memory metrics."""
+ # we could have done this with kstat, but IMHO this is good enough
+ total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
+ # note: there's no difference on Solaris
+ free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+ """Report swap memory metrics."""
+ sin, sout = cext.swap_mem()
+ # XXX
+ # we are supposed to get total/free by doing so:
+ # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
+ # usr/src/cmd/swap/swap.c
+ # ...nevertheless I can't manage to obtain the same numbers as 'swap'
+ # cmdline utility, so let's parse its output (sigh!)
+ p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' %
+ os.environ['PATH'], 'swap', '-l'],
+ stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout = stdout.decode(sys.stdout.encoding)
+ if p.returncode != 0:
+ raise RuntimeError("'swap -l' failed (retcode=%s)" % p.returncode)
+
+ lines = stdout.strip().split('\n')[1:]
+ if not lines:
+ raise RuntimeError('no swap device(s) configured')
+ total = free = 0
+ for line in lines:
+ line = line.split()
+ t, f = line[3:5] # the 'blocks' and 'free' columns
+ total += int(int(t) * 512)
+ free += int(int(f) * 512)
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent,
+ sin * PAGE_SIZE, sout * PAGE_SIZE)
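+
+# For reference, the loop above expects 'swap -l' output shaped like the
+# (made-up) sample below; columns 4 and 5 are 512-byte blocks:
+#
+#     swapfile             dev  swaplo blocks   free
+#     /dev/dsk/c0t0d0s1   136,1      16 1638608 1600528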
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system-wide CPU times as a named tuple"""
+ ret = cext.per_cpu_times()
+ return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples"""
+ ret = cext.per_cpu_times()
+ return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # mimic os.cpu_count() behavior
+ return None
+
+
+def cpu_count_physical():
+ """Return the number of physical CPUs in the system."""
+ return cext.cpu_count_phys()
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ ctx_switches, interrupts, syscalls, traps = cext.cpu_stats()
+ soft_interrupts = 0
+ return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
+ syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def disk_partitions(all=False):
+ """Return system disk partitions."""
+ # TODO - the filtering logic should be better checked so that
+ # it tries to reflect 'df' as much as possible
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+ # Differently from, say, Linux, we don't have a list of
+ # common fs types so the best we can do, AFAIK, is to
+ # filter by filesystem having a total size > 0.
+ try:
+ if not disk_usage(mountpoint).total:
+ continue
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1674
+ debug("skipping %r: %r" % (mountpoint, err))
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ Only INET sockets are returned (UNIX are not).
+ """
+ cmap = _common.conn_tmap.copy()
+ if _pid == -1:
+ cmap.pop('unix', 0)
+ if kind not in cmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in cmap])))
+ families, types = _common.conn_tmap[kind]
+ rawlist = cext.net_connections(_pid)
+ ret = set()
+ for item in rawlist:
+ fd, fam, type_, laddr, raddr, status, pid = item
+ if fam not in families:
+ continue
+ if type_ not in types:
+ continue
+ # TODO: refactor and use _common.conn_to_ntuple.
+ if fam in (AF_INET, AF_INET6):
+ if laddr:
+ laddr = _common.addr(*laddr)
+ if raddr:
+ raddr = _common.addr(*raddr)
+ status = TCP_STATUSES[status]
+ fam = sockfam_to_enum(fam)
+ type_ = socktype_to_enum(type_)
+ if _pid == -1:
+ nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
+ else:
+ nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+ ret.add(nt)
+ return list(ret)
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ ret = cext.net_if_stats()
+ for name, items in ret.items():
+ isup, duplex, speed, mtu = items
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu)
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ localhost = (':0.0', ':0')
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in localhost:
+ hostname = 'localhost'
+ nt = _common.suser(user, tty, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix pid."""
+ return _psposix.pid_exists(pid)
+
+
+def wrap_exceptions(fun):
+ """Call callable into a try/except clause and translate ENOENT,
+ EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except OSError:
+ if self.pid == 0:
+ if 0 in pids():
+ raise AccessDenied(self.pid, self._name)
+ else:
+ raise
+ raise
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+ # For those C functions which do not raise NSP and may instead
+ # return an incorrect or incomplete result.
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ def oneshot_enter(self):
+ self._proc_name_and_args.cache_activate(self)
+ self._proc_basic_info.cache_activate(self)
+ self._proc_cred.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_name_and_args.cache_deactivate(self)
+ self._proc_basic_info.cache_deactivate(self)
+ self._proc_cred.cache_deactivate(self)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_name_and_args(self):
+ return cext.proc_name_and_args(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_basic_info(self):
+ if self.pid == 0 and not \
+ os.path.exists('%s/%s/psinfo' % (self._procfs_path, self.pid)):
+ raise AccessDenied(self.pid)
+ ret = cext.proc_basic_info(self.pid, self._procfs_path)
+ assert len(ret) == len(proc_info_map)
+ return ret
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_cred(self):
+ return cext.proc_cred(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def name(self):
+ # note: max len == 15
+ return self._proc_name_and_args()[0]
+
+ @wrap_exceptions
+ def exe(self):
+ try:
+ return os.readlink(
+ "%s/%s/path/a.out" % (self._procfs_path, self.pid))
+ except OSError:
+ pass # continue and guess the exe name from the cmdline
+ # Will be guessed later from cmdline but we want to explicitly
+ # invoke cmdline here in order to get an AccessDenied
+ # exception if the user does not have enough privileges.
+ self.cmdline()
+ return ""
+
+ @wrap_exceptions
+ def cmdline(self):
+ return self._proc_name_and_args()[1].split(' ')
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._proc_basic_info()[proc_info_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_basic_info()[proc_info_map['num_threads']]
+
+ @wrap_exceptions
+ def nice_get(self):
+ # Note #1: getpriority(3) doesn't work for realtime processes.
+ # Psinfo is what ps uses, see:
+ # https://github.com/giampaolo/psutil/issues/1194
+ return self._proc_basic_info()[proc_info_map['nice']]
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ if self.pid in (2, 3):
+ # Special case PIDs: internally setpriority(3) returns ESRCH
+ # (no such process), no matter what.
+ # The process actually exists though, as it has a name,
+ # creation time, etc.
+ raise AccessDenied(self.pid, self._name)
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ try:
+ real, effective, saved, _, _, _ = self._proc_cred()
+ except AccessDenied:
+ real = self._proc_basic_info()[proc_info_map['uid']]
+ effective = self._proc_basic_info()[proc_info_map['euid']]
+ saved = None
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def gids(self):
+ try:
+ _, _, _, real, effective, saved = self._proc_cred()
+ except AccessDenied:
+ real = self._proc_basic_info()[proc_info_map['gid']]
+ effective = self._proc_basic_info()[proc_info_map['egid']]
+ saved = None
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def cpu_times(self):
+ try:
+ times = cext.proc_cpu_times(self.pid, self._procfs_path)
+ except OSError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to determine CPU times we
+ # return 0.0 as a fallback. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ times = (0.0, 0.0, 0.0, 0.0)
+ else:
+ raise
+ return _common.pcputimes(*times)
+
+ @wrap_exceptions
+ def cpu_num(self):
+ return cext.proc_cpu_num(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def terminal(self):
+ procfs_path = self._procfs_path
+ hit_enoent = False
+ tty = self._proc_basic_info()[proc_info_map['ttynr']]
+ if tty != cext.PRNODEV:
+ for x in (0, 1, 2, 255):
+ try:
+ return os.readlink(
+ '%s/%d/path/%d' % (procfs_path, self.pid, x))
+ except FileNotFoundError:
+ hit_enoent = True
+ continue
+ if hit_enoent:
+ self._assert_alive()
+
+ @wrap_exceptions
+ def cwd(self):
+ # /proc/PID/path/cwd may not be resolved by readlink() even if
+ # it exists (ls shows it). If that's the case and the process
+ # is still alive return None (we can return None also on BSD).
+ # Reference: http://goo.gl/55XgO
+ procfs_path = self._procfs_path
+ try:
+ return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid))
+ except FileNotFoundError:
+ os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
+ ret = self._proc_basic_info()
+ rss = ret[proc_info_map['rss']] * 1024
+ vms = ret[proc_info_map['vms']] * 1024
+ return pmem(rss, vms)
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def status(self):
+ code = self._proc_basic_info()[proc_info_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def threads(self):
+ procfs_path = self._procfs_path
+ ret = []
+ tids = os.listdir('%s/%d/lwp' % (procfs_path, self.pid))
+ hit_enoent = False
+ for tid in tids:
+ tid = int(tid)
+ try:
+ utime, stime = cext.query_process_thread(
+ self.pid, tid, procfs_path)
+ except EnvironmentError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to determine the thread's
+ # CPU times we skip it. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ continue
+ # ENOENT == thread gone in meantime
+ if err.errno == errno.ENOENT:
+ hit_enoent = True
+ continue
+ raise
+ else:
+ nt = _common.pthread(tid, utime, stime)
+ ret.append(nt)
+ if hit_enoent:
+ self._assert_alive()
+ return ret
+
+ @wrap_exceptions
+ def open_files(self):
+ retlist = []
+ hit_enoent = False
+ procfs_path = self._procfs_path
+ pathdir = '%s/%d/path' % (procfs_path, self.pid)
+ for fd in os.listdir('%s/%d/fd' % (procfs_path, self.pid)):
+ path = os.path.join(pathdir, fd)
+ if os.path.islink(path):
+ try:
+ file = os.readlink(path)
+ except FileNotFoundError:
+ hit_enoent = True
+ continue
+ else:
+ if isfile_strict(file):
+ retlist.append(_common.popenfile(file, int(fd)))
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ def _get_unix_sockets(self, pid):
+ """Get UNIX sockets used by process by parsing 'pfiles' output."""
+ # TODO: rewrite this in C (...but the damn netstat source code
+ # does not include this part! Argh!!)
+ cmd = "pfiles %s" % pid
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode != 0:
+ if 'permission denied' in stderr.lower():
+ raise AccessDenied(self.pid, self._name)
+ if 'no such process' in stderr.lower():
+ raise NoSuchProcess(self.pid, self._name)
+ raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+
+ lines = stdout.split('\n')[2:]
+ for i, line in enumerate(lines):
+ line = line.lstrip()
+ if line.startswith('sockname: AF_UNIX'):
+ path = line.split(' ', 2)[2]
+ type = lines[i - 2].strip()
+ if type == 'SOCK_STREAM':
+ type = socket.SOCK_STREAM
+ elif type == 'SOCK_DGRAM':
+ type = socket.SOCK_DGRAM
+ else:
+ type = -1
+ yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
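+
+ # The parsing above expects 'pfiles' stanzas roughly like the
+ # (made-up) sample below, with the socket type two lines above
+ # the sockname line:
+ #
+ # 1: S_IFSOCK mode:0666 dev:560,0 ino:32407 uid:0 gid:0 rdev:0,0
+ # O_RDWR
+ # SOCK_STREAM
+ # SO_SNDBUF(16384),SO_RCVBUF(5120)
+ # sockname: AF_UNIX /var/tmp/example.sock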
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = net_connections(kind, _pid=self.pid)
+ # The underlying C implementation retrieves all OS connections
+ # and filters them by PID. At this point we can't tell whether
+ # an empty list means there were no connections for the process
+ # or the process is no longer active, so we force NSP in case the PID
+ # is no longer there.
+ if not ret:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ # UNIX sockets
+ if kind in ('all', 'unix'):
+ ret.extend([_common.pconn(*conn) for conn in
+ self._get_unix_sockets(self.pid)])
+ return ret
+
+ nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
+ nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
+
+ @wrap_exceptions
+ def memory_maps(self):
+ def toaddr(start, end):
+ return '%s-%s' % (hex(start)[2:].strip('L'),
+ hex(end)[2:].strip('L'))
+
+ procfs_path = self._procfs_path
+ retlist = []
+ try:
+ rawlist = cext.proc_memory_maps(self.pid, procfs_path)
+ except OSError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to determine the memory maps
+ # we return an empty list as a fallback. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ return []
+ else:
+ raise
+ hit_enoent = False
+ for item in rawlist:
+ addr, addrsize, perm, name, rss, anon, locked = item
+ addr = toaddr(addr, addrsize)
+ if not name.startswith('['):
+ try:
+ name = os.readlink(
+ '%s/%s/path/%s' % (procfs_path, self.pid, name))
+ except OSError as err:
+ if err.errno == errno.ENOENT:
+ # sometimes the link may not be resolved by
+ # readlink() even if it exists (ls shows it).
+ # If that's the case we just return the
+ # unresolved link path.
+ # This seems to be an inconsistency with /proc similar
+ # to: http://goo.gl/55XgO
+ name = '%s/%s/path/%s' % (procfs_path, self.pid, name)
+ hit_enoent = True
+ else:
+ raise
+ retlist.append((addr, perm, name, rss, anon, locked))
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
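+
+ # toaddr() above renders e.g. toaddr(0x8040000, 0x8062000) as
+ # '8040000-8062000', i.e. pmap-style address ranges.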
+
+ @wrap_exceptions
+ def num_fds(self):
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ return _common.pctxsw(
+ *cext.proc_num_ctx_switches(self.pid, self._procfs_path))
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
diff --git a/contrib/python/psutil/py2/psutil/_psutil_posix.c b/contrib/python/psutil/py2/psutil/_psutil_posix.c
index 305cec76d1..3447fc9017 100644
--- a/contrib/python/psutil/py2/psutil/_psutil_posix.c
+++ b/contrib/python/psutil/py2/psutil/_psutil_posix.c
@@ -18,9 +18,9 @@
#include <unistd.h>
#ifdef PSUTIL_SUNOS10
- #include "arch/solaris/v10/ifaddrs.h"
+ #error #include "arch/solaris/v10/ifaddrs.h"
#elif PSUTIL_AIX
- #include "arch/aix/ifaddrs.h"
+ #error #include "arch/aix/ifaddrs.h"
#else
#include <ifaddrs.h>
#endif
diff --git a/contrib/python/psutil/py2/psutil/arch/aix/ifaddrs.h b/contrib/python/psutil/py2/psutil/arch/aix/ifaddrs.h
deleted file mode 100644
index e15802bf7b..0000000000
--- a/contrib/python/psutil/py2/psutil/arch/aix/ifaddrs.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2017, Arnon Yaari
- * All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/*! Based on code from
- https://lists.samba.org/archive/samba-technical/2009-February/063079.html
-!*/
-
-
-#ifndef GENERIC_AIX_IFADDRS_H
-#define GENERIC_AIX_IFADDRS_H
-
-#include <sys/socket.h>
-#include <net/if.h>
-
-#undef ifa_dstaddr
-#undef ifa_broadaddr
-#define ifa_broadaddr ifa_dstaddr
-
-struct ifaddrs {
- struct ifaddrs *ifa_next;
- char *ifa_name;
- unsigned int ifa_flags;
- struct sockaddr *ifa_addr;
- struct sockaddr *ifa_netmask;
- struct sockaddr *ifa_dstaddr;
-};
-
-extern int getifaddrs(struct ifaddrs **);
-extern void freeifaddrs(struct ifaddrs *);
-#endif
diff --git a/contrib/python/psutil/py2/psutil/arch/osx/ya.make b/contrib/python/psutil/py2/psutil/arch/osx/ya.make
deleted file mode 100644
index 613c49f924..0000000000
--- a/contrib/python/psutil/py2/psutil/arch/osx/ya.make
+++ /dev/null
@@ -1,9 +0,0 @@
-PY23_NATIVE_LIBRARY()
-
-LICENSE(BSD-3-Clause)
-
-SRCS(
- process_info.c
-)
-
-END()
diff --git a/contrib/python/psutil/py2/psutil/arch/solaris/v10/ifaddrs.h b/contrib/python/psutil/py2/psutil/arch/solaris/v10/ifaddrs.h
deleted file mode 100644
index 0953a9b99a..0000000000
--- a/contrib/python/psutil/py2/psutil/arch/solaris/v10/ifaddrs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Reference: https://lists.samba.org/archive/samba-technical/2009-February/063079.html */
-
-
-#ifndef __IFADDRS_H__
-#define __IFADDRS_H__
-
-#include <sys/socket.h>
-#include <net/if.h>
-
-#undef ifa_dstaddr
-#undef ifa_broadaddr
-#define ifa_broadaddr ifa_dstaddr
-
-struct ifaddrs {
- struct ifaddrs *ifa_next;
- char *ifa_name;
- unsigned int ifa_flags;
- struct sockaddr *ifa_addr;
- struct sockaddr *ifa_netmask;
- struct sockaddr *ifa_dstaddr;
-};
-
-extern int getifaddrs(struct ifaddrs **);
-extern void freeifaddrs(struct ifaddrs *);
-
-#endif
diff --git a/contrib/python/psutil/py2/test/test.py b/contrib/python/psutil/py2/test/test.py
deleted file mode 100644
index 4f5a0e50d8..0000000000
--- a/contrib/python/psutil/py2/test/test.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from library.python.import_test import check_imports
-test_imports = lambda: check_imports(no_check=['psutil._ps*'])
-#from psutil._psutil_posix import net_if_addrs
-
diff --git a/contrib/python/psutil/py2/test/ya.make b/contrib/python/psutil/py2/test/ya.make
deleted file mode 100644
index 20d09c1d70..0000000000
--- a/contrib/python/psutil/py2/test/ya.make
+++ /dev/null
@@ -1,8 +0,0 @@
-PY2TEST()
-PEERDIR(
- contrib/python/psutil
- library/python/import_test
-)
-TEST_SRCS(test.py)
-NO_LINT()
-END()
diff --git a/contrib/python/psutil/py2/ya.make b/contrib/python/psutil/py2/ya.make
index ddc88f1692..0e3d24c4d0 100644
--- a/contrib/python/psutil/py2/ya.make
+++ b/contrib/python/psutil/py2/ya.make
@@ -1,58 +1,75 @@
PY2_LIBRARY()
+VERSION(5.8.0)
+
LICENSE(BSD-3-Clause)
-VERSION(5.8.0)
+NO_COMPILER_WARNINGS()
-NO_UTIL()
+NO_LINT()
-SRCDIR(contrib/python/psutil/py2/psutil)
+NO_CHECK_IMPORTS(
+ psutil._psaix
+ psutil._psbsd
+ psutil._psosx
+ psutil._pssunos
+ psutil._psutil_bsd
+ psutil._psutil_common
+ psutil._psutil_osx
+ psutil._psutil_sunos
+ psutil._psutil_windows
+ psutil._pswindows
+)
-NO_COMPILER_WARNINGS()
+NO_UTIL()
CFLAGS(
-DPSUTIL_VERSION=580
)
-IF (OS_LINUX OR OS_DARWIN)
- CFLAGS(
- -DPSUTIL_POSIX=1
- )
- SRCS(
- _psutil_common.c
- _psutil_posix.c
- )
- PY_REGISTER(psutil._psutil_posix)
-ENDIF ()
+SRCS(
+ psutil/_psutil_common.c
+)
IF (OS_LINUX)
CFLAGS(
+ -DPSUTIL_POSIX=1
-DPSUTIL_LINUX=1
)
SRCS(
- _psutil_linux.c
+ psutil/_psutil_linux.c
+ psutil/_psutil_posix.c
)
- PY_REGISTER(psutil._psutil_linux)
-ENDIF ()
+
+ PY_REGISTER(
+ psutil._psutil_linux
+ psutil._psutil_posix
+ )
+ENDIF()
IF (OS_DARWIN)
CFLAGS(
+ -DPSUTIL_POSIX=1
-DPSUTIL_OSX=1
)
- EXTRALIBS("-framework CoreFoundation -framework IOKit")
-
- PEERDIR(
- contrib/python/psutil/py2/psutil/arch/osx
+ LDFLAGS(
+ -framework CoreFoundation
+ -framework IOKit
)
SRCS(
- _psutil_osx.c
+ psutil/_psutil_osx.c
+ psutil/_psutil_posix.c
+ psutil/arch/osx/process_info.c
)
- PY_REGISTER(psutil._psutil_osx)
-ENDIF ()
+ PY_REGISTER(
+ psutil._psutil_osx
+ psutil._psutil_posix
+ )
+ENDIF()
IF (OS_WINDOWS)
CFLAGS(
@@ -68,80 +85,42 @@ IF (OS_WINDOWS)
)
SRCS(
- _psutil_common.c
- _psutil_windows.c
- arch/windows/cpu.c
- arch/windows/disk.c
- arch/windows/net.c
- arch/windows/process_handles.c
- arch/windows/process_info.c
- arch/windows/process_utils.c
- arch/windows/security.c
- arch/windows/services.c
- arch/windows/socks.c
- arch/windows/wmi.c
+ psutil/_psutil_windows.c
+ psutil/arch/windows/cpu.c
+ psutil/arch/windows/disk.c
+ psutil/arch/windows/net.c
+ psutil/arch/windows/process_handles.c
+ psutil/arch/windows/process_info.c
+ psutil/arch/windows/process_utils.c
+ psutil/arch/windows/security.c
+ psutil/arch/windows/services.c
+ psutil/arch/windows/socks.c
+ psutil/arch/windows/wmi.c
)
- PY_REGISTER(psutil._psutil_windows)
-ENDIF ()
-
-NO_CHECK_IMPORTS(
- psutil._psbsd
- psutil._psosx
- psutil._pssunos
- psutil._psutil_bsd
- psutil._psutil_common
- psutil._psutil_osx
- psutil._psutil_sunos
- psutil._psutil_windows
- psutil._pswindows
-)
+ PY_REGISTER(
+ psutil._psutil_windows
+ )
+ENDIF()
PY_SRCS(
TOP_LEVEL
psutil/__init__.py
psutil/_common.py
psutil/_compat.py
+ psutil/_psaix.py
+ psutil/_psbsd.py
+ psutil/_pslinux.py
+ psutil/_psosx.py
+ psutil/_psposix.py
+ psutil/_pssunos.py
+ psutil/_pswindows.py
)
-IF (OS_LINUX OR OS_DARWIN)
- PY_SRCS(
- TOP_LEVEL
- psutil/_psposix.py
- )
-ENDIF ()
-
-IF (OS_LINUX)
- PY_SRCS(
- TOP_LEVEL
- psutil/_pslinux.py
- )
-ENDIF ()
-
-IF (OS_DARWIN)
- PY_SRCS(
- TOP_LEVEL
- psutil/_psosx.py
- )
-ENDIF ()
-
-IF (OS_WINDOWS)
- PY_SRCS(
- TOP_LEVEL
- psutil/_pswindows.py
- )
-ENDIF ()
-
RESOURCE_FILES(
PREFIX contrib/python/psutil/py2/
.dist-info/METADATA
.dist-info/top_level.txt
)
-NO_LINT()
-
END()
-
-RECURSE_FOR_TESTS(
- test
-)
diff --git a/contrib/python/psutil/py3/psutil/_psaix.py b/contrib/python/psutil/py3/psutil/_psaix.py
new file mode 100644
index 0000000000..7160ecd63a
--- /dev/null
+++ b/contrib/python/psutil/py3/psutil/_psaix.py
@@ -0,0 +1,552 @@
+# Copyright (c) 2009, Giampaolo Rodola'
+# Copyright (c) 2017, Arnon Yaari
+# All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""AIX platform implementation."""
+
+import functools
+import glob
+import os
+import re
+import subprocess
+import sys
+from collections import namedtuple
+
+from . import _common
+from . import _psposix
+from . import _psutil_aix as cext
+from . import _psutil_posix as cext_posix
+from ._common import AccessDenied
+from ._common import conn_to_ntuple
+from ._common import get_procfs_path
+from ._common import memoize_when_activated
+from ._common import NIC_DUPLEX_FULL
+from ._common import NIC_DUPLEX_HALF
+from ._common import NIC_DUPLEX_UNKNOWN
+from ._common import NoSuchProcess
+from ._common import usage_percent
+from ._common import ZombieProcess
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import PY3
+
+
+__extra__all__ = ["PROCFS_PATH"]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+HAS_THREADS = hasattr(cext, "proc_threads")
+HAS_NET_IO_COUNTERS = hasattr(cext, "net_io_counters")
+HAS_PROC_IO_COUNTERS = hasattr(cext, "proc_io_counters")
+
+PAGE_SIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SACTIVE: _common.STATUS_RUNNING,
+ cext.SSWAP: _common.STATUS_RUNNING, # TODO what status is this?
+ cext.SSTOP: _common.STATUS_STOPPED,
+}
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
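+# proc_basic_info() on the C side returns a flat tuple; the map below
+# gives symbolic names to its indexes (illustrative use:
+# ret[proc_info_map['ppid']]).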
+proc_info_map = dict(
+ ppid=0,
+ rss=1,
+ vms=2,
+ create_time=3,
+ nice=4,
+ num_threads=5,
+ status=6,
+ ttynr=7)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+# psutil.Process.memory_full_info()
+pfullmem = pmem
+# psutil.Process.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ total, avail, free, pinned, inuse = cext.virtual_mem()
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, inuse, free)
+
+
+def swap_memory():
+ """Swap system memory as a (total, used, free, sin, sout) tuple."""
+ total, free, sin, sout = cext.swap_mem()
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system-wide CPU times as a named tuple"""
+ ret = cext.per_cpu_times()
+ return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples"""
+ ret = cext.per_cpu_times()
+ return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # mimic os.cpu_count() behavior
+ return None
+
+
+def cpu_count_physical():
+ cmd = "lsdev -Cc processor"
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode != 0:
+ raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+ processors = stdout.strip().splitlines()
+ return len(processors) or None
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ ctx_switches, interrupts, soft_interrupts, syscalls = cext.cpu_stats()
+ return _common.scpustats(
+ ctx_switches, interrupts, soft_interrupts, syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def disk_partitions(all=False):
+ """Return system disk partitions."""
+ # TODO - the filtering logic should be better checked so that
+ # it tries to reflect 'df' as much as possible
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+ # Differently from, say, Linux, we don't have a list of
+ # common fs types so the best we can do, AFAIK, is to
+ # filter by filesystem having a total size > 0.
+ if not disk_usage(mountpoint).total:
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_if_addrs = cext_posix.net_if_addrs
+
+if HAS_NET_IO_COUNTERS:
+ net_io_counters = cext.net_io_counters
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ """
+ cmap = _common.conn_tmap
+ if kind not in cmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in cmap])))
+ families, types = _common.conn_tmap[kind]
+ rawlist = cext.net_connections(_pid)
+ ret = []
+ for item in rawlist:
+ fd, fam, type_, laddr, raddr, status, pid = item
+ if fam not in families:
+ continue
+ if type_ not in types:
+ continue
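+        # The pid field is reported only for system-wide queries
+        # (_pid == -1); a per-process caller already knows it.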
+ nt = conn_to_ntuple(fd, fam, type_, laddr, raddr, status,
+ TCP_STATUSES, pid=pid if _pid == -1 else None)
+ ret.append(nt)
+ return ret
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ duplex_map = {"Full": NIC_DUPLEX_FULL,
+ "Half": NIC_DUPLEX_HALF}
+ names = set([x[0] for x in net_if_addrs()])
+ ret = {}
+ for name in names:
+ isup, mtu = cext.net_if_stats(name)
+
+ # try to get speed and duplex
+ # TODO: rewrite this in C (entstat forks, so use truss -f to follow.
+ # looks like it is using an undocumented ioctl?)
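+        # The regex below expects entstat output shaped roughly like
+        # "Media Speed Running: 100 Mbps Full Duplex" (illustrative
+        # sample, not verbatim entstat output).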
+ duplex = ""
+ speed = 0
+ p = subprocess.Popen(["/usr/bin/entstat", "-d", name],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode == 0:
+ re_result = re.search(
+ r"Running: (\d+) Mbps.*?(\w+) Duplex", stdout)
+ if re_result is not None:
+ speed = int(re_result.group(1))
+ duplex = re_result.group(2)
+
+ duplex = duplex_map.get(duplex, NIC_DUPLEX_UNKNOWN)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu)
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ localhost = (':0.0', ':0')
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in localhost:
+ hostname = 'localhost'
+ nt = _common.suser(user, tty, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(get_procfs_path()) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix pid."""
+ return os.path.exists(os.path.join(get_procfs_path(), str(pid), "psinfo"))
+
+
+def wrap_exceptions(fun):
+ """Call callable into a try/except clause and translate ENOENT,
+ EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
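+    # oneshot_enter()/oneshot_exit() are driven by the public
+    # psutil.Process.oneshot() context manager, letting repeated
+    # accessor calls reuse one cached proc_basic_info()/proc_cred() read.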
+ def oneshot_enter(self):
+ self._proc_basic_info.cache_activate(self)
+ self._proc_cred.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_basic_info.cache_deactivate(self)
+ self._proc_cred.cache_deactivate(self)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_basic_info(self):
+ return cext.proc_basic_info(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_cred(self):
+ return cext.proc_cred(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def name(self):
+ if self.pid == 0:
+ return "swapper"
+ # note: max 16 characters
+ return cext.proc_name(self.pid, self._procfs_path).rstrip("\x00")
+
+ @wrap_exceptions
+ def exe(self):
+ # there is no way to get executable path in AIX other than to guess,
+ # and guessing is more complex than what's in the wrapping class
+ cmdline = self.cmdline()
+ if not cmdline:
+ return ''
+ exe = cmdline[0]
+ if os.path.sep in exe:
+ # relative or absolute path
+ if not os.path.isabs(exe):
+ # if cwd has changed, we're out of luck - this may be wrong!
+ exe = os.path.abspath(os.path.join(self.cwd(), exe))
+ if (os.path.isabs(exe) and
+ os.path.isfile(exe) and
+ os.access(exe, os.X_OK)):
+ return exe
+ # not found, move to search in PATH using basename only
+ exe = os.path.basename(exe)
+ # search for exe name PATH
+ for path in os.environ["PATH"].split(":"):
+ possible_exe = os.path.abspath(os.path.join(path, exe))
+ if (os.path.isfile(possible_exe) and
+ os.access(possible_exe, os.X_OK)):
+ return possible_exe
+ return ''
+
+ @wrap_exceptions
+ def cmdline(self):
+ return cext.proc_args(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._proc_basic_info()[proc_info_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_basic_info()[proc_info_map['num_threads']]
+
+ if HAS_THREADS:
+ @wrap_exceptions
+ def threads(self):
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ # The underlying C implementation retrieves all OS threads
+ # and filters them by PID. At this point we can't tell whether
+        # an empty list means the process has no threads or the
+        # process is no longer active, so we force NSP in case the PID
+        # is no longer there.
+ if not retlist:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = net_connections(kind, _pid=self.pid)
+ # The underlying C implementation retrieves all OS connections
+ # and filters them by PID. At this point we can't tell whether
+        # an empty list means there were no connections for the process
+        # or the process is no longer active, so we force NSP in case
+        # the PID is no longer there.
+ if not ret:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+ return ret
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ real, effective, saved, _, _, _ = self._proc_cred()
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def gids(self):
+ _, _, _, real, effective, saved = self._proc_cred()
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def cpu_times(self):
+ cpu_times = cext.proc_cpu_times(self.pid, self._procfs_path)
+ return _common.pcputimes(*cpu_times)
+
+ @wrap_exceptions
+ def terminal(self):
+ ttydev = self._proc_basic_info()[proc_info_map['ttynr']]
+ # convert from 64-bit dev_t to 32-bit dev_t and then map the device
+ ttydev = (((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF))
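+        # Worked example with a hypothetical value: 0x0000008700000005
+        # -> ((0x0000008700000000 >> 16) | 0x0005) == 0x870005, which is
+        # then compared against st_rdev of the /dev/** entries below.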
+        # try to match ttydev against the rdev of the /dev/** files
+ for dev in glob.glob("/dev/**/*"):
+ if os.stat(dev).st_rdev == ttydev:
+ return dev
+ return None
+
+ @wrap_exceptions
+ def cwd(self):
+ procfs_path = self._procfs_path
+ try:
+ result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid))
+ return result.rstrip('/')
+ except FileNotFoundError:
+ os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
+ ret = self._proc_basic_info()
+ rss = ret[proc_info_map['rss']] * 1024
+ vms = ret[proc_info_map['vms']] * 1024
+ return pmem(rss, vms)
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def status(self):
+ code = self._proc_basic_info()[proc_info_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ def open_files(self):
+ # TODO rewrite without using procfiles (stat /proc/pid/fd/* and then
+ # find matching name of the inode)
+ p = subprocess.Popen(["/usr/bin/procfiles", "-n", str(self.pid)],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if "no such process" in stderr.lower():
+ raise NoSuchProcess(self.pid, self._name)
+ procfiles = re.findall(r"(\d+): S_IFREG.*\s*.*name:(.*)\n", stdout)
+ retlist = []
+ for fd, path in procfiles:
+ path = path.strip()
+ if path.startswith("//"):
+ path = path[1:]
+ if path.lower() == "cannot be retrieved":
+ continue
+ retlist.append(_common.popenfile(path, int(fd)))
+ return retlist
+
+ @wrap_exceptions
+ def num_fds(self):
+ if self.pid == 0: # no /proc/0/fd
+ return 0
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ return _common.pctxsw(
+ *cext.proc_num_ctx_switches(self.pid))
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ if HAS_PROC_IO_COUNTERS:
+ @wrap_exceptions
+ def io_counters(self):
+ try:
+ rc, wc, rb, wb = cext.proc_io_counters(self.pid)
+ except OSError:
+ # if process is terminated, proc_io_counters returns OSError
+ # instead of NSP
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ raise
+ return _common.pio(rc, wc, rb, wb)
diff --git a/contrib/python/psutil/py3/psutil/_psbsd.py b/contrib/python/psutil/py3/psutil/_psbsd.py
new file mode 100644
index 0000000000..764463e980
--- /dev/null
+++ b/contrib/python/psutil/py3/psutil/_psbsd.py
@@ -0,0 +1,917 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD, OpenBSD and NetBSD platforms implementation."""
+
+import contextlib
+import errno
+import functools
+import os
+import xml.etree.ElementTree as ET
+from collections import namedtuple
+from collections import defaultdict
+
+from . import _common
+from . import _psposix
+from . import _psutil_bsd as cext
+from . import _psutil_posix as cext_posix
+from ._common import AccessDenied
+from ._common import conn_tmap
+from ._common import conn_to_ntuple
+from ._common import FREEBSD
+from ._common import memoize
+from ._common import memoize_when_activated
+from ._common import NETBSD
+from ._common import NoSuchProcess
+from ._common import OPENBSD
+from ._common import usage_percent
+from ._common import ZombieProcess
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import which
+
+
+__extra__all__ = []
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+if FREEBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SWAIT: _common.STATUS_WAITING,
+ cext.SLOCK: _common.STATUS_LOCKED,
+ }
+elif OPENBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ # According to /usr/include/sys/proc.h SZOMB is unused.
+ # test_zombie_process() shows that SDEAD is the right
+ # equivalent. Also it appears there's no equivalent of
+ # psutil.STATUS_DEAD. SDEAD really means STATUS_ZOMBIE.
+ # cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SDEAD: _common.STATUS_ZOMBIE,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ # From http://www.eecs.harvard.edu/~margo/cs161/videos/proc.h.txt
+ # OpenBSD has SRUN and SONPROC: SRUN indicates that a process
+ # is runnable but *not* yet running, i.e. is on a run queue.
+ # SONPROC indicates that the process is actually executing on
+ # a CPU, i.e. it is no longer on a run queue.
+ # As such we'll map SRUN to STATUS_WAKING and SONPROC to
+ # STATUS_RUNNING
+ cext.SRUN: _common.STATUS_WAKING,
+ cext.SONPROC: _common.STATUS_RUNNING,
+ }
+elif NETBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SRUN: _common.STATUS_WAKING,
+ cext.SONPROC: _common.STATUS_RUNNING,
+ }
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PAGESIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+HAS_PER_CPU_TIMES = hasattr(cext, "per_cpu_times")
+HAS_PROC_NUM_THREADS = hasattr(cext, "proc_num_threads")
+HAS_PROC_OPEN_FILES = hasattr(cext, 'proc_open_files')
+HAS_PROC_NUM_FDS = hasattr(cext, 'proc_num_fds')
+
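+# Symbolic indexes into the raw tuple returned by
+# cext.proc_oneshot_info() (see Process.oneshot() below), e.g.
+# rawtuple[kinfo_proc_map['status']].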
+kinfo_proc_map = dict(
+ ppid=0,
+ status=1,
+ real_uid=2,
+ effective_uid=3,
+ saved_uid=4,
+ real_gid=5,
+ effective_gid=6,
+ saved_gid=7,
+ ttynr=8,
+ create_time=9,
+ ctx_switches_vol=10,
+ ctx_switches_unvol=11,
+ read_io_count=12,
+ write_io_count=13,
+ user_time=14,
+ sys_time=15,
+ ch_user_time=16,
+ ch_sys_time=17,
+ rss=18,
+ vms=19,
+ memtext=20,
+ memdata=21,
+ memstack=22,
+ cpunum=23,
+ name=24,
+)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.virtual_memory()
+svmem = namedtuple(
+ 'svmem', ['total', 'available', 'percent', 'used', 'free',
+ 'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
+# psutil.cpu_times()
+scputimes = namedtuple(
+ 'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms', 'text', 'data', 'stack'])
+# psutil.Process.memory_full_info()
+pfullmem = pmem
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple(
+ 'pmmap_grouped', 'path rss, private, ref_count, shadow_count')
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')
+# psutil.disk_io_counters()
+if FREEBSD:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time',
+ 'busy_time'])
+else:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes'])
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """System virtual memory as a namedtuple."""
+ mem = cext.virtual_mem()
+ total, free, active, inactive, wired, cached, buffers, shared = mem
+ if NETBSD:
+        # On NetBSD buffers and shared mem are determined via /proc.
+        # The C ext sets them to 0.
+ with open('/proc/meminfo', 'rb') as f:
+ for line in f:
+ if line.startswith(b'Buffers:'):
+ buffers = int(line.split()[1]) * 1024
+ elif line.startswith(b'MemShared:'):
+ shared = int(line.split()[1]) * 1024
+ avail = inactive + cached + free
+ used = active + wired + cached
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, used, free,
+ active, inactive, buffers, cached, shared, wired)
+
+
+def swap_memory():
+ """System swap memory as (total, used, free, sin, sout) namedtuple."""
+ total, used, free, sin, sout = cext.swap_mem()
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system per-CPU times as a namedtuple"""
+ user, nice, system, idle, irq = cext.cpu_times()
+ return scputimes(user, nice, system, idle, irq)
+
+
+if HAS_PER_CPU_TIMES:
+ def per_cpu_times():
+ """Return system CPU times as a namedtuple"""
+ ret = []
+ for cpu_t in cext.per_cpu_times():
+ user, nice, system, idle, irq = cpu_t
+ item = scputimes(user, nice, system, idle, irq)
+ ret.append(item)
+ return ret
+else:
+ # XXX
+ # Ok, this is very dirty.
+ # On FreeBSD < 8 we cannot gather per-cpu information, see:
+ # https://github.com/giampaolo/psutil/issues/226
+ # If num cpus > 1, on first call we return single cpu times to avoid a
+ # crash at psutil import time.
+ # Next calls will fail with NotImplementedError
+ def per_cpu_times():
+ """Return system CPU times as a namedtuple"""
+ if cpu_count_logical() == 1:
+ return [cpu_times()]
+ if per_cpu_times.__called__:
+ raise NotImplementedError("supported only starting from FreeBSD 8")
+ per_cpu_times.__called__ = True
+ return [cpu_times()]
+
+ per_cpu_times.__called__ = False
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ return cext.cpu_count_logical()
+
+
+if OPENBSD or NETBSD:
+ def cpu_count_physical():
+ # OpenBSD and NetBSD do not implement this.
+ return 1 if cpu_count_logical() == 1 else None
+else:
+ def cpu_count_physical():
+ """Return the number of physical CPUs in the system."""
+ # From the C module we'll get an XML string similar to this:
+ # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
+ # We may get None in case "sysctl kern.sched.topology_spec"
+ # is not supported on this BSD version, in which case we'll mimic
+ # os.cpu_count() and return None.
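+        # Abridged sketch of the XML shape findall() walks below
+        # (illustrative, not a verbatim kernel dump):
+        #   <groups><group><children>
+        #     <group><cpu count=".." mask="..">0, 1</cpu></group>
+        #   </children></group></groups>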
+ ret = None
+ s = cext.cpu_count_phys()
+ if s is not None:
+ # get rid of padding chars appended at the end of the string
+ index = s.rfind("</groups>")
+ if index != -1:
+ s = s[:index + 9]
+ root = ET.fromstring(s)
+ try:
+ ret = len(root.findall('group/children/group/cpu')) or None
+ finally:
+ # needed otherwise it will memleak
+ root.clear()
+ if not ret:
+ # If logical CPUs are 1 it's obvious we'll have only 1
+ # physical CPU.
+ if cpu_count_logical() == 1:
+ return 1
+ return ret
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ if FREEBSD:
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps.
+ ctxsw, intrs, soft_intrs, syscalls, traps = cext.cpu_stats()
+ elif NETBSD:
+ # XXX
+ # Note about intrs: the C extension returns 0. intrs
+ # can be determined via /proc/stat; it has the same value as
+        # soft_intrs though, so the kernel is faking it (?).
+ #
+ # Note about syscalls: the C extension always sets it to 0 (?).
+ #
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps, faults and forks.
+ ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \
+ cext.cpu_stats()
+ with open('/proc/stat', 'rb') as f:
+ for line in f:
+ if line.startswith(b'intr'):
+ intrs = int(line.split()[1])
+ elif OPENBSD:
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps, faults and forks.
+ ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \
+ cext.cpu_stats()
+ return _common.scpustats(ctxsw, intrs, soft_intrs, syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+def disk_partitions(all=False):
+ """Return mounted disk partitions as a list of namedtuples.
+ 'all' argument is ignored, see:
+ https://github.com/giampaolo/psutil/issues/906
+ """
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+disk_usage = _psposix.disk_usage
+disk_io_counters = cext.disk_io_counters
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ names = net_io_counters().keys()
+ ret = {}
+ for name in names:
+ try:
+ mtu = cext_posix.net_if_mtu(name)
+ isup = cext_posix.net_if_is_running(name)
+ duplex, speed = cext_posix.net_if_duplex_speed(name)
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1279
+ if err.errno != errno.ENODEV:
+ raise
+ else:
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu)
+ return ret
+
+
+def net_connections(kind):
+ """System-wide network connections."""
+ if OPENBSD:
+ ret = []
+ for pid in pids():
+ try:
+ cons = Process(pid).connections(kind)
+ except (NoSuchProcess, ZombieProcess):
+ continue
+ else:
+ for conn in cons:
+ conn = list(conn)
+ conn.append(pid)
+ ret.append(_common.sconn(*conn))
+ return ret
+
+ if kind not in _common.conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+ families, types = conn_tmap[kind]
+ ret = set()
+ if NETBSD:
+ rawlist = cext.net_connections(-1)
+ else:
+ rawlist = cext.net_connections()
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ # TODO: apply filter at C level
+ if fam in families and type in types:
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES, pid)
+ ret.add(nt)
+ return list(ret)
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+if FREEBSD:
+
+ def sensors_battery():
+ """Return battery info."""
+ try:
+ percent, minsleft, power_plugged = cext.sensors_battery()
+ except NotImplementedError:
+ # See: https://github.com/giampaolo/psutil/issues/1074
+ return None
+ power_plugged = power_plugged == 1
+ if power_plugged:
+ secsleft = _common.POWER_TIME_UNLIMITED
+ elif minsleft == -1:
+ secsleft = _common.POWER_TIME_UNKNOWN
+ else:
+ secsleft = minsleft * 60
+ return _common.sbattery(percent, secsleft, power_plugged)
+
+ def sensors_temperatures():
+ "Return CPU cores temperatures if available, else an empty dict."
+ ret = defaultdict(list)
+ num_cpus = cpu_count_logical()
+ for cpu in range(num_cpus):
+ try:
+ current, high = cext.sensors_cpu_temperature(cpu)
+ if high <= 0:
+ high = None
+ name = "Core %s" % cpu
+ ret["coretemp"].append(
+ _common.shwtemp(name, current, high, high))
+ except NotImplementedError:
+ pass
+
+ return ret
+
+ def cpu_freq():
+ """Return frequency metrics for CPUs. As of Dec 2018 only
+ CPU 0 appears to be supported by FreeBSD and all other cores
+ match the frequency of CPU 0.
+ """
+ ret = []
+ num_cpus = cpu_count_logical()
+ for cpu in range(num_cpus):
+ try:
+ current, available_freq = cext.cpu_frequency(cpu)
+ except NotImplementedError:
+ continue
+ if available_freq:
+ try:
+ min_freq = int(available_freq.split(" ")[-1].split("/")[0])
+                except (IndexError, ValueError):
+ min_freq = None
+ try:
+ max_freq = int(available_freq.split(" ")[0].split("/")[0])
+                except (IndexError, ValueError):
+ max_freq = None
+ ret.append(_common.scpufreq(current, min_freq, max_freq))
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ for item in rawlist:
+ user, tty, hostname, tstamp, pid = item
+ if pid == -1:
+ assert OPENBSD
+ pid = None
+ if tty == '~':
+ continue # reboot or shutdown
+ nt = _common.suser(user, tty or None, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+@memoize
+def _pid_0_exists():
+ try:
+ Process(0).name()
+ except NoSuchProcess:
+ return False
+ except AccessDenied:
+ return True
+ else:
+ return True
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ ret = cext.pids()
+ if OPENBSD and (0 not in ret) and _pid_0_exists():
+ # On OpenBSD the kernel does not return PID 0 (neither does
+        # ps) but it's actually queryable (Process(0) will succeed).
+ ret.insert(0, 0)
+ return ret
+
+
+if OPENBSD or NETBSD:
+ def pid_exists(pid):
+ """Return True if pid exists."""
+ exists = _psposix.pid_exists(pid)
+ if not exists:
+ # We do this because _psposix.pid_exists() lies in case of
+ # zombie processes.
+ return pid in pids()
+ else:
+ return True
+else:
+ pid_exists = _psposix.pid_exists
+
+
+def is_zombie(pid):
+ try:
+ st = cext.proc_oneshot_info(pid)[kinfo_proc_map['status']]
+ return st == cext.SZOMB
+ except Exception:
+ return False
+
+
+def wrap_exceptions(fun):
+ """Decorator which translates bare OSError exceptions into
+ NoSuchProcess and AccessDenied.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except ProcessLookupError:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ else:
+ raise NoSuchProcess(self.pid, self._name)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except OSError:
+ if self.pid == 0:
+ if 0 in pids():
+ raise AccessDenied(self.pid, self._name)
+ else:
+ raise
+ raise
+ return wrapper
+
+
+@contextlib.contextmanager
+def wrap_exceptions_procfs(inst):
+ """Same as above, for routines relying on reading /proc fs."""
+ try:
+ yield
+ except (ProcessLookupError, FileNotFoundError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if is_zombie(inst.pid):
+ raise ZombieProcess(inst.pid, inst._name, inst._ppid)
+ else:
+ raise NoSuchProcess(inst.pid, inst._name)
+ except PermissionError:
+ raise AccessDenied(inst.pid, inst._name)
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+        # For those C functions which do not raise NSP, possibly
+        # returning an incorrect or incomplete result.
+ cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def oneshot(self):
+ """Retrieves multiple process info in one shot as a raw tuple."""
+ ret = cext.proc_oneshot_info(self.pid)
+ assert len(ret) == len(kinfo_proc_map)
+ return ret
+
+ def oneshot_enter(self):
+ self.oneshot.cache_activate(self)
+
+ def oneshot_exit(self):
+ self.oneshot.cache_deactivate(self)
+
+ @wrap_exceptions
+ def name(self):
+ name = self.oneshot()[kinfo_proc_map['name']]
+ return name if name is not None else cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ def exe(self):
+ if FREEBSD:
+ if self.pid == 0:
+ return '' # else NSP
+ return cext.proc_exe(self.pid)
+ elif NETBSD:
+ if self.pid == 0:
+ # /proc/0 dir exists but /proc/0/exe doesn't
+ return ""
+ with wrap_exceptions_procfs(self):
+ return os.readlink("/proc/%s/exe" % self.pid)
+ else:
+ # OpenBSD: exe cannot be determined; references:
+ # https://chromium.googlesource.com/chromium/src/base/+/
+ # master/base_paths_posix.cc
+ # We try our best guess by using which against the first
+ # cmdline arg (may return None).
+ cmdline = self.cmdline()
+ if cmdline:
+ return which(cmdline[0]) or ""
+ else:
+ return ""
+
+ @wrap_exceptions
+ def cmdline(self):
+ if OPENBSD and self.pid == 0:
+ return [] # ...else it crashes
+ elif NETBSD:
+            # XXX - most of the time the underlying sysctl() call on Net
+ # and Open BSD returns a truncated string.
+ # Also /proc/pid/cmdline behaves the same so it looks
+ # like this is a kernel bug.
+ try:
+ return cext.proc_cmdline(self.pid)
+ except OSError as err:
+ if err.errno == errno.EINVAL:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ elif not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name, self._ppid)
+ else:
+ # XXX: this happens with unicode tests. It means the C
+ # routine is unable to decode invalid unicode chars.
+ return []
+ else:
+ raise
+ else:
+ return cext.proc_cmdline(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid)
+
+ @wrap_exceptions
+ def terminal(self):
+ tty_nr = self.oneshot()[kinfo_proc_map['ttynr']]
+ tmap = _psposix.get_terminal_map()
+ try:
+ return tmap[tty_nr]
+ except KeyError:
+ return None
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self.oneshot()[kinfo_proc_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ rawtuple = self.oneshot()
+ return _common.puids(
+ rawtuple[kinfo_proc_map['real_uid']],
+ rawtuple[kinfo_proc_map['effective_uid']],
+ rawtuple[kinfo_proc_map['saved_uid']])
+
+ @wrap_exceptions
+ def gids(self):
+ rawtuple = self.oneshot()
+ return _common.pgids(
+ rawtuple[kinfo_proc_map['real_gid']],
+ rawtuple[kinfo_proc_map['effective_gid']],
+ rawtuple[kinfo_proc_map['saved_gid']])
+
+ @wrap_exceptions
+ def cpu_times(self):
+ rawtuple = self.oneshot()
+ return _common.pcputimes(
+ rawtuple[kinfo_proc_map['user_time']],
+ rawtuple[kinfo_proc_map['sys_time']],
+ rawtuple[kinfo_proc_map['ch_user_time']],
+ rawtuple[kinfo_proc_map['ch_sys_time']])
+
+ if FREEBSD:
+ @wrap_exceptions
+ def cpu_num(self):
+ return self.oneshot()[kinfo_proc_map['cpunum']]
+
+ @wrap_exceptions
+ def memory_info(self):
+ rawtuple = self.oneshot()
+ return pmem(
+ rawtuple[kinfo_proc_map['rss']],
+ rawtuple[kinfo_proc_map['vms']],
+ rawtuple[kinfo_proc_map['memtext']],
+ rawtuple[kinfo_proc_map['memdata']],
+ rawtuple[kinfo_proc_map['memstack']])
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def create_time(self):
+ return self.oneshot()[kinfo_proc_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ if HAS_PROC_NUM_THREADS:
+ # FreeBSD
+ return cext.proc_num_threads(self.pid)
+ else:
+ return len(self.threads())
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ rawtuple = self.oneshot()
+ return _common.pctxsw(
+ rawtuple[kinfo_proc_map['ctx_switches_vol']],
+ rawtuple[kinfo_proc_map['ctx_switches_unvol']])
+
+ @wrap_exceptions
+ def threads(self):
+        # Note: on OpenBSD this (/dev/mem) requires root access.
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ if OPENBSD:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ if kind not in conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+
+ if NETBSD:
+ families, types = conn_tmap[kind]
+ ret = []
+ rawlist = cext.net_connections(self.pid)
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ assert pid == self.pid
+ if fam in families and type in types:
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+ self._assert_alive()
+ return list(ret)
+
+ families, types = conn_tmap[kind]
+ rawlist = cext.proc_connections(self.pid, families, types)
+ ret = []
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status = item
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+
+ if OPENBSD:
+ self._assert_alive()
+
+ return ret
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def status(self):
+ code = self.oneshot()[kinfo_proc_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def io_counters(self):
+ rawtuple = self.oneshot()
+ return _common.pio(
+ rawtuple[kinfo_proc_map['read_io_count']],
+ rawtuple[kinfo_proc_map['write_io_count']],
+ -1,
+ -1)
+
+ @wrap_exceptions
+ def cwd(self):
+ """Return process current working directory."""
+ # sometimes we get an empty string, in which case we turn
+ # it into None
+ if OPENBSD and self.pid == 0:
+ return None # ...else it would raise EINVAL
+ elif NETBSD or HAS_PROC_OPEN_FILES:
+ # FreeBSD < 8 does not support functions based on
+ # kinfo_getfile() and kinfo_getvmmap()
+ return cext.proc_cwd(self.pid) or None
+ else:
+ raise NotImplementedError(
+ "supported only starting from FreeBSD 8" if
+ FREEBSD else "")
+
+ nt_mmap_grouped = namedtuple(
+ 'mmap', 'path rss, private, ref_count, shadow_count')
+ nt_mmap_ext = namedtuple(
+ 'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
+
+ def _not_implemented(self):
+ raise NotImplementedError
+
+ # FreeBSD < 8 does not support functions based on kinfo_getfile()
+ # and kinfo_getvmmap()
+ if HAS_PROC_OPEN_FILES:
+ @wrap_exceptions
+ def open_files(self):
+ """Return files opened by process as a list of namedtuples."""
+ rawlist = cext.proc_open_files(self.pid)
+ return [_common.popenfile(path, fd) for path, fd in rawlist]
+ else:
+ open_files = _not_implemented
+
+ # FreeBSD < 8 does not support functions based on kinfo_getfile()
+ # and kinfo_getvmmap()
+ if HAS_PROC_NUM_FDS:
+ @wrap_exceptions
+ def num_fds(self):
+ """Return the number of file descriptors opened by this process."""
+ ret = cext.proc_num_fds(self.pid)
+ if NETBSD:
+ self._assert_alive()
+ return ret
+ else:
+ num_fds = _not_implemented
+
+ # --- FreeBSD only APIs
+
+ if FREEBSD:
+
+ @wrap_exceptions
+ def cpu_affinity_get(self):
+ return cext.proc_cpu_affinity_get(self.pid)
+
+ @wrap_exceptions
+ def cpu_affinity_set(self, cpus):
+ # Pre-emptively check if CPUs are valid because the C
+ # function has a weird behavior in case of invalid CPUs,
+ # see: https://github.com/giampaolo/psutil/issues/586
+ allcpus = tuple(range(len(per_cpu_times())))
+ for cpu in cpus:
+ if cpu not in allcpus:
+ raise ValueError("invalid CPU #%i (choose between %s)"
+ % (cpu, allcpus))
+ try:
+ cext.proc_cpu_affinity_set(self.pid, cpus)
+ except OSError as err:
+ # 'man cpuset_setaffinity' about EDEADLK:
+ # <<the call would leave a thread without a valid CPU to run
+ # on because the set does not overlap with the thread's
+ # anonymous mask>>
+ if err.errno in (errno.EINVAL, errno.EDEADLK):
+ for cpu in cpus:
+ if cpu not in allcpus:
+ raise ValueError(
+ "invalid CPU #%i (choose between %s)" % (
+ cpu, allcpus))
+ raise
+
+ @wrap_exceptions
+ def memory_maps(self):
+ return cext.proc_memory_maps(self.pid)
+
+ @wrap_exceptions
+ def rlimit(self, resource, limits=None):
+ if limits is None:
+ return cext.proc_getrlimit(self.pid, resource)
+ else:
+ if len(limits) != 2:
+ raise ValueError(
+ "second argument must be a (soft, hard) tuple, "
+ "got %s" % repr(limits))
+ soft, hard = limits
+ return cext.proc_setrlimit(self.pid, resource, soft, hard)
diff --git a/contrib/python/psutil/py3/psutil/_pssunos.py b/contrib/python/psutil/py3/psutil/_pssunos.py
new file mode 100644
index 0000000000..5618bd4460
--- /dev/null
+++ b/contrib/python/psutil/py3/psutil/_pssunos.py
@@ -0,0 +1,727 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS Solaris platform implementation."""
+
+import errno
+import functools
+import os
+import socket
+import subprocess
+import sys
+from collections import namedtuple
+from socket import AF_INET
+
+from . import _common
+from . import _psposix
+from . import _psutil_posix as cext_posix
+from . import _psutil_sunos as cext
+from ._common import AccessDenied
+from ._common import AF_INET6
+from ._common import debug
+from ._common import get_procfs_path
+from ._common import isfile_strict
+from ._common import memoize_when_activated
+from ._common import NoSuchProcess
+from ._common import sockfam_to_enum
+from ._common import socktype_to_enum
+from ._common import usage_percent
+from ._common import ZombieProcess
+from ._compat import b
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import PY3
+
+
+__extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+PAGE_SIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+IS_64_BIT = sys.maxsize > 2**32
+
+CONN_IDLE = "IDLE"
+CONN_BOUND = "BOUND"
+
+PROC_STATUSES = {
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SONPROC: _common.STATUS_RUNNING, # same as run
+ cext.SWAIT: _common.STATUS_WAITING,
+}
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+ cext.TCPS_IDLE: CONN_IDLE, # sunos specific
+ cext.TCPS_BOUND: CONN_BOUND, # sunos specific
+}
+
+proc_info_map = dict(
+ ppid=0,
+ rss=1,
+ vms=2,
+ create_time=3,
+ nice=4,
+ num_threads=5,
+ status=6,
+ ttynr=7,
+ uid=8,
+ euid=9,
+ gid=10,
+ egid=11)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+# psutil.cpu_times(percpu=True)
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+pfullmem = pmem
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple('pmmap_grouped',
+ ['path', 'rss', 'anonymous', 'locked'])
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """Report virtual memory metrics."""
+ # we could have done this with kstat, but IMHO this is good enough
+ total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
+ # note: there's no difference on Solaris
+ free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+ """Report swap memory metrics."""
+ sin, sout = cext.swap_mem()
+ # XXX
+ # we are supposed to get total/free by doing so:
+ # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
+ # usr/src/cmd/swap/swap.c
+ # ...nevertheless I can't manage to obtain the same numbers as 'swap'
+ # cmdline utility, so let's parse its output (sigh!)
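+    # Typical 'swap -l' layout assumed below (illustrative): a header
+    # line, then one row per device whose 4th and 5th columns are total
+    # and free 512-byte blocks:
+    #   swapfile             dev  swaplo  blocks    free
+    #   /dev/md/dsk/d20      85,2     16  1048544   1048544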
+ p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' %
+ os.environ['PATH'], 'swap', '-l'],
+ stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout = stdout.decode(sys.stdout.encoding)
+ if p.returncode != 0:
+ raise RuntimeError("'swap -l' failed (retcode=%s)" % p.returncode)
+
+ lines = stdout.strip().split('\n')[1:]
+ if not lines:
+ raise RuntimeError('no swap device(s) configured')
+ total = free = 0
+ for line in lines:
+ line = line.split()
+        t, f = line[3:5]
+ total += int(int(t) * 512)
+ free += int(int(f) * 512)
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent,
+ sin * PAGE_SIZE, sout * PAGE_SIZE)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system-wide CPU times as a named tuple"""
+ ret = cext.per_cpu_times()
+ return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples"""
+ ret = cext.per_cpu_times()
+ return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # mimic os.cpu_count() behavior
+ return None
+
+
+def cpu_count_physical():
+ """Return the number of physical CPUs in the system."""
+ return cext.cpu_count_phys()
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ ctx_switches, interrupts, syscalls, traps = cext.cpu_stats()
+ soft_interrupts = 0
+ return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
+ syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def disk_partitions(all=False):
+ """Return system disk partitions."""
+ # TODO - the filtering logic should be better checked so that
+ # it tries to reflect 'df' as much as possible
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+ # Differently from, say, Linux, we don't have a list of
+ # common fs types so the best we can do, AFAIK, is to
+ # filter by filesystem having a total size > 0.
+ try:
+ if not disk_usage(mountpoint).total:
+ continue
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1674
+ debug("skipping %r: %r" % (mountpoint, err))
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ Only INET sockets are returned (UNIX are not).
+ """
+ cmap = _common.conn_tmap.copy()
+ if _pid == -1:
+ cmap.pop('unix', 0)
+ if kind not in cmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in cmap])))
+ families, types = _common.conn_tmap[kind]
+ rawlist = cext.net_connections(_pid)
+ ret = set()
+ for item in rawlist:
+ fd, fam, type_, laddr, raddr, status, pid = item
+ if fam not in families:
+ continue
+ if type_ not in types:
+ continue
+ # TODO: refactor and use _common.conn_to_ntuple.
+ if fam in (AF_INET, AF_INET6):
+ if laddr:
+ laddr = _common.addr(*laddr)
+ if raddr:
+ raddr = _common.addr(*raddr)
+ status = TCP_STATUSES[status]
+ fam = sockfam_to_enum(fam)
+ type_ = socktype_to_enum(type_)
+ if _pid == -1:
+ nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
+ else:
+ nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+ ret.add(nt)
+ return list(ret)
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ ret = cext.net_if_stats()
+ for name, items in ret.items():
+ isup, duplex, speed, mtu = items
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu)
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ localhost = (':0.0', ':0')
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in localhost:
+ hostname = 'localhost'
+ nt = _common.suser(user, tty, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix pid."""
+ return _psposix.pid_exists(pid)
+
+
+def wrap_exceptions(fun):
+ """Call callable into a try/except clause and translate ENOENT,
+ EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except OSError:
+ if self.pid == 0:
+ if 0 in pids():
+ raise AccessDenied(self.pid, self._name)
+ else:
+ raise
+ raise
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+        # For those C functions which do not raise NSP, possibly
+        # returning an incorrect or incomplete result.
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ def oneshot_enter(self):
+ self._proc_name_and_args.cache_activate(self)
+ self._proc_basic_info.cache_activate(self)
+ self._proc_cred.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_name_and_args.cache_deactivate(self)
+ self._proc_basic_info.cache_deactivate(self)
+ self._proc_cred.cache_deactivate(self)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_name_and_args(self):
+ return cext.proc_name_and_args(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_basic_info(self):
+ if self.pid == 0 and not \
+ os.path.exists('%s/%s/psinfo' % (self._procfs_path, self.pid)):
+ raise AccessDenied(self.pid)
+ ret = cext.proc_basic_info(self.pid, self._procfs_path)
+ assert len(ret) == len(proc_info_map)
+ return ret
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_cred(self):
+ return cext.proc_cred(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def name(self):
+ # note: max len == 15
+ return self._proc_name_and_args()[0]
+
+ @wrap_exceptions
+ def exe(self):
+ try:
+ return os.readlink(
+ "%s/%s/path/a.out" % (self._procfs_path, self.pid))
+ except OSError:
+ pass # continue and guess the exe name from the cmdline
+ # Will be guessed later from cmdline but we want to explicitly
+ # invoke cmdline here in order to get an AccessDenied
+ # exception if the user does not have enough privileges.
+ self.cmdline()
+ return ""
+
+ @wrap_exceptions
+ def cmdline(self):
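+ # psinfo exposes the arguments as one space-joined string, so
+ # arguments that themselves contain spaces cannot be recovered
+ # exactly.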
+ return self._proc_name_and_args()[1].split(' ')
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._proc_basic_info()[proc_info_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_basic_info()[proc_info_map['num_threads']]
+
+ @wrap_exceptions
+ def nice_get(self):
+ # Note #1: getpriority(3) doesn't work for realtime processes.
+ # Psinfo is what ps uses, see:
+ # https://github.com/giampaolo/psutil/issues/1194
+ return self._proc_basic_info()[proc_info_map['nice']]
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ if self.pid in (2, 3):
+ # Special case PIDs: internally setpriority(3) returns ESRCH
+ # (no such process), no matter what.
+ # The process actually exists though, as it has a name,
+ # creation time, etc.
+ raise AccessDenied(self.pid, self._name)
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ try:
+ real, effective, saved, _, _, _ = self._proc_cred()
+ except AccessDenied:
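+ # Fall back to psinfo, which carries only the real and
+ # effective ids; the saved uid is unknown without prcred.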
+ real = self._proc_basic_info()[proc_info_map['uid']]
+ effective = self._proc_basic_info()[proc_info_map['euid']]
+ saved = None
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def gids(self):
+ try:
+ _, _, _, real, effective, saved = self._proc_cred()
+ except AccessDenied:
+ real = self._proc_basic_info()[proc_info_map['gid']]
+ effective = self._proc_basic_info()[proc_info_map['egid']]
+ saved = None
+ return _common.pgids(real, effective, saved)
+
+ @wrap_exceptions
+ def cpu_times(self):
+ try:
+ times = cext.proc_cpu_times(self.pid, self._procfs_path)
+ except OSError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to determine CPU times we
+ # return 0.0 as a fallback. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ times = (0.0, 0.0, 0.0, 0.0)
+ else:
+ raise
+ return _common.pcputimes(*times)
+
+ @wrap_exceptions
+ def cpu_num(self):
+ return cext.proc_cpu_num(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def terminal(self):
+ procfs_path = self._procfs_path
+ hit_enoent = False
+ tty = self._proc_basic_info()[proc_info_map['ttynr']]
+ if tty != cext.PRNODEV:
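+ # probe stdin/stdout/stderr plus fd 255, which shells
+ # conventionally keep open on the controlling terminal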
+ for x in (0, 1, 2, 255):
+ try:
+ return os.readlink(
+ '%s/%d/path/%d' % (procfs_path, self.pid, x))
+ except FileNotFoundError:
+ hit_enoent = True
+ continue
+ if hit_enoent:
+ self._assert_alive()
+
+ @wrap_exceptions
+ def cwd(self):
+ # /proc/PID/path/cwd may not be resolved by readlink() even if
+ # it exists (ls shows it). If that's the case and the process
+ # is still alive, return None (we also return None on BSD).
+ # Reference: http://goo.gl/55XgO
+ procfs_path = self._procfs_path
+ try:
+ return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid))
+ except FileNotFoundError:
+ os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
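+ # psinfo reports pr_rssize and pr_size in kilobytes, hence
+ # the multiplication by 1024 below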
+ ret = self._proc_basic_info()
+ rss = ret[proc_info_map['rss']] * 1024
+ vms = ret[proc_info_map['vms']] * 1024
+ return pmem(rss, vms)
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def status(self):
+ code = self._proc_basic_info()[proc_info_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def threads(self):
+ procfs_path = self._procfs_path
+ ret = []
+ tids = os.listdir('%s/%d/lwp' % (procfs_path, self.pid))
+ hit_enoent = False
+ for tid in tids:
+ tid = int(tid)
+ try:
+ utime, stime = cext.query_process_thread(
+ self.pid, tid, procfs_path)
+ except EnvironmentError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to determine CPU times we
+ # skip this thread. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ continue
+ # ENOENT == thread gone in meantime
+ if err.errno == errno.ENOENT:
+ hit_enoent = True
+ continue
+ raise
+ else:
+ nt = _common.pthread(tid, utime, stime)
+ ret.append(nt)
+ if hit_enoent:
+ self._assert_alive()
+ return ret
+
+ @wrap_exceptions
+ def open_files(self):
+ retlist = []
+ hit_enoent = False
+ procfs_path = self._procfs_path
+ pathdir = '%s/%d/path' % (procfs_path, self.pid)
+ for fd in os.listdir('%s/%d/fd' % (procfs_path, self.pid)):
+ path = os.path.join(pathdir, fd)
+ if os.path.islink(path):
+ try:
+ file = os.readlink(path)
+ except FileNotFoundError:
+ hit_enoent = True
+ continue
+ else:
+ if isfile_strict(file):
+ retlist.append(_common.popenfile(file, int(fd)))
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ def _get_unix_sockets(self, pid):
+ """Get UNIX sockets used by process by parsing 'pfiles' output."""
+ # TODO: rewrite this in C (...but the damn netstat source code
+ # does not include this part! Argh!!)
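+ # pfiles(1) is the stock Solaris utility that lists a process's
+ # open files, including its sockets.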
+ cmd = "pfiles %s" % pid
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode != 0:
+ if 'permission denied' in stderr.lower():
+ raise AccessDenied(self.pid, self._name)
+ if 'no such process' in stderr.lower():
+ raise NoSuchProcess(self.pid, self._name)
+ raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+
+ lines = stdout.split('\n')[2:]
+ for i, line in enumerate(lines):
+ line = line.lstrip()
+ if line.startswith('sockname: AF_UNIX'):
+ path = line.split(' ', 2)[2]
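+ # in pfiles output the socket type (e.g. "SOCK_STREAM") is
+ # printed two lines above the "sockname:" line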
+ type = lines[i - 2].strip()
+ if type == 'SOCK_STREAM':
+ type = socket.SOCK_STREAM
+ elif type == 'SOCK_DGRAM':
+ type = socket.SOCK_DGRAM
+ else:
+ type = -1
+ yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = net_connections(kind, _pid=self.pid)
+ # The underlying C implementation retrieves all OS connections
+ # and filters them by PID. At this point we can't tell whether
+ # an empty list means the process had no connections or the
+ # process is no longer active, so we force NSP in case the PID
+ # is no longer there.
+ if not ret:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ # UNIX sockets
+ if kind in ('all', 'unix'):
+ ret.extend([_common.pconn(*conn) for conn in
+ self._get_unix_sockets(self.pid)])
+ return ret
+
+ nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
+ nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
+
+ @wrap_exceptions
+ def memory_maps(self):
+ def toaddr(start, end):
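+ # hex() appends a trailing "L" to longs on Python 2; strip
+ # it so addresses render identically on both versions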
+ return '%s-%s' % (hex(start)[2:].strip('L'),
+ hex(end)[2:].strip('L'))
+
+ procfs_path = self._procfs_path
+ retlist = []
+ try:
+ rawlist = cext.proc_memory_maps(self.pid, procfs_path)
+ except OSError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to read the maps we
+ # return an empty list as a fallback. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ return []
+ else:
+ raise
+ hit_enoent = False
+ for item in rawlist:
+ addr, addrsize, perm, name, rss, anon, locked = item
+ addr = toaddr(addr, addrsize)
+ if not name.startswith('['):
+ try:
+ name = os.readlink(
+ '%s/%s/path/%s' % (procfs_path, self.pid, name))
+ except OSError as err:
+ if err.errno == errno.ENOENT:
+ # sometimes the link may not be resolved by
+ # readlink() even if it exists (ls shows it).
+ # If that's the case we just return the
+ # unresolved link path.
+ # This seems to be an inconsistency with /proc similar
+ # to: http://goo.gl/55XgO
+ name = '%s/%s/path/%s' % (procfs_path, self.pid, name)
+ hit_enoent = True
+ else:
+ raise
+ retlist.append((addr, perm, name, rss, anon, locked))
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def num_fds(self):
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ return _common.pctxsw(
+ *cext.proc_num_ctx_switches(self.pid, self._procfs_path))
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
diff --git a/contrib/python/psutil/py3/psutil/_psutil_posix.c b/contrib/python/psutil/py3/psutil/_psutil_posix.c
index 305cec76d1..3447fc9017 100644
--- a/contrib/python/psutil/py3/psutil/_psutil_posix.c
+++ b/contrib/python/psutil/py3/psutil/_psutil_posix.c
@@ -18,9 +18,9 @@
#include <unistd.h>
#ifdef PSUTIL_SUNOS10
- #include "arch/solaris/v10/ifaddrs.h"
+ #error #include "arch/solaris/v10/ifaddrs.h"
#elif PSUTIL_AIX
- #include "arch/aix/ifaddrs.h"
+ #error #include "arch/aix/ifaddrs.h"
#else
#include <ifaddrs.h>
#endif
diff --git a/contrib/python/psutil/py3/psutil/arch/aix/ifaddrs.h b/contrib/python/psutil/py3/psutil/arch/aix/ifaddrs.h
deleted file mode 100644
index e15802bf7b..0000000000
--- a/contrib/python/psutil/py3/psutil/arch/aix/ifaddrs.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2017, Arnon Yaari
- * All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/*! Based on code from
- https://lists.samba.org/archive/samba-technical/2009-February/063079.html
-!*/
-
-
-#ifndef GENERIC_AIX_IFADDRS_H
-#define GENERIC_AIX_IFADDRS_H
-
-#include <sys/socket.h>
-#include <net/if.h>
-
-#undef ifa_dstaddr
-#undef ifa_broadaddr
-#define ifa_broadaddr ifa_dstaddr
-
-struct ifaddrs {
- struct ifaddrs *ifa_next;
- char *ifa_name;
- unsigned int ifa_flags;
- struct sockaddr *ifa_addr;
- struct sockaddr *ifa_netmask;
- struct sockaddr *ifa_dstaddr;
-};
-
-extern int getifaddrs(struct ifaddrs **);
-extern void freeifaddrs(struct ifaddrs *);
-#endif
diff --git a/contrib/python/psutil/py3/psutil/arch/osx/ya.make b/contrib/python/psutil/py3/psutil/arch/osx/ya.make
deleted file mode 100644
index 613c49f924..0000000000
--- a/contrib/python/psutil/py3/psutil/arch/osx/ya.make
+++ /dev/null
@@ -1,9 +0,0 @@
-PY23_NATIVE_LIBRARY()
-
-LICENSE(BSD-3-Clause)
-
-SRCS(
- process_info.c
-)
-
-END()
diff --git a/contrib/python/psutil/py3/psutil/arch/solaris/v10/ifaddrs.h b/contrib/python/psutil/py3/psutil/arch/solaris/v10/ifaddrs.h
deleted file mode 100644
index 0953a9b99a..0000000000
--- a/contrib/python/psutil/py3/psutil/arch/solaris/v10/ifaddrs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Reference: https://lists.samba.org/archive/samba-technical/2009-February/063079.html */
-
-
-#ifndef __IFADDRS_H__
-#define __IFADDRS_H__
-
-#include <sys/socket.h>
-#include <net/if.h>
-
-#undef ifa_dstaddr
-#undef ifa_broadaddr
-#define ifa_broadaddr ifa_dstaddr
-
-struct ifaddrs {
- struct ifaddrs *ifa_next;
- char *ifa_name;
- unsigned int ifa_flags;
- struct sockaddr *ifa_addr;
- struct sockaddr *ifa_netmask;
- struct sockaddr *ifa_dstaddr;
-};
-
-extern int getifaddrs(struct ifaddrs **);
-extern void freeifaddrs(struct ifaddrs *);
-
-#endif
diff --git a/contrib/python/psutil/py3/test/test.py b/contrib/python/psutil/py3/test/test.py
deleted file mode 100644
index 4f5a0e50d8..0000000000
--- a/contrib/python/psutil/py3/test/test.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from library.python.import_test import check_imports
-test_imports = lambda: check_imports(no_check=['psutil._ps*'])
-#from psutil._psutil_posix import net_if_addrs
-
diff --git a/contrib/python/psutil/py3/test/ya.make b/contrib/python/psutil/py3/test/ya.make
deleted file mode 100644
index 72d9c04afc..0000000000
--- a/contrib/python/psutil/py3/test/ya.make
+++ /dev/null
@@ -1,8 +0,0 @@
-PY3TEST()
-PEERDIR(
- contrib/python/psutil
- library/python/import_test
-)
-TEST_SRCS(test.py)
-NO_LINT()
-END()
diff --git a/contrib/python/psutil/py3/ya.make b/contrib/python/psutil/py3/ya.make
index d90f7e52f2..16c79983b7 100644
--- a/contrib/python/psutil/py3/ya.make
+++ b/contrib/python/psutil/py3/ya.make
@@ -1,58 +1,75 @@
PY3_LIBRARY()
+VERSION(5.8.0)
+
LICENSE(BSD-3-Clause)
-VERSION(5.8.0)
+NO_COMPILER_WARNINGS()
-NO_UTIL()
+NO_LINT()
-SRCDIR(contrib/python/psutil/py3/psutil)
+NO_CHECK_IMPORTS(
+ psutil._psaix
+ psutil._psbsd
+ psutil._psosx
+ psutil._pssunos
+ psutil._psutil_bsd
+ psutil._psutil_common
+ psutil._psutil_osx
+ psutil._psutil_sunos
+ psutil._psutil_windows
+ psutil._pswindows
+)
-NO_COMPILER_WARNINGS()
+NO_UTIL()
CFLAGS(
-DPSUTIL_VERSION=580
)
-IF (OS_LINUX OR OS_DARWIN)
- CFLAGS(
- -DPSUTIL_POSIX=1
- )
- SRCS(
- _psutil_common.c
- _psutil_posix.c
- )
- PY_REGISTER(psutil._psutil_posix)
-ENDIF ()
+SRCS(
+ psutil/_psutil_common.c
+)
IF (OS_LINUX)
CFLAGS(
+ -DPSUTIL_POSIX=1
-DPSUTIL_LINUX=1
)
SRCS(
- _psutil_linux.c
+ psutil/_psutil_linux.c
+ psutil/_psutil_posix.c
)
- PY_REGISTER(psutil._psutil_linux)
-ENDIF ()
+
+ PY_REGISTER(
+ psutil._psutil_linux
+ psutil._psutil_posix
+ )
+ENDIF()
IF (OS_DARWIN)
CFLAGS(
+ -DPSUTIL_POSIX=1
-DPSUTIL_OSX=1
)
- EXTRALIBS("-framework CoreFoundation -framework IOKit")
-
- PEERDIR(
- contrib/python/psutil/py3/psutil/arch/osx
+ LDFLAGS(
+ -framework CoreFoundation
+ -framework IOKit
)
SRCS(
- _psutil_osx.c
+ psutil/_psutil_osx.c
+ psutil/_psutil_posix.c
+ psutil/arch/osx/process_info.c
)
- PY_REGISTER(psutil._psutil_osx)
-ENDIF ()
+ PY_REGISTER(
+ psutil._psutil_osx
+ psutil._psutil_posix
+ )
+ENDIF()
IF (OS_WINDOWS)
CFLAGS(
@@ -68,80 +85,42 @@ IF (OS_WINDOWS)
)
SRCS(
- _psutil_common.c
- _psutil_windows.c
- arch/windows/cpu.c
- arch/windows/disk.c
- arch/windows/net.c
- arch/windows/process_handles.c
- arch/windows/process_info.c
- arch/windows/process_utils.c
- arch/windows/security.c
- arch/windows/services.c
- arch/windows/socks.c
- arch/windows/wmi.c
+ psutil/_psutil_windows.c
+ psutil/arch/windows/cpu.c
+ psutil/arch/windows/disk.c
+ psutil/arch/windows/net.c
+ psutil/arch/windows/process_handles.c
+ psutil/arch/windows/process_info.c
+ psutil/arch/windows/process_utils.c
+ psutil/arch/windows/security.c
+ psutil/arch/windows/services.c
+ psutil/arch/windows/socks.c
+ psutil/arch/windows/wmi.c
)
- PY_REGISTER(psutil._psutil_windows)
-ENDIF ()
-
-NO_CHECK_IMPORTS(
- psutil._psbsd
- psutil._psosx
- psutil._pssunos
- psutil._psutil_bsd
- psutil._psutil_common
- psutil._psutil_osx
- psutil._psutil_sunos
- psutil._psutil_windows
- psutil._pswindows
-)
+ PY_REGISTER(
+ psutil._psutil_windows
+ )
+ENDIF()
PY_SRCS(
TOP_LEVEL
psutil/__init__.py
psutil/_common.py
psutil/_compat.py
+ psutil/_psaix.py
+ psutil/_psbsd.py
+ psutil/_pslinux.py
+ psutil/_psosx.py
+ psutil/_psposix.py
+ psutil/_pssunos.py
+ psutil/_pswindows.py
)
-IF (OS_LINUX OR OS_DARWIN)
- PY_SRCS(
- TOP_LEVEL
- psutil/_psposix.py
- )
-ENDIF ()
-
-IF (OS_LINUX)
- PY_SRCS(
- TOP_LEVEL
- psutil/_pslinux.py
- )
-ENDIF ()
-
-IF (OS_DARWIN)
- PY_SRCS(
- TOP_LEVEL
- psutil/_psosx.py
- )
-ENDIF ()
-
-IF (OS_WINDOWS)
- PY_SRCS(
- TOP_LEVEL
- psutil/_pswindows.py
- )
-ENDIF ()
-
RESOURCE_FILES(
PREFIX contrib/python/psutil/py3/
.dist-info/METADATA
.dist-info/top_level.txt
)
-NO_LINT()
-
END()
-
-RECURSE_FOR_TESTS(
- test
-)
diff --git a/contrib/python/responses/py3/.dist-info/METADATA b/contrib/python/responses/py3/.dist-info/METADATA
index 6daec58b17..c658b29065 100644
--- a/contrib/python/responses/py3/.dist-info/METADATA
+++ b/contrib/python/responses/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: responses
-Version: 0.24.1
+Version: 0.25.0
Summary: A utility library for mocking out the `requests` Python library.
Home-page: https://github.com/getsentry/responses
Author: David Cramer
@@ -19,6 +19,7 @@ Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development
Requires-Python: >=3.8
Description-Content-Type: text/x-rst
@@ -1273,6 +1274,7 @@ If you are using the ``Retry`` features of ``urllib3`` and want to cover scenari
import responses
from responses import registries
+ from urllib3.util import Retry
@responses.activate(registry=registries.OrderedRegistry)
diff --git a/contrib/python/responses/py3/README.rst b/contrib/python/responses/py3/README.rst
index e2096c6cae..9b255076b7 100644
--- a/contrib/python/responses/py3/README.rst
+++ b/contrib/python/responses/py3/README.rst
@@ -1232,6 +1232,7 @@ If you are using the ``Retry`` features of ``urllib3`` and want to cover scenari
import responses
from responses import registries
+ from urllib3.util import Retry
@responses.activate(registry=registries.OrderedRegistry)
diff --git a/contrib/python/responses/py3/responses/matchers.py b/contrib/python/responses/py3/responses/matchers.py
index 78980fe307..20af1be693 100644
--- a/contrib/python/responses/py3/responses/matchers.py
+++ b/contrib/python/responses/py3/responses/matchers.py
@@ -4,8 +4,9 @@ import re
from json.decoder import JSONDecodeError
from typing import Any
from typing import Callable
-from typing import Dict
from typing import List
+from typing import Mapping
+from typing import MutableMapping
from typing import Optional
from typing import Pattern
from typing import Tuple
@@ -17,7 +18,7 @@ from requests import PreparedRequest
from urllib3.util.url import parse_url
-def _create_key_val_str(input_dict: Union[Dict[Any, Any], Any]) -> str:
+def _create_key_val_str(input_dict: Union[Mapping[Any, Any], Any]) -> str:
"""
Returns string of format {'key': val, 'key2': val2}
Function is called recursively for nested dictionaries
@@ -57,8 +58,8 @@ def _create_key_val_str(input_dict: Union[Dict[Any, Any], Any]) -> str:
def _filter_dict_recursively(
- dict1: Dict[Any, Any], dict2: Dict[Any, Any]
-) -> Dict[Any, Any]:
+ dict1: Mapping[Any, Any], dict2: Mapping[Any, Any]
+) -> Mapping[Any, Any]:
filtered_dict = {}
for k, val in dict1.items():
if k in dict2:
@@ -70,7 +71,7 @@ def _filter_dict_recursively(
def urlencoded_params_matcher(
- params: Optional[Dict[str, str]], *, allow_blank: bool = False
+ params: Optional[Mapping[str, str]], *, allow_blank: bool = False
) -> Callable[..., Any]:
"""
Matches URL encoded data
@@ -100,7 +101,7 @@ def urlencoded_params_matcher(
def json_params_matcher(
- params: Optional[Union[Dict[str, Any], List[Any]]], *, strict_match: bool = True
+ params: Optional[Union[Mapping[str, Any], List[Any]]], *, strict_match: bool = True
) -> Callable[..., Any]:
"""Matches JSON encoded data of request body.
@@ -192,7 +193,7 @@ def fragment_identifier_matcher(identifier: Optional[str]) -> Callable[..., Any]
def query_param_matcher(
- params: Optional[Dict[str, Any]], *, strict_match: bool = True
+ params: Optional[MutableMapping[str, Any]], *, strict_match: bool = True
) -> Callable[..., Any]:
"""Matcher to match 'params' argument in request.
@@ -276,7 +277,7 @@ def query_string_matcher(query: Optional[str]) -> Callable[..., Any]:
return match
-def request_kwargs_matcher(kwargs: Optional[Dict[str, Any]]) -> Callable[..., Any]:
+def request_kwargs_matcher(kwargs: Optional[Mapping[str, Any]]) -> Callable[..., Any]:
"""
Matcher to match keyword arguments provided to request
@@ -308,7 +309,7 @@ def request_kwargs_matcher(kwargs: Optional[Dict[str, Any]]) -> Callable[..., An
def multipart_matcher(
- files: Dict[str, Any], data: Optional[Dict[str, str]] = None
+ files: Mapping[str, Any], data: Optional[Mapping[str, str]] = None
) -> Callable[..., Any]:
"""
Matcher to match 'multipart/form-data' content-type.
@@ -392,7 +393,7 @@ def multipart_matcher(
def header_matcher(
- headers: Dict[str, Union[str, Pattern[str]]], strict_match: bool = False
+ headers: Mapping[str, Union[str, Pattern[str]]], strict_match: bool = False
) -> Callable[..., Any]:
"""
Matcher to match 'headers' argument in request using the responses library.
@@ -408,7 +409,7 @@ def header_matcher(
:return: (func) matcher
"""
- def _compare_with_regex(request_headers: Union[Dict[Any, Any], Any]) -> bool:
+ def _compare_with_regex(request_headers: Union[Mapping[Any, Any], Any]) -> bool:
if strict_match and len(request_headers) != len(headers):
return False
@@ -420,13 +421,13 @@ def header_matcher(
else:
if not v == request_headers[k]:
return False
- elif strict_match:
+ else:
return False
return True
def match(request: PreparedRequest) -> Tuple[bool, str]:
- request_headers: Union[Dict[Any, Any], Any] = request.headers or {}
+ request_headers: Union[Mapping[Any, Any], Any] = request.headers or {}
if not strict_match:
# filter down to just the headers specified in the matcher
diff --git a/contrib/python/responses/py3/ya.make b/contrib/python/responses/py3/ya.make
index 4a0969de93..ffa133acdf 100644
--- a/contrib/python/responses/py3/ya.make
+++ b/contrib/python/responses/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(0.24.1)
+VERSION(0.25.0)
LICENSE(Apache-2.0)
diff --git a/contrib/python/setuptools/py3/.dist-info/METADATA b/contrib/python/setuptools/py3/.dist-info/METADATA
index a12855b091..6abcbef24b 100644
--- a/contrib/python/setuptools/py3/.dist-info/METADATA
+++ b/contrib/python/setuptools/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: setuptools
-Version: 69.0.3
+Version: 69.1.0
Summary: Easily download, build, install, upgrade, and uninstall Python packages
Home-page: https://github.com/pypa/setuptools
Author: Python Packaging Authority
@@ -51,6 +51,7 @@ Requires-Dist: filelock >=3.4.0 ; extra == 'testing'
Requires-Dist: ini2toml[lite] >=0.9 ; extra == 'testing'
Requires-Dist: tomli-w >=1.0.0 ; extra == 'testing'
Requires-Dist: pytest-timeout ; extra == 'testing'
+Requires-Dist: pytest-home >=0.5 ; extra == 'testing'
Provides-Extra: testing-integration
Requires-Dist: pytest ; extra == 'testing-integration'
Requires-Dist: pytest-xdist ; extra == 'testing-integration'
@@ -63,11 +64,10 @@ Requires-Dist: jaraco.envs >=2.2 ; extra == 'testing-integration'
Requires-Dist: build[virtualenv] >=1.0.3 ; extra == 'testing-integration'
Requires-Dist: filelock >=3.4.0 ; extra == 'testing-integration'
Requires-Dist: packaging >=23.1 ; extra == 'testing-integration'
-Requires-Dist: pytest-black >=0.3.7 ; (platform_python_implementation != "PyPy") and extra == 'testing'
Requires-Dist: pytest-cov ; (platform_python_implementation != "PyPy") and extra == 'testing'
Requires-Dist: pytest-mypy >=0.9.1 ; (platform_python_implementation != "PyPy") and extra == 'testing'
Requires-Dist: jaraco.develop >=7.21 ; (python_version >= "3.9" and sys_platform != "cygwin") and extra == 'testing'
-Requires-Dist: pytest-ruff ; (sys_platform != "cygwin") and extra == 'testing'
+Requires-Dist: pytest-ruff >=0.2.1 ; (sys_platform != "cygwin") and extra == 'testing'
Requires-Dist: pytest-perf ; (sys_platform != "cygwin") and extra == 'testing'
.. image:: https://img.shields.io/pypi/v/setuptools.svg
@@ -83,14 +83,10 @@ Requires-Dist: pytest-perf ; (sys_platform != "cygwin") and extra == 'testing'
:target: https://github.com/astral-sh/ruff
:alt: Ruff
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
:target: https://setuptools.pypa.io
-.. image:: https://img.shields.io/badge/skeleton-2023-informational
+.. image:: https://img.shields.io/badge/skeleton-2024-informational
:target: https://blog.jaraco.com/skeleton
.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
@@ -103,10 +99,9 @@ Requires-Dist: pytest-perf ; (sys_platform != "cygwin") and extra == 'testing'
:target: https://discord.com/channels/803025117553754132/815945031150993468
:alt: Discord
-See the `Installation Instructions
-<https://packaging.python.org/installing/>`_ in the Python Packaging
-User's Guide for instructions on installing, upgrading, and uninstalling
-Setuptools.
+See the `Quickstart <https://setuptools.pypa.io/en/latest/userguide/quickstart.html>`_
+and the `User's Guide <https://setuptools.pypa.io/en/latest/userguide/>`_ for
+instructions on how to use Setuptools.
Questions and comments should be directed to `GitHub Discussions
<https://github.com/pypa/setuptools/discussions>`_.
diff --git a/contrib/python/setuptools/py3/README.rst b/contrib/python/setuptools/py3/README.rst
index 92c7dc6e87..eec6e35531 100644
--- a/contrib/python/setuptools/py3/README.rst
+++ b/contrib/python/setuptools/py3/README.rst
@@ -11,14 +11,10 @@
:target: https://github.com/astral-sh/ruff
:alt: Ruff
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
:target: https://setuptools.pypa.io
-.. image:: https://img.shields.io/badge/skeleton-2023-informational
+.. image:: https://img.shields.io/badge/skeleton-2024-informational
:target: https://blog.jaraco.com/skeleton
.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
@@ -31,10 +27,9 @@
:target: https://discord.com/channels/803025117553754132/815945031150993468
:alt: Discord
-See the `Installation Instructions
-<https://packaging.python.org/installing/>`_ in the Python Packaging
-User's Guide for instructions on installing, upgrading, and uninstalling
-Setuptools.
+See the `Quickstart <https://setuptools.pypa.io/en/latest/userguide/quickstart.html>`_
+and the `User's Guide <https://setuptools.pypa.io/en/latest/userguide/>`_ for
+instructions on how to use Setuptools.
Questions and comments should be directed to `GitHub Discussions
<https://github.com/pypa/setuptools/discussions>`_.
diff --git a/contrib/python/setuptools/py3/_distutils_hack/__init__.py b/contrib/python/setuptools/py3/_distutils_hack/__init__.py
index b951c2defd..4d3f09b0ae 100644
--- a/contrib/python/setuptools/py3/_distutils_hack/__init__.py
+++ b/contrib/python/setuptools/py3/_distutils_hack/__init__.py
@@ -3,16 +3,9 @@ import sys
import os
-is_pypy = '__pypy__' in sys.builtin_module_names
-
-
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
- if is_pypy and sys.version_info < (3, 7):
- # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
- # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
- return
import warnings
warnings.warn(
@@ -90,7 +83,7 @@ class DistutilsMetaFinder:
# optimization: only consider top level modules and those
# found in the CPython test suite.
if path is not None and not fullname.startswith('test.'):
- return
+ return None
method_name = 'spec_for_{fullname}'.format(**locals())
method = getattr(self, method_name, lambda: None)
@@ -98,7 +91,7 @@ class DistutilsMetaFinder:
def spec_for_distutils(self):
if self.is_cpython():
- return
+ return None
import importlib
import importlib.abc
@@ -115,7 +108,7 @@ class DistutilsMetaFinder:
# setuptools from the path but only after the hook
# has been loaded. Ref #2980.
# In either case, fall back to stdlib behavior.
- return
+ return None
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
diff --git a/contrib/python/setuptools/py3/pkg_resources/__init__.py b/contrib/python/setuptools/py3/pkg_resources/__init__.py
index f41d46c634..d83283ff1c 100644
--- a/contrib/python/setuptools/py3/pkg_resources/__init__.py
+++ b/contrib/python/setuptools/py3/pkg_resources/__init__.py
@@ -18,11 +18,16 @@ This module is deprecated. Users are directed to :mod:`importlib.resources`,
"""
import sys
+
+if sys.version_info < (3, 8):
+ raise RuntimeError("Python 3.8 or later is required")
+
import os
import io
import time
import re
import types
+from typing import Protocol
import zipfile
import zipimport
import warnings
@@ -41,18 +46,10 @@ import inspect
import ntpath
import posixpath
import importlib
+import importlib.machinery
from pkgutil import get_importer
-try:
- import _imp
-except ImportError:
- # Python 3.2 compatibility
- import imp as _imp
-
-try:
- FileExistsError
-except NameError:
- FileExistsError = OSError
+import _imp
# capture these to bypass sandboxing
from os import utime
@@ -68,14 +65,6 @@ except ImportError:
from os import open as os_open
from os.path import isdir, split
-try:
- import importlib.machinery as importlib_machinery
-
- # access attribute to force import under delayed import mechanisms.
- importlib_machinery.__name__
-except ImportError:
- importlib_machinery = None
-
from pkg_resources.extern.jaraco.text import (
yield_lines,
drop_comment,
@@ -91,9 +80,6 @@ __import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
__import__('pkg_resources.extern.packaging.utils')
-if sys.version_info < (3, 5):
- raise RuntimeError("Python 3.5 or later is required")
-
# declare some globals that will be defined later to
# satisfy the linters.
require = None
@@ -407,20 +393,18 @@ def get_provider(moduleOrReq):
return _find_adapter(_provider_factories, loader)(module)
-def _macos_vers(_cache=[]):
- if not _cache:
- version = platform.mac_ver()[0]
- # fallback for MacPorts
- if version == '':
- plist = '/System/Library/CoreServices/SystemVersion.plist'
- if os.path.exists(plist):
- if hasattr(plistlib, 'readPlist'):
- plist_content = plistlib.readPlist(plist)
- if 'ProductVersion' in plist_content:
- version = plist_content['ProductVersion']
-
- _cache.append(version.split('.'))
- return _cache[0]
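+# note: functools.lru_cache replaces the previous memoization via a
+# mutable default argument, keeping the same cache-forever behavior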
+@functools.lru_cache(maxsize=None)
+def _macos_vers():
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ with open(plist, 'rb') as fh:
+ plist_content = plistlib.load(fh)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+ return version.split('.')
def _macos_arch(machine):
@@ -546,54 +530,54 @@ def get_entry_info(dist, group, name):
return get_distribution(dist).get_entry_info(group, name)
-class IMetadataProvider:
- def has_metadata(name):
+class IMetadataProvider(Protocol):
+ def has_metadata(self, name):
"""Does the package's distribution contain the named metadata?"""
- def get_metadata(name):
+ def get_metadata(self, name):
"""The named metadata resource as a string"""
- def get_metadata_lines(name):
+ def get_metadata_lines(self, name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
- def metadata_isdir(name):
+ def metadata_isdir(self, name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
- def metadata_listdir(name):
+ def metadata_listdir(self, name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
- def run_script(script_name, namespace):
+ def run_script(self, script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
-class IResourceProvider(IMetadataProvider):
+class IResourceProvider(IMetadataProvider, Protocol):
"""An object that provides access to package resources"""
- def get_resource_filename(manager, resource_name):
+ def get_resource_filename(self, manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
- def get_resource_stream(manager, resource_name):
+ def get_resource_stream(self, manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
- def get_resource_string(manager, resource_name):
+ def get_resource_string(self, manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
- def has_resource(resource_name):
+ def has_resource(self, resource_name):
"""Does the package contain the named resource?"""
- def resource_isdir(resource_name):
+ def resource_isdir(self, resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
- def resource_listdir(resource_name):
+ def resource_listdir(self, resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
@@ -1143,8 +1127,7 @@ class Environment:
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
+ return installer(requirement) if installer else None
def __iter__(self):
"""Yield the unique project names of the available distributions"""
@@ -1734,7 +1717,7 @@ class DefaultProvider(EggProvider):
'SourcelessFileLoader',
)
for name in loader_names:
- loader_cls = getattr(importlib_machinery, name, type(None))
+ loader_cls = getattr(importlib.machinery, name, type(None))
register_loader_type(loader_cls, cls)
@@ -1895,7 +1878,7 @@ class ZipProvider(EggProvider):
try:
rename(tmpnam, real_path)
- except os.error:
+ except OSError:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
@@ -1908,7 +1891,7 @@ class ZipProvider(EggProvider):
return real_path
raise
- except os.error:
+ except OSError:
# report a user-friendly error
manager.extraction_error()
@@ -2229,7 +2212,7 @@ def resolve_egg_link(path):
if hasattr(pkgutil, 'ImpImporter'):
register_finder(pkgutil.ImpImporter, find_on_path)
-register_finder(importlib_machinery.FileFinder, find_on_path)
+register_finder(importlib.machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
@@ -2396,7 +2379,7 @@ if hasattr(pkgutil, 'ImpImporter'):
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+register_namespace_handler(importlib.machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
@@ -2422,12 +2405,9 @@ def _cygwin_patch(filename): # pragma: nocover
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-def _normalize_cached(filename, _cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
+@functools.lru_cache(maxsize=None)
+def _normalize_cached(filename):
+ return normalize_path(filename)
def _is_egg_path(path):
@@ -2852,9 +2832,7 @@ class Distribution:
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
- version = _version_from_file(lines)
-
- return version
+ return _version_from_file(lines)
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
@@ -2901,7 +2879,7 @@ class Distribution:
def __dir__(self):
return list(
- set(super(Distribution, self).__dir__())
+ set(super().__dir__())
| set(attr for attr in self._provider.__dir__() if not attr.startswith('_'))
)
@@ -3168,7 +3146,7 @@ class RequirementParseError(packaging.requirements.InvalidRequirement):
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
- super(Requirement, self).__init__(requirement_string)
+ super().__init__(requirement_string)
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
@@ -3229,6 +3207,7 @@ def _find_adapter(registry, ob):
for t in types:
if t in registry:
return registry[t]
+ return None
def ensure_directory(path):
diff --git a/contrib/python/setuptools/py3/setuptools/_core_metadata.py b/contrib/python/setuptools/py3/setuptools/_core_metadata.py
index 6c904c3c77..4bf3c7c947 100644
--- a/contrib/python/setuptools/py3/setuptools/_core_metadata.py
+++ b/contrib/python/setuptools/py3/setuptools/_core_metadata.py
@@ -3,6 +3,7 @@ Handling of Core Metadata for Python packages (including reading and writing).
See: https://packaging.python.org/en/latest/specifications/core-metadata/
"""
+
import os
import stat
import textwrap
diff --git a/contrib/python/setuptools/py3/setuptools/_normalization.py b/contrib/python/setuptools/py3/setuptools/_normalization.py
index aa9274f093..8f211b8bfb 100644
--- a/contrib/python/setuptools/py3/setuptools/_normalization.py
+++ b/contrib/python/setuptools/py3/setuptools/_normalization.py
@@ -2,6 +2,7 @@
Helpers for normalization as expected in wheel/sdist/module file names
and core metadata
"""
+
import re
from pathlib import Path
from typing import Union
@@ -119,6 +120,21 @@ def filename_component(value: str) -> str:
return value.replace("-", "_").strip("_")
+def filename_component_broken(value: str) -> str:
+ """
+ Produce the incorrect filename component for compatibility.
+
+ See pypa/setuptools#4167 for detailed analysis.
+
+ TODO: replace this with filename_component after pip 24 is
+ nearly-ubiquitous.
+
+ >>> filename_component_broken('foo_bar-baz')
+ 'foo-bar-baz'
+ """
+ return value.replace('_', '-')
+
+
def safer_name(value: str) -> str:
"""Like ``safe_name`` but can be used as filename component for wheel"""
# See bdist_wheel.safer_name
diff --git a/contrib/python/setuptools/py3/setuptools/_reqs.py b/contrib/python/setuptools/py3/setuptools/_reqs.py
index 7d7130d50e..9f83437033 100644
--- a/contrib/python/setuptools/py3/setuptools/_reqs.py
+++ b/contrib/python/setuptools/py3/setuptools/_reqs.py
@@ -24,13 +24,11 @@ def parse_strings(strs: _StrOrIter) -> Iterator[str]:
@overload
-def parse(strs: _StrOrIter) -> Iterator[Requirement]:
- ...
+def parse(strs: _StrOrIter) -> Iterator[Requirement]: ...
@overload
-def parse(strs: _StrOrIter, parser: Callable[[str], _T]) -> Iterator[_T]:
- ...
+def parse(strs: _StrOrIter, parser: Callable[[str], _T]) -> Iterator[_T]: ...
def parse(strs, parser=parse_req):
diff --git a/contrib/python/setuptools/py3/setuptools/build_meta.py b/contrib/python/setuptools/py3/setuptools/build_meta.py
index 6da80d70b8..0a0abfdae0 100644
--- a/contrib/python/setuptools/py3/setuptools/build_meta.py
+++ b/contrib/python/setuptools/py3/setuptools/build_meta.py
@@ -121,16 +121,16 @@ def _file_with_extension(directory, extension):
raise ValueError(
'No distribution was found. Ensure that `setup.py` '
'is not empty and that it calls `setup()`.'
- )
+ ) from None
return file
def _open_setup_script(setup_script):
if not os.path.exists(setup_script):
# Supply a default setup.py
- return io.StringIO(u"from setuptools import setup; setup()")
+ return io.StringIO("from setuptools import setup; setup()")
- return getattr(tokenize, 'open', open)(setup_script)
+ return tokenize.open(setup_script)
@contextlib.contextmanager
@@ -477,7 +477,7 @@ class _BuildMetaLegacyBackend(_BuildMetaBackend):
sys.argv[0] = setup_script
try:
- super(_BuildMetaLegacyBackend, self).run_setup(setup_script=setup_script)
+ super().run_setup(setup_script=setup_script)
finally:
# While PEP 517 frontends should be calling each hook in a fresh
# subprocess according to the standard (and thus it should not be
diff --git a/contrib/python/setuptools/py3/setuptools/command/_requirestxt.py b/contrib/python/setuptools/py3/setuptools/command/_requirestxt.py
index 32bae2c4b4..7b732b11ab 100644
--- a/contrib/python/setuptools/py3/setuptools/command/_requirestxt.py
+++ b/contrib/python/setuptools/py3/setuptools/command/_requirestxt.py
@@ -6,6 +6,7 @@ The ``requires.txt`` file has an specific format:
See https://setuptools.pypa.io/en/latest/deprecated/python_eggs.html#requires-txt
"""
+
import io
from collections import defaultdict
from itertools import filterfalse
diff --git a/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py b/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
index bdece56bc9..3687efdf9c 100644
--- a/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
+++ b/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
@@ -232,9 +232,11 @@ class bdist_egg(Command):
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
- getattr(self.distribution, 'dist_files', []).append(
- ('bdist_egg', get_python_version(), self.egg_output)
- )
+ getattr(self.distribution, 'dist_files', []).append((
+ 'bdist_egg',
+ get_python_version(),
+ self.egg_output,
+ ))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
@@ -319,8 +321,7 @@ def walk_egg(egg_dir):
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
- for bdf in walker:
- yield bdf
+ yield from walker
def analyze_egg(egg_dir, stubs):
@@ -368,10 +369,7 @@ def scan_module(egg_dir, base, name, stubs):
return True # Extension module
pkg = base[len(egg_dir) + 1 :].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
- if sys.version_info < (3, 7):
- skip = 12 # skip magic & date & file size
- else:
- skip = 16 # skip magic & reserved? & date & file size
+ skip = 16 # skip magic & reserved? & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
@@ -404,14 +402,12 @@ def scan_module(egg_dir, base, name, stubs):
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
- for name in code.co_names:
- yield name
+ yield from code.co_names
for const in code.co_consts:
if isinstance(const, str):
yield const
elif isinstance(const, CodeType):
- for name in iter_symbols(const):
- yield name
+ yield from iter_symbols(const)
def can_scan():
@@ -423,6 +419,7 @@ def can_scan():
"Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py"
)
+ return False
# Attribute names of options for commands that might need to be convinced to
diff --git a/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py b/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
index 30b7c23385..70ed6b6097 100644
--- a/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
+++ b/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
@@ -30,11 +30,10 @@ class bdist_rpm(orig.bdist_rpm):
def _make_spec_file(self):
spec = orig.bdist_rpm._make_spec_file(self)
- spec = [
+ return [
line.replace(
"setup.py install ",
"setup.py install --single-version-externally-managed ",
).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
for line in spec
]
- return spec
diff --git a/contrib/python/setuptools/py3/setuptools/command/build.py b/contrib/python/setuptools/py3/setuptools/command/build.py
index 0f1d688e17..afda7e3be9 100644
--- a/contrib/python/setuptools/py3/setuptools/command/build.py
+++ b/contrib/python/setuptools/py3/setuptools/command/build.py
@@ -1,17 +1,8 @@
-import sys
-from typing import TYPE_CHECKING, List, Dict
+from typing import Dict, List, Protocol
from distutils.command.build import build as _build
from ..warnings import SetuptoolsDeprecationWarning
-if sys.version_info >= (3, 8):
- from typing import Protocol
-elif TYPE_CHECKING:
- from typing_extensions import Protocol
-else:
- from abc import ABC as Protocol
-
-
_ORIGINAL_SUBCOMMANDS = {"build_py", "build_clib", "build_ext", "build_scripts"}
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_ext.py b/contrib/python/setuptools/py3/setuptools/command/build_ext.py
index 9a80781cf4..780afe3aec 100644
--- a/contrib/python/setuptools/py3/setuptools/command/build_ext.py
+++ b/contrib/python/setuptools/py3/setuptools/command/build_ext.py
@@ -37,9 +37,9 @@ def _customize_compiler_for_shlib(compiler):
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
- _CONFIG_VARS[
- 'LDSHARED'
- ] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
+ _CONFIG_VARS['LDSHARED'] = (
+ "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
+ )
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
@@ -76,6 +76,7 @@ def get_abi3_suffix():
return suffix
elif suffix == '.pyd': # Windows
return suffix
+ return None
class build_ext(_build_ext):
@@ -157,7 +158,7 @@ class build_ext(_build_ext):
if fullname in self.ext_map:
ext = self.ext_map[fullname]
- use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix()
+ use_abi3 = ext.py_limited_api and get_abi3_suffix()
if use_abi3:
filename = filename[: -len(so_ext)]
so_ext = get_abi3_suffix()
@@ -341,33 +342,30 @@ class build_ext(_build_ext):
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
- '\n'.join(
- [
- "def __bootstrap__():",
- " global __bootstrap__, __file__, __loader__",
- " import sys, os, pkg_resources, importlib.util"
- + if_dl(", dl"),
- " __file__ = pkg_resources.resource_filename"
- "(__name__,%r)" % os.path.basename(ext._file_name),
- " del __bootstrap__",
- " if '__loader__' in globals():",
- " del __loader__",
- if_dl(" old_flags = sys.getdlopenflags()"),
- " old_dir = os.getcwd()",
- " try:",
- " os.chdir(os.path.dirname(__file__))",
- if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
- " spec = importlib.util.spec_from_file_location(",
- " __name__, __file__)",
- " mod = importlib.util.module_from_spec(spec)",
- " spec.loader.exec_module(mod)",
- " finally:",
- if_dl(" sys.setdlopenflags(old_flags)"),
- " os.chdir(old_dir)",
- "__bootstrap__()",
- "", # terminal \n
- ]
- )
+ '\n'.join([
+ "def __bootstrap__():",
+ " global __bootstrap__, __file__, __loader__",
+ " import sys, os, pkg_resources, importlib.util" + if_dl(", dl"),
+ " __file__ = pkg_resources.resource_filename"
+ "(__name__,%r)" % os.path.basename(ext._file_name),
+ " del __bootstrap__",
+ " if '__loader__' in globals():",
+ " del __loader__",
+ if_dl(" old_flags = sys.getdlopenflags()"),
+ " old_dir = os.getcwd()",
+ " try:",
+ " os.chdir(os.path.dirname(__file__))",
+ if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
+ " spec = importlib.util.spec_from_file_location(",
+ " __name__, __file__)",
+ " mod = importlib.util.module_from_spec(spec)",
+ " spec.loader.exec_module(mod)",
+ " finally:",
+ if_dl(" sys.setdlopenflags(old_flags)"),
+ " os.chdir(old_dir)",
+ "__bootstrap__()",
+ "", # terminal \n
+ ])
)
f.close()
if compile:
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_py.py b/contrib/python/setuptools/py3/setuptools/command/build_py.py
index cbdd05aab0..3f40b060b3 100644
--- a/contrib/python/setuptools/py3/setuptools/command/build_py.py
+++ b/contrib/python/setuptools/py3/setuptools/command/build_py.py
@@ -288,7 +288,7 @@ class build_py(orig.build_py):
return list(unique_everseen(keepers))
@staticmethod
- def _get_platform_patterns(spec, package, src_dir, extra_patterns=[]):
+ def _get_platform_patterns(spec, package, src_dir, extra_patterns=()):
"""
yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
diff --git a/contrib/python/setuptools/py3/setuptools/command/develop.py b/contrib/python/setuptools/py3/setuptools/command/develop.py
index ea3e48e55c..d8c1b49b3d 100644
--- a/contrib/python/setuptools/py3/setuptools/command/develop.py
+++ b/contrib/python/setuptools/py3/setuptools/command/develop.py
@@ -5,6 +5,7 @@ import os
import glob
from setuptools.command.easy_install import easy_install
+from setuptools import _normalization
from setuptools import _path
from setuptools import namespaces
import setuptools
@@ -52,7 +53,9 @@ class develop(namespaces.DevelopInstaller, easy_install):
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
- egg_link_fn = ei.egg_name + '.egg-link'
+ egg_link_fn = (
+ _normalization.filename_component_broken(ei.egg_name) + '.egg-link'
+ )
self.egg_link = os.path.join(self.install_dir, egg_link_fn)
self.egg_base = ei.egg_base
if self.egg_path is None:
@@ -157,6 +160,8 @@ class develop(namespaces.DevelopInstaller, easy_install):
script_text = strm.read()
self.install_script(dist, script_name, script_text, script_path)
+ return None
+
def install_wrapper_scripts(self, dist):
dist = VersionlessRequirement(dist)
return easy_install.install_wrapper_scripts(self, dist)
diff --git a/contrib/python/setuptools/py3/setuptools/command/dist_info.py b/contrib/python/setuptools/py3/setuptools/command/dist_info.py
index 5ef322168c..f5061afaaf 100644
--- a/contrib/python/setuptools/py3/setuptools/command/dist_info.py
+++ b/contrib/python/setuptools/py3/setuptools/command/dist_info.py
@@ -5,7 +5,6 @@ As defined in the wheel specification
import os
import shutil
-import sys
from contextlib import contextmanager
from distutils import log
from distutils.core import Command
@@ -77,7 +76,7 @@ class dist_info(Command):
if requires_bkp:
bkp_name = f"{dir_path}.__bkp__"
_rm(bkp_name, ignore_errors=True)
- _copy(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True)
+ shutil.copytree(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True)
try:
yield
finally:
@@ -103,9 +102,3 @@ class dist_info(Command):
def _rm(dir_name, **opts):
if os.path.isdir(dir_name):
shutil.rmtree(dir_name, **opts)
-
-
-def _copy(src, dst, **opts):
- if sys.version_info < (3, 8):
- opts.pop("dirs_exist_ok", None)
- shutil.copytree(src, dst, **opts)
diff --git a/contrib/python/setuptools/py3/setuptools/command/easy_install.py b/contrib/python/setuptools/py3/setuptools/command/easy_install.py
index 5d6fd5ca71..cc0c409123 100644
--- a/contrib/python/setuptools/py3/setuptools/command/easy_install.py
+++ b/contrib/python/setuptools/py3/setuptools/command/easy_install.py
@@ -74,7 +74,7 @@ from pkg_resources import (
DEVELOP_DIST,
)
import pkg_resources
-from .. import py312compat
+from ..compat import py311
from .._path import ensure_directory
from ..extern.jaraco.text import yield_lines
@@ -245,31 +245,26 @@ class easy_install(Command):
self.config_vars = dict(sysconfig.get_config_vars())
- self.config_vars.update(
- {
- 'dist_name': self.distribution.get_name(),
- 'dist_version': self.distribution.get_version(),
- 'dist_fullname': self.distribution.get_fullname(),
- 'py_version': py_version,
- 'py_version_short': (
- f'{sys.version_info.major}.{sys.version_info.minor}'
- ),
- 'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}',
- 'sys_prefix': self.config_vars['prefix'],
- 'sys_exec_prefix': self.config_vars['exec_prefix'],
- # Only python 3.2+ has abiflags
- 'abiflags': getattr(sys, 'abiflags', ''),
- 'platlibdir': getattr(sys, 'platlibdir', 'lib'),
- }
- )
+ self.config_vars.update({
+ 'dist_name': self.distribution.get_name(),
+ 'dist_version': self.distribution.get_version(),
+ 'dist_fullname': self.distribution.get_fullname(),
+ 'py_version': py_version,
+ 'py_version_short': f'{sys.version_info.major}.{sys.version_info.minor}',
+ 'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}',
+ 'sys_prefix': self.config_vars['prefix'],
+ 'sys_exec_prefix': self.config_vars['exec_prefix'],
+ # Only POSIX systems have abiflags
+ 'abiflags': getattr(sys, 'abiflags', ''),
+ # Only python 3.9+ has platlibdir
+ 'platlibdir': getattr(sys, 'platlibdir', 'lib'),
+ })
with contextlib.suppress(AttributeError):
# only for distutils outside stdlib
- self.config_vars.update(
- {
- 'implementation_lower': install._get_implementation().lower(),
- 'implementation': install._get_implementation(),
- }
- )
+ self.config_vars.update({
+ 'implementation_lower': install._get_implementation().lower(),
+ 'implementation': install._get_implementation(),
+ })
# pypa/distutils#113 Python 3.9 compat
self.config_vars.setdefault(
@@ -668,7 +663,7 @@ class easy_install(Command):
@contextlib.contextmanager
def _tmpdir(self):
- tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
+ tmpdir = tempfile.mkdtemp(prefix="easy_install-")
try:
# cast to str as workaround for #709 and #710 and #712
yield str(tmpdir)
@@ -746,6 +741,7 @@ class easy_install(Command):
for dist in dists:
if dist in spec:
return dist
+ return None
def select_scheme(self, name):
try:
@@ -1028,9 +1024,9 @@ class easy_install(Command):
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
- self.delete_blockers(
- [os.path.join(script_dir, args[0]) for args in ScriptWriter.get_args(dist)]
- )
+ self.delete_blockers([
+ os.path.join(script_dir, args[0]) for args in ScriptWriter.get_args(dist)
+ ])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path,
@@ -1433,24 +1429,20 @@ def get_site_dirs():
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
- sitedirs.extend(
- [
- os.path.join(
- prefix,
- "lib",
- "python{}.{}".format(*sys.version_info),
- "site-packages",
- ),
- os.path.join(prefix, "lib", "site-python"),
- ]
- )
- else:
- sitedirs.extend(
- [
+ sitedirs.extend([
+ os.path.join(
prefix,
- os.path.join(prefix, "lib", "site-packages"),
- ]
- )
+ "lib",
+ "python{}.{}".format(*sys.version_info),
+ "site-packages",
+ ),
+ os.path.join(prefix, "lib", "site-python"),
+ ])
+ else:
+ sitedirs.extend([
+ prefix,
+ os.path.join(prefix, "lib", "site-packages"),
+ ])
if sys.platform != 'darwin':
continue
@@ -1482,9 +1474,7 @@ def get_site_dirs():
with contextlib.suppress(AttributeError):
sitedirs.extend(site.getsitepackages())
- sitedirs = list(map(normalize_path, sitedirs))
-
- return sitedirs
+ return list(map(normalize_path, sitedirs))
def expand_paths(inputs): # noqa: C901 # is too complex (11) # FIXME
@@ -1678,9 +1668,11 @@ class PthDistributions(Environment):
last_paths.remove(path)
# also, re-check that all paths are still valid before saving them
for path in self.paths[:]:
- if path not in last_paths and not path.startswith(
- ('import ', 'from ', '#')
- ):
+ if path not in last_paths and not path.startswith((
+ 'import ',
+ 'from ',
+ '#',
+ )):
absolute_path = os.path.join(self.basedir, path)
if not os.path.exists(absolute_path):
self.paths.remove(path)
@@ -1751,8 +1743,7 @@ class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
- for line in lines:
- yield line
+ yield from lines
yield cls.postlude
prelude = _one_liner(
@@ -2032,7 +2023,7 @@ def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
- except os.error as e:
+ except OSError as e:
log.debug("chmod failed: %s", e)
@@ -2188,8 +2179,7 @@ class ScriptWriter:
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
- for res in args:
- yield res
+ yield from args
@staticmethod
def _ensure_safe_name(name):
@@ -2339,7 +2329,7 @@ def load_launcher_manifest(name):
def _rmtree(path, ignore_errors=False, onexc=auto_chmod):
- return py312compat.shutil_rmtree(path, ignore_errors, onexc)
+ return py311.shutil_rmtree(path, ignore_errors, onexc)
def current_umask():
diff --git a/contrib/python/setuptools/py3/setuptools/command/editable_wheel.py b/contrib/python/setuptools/py3/setuptools/command/editable_wheel.py
index 79c839f8f0..8a4ae7928f 100644
--- a/contrib/python/setuptools/py3/setuptools/command/editable_wheel.py
+++ b/contrib/python/setuptools/py3/setuptools/command/editable_wheel.py
@@ -19,7 +19,7 @@ import traceback
from contextlib import suppress
from enum import Enum
from inspect import cleandoc
-from itertools import chain
+from itertools import chain, starmap
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import (
@@ -30,6 +30,7 @@ from typing import (
List,
Mapping,
Optional,
+ Protocol,
Tuple,
TypeVar,
Union,
@@ -54,13 +55,6 @@ from .build_py import build_py as build_py_cls
if TYPE_CHECKING:
from wheel.wheelfile import WheelFile # noqa
-if sys.version_info >= (3, 8):
- from typing import Protocol
-elif TYPE_CHECKING:
- from typing_extensions import Protocol
-else:
- from abc import ABC as Protocol
-
_Path = Union[str, Path]
_P = TypeVar("_P", bound=_Path)
_logger = logging.getLogger(__name__)
@@ -384,14 +378,13 @@ class editable_wheel(Command):
class EditableStrategy(Protocol):
- def __call__(self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]):
- ...
+ def __call__(
+ self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]
+ ): ...
- def __enter__(self):
- ...
+ def __enter__(self): ...
- def __exit__(self, _exc_type, _exc_value, _traceback):
- ...
+ def __exit__(self, _exc_type, _exc_value, _traceback): ...
class _StaticPth:
@@ -401,7 +394,7 @@ class _StaticPth:
self.path_entries = path_entries
def __call__(self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]):
- entries = "\n".join((str(p.resolve()) for p in self.path_entries))
+ entries = "\n".join(str(p.resolve()) for p in self.path_entries)
contents = _encode_pth(f"{entries}\n")
wheel.writestr(f"__editable__.{self.name}.pth", contents)
@@ -413,8 +406,7 @@ class _StaticPth:
_logger.warning(msg + _LENIENT_WARNING)
return self
- def __exit__(self, _exc_type, _exc_value, _traceback):
- ...
+ def __exit__(self, _exc_type, _exc_value, _traceback): ...
class _LinkTree(_StaticPth):
@@ -608,7 +600,7 @@ def _simple_layout(
layout = {pkg: find_package_path(pkg, package_dir, project_dir) for pkg in packages}
if not layout:
return set(package_dir) in ({}, {""})
- parent = os.path.commonpath([_parent_path(k, v) for k, v in layout.items()])
+ parent = os.path.commonpath(starmap(_parent_path, layout.items()))
return all(
_path.same_path(Path(parent, *key.split('.')), value)
for key, value in layout.items()
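The editable_wheel.py hunk above drops the version-gated import of Protocol: typing.Protocol exists on every Python that setuptools still supports (3.8+), so the typing_extensions and abc.ABC fallbacks were dead code. A minimal sketch of a structural protocol in the style of EditableStrategy (class and signature simplified, names hypothetical):

    from typing import Protocol

    class Strategy(Protocol):
        # Any object with a matching __call__ satisfies this type;
        # no inheritance is required (structural typing, PEP 544).
        def __call__(self, name: str) -> None: ...

    class Greeter:
        def __call__(self, name: str) -> None:
            print(f"hello {name}")

    def run(strategy: Strategy) -> None:
        strategy("demo")

    run(Greeter())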
diff --git a/contrib/python/setuptools/py3/setuptools/command/egg_info.py b/contrib/python/setuptools/py3/setuptools/command/egg_info.py
index 7c7f57aaf8..62d2feea9b 100644
--- a/contrib/python/setuptools/py3/setuptools/command/egg_info.py
+++ b/contrib/python/setuptools/py3/setuptools/command/egg_info.py
@@ -385,9 +385,8 @@ class FileList(_FileList):
try:
process_action = action_map[action]
except KeyError:
- raise DistutilsInternalError(
- "this cannot happen: invalid action '{action!s}'".format(action=action),
- )
+ msg = f"Invalid MANIFEST.in: unknown action {action!r} in {line!r}"
+ raise DistutilsInternalError(msg) from None
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
@@ -700,9 +699,9 @@ write_setup_requirements = _requirestxt.write_setup_requirements
def write_toplevel_names(cmd, basename, filename):
- pkgs = dict.fromkeys(
- [k.split('.', 1)[0] for k in cmd.distribution.iter_distribution_names()]
- )
+ pkgs = dict.fromkeys([
+ k.split('.', 1)[0] for k in cmd.distribution.iter_distribution_names()
+ ])
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
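The egg_info.py hunk above turns an internal "this cannot happen" error into an actionable message and adds "from None", which suppresses the chained KeyError so the traceback shows only the meaningful exception. A minimal sketch of the suppression (dictionary and message are hypothetical):

    action_map = {"include": "handler"}

    def process(action: str, line: str) -> str:
        try:
            return action_map[action]
        except KeyError:
            msg = f"unknown action {action!r} in {line!r}"
            # "from None" hides the KeyError context; without it the
            # traceback would add "During handling of the above
            # exception, another exception occurred".
            raise ValueError(msg) from None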
diff --git a/contrib/python/setuptools/py3/setuptools/command/install.py b/contrib/python/setuptools/py3/setuptools/command/install.py
index 606cce9d89..b97a9b4713 100644
--- a/contrib/python/setuptools/py3/setuptools/command/install.py
+++ b/contrib/python/setuptools/py3/setuptools/command/install.py
@@ -71,6 +71,7 @@ class install(orig.install):
# command without --root or --single-version-externally-managed
self.path_file = None
self.extra_dirs = ''
+ return None
def run(self):
# Explicit request for old-style install? Just do it
@@ -83,6 +84,8 @@ class install(orig.install):
else:
self.do_egg_install()
+ return None
+
@staticmethod
def _called_from_setup(run_frame):
"""
@@ -114,6 +117,8 @@ class install(orig.install):
return caller_module == 'distutils.dist' and info.function == 'run_commands'
+ return False
+
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
diff --git a/contrib/python/setuptools/py3/setuptools/command/sdist.py b/contrib/python/setuptools/py3/setuptools/command/sdist.py
index 5f45fb5dee..d455f44c5e 100644
--- a/contrib/python/setuptools/py3/setuptools/command/sdist.py
+++ b/contrib/python/setuptools/py3/setuptools/command/sdist.py
@@ -1,7 +1,6 @@
from distutils import log
import distutils.command.sdist as orig
import os
-import sys
import contextlib
from itertools import chain
@@ -14,8 +13,7 @@ _default_revctrl = list
def walk_revctrl(dirname=''):
"""Find all files under revision control"""
for ep in metadata.entry_points(group='setuptools.file_finders'):
- for item in ep.load()(dirname):
- yield item
+ yield from ep.load()(dirname)
class sdist(orig.sdist):
@@ -72,14 +70,6 @@ class sdist(orig.sdist):
def initialize_options(self):
orig.sdist.initialize_options(self)
- self._default_to_gztar()
-
- def _default_to_gztar(self):
- # only needed on Python prior to 3.6.
- if sys.version_info >= (3, 6, 0, 'beta', 1):
- return
- self.formats = ['gztar']
-
def make_distribution(self):
"""
Workaround for #516
@@ -106,7 +96,7 @@ class sdist(orig.sdist):
yield
finally:
if orig_val is not NoValue:
- setattr(os, 'link', orig_val)
+ os.link = orig_val
def add_defaults(self):
super().add_defaults()
@@ -190,7 +180,7 @@ class sdist(orig.sdist):
with open(self.manifest, 'rb') as fp:
first_line = fp.readline()
- return first_line != '# file GENERATED by distutils, do NOT edit\n'.encode()
+ return first_line != b'# file GENERATED by distutils, do NOT edit\n'
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
diff --git a/contrib/python/setuptools/py3/setuptools/command/test.py b/contrib/python/setuptools/py3/setuptools/command/test.py
index 5fce6660c0..0a128f2a7a 100644
--- a/contrib/python/setuptools/py3/setuptools/command/test.py
+++ b/contrib/python/setuptools/py3/setuptools/command/test.py
@@ -132,7 +132,7 @@ class test(Command):
func()
@contextlib.contextmanager
- def project_on_sys_path(self, include_dists=[]):
+ def project_on_sys_path(self, include_dists=()):
self.run_command('egg_info')
# Build extensions in-place
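The test.py hunk above swaps a mutable default argument (include_dists=[]) for an immutable tuple. Defaults are evaluated once at function definition, so a list default is shared across every call; here the default is only iterated, so the old code was not actually buggy, but the tuple removes the trap. A minimal illustration:

    def collect_bad(item, acc=[]):   # one list shared by every call
        acc.append(item)
        return acc

    def collect_good(item, acc=()):  # tuples are immutable, so safe
        return (*acc, item)

    collect_bad(1)
    print(collect_bad(2))   # [1, 2] -- state leaked between calls
    print(collect_good(1))  # (1,)
    print(collect_good(2))  # (2,)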
diff --git a/contrib/python/setuptools/py3/setuptools/compat/__init__.py b/contrib/python/setuptools/py3/setuptools/compat/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/setuptools/py3/setuptools/compat/__init__.py
diff --git a/contrib/python/setuptools/py3/setuptools/compat/py310.py b/contrib/python/setuptools/py3/setuptools/compat/py310.py
new file mode 100644
index 0000000000..f7d53d6de9
--- /dev/null
+++ b/contrib/python/setuptools/py3/setuptools/compat/py310.py
@@ -0,0 +1,10 @@
+import sys
+
+
+__all__ = ['tomllib']
+
+
+if sys.version_info >= (3, 11):
+ import tomllib
+else: # pragma: no cover
+ from setuptools.extern import tomli as tomllib
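The new compat/py310.py above centralizes the TOML-parser choice: the stdlib tomllib appeared in Python 3.11, and tomli is its API-compatible backport, so callers (such as the pyprojecttoml.py load_file change later in this diff) import one name and parse identically on either interpreter. Roughly how such a shim is consumed (file path hypothetical; note tomllib requires binary mode):

    try:
        import tomllib            # Python 3.11+
    except ModuleNotFoundError:   # pragma: no cover
        import tomli as tomllib   # backport with the same API

    with open("pyproject.toml", "rb") as f:
        config = tomllib.load(f)
    print(config.get("project", {}).get("name"))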
diff --git a/contrib/python/setuptools/py3/setuptools/py312compat.py b/contrib/python/setuptools/py3/setuptools/compat/py311.py
index 28175b1f75..28175b1f75 100644
--- a/contrib/python/setuptools/py3/setuptools/py312compat.py
+++ b/contrib/python/setuptools/py3/setuptools/compat/py311.py
diff --git a/contrib/python/setuptools/py3/setuptools/config/__init__.py b/contrib/python/setuptools/py3/setuptools/config/__init__.py
index ffea394436..fcc7d008d6 100644
--- a/contrib/python/setuptools/py3/setuptools/config/__init__.py
+++ b/contrib/python/setuptools/py3/setuptools/config/__init__.py
@@ -1,6 +1,7 @@
"""For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
+
from functools import wraps
from typing import Callable, TypeVar, cast
diff --git a/contrib/python/setuptools/py3/setuptools/config/_apply_pyprojecttoml.py b/contrib/python/setuptools/py3/setuptools/config/_apply_pyprojecttoml.py
index 80318d5d0b..32fb00131e 100644
--- a/contrib/python/setuptools/py3/setuptools/config/_apply_pyprojecttoml.py
+++ b/contrib/python/setuptools/py3/setuptools/config/_apply_pyprojecttoml.py
@@ -7,6 +7,7 @@ need to be processed before being applied.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
+
import logging
import os
from collections.abc import Mapping
@@ -240,7 +241,7 @@ def _unify_entry_points(project_table: dict):
if group # now we can skip empty groups
}
# Sometimes this will set `project["entry-points"] = {}`, and that is
- # intentional (for reseting configurations that are missing `dynamic`).
+ # intentional (for resetting configurations that are missing `dynamic`).
def _copy_command_options(pyproject: dict, dist: "Distribution", filename: _Path):
@@ -408,7 +409,7 @@ _RESET_PREVIOUSLY_DEFINED: dict = {
"scripts": {},
"gui-scripts": {},
"dependencies": [],
- "optional-dependencies": [],
+ "optional-dependencies": {},
}
@@ -423,7 +424,7 @@ class _MissingDynamic(SetuptoolsWarning):
According to the spec (see the link below), however, setuptools CANNOT
consider this value unless `{field}` is listed as `dynamic`.
- https://packaging.python.org/en/latest/specifications/declaring-project-metadata/
+ https://packaging.python.org/en/latest/specifications/pyproject-toml/#declaring-project-metadata-the-project-table
To prevent this problem, you can list `{field}` under `dynamic` or alternatively
remove the `[project]` table from your file and rely entirely on other means of
diff --git a/contrib/python/setuptools/py3/setuptools/config/_validate_pyproject/error_reporting.py b/contrib/python/setuptools/py3/setuptools/config/_validate_pyproject/error_reporting.py
index f78e4838fb..d44e290e36 100644
--- a/contrib/python/setuptools/py3/setuptools/config/_validate_pyproject/error_reporting.py
+++ b/contrib/python/setuptools/py3/setuptools/config/_validate_pyproject/error_reporting.py
@@ -24,7 +24,7 @@ _SKIP_DETAILS = (
"must not be there",
)
-_NEED_DETAILS = {"anyOf", "oneOf", "anyOf", "contains", "propertyNames", "not", "items"}
+_NEED_DETAILS = {"anyOf", "oneOf", "allOf", "contains", "propertyNames", "not", "items"}
_CAMEL_CASE_SPLITTER = re.compile(r"\W+|([A-Z][^A-Z\W]*)")
_IDENTIFIER = re.compile(r"^[\w_]+$", re.I)
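The error_reporting.py hunk above fixes a subtle bug: set literals silently drop duplicates, so the original {"anyOf", "oneOf", "anyOf", ...} never contained "allOf", and allOf schema errors lost their detailed reporting. A one-line demonstration:

    assert len({"anyOf", "oneOf", "anyOf"}) == 2          # duplicate collapses silently
    assert "allOf" not in {"anyOf", "oneOf", "anyOf"}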
diff --git a/contrib/python/setuptools/py3/setuptools/config/expand.py b/contrib/python/setuptools/py3/setuptools/config/expand.py
index 1bc71de546..b48fc1187e 100644
--- a/contrib/python/setuptools/py3/setuptools/config/expand.py
+++ b/contrib/python/setuptools/py3/setuptools/config/expand.py
@@ -17,6 +17,7 @@ functions among several configuration file formats.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
+
import ast
import importlib
import os
diff --git a/contrib/python/setuptools/py3/setuptools/config/pyprojecttoml.py b/contrib/python/setuptools/py3/setuptools/config/pyprojecttoml.py
index 379ef222f9..9b9788eff4 100644
--- a/contrib/python/setuptools/py3/setuptools/config/pyprojecttoml.py
+++ b/contrib/python/setuptools/py3/setuptools/config/pyprojecttoml.py
@@ -8,6 +8,7 @@ To read project metadata, consider using
For simple scenarios, you can also try parsing the file directly
with the help of ``tomllib`` or ``tomli``.
"""
+
import logging
import os
from contextlib import contextmanager
@@ -28,10 +29,10 @@ _logger = logging.getLogger(__name__)
def load_file(filepath: _Path) -> dict:
- from setuptools.extern import tomli # type: ignore
+ from ..compat.py310 import tomllib
with open(filepath, "rb") as file:
- return tomli.load(file)
+ return tomllib.load(file)
def validate(config: dict, filepath: _Path) -> bool:
diff --git a/contrib/python/setuptools/py3/setuptools/config/setupcfg.py b/contrib/python/setuptools/py3/setuptools/config/setupcfg.py
index 1a0e4154b9..a7f02714cb 100644
--- a/contrib/python/setuptools/py3/setuptools/config/setupcfg.py
+++ b/contrib/python/setuptools/py3/setuptools/config/setupcfg.py
@@ -8,6 +8,7 @@ To read project metadata, consider using
For simple scenarios, you can also try parsing the file directly
with the help of ``configparser``.
"""
+
import contextlib
import functools
import os
@@ -282,8 +283,8 @@ class ConfigHandler(Generic[Target]):
try:
current_value = getattr(target_obj, option_name)
- except AttributeError:
- raise KeyError(option_name)
+ except AttributeError as e:
+ raise KeyError(option_name) from e
if current_value:
# Already inhabited. Skipping.
@@ -581,11 +582,11 @@ class ConfigMetadataHandler(ConfigHandler["DistributionMetadata"]):
# accidentally include newlines and other unintended content
try:
Version(version)
- except InvalidVersion:
+ except InvalidVersion as e:
raise OptionError(
f'Version loaded from {value} does not '
f'comply with PEP 440: {version}'
- )
+ ) from e
return version
@@ -694,9 +695,9 @@ class ConfigOptionsHandler(ConfigHandler["Distribution"]):
valid_keys = ['where', 'include', 'exclude']
- find_kwargs = dict(
- [(k, v) for k, v in section_data.items() if k in valid_keys and v]
- )
+ find_kwargs = dict([
+ (k, v) for k, v in section_data.items() if k in valid_keys and v
+ ])
where = find_kwargs.get('where')
if where is not None:
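The setupcfg.py hunks above add explicit exception chaining. Where "from None" (used in egg_info earlier) hides the original error, "raise ... from e" preserves it as __cause__, so tracebacks read "The above exception was the direct cause of ..." and debugging can walk back to the root failure. A minimal sketch:

    def lookup(obj, option_name: str):
        try:
            return getattr(obj, option_name)
        except AttributeError as e:
            # __cause__ keeps the AttributeError attached to the KeyError
            raise KeyError(option_name) from e

    try:
        lookup(object(), "missing")
    except KeyError as err:
        assert isinstance(err.__cause__, AttributeError)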
diff --git a/contrib/python/setuptools/py3/setuptools/depends.py b/contrib/python/setuptools/py3/setuptools/depends.py
index 42907d9bd4..c0ca84d404 100644
--- a/contrib/python/setuptools/py3/setuptools/depends.py
+++ b/contrib/python/setuptools/py3/setuptools/depends.py
@@ -159,6 +159,8 @@ def extract_constant(code, symbol, default=-1):
else:
const = default
+ return None
+
def _update_globals():
"""
diff --git a/contrib/python/setuptools/py3/setuptools/discovery.py b/contrib/python/setuptools/py3/setuptools/discovery.py
index 25962863b9..50a948750f 100644
--- a/contrib/python/setuptools/py3/setuptools/discovery.py
+++ b/contrib/python/setuptools/py3/setuptools/discovery.py
@@ -485,7 +485,7 @@ class ConfigDiscovery:
"""
if self.dist.metadata.name or self.dist.name:
# get_name() is not reliable (can return "UNKNOWN")
- return None
+ return
log.debug("No `name` configuration, performing automatic discovery")
diff --git a/contrib/python/setuptools/py3/setuptools/dist.py b/contrib/python/setuptools/py3/setuptools/dist.py
index 222e8a7623..d5787ed474 100644
--- a/contrib/python/setuptools/py3/setuptools/dist.py
+++ b/contrib/python/setuptools/py3/setuptools/dist.py
@@ -87,7 +87,7 @@ def check_nsp(dist, attr, value):
SetuptoolsDeprecationWarning.emit(
"The namespace_packages parameter is deprecated.",
"Please replace its usage with implicit namespaces (PEP 420).",
- see_docs="references/keywords.html#keyword-namespace-packages"
+ see_docs="references/keywords.html#keyword-namespace-packages",
# TODO: define due_date, it may break old packages that are no longer
# maintained (e.g. sphinxcontrib extensions) when installed from source.
# Warning officially introduced in May 2022, however the deprecation
@@ -778,6 +778,8 @@ class Distribution(_Distribution):
if p == package or p.startswith(pfx):
return True
+ return False
+
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
@@ -912,11 +914,9 @@ class Distribution(_Distribution):
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
- for pkg in self.packages or ():
- yield pkg
+ yield from self.packages or ()
- for module in self.py_modules or ():
- yield module
+ yield from self.py_modules or ()
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
diff --git a/contrib/python/setuptools/py3/setuptools/glob.py b/contrib/python/setuptools/py3/setuptools/glob.py
index 647b9bc6ed..a184c0b643 100644
--- a/contrib/python/setuptools/py3/setuptools/glob.py
+++ b/contrib/python/setuptools/py3/setuptools/glob.py
@@ -113,8 +113,7 @@ def glob0(dirname, basename):
def glob2(dirname, pattern):
assert _isrecursive(pattern)
yield pattern[:0]
- for x in _rlistdir(dirname):
- yield x
+ yield from _rlistdir(dirname)
# Recursively yields relative pathnames inside a literal directory.
@@ -126,7 +125,7 @@ def _rlistdir(dirname):
dirname = os.curdir
try:
names = os.listdir(dirname)
- except os.error:
+ except OSError:
return
for x in names:
yield x
@@ -160,7 +159,7 @@ def escape(pathname):
# Metacharacters do not work in the drive part and shouldn't be escaped.
drive, pathname = os.path.splitdrive(pathname)
if isinstance(pathname, bytes):
- pathname = magic_check_bytes.sub(br'[\1]', pathname)
+ pathname = magic_check_bytes.sub(rb'[\1]', pathname)
else:
pathname = magic_check.sub(r'[\1]', pathname)
return drive + pathname
diff --git a/contrib/python/setuptools/py3/setuptools/installer.py b/contrib/python/setuptools/py3/setuptools/installer.py
index e83f959a1b..a6aff723c2 100644
--- a/contrib/python/setuptools/py3/setuptools/installer.py
+++ b/contrib/python/setuptools/py3/setuptools/installer.py
@@ -107,10 +107,9 @@ def _fetch_build_egg_no_warn(dist, req): # noqa: C901 # is too complex (16) #
dist_metadata = pkg_resources.PathMetadata(
dist_location, os.path.join(dist_location, 'EGG-INFO')
)
- dist = pkg_resources.Distribution.from_filename(
+ return pkg_resources.Distribution.from_filename(
dist_location, metadata=dist_metadata
)
- return dist
def strip_marker(req):
diff --git a/contrib/python/setuptools/py3/setuptools/monkey.py b/contrib/python/setuptools/py3/setuptools/monkey.py
index 6c8a2f12f6..da0993506c 100644
--- a/contrib/python/setuptools/py3/setuptools/monkey.py
+++ b/contrib/python/setuptools/py3/setuptools/monkey.py
@@ -66,21 +66,6 @@ def patch_all():
# we can't patch distutils.cmd, alas
distutils.core.Command = setuptools.Command
- has_issue_12885 = sys.version_info <= (3, 5, 3)
-
- if has_issue_12885:
- # fix findall bug in distutils (https://bugs.python.org/issue12885)
- distutils.filelist.findall = setuptools.findall
-
- needs_warehouse = (3, 4) < sys.version_info < (3, 4, 6) or (
- 3,
- 5,
- ) < sys.version_info <= (3, 5, 3)
-
- if needs_warehouse:
- warehouse = 'https://upload.pypi.org/legacy/'
- distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
-
_patch_distribution_metadata()
# Install Distribution throughout the distutils
@@ -130,7 +115,7 @@ def patch_func(replacement, target_mod, func_name):
def get_unpatched_function(candidate):
- return getattr(candidate, 'unpatched')
+ return candidate.unpatched
def patch_for_msvc_specialized_compiler():
@@ -138,8 +123,7 @@ def patch_for_msvc_specialized_compiler():
Patch functions in distutils to use standalone Microsoft Visual C++
compilers.
"""
- # import late to avoid circular imports on Python < 3.5
- msvc = import_module('setuptools.msvc')
+ from . import msvc
if platform.system() != 'Windows':
# Compilers only available on Microsoft Windows
diff --git a/contrib/python/setuptools/py3/setuptools/msvc.py b/contrib/python/setuptools/py3/setuptools/msvc.py
index a910a64b68..53fe7b0de1 100644
--- a/contrib/python/setuptools/py3/setuptools/msvc.py
+++ b/contrib/python/setuptools/py3/setuptools/msvc.py
@@ -12,7 +12,6 @@ This may also support compilers shipped with compatible Visual Studio versions.
"""
import json
-from io import open
from os import listdir, pathsep
from os.path import join, isfile, isdir, dirname
from subprocess import CalledProcessError
@@ -93,21 +92,17 @@ def _msvc14_find_vc2017():
# Workaround for `-requiresAny` (only available on VS 2017 > 15.6)
with contextlib.suppress(CalledProcessError, OSError, UnicodeDecodeError):
path = (
- subprocess.check_output(
- [
- join(
- root, "Microsoft Visual Studio", "Installer", "vswhere.exe"
- ),
- "-latest",
- "-prerelease",
- "-requires",
- component,
- "-property",
- "installationPath",
- "-products",
- "*",
- ]
- )
+ subprocess.check_output([
+ join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
+ "-latest",
+ "-prerelease",
+ "-requires",
+ component,
+ "-property",
+ "installationPath",
+ "-products",
+ "*",
+ ])
.decode(encoding="mbcs", errors="strict")
.strip()
)
@@ -582,6 +577,7 @@ class RegistryInfo:
finally:
if bkey:
closekey(bkey)
+ return None
class SystemInfo:
@@ -694,9 +690,9 @@ class SystemInfo:
listdir(join(vs_path, r'VC\Tools\MSVC'))
# Store version and path
- vs_versions[
- self._as_float_version(state['installationVersion'])
- ] = vs_path
+ vs_versions[self._as_float_version(state['installationVersion'])] = (
+ vs_path
+ )
except (OSError, KeyError):
# Skip if "state.json" file is missing or bad format
@@ -828,6 +824,7 @@ class SystemInfo:
return '8.1', '8.1a'
elif self.vs_ver >= 14.0:
return '10.0', '8.1'
+ return None
@property
def WindowsSdkLastVersion(self):
@@ -919,6 +916,8 @@ class SystemInfo:
if execpath:
return execpath
+ return None
+
@property
def FSharpInstallDir(self):
"""
@@ -951,6 +950,8 @@ class SystemInfo:
if sdkdir:
return sdkdir or ''
+ return None
+
@property
def UniversalCRTSdkLastVersion(self):
"""
diff --git a/contrib/python/setuptools/py3/setuptools/namespaces.py b/contrib/python/setuptools/py3/setuptools/namespaces.py
index 3332f864ae..e8f2941d45 100644
--- a/contrib/python/setuptools/py3/setuptools/namespaces.py
+++ b/contrib/python/setuptools/py3/setuptools/namespaces.py
@@ -42,12 +42,11 @@ class Installer:
_nspkg_tmpl = (
"import sys, types, os",
- "has_mfs = sys.version_info > (3, 5)",
"p = os.path.join(%(root)s, *%(pth)r)",
- "importlib = has_mfs and __import__('importlib.util')",
- "has_mfs and __import__('importlib.machinery')",
+ "importlib = __import__('importlib.util')",
+ "__import__('importlib.machinery')",
(
- "m = has_mfs and "
+ "m = "
"sys.modules.setdefault(%(pkg)r, "
"importlib.util.module_from_spec("
"importlib.machinery.PathFinder.find_spec(%(pkg)r, "
diff --git a/contrib/python/setuptools/py3/setuptools/package_index.py b/contrib/python/setuptools/py3/setuptools/package_index.py
index 3cedd5105c..271aa97f71 100644
--- a/contrib/python/setuptools/py3/setuptools/package_index.py
+++ b/contrib/python/setuptools/py3/setuptools/package_index.py
@@ -112,15 +112,13 @@ def egg_info_for_url(url):
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
- for dist in distros_for_location(url, base, metadata):
- yield dist
+ yield from distros_for_location(url, base, metadata)
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
- for dist in interpret_distro_name(
+ yield from interpret_distro_name(
url, match.group(1), metadata, precedence=CHECKOUT_DIST
- ):
- yield dist
+ )
def distros_for_location(location, basename, metadata=None):
@@ -321,7 +319,7 @@ class PackageIndex(Environment):
try:
parse_version(dist.version)
except Exception:
- return
+ return None
return super().add(dist)
# FIXME: 'PackageIndex.process_url' is too complex (14)
@@ -408,6 +406,7 @@ class PackageIndex(Environment):
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
+ return False
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
@@ -516,7 +515,7 @@ class PackageIndex(Environment):
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
- return super(PackageIndex, self).obtain(requirement, installer)
+ return super().obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
@@ -650,6 +649,8 @@ class PackageIndex(Environment):
if os.path.exists(dist.download_location):
return dist
+ return None
+
if force_scan:
self.prescan()
self.find_packages(requirement)
@@ -673,6 +674,7 @@ class PackageIndex(Environment):
(source and "a source distribution of " or ""),
requirement,
)
+ return None
else:
self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
@@ -1036,6 +1038,7 @@ class PyPIConfig(configparser.RawConfigParser):
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
+ return None
def open_with_auth(url, opener=urllib.request.urlopen):
diff --git a/contrib/python/setuptools/py3/setuptools/sandbox.py b/contrib/python/setuptools/py3/setuptools/sandbox.py
index 017c897b86..7634b1320b 100644
--- a/contrib/python/setuptools/py3/setuptools/sandbox.py
+++ b/contrib/python/setuptools/py3/setuptools/sandbox.py
@@ -115,7 +115,7 @@ class UnpickleableException(Exception):
class ExceptionSaver:
"""
- A Context Manager that will save an exception, serialized, and restore it
+ A Context Manager that will save an exception, serialize, and restore it
later.
"""
@@ -124,7 +124,7 @@ class ExceptionSaver:
def __exit__(self, type, exc, tb):
if not exc:
- return
+ return False
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
@@ -408,23 +408,21 @@ else:
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
- write_ops = dict.fromkeys(
- [
- "open",
- "chmod",
- "chown",
- "mkdir",
- "remove",
- "unlink",
- "rmdir",
- "utime",
- "lchown",
- "chroot",
- "mkfifo",
- "mknod",
- "tempnam",
- ]
- )
+ write_ops = dict.fromkeys([
+ "open",
+ "chmod",
+ "chown",
+ "mkdir",
+ "remove",
+ "unlink",
+ "rmdir",
+ "utime",
+ "lchown",
+ "chroot",
+ "mkfifo",
+ "mknod",
+ "tempnam",
+ ])
_exception_patterns = []
"exempt writing to paths that match the pattern"
diff --git a/contrib/python/setuptools/py3/setuptools/unicode_utils.py b/contrib/python/setuptools/py3/setuptools/unicode_utils.py
index e84e65e3e1..d43dcc11f9 100644
--- a/contrib/python/setuptools/py3/setuptools/unicode_utils.py
+++ b/contrib/python/setuptools/py3/setuptools/unicode_utils.py
@@ -18,7 +18,7 @@ def decompose(path):
def filesys_decode(path):
"""
Ensure that the given path is decoded,
- NONE when no expected encoding works
+ ``None`` when no expected encoding works
"""
if isinstance(path, str):
@@ -33,6 +33,8 @@ def filesys_decode(path):
except UnicodeDecodeError:
continue
+ return None
+
def try_encode(string, enc):
"turn unicode encoding into a functional routine"
diff --git a/contrib/python/setuptools/py3/setuptools/wheel.py b/contrib/python/setuptools/py3/setuptools/wheel.py
index c6eabddc1f..9861b5cf1c 100644
--- a/contrib/python/setuptools/py3/setuptools/wheel.py
+++ b/contrib/python/setuptools/py3/setuptools/wheel.py
@@ -38,7 +38,7 @@ def _get_supported_tags():
def unpack(src_dir, dst_dir):
- '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
+ """Move everything under `src_dir` to `dst_dir`, and delete the former."""
for dirpath, dirnames, filenames in os.walk(src_dir):
subdir = os.path.relpath(dirpath, src_dir)
for f in filenames:
@@ -83,7 +83,7 @@ class Wheel:
setattr(self, k, v)
def tags(self):
- '''List tags (py_version, abi, platform) supported by this wheel.'''
+ """List tags (py_version, abi, platform) supported by this wheel."""
return itertools.product(
self.py_version.split('.'),
self.abi.split('.'),
@@ -91,7 +91,7 @@ class Wheel:
)
def is_compatible(self):
- '''Is the wheel compatible with the current platform?'''
+ """Is the wheel compatible with the current platform?"""
return next((True for t in self.tags() if t in _get_supported_tags()), False)
def egg_name(self):
@@ -115,7 +115,7 @@ class Wheel:
raise ValueError("unsupported wheel format. .dist-info not found")
def install_as_egg(self, destination_eggdir):
- '''Install wheel as an egg directory.'''
+ """Install wheel as an egg directory."""
with zipfile.ZipFile(self.filename) as zf:
self._install_as_egg(destination_eggdir, zf)
diff --git a/contrib/python/setuptools/py3/ya.make b/contrib/python/setuptools/py3/ya.make
index 2cb2a8b26c..564d268875 100644
--- a/contrib/python/setuptools/py3/ya.make
+++ b/contrib/python/setuptools/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(69.0.3)
+VERSION(69.1.0)
LICENSE(MIT)
@@ -213,6 +213,9 @@ PY_SRCS(
setuptools/command/test.py
setuptools/command/upload.py
setuptools/command/upload_docs.py
+ setuptools/compat/__init__.py
+ setuptools/compat/py310.py
+ setuptools/compat/py311.py
setuptools/config/__init__.py
setuptools/config/_apply_pyprojecttoml.py
setuptools/config/_validate_pyproject/__init__.py
@@ -240,7 +243,6 @@ PY_SRCS(
setuptools/msvc.py
setuptools/namespaces.py
setuptools/package_index.py
- setuptools/py312compat.py
setuptools/sandbox.py
setuptools/unicode_utils.py
setuptools/version.py
diff --git a/contrib/python/types-protobuf/ya.make b/contrib/python/types-protobuf/ya.make
index 6632bf3426..9ba2128ebd 100644
--- a/contrib/python/types-protobuf/ya.make
+++ b/contrib/python/types-protobuf/ya.make
@@ -11,16 +11,9 @@ NO_LINT()
PY_SRCS(
TOP_LEVEL
google-stubs/protobuf/__init__.pyi
- google-stubs/protobuf/any_pb2.pyi
- google-stubs/protobuf/api_pb2.pyi
google-stubs/protobuf/compiler/__init__.pyi
- google-stubs/protobuf/compiler/plugin_pb2.pyi
google-stubs/protobuf/descriptor.pyi
- google-stubs/protobuf/descriptor_pb2.pyi
google-stubs/protobuf/descriptor_pool.pyi
- google-stubs/protobuf/duration_pb2.pyi
- google-stubs/protobuf/empty_pb2.pyi
- google-stubs/protobuf/field_mask_pb2.pyi
google-stubs/protobuf/internal/__init__.pyi
google-stubs/protobuf/internal/api_implementation.pyi
google-stubs/protobuf/internal/builder.pyi
@@ -39,14 +32,9 @@ PY_SRCS(
google-stubs/protobuf/message_factory.pyi
google-stubs/protobuf/reflection.pyi
google-stubs/protobuf/service.pyi
- google-stubs/protobuf/source_context_pb2.pyi
- google-stubs/protobuf/struct_pb2.pyi
google-stubs/protobuf/symbol_database.pyi
google-stubs/protobuf/text_format.pyi
- google-stubs/protobuf/timestamp_pb2.pyi
- google-stubs/protobuf/type_pb2.pyi
google-stubs/protobuf/util/__init__.pyi
- google-stubs/protobuf/wrappers_pb2.pyi
)
RESOURCE_FILES(