author     rekby <rekby@ydb.tech>  2024-06-18 21:06:39 +0300
committer  rekby <rekby@ydb.tech>  2024-06-18 21:15:13 +0300
commit     d024de4c41a3bc10bc73eadb3b0c5820150c3a3d (patch)
tree       e68319941a17fa4019c94325759efe9ed5148f51 /contrib
parent     3dc640b139e0175239c26d2bf9013c90e106debe (diff)
download   ydb-d024de4c41a3bc10bc73eadb3b0c5820150c3a3d.tar.gz
Change "devtools/contrib/piglet/projects/ydblib/config.yaml"
e790478457ac44c1468389d3b31b067a90df8e4e
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/python/docker/.dist-info/METADATA  122
-rw-r--r--  contrib/python/docker/.dist-info/top_level.txt  1
-rw-r--r--  contrib/python/docker/LICENSE  191
-rw-r--r--  contrib/python/docker/README.md  76
-rw-r--r--  contrib/python/docker/docker/__init__.py  7
-rw-r--r--  contrib/python/docker/docker/_version.py  16
-rw-r--r--  contrib/python/docker/docker/api/__init__.py  1
-rw-r--r--  contrib/python/docker/docker/api/build.py  382
-rw-r--r--  contrib/python/docker/docker/api/client.py  536
-rw-r--r--  contrib/python/docker/docker/api/config.py  92
-rw-r--r--  contrib/python/docker/docker/api/container.py  1348
-rw-r--r--  contrib/python/docker/docker/api/daemon.py  181
-rw-r--r--  contrib/python/docker/docker/api/exec_api.py  176
-rw-r--r--  contrib/python/docker/docker/api/image.py  601
-rw-r--r--  contrib/python/docker/docker/api/network.py  277
-rw-r--r--  contrib/python/docker/docker/api/plugin.py  261
-rw-r--r--  contrib/python/docker/docker/api/secret.py  98
-rw-r--r--  contrib/python/docker/docker/api/service.py  486
-rw-r--r--  contrib/python/docker/docker/api/swarm.py  462
-rw-r--r--  contrib/python/docker/docker/api/volume.py  163
-rw-r--r--  contrib/python/docker/docker/auth.py  378
-rw-r--r--  contrib/python/docker/docker/client.py  222
-rw-r--r--  contrib/python/docker/docker/constants.py  45
-rw-r--r--  contrib/python/docker/docker/context/__init__.py  2
-rw-r--r--  contrib/python/docker/docker/context/api.py  206
-rw-r--r--  contrib/python/docker/docker/context/config.py  81
-rw-r--r--  contrib/python/docker/docker/context/context.py  249
-rw-r--r--  contrib/python/docker/docker/credentials/__init__.py  8
-rw-r--r--  contrib/python/docker/docker/credentials/constants.py  4
-rw-r--r--  contrib/python/docker/docker/credentials/errors.py  17
-rw-r--r--  contrib/python/docker/docker/credentials/store.py  93
-rw-r--r--  contrib/python/docker/docker/credentials/utils.py  10
-rw-r--r--  contrib/python/docker/docker/errors.py  209
-rw-r--r--  contrib/python/docker/docker/models/__init__.py  0
-rw-r--r--  contrib/python/docker/docker/models/configs.py  70
-rw-r--r--  contrib/python/docker/docker/models/containers.py  1197
-rw-r--r--  contrib/python/docker/docker/models/images.py  505
-rw-r--r--  contrib/python/docker/docker/models/networks.py  218
-rw-r--r--  contrib/python/docker/docker/models/nodes.py  107
-rw-r--r--  contrib/python/docker/docker/models/plugins.py  206
-rw-r--r--  contrib/python/docker/docker/models/resource.py  92
-rw-r--r--  contrib/python/docker/docker/models/secrets.py  70
-rw-r--r--  contrib/python/docker/docker/models/services.py  390
-rw-r--r--  contrib/python/docker/docker/models/swarm.py  190
-rw-r--r--  contrib/python/docker/docker/models/volumes.py  99
-rw-r--r--  contrib/python/docker/docker/tls.py  67
-rw-r--r--  contrib/python/docker/docker/transport/__init__.py  12
-rw-r--r--  contrib/python/docker/docker/transport/basehttpadapter.py  13
-rw-r--r--  contrib/python/docker/docker/transport/npipeconn.py  102
-rw-r--r--  contrib/python/docker/docker/transport/npipesocket.py  230
-rw-r--r--  contrib/python/docker/docker/transport/sshconn.py  250
-rw-r--r--  contrib/python/docker/docker/transport/unixconn.py  86
-rw-r--r--  contrib/python/docker/docker/types/__init__.py  24
-rw-r--r--  contrib/python/docker/docker/types/base.py  4
-rw-r--r--  contrib/python/docker/docker/types/containers.py  790
-rw-r--r--  contrib/python/docker/docker/types/daemon.py  71
-rw-r--r--  contrib/python/docker/docker/types/healthcheck.py  88
-rw-r--r--  contrib/python/docker/docker/types/networks.py  128
-rw-r--r--  contrib/python/docker/docker/types/services.py  867
-rw-r--r--  contrib/python/docker/docker/types/swarm.py  119
-rw-r--r--  contrib/python/docker/docker/utils/__init__.py  28
-rw-r--r--  contrib/python/docker/docker/utils/build.py  260
-rw-r--r--  contrib/python/docker/docker/utils/config.py  66
-rw-r--r--  contrib/python/docker/docker/utils/decorators.py  45
-rw-r--r--  contrib/python/docker/docker/utils/fnmatch.py  115
-rw-r--r--  contrib/python/docker/docker/utils/json_stream.py  74
-rw-r--r--  contrib/python/docker/docker/utils/ports.py  83
-rw-r--r--  contrib/python/docker/docker/utils/proxy.py  77
-rw-r--r--  contrib/python/docker/docker/utils/socket.py  187
-rw-r--r--  contrib/python/docker/docker/utils/utils.py  517
-rw-r--r--  contrib/python/docker/docker/version.py  8
-rw-r--r--  contrib/python/docker/ya.make  99
72 files changed, 14555 insertions, 0 deletions
diff --git a/contrib/python/docker/.dist-info/METADATA b/contrib/python/docker/.dist-info/METADATA
new file mode 100644
index 0000000000..90e41721a6
--- /dev/null
+++ b/contrib/python/docker/.dist-info/METADATA
@@ -0,0 +1,122 @@
+Metadata-Version: 2.3
+Name: docker
+Version: 7.1.0
+Summary: A Python library for the Docker Engine API.
+Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Project-URL: Documentation, https://docker-py.readthedocs.io
+Project-URL: Homepage, https://github.com/docker/docker-py
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
+Maintainer-email: "Docker Inc." <no-reply@docker.com>
+License-Expression: Apache-2.0
+License-File: LICENSE
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development
+Classifier: Topic :: Utilities
+Requires-Python: >=3.8
+Requires-Dist: pywin32>=304; sys_platform == 'win32'
+Requires-Dist: requests>=2.26.0
+Requires-Dist: urllib3>=1.26.0
+Provides-Extra: dev
+Requires-Dist: coverage==7.2.7; extra == 'dev'
+Requires-Dist: pytest-cov==4.1.0; extra == 'dev'
+Requires-Dist: pytest-timeout==2.1.0; extra == 'dev'
+Requires-Dist: pytest==7.4.2; extra == 'dev'
+Requires-Dist: ruff==0.1.8; extra == 'dev'
+Provides-Extra: docs
+Requires-Dist: myst-parser==0.18.0; extra == 'docs'
+Requires-Dist: sphinx==5.1.1; extra == 'docs'
+Provides-Extra: ssh
+Requires-Dist: paramiko>=2.4.3; extra == 'ssh'
+Provides-Extra: tls
+Provides-Extra: websockets
+Requires-Dist: websocket-client>=1.3.0; extra == 'websockets'
+Description-Content-Type: text/markdown
+
+# Docker SDK for Python
+
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml)
+
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
+
+## Installation
+
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip:
+
+ pip install docker
+
+> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
+> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
+
+## Usage
+
+Connect to Docker using the default socket or the configuration in your environment:
+
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu:latest", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+... print(line.strip())
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
diff --git a/contrib/python/docker/.dist-info/top_level.txt b/contrib/python/docker/.dist-info/top_level.txt
new file mode 100644
index 0000000000..bdb9670965
--- /dev/null
+++ b/contrib/python/docker/.dist-info/top_level.txt
@@ -0,0 +1 @@
+docker
diff --git a/contrib/python/docker/LICENSE b/contrib/python/docker/LICENSE
new file mode 100644
index 0000000000..75191a4dc7
--- /dev/null
+++ b/contrib/python/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/python/docker/README.md b/contrib/python/docker/README.md
new file mode 100644
index 0000000000..a6e06a229f
--- /dev/null
+++ b/contrib/python/docker/README.md
@@ -0,0 +1,76 @@
+# Docker SDK for Python
+
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml)
+
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
+
+## Installation
+
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip:
+
+ pip install docker
+
+> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
+> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
+
+## Usage
+
+Connect to Docker using the default socket or the configuration in your environment:
+
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu:latest", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+... print(line.strip())
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
diff --git a/contrib/python/docker/docker/__init__.py b/contrib/python/docker/docker/__init__.py
new file mode 100644
index 0000000000..fb7a5e921a
--- /dev/null
+++ b/contrib/python/docker/docker/__init__.py
@@ -0,0 +1,7 @@
+from .api import APIClient
+from .client import DockerClient, from_env
+from .context import Context, ContextAPI
+from .tls import TLSConfig
+from .version import __version__
+
+__title__ = 'docker'
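
> The package root re-exports both client entry points. A minimal sketch of the two, assuming a local daemon on the default Unix socket:

```python
import docker

# High-level client: reads DOCKER_HOST and related settings from the environment.
client = docker.from_env()

# Low-level client: pin the endpoint explicitly (the socket path is an assumption).
api = docker.APIClient(base_url='unix://var/run/docker.sock')

print(client.version()['Version'], api.api_version)
```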
diff --git a/contrib/python/docker/docker/_version.py b/contrib/python/docker/docker/_version.py
new file mode 100644
index 0000000000..32913e53d5
--- /dev/null
+++ b/contrib/python/docker/docker/_version.py
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+ from typing import Tuple, Union
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+ VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '7.1.0'
+__version_tuple__ = version_tuple = (7, 1, 0)
diff --git a/contrib/python/docker/docker/api/__init__.py b/contrib/python/docker/docker/api/__init__.py
new file mode 100644
index 0000000000..7260e9537e
--- /dev/null
+++ b/contrib/python/docker/docker/api/__init__.py
@@ -0,0 +1 @@
+from .client import APIClient
diff --git a/contrib/python/docker/docker/api/build.py b/contrib/python/docker/docker/api/build.py
new file mode 100644
index 0000000000..47216a58fd
--- /dev/null
+++ b/contrib/python/docker/docker/api/build.py
@@ -0,0 +1,382 @@
+import json
+import logging
+import os
+import random
+
+from .. import auth, constants, errors, utils
+
+log = logging.getLogger(__name__)
+
+
+class BuildApiMixin:
+ def build(self, path=None, tag=None, quiet=False, fileobj=None,
+ nocache=False, rm=False, timeout=None,
+ custom_context=False, encoding=None, pull=False,
+ forcerm=False, dockerfile=None, container_limits=None,
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None, cache_from=None, target=None, network_mode=None,
+ squash=None, extra_hosts=None, platform=None, isolation=None,
+ use_config_proxy=True):
+ """
+ Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
+ needs to be set. ``path`` can be a local path (to a directory
+ containing a Dockerfile) or a remote URL. ``fileobj`` must be a
+ readable file-like object to a Dockerfile.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is also
+ compressed, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ Example:
+ >>> from io import BytesIO
+ >>> from docker import APIClient
+ >>> dockerfile = '''
+ ... # Shared Volume
+ ... FROM busybox:buildroot-2014.02
+ ... VOLUME /data
+ ... CMD ["/bin/sh"]
+ ... '''
+ >>> f = BytesIO(dockerfile.encode('utf-8'))
+ >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
+ >>> response = [line for line in cli.build(
+ ... fileobj=f, rm=True, tag='yourname/volume'
+ ... )]
+ >>> response
+ ['{"stream":" ---\\u003e a9eb17255234\\n"}',
+ '{"stream":"Step 1 : VOLUME /data\\n"}',
+ '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
+ '{"stream":" ---\\u003e 713bca62012e\\n"}',
+ '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
+ '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
+ '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
+ '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
+ '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
+ '{"stream":"Successfully built 032b8b2855fc\\n"}']
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ gzip (bool): If set to ``True``, gzip compression/encoding is used
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (:py:class:`list`): A list of images used for build
+ cache resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
+
+ Returns:
+ A generator for the build output.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
+ """
+ remote = context = None
+ headers = {}
+ container_limits = container_limits or {}
+ buildargs = buildargs or {}
+ if path is None and fileobj is None:
+ raise TypeError("Either path or fileobj needs to be provided.")
+ if gzip and encoding is not None:
+ raise errors.DockerException(
+ 'Can not use custom encoding if gzip is enabled'
+ )
+ if tag is not None:
+ if not utils.match_tag(tag):
+ raise errors.DockerException(
+ f"invalid tag '{tag}': invalid reference format"
+ )
+ for key in container_limits.keys():
+ if key not in constants.CONTAINER_LIMITS_KEYS:
+ raise errors.DockerException(
+ f"invalid tag '{tag}': invalid reference format"
+ )
+ if custom_context:
+ if not fileobj:
+ raise TypeError("You must specify fileobj with custom_context")
+ context = fileobj
+ elif fileobj is not None:
+ context = utils.mkbuildcontext(fileobj)
+ elif path.startswith(('http://', 'https://',
+ 'git://', 'github.com/', 'git@')):
+ remote = path
+ elif not os.path.isdir(path):
+ raise TypeError("You must specify a directory to build in path")
+ else:
+ dockerignore = os.path.join(path, '.dockerignore')
+ exclude = None
+ if os.path.exists(dockerignore):
+ with open(dockerignore) as f:
+ exclude = list(filter(
+ lambda x: x != '' and x[0] != '#',
+ [line.strip() for line in f.read().splitlines()]
+ ))
+ dockerfile = process_dockerfile(dockerfile, path)
+ context = utils.tar(
+ path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
+ )
+ encoding = 'gzip' if gzip else encoding
+
+ u = self._url('/build')
+ params = {
+ 't': tag,
+ 'remote': remote,
+ 'q': quiet,
+ 'nocache': nocache,
+ 'rm': rm,
+ 'forcerm': forcerm,
+ 'pull': pull,
+ 'dockerfile': dockerfile,
+ }
+ params.update(container_limits)
+
+ if use_config_proxy:
+ proxy_args = self._proxy_configs.get_environment()
+ for k, v in proxy_args.items():
+ buildargs.setdefault(k, v)
+ if buildargs:
+ params.update({'buildargs': json.dumps(buildargs)})
+
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
+ if cache_from:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'cachefrom': json.dumps(cache_from)})
+ else:
+ raise errors.InvalidVersion(
+ 'cache_from was only introduced in API version 1.25'
+ )
+
+ if target:
+ if utils.version_gte(self._version, '1.29'):
+ params.update({'target': target})
+ else:
+ raise errors.InvalidVersion(
+ 'target was only introduced in API version 1.29'
+ )
+
+ if network_mode:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'networkmode': network_mode})
+ else:
+ raise errors.InvalidVersion(
+ 'network_mode was only introduced in API version 1.25'
+ )
+
+ if squash:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'squash': squash})
+ else:
+ raise errors.InvalidVersion(
+ 'squash was only introduced in API version 1.25'
+ )
+
+ if extra_hosts is not None:
+ if utils.version_lt(self._version, '1.27'):
+ raise errors.InvalidVersion(
+ 'extra_hosts was only introduced in API version 1.27'
+ )
+
+ if isinstance(extra_hosts, dict):
+ extra_hosts = utils.format_extra_hosts(extra_hosts)
+ params.update({'extrahosts': extra_hosts})
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
+
+ if isolation is not None:
+ if utils.version_lt(self._version, '1.24'):
+ raise errors.InvalidVersion(
+ 'isolation was only introduced in API version 1.24'
+ )
+ params['isolation'] = isolation
+
+ if context is not None:
+ headers = {'Content-Type': 'application/tar'}
+ if encoding:
+ headers['Content-Encoding'] = encoding
+
+ self._set_auth_headers(headers)
+
+ response = self._post(
+ u,
+ data=context,
+ params=params,
+ headers=headers,
+ stream=True,
+ timeout=timeout,
+ )
+
+ if context is not None and not custom_context:
+ context.close()
+
+ return self._stream_helper(response, decode=decode)
+
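
> A sketch of the ``custom_context`` path described in the docstring above, feeding ``build()`` a pre-made in-memory tar; the tag and Dockerfile content are illustrative:

```python
import io
import tarfile

import docker

# Build context as an in-memory tar containing a single Dockerfile.
dockerfile = b'FROM busybox\nCMD ["true"]\n'
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w') as tar:
    info = tarfile.TarInfo(name='Dockerfile')
    info.size = len(dockerfile)
    tar.addfile(info, io.BytesIO(dockerfile))
buf.seek(0)

cli = docker.APIClient(base_url='unix://var/run/docker.sock')
# custom_context=True tells build() the fileobj is already a tar archive.
for chunk in cli.build(fileobj=buf, custom_context=True,
                       tag='example/busybox', decode=True):
    print(chunk)
```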
+ @utils.minimum_version('1.31')
+ def prune_builds(self, filters=None, keep_storage=None, all=None):
+ """
+ Delete the builder cache
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+ Needs Docker API v1.39+
+ Available filters:
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
+ - until (str): Can be Unix timestamps, date formatted
+ timestamps, or Go duration strings (e.g. 10m, 1h30m) computed
+ relative to the daemon's local time.
+ keep_storage (int): Amount of disk space in bytes to keep for cache.
+ Needs Docker API v1.39+
+ all (bool): Remove all types of build cache.
+ Needs Docker API v1.39+
+
+ Returns:
+ (dict): A dictionary containing information about the operation's
+ result. The ``SpaceReclaimed`` key indicates the amount of
+ bytes of disk space reclaimed.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/build/prune")
+ if (filters, keep_storage, all) != (None, None, None) \
+ and utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ '`filters`, `keep_storage`, and `all` args are only available '
+ 'for API version > 1.38'
+ )
+ params = {}
+ if filters is not None:
+ params['filters'] = utils.convert_filters(filters)
+ if keep_storage is not None:
+ params['keep-storage'] = keep_storage
+ if all is not None:
+ params['all'] = all
+ return self._result(self._post(url, params=params), True)
+
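
> A short usage sketch for ``prune_builds``, assuming a daemon with API 1.39+; the filter value is illustrative:

```python
import docker

cli = docker.APIClient(base_url='unix://var/run/docker.sock')

# Drop build-cache entries older than 24 hours and report reclaimed space.
result = cli.prune_builds(filters={'until': '24h'})
print(result.get('SpaceReclaimed', 0), 'bytes reclaimed')
```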
+ def _set_auth_headers(self, headers):
+ log.debug('Looking for auth config')
+
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs or self._auth_configs.is_empty:
+ log.debug("No auth config in memory - loading from filesystem")
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ # Send the full auth configuration (if any exists), since the build
+ # could use any (or all) of the registries.
+ if self._auth_configs:
+ auth_data = self._auth_configs.get_all_credentials()
+
+ # See https://github.com/docker/docker-py/issues/1683
+ if (auth.INDEX_URL not in auth_data and
+ auth.INDEX_NAME in auth_data):
+ auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
+
+ log.debug(
+ "Sending auth config (%s)",
+ ', '.join(repr(k) for k in auth_data),
+ )
+
+ if auth_data:
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
+ else:
+ log.debug('No auth config found')
+
+
+def process_dockerfile(dockerfile, path):
+ if not dockerfile:
+ return (None, None)
+
+ abs_dockerfile = dockerfile
+ if not os.path.isabs(dockerfile):
+ abs_dockerfile = os.path.join(path, dockerfile)
+ if constants.IS_WINDOWS_PLATFORM and path.startswith(
+ constants.WINDOWS_LONGPATH_PREFIX):
+ normpath = os.path.normpath(
+ abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):])
+ abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}'
+ if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+ os.path.relpath(abs_dockerfile, path).startswith('..')):
+ # Dockerfile not in context - read data to insert into tar later
+ with open(abs_dockerfile) as df:
+ return (
+ f'.dockerfile.{random.getrandbits(160):x}',
+ df.read()
+ )
+
+ # Dockerfile is inside the context - return path relative to context root
+ if dockerfile == abs_dockerfile:
+ # Only calculate relpath if necessary to avoid errors
+ # on Windows client -> Linux Docker
+ # see https://github.com/docker/compose/issues/5969
+ dockerfile = os.path.relpath(abs_dockerfile, path)
+ return (dockerfile, None)
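
> To make the two return shapes of ``process_dockerfile`` concrete, a sketch under assumed POSIX temp paths:

```python
import os
import tempfile

from docker.api.build import process_dockerfile

ctx = tempfile.mkdtemp()
os.makedirs(os.path.join(ctx, 'docker'))
with open(os.path.join(ctx, 'docker', 'Dockerfile.dev'), 'w') as f:
    f.write('FROM busybox\n')

# Inside the context: returned as a path relative to the context root.
print(process_dockerfile('docker/Dockerfile.dev', ctx))
# -> ('docker/Dockerfile.dev', None)

outside = os.path.join(tempfile.mkdtemp(), 'Dockerfile')
with open(outside, 'w') as f:
    f.write('FROM busybox\n')

# Outside the context: a generated in-archive name plus the file contents,
# which build() later injects into the context tar.
name, contents = process_dockerfile(outside, ctx)
print(name)  # e.g. '.dockerfile.<40 hex digits>'
```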
diff --git a/contrib/python/docker/docker/api/client.py b/contrib/python/docker/docker/api/client.py
new file mode 100644
index 0000000000..45f68bfbf8
--- /dev/null
+++ b/contrib/python/docker/docker/api/client.py
@@ -0,0 +1,536 @@
+import json
+import struct
+import urllib
+from functools import partial
+
+import requests
+import requests.adapters
+import requests.exceptions
+
+from .. import auth
+from ..constants import (
+ DEFAULT_DOCKER_API_VERSION,
+ DEFAULT_MAX_POOL_SIZE,
+ DEFAULT_NUM_POOLS,
+ DEFAULT_NUM_POOLS_SSH,
+ DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_USER_AGENT,
+ IS_WINDOWS_PLATFORM,
+ MINIMUM_DOCKER_API_VERSION,
+ STREAM_HEADER_SIZE_BYTES,
+)
+from ..errors import (
+ DockerException,
+ InvalidVersion,
+ TLSParameterError,
+ create_api_error_from_http_exception,
+)
+from ..tls import TLSConfig
+from ..transport import UnixHTTPAdapter
+from ..utils import check_resource, config, update_headers, utils
+from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
+from .build import BuildApiMixin
+from .config import ConfigApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .network import NetworkApiMixin
+from .plugin import PluginApiMixin
+from .secret import SecretApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
+
+try:
+ from ..transport import NpipeHTTPAdapter
+except ImportError:
+ pass
+
+try:
+ from ..transport import SSHHTTPAdapter
+except ImportError:
+ pass
+
+
+class APIClient(
+ requests.Session,
+ BuildApiMixin,
+ ConfigApiMixin,
+ ContainerApiMixin,
+ DaemonApiMixin,
+ ExecApiMixin,
+ ImageApiMixin,
+ NetworkApiMixin,
+ PluginApiMixin,
+ SecretApiMixin,
+ ServiceApiMixin,
+ SwarmApiMixin,
+ VolumeApiMixin):
+ """
+ A low-level client for the Docker Engine API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.33',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
+ u'GitCommit': u'f4ffd2511c',
+ u'GoVersion': u'go1.9.2',
+ u'KernelVersion': u'4.14.3-1-ARCH',
+ u'MinAPIVersion': u'1.12',
+ u'Os': u'linux',
+ u'Version': u'17.10.0-ce'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
+ """
+
+ __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
+ '_general_configs',
+ '_version',
+ 'base_url',
+ 'timeout']
+
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=None,
+ credstore_env=None, use_ssh_client=False,
+ max_pool_size=DEFAULT_MAX_POOL_SIZE):
+ super().__init__()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._general_configs = config.load_general_config()
+
+ proxy_config = self._general_configs.get('proxies', {})
+ try:
+ proxies = proxy_config[base_url]
+ except KeyError:
+ proxies = proxy_config.get('default', {})
+
+ self._proxy_configs = ProxyConfig.from_dict(proxies)
+
+ self._auth_configs = auth.load_config(
+ config_dict=self._general_configs, credstore_env=credstore_env,
+ )
+ self.credstore_env = credstore_env
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+ # SSH has a different default for num_pools to all other adapters
+ num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
+ base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
+
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ # host part of URL should be unused, but is resolved by requests
+ # module in proxy_bypass_macosx_sysconf()
+ self.base_url = 'http+docker://localhost'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ try:
+ self._custom_adapter = NpipeHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
+ )
+ except NameError as err:
+ raise DockerException(
+ 'Install pypiwin32 package to enable npipe:// support'
+ ) from err
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ elif base_url.startswith('ssh://'):
+ try:
+ self._custom_adapter = SSHHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size, shell_out=use_ssh_client
+ )
+ except NameError as err:
+ raise DockerException(
+ 'Install paramiko package to enable ssh:// support'
+ ) from err
+ self.mount('http+docker://ssh', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://ssh'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = requests.adapters.HTTPAdapter(
+ pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None or (isinstance(
+ version,
+ str
+ ) and version.lower() == 'auto'):
+ try:
+ self._version = self._retrieve_server_version()
+ except Exception:
+ self._version = DEFAULT_DOCKER_API_VERSION
+ else:
+ self._version = version
+ if not isinstance(self._version, str):
+ raise DockerException(
+ 'Version parameter must be a string or None. '
+ f'Found {type(version).__name__}'
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ raise InvalidVersion(
+ f'API versions below {MINIMUM_DOCKER_API_VERSION} are '
+ f'no longer supported by this library.'
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError as ke:
+ raise DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ ) from ke
+ except Exception as e:
+ raise DockerException(
+ f'Error while fetching server API version: {e}'
+ ) from e
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, str):
+ raise ValueError(
+ f'Expected a string but found {arg} ({type(arg)}) instead'
+ )
+
+ quote_f = partial(urllib.parse.quote, safe="/:")
+ args = map(quote_f, args)
+
+ formatted_path = pathfmt.format(*args)
+ if kwargs.get('versioned_api', True):
+ return f'{self.base_url}/v{self._version}{formatted_path}'
+ else:
+ return f'{self.base_url}{formatted_path}'
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise create_api_error_from_http_exception(e) from e
+
+ def _result(self, response, json=False, binary=False):
+ assert not (json and binary)
+ self._raise_for_status(response)
+
+ if json:
+ return response.json()
+ if binary:
+ return response.content
+ return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None and isinstance(data, dict):
+ for k, v in iter(data.items()):
+ if v is not None:
+ data2[k] = v
+ elif data is not None:
+ data2 = data
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ @check_resource('container')
+ def _attach_websocket(self, container, params=None):
+ url = self._url("/containers/{0}/attach/ws", container)
+ req = requests.Request("POST", url, params=self._attach_params(params))
+ full_url = req.prepare().url
+ full_url = full_url.replace("http://", "ws://", 1)
+ full_url = full_url.replace("https://", "wss://", 1)
+ return self._create_websocket_connection(full_url)
+
+ def _create_websocket_connection(self, url):
+ try:
+ import websocket
+ return websocket.create_connection(url)
+ except ImportError as ie:
+ raise DockerException(
+ 'The `websocket-client` library is required '
+ 'for using websocket connections. '
+ 'You can install the `docker` library '
+ 'with the [websocket] extra to install it.'
+ ) from ie
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif self.base_url.startswith('http+docker://ssh'):
+ sock = response.raw._fp.fp.channel
+ else:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+
+ if response.raw._fp.chunked:
+ if decode:
+ yield from json_stream(self._stream_helper(response, False))
+ else:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ buf_length = len(buf)
+ walker = 0
+ while True:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
+ break
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ _, length = struct.unpack_from('>BxxxL', header)
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
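
> The ``>BxxxL`` header used above is Docker's stream-multiplexing frame: one byte of stream id (0 stdin, 1 stdout, 2 stderr), three padding bytes, then a big-endian payload length. A worked sketch:

```python
import struct

# Hand-built frame: stream id 1 (stdout), 3 pad bytes, length 5, payload "hello".
frame = b'\x01\x00\x00\x00' + struct.pack('>L', 5) + b'hello'

# Unpack the 8-byte header, then slice out the payload it announces.
stream_id, length = struct.unpack_from('>BxxxL', frame)
payload = frame[8:8 + length]
assert (stream_id, payload) == (1, b'hello')
```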
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ _, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result(self, response, chunk_size=1, decode=True):
+ ''' Stream result for TTY-enabled container and raw binary data'''
+ self._raise_for_status(response)
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ yield from response.iter_content(chunk_size, decode)
+
+ def _read_from_socket(self, response, stream, tty=True, demux=False):
+ """Consume all data from the socket, close the response and return the
+ data. If stream=True, then a generator is returned instead and the
+ caller is responsible for closing the response.
+ """
+ socket = self._get_raw_response_socket(response)
+
+ gen = frames_iter(socket, tty)
+
+ if demux:
+ # The generator will output tuples (stdout, stderr)
+ gen = (demux_adaptor(*frame) for frame in gen)
+ else:
+ # The generator will output strings
+ gen = (data for (_, data) in gen)
+
+ if stream:
+ return gen
+ else:
+ try:
+ # Wait for all frames, concatenate them, and return the result
+ return consume_socket_output(gen, demux=demux)
+ finally:
+ response.close()
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ @check_resource('container')
+ def _check_is_tty(self, container):
+ cont = self.inspect_container(container)
+ return cont['Config']['Tty']
+
+ def _get_result(self, container, stream, res):
+ return self._get_result_tty(stream, res, self._check_is_tty(container))
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = b''
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ list(self._multiplexed_buffer_helper(res))
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super().get_adapter(url)
+ except requests.exceptions.InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
+
+ def reload_config(self, dockercfg_path=None):
+ """
+ Force a reload of the auth configuration
+
+ Args:
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ None
+ """
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
diff --git a/contrib/python/docker/docker/api/config.py b/contrib/python/docker/docker/api/config.py
new file mode 100644
index 0000000000..88c367ec34
--- /dev/null
+++ b/contrib/python/docker/docker/api/config.py
@@ -0,0 +1,92 @@
+import base64
+
+from .. import utils
+
+
+class ConfigApiMixin:
+ @utils.minimum_version('1.30')
+ def create_config(self, name, data, labels=None, templating=None):
+ """
+ Create a config
+
+ Args:
+ name (string): Name of the config
+ data (bytes): Config data to be stored
+ labels (dict): A mapping of labels to assign to the config
+ templating (dict): dictionary containing the name of the
+ templating driver to be used expressed as
+ { name: <templating_driver_name>}
+
+ Returns (dict): ID of the newly created config
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels,
+ 'Templating': templating
+ }
+
+ url = self._url('/configs/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
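
> A usage sketch for ``create_config``; the name, payload, and labels are illustrative, and the daemon must be a Swarm manager:

```python
import docker

cli = docker.APIClient(base_url='unix://var/run/docker.sock')

# str payloads are encoded to UTF-8, then base64, before hitting the API.
created = cli.create_config('example-config', b'key=value\n',
                            labels={'app': 'demo'})
print(created['ID'])
```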
+ @utils.minimum_version('1.30')
+ @utils.check_resource('id')
+ def inspect_config(self, id):
+ """
+ Retrieve config metadata
+
+ Args:
+ id (string): Full ID of the config to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.30')
+ @utils.check_resource('id')
+ def remove_config(self, id):
+ """
+ Remove a config
+
+ Args:
+ id (string): Full ID of the config to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.30')
+ def configs(self, filters=None):
+ """
+ List configs
+
+ Args:
+ filters (dict): A map of filters to process on the configs
+ list. Available filters: ``names``
+
+ Returns (list): A list of configs
+ """
+ url = self._url('/configs')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/contrib/python/docker/docker/api/container.py b/contrib/python/docker/docker/api/container.py
new file mode 100644
index 0000000000..d1b870f9c2
--- /dev/null
+++ b/contrib/python/docker/docker/api/container.py
@@ -0,0 +1,1348 @@
+from datetime import datetime
+
+from .. import errors, utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..types import (
+ CancellableStream,
+ ContainerConfig,
+ EndpointConfig,
+ HostConfig,
+ NetworkingConfig,
+)
+
+
+class ContainerApiMixin:
+ @utils.check_resource('container')
+ def attach(self, container, stdout=True, stderr=True,
+ stream=False, logs=False, demux=False):
+ """
+ Attach to a container.
+
+ The ``.logs()`` function is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ container (str): The container to attach to.
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+ demux (bool): Keep stdout and stderr separate.
+
+ Returns:
+ By default, the container's output as a single string (two if
+ ``demux=True``: one for stdout and one for stderr).
+
+ If ``stream=True``, an iterator of output strings. If
+ ``demux=True``, two iterators are returned: one for stdout and one
+ for stderr.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {
+ 'logs': logs and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'stderr': stderr and 1 or 0,
+ 'stream': stream and 1 or 0
+ }
+
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ u = self._url("/containers/{0}/attach", container)
+ response = self._post(u, headers=headers, params=params, stream=True)
+
+ output = self._read_from_socket(
+ response, stream, self._check_is_tty(container), demux=demux)
+
+ if stream:
+ return CancellableStream(output, response)
+ else:
+ return output
+
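
> To illustrate the return shapes documented above, a sketch with an assumed container id:

```python
import docker

cli = docker.APIClient(base_url='unix://var/run/docker.sock')

# Single shot: with demux=True the result is a (stdout, stderr) tuple.
stdout, stderr = cli.attach('45e6d2de7c54', demux=True, logs=True)

# Streaming: an iterator of (stdout, stderr) tuples as output arrives.
for out, err in cli.attach('45e6d2de7c54', stream=True, demux=True):
    if out:
        print(out.decode(), end='')
```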
+ @utils.check_resource('container')
+ def attach_socket(self, container, params=None, ws=False):
+ """
+ Like ``attach``, but returns the underlying socket-like object for the
+ HTTP request.
+
+ Args:
+ container (str): The container to attach to.
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ For ``detachKeys``, ~/.docker/config.json is used by default.
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if params is None:
+ params = {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ if 'detachKeys' not in params \
+ and 'detachKeys' in self._general_configs:
+
+ params['detachKeys'] = self._general_configs['detachKeys']
+
+ if ws:
+ return self._attach_websocket(container, params)
+
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ u = self._url("/containers/{0}/attach", container)
+ return self._get_raw_response_socket(
+ self.post(
+ u, None, params=self._attach_params(params), stream=True,
+ headers=headers
+ )
+ )
+
+ @utils.check_resource('container')
+ def commit(self, container, repository=None, tag=None, message=None,
+ author=None, pause=True, changes=None, conf=None):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ container (str): The container to commit (ID or name)
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ pause (bool): Whether to pause the container before committing
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
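+
+ Example (illustrative sketch; the repository name and tag are
+ placeholders):
+
+ >>> client.api.commit(container, repository='myorg/myimage',
+ ... tag='snapshot', message='checkpoint')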
+ """
+ params = {
+ 'container': container,
+ 'repo': repository,
+ 'tag': tag,
+ 'comment': message,
+ 'author': author,
+ 'pause': pause,
+ 'changes': changes
+ }
+ u = self._url("/commit")
+ return self._result(
+ self._post_json(u, data=conf, params=params), json=True
+ )
+
+ def containers(self, quiet=False, all=False, trunc=False, latest=False,
+ since=None, before=None, limit=-1, size=False,
+ filters=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ quiet (bool): Only display numeric Ids
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ trunc (bool): Truncate output
+ latest (bool): Show only the latest created container, including
+ non-running ones.
+ since (str): Show only containers created since Id or Name,
+ including non-running ones
+ before (str): Show only containers created before Id or Name,
+ including non-running ones
+ limit (int): Show `limit` last created containers, including
+ non-running ones
+ size (bool): Display sizes
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ A list of dicts, one per container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
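+
+ Example (illustrative sketch; lists all stopped containers):
+
+ >>> client.api.containers(all=True, filters={'status': 'exited'})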
+ """
+ params = {
+ 'limit': 1 if latest else limit,
+ 'all': 1 if all else 0,
+ 'size': 1 if size else 0,
+ 'trunc_cmd': 1 if trunc else 0,
+ 'since': since,
+ 'before': before
+ }
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ u = self._url("/containers/json")
+ res = self._result(self._get(u, params=params), True)
+
+ if quiet:
+ return [{'Id': x['Id']} for x in res]
+ if trunc:
+ for x in res:
+ x['Id'] = x['Id'][:12]
+ return res
+
+ def create_container(self, image, command=None, hostname=None, user=None,
+ detach=False, stdin_open=False, tty=False, ports=None,
+ environment=None, volumes=None,
+ network_disabled=False, name=None, entrypoint=None,
+ working_dir=None, domainname=None, host_config=None,
+ mac_address=None, labels=None, stop_signal=None,
+ networking_config=None, healthcheck=None,
+ stop_timeout=None, runtime=None,
+ use_config_proxy=True, platform=None):
+ """
+ Creates a container. Parameters are similar to those for the ``docker
+ run`` command, except that it doesn't support the attach options
+ (``-a``).
+
+ The arguments that are passed directly to this function are
+ host-independent configuration options. Host-specific configuration
+ is passed with the `host_config` argument. You'll normally want to
+ use this method in combination with the :py:meth:`create_host_config`
+ method to generate ``host_config``.
+
+ **Port bindings**
+
+ Port binding is done in two parts: first, provide a list of ports to
+ open inside the container with the ``ports`` parameter, then declare
+ bindings with the ``host_config`` parameter. For example:
+
+ .. code-block:: python
+
+ container_id = client.api.create_container(
+ 'busybox', 'ls', ports=[1111, 2222],
+ host_config=client.api.create_host_config(port_bindings={
+ 1111: 4567,
+ 2222: None
+ })
+ )
+
+
+ You can limit the host address on which the port will be exposed like
+ such:
+
+ .. code-block:: python
+
+ client.api.create_host_config(
+ port_bindings={1111: ('127.0.0.1', 4567)}
+ )
+
+ Or without host port assignment:
+
+ .. code-block:: python
+
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+
+ If you wish to use UDP instead of TCP (default), you need to declare
+ ports as such in both the config and host config:
+
+ .. code-block:: python
+
+ container_id = client.api.create_container(
+ 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
+ host_config=client.api.create_host_config(port_bindings={
+ '1111/udp': 4567, 2222: None
+ })
+ )
+
+ To bind multiple host ports to a single container port, use the
+ following syntax:
+
+ .. code-block:: python
+
+ client.api.create_host_config(port_bindings={
+ 1111: [1234, 4567]
+ })
+
+ You can also bind multiple IPs to a single container port:
+
+ .. code-block:: python
+
+ client.api.create_host_config(port_bindings={
+ 1111: [
+ ('192.168.0.100', 1234),
+ ('192.168.0.101', 1234)
+ ]
+ })
+
+ **Using volumes**
+
+ Volume declaration is done in two parts. Provide a list of
+ paths to use as mountpoints inside the container with the
+ ``volumes`` parameter, and declare mappings from paths on the host
+ in the ``host_config`` section.
+
+ .. code-block:: python
+
+ container_id = client.api.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=client.api.create_host_config(binds={
+ '/home/user1/': {
+ 'bind': '/mnt/vol2',
+ 'mode': 'rw',
+ },
+ '/var/www': {
+ 'bind': '/mnt/vol1',
+ 'mode': 'ro',
+ },
+ '/autofs/user1': {
+ 'bind': '/mnt/vol3',
+ 'mode': 'rw',
+ 'propagation': 'shared'
+ }
+ })
+ )
+
+ You can alternatively specify binds as a list. This code is equivalent
+ to the example above:
+
+ .. code-block:: python
+
+ container_id = client.api.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2', '/mnt/vol3'],
+ host_config=client.api.create_host_config(binds=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ '/autofs/user1:/mnt/vol3:rw,shared',
+ ])
+ )
+
+ **Networking**
+
+ You can specify networks to connect the container to by using the
+ ``networking_config`` parameter. At the time of creation, you can
+ only connect a container to a single network, but you
+ can create more connections by using
+ :py:meth:`~connect_container_to_network`.
+
+ For example:
+
+ .. code-block:: python
+
+ networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config(
+ ipv4_address='172.28.0.124',
+ aliases=['foo', 'bar'],
+ links=['container2']
+ )
+ })
+
+ ctnr = client.api.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ Args:
+ image (str): The image to run
+ command (str or list): The command to be run in the container
+ hostname (str): Optional hostname for the container
+ user (str or int): Username or UID
+ detach (bool): Detached mode: run container in the background and
+ return container ID
+ stdin_open (bool): Keep STDIN open even if not attached
+ tty (bool): Allocate a pseudo-TTY
+ ports (list of ints): A list of port numbers
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ volumes (str or list): List of paths inside the container to use
+ as volumes.
+ network_disabled (bool): Disable networking
+ name (str): A name for the container
+ entrypoint (str or list): An entrypoint
+ working_dir (str): Path to the working directory
+ domainname (str): The domain name to use for the container
+ host_config (dict): A dictionary created with
+ :py:meth:`create_host_config`.
+ mac_address (str): The Mac Address to assign the container
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ stop_timeout (int): Timeout to stop the container, in seconds.
+ Default: 10
+ networking_config (dict): A networking configuration generated
+ by :py:meth:`create_networking_config`.
+ runtime (str): Runtime to use with this container.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being created.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+
+ Returns:
+ A dictionary with the created container's ``Id`` key and a
+ ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(volumes, str):
+ volumes = [volumes, ]
+
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
+ if use_config_proxy:
+ environment = self._proxy_configs.inject_proxy_environment(
+ environment
+ ) or None
+
+ config = self.create_container_config(
+ image, command, hostname, user, detach, stdin_open, tty,
+ ports, environment, volumes,
+ network_disabled, entrypoint, working_dir, domainname,
+ host_config, mac_address, labels,
+ stop_signal, networking_config, healthcheck,
+ stop_timeout, runtime
+ )
+ return self.create_container_from_config(config, name, platform)
+
+ def create_container_config(self, *args, **kwargs):
+ return ContainerConfig(self._version, *args, **kwargs)
+
+ def create_container_from_config(self, config, name=None, platform=None):
+ u = self._url("/containers/create")
+ params = {
+ 'name': name
+ }
+ if platform:
+ if utils.version_lt(self._version, '1.41'):
+ raise errors.InvalidVersion(
+ 'platform is not supported for API version < 1.41'
+ )
+ params['platform'] = platform
+ res = self._post_json(u, data=config, params=params)
+ return self._result(res, True)
+
+ def create_host_config(self, *args, **kwargs):
+ """
+ Create a dictionary for the ``host_config`` argument to
+ :py:meth:`create_container`.
+
+ Args:
+ auto_remove (bool): enable auto-removal of the container on daemon
+ side when the container's process exits.
+ binds (dict): Volumes to bind. See :py:meth:`create_container`
+ for more information.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file
+ dns_search (:py:class:`list`): DNS search domains.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: ``None``.
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration
+ lxc_conf (dict): LXC config.
+ mem_limit (float or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+ mem_reservation (float or str): Memory soft limit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``binds``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container on
+ the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ This mode is incompatible with ``port_bindings``.
+
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ port_bindings (dict): See :py:meth:`create_container`
+ for more information.
+ Incompatible with ``host`` in ``network_mode``.
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure``, or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of :py:class:`docker.types.Ulimit` instances.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ runtime (str): Runtime to use with this container.
+
+
+ Returns:
+ (dict) A dictionary which can be passed to the ``host_config``
+ argument to :py:meth:`create_container`.
+
+ Example:
+
+ >>> client.api.create_host_config(
+ ... privileged=True,
+ ... cap_drop=['MKNOD'],
+ ... volumes_from=['nostalgic_newton'],
+ ... )
+ {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
+ 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
+
+"""
+ if not kwargs:
+ kwargs = {}
+ if 'version' in kwargs:
+ raise TypeError(
+ "create_host_config() got an unexpected "
+ "keyword argument 'version'"
+ )
+ kwargs['version'] = self._version
+ return HostConfig(*args, **kwargs)
+
+ def create_networking_config(self, *args, **kwargs):
+ """
+ Create a networking config dictionary to be used as the
+ ``networking_config`` parameter in :py:meth:`create_container`.
+
+ Args:
+ endpoints_config (dict): A dictionary mapping network names to
+ endpoint configurations generated by
+ :py:meth:`create_endpoint_config`.
+
+ Returns:
+ (dict) A networking config.
+
+ Example:
+
+ >>> client.api.create_network('network1')
+ >>> networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config()
+ })
+ >>> container = client.api.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ """
+ return NetworkingConfig(*args, **kwargs)
+
+ def create_endpoint_config(self, *args, **kwargs):
+ """
+ Create an endpoint config dictionary to be used with
+ :py:meth:`create_networking_config`.
+
+ Args:
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (dict): Mapping of links for this endpoint using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to this
+ container using the provided alias. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
+
+ Returns:
+ (dict) An endpoint config.
+
+ Example:
+
+ >>> endpoint_config = client.api.create_endpoint_config(
+ aliases=['web', 'app'],
+ links={'app_db': 'db', 'another': None},
+ ipv4_address='132.65.0.123'
+ )
+
+ """
+ return EndpointConfig(self._version, *args, **kwargs)
+
+ @utils.check_resource('container')
+ def diff(self, container):
+ """
+ Inspect changes on a container's filesystem.
+
+ Args:
+ container (str): The container to diff
+
+ Returns:
+ (list) A list of dictionaries containing the attributes `Path`
+ and `Kind`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(
+ self._get(self._url("/containers/{0}/changes", container)), True
+ )
+
+ @utils.check_resource('container')
+ def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Export the contents of a filesystem as a tar archive.
+
+ Args:
+ container (str): The container to export
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): The archived filesystem data stream
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
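+
+ Example (illustrative sketch; the output path is a placeholder):
+
+ >>> with open('/tmp/rootfs.tar', 'wb') as f:
+ ... for chunk in client.api.export(container):
+ ... f.write(chunk)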
+ """
+ res = self._get(
+ self._url("/containers/{0}/export", container), stream=True
+ )
+ return self._stream_raw_result(res, chunk_size, False)
+
+ @utils.check_resource('container')
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
+ """
+ Retrieve a file or folder from a container in the form of a tar
+ archive.
+
+ Args:
+ container (str): The container where the file is located
+ path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> c = docker.APIClient()
+ >>> f = open('./sh_bin.tar', 'wb')
+ >>> bits, stat = c.api.get_archive(container, '/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
+ """
+ params = {
+ 'path': path
+ }
+ headers = {
+ "Accept-Encoding": "gzip, deflate"
+ } if encode_stream else {
+ "Accept-Encoding": "identity"
+ }
+ url = self._url('/containers/{0}/archive', container)
+ res = self._get(url, params=params, stream=True, headers=headers)
+ self._raise_for_status(res)
+ encoded_stat = res.headers.get('x-docker-container-path-stat')
+ return (
+ self._stream_raw_result(res, chunk_size, False),
+ utils.decode_json_header(encoded_stat) if encoded_stat else None
+ )
+
+ @utils.check_resource('container')
+ def inspect_container(self, container):
+ """
+ Identical to the `docker inspect` command, but only for containers.
+
+ Args:
+ container (str): The container to inspect
+
+ Returns:
+ (dict): Similar to the output of `docker inspect`, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(
+ self._get(self._url("/containers/{0}/json", container)), True
+ )
+
+ @utils.check_resource('container')
+ def kill(self, container, signal=None):
+ """
+ Kill a container or send a signal to a container.
+
+ Args:
+ container (str): The container to kill
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
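+
+ Example (illustrative sketch; sends ``SIGTERM`` instead of the
+ default ``SIGKILL``):
+
+ >>> client.api.kill(container, signal='SIGTERM')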
+ """
+ url = self._url("/containers/{0}/kill", container)
+ params = {}
+ if signal is not None:
+ if not isinstance(signal, str):
+ signal = int(signal)
+ params['signal'] = signal
+ res = self._post(url, params=params)
+
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def logs(self, container, stdout=True, stderr=True, stream=False,
+ timestamps=False, tail='all', since=None, follow=None,
+ until=None):
+ """
+ Get logs from a container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ container (str): The container to get logs from
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
+ tail (str or int): Output specified number of lines at the end of
+ logs. Either an integer number of lines or the string
+ ``all``. Default ``all``
+ since (datetime, int, or float): Show logs since a given datetime,
+ integer epoch (in seconds) or float (in fractional seconds)
+ follow (bool): Follow log output. Default ``False``
+ until (datetime, int, or float): Show logs that occurred before
+ the given datetime, integer epoch (in seconds), or
+ float (in fractional seconds)
+
+ Returns:
+ (generator of bytes or bytes)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
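+
+ Example (illustrative sketch; streams new log lines as they appear):
+
+ >>> for line in client.api.logs(container, stream=True, follow=True):
+ ... print(line)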
+ """
+ if follow is None:
+ follow = stream
+ params = {'stderr': stderr and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'timestamps': timestamps and 1 or 0,
+ 'follow': follow and 1 or 0,
+ }
+ if tail != 'all' and (not isinstance(tail, int) or tail < 0):
+ tail = 'all'
+ params['tail'] = tail
+
+ if since is not None:
+ if isinstance(since, datetime):
+ params['since'] = utils.datetime_to_timestamp(since)
+ elif (isinstance(since, int) and since > 0):
+ params['since'] = since
+ elif (isinstance(since, float) and since > 0.0):
+ params['since'] = since
+ else:
+ raise errors.InvalidArgument(
+ 'since value should be datetime or positive int/float,'
+ f' not {type(since)}'
+ )
+
+ if until is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'until is not supported for API version < 1.35'
+ )
+ if isinstance(until, datetime):
+ params['until'] = utils.datetime_to_timestamp(until)
+ elif (isinstance(until, int) and until > 0):
+ params['until'] = until
+ elif (isinstance(until, float) and until > 0.0):
+ params['until'] = until
+ else:
+ raise errors.InvalidArgument(
+ f'until value should be datetime or positive int/float, '
+ f'not {type(until)}'
+ )
+
+ url = self._url("/containers/{0}/logs", container)
+ res = self._get(url, params=params, stream=stream)
+ output = self._get_result(container, stream, res)
+
+ if stream:
+ return CancellableStream(output, res)
+ else:
+ return output
+
+ @utils.check_resource('container')
+ def pause(self, container):
+ """
+ Pauses all processes within a container.
+
+ Args:
+ container (str): The container to pause
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/containers/{0}/pause', container)
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def port(self, container, private_port):
+ """
+ Look up the public-facing port that is NAT-ed to ``private_port``.
+ Identical to the ``docker port`` command.
+
+ Args:
+ container (str): The container to look up
+ private_port (int): The private port to inspect
+
+ Returns:
+ (list of dict): The mapping for the host ports
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ .. code-block:: bash
+
+ $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
+ 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
+
+ .. code-block:: python
+
+ >>> client.api.port('7174d6347063', 80)
+ [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
+ """
+ res = self._get(self._url("/containers/{0}/json", container))
+ self._raise_for_status(res)
+ json_ = res.json()
+ private_port = str(private_port)
+ h_ports = None
+
+ # Port settings is None when the container is running with
+ # network_mode=host.
+ port_settings = json_.get('NetworkSettings', {}).get('Ports')
+ if port_settings is None:
+ return None
+
+ if '/' in private_port:
+ return port_settings.get(private_port)
+
+ for protocol in ['tcp', 'udp', 'sctp']:
+ h_ports = port_settings.get(f"{private_port}/{protocol}")
+ if h_ports:
+ break
+
+ return h_ports
+
+ @utils.check_resource('container')
+ def put_archive(self, container, path, data):
+ """
+ Insert a file or folder in an existing container using a tar archive as
+ source.
+
+ Args:
+ container (str): The container where the file(s) will be extracted
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes or stream): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
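+
+ Example (illustrative sketch; builds a one-file tar archive in
+ memory and extracts it into ``/tmp`` inside the container):
+
+ >>> import io, tarfile
+ >>> buf = io.BytesIO()
+ >>> with tarfile.open(fileobj=buf, mode='w') as tar:
+ ... info = tarfile.TarInfo(name='hello.txt')
+ ... info.size = 5
+ ... tar.addfile(info, io.BytesIO(b'hello'))
+ >>> client.api.put_archive(container, '/tmp', buf.getvalue())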
+ """
+ params = {'path': path}
+ url = self._url('/containers/{0}/archive', container)
+ res = self._put(url, params=params, data=data)
+ self._raise_for_status(res)
+ return res.status_code == 200
+
+ @utils.minimum_version('1.25')
+ def prune_containers(self, filters=None):
+ """
+ Delete stopped containers
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted container IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/containers/prune')
+ return self._result(self._post(url, params=params), True)
+
+ @utils.check_resource('container')
+ def remove_container(self, container, v=False, link=False, force=False):
+ """
+ Remove a container. Similar to the ``docker rm`` command.
+
+ Args:
+ container (str): The container to remove
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {'v': v, 'link': link, 'force': force}
+ res = self._delete(
+ self._url("/containers/{0}", container), params=params
+ )
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def rename(self, container, name):
+ """
+ Rename a container. Similar to the ``docker rename`` command.
+
+ Args:
+ container (str): ID of the container to rename
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/containers/{0}/rename", container)
+ params = {'name': name}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def resize(self, container, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ container (str or dict): The container to resize
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {'h': height, 'w': width}
+ url = self._url("/containers/{0}/resize", container)
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def restart(self, container, timeout=10):
+ """
+ Restart a container. Similar to the ``docker restart`` command.
+
+ Args:
+ container (str or dict): The container to restart. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {'t': timeout}
+ url = self._url("/containers/{0}/restart", container)
+ conn_timeout = self.timeout
+ if conn_timeout is not None:
+ conn_timeout += timeout
+ res = self._post(url, params=params, timeout=conn_timeout)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def start(self, container, *args, **kwargs):
+ """
+ Start a container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ **Deprecation warning:** Passing configuration options in ``start`` is
+ no longer supported. Users are expected to provide host config options
+ in the ``host_config`` parameter of
+ :py:meth:`~ContainerApiMixin.create_container`.
+
+
+ Args:
+ container (str): The container to start
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.DeprecatedMethod`
+ If any arguments besides ``container`` are provided.
+
+ Example:
+
+ >>> container = client.api.create_container(
+ ... image='busybox:latest',
+ ... command='/bin/sleep 30')
+ >>> client.api.start(container=container.get('Id'))
+ """
+ if args or kwargs:
+ raise errors.DeprecatedMethod(
+ 'Providing configuration in the start() method is no longer '
+ 'supported. Use the host_config param in create_container '
+ 'instead.'
+ )
+ url = self._url("/containers/{0}/start", container)
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def stats(self, container, decode=None, stream=True, one_shot=None):
+ """
+ Stream statistics for a specific container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ container (str): The container to stream statistics from
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+ one_shot (bool): If set to true, only get a single stat instead of
+ waiting for 2 cycles. Must be used with ``stream=False``. False
+ by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
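+ Example (illustrative sketch; fetches a single stats snapshot
+ instead of a continuous stream):
+
+ >>> client.api.stats(container, stream=False)
+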
+ """
+ url = self._url("/containers/{0}/stats", container)
+ params = {
+ 'stream': stream
+ }
+ if one_shot is not None:
+ if utils.version_lt(self._version, '1.41'):
+ raise errors.InvalidVersion(
+ 'one_shot is not supported for API version < 1.41'
+ )
+ params['one-shot'] = one_shot
+ if stream:
+ if one_shot:
+ raise errors.InvalidArgument(
+ 'one_shot is only available in conjunction with '
+ 'stream=False'
+ )
+ return self._stream_helper(
+ self._get(url, stream=True, params=params), decode=decode
+ )
+ else:
+ if decode:
+ raise errors.InvalidArgument(
+ "decode is only available in conjunction with stream=True"
+ )
+ return self._result(self._get(url, params=params), json=True)
+
+ @utils.check_resource('container')
+ def stop(self, container, timeout=None):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ container (str): The container to stop
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. If None, then the
+ StopTimeout value of the container will be used.
+ Default: None
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if timeout is None:
+ params = {}
+ timeout = 10
+ else:
+ params = {'t': timeout}
+ url = self._url("/containers/{0}/stop", container)
+ conn_timeout = self.timeout
+ if conn_timeout is not None:
+ conn_timeout += timeout
+ res = self._post(url, params=params, timeout=conn_timeout)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def top(self, container, ps_args=None):
+ """
+ Display the running processes of a container.
+
+ Args:
+ container (str): The container to inspect
+ ps_args (str): Optional arguments to pass to ``ps`` (e.g. ``aux``)
+
+ Returns:
+ (str): The output of the ``top`` command
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ u = self._url("/containers/{0}/top", container)
+ params = {}
+ if ps_args is not None:
+ params['ps_args'] = ps_args
+ return self._result(self._get(u, params=params), True)
+
+ @utils.check_resource('container')
+ def unpause(self, container):
+ """
+ Unpause all processes within a container.
+
+ Args:
+ container (str): The container to unpause
+ """
+ url = self._url('/containers/{0}/unpause', container)
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @utils.minimum_version('1.22')
+ @utils.check_resource('container')
+ def update_container(
+ self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
+ cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
+ mem_reservation=None, memswap_limit=None, kernel_memory=None,
+ restart_policy=None
+ ):
+ """
+ Update the resource configuration of a container.
+
+ Args:
+ container (str): The container to inspect
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (float or str): Memory limit
+ mem_reservation (float or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
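+
+ Example (illustrative sketch; the memory limits are placeholders):
+
+ >>> client.api.update_container(container, mem_limit='1g',
+ ... memswap_limit='2g')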
+ """
+ url = self._url('/containers/{0}/update', container)
+ data = {}
+ if blkio_weight:
+ data['BlkioWeight'] = blkio_weight
+ if cpu_period:
+ data['CpuPeriod'] = cpu_period
+ if cpu_shares:
+ data['CpuShares'] = cpu_shares
+ if cpu_quota:
+ data['CpuQuota'] = cpu_quota
+ if cpuset_cpus:
+ data['CpusetCpus'] = cpuset_cpus
+ if cpuset_mems:
+ data['CpusetMems'] = cpuset_mems
+ if mem_limit:
+ data['Memory'] = utils.parse_bytes(mem_limit)
+ if mem_reservation:
+ data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
+ if memswap_limit:
+ data['MemorySwap'] = utils.parse_bytes(memswap_limit)
+ if kernel_memory:
+ data['KernelMemory'] = utils.parse_bytes(kernel_memory)
+ if restart_policy:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'restart policy update is not supported '
+ 'for API version < 1.23'
+ )
+ data['RestartPolicy'] = restart_policy
+
+ res = self._post_json(url, data=data)
+ return self._result(res, True)
+
+ @utils.check_resource('container')
+ def wait(self, container, timeout=None, condition=None):
+ """
+ Block until a container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ container (str or dict): The container to wait on. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
+
+ Returns:
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
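+
+ Example (illustrative sketch):
+
+ >>> result = client.api.wait(container)
+ >>> exit_code = result['StatusCode']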
+ """
+ url = self._url("/containers/{0}/wait", container)
+ params = {}
+ if condition is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'wait condition is not supported for API version < 1.30'
+ )
+ params['condition'] = condition
+
+ res = self._post(url, timeout=timeout, params=params)
+ return self._result(res, True)
diff --git a/contrib/python/docker/docker/api/daemon.py b/contrib/python/docker/docker/api/daemon.py
new file mode 100644
index 0000000000..a857213265
--- /dev/null
+++ b/contrib/python/docker/docker/api/daemon.py
@@ -0,0 +1,181 @@
+import os
+from datetime import datetime
+
+from .. import auth, types, utils
+
+
+class DaemonApiMixin:
+ @utils.minimum_version('1.25')
+ def df(self):
+ """
+ Get data usage information.
+
+ Returns:
+ (dict): A dictionary representing different resource categories
+ and their respective data usage.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/system/df')
+ return self._result(self._get(url), True)
+
+ def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ A :py:class:`docker.types.daemon.CancellableStream` generator
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for event in client.events(decode=True):
+ ... print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+
+ or
+
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ if isinstance(since, datetime):
+ since = utils.datetime_to_timestamp(since)
+
+ if isinstance(until, datetime):
+ until = utils.datetime_to_timestamp(until)
+
+ if filters:
+ filters = utils.convert_filters(filters)
+
+ params = {
+ 'since': since,
+ 'until': until,
+ 'filters': filters
+ }
+ url = self._url('/events')
+
+ response = self._get(url, params=params, stream=True, timeout=None)
+ stream = self._stream_helper(response, decode=decode)
+
+ return types.CancellableStream(stream, response)
+
+ def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url("/info")), True)
+
+ def login(self, username, password=None, email=None, registry=None,
+ reauth=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+ reauth (bool): Whether or not to refresh existing authentication on
+ the Docker server.
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
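+
+ Example (illustrative sketch; the credentials are placeholders):
+
+ >>> client.api.login(username='myuser', password='s3cr3t',
+ ... registry='https://index.docker.io/v1/')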
+ """
+
+ # If we don't have any auth data so far, try reloading the config file
+ # one more time in case anything showed up in there.
+ # If dockercfg_path is passed, check to see if the config file
+ # exists and, if so, load that config.
+ if dockercfg_path and os.path.exists(dockercfg_path):
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+ elif not self._auth_configs or self._auth_configs.is_empty:
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ authcfg = self._auth_configs.resolve_authconfig(registry)
+ # If we found an existing auth config for this registry and username
+ # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == username \
+ and not reauth:
+ return authcfg
+
+ req_data = {
+ 'username': username,
+ 'password': password,
+ 'email': email,
+ 'serveraddress': registry,
+ }
+
+ response = self._post_json(self._url('/auth'), data=req_data)
+ if response.status_code == 200:
+ self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
+ return self._result(response, json=True)
+
+ def ping(self):
+ """
+ Checks that the server is responsive. An exception will be raised if it
+ isn't responding.
+
+ Returns:
+ (bool): ``True`` if the server responded with ``OK``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
+
+ def version(self, api_version=True):
+ """
+ Returns version information from the server. Similar to the ``docker
+ version`` command.
+
+ Returns:
+ (dict): The server version information
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/version", versioned_api=api_version)
+ return self._result(self._get(url), json=True)
diff --git a/contrib/python/docker/docker/api/exec_api.py b/contrib/python/docker/docker/api/exec_api.py
new file mode 100644
index 0000000000..d8fc50dd3d
--- /dev/null
+++ b/contrib/python/docker/docker/api/exec_api.py
@@ -0,0 +1,176 @@
+from .. import errors, utils
+from ..types import CancellableStream
+
+
+class ExecApiMixin:
+ @utils.check_resource('container')
+ def exec_create(self, container, cmd, stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=False, user='',
+ environment=None, workdir=None, detach_keys=None):
+ """
+ Sets up an exec instance in a running container.
+
+ Args:
+ container (str): Target container where exec instance will be
+ created
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+ detach_keys (str): Override the key sequence for detaching
+ a container. Format is a single character `[a-Z]`
+ or `ctrl-<value>` where `<value>` is one of:
+ `a-z`, `@`, `^`, `[`, `,` or `_`.
+ ~/.docker/config.json is used by default.
+
+ Returns:
+ (dict): A dictionary with an exec ``Id`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
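+
+ Example (illustrative sketch; creates an exec instance, then runs
+ it with :py:meth:`exec_start`):
+
+ >>> exec_id = client.api.exec_create(container, cmd='ls -l /')
+ >>> client.api.exec_start(exec_id)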
+ """
+
+ if environment is not None and utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Setting environment for exec is not supported in API < 1.25'
+ )
+
+ if isinstance(cmd, str):
+ cmd = utils.split_command(cmd)
+
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
+ data = {
+ 'Container': container,
+ 'User': user,
+ 'Privileged': privileged,
+ 'Tty': tty,
+ 'AttachStdin': stdin,
+ 'AttachStdout': stdout,
+ 'AttachStderr': stderr,
+ 'Cmd': cmd,
+ 'Env': environment,
+ }
+
+ if workdir is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'workdir is not supported for API version < 1.35'
+ )
+ data['WorkingDir'] = workdir
+
+ if detach_keys:
+ data['detachKeys'] = detach_keys
+ elif 'detachKeys' in self._general_configs:
+ data['detachKeys'] = self._general_configs['detachKeys']
+
+ url = self._url('/containers/{0}/exec', container)
+ res = self._post_json(url, data=data)
+ return self._result(res, True)
+
+ def exec_inspect(self, exec_id):
+ """
+ Return low-level information about an exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+
+ Returns:
+ (dict): Dictionary of values returned by the endpoint.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+ res = self._get(self._url("/exec/{0}/json", exec_id))
+ return self._result(res, True)
+
+ def exec_resize(self, exec_id, height=None, width=None):
+ """
+ Resize the tty session used by the specified exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ height (int): Height of tty session
+ width (int): Width of tty session
+ """
+
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+
+ params = {'h': height, 'w': width}
+ url = self._url("/exec/{0}/resize", exec_id)
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @utils.check_resource('exec_id')
+ def exec_start(self, exec_id, detach=False, tty=False, stream=False,
+ socket=False, demux=False):
+ """
+ Start a previously set up exec instance.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ stream (bool): Return response data progressively as an iterator
+ of strings, rather than a single string.
+ socket (bool): Return the connection socket to allow custom
+ read/write operations. Must be closed by the caller when done.
+ demux (bool): Return stdout and stderr separately
+
+ Returns:
+
+ (generator or str or tuple): If ``stream=True``, a generator
+ yielding response chunks. If ``socket=True``, a socket object for
+ the connection. A string containing response data otherwise. If
+ ``demux=True``, a tuple with two elements of type bytes: stdout and
+ stderr.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ # we want an open socket if socket == True
+
+ data = {
+ 'Tty': tty,
+ 'Detach': detach
+ }
+
+ headers = {} if detach else {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ res = self._post_json(
+ self._url('/exec/{0}/start', exec_id),
+ headers=headers,
+ data=data,
+ stream=True
+ )
+ if detach:
+ try:
+ return self._result(res)
+ finally:
+ res.close()
+ if socket:
+ return self._get_raw_response_socket(res)
+
+ output = self._read_from_socket(res, stream, tty=tty, demux=demux)
+ if stream:
+ return CancellableStream(output, res)
+ else:
+ return output
diff --git a/contrib/python/docker/docker/api/image.py b/contrib/python/docker/docker/api/image.py
new file mode 100644
index 0000000000..85109473bc
--- /dev/null
+++ b/contrib/python/docker/docker/api/image.py
@@ -0,0 +1,601 @@
+import logging
+import os
+
+from .. import auth, errors, utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+
+log = logging.getLogger(__name__)
+
+
+class ImageApiMixin:
+
+ @utils.check_resource('image')
+ def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ image (str): Image name to get
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): A stream of raw archive data.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = client.api.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image:
+ ... f.write(chunk)
+ >>> f.close()
+ """
+ res = self._get(self._url("/images/{0}/get", image), stream=True)
+ return self._stream_raw_result(res, chunk_size, False)
+
+ @utils.check_resource('image')
+ def history(self, image):
+ """
+ Show the history of an image.
+
+ Args:
+ image (str): The image to show history for
+
+ Returns:
+ (list): The history of the image
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ res = self._get(self._url("/images/{0}/history", image))
+ return self._result(res, True)
+
+ def images(self, name=None, quiet=False, all=False, filters=None):
+ """
+ List images. Similar to the ``docker images`` command.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ quiet (bool): Only return numeric IDs as a list.
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
+
+ Returns:
+ (dict or list): A list if ``quiet=True``, otherwise a dict.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
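+
+ Example (illustrative sketch; lists dangling images only):
+
+ >>> client.api.images(filters={'dangling': True})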
+ """
+ params = {
+ 'only_ids': 1 if quiet else 0,
+ 'all': 1 if all else 0,
+ }
+ if name:
+ if utils.version_lt(self._version, '1.25'):
+ # only use "filter" on API 1.24 and under, as it is deprecated
+ params['filter'] = name
+ else:
+ if filters:
+ filters['reference'] = name
+ else:
+ filters = {'reference': name}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ res = self._result(self._get(self._url("/images/json"), params=params),
+ True)
+ if quiet:
+ return [x['Id'] for x in res]
+ return res
+
+ def import_image(self, src=None, repository=None, tag=None, image=None,
+ changes=None, stream_src=False):
+ """
+ Import an image. Similar to the ``docker import`` command.
+
+ If ``src`` is a string or unicode string, it will first be treated as a
+ path to a tarball on the local system. If there is an error reading
+ from that file, ``src`` will be treated as a URL instead to fetch the
+ image from. You can also pass an open file handle as ``src``, in which
+ case the data will be read from that file.
+
+ If ``src`` is unset but ``image`` is set, the ``image`` parameter will
+ be taken as the name of an existing image to import from.
+
+ Args:
+ src (str or file): Path to tarfile, URL, or file-like object
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ image (str): Use another image like the ``FROM`` Dockerfile
+ parameter
+ """
+ if not (src or image):
+ raise errors.DockerException(
+ 'Must specify src or image to import from'
+ )
+ u = self._url('/images/create')
+
+ params = _import_image_params(
+ repository, tag, image,
+ src=(src if isinstance(src, str) else None),
+ changes=changes
+ )
+ headers = {'Content-Type': 'application/tar'}
+
+ if image or params.get('fromSrc') != '-': # from image or URL
+ return self._result(
+ self._post(u, data=None, params=params)
+ )
+ elif isinstance(src, str): # from file path
+ with open(src, 'rb') as f:
+ return self._result(
+ self._post(
+ u, data=f, params=params, headers=headers, timeout=None
+ )
+ )
+ else: # from raw data
+ if stream_src:
+ headers['Transfer-Encoding'] = 'chunked'
+ return self._result(
+ self._post(u, data=src, params=params, headers=headers)
+ )
+
+ def import_image_from_data(self, data, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
+ allows importing in-memory bytes data.
+
+ Args:
+ data (bytes collection): Bytes collection containing valid tar data
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+
+ u = self._url('/images/create')
+ params = _import_image_params(
+ repository, tag, src='-', changes=changes
+ )
+ headers = {'Content-Type': 'application/tar'}
+ return self._result(
+ self._post(
+ u, data=data, params=params, headers=headers, timeout=None
+ )
+ )
+
+ def import_image_from_file(self, filename, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a tar file on disk.
+
+ Args:
+ filename (str): Full path to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+
+ Raises:
+ IOError: File does not exist.
+ """
+
+ return self.import_image(
+ src=filename, repository=repository, tag=tag, changes=changes
+ )
+
+ def import_image_from_stream(self, stream, repository=None, tag=None,
+ changes=None):
+ return self.import_image(
+ src=stream, stream_src=True, repository=repository, tag=tag,
+ changes=changes
+ )
+
+ def import_image_from_url(self, url, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a URL.
+
+ Args:
+ url (str): A URL pointing to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+ return self.import_image(
+ src=url, repository=repository, tag=tag, changes=changes
+ )
+
+ def import_image_from_image(self, image, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from another image, like the ``FROM`` Dockerfile
+ parameter.
+
+ Args:
+ image (str): Image name to import from
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+ return self.import_image(
+ image=image, repository=repository, tag=tag, changes=changes
+ )
+
+ @utils.check_resource('image')
+ def inspect_image(self, image):
+ """
+ Get detailed information about an image. Similar to the ``docker
+ inspect`` command, but only for images.
+
+ Args:
+ image (str): The image to inspect
+
+ Returns:
+ (dict): Similar to the output of ``docker inspect``, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
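+
+        Example:
+            A minimal sketch; assumes the image is present locally:
+
+            >>> image = client.api.inspect_image('busybox:latest')
+            >>> image['Id']
+            'sha256:...'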
+ """
+ return self._result(
+ self._get(self._url("/images/{0}/json", image)), True
+ )
+
+ @utils.minimum_version('1.30')
+ @utils.check_resource('image')
+ def inspect_distribution(self, image, auth_config=None):
+ """
+ Get image digest and platform information by contacting the registry.
+
+ Args:
+ image (str): The image name to inspect
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
+
+ Returns:
+ (dict): A dict containing distribution data
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
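+
+        Example:
+            A minimal sketch; requires API version >= 1.30 and registry
+            access:
+
+            >>> dist = client.api.inspect_distribution('busybox:latest')
+            >>> dist['Descriptor']['digest']
+            'sha256:...'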
+ """
+ registry, _ = auth.resolve_repository_name(image)
+
+ headers = {}
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ url = self._url("/distribution/{0}/json", image)
+
+ return self._result(
+ self._get(url, headers=headers), True
+ )
+
+ def load_image(self, data, quiet=None):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
+ save``). Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+ quiet (boolean): Suppress progress details in response.
+
+ Returns:
+ (generator): Progress output as JSON objects. Only available for
+ API version >= 1.23
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
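+
+        Example:
+            A minimal sketch; the tarball path is a placeholder:
+
+            >>> with open('/tmp/busybox.tar', 'rb') as f:
+            ...     for line in client.api.load_image(f.read()):
+            ...         print(line)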
+ """
+ params = {}
+
+ if quiet is not None:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'quiet is not supported in API version < 1.23'
+ )
+ params['quiet'] = quiet
+
+ res = self._post(
+ self._url("/images/load"), data=data, params=params, stream=True
+ )
+ if utils.version_gte(self._version, '1.23'):
+ return self._stream_helper(res, decode=True)
+
+ self._raise_for_status(res)
+
+ @utils.minimum_version('1.25')
+ def prune_images(self, filters=None):
+ """
+ Delete unused images
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+ Available filters:
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
+
+ Returns:
+ (dict): A dict containing a list of deleted image IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
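+
+        Example:
+
+            >>> client.api.prune_images(filters={'dangling': True})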
+ """
+ url = self._url("/images/prune")
+ params = {}
+ if filters is not None:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._post(url, params=params), True)
+
+ def pull(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False, platform=None, all_tags=False):
+ """
+ Pulls an image. Similar to the ``docker pull`` command.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
+ is set to ``latest``.
+ stream (bool): Stream the output as a generator. Make sure to
+ consume the generator, otherwise pull might get cancelled.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ all_tags (bool): Pull all image tags, the ``tag`` parameter is
+ ignored.
+
+ Returns:
+ (generator or str): The output
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> resp = client.api.pull('busybox', stream=True, decode=True)
+ ... for line in resp:
+ ... print(json.dumps(line, indent=4))
+ {
+ "status": "Pulling image (latest) from busybox",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+ {
+ "status": "Pulling image (latest) from busybox, endpoint: ...",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+
+ """
+ repository, image_tag = utils.parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
+
+ if all_tags:
+ tag = None
+
+ registry, repo_name = auth.resolve_repository_name(repository)
+
+ params = {
+ 'tag': tag,
+ 'fromImage': repository
+ }
+ headers = {}
+
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
+
+ response = self._post(
+ self._url('/images/create'), params=params, headers=headers,
+ stream=stream, timeout=None
+ )
+
+ self._raise_for_status(response)
+
+ if stream:
+ return self._stream_helper(response, decode=decode)
+
+ return self._result(response)
+
+ def push(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False):
+ """
+ Push an image or a repository to the registry. Similar to the ``docker
+ push`` command.
+
+ Args:
+ repository (str): The repository to push to
+ tag (str): An optional tag to push
+ stream (bool): Stream the output as a blocking generator
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+
+ Returns:
+ (generator or str): The output from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ >>> resp = client.api.push(
+ ... 'yourname/app',
+ ... stream=True,
+ ... decode=True,
+ ... )
+ ... for line in resp:
+ ... print(line)
+ {'status': 'Pushing repository yourname/app (1 tags)'}
+            {'status': 'Pushing', 'progressDetail': {}, 'id': '511136ea3c5a'}
+ {'status': 'Image already pushed, skipping', 'progressDetail':{},
+ 'id': '511136ea3c5a'}
+ ...
+
+ """
+ if not tag:
+ repository, tag = utils.parse_repository_tag(repository)
+ registry, repo_name = auth.resolve_repository_name(repository)
+ u = self._url("/images/{0}/push", repository)
+ params = {
+ 'tag': tag
+ }
+ headers = {}
+
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ response = self._post_json(
+ u, None, headers=headers, stream=stream, params=params
+ )
+
+ self._raise_for_status(response)
+
+ if stream:
+ return self._stream_helper(response, decode=decode)
+
+ return self._result(response)
+
+ @utils.check_resource('image')
+ def remove_image(self, image, force=False, noprune=False):
+ """
+ Remove an image. Similar to the ``docker rmi`` command.
+
+ Args:
+ image (str): The image to remove
+ force (bool): Force removal of the image
+ noprune (bool): Do not delete untagged parents
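+
+        Example:
+            An illustrative call; the image name is a placeholder:
+
+            >>> client.api.remove_image('myorg/rootfs:latest', force=True)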
+ """
+ params = {'force': force, 'noprune': noprune}
+ res = self._delete(self._url("/images/{0}", image), params=params)
+ return self._result(res, True)
+
+ def search(self, term, limit=None):
+ """
+ Search for images on Docker Hub. Similar to the ``docker search``
+ command.
+
+ Args:
+ term (str): A term to search for.
+ limit (int): The maximum number of results to return.
+
+ Returns:
+ (list of dicts): The response of the search.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
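+
+        Example:
+
+            >>> results = client.api.search('busybox', limit=3)
+            >>> [r['name'] for r in results]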
+ """
+ params = {'term': term}
+ if limit is not None:
+ params['limit'] = limit
+
+ return self._result(
+ self._get(self._url("/images/search"), params=params),
+ True
+ )
+
+ @utils.check_resource('image')
+ def tag(self, image, repository, tag=None, force=False):
+ """
+ Tag an image into a repository. Similar to the ``docker tag`` command.
+
+ Args:
+ image (str): The image to tag
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+            force (bool): Force the tag operation
+
+ Returns:
+ (bool): ``True`` if successful
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ force=True)
+ """
+ params = {
+ 'tag': tag,
+ 'repo': repository,
+ 'force': 1 if force else 0
+ }
+ url = self._url("/images/{0}/tag", image)
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return res.status_code == 201
+
+
+def is_file(src):
+ try:
+ return (
+ isinstance(src, str) and
+ os.path.isfile(src)
+ )
+ except TypeError: # a data string will make isfile() raise a TypeError
+ return False
+
+
+def _import_image_params(repo, tag, image=None, src=None,
+ changes=None):
+ params = {
+ 'repo': repo,
+ 'tag': tag,
+ }
+ if image:
+ params['fromImage'] = image
+ elif src and not is_file(src):
+ params['fromSrc'] = src
+ else:
+ params['fromSrc'] = '-'
+
+ if changes:
+ params['changes'] = changes
+
+ return params
diff --git a/contrib/python/docker/docker/api/network.py b/contrib/python/docker/docker/api/network.py
new file mode 100644
index 0000000000..2b1925710e
--- /dev/null
+++ b/contrib/python/docker/docker/api/network.py
@@ -0,0 +1,277 @@
+from .. import utils
+from ..errors import InvalidVersion
+from ..utils import check_resource, minimum_version, version_lt
+
+
+class NetworkApiMixin:
+ def networks(self, names=None, ids=None, filters=None):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by
+ ids (:py:class:`list`): List of ids to filter by
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
+ such.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+
+ Returns:
+            (list): List of network objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
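+
+        Example:
+            A minimal sketch; lists networks using the ``bridge`` driver:
+
+            >>> client.api.networks(filters={'driver': 'bridge'})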
+ """
+
+ if filters is None:
+ filters = {}
+ if names:
+ filters['name'] = names
+ if ids:
+ filters['id'] = ids
+ params = {'filters': utils.convert_filters(filters)}
+ url = self._url("/networks")
+ res = self._get(url, params=params)
+ return self._result(res, json=True)
+
+ def create_network(self, name, driver=None, options=None, ipam=None,
+ check_duplicate=None, internal=False, labels=None,
+ enable_ipv6=False, attachable=None, scope=None,
+ ingress=None):
+ """
+        Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+            check_duplicate (bool): Request daemon to check for networks
+                with the same name. Default: ``None``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (dict): The created network reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.api.create_network("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> client.api.create_network("network1", driver="bridge",
+ ipam=ipam_config)
+ """
+ if options is not None and not isinstance(options, dict):
+ raise TypeError('options must be a dictionary')
+
+ data = {
+ 'Name': name,
+ 'Driver': driver,
+ 'Options': options,
+ 'IPAM': ipam,
+ 'CheckDuplicate': check_duplicate,
+ }
+
+ if labels is not None:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'network labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ if enable_ipv6:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'enable_ipv6 was introduced in API 1.23'
+ )
+ data['EnableIPv6'] = True
+
+ if internal:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion('Internal networks are not '
+ 'supported in API version < 1.22')
+ data['Internal'] = True
+
+ if attachable is not None:
+ if version_lt(self._version, '1.24'):
+ raise InvalidVersion(
+ 'attachable is not supported in API version < 1.24'
+ )
+ data['Attachable'] = attachable
+
+ if ingress is not None:
+ if version_lt(self._version, '1.29'):
+ raise InvalidVersion(
+ 'ingress is not supported in API version < 1.29'
+ )
+
+ data['Ingress'] = ingress
+
+ if scope is not None:
+ if version_lt(self._version, '1.30'):
+ raise InvalidVersion(
+ 'scope is not supported in API version < 1.30'
+ )
+ data['Scope'] = scope
+
+ url = self._url("/networks/create")
+ res = self._post_json(url, data=data)
+ return self._result(res, json=True)
+
+ @minimum_version('1.25')
+ def prune_networks(self, filters=None):
+ """
+ Delete unused networks
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+            (dict): A dict containing a list of deleted network names under
+                the ``NetworksDeleted`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
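+
+        Example:
+            An illustrative call; prunes networks unused for 24 hours:
+
+            >>> client.api.prune_networks(filters={'until': '24h'})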
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/networks/prune')
+ return self._result(self._post(url, params=params), True)
+
+ @check_resource('net_id')
+ def remove_network(self, net_id):
+ """
+ Remove a network. Similar to the ``docker network rm`` command.
+
+ Args:
+ net_id (str): The network's id
+ """
+ url = self._url("/networks/{0}", net_id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+
+ @check_resource('net_id')
+ def inspect_network(self, net_id, verbose=None, scope=None):
+ """
+ Get detailed information about a network.
+
+ Args:
+ net_id (str): ID of network
+ verbose (bool): Show the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
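+
+        Example:
+            A minimal sketch; ``verbose`` requires API version >= 1.28:
+
+            >>> client.api.inspect_network('network1', verbose=True)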
+ """
+ params = {}
+ if verbose is not None:
+ if version_lt(self._version, '1.28'):
+ raise InvalidVersion('verbose was introduced in API 1.28')
+ params['verbose'] = verbose
+ if scope is not None:
+ if version_lt(self._version, '1.31'):
+ raise InvalidVersion('scope was introduced in API 1.31')
+ params['scope'] = scope
+
+ url = self._url("/networks/{0}", net_id)
+ res = self._get(url, params=params)
+ return self._result(res, json=True)
+
+ @check_resource('container')
+ def connect_container_to_network(self, container, net_id,
+ ipv4_address=None, ipv6_address=None,
+ aliases=None, links=None,
+ link_local_ips=None, driver_opt=None,
+ mac_address=None):
+ """
+ Connect a container to a network.
+
+ Args:
+ container (str): container-id/name to be connected to the network
+ net_id (str): network id
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local
+ (IPv4/IPv6) addresses.
+            mac_address (str): The MAC address of this container on the
+                network. Defaults to ``None``.
+            driver_opt (dict): A dictionary of options to provide to the
+                network driver. Defaults to ``None``.
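+
+        Example:
+            An illustrative sketch; the container and network names are
+            placeholders, and the static IP assumes a matching IPAM subnet:
+
+            >>> client.api.connect_container_to_network(
+            ...     'web1', 'network1', aliases=['web'],
+            ...     ipv4_address='192.168.52.2'
+            ... )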
+ """
+ data = {
+ "Container": container,
+ "EndpointConfig": self.create_endpoint_config(
+ aliases=aliases, links=links, ipv4_address=ipv4_address,
+ ipv6_address=ipv6_address, link_local_ips=link_local_ips,
+ driver_opt=driver_opt,
+ mac_address=mac_address
+ ),
+ }
+
+ url = self._url("/networks/{0}/connect", net_id)
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+
+ @check_resource('container')
+ def disconnect_container_from_network(self, container, net_id,
+ force=False):
+ """
+ Disconnect a container from a network.
+
+ Args:
+ container (str): container ID or name to be disconnected from the
+ network
+ net_id (str): network ID
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
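+
+        Example:
+
+            >>> client.api.disconnect_container_from_network(
+            ...     'web1', 'network1', force=True
+            ... )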
+ """
+ data = {"Container": container}
+ if force:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion(
+ 'Forced disconnect was introduced in API 1.22'
+ )
+ data['Force'] = force
+ url = self._url("/networks/{0}/disconnect", net_id)
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
diff --git a/contrib/python/docker/docker/api/plugin.py b/contrib/python/docker/docker/api/plugin.py
new file mode 100644
index 0000000000..10210c1a23
--- /dev/null
+++ b/contrib/python/docker/docker/api/plugin.py
@@ -0,0 +1,261 @@
+from .. import auth, utils
+
+
+class PluginApiMixin:
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def configure_plugin(self, name, options):
+ """
+ Configure a plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ options (dict): A key-value mapping of options
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/set', name)
+ data = options
+ if isinstance(data, dict):
+ data = [f'{k}={v}' for k, v in data.items()]
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def create_plugin(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/create')
+
+ with utils.create_archive(
+ root=plugin_data_dir, gzip=gzip,
+ files=set(utils.build.walk(plugin_data_dir, []))
+ ) as archv:
+ res = self._post(url, params={'name': name}, data=archv)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def disable_plugin(self, name, force=False):
+ """
+ Disable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ force (bool): To enable the force query parameter.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/disable', name)
+ res = self._post(url, params={'force': force})
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def enable_plugin(self, name, timeout=0):
+ """
+ Enable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ timeout (int): Operation timeout (in seconds). Default: 0
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/enable', name)
+ params = {'timeout': timeout}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def inspect_plugin(self, name):
+ """
+ Retrieve plugin metadata.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ A dict containing plugin info
+ """
+ url = self._url('/plugins/{0}/json', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def pull_plugin(self, remote, privileges, name=None):
+ """
+ Pull and install a plugin. After the plugin is installed, it can be
+ enabled using :py:meth:`~enable_plugin`.
+
+ Args:
+ remote (string): Remote reference for the plugin to install.
+ The ``:latest`` tag is optional, and is the default if
+ omitted.
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+ name (string): Local name for the pulled plugin. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ An iterable object streaming the decoded API logs
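+
+        Example:
+            An illustrative sketch using a public plugin reference:
+
+            >>> privileges = client.api.plugin_privileges('vieux/sshfs:latest')
+            >>> for line in client.api.pull_plugin('vieux/sshfs:latest',
+            ...                                    privileges):
+            ...     print(line)
+            >>> client.api.enable_plugin('vieux/sshfs:latest')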
+ """
+ url = self._url('/plugins/pull')
+ params = {
+ 'remote': remote,
+ }
+ if name:
+ params['name'] = name
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
+
+ @utils.minimum_version('1.25')
+ def plugins(self):
+ """
+ Retrieve a list of installed plugins.
+
+ Returns:
+ A list of dicts, one per plugin
+ """
+ url = self._url('/plugins')
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def plugin_privileges(self, name):
+ """
+ Retrieve list of privileges to be granted to a plugin.
+
+ Args:
+ name (string): Name of the remote plugin to examine. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ A list of dictionaries representing the plugin's
+ permissions
+
+ """
+ params = {
+ 'remote': name,
+ }
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+
+ url = self._url('/plugins/privileges')
+ return self._result(
+ self._get(url, params=params, headers=headers), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def push_plugin(self, name):
+ """
+ Push a plugin to the registry.
+
+ Args:
+ name (string): Name of the plugin to upload. The ``:latest``
+ tag is optional, and is the default if omitted.
+
+        Returns:
+            An iterable object streaming the decoded API logs
+ """
+        # The Engine API endpoint for pushing a plugin is
+        # POST /plugins/{name}/push
+        url = self._url('/plugins/{0}/push', name)
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ res = self._post(url, headers=headers)
+ self._raise_for_status(res)
+ return self._stream_helper(res, decode=True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def remove_plugin(self, name, force=False):
+ """
+ Remove an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to remove. The ``:latest``
+ tag is optional, and is the default if omitted.
+ force (bool): Disable the plugin before removing. This may
+ result in issues if the plugin is in use by a container.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}', name)
+ res = self._delete(url, params={'force': force})
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.26')
+ @utils.check_resource('name')
+ def upgrade_plugin(self, name, remote, privileges):
+ """
+ Upgrade an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to upgrade. The ``:latest``
+ tag is optional and is the default if omitted.
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+
+ url = self._url('/plugins/{0}/upgrade', name)
+ params = {
+ 'remote': remote,
+ }
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
diff --git a/contrib/python/docker/docker/api/secret.py b/contrib/python/docker/docker/api/secret.py
new file mode 100644
index 0000000000..db1701bdc0
--- /dev/null
+++ b/contrib/python/docker/docker/api/secret.py
@@ -0,0 +1,98 @@
+import base64
+
+from .. import errors, utils
+
+
+class SecretApiMixin:
+ @utils.minimum_version('1.25')
+ def create_secret(self, name, data, labels=None, driver=None):
+ """
+ Create a secret
+
+ Args:
+ name (string): Name of the secret
+ data (bytes): Secret data to be stored
+ labels (dict): A mapping of labels to assign to the secret
+ driver (DriverConfig): A custom driver configuration. If
+ unspecified, the default ``internal`` driver will be used
+
+ Returns (dict): ID of the newly created secret
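+
+        Example:
+            A minimal sketch; requires the Engine to be part of a swarm:
+
+            >>> client.api.create_secret('db_password', b'hunter2')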
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ if driver is not None:
+ if utils.version_lt(self._version, '1.31'):
+ raise errors.InvalidVersion(
+                    'Secret driver is only available for API version >= 1.31'
+ )
+
+ body['Driver'] = driver
+
+ url = self._url('/secrets/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def inspect_secret(self, id):
+ """
+ Retrieve secret metadata
+
+ Args:
+ id (string): Full ID of the secret to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def remove_secret(self, id):
+ """
+ Remove a secret
+
+ Args:
+ id (string): Full ID of the secret to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def secrets(self, filters=None):
+ """
+ List secrets
+
+ Args:
+ filters (dict): A map of filters to process on the secrets
+ list. Available filters: ``names``
+
+ Returns (list): A list of secrets
+ """
+ url = self._url('/secrets')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/contrib/python/docker/docker/api/service.py b/contrib/python/docker/docker/api/service.py
new file mode 100644
index 0000000000..3aed065175
--- /dev/null
+++ b/contrib/python/docker/docker/api/service.py
@@ -0,0 +1,486 @@
+from .. import auth, errors, utils
+from ..types import ServiceMode
+
+
+def _check_api_features(version, task_template, update_config, endpoint_spec,
+ rollback_config):
+
+ def raise_version_error(param, min_version):
+ raise errors.InvalidVersion(
+ f'{param} is not supported in API version < {min_version}'
+ )
+
+ if update_config is not None:
+ if utils.version_lt(version, '1.25'):
+ if 'MaxFailureRatio' in update_config:
+ raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
+ if 'Monitor' in update_config:
+ raise_version_error('UpdateConfig.monitor', '1.25')
+
+ if utils.version_lt(version, '1.28'):
+ if update_config.get('FailureAction') == 'rollback':
+ raise_version_error(
+ 'UpdateConfig.failure_action rollback', '1.28'
+ )
+
+ if utils.version_lt(version, '1.29'):
+ if 'Order' in update_config:
+ raise_version_error('UpdateConfig.order', '1.29')
+
+ if rollback_config is not None:
+ if utils.version_lt(version, '1.28'):
+ raise_version_error('rollback_config', '1.28')
+
+ if utils.version_lt(version, '1.29'):
+            if 'Order' in rollback_config:
+ raise_version_error('RollbackConfig.order', '1.29')
+
+ if endpoint_spec is not None:
+ if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
+ if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
+ raise_version_error('EndpointSpec.Ports[].mode', '1.32')
+
+ if task_template is not None:
+ if 'ForceUpdate' in task_template and utils.version_lt(
+ version, '1.25'):
+ raise_version_error('force_update', '1.25')
+
+ if task_template.get('Placement'):
+ if utils.version_lt(version, '1.30'):
+ if task_template['Placement'].get('Platforms'):
+ raise_version_error('Placement.platforms', '1.30')
+ if utils.version_lt(version, '1.27'):
+ if task_template['Placement'].get('Preferences'):
+ raise_version_error('Placement.preferences', '1.27')
+
+ if task_template.get('ContainerSpec'):
+ container_spec = task_template.get('ContainerSpec')
+
+ if utils.version_lt(version, '1.25'):
+ if container_spec.get('TTY'):
+ raise_version_error('ContainerSpec.tty', '1.25')
+ if container_spec.get('Hostname') is not None:
+ raise_version_error('ContainerSpec.hostname', '1.25')
+ if container_spec.get('Hosts') is not None:
+ raise_version_error('ContainerSpec.hosts', '1.25')
+ if container_spec.get('Groups') is not None:
+ raise_version_error('ContainerSpec.groups', '1.25')
+ if container_spec.get('DNSConfig') is not None:
+ raise_version_error('ContainerSpec.dns_config', '1.25')
+ if container_spec.get('Healthcheck') is not None:
+ raise_version_error('ContainerSpec.healthcheck', '1.25')
+
+ if utils.version_lt(version, '1.28'):
+ if container_spec.get('ReadOnly') is not None:
+                    raise_version_error('ContainerSpec.read_only', '1.28')
+ if container_spec.get('StopSignal') is not None:
+ raise_version_error('ContainerSpec.stop_signal', '1.28')
+
+ if utils.version_lt(version, '1.30'):
+ if container_spec.get('Configs') is not None:
+ raise_version_error('ContainerSpec.configs', '1.30')
+ if container_spec.get('Privileges') is not None:
+ raise_version_error('ContainerSpec.privileges', '1.30')
+
+ if utils.version_lt(version, '1.35'):
+ if container_spec.get('Isolation') is not None:
+ raise_version_error('ContainerSpec.isolation', '1.35')
+
+ if utils.version_lt(version, '1.38'):
+ if container_spec.get('Init') is not None:
+ raise_version_error('ContainerSpec.init', '1.38')
+
+ if task_template.get('Resources'):
+ if utils.version_lt(version, '1.32'):
+ if task_template['Resources'].get('GenericResources'):
+ raise_version_error('Resources.generic_resources', '1.32')
+
+
+def _merge_task_template(current, override):
+ merged = current.copy()
+ if override is not None:
+ for ts_key, ts_value in override.items():
+ if ts_key == 'ContainerSpec':
+ if 'ContainerSpec' not in merged:
+ merged['ContainerSpec'] = {}
+ for cs_key, cs_value in override['ContainerSpec'].items():
+ if cs_value is not None:
+ merged['ContainerSpec'][cs_key] = cs_value
+ elif ts_value is not None:
+ merged[ts_key] = ts_value
+ return merged
+
+
+class ServiceApiMixin:
+ @utils.minimum_version('1.24')
+ def create_service(
+ self, task_template, name=None, labels=None, mode=None,
+ update_config=None, networks=None, endpoint_config=None,
+ endpoint_spec=None, rollback_config=None
+ ):
+ """
+ Create a service.
+
+ Args:
+ task_template (TaskTemplate): Specification of the task to start as
+ part of the new service.
+ name (string): User-defined name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+ A dictionary containing an ``ID`` key for the newly created
+ service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
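+
+        Example:
+            A minimal sketch; assumes an initialized swarm and uses the
+            ``docker.types`` helpers:
+
+            >>> container_spec = docker.types.ContainerSpec(
+            ...     image='nginx:alpine'
+            ... )
+            >>> task_tmpl = docker.types.TaskTemplate(container_spec)
+            >>> client.api.create_service(task_tmpl, name='web')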
+ """
+
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
+ )
+
+ url = self._url('/services/create')
+ headers = {}
+ image = task_template.get('ContainerSpec', {}).get('Image', None)
+ if image is None:
+ raise errors.DockerException(
+ 'Missing mandatory Image key in ContainerSpec'
+ )
+ if mode and not isinstance(mode, dict):
+ mode = ServiceMode(mode)
+
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+ if utils.version_lt(self._version, '1.25'):
+ networks = networks or task_template.pop('Networks', None)
+ data = {
+ 'Name': name,
+ 'Labels': labels,
+ 'TaskTemplate': task_template,
+ 'Mode': mode,
+ 'Networks': utils.convert_service_networks(networks),
+ 'EndpointSpec': endpoint_spec
+ }
+
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+
+ return self._result(
+ self._post_json(url, data=data, headers=headers), True
+ )
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('service')
+ def inspect_service(self, service, insert_defaults=None):
+ """
+ Return information about a service.
+
+ Args:
+ service (str): Service name or ID.
+ insert_defaults (boolean): If true, default values will be merged
+ into the service inspect output.
+
+ Returns:
+ (dict): A dictionary of the server-side representation of the
+ service, including all relevant properties.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/services/{0}', service)
+ params = {}
+ if insert_defaults is not None:
+ if utils.version_lt(self._version, '1.29'):
+ raise errors.InvalidVersion(
+ 'insert_defaults is not supported in API version < 1.29'
+ )
+ params['insertDefaults'] = insert_defaults
+
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('task')
+ def inspect_task(self, task):
+ """
+ Retrieve information about a task.
+
+ Args:
+ task (str): Task ID
+
+ Returns:
+ (dict): Information about the task.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/tasks/{0}', task)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('service')
+ def remove_service(self, service):
+ """
+ Stop and remove a service.
+
+ Args:
+ service (str): Service name or ID
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ url = self._url('/services/{0}', service)
+ resp = self._delete(url)
+ self._raise_for_status(resp)
+ return True
+
+ @utils.minimum_version('1.24')
+ def services(self, filters=None, status=None):
+ """
+ List services.
+
+ Args:
+            filters (dict): Filters to process on the services list. Valid
+                filters: ``id``, ``name``, ``label`` and ``mode``.
+ Default: ``None``.
+ status (bool): Include the service task count of running and
+ desired tasks. Default: ``None``.
+
+ Returns:
+ A list of dictionaries containing data about each service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ if status is not None:
+ if utils.version_lt(self._version, '1.41'):
+ raise errors.InvalidVersion(
+ 'status is not supported in API version < 1.41'
+ )
+ params['status'] = status
+ url = self._url('/services')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('service')
+ def service_logs(self, service, details=False, follow=False, stdout=False,
+ stderr=False, since=0, timestamps=False, tail='all',
+ is_tty=None):
+ """
+ Get log stream for a service.
+ Note: This endpoint works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ service (str): ID or name of the service
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+            since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+ is_tty (bool): Whether the service's :py:class:`ContainerSpec`
+ enables the TTY option. If omitted, the method will query
+ the Engine for the information, causing an additional
+ roundtrip.
+
+ Returns (generator): Logs for the service.
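+
+        Example:
+            A minimal sketch for a service named ``web``:
+
+            >>> for line in client.api.service_logs(
+            ...     'web', stdout=True, stderr=True
+            ... ):
+            ...     print(line)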
+ """
+ params = {
+ 'details': details,
+ 'follow': follow,
+ 'stdout': stdout,
+ 'stderr': stderr,
+ 'since': since,
+ 'timestamps': timestamps,
+ 'tail': tail
+ }
+
+ url = self._url('/services/{0}/logs', service)
+ res = self._get(url, params=params, stream=True)
+ if is_tty is None:
+ is_tty = self.inspect_service(
+ service
+ )['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
+ return self._get_result_tty(True, res, is_tty)
+
+ @utils.minimum_version('1.24')
+ def tasks(self, filters=None):
+ """
+ Retrieve a list of tasks.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``service``, ``node``,
+ ``label`` and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/tasks')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('service')
+ def update_service(self, service, version, task_template=None, name=None,
+ labels=None, mode=None, update_config=None,
+ networks=None, endpoint_config=None,
+ endpoint_spec=None, fetch_current_spec=False,
+ rollback_config=None):
+ """
+ Update a service.
+
+ Args:
+ service (string): A service identifier (either its name or service
+ ID).
+ version (int): The version number of the service object being
+ updated. This is required to avoid conflicting writes.
+ task_template (TaskTemplate): Specification of the updated task to
+ start as part of the service.
+ name (string): New name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+            fetch_current_spec (boolean): Fetch the current service
+                specification and reuse it for any settings left undefined
+                here. Default: ``False``
+
+ Returns:
+ A dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
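+
+        Example:
+            A minimal sketch; the current version index is read from
+            ``inspect_service`` to avoid conflicting writes:
+
+            >>> spec = client.api.inspect_service('web')
+            >>> client.api.update_service(
+            ...     'web', version=spec['Version']['Index'],
+            ...     labels={'tier': 'frontend'},
+            ...     fetch_current_spec=True
+            ... )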
+ """
+
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
+ )
+
+ if fetch_current_spec:
+ inspect_defaults = True
+ if utils.version_lt(self._version, '1.29'):
+ inspect_defaults = None
+ current = self.inspect_service(
+ service, insert_defaults=inspect_defaults
+ )['Spec']
+
+ else:
+ current = {}
+
+ url = self._url('/services/{0}/update', service)
+ data = {}
+ headers = {}
+
+ data['Name'] = current.get('Name') if name is None else name
+
+ data['Labels'] = current.get('Labels') if labels is None else labels
+
+ if mode is not None:
+ if not isinstance(mode, dict):
+ mode = ServiceMode(mode)
+ data['Mode'] = mode
+ else:
+ data['Mode'] = current.get('Mode')
+
+ data['TaskTemplate'] = _merge_task_template(
+ current.get('TaskTemplate', {}), task_template
+ )
+
+ container_spec = data['TaskTemplate'].get('ContainerSpec', {})
+ image = container_spec.get('Image', None)
+ if image is not None:
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+ else:
+ data['UpdateConfig'] = current.get('UpdateConfig')
+
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+ else:
+ data['RollbackConfig'] = current.get('RollbackConfig')
+
+ if networks is not None:
+ converted_networks = utils.convert_service_networks(networks)
+ if utils.version_lt(self._version, '1.25'):
+ data['Networks'] = converted_networks
+ else:
+ data['TaskTemplate']['Networks'] = converted_networks
+ elif utils.version_lt(self._version, '1.25'):
+ data['Networks'] = current.get('Networks')
+ elif data['TaskTemplate'].get('Networks') is None:
+ current_task_template = current.get('TaskTemplate', {})
+ current_networks = current_task_template.get('Networks')
+ if current_networks is None:
+ current_networks = current.get('Networks')
+ if current_networks is not None:
+ data['TaskTemplate']['Networks'] = current_networks
+
+ if endpoint_spec is not None:
+ data['EndpointSpec'] = endpoint_spec
+ else:
+ data['EndpointSpec'] = current.get('EndpointSpec')
+
+ resp = self._post_json(
+ url, data=data, params={'version': version}, headers=headers
+ )
+ return self._result(resp, json=True)
diff --git a/contrib/python/docker/docker/api/swarm.py b/contrib/python/docker/docker/api/swarm.py
new file mode 100644
index 0000000000..d60d18b619
--- /dev/null
+++ b/contrib/python/docker/docker/api/swarm.py
@@ -0,0 +1,462 @@
+import http.client as http_client
+import logging
+
+from .. import errors, types, utils
+from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
+
+log = logging.getLogger(__name__)
+
+
+class SwarmApiMixin:
+
+ def create_swarm_spec(self, *args, **kwargs):
+ """
+ Create a :py:class:`docker.types.SwarmSpec` instance that can be used
+ as the ``swarm_spec`` argument in
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
+
+ Args:
+            task_history_retention_limit (int): Maximum number of task
+                history entries stored.
+            snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Amount of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Amount of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+            node_cert_expiry (int): Automatic expiry for node certificates.
+ external_cas (:py:class:`list`): Configuration for forwarding
+ signing requests to an external certificate authority. Use
+ a list of :py:class:`docker.types.SwarmExternalCA`.
+ name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer whose purpose is to force swarm
+ to generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
+
+ Returns:
+ :py:class:`docker.types.SwarmSpec`
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> spec = client.api.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ >>> client.api.init_swarm(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, swarm_spec=spec
+ )
+ """
+ ext_ca = kwargs.pop('external_ca', None)
+ if ext_ca:
+ kwargs['external_cas'] = [ext_ca]
+ return types.SwarmSpec(self._version, *args, **kwargs)
+
+ @utils.minimum_version('1.24')
+ def get_unlock_key(self):
+ """
+ Get the unlock key for this Swarm manager.
+
+ Returns:
+ A ``dict`` containing an ``UnlockKey`` member
+ """
+ return self._result(self._get(self._url('/swarm/unlockkey')), True)
+
+ @utils.minimum_version('1.24')
+ def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, swarm_spec=None,
+ default_addr_pool=None, subnet_size=None,
+ data_path_addr=None, data_path_port=None):
+ """
+ Initialize a new Swarm using the current connected engine as the first
+ node.
+
+ Args:
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: None
+ listen_addr (string): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: '0.0.0.0:2377'
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ swarm_spec (dict): Configuration settings of the new Swarm. Use
+ ``APIClient.create_swarm_spec`` to generate a valid
+ configuration. Default: None
+ default_addr_pool (list of strings): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
+ data_path_port (int): Port number to use for data path traffic.
+ Acceptable port range is 1024 to 49151. If set to ``None`` or
+ 0, the default port 4789 will be used. Default: None
+
+ Returns:
+ (str): The ID of the created node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ url = self._url('/swarm/init')
+ if swarm_spec is not None and not isinstance(swarm_spec, dict):
+ raise TypeError('swarm_spec must be a dictionary')
+
+ if default_addr_pool is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Address pool is only available for API version >= 1.39'
+ )
+ # subnet_size becomes 0 if not set with default_addr_pool
+ if subnet_size is None:
+ subnet_size = DEFAULT_SWARM_SUBNET_SIZE
+
+ if subnet_size is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Subnet size is only available for API version >= 1.39'
+ )
+ # subnet_size is ignored if set without default_addr_pool
+ if default_addr_pool is None:
+ default_addr_pool = DEFAULT_SWARM_ADDR_POOL
+
+ data = {
+ 'AdvertiseAddr': advertise_addr,
+ 'ListenAddr': listen_addr,
+ 'DefaultAddrPool': default_addr_pool,
+ 'SubnetSize': subnet_size,
+ 'ForceNewCluster': force_new_cluster,
+ 'Spec': swarm_spec,
+ }
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
+ if data_path_port is not None:
+ if utils.version_lt(self._version, '1.40'):
+ raise errors.InvalidVersion(
+ 'Data path port is only available for '
+ 'API version >= 1.40'
+ )
+ data['DataPathPort'] = data_path_port
+
+ response = self._post_json(url, data=data)
+ return self._result(response, json=True)
+
+ @utils.minimum_version('1.24')
+ def inspect_swarm(self):
+ """
+ Retrieve low-level information about the current swarm.
+
+ Returns:
+ A dictionary containing data about the swarm.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/swarm')
+ return self._result(self._get(url), True)
+
+ @utils.check_resource('node_id')
+ @utils.minimum_version('1.24')
+ def inspect_node(self, node_id):
+ """
+ Retrieve low-level information about a swarm node
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A dictionary containing data about this node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/nodes/{0}', node_id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
+ advertise_addr=None, data_path_addr=None):
+ """
+ Make this Engine join a swarm that has already been created.
+
+ Args:
+ remote_addrs (:py:class:`list`): Addresses of one or more manager
+ nodes already participating in the Swarm to join.
+ join_token (string): Secret token for joining this Swarm.
+ listen_addr (string): Listen address used for inter-manager
+ communication if the node gets promoted to manager, as well as
+ determining the networking interface used for the VXLAN Tunnel
+                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ AdvertiseAddr is not specified, it will be automatically
+ detected when possible. Default: ``None``
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
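+
+        Example:
+            An illustrative sketch; the address and token are placeholders:
+
+            >>> client.api.join_swarm(
+            ...     remote_addrs=['192.168.14.1:2377'],
+            ...     join_token='SWMTKN-1-...'
+            ... )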
+ """
+ data = {
+ 'RemoteAddrs': remote_addrs,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token,
+ 'AdvertiseAddr': advertise_addr,
+ }
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
+ url = self._url('/swarm/join')
+ response = self._post_json(url, data=data)
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def leave_swarm(self, force=False):
+ """
+ Leave a swarm.
+
+ Args:
+ force (bool): Leave the swarm even if this node is a manager.
+ Default: ``False``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/swarm/leave')
+ response = self._post(url, params={'force': force})
+ # Ignore "this node is not part of a swarm" error
+ if force and response.status_code == http_client.NOT_ACCEPTABLE:
+ return True
+ # FIXME: Temporary workaround for 1.13.0-rc bug
+ # https://github.com/docker/docker/issues/29192
+ if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
+ return True
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def nodes(self, filters=None):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of dictionaries containing data about each swarm node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/nodes')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+
+ return self._result(self._get(url, params=params), True)
+
+ @utils.check_resource('node_id')
+ @utils.minimum_version('1.24')
+ def remove_node(self, node_id, force=False):
+ """
+ Remove a node from the swarm.
+
+ Args:
+ node_id (string): ID of the node to be removed.
+ force (bool): Force remove an active node. Default: `False`
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node referenced doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ Returns:
+ `True` if the request was successful.
+ """
+ url = self._url('/nodes/{0}', node_id)
+ params = {
+ 'force': force
+ }
+ res = self._delete(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def unlock_swarm(self, key):
+ """
+ Unlock a locked swarm.
+
+ Args:
+ key (string): The unlock key as provided by
+ :py:meth:`get_unlock_key`
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the key argument is in an incompatible format
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ `True` if the request was successful.
+
+ Example:
+
+ >>> key = client.api.get_unlock_key()
+            >>> client.api.unlock_swarm(key)
+
+ """
+ if isinstance(key, dict):
+ if 'UnlockKey' not in key:
+ raise errors.InvalidArgument('Invalid unlock key format')
+ else:
+ key = {'UnlockKey': key}
+
+ url = self._url('/swarm/unlock')
+ res = self._post_json(url, data=key)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def update_node(self, node_id, version, node_spec=None):
+ """
+ Update the node's configuration
+
+ Args:
+
+ node_id (string): ID of the node to be updated.
+ version (int): The version number of the node object being
+ updated. This is required to avoid conflicting writes.
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
+ node_spec=node_spec)
+
+ """
+ url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
+ res = self._post_json(url, data=node_spec)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def update_swarm(self, version, swarm_spec=None,
+ rotate_worker_token=False,
+ rotate_manager_token=False,
+ rotate_manager_unlock_key=False):
+ """
+ Update the Swarm's configuration
+
+ Args:
+ version (int): The version number of the swarm object being
+ updated. This is required to avoid conflicting writes.
+ swarm_spec (dict): Configuration settings to update. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
+ generate a valid configuration. Default: ``None``.
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
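+
+        Example (illustrative; the version index can be read from
+        ``client.api.inspect_swarm()['Version']['Index']``):
+
+            >>> spec = client.api.create_swarm_spec(snapshot_interval=5000)
+            >>> client.api.update_swarm(version=12, swarm_spec=spec,
+            ...                         rotate_worker_token=True)
+            True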
+ """
+ url = self._url('/swarm/update')
+ params = {
+ 'rotateWorkerToken': rotate_worker_token,
+ 'rotateManagerToken': rotate_manager_token,
+ 'version': version
+ }
+ if rotate_manager_unlock_key:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Rotate manager unlock key '
+ 'is only available for API version >= 1.25'
+ )
+ params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
+
+ response = self._post_json(url, data=swarm_spec, params=params)
+ self._raise_for_status(response)
+ return True
diff --git a/contrib/python/docker/docker/api/volume.py b/contrib/python/docker/docker/api/volume.py
new file mode 100644
index 0000000000..c6c036fad0
--- /dev/null
+++ b/contrib/python/docker/docker/api/volume.py
@@ -0,0 +1,163 @@
+from .. import errors, utils
+
+
+class VolumeApiMixin:
+ def volumes(self, filters=None):
+ """
+ List volumes currently registered by the docker daemon. Similar to the
+ ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (dict): Dictionary with list of volume objects as value of the
+ ``Volumes`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.api.volumes()
+ {u'Volumes': [{u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'},
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
+ u'Name': u'baz'}]}
+ """
+
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/volumes')
+ return self._result(self._get(url, params=params), True)
+
+ def create_volume(self, name=None, driver=None, driver_opts=None,
+ labels=None):
+ """
+ Create and register a named volume
+
+ Args:
+ name (str): Name of the volume
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (dict): The created volume reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = client.api.create_volume(
+ ... name='foobar',
+ ... driver='local',
+ ... driver_opts={'foo': 'bar', 'baz': 'false'},
+ ... labels={"key": "value"},
+ ... )
+            >>> print(volume)
+ {u'Driver': u'local',
+ u'Labels': {u'key': u'value'},
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar',
+ u'Scope': u'local'}
+
+ """
+ url = self._url('/volumes/create')
+ if driver_opts is not None and not isinstance(driver_opts, dict):
+ raise TypeError('driver_opts must be a dictionary')
+
+ data = {
+ 'Name': name,
+ 'Driver': driver,
+ 'DriverOpts': driver_opts,
+ }
+
+ if labels is not None:
+ if utils.compare_version('1.23', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'volume labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ return self._result(self._post_json(url, data=data), True)
+
+ def inspect_volume(self, name):
+ """
+ Retrieve volume info by name.
+
+ Args:
+ name (str): volume name
+
+ Returns:
+ (dict): Volume information dictionary
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.api.inspect_volume('foobar')
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'}
+
+ """
+ url = self._url('/volumes/{0}', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def prune_volumes(self, filters=None):
+ """
+ Delete unused volumes
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted volume names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
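+
+        Example (illustrative values):
+
+            >>> client.api.prune_volumes(filters={'label': 'keep!=true'})
+            {'VolumesDeleted': ['foobar'], 'SpaceReclaimed': 1024}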
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/volumes/prune')
+ return self._result(self._post(url, params=params), True)
+
+ def remove_volume(self, name, force=False):
+ """
+ Remove a volume. Similar to the ``docker volume rm`` command.
+
+ Args:
+ name (str): The volume's name
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
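+
+        Example (illustrative):
+
+            >>> client.api.remove_volume('foobar', force=True)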
+ """
+ params = {}
+ if force:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force removal was introduced in API 1.25'
+ )
+ params = {'force': force}
+
+ url = self._url('/volumes/{0}', name, params=params)
+ resp = self._delete(url)
+ self._raise_for_status(resp)
diff --git a/contrib/python/docker/docker/auth.py b/contrib/python/docker/docker/auth.py
new file mode 100644
index 0000000000..96a6e3a656
--- /dev/null
+++ b/contrib/python/docker/docker/auth.py
@@ -0,0 +1,378 @@
+import base64
+import json
+import logging
+
+from . import credentials, errors
+from .utils import config
+
+INDEX_NAME = 'docker.io'
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
+TOKEN_USERNAME = '<token>'
+
+log = logging.getLogger(__name__)
+
+
+def resolve_repository_name(repo_name):
+ if '://' in repo_name:
+ raise errors.InvalidRepository(
+ f'Repository name cannot contain a scheme ({repo_name})'
+ )
+
+ index_name, remote_name = split_repo_name(repo_name)
+ if index_name[0] == '-' or index_name[-1] == '-':
+ raise errors.InvalidRepository(
+ f'Invalid index name ({index_name}). '
+ 'Cannot begin or end with a hyphen.'
+ )
+ return resolve_index_name(index_name), remote_name
+
+
+def resolve_index_name(index_name):
+ index_name = convert_to_hostname(index_name)
+ if index_name == f"index.{INDEX_NAME}":
+ index_name = INDEX_NAME
+ return index_name
+
+
+def get_config_header(client, registry):
+ log.debug('Looking for auth config')
+ if not client._auth_configs or client._auth_configs.is_empty:
+ log.debug(
+ "No auth config in memory - loading from filesystem"
+ )
+ client._auth_configs = load_config(credstore_env=client.credstore_env)
+ authcfg = resolve_authconfig(
+ client._auth_configs, registry, credstore_env=client.credstore_env
+ )
+ # Do not fail here if no authentication exists for this
+ # specific registry as we can have a readonly pull. Just
+ # put the header if we can.
+ if authcfg:
+ log.debug('Found auth config')
+ # auth_config needs to be a dict in the format used by
+ # auth.py username , password, serveraddress, email
+ return encode_header(authcfg)
+ log.debug('No auth config found')
+ return None
+
+
+def split_repo_name(repo_name):
+ parts = repo_name.split('/', 1)
+ if len(parts) == 1 or (
+ '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+ ):
+ # This is a docker index repo (ex: username/foobar or ubuntu)
+ return INDEX_NAME, repo_name
+ return tuple(parts)
+
+
+def get_credential_store(authconfig, registry):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig)
+ return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+ def __init__(self, dct, credstore_env=None):
+ if 'auths' not in dct:
+ dct['auths'] = {}
+ self.update(dct)
+ self._credstore_env = credstore_env
+ self._stores = {}
+
+ @classmethod
+ def parse_auth(cls, entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
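+
+        Example (illustrative; ``dXNlcjpwYXNz`` is the base64 encoding
+        of ``user:pass``):
+
+            >>> AuthConfig.parse_auth(
+            ...     {'https://index.docker.io/v1/': {'auth': 'dXNlcjpwYXNz'}}
+            ... )
+            {'https://index.docker.io/v1/': {'username': 'user',
+             'password': 'pass', 'email': None,
+             'serveraddress': 'https://index.docker.io/v1/'}}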
+ """
+
+ conf = {}
+ for registry, entry in entries.items():
+ if not isinstance(entry, dict):
+ log.debug(
+ f'Config entry for key {registry} is not auth config'
+ )
+ # We sometimes fall back to parsing the whole config as if it
+ # was the auth config by itself, for legacy purposes. In that
+ # case, we fail silently and return an empty conf if any of the
+ # keys is not formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ f'Invalid configuration for registry {registry}'
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug(f'Found an IdentityToken entry for registry {registry}')
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug(
+ f'Auth data for {registry} is absent. '
+ f'Client might be using a credentials store instead.'
+ )
+ conf[registry] = {}
+ continue
+
+ username, password = decode_auth(entry['auth'])
+ log.debug(
+ f'Found entry (registry={registry!r}, username={username!r})'
+ )
+
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+ @classmethod
+ def load_config(cls, config_path, config_dict, credstore_env=None):
+ """
+ Loads authentication data from a Docker configuration file in the given
+ root directory or if config_path is passed use given path.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment
+ variable > ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return cls({}, credstore_env)
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return cls(_load_legacy_config(config_file), credstore_env)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': cls.parse_auth(
+ config_dict.pop('auths'), raise_on_error=True
+ )
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return cls(res, credstore_env)
+
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret "
+ "as auth-only file"
+ )
+ return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
+
+ @property
+ def auths(self):
+ return self.get('auths', {})
+
+ @property
+ def creds_store(self):
+ return self.get('credsStore', None)
+
+ @property
+ def cred_helpers(self):
+ return self.get('credHelpers', {})
+
+ @property
+ def is_empty(self):
+ return (
+ not self.auths and not self.creds_store and not self.cred_helpers
+ )
+
+ def resolve_authconfig(self, registry=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the
+ config with full URLs are stripped down to hostnames before checking
+ for a match. Returns None if no match was found.
+ """
+
+ if self.creds_store or self.cred_helpers:
+ store_name = self.get_credential_store(registry)
+ if store_name is not None:
+ log.debug(
+ f'Using credentials store "{store_name}"'
+ )
+ cfg = self._resolve_authconfig_credstore(registry, store_name)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug(f"Looking for auth entry for {repr(registry)}")
+
+ if registry in self.auths:
+ log.debug(f"Found {repr(registry)}")
+ return self.auths[registry]
+
+ for key, conf in self.auths.items():
+ if resolve_index_name(key) == registry:
+ log.debug(f"Found {repr(key)}")
+ return conf
+
+ log.debug("No entry found")
+ return None
+
+ def _resolve_authconfig_credstore(self, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+            # The ecosystem is inconsistent about index.docker.io vs.
+            # docker.io - for the default index, the full URL is required.
+ registry = INDEX_URL
+ log.debug(f"Looking for auth entry for {repr(registry)}")
+ store = self._get_store_instance(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except credentials.CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except credentials.StoreError as e:
+ raise errors.DockerException(
+ f'Credentials store error: {repr(e)}'
+ ) from e
+
+ def _get_store_instance(self, name):
+ if name not in self._stores:
+ self._stores[name] = credentials.Store(
+ name, environment=self._credstore_env
+ )
+ return self._stores[name]
+
+ def get_credential_store(self, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = INDEX_URL
+
+ return self.cred_helpers.get(registry) or self.creds_store
+
+ def get_all_credentials(self):
+ auth_data = self.auths.copy()
+ if self.creds_store:
+ # Retrieve all credentials from the default store
+ store = self._get_store_instance(self.creds_store)
+ for k in store.list().keys():
+ auth_data[k] = self._resolve_authconfig_credstore(
+ k, self.creds_store
+ )
+ auth_data[convert_to_hostname(k)] = auth_data[k]
+
+ # credHelpers entries take priority over all others
+ for reg, store_name in self.cred_helpers.items():
+ auth_data[reg] = self._resolve_authconfig_credstore(
+ reg, store_name
+ )
+ auth_data[convert_to_hostname(reg)] = auth_data[reg]
+
+ return auth_data
+
+ def add_auth(self, reg, data):
+ self['auths'][reg] = data
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig, credstore_env)
+ return authconfig.resolve_authconfig(registry)
+
+
+def convert_to_hostname(url):
+ return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
+
+
+def decode_auth(auth):
+ if isinstance(auth, str):
+ auth = auth.encode('ascii')
+ s = base64.b64decode(auth)
+ login, pwd = s.split(b':', 1)
+ return login.decode('utf8'), pwd.decode('utf8')
+
+
+def encode_header(auth):
+ auth_json = json.dumps(auth).encode('ascii')
+ return base64.urlsafe_b64encode(auth_json)
+
+
+def parse_auth(entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ return AuthConfig.parse_auth(entries, raise_on_error)
+
+
+def load_config(config_path=None, config_dict=None, credstore_env=None):
+ return AuthConfig.load_config(config_path, config_dict, credstore_env)
+
+
+def _load_legacy_config(config_file):
+ log.debug("Attempting to parse legacy auth file format")
+ try:
+ data = []
+ with open(config_file) as f:
+ for line in f.readlines():
+ data.append(line.strip().split(' = ')[1])
+ if len(data) < 2:
+ # Not enough data
+ raise errors.InvalidConfigFile(
+ 'Invalid or empty configuration file!'
+ )
+
+ username, password = decode_auth(data[0])
+ return {'auths': {
+ INDEX_NAME: {
+ 'username': username,
+ 'password': password,
+ 'email': data[1],
+ 'serveraddress': INDEX_URL,
+ }
+ }}
+ except Exception as e:
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/contrib/python/docker/docker/client.py b/contrib/python/docker/docker/client.py
new file mode 100644
index 0000000000..9012d24c9c
--- /dev/null
+++ b/contrib/python/docker/docker/client.py
@@ -0,0 +1,222 @@
+from .api.client import APIClient
+from .constants import DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS
+from .models.configs import ConfigCollection
+from .models.containers import ContainerCollection
+from .models.images import ImageCollection
+from .models.networks import NetworkCollection
+from .models.nodes import NodeCollection
+from .models.plugins import PluginCollection
+from .models.secrets import SecretCollection
+from .models.services import ServiceCollection
+from .models.swarm import Swarm
+from .models.volumes import VolumeCollection
+from .utils import kwargs_from_env
+
+
+class DockerClient:
+ """
+ A client for communicating with a Docker server.
+
+ Example:
+
+ >>> import docker
+        >>> client = docker.DockerClient(base_url='unix:///var/run/docker.sock')
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+            automatically detect the server's version. Default: ``1.44``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
+ """
+ def __init__(self, *args, **kwargs):
+ self.api = APIClient(*args, **kwargs)
+
+ @classmethod
+ def from_env(cls, **kwargs):
+ """
+ Return a client configured from environment variables.
+
+ The environment variables used are the same as those used by the
+ Docker command-line client. They are:
+
+ .. envvar:: DOCKER_HOST
+
+ The URL to the Docker host.
+
+ .. envvar:: DOCKER_TLS_VERIFY
+
+ Verify the host against a CA certificate.
+
+ .. envvar:: DOCKER_CERT_PATH
+
+ A path to a directory containing TLS certificates to use when
+ connecting to the Docker host.
+
+ Args:
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``auto``
+ timeout (int): Default timeout for API calls, in seconds.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
+ environment (dict): The environment to read environment variables
+ from. Default: the value of ``os.environ``
+ credstore_env (dict): Override environment variables when calling
+ the credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is
+ made via shelling out to the ssh client. Ensure the ssh
+ client is installed and configured on the host.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.from_env()
+
+ """
+ timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
+ max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
+ version = kwargs.pop('version', None)
+ use_ssh_client = kwargs.pop('use_ssh_client', False)
+ return cls(
+ timeout=timeout,
+ max_pool_size=max_pool_size,
+ version=version,
+ use_ssh_client=use_ssh_client,
+ **kwargs_from_env(**kwargs)
+ )
+
+ # Resources
+ @property
+ def configs(self):
+ """
+ An object for managing configs on the server. See the
+ :doc:`configs documentation <configs>` for full details.
+ """
+ return ConfigCollection(client=self)
+
+ @property
+ def containers(self):
+ """
+ An object for managing containers on the server. See the
+ :doc:`containers documentation <containers>` for full details.
+ """
+ return ContainerCollection(client=self)
+
+ @property
+ def images(self):
+ """
+ An object for managing images on the server. See the
+ :doc:`images documentation <images>` for full details.
+ """
+ return ImageCollection(client=self)
+
+ @property
+ def networks(self):
+ """
+ An object for managing networks on the server. See the
+ :doc:`networks documentation <networks>` for full details.
+ """
+ return NetworkCollection(client=self)
+
+ @property
+ def nodes(self):
+ """
+ An object for managing nodes on the server. See the
+ :doc:`nodes documentation <nodes>` for full details.
+ """
+ return NodeCollection(client=self)
+
+ @property
+ def plugins(self):
+ """
+ An object for managing plugins on the server. See the
+ :doc:`plugins documentation <plugins>` for full details.
+ """
+ return PluginCollection(client=self)
+
+ @property
+ def secrets(self):
+ """
+ An object for managing secrets on the server. See the
+ :doc:`secrets documentation <secrets>` for full details.
+ """
+ return SecretCollection(client=self)
+
+ @property
+ def services(self):
+ """
+ An object for managing services on the server. See the
+ :doc:`services documentation <services>` for full details.
+ """
+ return ServiceCollection(client=self)
+
+ @property
+ def swarm(self):
+ """
+ An object for managing a swarm on the server. See the
+ :doc:`swarm documentation <swarm>` for full details.
+ """
+ return Swarm(client=self)
+
+ @property
+ def volumes(self):
+ """
+ An object for managing volumes on the server. See the
+ :doc:`volumes documentation <volumes>` for full details.
+ """
+ return VolumeCollection(client=self)
+
+ # Top-level methods
+ def events(self, *args, **kwargs):
+ return self.api.events(*args, **kwargs)
+ events.__doc__ = APIClient.events.__doc__
+
+ def df(self):
+ return self.api.df()
+ df.__doc__ = APIClient.df.__doc__
+
+ def info(self, *args, **kwargs):
+ return self.api.info(*args, **kwargs)
+ info.__doc__ = APIClient.info.__doc__
+
+ def login(self, *args, **kwargs):
+ return self.api.login(*args, **kwargs)
+ login.__doc__ = APIClient.login.__doc__
+
+ def ping(self, *args, **kwargs):
+ return self.api.ping(*args, **kwargs)
+ ping.__doc__ = APIClient.ping.__doc__
+
+ def version(self, *args, **kwargs):
+ return self.api.version(*args, **kwargs)
+ version.__doc__ = APIClient.version.__doc__
+
+ def close(self):
+ return self.api.close()
+ close.__doc__ = APIClient.close.__doc__
+
+ def __getattr__(self, name):
+ s = [f"'DockerClient' object has no attribute '{name}'"]
+        # If a user calls a method that only exists on APIClient,
+        # point them at the low-level API instead.
+ if hasattr(APIClient, name):
+ s.append("In Docker SDK for Python 2.0, this method is now on the "
+ "object APIClient. See the low-level API section of the "
+ "documentation for more details.")
+ raise AttributeError(' '.join(s))
+
+
+from_env = DockerClient.from_env
diff --git a/contrib/python/docker/docker/constants.py b/contrib/python/docker/docker/constants.py
new file mode 100644
index 0000000000..3c527b47e3
--- /dev/null
+++ b/contrib/python/docker/docker/constants.py
@@ -0,0 +1,45 @@
+import sys
+
+from .version import __version__
+
+DEFAULT_DOCKER_API_VERSION = '1.44'
+MINIMUM_DOCKER_API_VERSION = '1.24'
+DEFAULT_TIMEOUT_SECONDS = 60
+STREAM_HEADER_SIZE_BYTES = 8
+CONTAINER_LIMITS_KEYS = [
+ 'memory', 'memswap', 'cpushares', 'cpusetcpus'
+]
+
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+
+INSECURE_REGISTRY_DEPRECATION_WARNING = \
+ 'The `insecure_registry` argument to {} ' \
+ 'is deprecated and non-functional. Please remove it.'
+
+IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
+
+DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}"
+DEFAULT_NUM_POOLS = 25
+
+# The OpenSSH server default value for MaxSessions is 10 which means we can
+# use up to 9, leaving the final session for the underlying SSH connection.
+# For more details see: https://github.com/docker/docker-py/issues/2246
+DEFAULT_NUM_POOLS_SSH = 9
+
+DEFAULT_MAX_POOL_SIZE = 10
+
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
diff --git a/contrib/python/docker/docker/context/__init__.py b/contrib/python/docker/docker/context/__init__.py
new file mode 100644
index 0000000000..46d462b0cf
--- /dev/null
+++ b/contrib/python/docker/docker/context/__init__.py
@@ -0,0 +1,2 @@
+from .api import ContextAPI
+from .context import Context
diff --git a/contrib/python/docker/docker/context/api.py b/contrib/python/docker/docker/context/api.py
new file mode 100644
index 0000000000..9ac4ff470a
--- /dev/null
+++ b/contrib/python/docker/docker/context/api.py
@@ -0,0 +1,206 @@
+import json
+import os
+
+from docker import errors
+
+from .config import (
+ METAFILE,
+ get_current_context_name,
+ get_meta_dir,
+ write_context_name_to_docker_config,
+)
+from .context import Context
+
+
+class ContextAPI:
+ """Context API.
+ Contains methods for context management:
+ create, list, remove, get, inspect.
+ """
+ DEFAULT_CONTEXT = Context("default", "swarm")
+
+ @classmethod
+ def create_context(
+ cls, name, orchestrator=None, host=None, tls_cfg=None,
+ default_namespace=None, skip_tls_verify=False):
+ """Creates a new context.
+ Returns:
+ (Context): a Context object.
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextAlreadyExists`
+ If a context with the name already exists.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.create_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ '"default" is a reserved context name')
+ ctx = Context.load_context(name)
+ if ctx:
+ raise errors.ContextAlreadyExists(name)
+ endpoint = "docker"
+ if orchestrator and orchestrator != "swarm":
+ endpoint = orchestrator
+ ctx = Context(name, orchestrator)
+ ctx.set_endpoint(
+ endpoint, host, tls_cfg,
+ skip_tls_verify=skip_tls_verify,
+ def_namespace=default_namespace)
+ ctx.save()
+ return ctx
+
+ @classmethod
+ def get_context(cls, name=None):
+ """Retrieves a context object.
+ Args:
+ name (str): The name of the context
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.get_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ name = get_current_context_name()
+ if name == "default":
+ return cls.DEFAULT_CONTEXT
+ return Context.load_context(name)
+
+ @classmethod
+ def contexts(cls):
+ """Context list.
+ Returns:
+ (Context): List of context objects.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ names = []
+ for dirname, dirnames, fnames in os.walk(get_meta_dir()):
+ for filename in fnames + dirnames:
+ if filename == METAFILE:
+ try:
+                        with open(os.path.join(dirname, filename)) as f:
+                            data = json.load(f)
+                        names.append(data["Name"])
+ except Exception as e:
+ raise errors.ContextException(
+ f"Failed to load metafile {filename}: {e}",
+ ) from e
+
+ contexts = [cls.DEFAULT_CONTEXT]
+ for name in names:
+ contexts.append(Context.load_context(name))
+ return contexts
+
+ @classmethod
+ def get_current_context(cls):
+ """Get current context.
+ Returns:
+ (Context): current context object.
+ """
+ return cls.get_context()
+
+ @classmethod
+ def set_current_context(cls, name="default"):
+ ctx = cls.get_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ err = write_context_name_to_docker_config(name)
+ if err:
+ raise errors.ContextException(
+ f'Failed to set current context: {err}')
+
+ @classmethod
+ def remove_context(cls, name):
+ """Remove a context. Similar to the ``docker context rm`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ContextAPI.remove_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ 'context "default" cannot be removed')
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+ if name == get_current_context_name():
+ write_context_name_to_docker_config(None)
+ ctx.remove()
+
+ @classmethod
+ def inspect_context(cls, name="default"):
+ """Remove a context. Similar to the ``docker context inspect`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+            >>> ContextAPI.inspect_context(name='test')
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ return cls.DEFAULT_CONTEXT()
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ return ctx()
diff --git a/contrib/python/docker/docker/context/config.py b/contrib/python/docker/docker/context/config.py
new file mode 100644
index 0000000000..5a6373aa4e
--- /dev/null
+++ b/contrib/python/docker/docker/context/config.py
@@ -0,0 +1,81 @@
+import hashlib
+import json
+import os
+
+from docker import utils
+from docker.constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
+from docker.utils.config import find_config_file
+
+METAFILE = "meta.json"
+
+
+def get_current_context_name():
+ name = "default"
+ docker_cfg_path = find_config_file()
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ name = json.load(f).get("currentContext", "default")
+ except Exception:
+ return "default"
+ return name
+
+
+def write_context_name_to_docker_config(name=None):
+ if name == 'default':
+ name = None
+ docker_cfg_path = find_config_file()
+ config = {}
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ config = json.load(f)
+ except Exception as e:
+ return e
+ current_context = config.get("currentContext", None)
+ if current_context and not name:
+ del config["currentContext"]
+ elif name:
+ config["currentContext"] = name
+ else:
+ return
+ try:
+ with open(docker_cfg_path, "w") as f:
+ json.dump(config, f, indent=4)
+ except Exception as e:
+ return e
+
+
+def get_context_id(name):
+ return hashlib.sha256(name.encode('utf-8')).hexdigest()
+
+
+def get_context_dir():
+ return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
+
+
+def get_meta_dir(name=None):
+ meta_dir = os.path.join(get_context_dir(), "meta")
+ if name:
+ return os.path.join(meta_dir, get_context_id(name))
+ return meta_dir
+
+
+def get_meta_file(name):
+ return os.path.join(get_meta_dir(name), METAFILE)
+
+
+def get_tls_dir(name=None, endpoint=""):
+ context_dir = get_context_dir()
+ if name:
+ return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
+ return os.path.join(context_dir, "tls")
+
+
+def get_context_host(path=None, tls=False):
+ host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
+ if host == DEFAULT_UNIX_SOCKET:
+ # remove http+ from default docker socket url
+ if host.startswith("http+"):
+ host = host[5:]
+ return host
diff --git a/contrib/python/docker/docker/context/context.py b/contrib/python/docker/docker/context/context.py
new file mode 100644
index 0000000000..da17d94781
--- /dev/null
+++ b/contrib/python/docker/docker/context/context.py
@@ -0,0 +1,249 @@
+import json
+import os
+from shutil import copyfile, rmtree
+
+from docker.errors import ContextException
+from docker.tls import TLSConfig
+
+from .config import (
+ get_context_host,
+ get_meta_dir,
+ get_meta_file,
+ get_tls_dir,
+)
+
+
+class Context:
+ """A context."""
+
+ def __init__(self, name, orchestrator=None, host=None, endpoints=None,
+ tls=False):
+ if not name:
+ raise Exception("Name not provided")
+ self.name = name
+ self.context_type = None
+ self.orchestrator = orchestrator
+ self.endpoints = {}
+ self.tls_cfg = {}
+ self.meta_path = "IN MEMORY"
+ self.tls_path = "IN MEMORY"
+
+ if not endpoints:
+ # set default docker endpoint if no endpoint is set
+ default_endpoint = "docker" if (
+ not orchestrator or orchestrator == "swarm"
+ ) else orchestrator
+
+ self.endpoints = {
+ default_endpoint: {
+ "Host": get_context_host(host, tls),
+ "SkipTLSVerify": not tls
+ }
+ }
+ return
+
+ # check docker endpoints
+ for k, v in endpoints.items():
+ if not isinstance(v, dict):
+ # unknown format
+ raise ContextException(
+ f"Unknown endpoint format for context {name}: {v}",
+ )
+
+ self.endpoints[k] = v
+ if k != "docker":
+ continue
+
+ self.endpoints[k]["Host"] = v.get("Host", get_context_host(
+ host, tls))
+ self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
+ "SkipTLSVerify", not tls))
+
+ def set_endpoint(
+ self, name="docker", host=None, tls_cfg=None,
+ skip_tls_verify=False, def_namespace=None):
+ self.endpoints[name] = {
+ "Host": get_context_host(host, not skip_tls_verify),
+ "SkipTLSVerify": skip_tls_verify
+ }
+ if def_namespace:
+ self.endpoints[name]["DefaultNamespace"] = def_namespace
+
+ if tls_cfg:
+ self.tls_cfg[name] = tls_cfg
+
+ def inspect(self):
+ return self.__call__()
+
+ @classmethod
+ def load_context(cls, name):
+ meta = Context._load_meta(name)
+ if meta:
+ instance = cls(
+ meta["Name"],
+ orchestrator=meta["Metadata"].get("StackOrchestrator", None),
+ endpoints=meta.get("Endpoints", None))
+ instance.context_type = meta["Metadata"].get("Type", None)
+ instance._load_certs()
+ instance.meta_path = get_meta_dir(name)
+ return instance
+ return None
+
+ @classmethod
+ def _load_meta(cls, name):
+ meta_file = get_meta_file(name)
+ if not os.path.isfile(meta_file):
+ return None
+
+ metadata = {}
+ try:
+ with open(meta_file) as f:
+ metadata = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # unknown format
+ raise Exception(
+ f"Detected corrupted meta file for context {name} : {e}"
+ ) from e
+
+ # for docker endpoints, set defaults for
+ # Host and SkipTLSVerify fields
+ for k, v in metadata["Endpoints"].items():
+ if k != "docker":
+ continue
+ metadata["Endpoints"][k]["Host"] = v.get(
+ "Host", get_context_host(None, False))
+ metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
+ v.get("SkipTLSVerify", True))
+
+ return metadata
+
+ def _load_certs(self):
+ certs = {}
+ tls_dir = get_tls_dir(self.name)
+ for endpoint in self.endpoints.keys():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ continue
+ ca_cert = None
+ cert = None
+ key = None
+ for filename in os.listdir(os.path.join(tls_dir, endpoint)):
+ if filename.startswith("ca"):
+ ca_cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("cert"):
+ cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("key"):
+ key = os.path.join(tls_dir, endpoint, filename)
+ if all([ca_cert, cert, key]):
+ verify = None
+ if endpoint == "docker" and not self.endpoints["docker"].get(
+ "SkipTLSVerify", False):
+ verify = True
+ certs[endpoint] = TLSConfig(
+ client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
+ self.tls_cfg = certs
+ self.tls_path = tls_dir
+
+ def save(self):
+ meta_dir = get_meta_dir(self.name)
+ if not os.path.isdir(meta_dir):
+ os.makedirs(meta_dir)
+ with open(get_meta_file(self.name), "w") as f:
+ f.write(json.dumps(self.Metadata))
+
+ tls_dir = get_tls_dir(self.name)
+ for endpoint, tls in self.tls_cfg.items():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ os.makedirs(os.path.join(tls_dir, endpoint))
+
+ ca_file = tls.ca_cert
+ if ca_file:
+ copyfile(ca_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(ca_file)))
+
+ if tls.cert:
+ cert_file, key_file = tls.cert
+ copyfile(cert_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(cert_file)))
+ copyfile(key_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(key_file)))
+
+ self.meta_path = get_meta_dir(self.name)
+ self.tls_path = get_tls_dir(self.name)
+
+ def remove(self):
+ if os.path.isdir(self.meta_path):
+ rmtree(self.meta_path)
+ if os.path.isdir(self.tls_path):
+ rmtree(self.tls_path)
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ def __str__(self):
+ return json.dumps(self.__call__(), indent=2)
+
+ def __call__(self):
+ result = self.Metadata
+ result.update(self.TLSMaterial)
+ result.update(self.Storage)
+ return result
+
+ def is_docker_host(self):
+ return self.context_type is None
+
+ @property
+ def Name(self):
+ return self.name
+
+ @property
+ def Host(self):
+ if not self.orchestrator or self.orchestrator == "swarm":
+ endpoint = self.endpoints.get("docker", None)
+ if endpoint:
+ return endpoint.get("Host", None)
+ return None
+
+ return self.endpoints[self.orchestrator].get("Host", None)
+
+ @property
+ def Orchestrator(self):
+ return self.orchestrator
+
+ @property
+ def Metadata(self):
+ meta = {}
+ if self.orchestrator:
+ meta = {"StackOrchestrator": self.orchestrator}
+ return {
+ "Name": self.name,
+ "Metadata": meta,
+ "Endpoints": self.endpoints
+ }
+
+ @property
+ def TLSConfig(self):
+ key = self.orchestrator
+ if not key or key == "swarm":
+ key = "docker"
+ if key in self.tls_cfg.keys():
+ return self.tls_cfg[key]
+ return None
+
+ @property
+ def TLSMaterial(self):
+ certs = {}
+ for endpoint, tls in self.tls_cfg.items():
+ cert, key = tls.cert
+ certs[endpoint] = list(
+ map(os.path.basename, [tls.ca_cert, cert, key]))
+ return {
+ "TLSMaterial": certs
+ }
+
+ @property
+ def Storage(self):
+ return {
+ "Storage": {
+ "MetadataPath": self.meta_path,
+ "TLSPath": self.tls_path
+ }}
diff --git a/contrib/python/docker/docker/credentials/__init__.py b/contrib/python/docker/docker/credentials/__init__.py
new file mode 100644
index 0000000000..80d19e7986
--- /dev/null
+++ b/contrib/python/docker/docker/credentials/__init__.py
@@ -0,0 +1,8 @@
+from .constants import (
+ DEFAULT_LINUX_STORE,
+ DEFAULT_OSX_STORE,
+ DEFAULT_WIN32_STORE,
+ PROGRAM_PREFIX,
+)
+from .errors import CredentialsNotFound, StoreError
+from .store import Store
diff --git a/contrib/python/docker/docker/credentials/constants.py b/contrib/python/docker/docker/credentials/constants.py
new file mode 100644
index 0000000000..6a82d8da42
--- /dev/null
+++ b/contrib/python/docker/docker/credentials/constants.py
@@ -0,0 +1,4 @@
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'
diff --git a/contrib/python/docker/docker/credentials/errors.py b/contrib/python/docker/docker/credentials/errors.py
new file mode 100644
index 0000000000..d059fd9fbb
--- /dev/null
+++ b/contrib/python/docker/docker/credentials/errors.py
@@ -0,0 +1,17 @@
+class StoreError(RuntimeError):
+ pass
+
+
+class CredentialsNotFound(StoreError):
+ pass
+
+
+class InitializationError(StoreError):
+ pass
+
+
+def process_store_error(cpe, program):
+ message = cpe.output.decode('utf-8')
+ if 'credentials not found in native keychain' in message:
+ return CredentialsNotFound(f'No matching credentials in {program}')
+ return StoreError(f'Credentials store {program} exited with "{message}".')
diff --git a/contrib/python/docker/docker/credentials/store.py b/contrib/python/docker/docker/credentials/store.py
new file mode 100644
index 0000000000..00d693a4be
--- /dev/null
+++ b/contrib/python/docker/docker/credentials/store.py
@@ -0,0 +1,93 @@
+import errno
+import json
+import shutil
+import subprocess
+import warnings
+
+from . import constants, errors
+from .utils import create_environment_dict
+
+
+class Store:
+ def __init__(self, program, environment=None):
+ """ Create a store object that acts as an interface to
+ perform the basic operations for storing, retrieving
+ and erasing credentials using `program`.
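+
+        A minimal usage sketch (assumes the matching
+        ``docker-credential-*`` helper binary is installed and on PATH):
+
+            store = Store('secretservice')
+            store.store('https://index.docker.io/v1/', 'user', 'secret')
+            creds = store.get('https://index.docker.io/v1/')
+            store.erase('https://index.docker.io/v1/')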
+ """
+ self.program = constants.PROGRAM_PREFIX + program
+ self.exe = shutil.which(self.program)
+ self.environment = environment
+ if self.exe is None:
+ warnings.warn(
+ f'{self.program} not installed or not available in PATH',
+ stacklevel=1,
+ )
+
+ def get(self, server):
+ """ Retrieve credentials for `server`. If no credentials are found,
+ a `StoreError` will be raised.
+ """
+ if not isinstance(server, bytes):
+ server = server.encode('utf-8')
+ data = self._execute('get', server)
+ result = json.loads(data.decode('utf-8'))
+
+        # docker-credential-pass will return an object for nonexistent servers
+ # whereas other helpers will exit with returncode != 0. For
+ # consistency, if no significant data is returned,
+ # raise CredentialsNotFound
+ if result['Username'] == '' and result['Secret'] == '':
+ raise errors.CredentialsNotFound(
+ f'No matching credentials in {self.program}'
+ )
+
+ return result
+
+ def store(self, server, username, secret):
+ """ Store credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ data_input = json.dumps({
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret
+ }).encode('utf-8')
+ return self._execute('store', data_input)
+
+ def erase(self, server):
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ if not isinstance(server, bytes):
+ server = server.encode('utf-8')
+ self._execute('erase', server)
+
+ def list(self):
+ """ List stored credentials. Requires v0.4.0+ of the helper.
+ """
+ data = self._execute('list', None)
+ return json.loads(data.decode('utf-8'))
+
+ def _execute(self, subcmd, data_input):
+ if self.exe is None:
+ raise errors.StoreError(
+ f'{self.program} not installed or not available in PATH'
+ )
+ output = None
+ env = create_environment_dict(self.environment)
+ try:
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
+ except subprocess.CalledProcessError as e:
+ raise errors.process_store_error(e, self.program) from e
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise errors.StoreError(
+ f'{self.program} not installed or not available in PATH'
+ ) from e
+ else:
+ raise errors.StoreError(
+ f'Unexpected OS error "{e.strerror}", errno={e.errno}'
+ ) from e
+ return output
diff --git a/contrib/python/docker/docker/credentials/utils.py b/contrib/python/docker/docker/credentials/utils.py
new file mode 100644
index 0000000000..5c83d05cfb
--- /dev/null
+++ b/contrib/python/docker/docker/credentials/utils.py
@@ -0,0 +1,10 @@
+import os
+
+
+def create_environment_dict(overrides):
+ """
+ Create and return a copy of os.environ with the specified overrides
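+
+    Example (illustrative):
+
+        >>> env = create_environment_dict({'DOCKER_HOST': 'ssh://example'})
+        >>> env['DOCKER_HOST']
+        'ssh://example'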
+ """
+ result = os.environ.copy()
+ result.update(overrides or {})
+ return result
diff --git a/contrib/python/docker/docker/errors.py b/contrib/python/docker/docker/errors.py
new file mode 100644
index 0000000000..d03e10f693
--- /dev/null
+++ b/contrib/python/docker/docker/errors.py
@@ -0,0 +1,209 @@
+import requests
+
+_image_not_found_explanation_fragments = frozenset(
+ fragment.lower() for fragment in [
+ 'no such image',
+ 'not found: does not exist or no pull access',
+ 'repository does not exist',
+ 'was found but does not match the specified platform',
+ ]
+)
+
+
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = (response.text or '').strip()
+ cls = APIError
+ if response.status_code == 404:
+ explanation_msg = (explanation or '').lower()
+ if any(fragment in explanation_msg
+ for fragment in _image_not_found_explanation_fragments):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise cls(e, response=response, explanation=explanation) from e
+
+
+class APIError(requests.exceptions.HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
+ # requests 1.2 supports response as a keyword argument, but
+ # requests 1.1 doesn't
+ super().__init__(message)
+ self.response = response
+ self.explanation = explanation
+
+ def __str__(self):
+ message = super().__str__()
+
+ if self.is_client_error():
+ message = (
+ f'{self.response.status_code} Client Error for '
+ f'{self.response.url}: {self.response.reason}'
+ )
+
+ elif self.is_server_error():
+ message = (
+ f'{self.response.status_code} Server Error for '
+ f'{self.response.url}: {self.response.reason}'
+ )
+
+ if self.explanation:
+ message = f'{message} ("{self.explanation}")'
+
+ return message
+
+ @property
+ def status_code(self):
+ if self.response is not None:
+ return self.response.status_code
+
+ def is_error(self):
+ return self.is_client_error() or self.is_server_error()
+
+ def is_client_error(self):
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
+
+ def is_server_error(self):
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
+
+
+class NotFound(APIError):
+ pass
+
+
+class ImageNotFound(NotFound):
+ pass
+
+
+class InvalidVersion(DockerException):
+ pass
+
+
+class InvalidRepository(DockerException):
+ pass
+
+
+class InvalidConfigFile(DockerException):
+ pass
+
+
+class InvalidArgument(DockerException):
+ pass
+
+
+class DeprecatedMethod(DockerException):
+ pass
+
+
+class TLSParameterError(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg + (". TLS configurations should map the Docker CLI "
+ "client configurations. See "
+ "https://docs.docker.com/engine/articles/https/ "
+ "for API details.")
+
+
+class NullResource(DockerException, ValueError):
+ pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+
+ err = f": {stderr}" if stderr is not None else ""
+ super().__init__(
+ f"Command '{command}' in image '{image}' "
+ f"returned non-zero exit status {exit_status}{err}"
+ )
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(DockerException):
+ def __init__(self, reason, build_log):
+ super().__init__(reason)
+ self.msg = reason
+ self.build_log = build_log
+
+
+class ImageLoadError(DockerException):
+ pass
+
+
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
+ text = [f"{name}() "]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
+
+
+class MissingContextParameter(DockerException):
+ def __init__(self, param):
+ self.param = param
+
+ def __str__(self):
+ return (f"missing parameter: {self.param}")
+
+
+class ContextAlreadyExists(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return (f"context {self.name} already exists")
+
+
+class ContextException(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return (self.msg)
+
+
+class ContextNotFound(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return (f"context '{self.name}' not found")
diff --git a/contrib/python/docker/docker/models/__init__.py b/contrib/python/docker/docker/models/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/docker/docker/models/__init__.py
diff --git a/contrib/python/docker/docker/models/configs.py b/contrib/python/docker/docker/models/configs.py
new file mode 100644
index 0000000000..4eba87f4e3
--- /dev/null
+++ b/contrib/python/docker/docker/models/configs.py
@@ -0,0 +1,70 @@
+from ..api import APIClient
+from .resource import Collection, Model
+
+
+class Config(Model):
+ """A config."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this config.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If config failed to remove.
+ """
+ return self.client.api.remove_config(self.id)
+
+
+class ConfigCollection(Collection):
+ """Configs on the Docker server."""
+ model = Config
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_config(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_config.__doc__
+
+ def get(self, config_id):
+ """
+ Get a config.
+
+ Args:
+ config_id (str): Config ID.
+
+ Returns:
+ (:py:class:`Config`): The config.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the config does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_config(config_id))
+
+ def list(self, **kwargs):
+ """
+ List configs. Similar to the ``docker config ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Config`): The configs.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.configs(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/contrib/python/docker/docker/models/containers.py b/contrib/python/docker/docker/models/containers.py
new file mode 100644
index 0000000000..4795523a15
--- /dev/null
+++ b/contrib/python/docker/docker/models/containers.py
@@ -0,0 +1,1197 @@
+import copy
+import ntpath
+from collections import namedtuple
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import (
+ ContainerError,
+ DockerException,
+ ImageNotFound,
+ NotFound,
+ create_unexpected_kwargs_error,
+)
+from ..types import HostConfig, NetworkingConfig
+from ..utils import version_gte
+from .images import Image
+from .resource import Collection, Model
+
+
+class Container(Model):
+ """ Local representation of a container object. Detailed configuration may
+ be accessed through the :py:attr:`attrs` attribute. Note that local
+ attributes are cached; users may call :py:meth:`reload` to
+ query the Docker daemon for the current properties, causing
+ :py:attr:`attrs` to be refreshed.
+ """
+
+ @property
+ def name(self):
+ """
+ The name of the container.
+ """
+ if self.attrs.get('Name') is not None:
+ return self.attrs['Name'].lstrip('/')
+
+ @property
+ def image(self):
+ """
+ The image of the container.
+ """
+ image_id = self.attrs.get('ImageID', self.attrs['Image'])
+ if image_id is None:
+ return None
+ return self.client.images.get(image_id.split(':')[1])
+
+ @property
+ def labels(self):
+ """
+ The labels of a container as dictionary.
+ """
+ try:
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+ except KeyError as ke:
+ raise DockerException(
+ 'Label data is not available for sparse objects. Call reload()'
+ ' to retrieve all information'
+ ) from ke
+
+ @property
+ def status(self):
+ """
+ The status of the container. For example, ``running``, or ``exited``.
+ """
+ if isinstance(self.attrs['State'], dict):
+ return self.attrs['State']['Status']
+ return self.attrs['State']
+
+ @property
+ def health(self):
+ """
+ The healthcheck status of the container.
+
+        For example, ``healthy``, or ``unhealthy``.
+ """
+ return self.attrs.get('State', {}).get('Health', {}).get('Status', 'unknown')
+
+ @property
+ def ports(self):
+ """
+ The ports that the container exposes as a dictionary.
+ """
+ return self.attrs.get('NetworkSettings', {}).get('Ports', {})
+
+ def attach(self, **kwargs):
+ """
+ Attach to this container.
+
+ :py:meth:`logs` is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach(self.id, **kwargs)
+
+ def attach_socket(self, **kwargs):
+ """
+ Like :py:meth:`attach`, but returns the underlying socket-like object
+ for the HTTP request.
+
+ Args:
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach_socket(self.id, **kwargs)
+
+ def commit(self, repository=None, tag=None, **kwargs):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ pause (bool): Whether to pause the container before committing
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
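+
+        Example (illustrative repository name):
+
+            >>> image = container.commit(repository='myrepo/myimage',
+            ...                          tag='v1')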
+ """
+
+ resp = self.client.api.commit(self.id, repository=repository, tag=tag,
+ **kwargs)
+ return self.client.images.get(resp['Id'])
+
+ def diff(self):
+ """
+ Inspect changes on a container's filesystem.
+
+ Returns:
+ (list) A list of dictionaries containing the attributes `Path`
+ and `Kind`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.diff(self.id)
+
+ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
+ privileged=False, user='', detach=False, stream=False,
+ socket=False, environment=None, workdir=None, demux=False):
+ """
+ Run a command inside this container. Similar to
+ ``docker exec``.
+
+ Args:
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+            tty (bool): Allocate a pseudo-TTY. Default: ``False``
+            privileged (bool): Run as privileged.
+            user (str): User to execute command as. Default: root
+            detach (bool): If true, detach from the exec command.
+                Default: ``False``
+            stream (bool): Stream response data. Default: ``False``
+            socket (bool): Return the connection socket to allow custom
+                read/write operations. Default: ``False``
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+ demux (bool): Return stdout and stderr separately
+
+ Returns:
+ (ExecResult): A tuple of (exit_code, output)
+ exit_code: (int):
+ Exit code for the executed command or ``None`` if
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator, bytes, or tuple):
+ If ``stream=True``, a generator yielding response chunks.
+ If ``socket=True``, a socket object for the connection.
+ If ``demux=True``, a tuple of two bytes: stdout and stderr.
+ A bytestring containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
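+
+        Example (illustrative; assumes a running ``container``):
+
+            >>> exit_code, output = container.exec_run('echo hello')
+            >>> exit_code, output
+            (0, b'hello\n')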
+ """
+ resp = self.client.api.exec_create(
+ self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
+ privileged=privileged, user=user, environment=environment,
+ workdir=workdir,
+ )
+ exec_output = self.client.api.exec_start(
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
+ demux=demux
+ )
+ if socket or stream:
+ return ExecResult(None, exec_output)
+
+ return ExecResult(
+ self.client.api.exec_inspect(resp['Id'])['ExitCode'],
+ exec_output
+ )
+
+ def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Export the contents of the container's filesystem as a tar archive.
+
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+            (generator): A raw stream of the filesystem tar archive.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
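+
+        Example (an illustrative sketch; the output filename is an
+        arbitrary placeholder):
+
+            >>> with open('rootfs.tar', 'wb') as f:
+            ...     for chunk in container.export():
+            ...         f.write(chunk)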
+ """
+ return self.client.api.export(self.id, chunk_size)
+
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
+ """
+ Retrieve a file or folder from the container in the form of a tar
+ archive.
+
+ Args:
+ path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> f = open('./sh_bin.tar', 'wb')
+ >>> bits, stat = container.get_archive('/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
+ """
+ return self.client.api.get_archive(self.id, path,
+ chunk_size, encode_stream)
+
+ def kill(self, signal=None):
+ """
+ Kill or send a signal to the container.
+
+ Args:
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self.client.api.kill(self.id, signal=signal)
+
+ def logs(self, **kwargs):
+ """
+ Get logs from this container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
+            tail (str or int): Output specified number of lines at the end of
+                logs. Either an integer number of lines or the string
+                ``all``. Default ``all``
+ since (datetime, int, or float): Show logs since a given datetime,
+ integer epoch (in seconds) or float (in nanoseconds)
+ follow (bool): Follow log output. Default ``False``
+ until (datetime, int, or float): Show logs that occurred before
+ the given datetime, integer epoch (in seconds), or
+ float (in nanoseconds)
+
+ Returns:
+ (generator of bytes or bytes): Logs from the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
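+
+        Example (an illustrative sketch; assumes a logging driver that
+        supports reading, such as ``json-file``):
+
+            >>> tail = container.logs(tail=10)  # last ten lines, as bytes
+            >>> for line in container.logs(stream=True, follow=True):
+            ...     print(line.decode('utf-8'), end='')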
+ """
+ return self.client.api.logs(self.id, **kwargs)
+
+ def pause(self):
+ """
+        Pause all processes within this container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.pause(self.id)
+
+ def put_archive(self, path, data):
+ """
+ Insert a file or folder in this container using a tar archive as
+ source.
+
+ Args:
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes or stream): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
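+
+        Example (an illustrative sketch; builds a one-file tar archive in
+        memory and extracts it into ``/tmp`` inside the container):
+
+            >>> import io, tarfile
+            >>> buf = io.BytesIO()
+            >>> with tarfile.open(fileobj=buf, mode='w') as tar:
+            ...     data = b'hello\\n'
+            ...     info = tarfile.TarInfo(name='hello.txt')
+            ...     info.size = len(data)
+            ...     tar.addfile(info, io.BytesIO(data))
+            >>> container.put_archive('/tmp', buf.getvalue())
+            True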
+ """
+ return self.client.api.put_archive(self.id, path, data)
+
+ def remove(self, **kwargs):
+ """
+ Remove this container. Similar to the ``docker rm`` command.
+
+ Args:
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_container(self.id, **kwargs)
+
+ def rename(self, name):
+ """
+ Rename this container. Similar to the ``docker rename`` command.
+
+ Args:
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.rename(self.id, name)
+
+ def resize(self, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.resize(self.id, height, width)
+
+ def restart(self, **kwargs):
+ """
+ Restart this container. Similar to the ``docker restart`` command.
+
+ Args:
+            timeout (int): Number of seconds to wait for the container to
+                stop before killing it. Once killed, the container is then
+                restarted. Default: 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.restart(self.id, **kwargs)
+
+ def start(self, **kwargs):
+ """
+ Start this container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.start(self.id, **kwargs)
+
+ def stats(self, **kwargs):
+ """
+ Stream statistics for this container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
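+
+        Example (an illustrative sketch; the exact keys in the result
+        depend on the daemon platform and version):
+
+            >>> stats = container.stats(stream=False)
+            >>> mem_usage = stats['memory_stats'].get('usage')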
+ """
+ return self.client.api.stats(self.id, **kwargs)
+
+ def stop(self, **kwargs):
+ """
+        Stop this container. Similar to the ``docker stop`` command.
+
+ Args:
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stop(self.id, **kwargs)
+
+ def top(self, **kwargs):
+ """
+ Display the running processes of the container.
+
+ Args:
+            ps_args (str): Optional arguments to pass to ``ps`` (e.g. ``aux``)
+
+        Returns:
+            (str): The output of the ``top`` command
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.top(self.id, **kwargs)
+
+ def unpause(self):
+ """
+ Unpause all processes within the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.unpause(self.id)
+
+ def update(self, **kwargs):
+ """
+        Update this container's resource configuration.
+
+ Args:
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
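+
+        Example (an illustrative sketch; the value is arbitrary):
+
+            >>> container.update(cpu_shares=512)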
+ """
+ return self.client.api.update_container(self.id, **kwargs)
+
+ def wait(self, **kwargs):
+ """
+ Block until the container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
+
+ Returns:
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
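+
+        Example (an illustrative sketch; assumes the container exits on
+        its own within the request timeout):
+
+            >>> result = container.wait(timeout=30)
+            >>> result['StatusCode']
+            0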
+ """
+ return self.client.api.wait(self.id, **kwargs)
+
+
+class ContainerCollection(Collection):
+ model = Container
+
+ def run(self, image, command=None, stdout=True, stderr=False,
+ remove=False, **kwargs):
+ """
+ Run a container. By default, it will wait for the container to finish
+ and return its logs, similar to ``docker run``.
+
+ If the ``detach`` argument is ``True``, it will start the container
+ and immediately return a :py:class:`Container` object, similar to
+ ``docker run -d``.
+
+ Example:
+ Run a container and get its output:
+
+ >>> import docker
+ >>> client = docker.from_env()
+ >>> client.containers.run('alpine', 'echo hello world')
+ b'hello world\\n'
+
+ Run a container and detach:
+
+            >>> container = client.containers.run('bfirsh/reticulate-splines',
+            ...                                   detach=True)
+ >>> container.logs()
+            b'Reticulating spline 1...\\nReticulating spline 2...\\n'
+
+ Args:
+ image (str): The image to run.
+ command (str or list): The command to run in the container.
+            auto_remove (bool): Enable auto-removal of the container on the
+                daemon side when the container's process exits.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cgroup_parent (str): Override the default parent cgroup.
+ cgroupns (str): Override the default cgroup namespace mode for the
+ container. One of:
+ - ``private`` the container runs in its own private cgroup
+ namespace.
+ - ``host`` use the host system's cgroup namespace.
+ cpu_count (int): Number of usable CPUs (Windows only).
+ cpu_percent (int): Usable percentage of the available CPUs
+ (Windows only).
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_rt_period (int): Limit CPU real-time period in microseconds.
+ cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ detach (bool): Run container in the background and return a
+ :py:class:`Container` object.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file.
+ dns_search (:py:class:`list`): DNS search domains.
+            domainname (str): Set the container's domain name.
+ entrypoint (str or list): The entrypoint for the container.
+ environment (dict or list): Environment variables to set inside
+ the container, as a dictionary or a list of strings in the
+ format ``["SOMEVARIABLE=xxx"]``.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy. The dict takes the following keys:
+
+ - test (:py:class:`list` or str): Test to perform to determine
+ container health. Possible values:
+
+ - Empty list: Inherit healthcheck from parent image
+ - ``["NONE"]``: Disable healthcheck
+ - ``["CMD", args...]``: exec arguments directly.
+ - ``["CMD-SHELL", command]``: Run command in the system's
+ default shell.
+
+ If a string is provided, it will be used as a ``CMD-SHELL``
+ command.
+ - interval (int): The time to wait between checks in
+ nanoseconds. It should be 0 or at least 1000000 (1 ms).
+ - timeout (int): The time to wait before considering the check
+ to have hung. It should be 0 or at least 1000000 (1 ms).
+ - retries (int): The number of consecutive failures needed to
+ consider a container as unhealthy.
+ - start_period (int): Start period for the container to
+ initialize before starting health-retries countdown in
+ nanoseconds. It should be 0 or at least 1000000 (1 ms).
+ hostname (str): Optional hostname for the container.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ kernel_memory (int or str): Kernel memory limit
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration.
+ lxc_conf (dict): LXC config.
+ mac_address (str): MAC address to assign to the container.
+ mem_limit (int or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+ mem_reservation (int or str): Memory soft limit.
+            mem_swappiness (int): Tune a container's memory swappiness
+                behavior. Accepts a number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``volumes``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
+ name (str): The name for this container.
+ nano_cpus (int): CPU quota in units of 1e-9 CPUs.
+ network (str): Name of the network this container will be connected
+ to at creation time. You can connect to additional networks
+ using :py:meth:`Network.connect`. Incompatible with
+ ``network_mode``.
+ network_disabled (bool): Disable networking.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container on
+ the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ This mode is incompatible with ``ports``.
+
+ Incompatible with ``network``.
+ networking_config (Dict[str, EndpointConfig]):
+ Dictionary of EndpointConfig objects for each container network.
+ The key is the name of the network.
+ Defaults to ``None``.
+
+                Used in conjunction with ``network``.
+
+ Incompatible with ``network_mode``.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ Only used if the method needs to pull the requested image.
+ ports (dict): Ports to bind inside the container.
+
+ The keys of the dictionary are the ports to bind inside the
+ container, either as an integer or a string in the form
+ ``port/protocol``, where the protocol is either ``tcp``,
+ ``udp``, or ``sctp``.
+
+ The values of the dictionary are the corresponding ports to
+ open on the host, which can be either:
+
+ - The port number, as an integer. For example,
+ ``{'2222/tcp': 3333}`` will expose port 2222 inside the
+ container as port 3333 on the host.
+ - ``None``, to assign a random host port. For example,
+ ``{'2222/tcp': None}``.
+ - A tuple of ``(address, port)`` if you want to specify the
+ host interface. For example,
+ ``{'1111/tcp': ('127.0.0.1', 1111)}``.
+ - A list of integers, if you want to bind multiple host ports
+ to a single container port. For example,
+ ``{'1111/tcp': [1234, 4567]}``.
+
+ Incompatible with ``host`` network mode.
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ remove (bool): Remove the container when it has finished running.
+ Default: ``False``.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+                - ``Name`` One of ``on-failure`` or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+
+ For example:
+ ``{"Name": "on-failure", "MaximumRetryCount": 5}``
+
+ runtime (str): Runtime to use with this container.
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ stdin_open (bool): Keep ``STDIN`` open even if not attached.
+ stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
+ Default: ``True``.
+ stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
+ Default: ``False``.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ stream (bool): If true and ``detach`` is false, return a log
+ generator instead of a string. Ignored if ``detach`` is true.
+ Default: ``False``.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ tty (bool): Allocate a pseudo-TTY.
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of :py:class:`docker.types.Ulimit` instances.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+                variables will be set in the container being run.
+ user (str or int): Username or UID to run commands as inside the
+ container.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
+ volume_driver (str): The name of a volume driver/plugin.
+ volumes (dict or list): A dictionary to configure volumes mounted
+ inside the container. The key is either the host path or a
+ volume name, and the value is a dictionary with the keys:
+
+ - ``bind`` The path to mount the volume inside the container
+ - ``mode`` Either ``rw`` to mount the volume read/write, or
+ ``ro`` to mount it read-only.
+
+ For example:
+
+ .. code-block:: python
+
+ {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
+ '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+
+            Alternatively, a list of strings, each of which specifies a
+            volume binding.
+
+ For example:
+
+ .. code-block:: python
+
+ ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
+
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ working_dir (str): Path to the working directory.
+
+ Returns:
+ The container logs, either ``STDOUT``, ``STDERR``, or both,
+ depending on the value of the ``stdout`` and ``stderr`` arguments.
+
+            ``STDOUT`` and ``STDERR`` can only be read if either the
+            ``json-file`` or ``journald`` logging driver is used. Thus, if
+            you are using neither of these drivers, a ``None`` object is
+            returned instead. See the
+ `Engine API documentation
+ <https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
+ for full details.
+
+ If ``detach`` is ``True``, a :py:class:`Container` object is
+ returned instead.
+
+ Raises:
+ :py:class:`docker.errors.ContainerError`
+ If the container exits with a non-zero exit code and
+ ``detach`` is ``False``.
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
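+
+        Example (an illustrative sketch of port and volume mapping; the
+        image, host port, and host path are arbitrary placeholders):
+
+            >>> web = client.containers.run(
+            ...     'nginx:alpine',
+            ...     detach=True,
+            ...     ports={'80/tcp': 8080},
+            ...     volumes={'/srv/www': {'bind': '/usr/share/nginx/html',
+            ...                           'mode': 'ro'}},
+            ... )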
+ """
+ if isinstance(image, Image):
+ image = image.id
+ stream = kwargs.pop('stream', False)
+ detach = kwargs.pop('detach', False)
+ platform = kwargs.get('platform', None)
+
+ if detach and remove:
+ if version_gte(self.client.api._version, '1.25'):
+ kwargs["auto_remove"] = True
+ else:
+ raise RuntimeError("The options 'detach' and 'remove' cannot "
+ "be used together in api versions < 1.25.")
+
+ if kwargs.get('network') and kwargs.get('network_mode'):
+ raise RuntimeError(
+ 'The options "network" and "network_mode" can not be used '
+ 'together.'
+ )
+
+ if kwargs.get('networking_config') and not kwargs.get('network'):
+ raise RuntimeError(
+ 'The option "networking_config" can not be used '
+ 'without "network".'
+ )
+
+ try:
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+ except ImageNotFound:
+ self.client.images.pull(image, platform=platform)
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+
+ container.start()
+
+ if detach:
+ return container
+
+ logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
+
+ out = None
+ if logging_driver == 'json-file' or logging_driver == 'journald':
+ out = container.logs(
+ stdout=stdout, stderr=stderr, stream=True, follow=True
+ )
+
+ exit_status = container.wait()['StatusCode']
+ if exit_status != 0:
+ out = None
+ if not kwargs.get('auto_remove'):
+ out = container.logs(stdout=False, stderr=True)
+
+ if remove:
+ container.remove()
+ if exit_status != 0:
+ raise ContainerError(
+ container, exit_status, command, image, out
+ )
+
+ if stream or out is None:
+ return out
+ return b''.join(out)
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a container without starting it. Similar to ``docker create``.
+
+ Takes the same arguments as :py:meth:`run`, except for ``stdout``,
+ ``stderr``, and ``remove``.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
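+
+        Example (an illustrative sketch; mirrors ``docker create`` followed
+        by ``docker start``):
+
+            >>> container = client.containers.create('alpine',
+            ...                                      'echo hello world')
+            >>> container.start()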
+ """
+ if isinstance(image, Image):
+ image = image.id
+ kwargs['image'] = image
+ kwargs['command'] = command
+ kwargs['version'] = self.client.api._version
+ create_kwargs = _create_container_args(kwargs)
+ resp = self.client.api.create_container(**create_kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, container_id):
+ """
+ Get a container by name or ID.
+
+ Args:
+ container_id (str): Container name or ID.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the container does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.inspect_container(container_id)
+ return self.prepare_model(resp)
+
+ def list(self, all=False, before=None, filters=None, limit=-1, since=None,
+ sparse=False, ignore_removed=False):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+            before (str): Show only containers created before Id or Name,
+ include non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ sparse (bool): Do not inspect containers. Returns partial
+ information, but guaranteed not to block. Use
+ :py:meth:`Container.reload` on resulting objects to retrieve
+ all attributes. Default: ``False``
+ ignore_removed (bool): Ignore failures due to missing containers
+ when attempting to inspect containers from the original list.
+ Set to ``True`` if race conditions are likely. Has no effect
+ if ``sparse=True``. Default: ``False``
+
+ Returns:
+ (list of :py:class:`Container`)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
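+
+        Example (an illustrative sketch):
+
+            >>> exited = client.containers.list(
+            ...     all=True, filters={'status': 'exited'})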
+ """
+ resp = self.client.api.containers(all=all, before=before,
+ filters=filters, limit=limit,
+ since=since)
+ if sparse:
+ return [self.prepare_model(r) for r in resp]
+ else:
+ containers = []
+ for r in resp:
+ try:
+ containers.append(self.get(r['Id']))
+ # a container may have been removed while iterating
+ except NotFound:
+ if not ignore_removed:
+ raise
+ return containers
+
+ def prune(self, filters=None):
+ return self.client.api.prune_containers(filters=filters)
+
+ prune.__doc__ = APIClient.prune_containers.__doc__
+
+
+# kwargs to copy straight from run to create
+RUN_CREATE_KWARGS = [
+ 'command',
+ 'detach',
+ 'domainname',
+ 'entrypoint',
+ 'environment',
+ 'healthcheck',
+ 'hostname',
+ 'image',
+ 'labels',
+ 'mac_address',
+ 'name',
+ 'network_disabled',
+ 'platform',
+ 'stdin_open',
+ 'stop_signal',
+ 'tty',
+ 'use_config_proxy',
+ 'user',
+ 'working_dir',
+]
+
+# kwargs to copy straight from run to host_config
+RUN_HOST_CONFIG_KWARGS = [
+ 'auto_remove',
+ 'blkio_weight_device',
+ 'blkio_weight',
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'cgroupns',
+ 'cpu_count',
+ 'cpu_percent',
+ 'cpu_period',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpuset_cpus',
+ 'cpuset_mems',
+ 'cpu_rt_period',
+ 'cpu_rt_runtime',
+ 'device_cgroup_rules',
+ 'device_read_bps',
+ 'device_read_iops',
+ 'device_write_bps',
+ 'device_write_iops',
+ 'devices',
+ 'device_requests',
+ 'dns_opt',
+ 'dns_search',
+ 'dns',
+ 'extra_hosts',
+ 'group_add',
+ 'init',
+ 'init_path',
+ 'ipc_mode',
+ 'isolation',
+ 'kernel_memory',
+ 'links',
+ 'log_config',
+ 'lxc_conf',
+ 'mem_limit',
+ 'mem_reservation',
+ 'mem_swappiness',
+ 'memswap_limit',
+ 'mounts',
+ 'nano_cpus',
+ 'network_mode',
+ 'oom_kill_disable',
+ 'oom_score_adj',
+ 'pid_mode',
+ 'pids_limit',
+ 'privileged',
+ 'publish_all_ports',
+ 'read_only',
+ 'restart_policy',
+ 'security_opt',
+ 'shm_size',
+ 'storage_opt',
+ 'sysctls',
+ 'tmpfs',
+ 'ulimits',
+ 'userns_mode',
+ 'uts_mode',
+ 'version',
+ 'volume_driver',
+ 'volumes_from',
+ 'runtime'
+]
+
+
+def _create_container_args(kwargs):
+ """
+ Convert arguments to create() to arguments to create_container().
+ """
+ # Copy over kwargs which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_CREATE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ host_config_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_HOST_CONFIG_KWARGS:
+ host_config_kwargs[key] = kwargs.pop(key)
+
+ # Process kwargs which are split over both create and host_config
+ ports = kwargs.pop('ports', {})
+ if ports:
+ host_config_kwargs['port_bindings'] = ports
+
+ volumes = kwargs.pop('volumes', {})
+ if volumes:
+ host_config_kwargs['binds'] = volumes
+
+ network = kwargs.pop('network', None)
+ networking_config = kwargs.pop('networking_config', None)
+ if network:
+ if networking_config:
+ # Sanity check: check if the network is defined in the
+ # networking config dict, otherwise switch to None
+ if network not in networking_config:
+ networking_config = None
+
+ create_kwargs['networking_config'] = NetworkingConfig(
+ networking_config
+ ) if networking_config else {network: None}
+ host_config_kwargs['network_mode'] = network
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error('run', kwargs)
+
+ create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
+
+ # Fill in any kwargs which need processing by create_host_config first
+ port_bindings = create_kwargs['host_config'].get('PortBindings')
+ if port_bindings:
+ # sort to make consistent for tests
+ create_kwargs['ports'] = [tuple(p.split('/', 1))
+ for p in sorted(port_bindings.keys())]
+ if volumes:
+ if isinstance(volumes, dict):
+ create_kwargs['volumes'] = [
+ v.get('bind') for v in volumes.values()
+ ]
+ else:
+ create_kwargs['volumes'] = [
+ _host_volume_from_bind(v) for v in volumes
+ ]
+ return create_kwargs
+
+
+def _host_volume_from_bind(bind):
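+    # Reduce a bind specification to a single path, e.g. (illustrative):
+    # '/host:/cont' -> '/cont', 'vol:/cont:ro' -> '/cont'; a bare path
+    # such as '/cont' passes through unchanged. ntpath.splitdrive keeps a
+    # Windows drive letter ('C:\\data') from being mistaken for the ':'
+    # separator.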
+ drive, rest = ntpath.splitdrive(bind)
+ bits = rest.split(':', 1)
+ if len(bits) == 1 or bits[1] in ('ro', 'rw'):
+ return drive + bits[0]
+ elif bits[1].endswith(':ro') or bits[1].endswith(':rw'):
+ return bits[1][:-3]
+ else:
+ return bits[1]
+
+
+ExecResult = namedtuple('ExecResult', 'exit_code,output')
+""" A result of Container.exec_run with the properties ``exit_code`` and
+ ``output``. """
diff --git a/contrib/python/docker/docker/models/images.py b/contrib/python/docker/docker/models/images.py
new file mode 100644
index 0000000000..4f058d24d9
--- /dev/null
+++ b/contrib/python/docker/docker/models/images.py
@@ -0,0 +1,505 @@
+import itertools
+import re
+import warnings
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import BuildError, ImageLoadError, InvalidArgument
+from ..utils import parse_repository_tag
+from ..utils.json_stream import json_stream
+from .resource import Collection, Model
+
+
+class Image(Model):
+ """
+ An image on the server.
+ """
+ def __repr__(self):
+ tag_str = "', '".join(self.tags)
+ return f"<{self.__class__.__name__}: '{tag_str}'>"
+
+ @property
+ def labels(self):
+ """
+ The labels of an image as dictionary.
+ """
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 12 characters, plus the ``sha256:``
+ prefix.
+ """
+ if self.id.startswith('sha256:'):
+ return self.id[:19]
+ return self.id[:12]
+
+ @property
+ def tags(self):
+ """
+ The image's tags.
+ """
+ tags = self.attrs.get('RepoTags')
+ if tags is None:
+ tags = []
+ return [tag for tag in tags if tag != '<none>:<none>']
+
+ def history(self):
+ """
+ Show the history of an image.
+
+ Returns:
+ (list): The history of the image.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.history(self.id)
+
+ def remove(self, force=False, noprune=False):
+ """
+ Remove this image.
+
+ Args:
+ force (bool): Force removal of the image
+ noprune (bool): Do not delete untagged parents
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_image(
+ self.id,
+ force=force,
+ noprune=noprune,
+ )
+
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ chunk_size (int): The generator will return up to that much data
+ per iteration, but may return less. If ``None``, data will be
+ streamed as it is received. Default: 2 MB
+ named (str or bool): If ``False`` (default), the tarball will not
+ retain repository and tag information for this image. If set
+ to ``True``, the first tag in the :py:attr:`~tags` list will
+ be used to identify the image. Alternatively, any element of
+ the :py:attr:`~tags` list can be used as an argument to use
+ that specific tag as the saved identifier.
+
+ Returns:
+ (generator): A stream of raw archive data.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.images.get("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+            >>> for chunk in image.save():
+            ...     f.write(chunk)
+ >>> f.close()
+ """
+ img = self.id
+ if named:
+ img = self.tags[0] if self.tags else img
+ if isinstance(named, str):
+ if named not in self.tags:
+ raise InvalidArgument(
+ f"{named} is not a valid tag for this image"
+ )
+ img = named
+
+ return self.client.api.get_image(img, chunk_size)
+
+ def tag(self, repository, tag=None, **kwargs):
+ """
+ Tag this image into a repository. Similar to the ``docker tag``
+ command.
+
+ Args:
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ (bool): ``True`` if successful
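+
+        Example (an illustrative sketch; the repository name is an
+        arbitrary placeholder):
+
+            >>> image.tag('myregistry.example.com/app', tag='v1')
+            True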
+ """
+ return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+
+
+class RegistryData(Model):
+ """
+ Image metadata stored on the registry, including available platforms.
+ """
+ def __init__(self, image_name, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.image_name = image_name
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs['Descriptor']['digest']
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 12 characters, plus the ``sha256:``
+ prefix.
+ """
+ return self.id[:19]
+
+ def pull(self, platform=None):
+ """
+ Pull the image digest.
+
+ Args:
+ platform (str): The platform to pull the image for.
+ Default: ``None``
+
+ Returns:
+ (:py:class:`Image`): A reference to the pulled image.
+ """
+ repository, _ = parse_repository_tag(self.image_name)
+ return self.collection.pull(repository, tag=self.id, platform=platform)
+
+ def has_platform(self, platform):
+ """
+ Check whether the given platform identifier is available for this
+ digest.
+
+ Args:
+ platform (str or dict): A string using the ``os[/arch[/variant]]``
+ format, or a platform dictionary.
+
+ Returns:
+ (bool): ``True`` if the platform is recognized as available,
+ ``False`` otherwise.
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the platform argument is not a valid descriptor.
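+
+        Example (an illustrative sketch; assumes the image is published
+        for ``linux/amd64``):
+
+            >>> data = client.images.get_registry_data('busybox')
+            >>> data.has_platform('linux/amd64')
+            True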
+ """
+ if platform and not isinstance(platform, dict):
+ parts = platform.split('/')
+ if len(parts) > 3 or len(parts) < 1:
+ raise InvalidArgument(
+ f'"{platform}" is not a valid platform descriptor'
+ )
+ platform = {'os': parts[0]}
+ if len(parts) > 2:
+ platform['variant'] = parts[2]
+ if len(parts) > 1:
+ platform['architecture'] = parts[1]
+ return normalize_platform(
+ platform, self.client.version()
+ ) in self.attrs['Platforms']
+
+ def reload(self):
+ self.attrs = self.client.api.inspect_distribution(self.image_name)
+
+ reload.__doc__ = Model.reload.__doc__
+
+
+class ImageCollection(Collection):
+ model = Image
+
+ def build(self, **kwargs):
+ """
+ Build an image and return it. Similar to the ``docker build``
+ command. Either ``path`` or ``fileobj`` must be set.
+
+ If you already have a tar file for the Docker build context (including
+ a Dockerfile), pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is also
+        compressed, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ If you want to get the raw output of the build, use the
+ :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
+ low-level API.
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+                default of ``False`` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+            shmsize (int): Size of ``/dev/shm`` in bytes. The size must be
+                greater than 0. If omitted, the system uses 64 MB.
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (list): A list of images used for build cache
+ resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
+
+ Returns:
+ (tuple): The first item is the :py:class:`Image` object for the
+ image that was built. The second item is a generator of the
+ build logs as JSON-decoded objects.
+
+ Raises:
+ :py:class:`docker.errors.BuildError`
+ If there is an error during the build.
+ :py:class:`docker.errors.APIError`
+ If the server returns any other error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
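+
+        Example (an illustrative sketch; the path and tag are arbitrary
+        placeholders):
+
+            >>> image, logs = client.images.build(path='.',
+            ...                                   tag='myapp:latest')
+            >>> for entry in logs:
+            ...     print(entry.get('stream', ''), end='')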
+ """
+ resp = self.client.api.build(**kwargs)
+ if isinstance(resp, str):
+ return self.get(resp)
+ last_event = None
+ image_id = None
+ result_stream, internal_stream = itertools.tee(json_stream(resp))
+ for chunk in internal_stream:
+ if 'error' in chunk:
+ raise BuildError(chunk['error'], result_stream)
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Successfully built |sha256:)([0-9a-f]+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ last_event = chunk
+ if image_id:
+ return (self.get(image_id), result_stream)
+ raise BuildError(last_event or 'Unknown', result_stream)
+
+ def get(self, name):
+ """
+ Gets an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`Image`): The image.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_image(name))
+
+ def get_registry_data(self, name, auth_config=None):
+ """
+ Gets the registry data for an image.
+
+ Args:
+ name (str): The name of the image.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
+
+ Returns:
+ (:py:class:`RegistryData`): The data object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return RegistryData(
+ image_name=name,
+ attrs=self.client.api.inspect_distribution(name, auth_config),
+ client=self.client,
+ collection=self,
+ )
+
+ def list(self, name=None, all=False, filters=None):
+ """
+ List images on the server.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.images(name=name, all=all, filters=filters)
+ return [self.get(r["Id"]) for r in resp]
+
+ def load(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
+ Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
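+
+        Example (an illustrative sketch; the tarball name is an arbitrary
+        placeholder):
+
+            >>> with open('busybox.tar', 'rb') as f:
+            ...     images = client.images.load(f.read())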
+ """
+ resp = self.client.api.load_image(data)
+ images = []
+ for chunk in resp:
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Loaded image ID: |^Loaded image: )(.+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ images.append(image_id)
+ if 'error' in chunk:
+ raise ImageLoadError(chunk['error'])
+
+ return [self.get(i) for i in images]
+
+ def pull(self, repository, tag=None, all_tags=False, **kwargs):
+ """
+ Pull an image of the given name and return it. Similar to the
+ ``docker pull`` command.
+ If ``tag`` is ``None`` or empty, it is set to ``latest``.
+ If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
+ tags will be pulled.
+
+ If you want to get the raw pull output, use the
+ :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
+ low-level API.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ all_tags (bool): Pull all image tags
+
+ Returns:
+ (:py:class:`Image` or list): The image that has been pulled.
+ If ``all_tags`` is True, the method will return a list
+ of :py:class:`Image` objects belonging to this repository.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> # Pull the image tagged `latest` in the busybox repo
+ >>> image = client.images.pull('busybox')
+
+ >>> # Pull all tags in the busybox repo
+ >>> images = client.images.pull('busybox', all_tags=True)
+ """
+ repository, image_tag = parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
+
+ if 'stream' in kwargs:
+ warnings.warn(
+ '`stream` is not a valid parameter for this method'
+ ' and will be overridden',
+ stacklevel=1,
+ )
+ del kwargs['stream']
+
+ pull_log = self.client.api.pull(
+ repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
+ )
+ for _ in pull_log:
+ # We don't do anything with the logs, but we need
+ # to keep the connection alive and wait for the image
+ # to be pulled.
+ pass
+ if not all_tags:
+ sep = '@' if tag.startswith('sha256:') else ':'
+ return self.get(f'{repository}{sep}{tag}')
+ return self.list(repository)
+
+ def push(self, repository, tag=None, **kwargs):
+ return self.client.api.push(repository, tag=tag, **kwargs)
+ push.__doc__ = APIClient.push.__doc__
+
+ def remove(self, *args, **kwargs):
+ self.client.api.remove_image(*args, **kwargs)
+ remove.__doc__ = APIClient.remove_image.__doc__
+
+ def search(self, *args, **kwargs):
+ return self.client.api.search(*args, **kwargs)
+ search.__doc__ = APIClient.search.__doc__
+
+ def prune(self, filters=None):
+ return self.client.api.prune_images(filters=filters)
+ prune.__doc__ = APIClient.prune_images.__doc__
+
+ def prune_builds(self, *args, **kwargs):
+ return self.client.api.prune_builds(*args, **kwargs)
+ prune_builds.__doc__ = APIClient.prune_builds.__doc__
+
+
+def normalize_platform(platform, engine_info):
+ if platform is None:
+ platform = {}
+ if 'os' not in platform:
+ platform['os'] = engine_info['Os']
+ if 'architecture' not in platform:
+ platform['architecture'] = engine_info['Arch']
+ return platform
diff --git a/contrib/python/docker/docker/models/networks.py b/contrib/python/docker/docker/models/networks.py
new file mode 100644
index 0000000000..9b3ed7829c
--- /dev/null
+++ b/contrib/python/docker/docker/models/networks.py
@@ -0,0 +1,218 @@
+from ..api import APIClient
+from ..utils import version_gte
+from .containers import Container
+from .resource import Collection, Model
+
+
+class Network(Model):
+ """
+ A Docker network.
+ """
+ @property
+ def name(self):
+ """
+ The name of the network.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def containers(self):
+ """
+ The containers that are connected to the network, as a list of
+ :py:class:`~docker.models.containers.Container` objects.
+ """
+ return [
+ self.client.containers.get(cid) for cid in
+ (self.attrs.get('Containers') or {}).keys()
+ ]
+
+ def connect(self, container, *args, **kwargs):
+ """
+ Connect a container to this network.
+
+ Args:
+ container (str): Container to connect to this network, as either
+ an ID, name, or :py:class:`~docker.models.containers.Container`
+ object.
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+                Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
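+
+        Example (an illustrative sketch; the alias is arbitrary):
+
+            >>> network.connect(container, aliases=['web'])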
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.connect_container_to_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def disconnect(self, container, *args, **kwargs):
+ """
+ Disconnect a container from this network.
+
+ Args:
+ container (str): Container to disconnect from this network, as
+ either an ID, name, or
+ :py:class:`~docker.models.containers.Container` object.
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.disconnect_container_from_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def remove(self):
+ """
+ Remove this network.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_network(self.id)
+
+
+class NetworkCollection(Collection):
+ """
+ Networks on the Docker server.
+ """
+ model = Network
+
+ def create(self, name, *args, **kwargs):
+ """
+        Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+            check_duplicate (bool): Request the daemon to check for networks
+                with the same name. Default: ``None``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (:py:class:`Network`): The network that was created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.networks.create("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> client.networks.create(
+ "network1",
+ driver="bridge",
+ ipam=ipam_config
+ )
+
+ """
+ resp = self.client.api.create_network(name, *args, **kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, network_id, *args, **kwargs):
+ """
+ Get a network by its ID.
+
+ Args:
+ network_id (str): The ID of the network.
+ verbose (bool): Retrieve the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
+
+ Returns:
+            (:py:class:`Network`): The network.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the network does not exist.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ return self.prepare_model(
+ self.client.api.inspect_network(network_id, *args, **kwargs)
+ )
+
+ def list(self, *args, **kwargs):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by.
+ ids (:py:class:`list`): List of ids to filter by.
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+            greedy (bool): Fetch more details for each network individually.
+                Useful if you want the containers attached to each network.
+
+ Returns:
+            (list of :py:class:`Network`): The networks on the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
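+
+        Example (an illustrative sketch):
+
+            >>> nets = client.networks.list(filters={'driver': 'bridge'})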
+ """
+ greedy = kwargs.pop('greedy', False)
+ resp = self.client.api.networks(*args, **kwargs)
+ networks = [self.prepare_model(item) for item in resp]
+ if greedy and version_gte(self.client.api._version, '1.28'):
+ for net in networks:
+ net.reload()
+ return networks
+
+ def prune(self, filters=None):
+ return self.client.api.prune_networks(filters=filters)
+ prune.__doc__ = APIClient.prune_networks.__doc__
diff --git a/contrib/python/docker/docker/models/nodes.py b/contrib/python/docker/docker/models/nodes.py
new file mode 100644
index 0000000000..2fa480c544
--- /dev/null
+++ b/contrib/python/docker/docker/models/nodes.py
@@ -0,0 +1,107 @@
+from .resource import Collection, Model
+
+
+class Node(Model):
+ """A node in a swarm."""
+ id_attribute = 'ID'
+
+ @property
+ def version(self):
+ """
+        The version number of the node. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def update(self, node_spec):
+ """
+ Update the node's configuration.
+
+ Args:
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+            >>> node_spec = {'Availability': 'active',
+            ...              'Name': 'node-name',
+            ...              'Role': 'manager',
+            ...              'Labels': {'foo': 'bar'}}
+ >>> node.update(node_spec)
+
+ """
+ return self.client.api.update_node(self.id, self.version, node_spec)
+
+ def remove(self, force=False):
+ """
+ Remove this node from the swarm.
+
+ Args:
+ force (bool): Force remove an active node. Default: `False`
+
+ Returns:
+ `True` if the request was successful.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_node(self.id, force=force)
+
+
+class NodeCollection(Collection):
+ """Nodes on the Docker server."""
+ model = Node
+
+ def get(self, node_id):
+ """
+ Get a node.
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A :py:class:`Node` object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_node(node_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of :py:class:`Node` objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.nodes.list(filters={'role': 'manager'})
+ """
+ return [
+ self.prepare_model(n)
+ for n in self.client.api.nodes(*args, **kwargs)
+ ]
diff --git a/contrib/python/docker/docker/models/plugins.py b/contrib/python/docker/docker/models/plugins.py
new file mode 100644
index 0000000000..85d768c935
--- /dev/null
+++ b/contrib/python/docker/docker/models/plugins.py
@@ -0,0 +1,206 @@
+from .. import errors
+from .resource import Collection, Model
+
+
+class Plugin(Model):
+ """
+ A plugin on the server.
+ """
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ @property
+ def name(self):
+ """
+ The plugin's name.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def enabled(self):
+ """
+ Whether the plugin is enabled.
+ """
+ return self.attrs.get('Enabled')
+
+ @property
+ def settings(self):
+ """
+ A dictionary representing the plugin's configuration.
+ """
+ return self.attrs.get('Settings')
+
+ def configure(self, options):
+ """
+ Update the plugin's settings.
+
+ Args:
+ options (dict): A key-value mapping of options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.configure_plugin(self.name, options)
+ self.reload()
+
+ def disable(self, force=False):
+ """
+ Disable the plugin.
+
+ Args:
+ force (bool): Force disable. Default: False
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ self.client.api.disable_plugin(self.name, force)
+ self.reload()
+
+ def enable(self, timeout=0):
+ """
+ Enable the plugin.
+
+ Args:
+ timeout (int): Timeout in seconds. Default: 0
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.enable_plugin(self.name, timeout)
+ self.reload()
+
+ def push(self):
+ """
+ Push the plugin to a remote registry.
+
+ Returns:
+ A dict iterator streaming the status of the upload.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.push_plugin(self.name)
+
+ def remove(self, force=False):
+ """
+ Remove the plugin from the server.
+
+ Args:
+ force (bool): Remove even if the plugin is enabled.
+ Default: False
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_plugin(self.name, force=force)
+
+ def upgrade(self, remote=None):
+ """
+ Upgrade the plugin.
+
+ Args:
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ Default: this plugin's name.
+
+ Returns:
+ A generator streaming the decoded API logs
+ """
+ if self.enabled:
+            raise errors.DockerException(
+ 'Plugin must be disabled before upgrading.'
+ )
+
+ if remote is None:
+ remote = self.name
+ privileges = self.client.api.plugin_privileges(remote)
+ yield from self.client.api.upgrade_plugin(
+ self.name,
+ remote,
+ privileges,
+ )
+ self.reload()
+
+
+class PluginCollection(Collection):
+ model = Plugin
+
+ def create(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ (:py:class:`Plugin`): The newly created plugin.
+ """
+ self.client.api.create_plugin(name, plugin_data_dir, gzip)
+ return self.get(name)
+
+ def get(self, name):
+ """
+        Get a plugin.
+
+ Args:
+ name (str): The name of the plugin.
+
+ Returns:
+ (:py:class:`Plugin`): The plugin.
+
+ Raises:
+            :py:class:`docker.errors.NotFound`
+                If the plugin does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_plugin(name))
+
+ def install(self, remote_name, local_name=None):
+ """
+ Pull and install a plugin.
+
+ Args:
+ remote_name (string): Remote reference for the plugin to
+ install. The ``:latest`` tag is optional, and is the
+ default if omitted.
+ local_name (string): Local name for the pulled plugin.
+ The ``:latest`` tag is optional, and is the default if
+ omitted. Optional.
+
+ Returns:
+            (:py:class:`Plugin`): The installed plugin.
+
+        Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ privileges = self.client.api.plugin_privileges(remote_name)
+ it = self.client.api.pull_plugin(remote_name, privileges, local_name)
+ for _data in it:
+ pass
+ return self.get(local_name or remote_name)
+
+ def list(self):
+ """
+ List plugins installed on the server.
+
+ Returns:
+ (list of :py:class:`Plugin`): The plugins.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.plugins()
+ return [self.prepare_model(r) for r in resp]
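A sketch of the full plugin lifecycle driven through PluginCollection (assumes the daemon can pull from a registry; the plugin reference is illustrative):

    import docker

    client = docker.from_env()
    # install() consumes the pull stream, then inspects the result.
    plugin = client.plugins.install('vieux/sshfs:latest')  # illustrative reference
    plugin.configure({'DEBUG': '1'})   # key/value plugin options
    plugin.enable(timeout=30)
    print(plugin.name, plugin.enabled)
    plugin.disable(force=True)
    plugin.remove(force=True)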
diff --git a/contrib/python/docker/docker/models/resource.py b/contrib/python/docker/docker/models/resource.py
new file mode 100644
index 0000000000..d3a35e84be
--- /dev/null
+++ b/contrib/python/docker/docker/models/resource.py
@@ -0,0 +1,92 @@
+class Model:
+ """
+ A base class for representing a single object on the server.
+ """
+ id_attribute = 'Id'
+
+ def __init__(self, attrs=None, client=None, collection=None):
+ #: A client pointing at the server that this object is on.
+ self.client = client
+
+ #: The collection that this model is part of.
+ self.collection = collection
+
+ #: The raw representation of this object from the API
+ self.attrs = attrs
+ if self.attrs is None:
+ self.attrs = {}
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: {self.short_id}>"
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.id == other.id
+
+ def __hash__(self):
+ return hash(f"{self.__class__.__name__}:{self.id}")
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs.get(self.id_attribute)
+
+ @property
+ def short_id(self):
+ """
+ The ID of the object, truncated to 12 characters.
+ """
+ return self.id[:12]
+
+ def reload(self):
+ """
+ Load this object from the server again and update ``attrs`` with the
+ new data.
+ """
+ new_model = self.collection.get(self.id)
+ self.attrs = new_model.attrs
+
+
+class Collection:
+ """
+ A base class for representing all objects of a particular type on the
+ server.
+ """
+
+ #: The type of object this collection represents, set by subclasses
+ model = None
+
+ def __init__(self, client=None):
+ #: The client pointing at the server that this collection of objects
+ #: is on.
+ self.client = client
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError(
+ f"'{self.__class__.__name__}' object is not callable. "
+ "You might be trying to use the old (pre-2.0) API - "
+ "use docker.APIClient if so."
+ )
+
+ def list(self):
+ raise NotImplementedError
+
+ def get(self, key):
+ raise NotImplementedError
+
+ def create(self, attrs=None):
+ raise NotImplementedError
+
+ def prepare_model(self, attrs):
+ """
+ Create a model from a set of attributes.
+ """
+ if isinstance(attrs, Model):
+ attrs.client = self.client
+ attrs.collection = self
+ return attrs
+ elif isinstance(attrs, dict):
+ return self.model(attrs=attrs, client=self.client, collection=self)
+ else:
+ raise Exception(f"Can't create {self.model.__name__} from {attrs}")
diff --git a/contrib/python/docker/docker/models/secrets.py b/contrib/python/docker/docker/models/secrets.py
new file mode 100644
index 0000000000..38c48dc7eb
--- /dev/null
+++ b/contrib/python/docker/docker/models/secrets.py
@@ -0,0 +1,70 @@
+from ..api import APIClient
+from .resource import Collection, Model
+
+
+class Secret(Model):
+ """A secret."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this secret.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If secret failed to remove.
+ """
+ return self.client.api.remove_secret(self.id)
+
+
+class SecretCollection(Collection):
+ """Secrets on the Docker server."""
+ model = Secret
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_secret(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_secret.__doc__
+
+ def get(self, secret_id):
+ """
+ Get a secret.
+
+ Args:
+ secret_id (str): Secret ID.
+
+ Returns:
+ (:py:class:`Secret`): The secret.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the secret does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_secret(secret_id))
+
+ def list(self, **kwargs):
+ """
+ List secrets. Similar to the ``docker secret ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Secret`): The secrets.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.secrets(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
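A usage sketch for the secrets model (assumes swarm mode; the name and payload are illustrative):

    import docker

    client = docker.from_env()
    secret = client.secrets.create(name='db_password', data=b'hunter2')
    print(secret.id, secret.name)
    print([s.name for s in client.secrets.list()])
    secret.remove()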
diff --git a/contrib/python/docker/docker/models/services.py b/contrib/python/docker/docker/models/services.py
new file mode 100644
index 0000000000..09502633e5
--- /dev/null
+++ b/contrib/python/docker/docker/models/services.py
@@ -0,0 +1,390 @@
+import copy
+
+from docker.errors import InvalidArgument, create_unexpected_kwargs_error
+from docker.types import ContainerSpec, Placement, ServiceMode, TaskTemplate
+
+from .resource import Collection, Model
+
+
+class Service(Model):
+ """A service."""
+ id_attribute = 'ID'
+
+ @property
+ def name(self):
+ """The service's name."""
+ return self.attrs['Spec']['Name']
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def remove(self):
+ """
+ Stop and remove the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_service(self.id)
+
+ def tasks(self, filters=None):
+ """
+ List the tasks in this service.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``node``,
+ ``label``, and ``desired-state``.
+
+ Returns:
+ :py:class:`list`: List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if filters is None:
+ filters = {}
+ filters['service'] = self.id
+ return self.client.api.tasks(filters=filters)
+
+ def update(self, **kwargs):
+ """
+ Update a service's configuration. Similar to the ``docker service
+ update`` command.
+
+ Takes the same parameters as :py:meth:`~ServiceCollection.create`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ # Image is required, so if it hasn't been set, use current image
+ if 'image' not in kwargs:
+ spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ kwargs['image'] = spec['Image']
+
+ if kwargs.get('force_update') is True:
+ task_template = self.attrs['Spec']['TaskTemplate']
+ current_value = int(task_template.get('ForceUpdate', 0))
+ kwargs['force_update'] = current_value + 1
+
+ create_kwargs = _get_create_service_kwargs('update', kwargs)
+
+ return self.client.api.update_service(
+ self.id,
+ self.version,
+ **create_kwargs
+ )
+
+ def logs(self, **kwargs):
+ """
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+            since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ generator: Logs for the service.
+ """
+ is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
+ 'TTY', False
+ )
+ return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
+
+ def scale(self, replicas):
+ """
+        Scale the service to the given number of replicas.
+
+ Args:
+ replicas (int): The number of containers that should be running.
+
+ Returns:
+ bool: ``True`` if successful.
+ """
+
+        if 'Global' in self.attrs['Spec']['Mode']:
+ raise InvalidArgument('Cannot scale a global container')
+
+ service_mode = ServiceMode('replicated', replicas)
+ return self.client.api.update_service(self.id, self.version,
+ mode=service_mode,
+ fetch_current_spec=True)
+
+ def force_update(self):
+ """
+ Force update the service even if no changes require it.
+
+ Returns:
+ bool: ``True`` if successful.
+ """
+
+ return self.update(force_update=True, fetch_current_spec=True)
+
+
+class ServiceCollection(Collection):
+ """Services on the Docker server."""
+ model = Service
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a service. Similar to the ``docker service create`` command.
+
+ Args:
+ image (str): The image name to use for the containers.
+ command (list of str or str): Command to run.
+ args (list of str): Arguments to the command.
+ constraints (list of str): :py:class:`~docker.types.Placement`
+ constraints.
+ preferences (list of tuple): :py:class:`~docker.types.Placement`
+ preferences.
+            maxreplicas (int): Maximum number of replicas per node, passed
+                through to :py:class:`~docker.types.Placement`.
+ platforms (list of tuple): A list of platform constraints
+ expressed as ``(arch, os)`` tuples.
+ container_labels (dict): Labels to apply to the container.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+ env (list of str): Environment variables, in the form
+ ``KEY=val``.
+ hostname (string): Hostname to set on the container.
+ init (boolean): Run an init inside the container that forwards
+ signals and reaps processes
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
+ labels (dict): Labels to apply to the service.
+ log_driver (str): Log driver to use for containers.
+ log_driver_options (dict): Log driver options.
+ mode (ServiceMode): Scheduling mode for the service.
+                Default: ``None``
+ mounts (list of str): Mounts for the containers, in the form
+ ``source:target:options``, where options is either
+ ``ro`` or ``rw``.
+ name (str): Name to give to the service.
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
+ resources (Resources): Resource limits and reservations.
+ restart_policy (RestartPolicy): Restart policy for containers.
+ secrets (list of :py:class:`~docker.types.SecretReference`): List
+ of secrets accessible to containers for this service.
+ stop_grace_period (int): Amount of time to wait for
+ containers to terminate before forcefully killing them.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ user (str): User to run commands as.
+ workdir (str): Working directory for commands to run.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's `hosts` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of
+ :py:class:`~docker.types.ConfigReference` that will be exposed
+ to the service.
+ privileges (Privileges): Security options for the service's
+ containers.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to
+ the default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop
+ from the default set for the container.
+ sysctls (:py:class:`dict`): A dict of sysctl values to add to the
+ container
+
+ Returns:
+ :py:class:`Service`: The created service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ kwargs['image'] = image
+ kwargs['command'] = command
+ create_kwargs = _get_create_service_kwargs('create', kwargs)
+ service_id = self.client.api.create_service(**create_kwargs)
+ return self.get(service_id)
+
+ def get(self, service_id, insert_defaults=None):
+ """
+ Get a service.
+
+ Args:
+ service_id (str): The ID of the service.
+ insert_defaults (boolean): If true, default values will be merged
+ into the output.
+
+ Returns:
+ :py:class:`Service`: The service.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the service does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.InvalidVersion`
+ If one of the arguments is not supported with the current
+ API version.
+ """
+ return self.prepare_model(
+ self.client.api.inspect_service(service_id, insert_defaults)
+ )
+
+ def list(self, **kwargs):
+ """
+ List services.
+
+ Args:
+            filters (dict): Filters to process on the services list. Valid
+                filters: ``id``, ``name``, ``label`` and ``mode``.
+ Default: ``None``.
+ status (bool): Include the service task count of running and
+ desired tasks. Default: ``None``.
+
+ Returns:
+ list of :py:class:`Service`: The services.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return [
+ self.prepare_model(s)
+ for s in self.client.api.services(**kwargs)
+ ]
+
+
+# kwargs to copy straight over to ContainerSpec
+CONTAINER_SPEC_KWARGS = [
+ 'args',
+ 'cap_add',
+ 'cap_drop',
+ 'command',
+ 'configs',
+ 'dns_config',
+ 'env',
+ 'groups',
+ 'healthcheck',
+ 'hostname',
+ 'hosts',
+ 'image',
+ 'init',
+ 'isolation',
+ 'labels',
+ 'mounts',
+ 'open_stdin',
+ 'privileges',
+ 'read_only',
+ 'secrets',
+ 'stop_grace_period',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'workdir',
+ 'sysctls',
+]
+
+# kwargs to copy straight over to TaskTemplate
+TASK_TEMPLATE_KWARGS = [
+ 'networks',
+ 'resources',
+ 'restart_policy',
+]
+
+# kwargs to copy straight over to create_service
+CREATE_SERVICE_KWARGS = [
+ 'name',
+ 'labels',
+ 'mode',
+ 'update_config',
+ 'rollback_config',
+ 'endpoint_spec',
+]
+
+PLACEMENT_KWARGS = [
+ 'constraints',
+ 'preferences',
+ 'platforms',
+ 'maxreplicas',
+]
+
+
+def _get_create_service_kwargs(func_name, kwargs):
+ # Copy over things which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CREATE_SERVICE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ container_spec_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CONTAINER_SPEC_KWARGS:
+ container_spec_kwargs[key] = kwargs.pop(key)
+ task_template_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in TASK_TEMPLATE_KWARGS:
+ task_template_kwargs[key] = kwargs.pop(key)
+
+ if 'container_labels' in kwargs:
+ container_spec_kwargs['labels'] = kwargs.pop('container_labels')
+
+ placement = {}
+ for key in copy.copy(kwargs):
+ if key in PLACEMENT_KWARGS:
+ placement[key] = kwargs.pop(key)
+ placement = Placement(**placement)
+ task_template_kwargs['placement'] = placement
+
+ if 'log_driver' in kwargs:
+ task_template_kwargs['log_driver'] = {
+ 'Name': kwargs.pop('log_driver'),
+ 'Options': kwargs.pop('log_driver_options', {})
+ }
+
+ if func_name == 'update':
+ if 'force_update' in kwargs:
+ task_template_kwargs['force_update'] = kwargs.pop('force_update')
+
+ # fetch the current spec by default if updating the service
+ # through the model
+ fetch_current_spec = kwargs.pop('fetch_current_spec', True)
+ create_kwargs['fetch_current_spec'] = fetch_current_spec
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error(func_name, kwargs)
+
+ container_spec = ContainerSpec(**container_spec_kwargs)
+ task_template_kwargs['container_spec'] = container_spec
+ create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
+ return create_kwargs
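A sketch showing how create() kwargs are routed by _get_create_service_kwargs: image and env land in ContainerSpec, restart_policy in TaskTemplate, constraints in Placement, and name in the top-level create call (assumes swarm mode; values are illustrative):

    import docker
    from docker.types import RestartPolicy

    client = docker.from_env()
    service = client.services.create(
        image='nginx:alpine',                     # -> ContainerSpec
        name='web',                               # -> create_service kwargs
        env=['TZ=UTC'],                           # -> ContainerSpec
        restart_policy=RestartPolicy(condition='on-failure'),  # -> TaskTemplate
        constraints=['node.role == worker'],      # -> Placement
    )
    service.scale(3)                              # replicated mode only
    print([task['Status']['State'] for task in service.tasks()])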
diff --git a/contrib/python/docker/docker/models/swarm.py b/contrib/python/docker/docker/models/swarm.py
new file mode 100644
index 0000000000..271cc5dcb1
--- /dev/null
+++ b/contrib/python/docker/docker/models/swarm.py
@@ -0,0 +1,190 @@
+from docker.api import APIClient
+from docker.errors import APIError
+
+from .resource import Model
+
+
+class Swarm(Model):
+ """
+    The server's Swarm state. This is a singleton that must be reloaded to get
+ the current state of the Swarm.
+ """
+ id_attribute = 'ID'
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ if self.client:
+ try:
+ self.reload()
+ except APIError as e:
+ # FIXME: https://github.com/docker/docker/issues/29192
+ if e.response.status_code not in (406, 503):
+ raise
+
+ @property
+ def version(self):
+ """
+ The version number of the swarm. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def get_unlock_key(self):
+ return self.client.api.get_unlock_key()
+ get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
+
+ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, default_addr_pool=None,
+ subnet_size=None, data_path_addr=None, data_path_port=None,
+ **kwargs):
+ """
+ Initialize a new swarm on this Engine.
+
+ Args:
+ advertise_addr (str): Externally reachable address advertised to
+ other nodes. This can either be an address/port combination in
+ the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used.
+
+ If not specified, it will be automatically detected when
+ possible.
+ listen_addr (str): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: ``0.0.0.0:2377``
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ default_addr_pool (list of str): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
+ data_path_port (int): Port number to use for data path traffic.
+ Acceptable port range is 1024 to 49151. If set to ``None`` or
+ 0, the default port 4789 will be used. Default: None
+            task_history_retention_limit (int): Maximum number of task
+                history entries stored.
+            snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+            heartbeat_tick (int): Number of ticks (in seconds) between each
+                heartbeat.
+            election_tick (int): Number of ticks (in seconds) needed without a
+                leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for nodes certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+            ca_force_rotate (int): An integer that, when incremented, forces
+                the swarm to generate a new signing CA certificate and key,
+                if none have been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
+
+ Returns:
+ (str): The ID of the created node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+            >>> client.swarm.init(
+            ...     advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+            ...     force_new_cluster=False,
+            ...     default_addr_pool=['10.20.0.0/16'],
+            ...     subnet_size=24, snapshot_interval=5000,
+            ...     log_entries_for_slow_followers=1200
+            ... )
+
+ """
+ init_kwargs = {
+ 'advertise_addr': advertise_addr,
+ 'listen_addr': listen_addr,
+ 'force_new_cluster': force_new_cluster,
+ 'default_addr_pool': default_addr_pool,
+ 'subnet_size': subnet_size,
+ 'data_path_addr': data_path_addr,
+ 'data_path_port': data_path_port,
+ }
+ init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
+ node_id = self.client.api.init_swarm(**init_kwargs)
+ self.reload()
+ return node_id
+
+ def join(self, *args, **kwargs):
+ return self.client.api.join_swarm(*args, **kwargs)
+ join.__doc__ = APIClient.join_swarm.__doc__
+
+ def leave(self, *args, **kwargs):
+ return self.client.api.leave_swarm(*args, **kwargs)
+ leave.__doc__ = APIClient.leave_swarm.__doc__
+
+ def reload(self):
+ """
+ Inspect the swarm on the server and store the response in
+ :py:attr:`attrs`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.attrs = self.client.api.inspect_swarm()
+
+ def unlock(self, key):
+ return self.client.api.unlock_swarm(key)
+ unlock.__doc__ = APIClient.unlock_swarm.__doc__
+
+ def update(self, rotate_worker_token=False, rotate_manager_token=False,
+ rotate_manager_unlock_key=False, **kwargs):
+ """
+ Update the swarm's configuration.
+
+ It takes the same arguments as :py:meth:`init`, except
+ ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
+ addition, it takes these arguments:
+
+ Args:
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ # this seems to have to be set
+ if kwargs.get('node_cert_expiry') is None:
+ kwargs['node_cert_expiry'] = 7776000000000000
+
+ return self.client.api.update_swarm(
+ version=self.version,
+ swarm_spec=self.client.api.create_swarm_spec(**kwargs),
+ rotate_worker_token=rotate_worker_token,
+ rotate_manager_token=rotate_manager_token,
+ rotate_manager_unlock_key=rotate_manager_unlock_key
+ )
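A sketch of initializing a swarm and rotating its tokens afterwards (the interface name and address pool are illustrative):

    import docker

    client = docker.from_env()
    node_id = client.swarm.init(
        advertise_addr='eth0',                # illustrative interface
        default_addr_pool=['10.20.0.0/16'],
        subnet_size=24,
    )
    print(node_id, client.swarm.version)
    client.swarm.update(rotate_worker_token=True)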
diff --git a/contrib/python/docker/docker/models/volumes.py b/contrib/python/docker/docker/models/volumes.py
new file mode 100644
index 0000000000..12c9f14b27
--- /dev/null
+++ b/contrib/python/docker/docker/models/volumes.py
@@ -0,0 +1,99 @@
+from ..api import APIClient
+from .resource import Collection, Model
+
+
+class Volume(Model):
+ """A volume."""
+ id_attribute = 'Name'
+
+ @property
+ def name(self):
+ """The name of the volume."""
+ return self.attrs['Name']
+
+ def remove(self, force=False):
+ """
+ Remove this volume.
+
+ Args:
+            force (bool): Force removal of volumes that were already removed
+                out of band by the volume driver plugin.
+
+        Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
+ """
+ return self.client.api.remove_volume(self.id, force=force)
+
+
+class VolumeCollection(Collection):
+ """Volumes on the Docker server."""
+ model = Volume
+
+ def create(self, name=None, **kwargs):
+ """
+ Create a volume.
+
+ Args:
+ name (str): Name of the volume. If not specified, the engine
+ generates a name.
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (:py:class:`Volume`): The volume created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+            >>> volume = client.volumes.create(name='foobar', driver='local',
+            ...     driver_opts={'foo': 'bar', 'baz': 'false'},
+            ...     labels={"key": "value"})
+
+ """
+ obj = self.client.api.create_volume(name, **kwargs)
+ return self.prepare_model(obj)
+
+ def get(self, volume_id):
+ """
+ Get a volume.
+
+ Args:
+ volume_id (str): Volume name.
+
+ Returns:
+ (:py:class:`Volume`): The volume.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the volume does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_volume(volume_id))
+
+ def list(self, **kwargs):
+ """
+ List volumes. Similar to the ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Volume`): The volumes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.volumes(**kwargs)
+ if not resp.get('Volumes'):
+ return []
+ return [self.prepare_model(obj) for obj in resp['Volumes']]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_volumes(filters=filters)
+ prune.__doc__ = APIClient.prune_volumes.__doc__
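A usage sketch for the volumes model (names, driver options and labels are illustrative):

    import docker

    client = docker.from_env()
    vol = client.volumes.create(name='appdata', driver='local',
                                labels={'app': 'demo'})
    print(vol.name, vol.attrs.get('Mountpoint'))
    print([v.name for v in client.volumes.list()])
    vol.remove(force=True)
    client.volumes.prune()       # removes unused volumes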
diff --git a/contrib/python/docker/docker/tls.py b/contrib/python/docker/docker/tls.py
new file mode 100644
index 0000000000..ad4966c903
--- /dev/null
+++ b/contrib/python/docker/docker/tls.py
@@ -0,0 +1,67 @@
+import os
+
+from . import errors
+
+
+class TLSConfig:
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be a bool or a path to a CA cert
+ file to verify against. If ``True``, verify using ca_cert;
+ if ``False`` or not specified, do not verify.
+ """
+ cert = None
+ ca_cert = None
+ verify = None
+
+ def __init__(self, client_cert=None, ca_cert=None, verify=None):
+ # Argument compatibility/mapping with
+ # https://docs.docker.com/engine/articles/https/
+ # This diverges from the Docker CLI in that users can specify 'tls'
+ # here, but also disable any public/default CA pool verification by
+ # leaving verify=False
+
+ # "client_cert" must have both or neither cert/key files. In
+ # either case, Alert the user when both are expected, but any are
+ # missing.
+
+ if client_cert:
+ try:
+ tls_cert, tls_key = client_cert
+ except ValueError:
+ raise errors.TLSParameterError(
+ 'client_cert must be a tuple of'
+ ' (client certificate, key file)'
+ ) from None
+
+ if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
+ not os.path.isfile(tls_key)):
+ raise errors.TLSParameterError(
+ 'Path to a certificate and key files must be provided'
+ ' through the client_cert param'
+ )
+ self.cert = (tls_cert, tls_key)
+
+ # If verify is set, make sure the cert exists
+ self.verify = verify
+ self.ca_cert = ca_cert
+ if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
+ raise errors.TLSParameterError(
+ 'Invalid CA certificate provided for `ca_cert`.'
+ )
+
+ def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
+ if self.verify and self.ca_cert:
+ client.verify = self.ca_cert
+ else:
+ client.verify = self.verify
+
+ if self.cert:
+ client.cert = self.cert
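A sketch of wiring TLSConfig into a client (the paths are illustrative and must point at real PEM files):

    import docker
    from docker.tls import TLSConfig

    tls_config = TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),  # illustrative paths
        ca_cert='/certs/ca.pem',
        verify=True,
    )
    client = docker.DockerClient(base_url='tcp://127.0.0.1:2376',
                                 tls=tls_config)
    print(client.ping())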
diff --git a/contrib/python/docker/docker/transport/__init__.py b/contrib/python/docker/docker/transport/__init__.py
new file mode 100644
index 0000000000..8c68b1f6e2
--- /dev/null
+++ b/contrib/python/docker/docker/transport/__init__.py
@@ -0,0 +1,12 @@
+from .unixconn import UnixHTTPAdapter
+
+try:
+ from .npipeconn import NpipeHTTPAdapter
+ from .npipesocket import NpipeSocket
+except ImportError:
+ pass
+
+try:
+ from .sshconn import SSHHTTPAdapter
+except ImportError:
+ pass
diff --git a/contrib/python/docker/docker/transport/basehttpadapter.py b/contrib/python/docker/docker/transport/basehttpadapter.py
new file mode 100644
index 0000000000..2301b6b07a
--- /dev/null
+++ b/contrib/python/docker/docker/transport/basehttpadapter.py
@@ -0,0 +1,13 @@
+import requests.adapters
+
+
+class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
+ def close(self):
+ super().close()
+ if hasattr(self, 'pools'):
+ self.pools.clear()
+
+ # Fix for requests 2.32.2+:
+ # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
+ def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
+ return self.get_connection(request.url, proxies)
diff --git a/contrib/python/docker/docker/transport/npipeconn.py b/contrib/python/docker/docker/transport/npipeconn.py
new file mode 100644
index 0000000000..44d6921c2c
--- /dev/null
+++ b/contrib/python/docker/docker/transport/npipeconn.py
@@ -0,0 +1,102 @@
+import queue
+
+import requests.adapters
+import urllib3
+import urllib3.connection
+
+from .. import constants
+from .basehttpadapter import BaseHTTPAdapter
+from .npipesocket import NpipeSocket
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class NpipeHTTPConnection(urllib3.connection.HTTPConnection):
+ def __init__(self, npipe_path, timeout=60):
+ super().__init__(
+ 'localhost', timeout=timeout
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def connect(self):
+ sock = NpipeSocket()
+ sock.settimeout(self.timeout)
+ sock.connect(self.npipe_path)
+ self.sock = sock
+
+
+class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, npipe_path, timeout=60, maxsize=10):
+ super().__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return NpipeHTTPConnection(
+ self.npipe_path, self.timeout
+ )
+
+ # When re-using connections, urllib3 tries to call select() on our
+ # NpipeSocket instance, causing a crash. To circumvent this, we override
+ # _get_conn, where that check happens.
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+ except AttributeError as ae: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
+
+ except queue.Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ ) from None
+ # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class NpipeHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
+ 'pools',
+ 'timeout',
+ 'max_pool_size']
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
+ self.npipe_path = base_url.replace('npipe://', '')
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super().__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = NpipeHTTPConnectionPool(
+ self.npipe_path, self.timeout,
+ maxsize=self.max_pool_size
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, like is the case when using a UNIX socket.
+ # Since proxies are an irrelevant notion in the case of UNIX sockets
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-sdk-python/issues/811
+ return request.path_url
diff --git a/contrib/python/docker/docker/transport/npipesocket.py b/contrib/python/docker/docker/transport/npipesocket.py
new file mode 100644
index 0000000000..d91938e766
--- /dev/null
+++ b/contrib/python/docker/docker/transport/npipesocket.py
@@ -0,0 +1,230 @@
+import functools
+import io
+import time
+
+import pywintypes
+import win32api
+import win32event
+import win32file
+import win32pipe
+
+cERROR_PIPE_BUSY = 0xe7
+cSECURITY_SQOS_PRESENT = 0x100000
+cSECURITY_ANONYMOUS = 0
+
+MAXIMUM_RETRY_COUNT = 10
+
+
+def check_closed(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if self._closed:
+ raise RuntimeError(
+ 'Can not reuse socket after connection was closed.'
+ )
+ return f(self, *args, **kwargs)
+ return wrapped
+
+
+class NpipeSocket:
+ """ Partial implementation of the socket API over windows named pipes.
+ This implementation is only designed to be used as a client socket,
+ and server-specific methods (bind, listen, accept...) are not
+ implemented.
+ """
+
+ def __init__(self, handle=None):
+ self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
+ self._handle = handle
+ self._closed = False
+
+ def accept(self):
+ raise NotImplementedError()
+
+ def bind(self, address):
+ raise NotImplementedError()
+
+ def close(self):
+ self._handle.Close()
+ self._closed = True
+
+ @check_closed
+ def connect(self, address, retry_count=0):
+ try:
+ handle = win32file.CreateFile(
+ address,
+ win32file.GENERIC_READ | win32file.GENERIC_WRITE,
+ 0,
+ None,
+ win32file.OPEN_EXISTING,
+ (cSECURITY_ANONYMOUS
+ | cSECURITY_SQOS_PRESENT
+ | win32file.FILE_FLAG_OVERLAPPED),
+ 0
+ )
+ except win32pipe.error as e:
+ # See Remarks:
+ # https://msdn.microsoft.com/en-us/library/aa365800.aspx
+ if e.winerror == cERROR_PIPE_BUSY:
+ # Another program or thread has grabbed our pipe instance
+ # before we got to it. Wait for availability and attempt to
+ # connect again.
+ retry_count = retry_count + 1
+ if (retry_count < MAXIMUM_RETRY_COUNT):
+ time.sleep(1)
+ return self.connect(address, retry_count)
+ raise e
+
+ self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
+
+ self._handle = handle
+ self._address = address
+
+ @check_closed
+ def connect_ex(self, address):
+ return self.connect(address)
+
+ @check_closed
+ def detach(self):
+ self._closed = True
+ return self._handle
+
+ @check_closed
+ def dup(self):
+ return NpipeSocket(self._handle)
+
+ def getpeername(self):
+ return self._address
+
+ def getsockname(self):
+ return self._address
+
+ def getsockopt(self, level, optname, buflen=None):
+ raise NotImplementedError()
+
+ def ioctl(self, control, option):
+ raise NotImplementedError()
+
+ def listen(self, backlog):
+ raise NotImplementedError()
+
+ def makefile(self, mode=None, bufsize=None):
+ if mode.strip('b') != 'r':
+ raise NotImplementedError()
+ rawio = NpipeFileIOBase(self)
+ if bufsize is None or bufsize <= 0:
+ bufsize = io.DEFAULT_BUFFER_SIZE
+ return io.BufferedReader(rawio, buffer_size=bufsize)
+
+ @check_closed
+ def recv(self, bufsize, flags=0):
+ err, data = win32file.ReadFile(self._handle, bufsize)
+ return data
+
+ @check_closed
+ def recvfrom(self, bufsize, flags=0):
+ data = self.recv(bufsize, flags)
+ return (data, self._address)
+
+ @check_closed
+ def recvfrom_into(self, buf, nbytes=0, flags=0):
+ return self.recv_into(buf, nbytes, flags), self._address
+
+ @check_closed
+ def recv_into(self, buf, nbytes=0):
+ readbuf = buf
+ if not isinstance(buf, memoryview):
+ readbuf = memoryview(buf)
+
+ event = win32event.CreateEvent(None, True, True, None)
+ try:
+ overlapped = pywintypes.OVERLAPPED()
+ overlapped.hEvent = event
+ err, data = win32file.ReadFile(
+ self._handle,
+ readbuf[:nbytes] if nbytes else readbuf,
+ overlapped
+ )
+ wait_result = win32event.WaitForSingleObject(event, self._timeout)
+ if wait_result == win32event.WAIT_TIMEOUT:
+ win32file.CancelIo(self._handle)
+ raise TimeoutError
+ return win32file.GetOverlappedResult(self._handle, overlapped, 0)
+ finally:
+ win32api.CloseHandle(event)
+
+ @check_closed
+ def send(self, string, flags=0):
+ event = win32event.CreateEvent(None, True, True, None)
+ try:
+ overlapped = pywintypes.OVERLAPPED()
+ overlapped.hEvent = event
+ win32file.WriteFile(self._handle, string, overlapped)
+ wait_result = win32event.WaitForSingleObject(event, self._timeout)
+ if wait_result == win32event.WAIT_TIMEOUT:
+ win32file.CancelIo(self._handle)
+ raise TimeoutError
+ return win32file.GetOverlappedResult(self._handle, overlapped, 0)
+ finally:
+ win32api.CloseHandle(event)
+
+ @check_closed
+ def sendall(self, string, flags=0):
+ return self.send(string, flags)
+
+ @check_closed
+ def sendto(self, string, address):
+ self.connect(address)
+ return self.send(string)
+
+ def setblocking(self, flag):
+ if flag:
+ return self.settimeout(None)
+ return self.settimeout(0)
+
+ def settimeout(self, value):
+ if value is None:
+ # Blocking mode
+ self._timeout = win32event.INFINITE
+ elif not isinstance(value, (float, int)) or value < 0:
+ raise ValueError('Timeout value out of range')
+ else:
+ # Timeout mode - Value converted to milliseconds
+ self._timeout = int(value * 1000)
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setsockopt(self, level, optname, value):
+ raise NotImplementedError()
+
+ @check_closed
+ def shutdown(self, how):
+ return self.close()
+
+
+class NpipeFileIOBase(io.RawIOBase):
+ def __init__(self, npipe_socket):
+ self.sock = npipe_socket
+
+ def close(self):
+ super().close()
+ self.sock = None
+
+ def fileno(self):
+ return self.sock.fileno()
+
+ def isatty(self):
+ return False
+
+ def readable(self):
+ return True
+
+ def readinto(self, buf):
+ return self.sock.recv_into(buf)
+
+ def seekable(self):
+ return False
+
+ def writable(self):
+ return False
diff --git a/contrib/python/docker/docker/transport/sshconn.py b/contrib/python/docker/docker/transport/sshconn.py
new file mode 100644
index 0000000000..1870668010
--- /dev/null
+++ b/contrib/python/docker/docker/transport/sshconn.py
@@ -0,0 +1,250 @@
+import logging
+import os
+import queue
+import signal
+import socket
+import subprocess
+import urllib.parse
+
+import paramiko
+import requests.adapters
+import urllib3
+import urllib3.connection
+
+from .. import constants
+from .basehttpadapter import BaseHTTPAdapter
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class SSHSocket(socket.socket):
+ def __init__(self, host):
+ super().__init__(
+ socket.AF_INET, socket.SOCK_STREAM)
+ self.host = host
+ self.port = None
+ self.user = None
+ if ':' in self.host:
+ self.host, self.port = self.host.split(':')
+ if '@' in self.host:
+ self.user, self.host = self.host.split('@')
+
+ self.proc = None
+
+ def connect(self, **kwargs):
+ args = ['ssh']
+ if self.user:
+ args = args + ['-l', self.user]
+
+ if self.port:
+ args = args + ['-p', self.port]
+
+ args = args + ['--', self.host, 'docker system dial-stdio']
+
+ preexec_func = None
+ if not constants.IS_WINDOWS_PLATFORM:
+ def f():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ preexec_func = f
+
+ env = dict(os.environ)
+
+ # drop LD_LIBRARY_PATH and SSL_CERT_FILE
+ env.pop('LD_LIBRARY_PATH', None)
+ env.pop('SSL_CERT_FILE', None)
+
+ self.proc = subprocess.Popen(
+ args,
+ env=env,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=preexec_func)
+
+ def _write(self, data):
+ if not self.proc or self.proc.stdin.closed:
+            raise Exception('SSH subprocess not initiated. '
+                            'connect() must be called first.')
+ written = self.proc.stdin.write(data)
+ self.proc.stdin.flush()
+ return written
+
+ def sendall(self, data):
+ self._write(data)
+
+ def send(self, data):
+ return self._write(data)
+
+ def recv(self, n):
+ if not self.proc:
+            raise Exception('SSH subprocess not initiated. '
+                            'connect() must be called first.')
+ return self.proc.stdout.read(n)
+
+ def makefile(self, mode):
+ if not self.proc:
+ self.connect()
+ self.proc.stdout.channel = self
+
+ return self.proc.stdout
+
+ def close(self):
+ if not self.proc or self.proc.stdin.closed:
+ return
+ self.proc.stdin.write(b'\n\n')
+ self.proc.stdin.flush()
+ self.proc.terminate()
+
+
+class SSHConnection(urllib3.connection.HTTPConnection):
+ def __init__(self, ssh_transport=None, timeout=60, host=None):
+ super().__init__(
+ 'localhost', timeout=timeout
+ )
+ self.ssh_transport = ssh_transport
+ self.timeout = timeout
+ self.ssh_host = host
+
+ def connect(self):
+ if self.ssh_transport:
+ sock = self.ssh_transport.open_session()
+ sock.settimeout(self.timeout)
+ sock.exec_command('docker system dial-stdio')
+ else:
+ sock = SSHSocket(self.ssh_host)
+ sock.settimeout(self.timeout)
+ sock.connect()
+
+ self.sock = sock
+
+
+class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ scheme = 'ssh'
+
+ def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
+ super().__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.ssh_transport = None
+ self.timeout = timeout
+ if ssh_client:
+ self.ssh_transport = ssh_client.get_transport()
+ self.ssh_host = host
+
+ def _new_conn(self):
+ return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
+
+ # When re-using connections, urllib3 calls fileno() on our
+ # SSH channel instance, quickly overloading our fd limit. To avoid this,
+ # we override _get_conn
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError as ae: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
+
+ except queue.Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ ) from None
+ # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class SSHHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
+ 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
+ ]
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
+ shell_out=False):
+ self.ssh_client = None
+ if not shell_out:
+ self._create_paramiko_client(base_url)
+ self._connect()
+
+ self.ssh_host = base_url
+ if base_url.startswith('ssh://'):
+ self.ssh_host = base_url[len('ssh://'):]
+
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super().__init__()
+
+ def _create_paramiko_client(self, base_url):
+ logging.getLogger("paramiko").setLevel(logging.WARNING)
+ self.ssh_client = paramiko.SSHClient()
+ base_url = urllib.parse.urlparse(base_url)
+ self.ssh_params = {
+ "hostname": base_url.hostname,
+ "port": base_url.port,
+ "username": base_url.username
+ }
+ ssh_config_file = os.path.expanduser("~/.ssh/config")
+ if os.path.exists(ssh_config_file):
+ conf = paramiko.SSHConfig()
+ with open(ssh_config_file) as f:
+ conf.parse(f)
+ host_config = conf.lookup(base_url.hostname)
+ if 'proxycommand' in host_config:
+ self.ssh_params["sock"] = paramiko.ProxyCommand(
+ host_config['proxycommand']
+ )
+ if 'hostname' in host_config:
+ self.ssh_params['hostname'] = host_config['hostname']
+ if base_url.port is None and 'port' in host_config:
+ self.ssh_params['port'] = host_config['port']
+ if base_url.username is None and 'user' in host_config:
+ self.ssh_params['username'] = host_config['user']
+ if 'identityfile' in host_config:
+ self.ssh_params['key_filename'] = host_config['identityfile']
+
+ self.ssh_client.load_system_host_keys()
+ self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
+
+ def _connect(self):
+ if self.ssh_client:
+ self.ssh_client.connect(**self.ssh_params)
+
+ def get_connection(self, url, proxies=None):
+ if not self.ssh_client:
+ return SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+        # Connection is closed; try a reconnect
+ if self.ssh_client and not self.ssh_client.get_transport():
+ self._connect()
+
+ pool = SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def close(self):
+ super().close()
+ if self.ssh_client:
+ self.ssh_client.close()
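The adapter above is selected automatically for ssh:// base URLs; a sketch (the host is illustrative, and use_ssh_client=True opts into the shell-out SSHSocket path instead of paramiko):

    import docker

    client = docker.DockerClient(base_url='ssh://user@docker-host',
                                 use_ssh_client=True)  # shell out to the ssh binary
    print(client.version()['Version'])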
diff --git a/contrib/python/docker/docker/transport/unixconn.py b/contrib/python/docker/docker/transport/unixconn.py
new file mode 100644
index 0000000000..d571833f04
--- /dev/null
+++ b/contrib/python/docker/docker/transport/unixconn.py
@@ -0,0 +1,86 @@
+import socket
+
+import requests.adapters
+import urllib3
+import urllib3.connection
+
+from .. import constants
+from .basehttpadapter import BaseHTTPAdapter
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class UnixHTTPConnection(urllib3.connection.HTTPConnection):
+
+ def __init__(self, base_url, unix_socket, timeout=60):
+ super().__init__(
+ 'localhost', timeout=timeout
+ )
+ self.base_url = base_url
+ self.unix_socket = unix_socket
+ self.timeout = timeout
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(self.timeout)
+ sock.connect(self.unix_socket)
+ self.sock = sock
+
+
+class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
+ super().__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.base_url = base_url
+ self.socket_path = socket_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return UnixHTTPConnection(
+ self.base_url, self.socket_path, self.timeout
+ )
+
+
+class UnixHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
+ 'socket_path',
+ 'timeout',
+ 'max_pool_size']
+
+ def __init__(self, socket_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
+ socket_path = socket_url.replace('http+unix://', '')
+ if not socket_path.startswith('/'):
+ socket_path = f"/{socket_path}"
+ self.socket_path = socket_path
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super().__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = UnixHTTPConnectionPool(
+ url, self.socket_path, self.timeout,
+ maxsize=self.max_pool_size
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, like is the case when using a UNIX socket.
+ # Since proxies are an irrelevant notion in the case of UNIX sockets
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-py/issues/811
+ return request.path_url
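The adapter can also be mounted on a plain requests session, which is essentially what APIClient does internally (the socket path shown is the default daemon socket):

    import requests

    from docker.transport.unixconn import UnixHTTPAdapter

    session = requests.Session()
    adapter = UnixHTTPAdapter('http+unix:///var/run/docker.sock')
    session.mount('http+docker://', adapter)
    # The hostname is ignored; requests are routed over the UNIX socket.
    resp = session.get('http+docker://localhost/_ping')
    print(resp.status_code, resp.text)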
diff --git a/contrib/python/docker/docker/types/__init__.py b/contrib/python/docker/docker/types/__init__.py
new file mode 100644
index 0000000000..fbe247210b
--- /dev/null
+++ b/contrib/python/docker/docker/types/__init__.py
@@ -0,0 +1,24 @@
+from .containers import ContainerConfig, DeviceRequest, HostConfig, LogConfig, Ulimit
+from .daemon import CancellableStream
+from .healthcheck import Healthcheck
+from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
+from .services import (
+ ConfigReference,
+ ContainerSpec,
+ DNSConfig,
+ DriverConfig,
+ EndpointSpec,
+ Mount,
+ NetworkAttachmentConfig,
+ Placement,
+ PlacementPreference,
+ Privileges,
+ Resources,
+ RestartPolicy,
+ RollbackConfig,
+ SecretReference,
+ ServiceMode,
+ TaskTemplate,
+ UpdateConfig,
+)
+from .swarm import SwarmExternalCA, SwarmSpec
diff --git a/contrib/python/docker/docker/types/base.py b/contrib/python/docker/docker/types/base.py
new file mode 100644
index 0000000000..8851f1e2cb
--- /dev/null
+++ b/contrib/python/docker/docker/types/base.py
@@ -0,0 +1,4 @@
+class DictType(dict):
+ def __init__(self, init):
+ for k, v in init.items():
+ self[k] = v
diff --git a/contrib/python/docker/docker/types/containers.py b/contrib/python/docker/docker/types/containers.py
new file mode 100644
index 0000000000..598188a25e
--- /dev/null
+++ b/contrib/python/docker/docker/types/containers.py
@@ -0,0 +1,790 @@
+from .. import errors
+from ..utils.utils import (
+ convert_port_bindings,
+ convert_tmpfs_mounts,
+ convert_volume_binds,
+ format_environment,
+ format_extra_hosts,
+ normalize_links,
+ parse_bytes,
+ parse_devices,
+ split_command,
+ version_gte,
+ version_lt,
+)
+from .base import DictType
+from .healthcheck import Healthcheck
+
+
+class LogConfigTypesEnum:
+ _values = (
+ 'json-file',
+ 'syslog',
+ 'journald',
+ 'gelf',
+ 'fluentd',
+ 'none'
+ )
+ JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
+
+
+class LogConfig(DictType):
+ """
+ Configure logging for a container, when provided as an argument to
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+ You may refer to the
+ `official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
+ for more information.
+
+ Args:
+ type (str): Indicate which log driver to use. A set of valid drivers
+ is provided as part of the :py:attr:`LogConfig.types`
+ enum. Other values may be accepted depending on the engine version
+ and available logging plugins.
+ config (dict): A driver-dependent configuration dictionary. Please
+ refer to the driver's documentation for a list of valid config
+ keys.
+
+ Example:
+
+ >>> from docker.types import LogConfig
+ >>> lc = LogConfig(type=LogConfig.types.JSON, config={
+ ... 'max-size': '1g',
+ ... 'labels': 'production_status,geo'
+ ... })
+ >>> hc = client.create_host_config(log_config=lc)
+ >>> container = client.create_container('busybox', 'true',
+ ... host_config=hc)
+ >>> client.inspect_container(container)['HostConfig']['LogConfig']
+ {
+ 'Type': 'json-file',
+ 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}
+ }
+ """
+ types = LogConfigTypesEnum
+
+ def __init__(self, **kwargs):
+ log_driver_type = kwargs.get('type', kwargs.get('Type'))
+ config = kwargs.get('config', kwargs.get('Config')) or {}
+
+ if config and not isinstance(config, dict):
+ raise ValueError("LogConfig.config must be a dictionary")
+
+ super().__init__({
+ 'Type': log_driver_type,
+ 'Config': config
+ })
+
+ @property
+ def type(self):
+ return self['Type']
+
+ @type.setter
+ def type(self, value):
+ self['Type'] = value
+
+ @property
+ def config(self):
+ return self['Config']
+
+ def set_config_value(self, key, value):
+ """ Set a the value for ``key`` to ``value`` inside the ``config``
+ dict.
+ """
+ self.config[key] = value
+
+ def unset_config(self, key):
+ """ Remove the ``key`` property from the ``config`` dict. """
+ if key in self.config:
+ del self.config[key]
+
+
+class Ulimit(DictType):
+ """
+ Create a ulimit declaration to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+        name (str): Which ulimit this applies to. The valid names can be
+            found in '/etc/security/limits.conf' on a GNU/Linux system.
+ soft (int): The soft limit for this ulimit. Optional.
+ hard (int): The hard limit for this ulimit. Optional.
+
+ Example:
+
+ >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
+ >>> hc = client.create_host_config(ulimits=[nproc_limit])
+    >>> container = client.create_container(
+    ...     'busybox', 'true', host_config=hc
+    ... )
+ >>> client.inspect_container(container)['HostConfig']['Ulimits']
+ [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
+
+ """
+ def __init__(self, **kwargs):
+ name = kwargs.get('name', kwargs.get('Name'))
+ soft = kwargs.get('soft', kwargs.get('Soft'))
+ hard = kwargs.get('hard', kwargs.get('Hard'))
+ if not isinstance(name, str):
+ raise ValueError("Ulimit.name must be a string")
+ if soft and not isinstance(soft, int):
+ raise ValueError("Ulimit.soft must be an integer")
+ if hard and not isinstance(hard, int):
+ raise ValueError("Ulimit.hard must be an integer")
+ super().__init__({
+ 'Name': name,
+ 'Soft': soft,
+ 'Hard': hard
+ })
+
+ @property
+ def name(self):
+ return self['Name']
+
+ @name.setter
+ def name(self, value):
+ self['Name'] = value
+
+ @property
+ def soft(self):
+ return self.get('Soft')
+
+ @soft.setter
+ def soft(self, value):
+ self['Soft'] = value
+
+ @property
+ def hard(self):
+ return self.get('Hard')
+
+ @hard.setter
+ def hard(self, value):
+ self['Hard'] = value
+
+
+class DeviceRequest(DictType):
+ """
+ Create a device request to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+ driver (str): Which driver to use for this device. Optional.
+        count (int): Number of devices to request. Optional.
+ Set to -1 to request all available devices.
+ device_ids (list): List of strings for device IDs. Optional.
+ Set either ``count`` or ``device_ids``.
+ capabilities (list): List of lists of strings to request
+ capabilities. Optional. The outer list acts as an OR, and
+ each inner list as an AND; the driver will try to satisfy
+ one of the inner lists.
+ Available capabilities for the ``nvidia`` driver can be found
+ `here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
+ options (dict): Driver-specific options. Optional.
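+
+ Example (illustrative sketch; assumes a daemon with the ``nvidia``
+ driver installed and an ``APIClient`` instance named ``client``):
+
+ >>> gpu_request = docker.types.DeviceRequest(
+ ... driver='nvidia', count=-1, capabilities=[['gpu']]
+ ... )
+ >>> hc = client.create_host_config(device_requests=[gpu_request])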
+ """
+
+ def __init__(self, **kwargs):
+ driver = kwargs.get('driver', kwargs.get('Driver'))
+ count = kwargs.get('count', kwargs.get('Count'))
+ device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
+ capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
+ options = kwargs.get('options', kwargs.get('Options'))
+
+ if driver is None:
+ driver = ''
+ elif not isinstance(driver, str):
+ raise ValueError('DeviceRequest.driver must be a string')
+ if count is None:
+ count = 0
+ elif not isinstance(count, int):
+ raise ValueError('DeviceRequest.count must be an integer')
+ if device_ids is None:
+ device_ids = []
+ elif not isinstance(device_ids, list):
+ raise ValueError('DeviceRequest.device_ids must be a list')
+ if capabilities is None:
+ capabilities = []
+ elif not isinstance(capabilities, list):
+ raise ValueError('DeviceRequest.capabilities must be a list')
+ if options is None:
+ options = {}
+ elif not isinstance(options, dict):
+ raise ValueError('DeviceRequest.options must be a dict')
+
+ super().__init__({
+ 'Driver': driver,
+ 'Count': count,
+ 'DeviceIDs': device_ids,
+ 'Capabilities': capabilities,
+ 'Options': options
+ })
+
+ @property
+ def driver(self):
+ return self['Driver']
+
+ @driver.setter
+ def driver(self, value):
+ self['Driver'] = value
+
+ @property
+ def count(self):
+ return self['Count']
+
+ @count.setter
+ def count(self, value):
+ self['Count'] = value
+
+ @property
+ def device_ids(self):
+ return self['DeviceIDs']
+
+ @device_ids.setter
+ def device_ids(self, value):
+ self['DeviceIDs'] = value
+
+ @property
+ def capabilities(self):
+ return self['Capabilities']
+
+ @capabilities.setter
+ def capabilities(self, value):
+ self['Capabilities'] = value
+
+ @property
+ def options(self):
+ return self['Options']
+
+ @options.setter
+ def options(self, value):
+ self['Options'] = value
+
+
+class HostConfig(dict):
+ def __init__(self, version, binds=None, port_bindings=None,
+ lxc_conf=None, publish_all_ports=False, links=None,
+ privileged=False, dns=None, dns_search=None,
+ volumes_from=None, network_mode=None, restart_policy=None,
+ cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_reservation=None,
+ kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
+ group_add=None, cpu_quota=None, cpu_period=None,
+ blkio_weight=None, blkio_weight_device=None,
+ device_read_bps=None, device_write_bps=None,
+ device_read_iops=None, device_write_iops=None,
+ oom_kill_disable=False, shm_size=None, sysctls=None,
+ tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
+ cpuset_cpus=None, userns_mode=None, uts_mode=None,
+ pids_limit=None, isolation=None, auto_remove=False,
+ storage_opt=None, init=None, init_path=None,
+ volume_driver=None, cpu_count=None, cpu_percent=None,
+ nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
+ cpu_rt_period=None, cpu_rt_runtime=None,
+ device_cgroup_rules=None, device_requests=None,
+ cgroupns=None):
+
+ if mem_limit is not None:
+ self['Memory'] = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ self['MemorySwap'] = parse_bytes(memswap_limit)
+
+ if mem_reservation:
+ self['MemoryReservation'] = parse_bytes(mem_reservation)
+
+ if kernel_memory:
+ self['KernelMemory'] = parse_bytes(kernel_memory)
+
+ if mem_swappiness is not None:
+ if not isinstance(mem_swappiness, int):
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
+ )
+
+ self['MemorySwappiness'] = mem_swappiness
+
+ if shm_size is not None:
+ if isinstance(shm_size, str):
+ shm_size = parse_bytes(shm_size)
+
+ self['ShmSize'] = shm_size
+
+ if pid_mode:
+ if version_lt(version, '1.24') and pid_mode != 'host':
+ raise host_config_value_error('pid_mode', pid_mode)
+ self['PidMode'] = pid_mode
+
+ if ipc_mode:
+ self['IpcMode'] = ipc_mode
+
+ if privileged:
+ self['Privileged'] = privileged
+
+ if oom_kill_disable:
+ self['OomKillDisable'] = oom_kill_disable
+
+ if oom_score_adj:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('oom_score_adj', '1.22')
+ if not isinstance(oom_score_adj, int):
+ raise host_config_type_error(
+ 'oom_score_adj', oom_score_adj, 'int'
+ )
+ self['OomScoreAdj'] = oom_score_adj
+
+ if publish_all_ports:
+ self['PublishAllPorts'] = publish_all_ports
+
+ if read_only is not None:
+ self['ReadonlyRootfs'] = read_only
+
+ if dns_search:
+ self['DnsSearch'] = dns_search
+
+ if network_mode == 'host' and port_bindings:
+ raise host_config_incompatible_error(
+ 'network_mode', 'host', 'port_bindings'
+ )
+ self['NetworkMode'] = network_mode or 'default'
+
+ if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
+ self['RestartPolicy'] = restart_policy
+
+ if cap_add:
+ self['CapAdd'] = cap_add
+
+ if cap_drop:
+ self['CapDrop'] = cap_drop
+
+ if devices:
+ self['Devices'] = parse_devices(devices)
+
+ if group_add:
+ self['GroupAdd'] = [str(grp) for grp in group_add]
+
+ if dns is not None:
+ self['Dns'] = dns
+
+ if dns_opt is not None:
+ self['DnsOptions'] = dns_opt
+
+ if security_opt is not None:
+ if not isinstance(security_opt, list):
+ raise host_config_type_error(
+ 'security_opt', security_opt, 'list'
+ )
+
+ self['SecurityOpt'] = security_opt
+
+ if sysctls:
+ if not isinstance(sysctls, dict):
+ raise host_config_type_error('sysctls', sysctls, 'dict')
+ self['Sysctls'] = {}
+ for k, v in sysctls.items():
+ self['Sysctls'][k] = str(v)
+
+ if volumes_from is not None:
+ if isinstance(volumes_from, str):
+ volumes_from = volumes_from.split(',')
+
+ self['VolumesFrom'] = volumes_from
+
+ if binds is not None:
+ self['Binds'] = convert_volume_binds(binds)
+
+ if port_bindings is not None:
+ self['PortBindings'] = convert_port_bindings(port_bindings)
+
+ if extra_hosts is not None:
+ if isinstance(extra_hosts, dict):
+ extra_hosts = format_extra_hosts(extra_hosts)
+
+ self['ExtraHosts'] = extra_hosts
+
+ if links is not None:
+ self['Links'] = normalize_links(links)
+
+ if isinstance(lxc_conf, dict):
+ formatted = []
+ for k, v in lxc_conf.items():
+ formatted.append({'Key': k, 'Value': str(v)})
+ lxc_conf = formatted
+
+ if lxc_conf is not None:
+ self['LxcConf'] = lxc_conf
+
+ if cgroup_parent is not None:
+ self['CgroupParent'] = cgroup_parent
+
+ if ulimits is not None:
+ if not isinstance(ulimits, list):
+ raise host_config_type_error('ulimits', ulimits, 'list')
+ self['Ulimits'] = []
+ for lmt in ulimits:
+ if not isinstance(lmt, Ulimit):
+ lmt = Ulimit(**lmt)
+ self['Ulimits'].append(lmt)
+
+ if log_config is not None:
+ if not isinstance(log_config, LogConfig):
+ if not isinstance(log_config, dict):
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
+ )
+ log_config = LogConfig(**log_config)
+
+ self['LogConfig'] = log_config
+
+ if cpu_quota:
+ if not isinstance(cpu_quota, int):
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
+ self['CpuQuota'] = cpu_quota
+
+ if cpu_period:
+ if not isinstance(cpu_period, int):
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
+ self['CpuPeriod'] = cpu_period
+
+ if cpu_shares:
+ if not isinstance(cpu_shares, int):
+ raise host_config_type_error('cpu_shares', cpu_shares, 'int')
+
+ self['CpuShares'] = cpu_shares
+
+ if cpuset_cpus:
+ self['CpusetCpus'] = cpuset_cpus
+
+ if cpuset_mems:
+ if not isinstance(cpuset_mems, str):
+ raise host_config_type_error(
+ 'cpuset_mems', cpuset_mems, 'str'
+ )
+ self['CpusetMems'] = cpuset_mems
+
+ if cpu_rt_period:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_period', '1.25')
+
+ if not isinstance(cpu_rt_period, int):
+ raise host_config_type_error(
+ 'cpu_rt_period', cpu_rt_period, 'int'
+ )
+ self['CPURealtimePeriod'] = cpu_rt_period
+
+ if cpu_rt_runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_runtime', '1.25')
+
+ if not isinstance(cpu_rt_runtime, int):
+ raise host_config_type_error(
+ 'cpu_rt_runtime', cpu_rt_runtime, 'int'
+ )
+ self['CPURealtimeRuntime'] = cpu_rt_runtime
+
+ if blkio_weight:
+ if not isinstance(blkio_weight, int):
+ raise host_config_type_error(
+ 'blkio_weight', blkio_weight, 'int'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight', '1.22')
+ self["BlkioWeight"] = blkio_weight
+
+ if blkio_weight_device:
+ if not isinstance(blkio_weight_device, list):
+ raise host_config_type_error(
+ 'blkio_weight_device', blkio_weight_device, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight_device', '1.22')
+ self["BlkioWeightDevice"] = blkio_weight_device
+
+ if device_read_bps:
+ if not isinstance(device_read_bps, list):
+ raise host_config_type_error(
+ 'device_read_bps', device_read_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_bps', '1.22')
+ self["BlkioDeviceReadBps"] = device_read_bps
+
+ if device_write_bps:
+ if not isinstance(device_write_bps, list):
+ raise host_config_type_error(
+ 'device_write_bps', device_write_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_bps', '1.22')
+ self["BlkioDeviceWriteBps"] = device_write_bps
+
+ if device_read_iops:
+ if not isinstance(device_read_iops, list):
+ raise host_config_type_error(
+ 'device_read_iops', device_read_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_iops', '1.22')
+ self["BlkioDeviceReadIOps"] = device_read_iops
+
+ if device_write_iops:
+ if not isinstance(device_write_iops, list):
+ raise host_config_type_error(
+ 'device_write_iops', device_write_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_iops', '1.22')
+ self["BlkioDeviceWriteIOps"] = device_write_iops
+
+ if tmpfs:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('tmpfs', '1.22')
+ self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
+
+ if userns_mode:
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('userns_mode', '1.23')
+
+ if userns_mode != "host":
+ raise host_config_value_error("userns_mode", userns_mode)
+ self['UsernsMode'] = userns_mode
+
+ if uts_mode:
+ if uts_mode != "host":
+ raise host_config_value_error("uts_mode", uts_mode)
+ self['UTSMode'] = uts_mode
+
+ if pids_limit:
+ if not isinstance(pids_limit, int):
+ raise host_config_type_error('pids_limit', pids_limit, 'int')
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('pids_limit', '1.23')
+ self["PidsLimit"] = pids_limit
+
+ if isolation:
+ if not isinstance(isolation, str):
+ raise host_config_type_error('isolation', isolation, 'string')
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('isolation', '1.24')
+ self['Isolation'] = isolation
+
+ if auto_remove:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('auto_remove', '1.25')
+ self['AutoRemove'] = auto_remove
+
+ if storage_opt is not None:
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('storage_opt', '1.24')
+ self['StorageOpt'] = storage_opt
+
+ if init is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init', '1.25')
+ self['Init'] = init
+
+ if init_path is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init_path', '1.25')
+
+ if version_gte(version, '1.29'):
+ # https://github.com/moby/moby/pull/32470
+ raise host_config_version_error('init_path', '1.29', False)
+ self['InitPath'] = init_path
+
+ if volume_driver is not None:
+ self['VolumeDriver'] = volume_driver
+
+ if cpu_count:
+ if not isinstance(cpu_count, int):
+ raise host_config_type_error('cpu_count', cpu_count, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_count', '1.25')
+
+ self['CpuCount'] = cpu_count
+
+ if cpu_percent:
+ if not isinstance(cpu_percent, int):
+ raise host_config_type_error('cpu_percent', cpu_percent, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_percent', '1.25')
+
+ self['CpuPercent'] = cpu_percent
+
+ if nano_cpus:
+ if not isinstance(nano_cpus, int):
+ raise host_config_type_error('nano_cpus', nano_cpus, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('nano_cpus', '1.25')
+
+ self['NanoCpus'] = nano_cpus
+
+ if runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('runtime', '1.25')
+ self['Runtime'] = runtime
+
+ if mounts is not None:
+ if version_lt(version, '1.30'):
+ raise host_config_version_error('mounts', '1.30')
+ self['Mounts'] = mounts
+
+ if device_cgroup_rules is not None:
+ if version_lt(version, '1.28'):
+ raise host_config_version_error('device_cgroup_rules', '1.28')
+ if not isinstance(device_cgroup_rules, list):
+ raise host_config_type_error(
+ 'device_cgroup_rules', device_cgroup_rules, 'list'
+ )
+ self['DeviceCgroupRules'] = device_cgroup_rules
+
+ if device_requests is not None:
+ if version_lt(version, '1.40'):
+ raise host_config_version_error('device_requests', '1.40')
+ if not isinstance(device_requests, list):
+ raise host_config_type_error(
+ 'device_requests', device_requests, 'list'
+ )
+ self['DeviceRequests'] = []
+ for req in device_requests:
+ if not isinstance(req, DeviceRequest):
+ req = DeviceRequest(**req)
+ self['DeviceRequests'].append(req)
+
+ if cgroupns:
+ self['CgroupnsMode'] = cgroupns
+
+
+def host_config_type_error(param, param_value, expected):
+ return TypeError(
+ f'Invalid type for {param} param: expected {expected} '
+ f'but found {type(param_value)}'
+ )
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ return errors.InvalidVersion(
+ f'{param} param is not supported in API versions {operator} {version}',
+ )
+
+
+def host_config_value_error(param, param_value):
+ return ValueError(f'Invalid value for {param} param: {param_value}')
+
+
+def host_config_incompatible_error(param, param_value, incompatible_param):
+ return errors.InvalidArgument(
+ f'\"{param_value}\" {param} is incompatible with {incompatible_param}'
+ )
+
+
+class ContainerConfig(dict):
+ def __init__(
+ self, version, image, command, hostname=None, user=None, detach=False,
+ stdin_open=False, tty=False, ports=None, environment=None,
+ volumes=None, network_disabled=False, entrypoint=None,
+ working_dir=None, domainname=None, host_config=None, mac_address=None,
+ labels=None, stop_signal=None, networking_config=None,
+ healthcheck=None, stop_timeout=None, runtime=None
+ ):
+
+ if stop_timeout is not None and version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'stop_timeout was only introduced in API version 1.25'
+ )
+
+ if healthcheck is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'Health options were only introduced in API version 1.24'
+ )
+
+ if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
+ raise errors.InvalidVersion(
+ 'healthcheck start period was introduced in API '
+ 'version 1.29'
+ )
+
+ if isinstance(command, str):
+ command = split_command(command)
+
+ if isinstance(entrypoint, str):
+ entrypoint = split_command(entrypoint)
+
+ if isinstance(environment, dict):
+ environment = format_environment(environment)
+
+ if isinstance(labels, list):
+ labels = {lbl: '' for lbl in labels}
+
+ if isinstance(ports, list):
+ exposed_ports = {}
+ for port_definition in ports:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports[f'{port}/{proto}'] = {}
+ ports = exposed_ports
+
+ if isinstance(volumes, str):
+ volumes = [volumes, ]
+
+ if isinstance(volumes, list):
+ volumes_dict = {}
+ for vol in volumes:
+ volumes_dict[vol] = {}
+ volumes = volumes_dict
+
+ if healthcheck and isinstance(healthcheck, dict):
+ healthcheck = Healthcheck(**healthcheck)
+
+ attach_stdin = False
+ attach_stdout = False
+ attach_stderr = False
+ stdin_once = False
+
+ if not detach:
+ attach_stdout = True
+ attach_stderr = True
+
+ if stdin_open:
+ attach_stdin = True
+ stdin_once = True
+
+ self.update({
+ 'Hostname': hostname,
+ 'Domainname': domainname,
+ 'ExposedPorts': ports,
+ 'User': str(user) if user is not None else None,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'AttachStdin': attach_stdin,
+ 'AttachStdout': attach_stdout,
+ 'AttachStderr': attach_stderr,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'WorkingDir': working_dir,
+ 'HostConfig': host_config,
+ 'NetworkingConfig': networking_config,
+ 'MacAddress': mac_address,
+ 'Labels': labels,
+ 'StopSignal': stop_signal,
+ 'Healthcheck': healthcheck,
+ 'StopTimeout': stop_timeout,
+ 'Runtime': runtime
+ })
diff --git a/contrib/python/docker/docker/types/daemon.py b/contrib/python/docker/docker/types/daemon.py
new file mode 100644
index 0000000000..04e6ccb2d7
--- /dev/null
+++ b/contrib/python/docker/docker/types/daemon.py
@@ -0,0 +1,71 @@
+import socket
+
+import urllib3
+
+from ..errors import DockerException
+
+
+class CancellableStream:
+ """
+ Stream wrapper for real-time events, logs, etc. from the server.
+
+ Example:
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ def __init__(self, stream, response):
+ self._stream = stream
+ self._response = response
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self._stream)
+ except urllib3.exceptions.ProtocolError:
+ raise StopIteration from None
+ except OSError:
+ raise StopIteration from None
+
+ next = __next__
+
+ def close(self):
+ """
+ Closes the event streaming.
+ """
+
+ if not self._response.raw.closed:
+ # find the underlying socket object
+ # based on api.client._get_raw_response_socket
+
+ sock_fp = self._response.raw._fp.fp
+
+ if hasattr(sock_fp, 'raw'):
+ sock_raw = sock_fp.raw
+
+ if hasattr(sock_raw, 'sock'):
+ sock = sock_raw.sock
+
+ elif hasattr(sock_raw, '_sock'):
+ sock = sock_raw._sock
+
+ elif hasattr(sock_fp, 'channel'):
+ # We're working with a paramiko (SSH) channel, which doesn't
+ # support cancelable streams with the current implementation
+ raise DockerException(
+ 'Cancellable streams not supported for the SSH protocol'
+ )
+ else:
+ sock = sock_fp._sock
+
+ if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
+ sock, urllib3.contrib.pyopenssl.WrappedSocket):
+ sock = sock.socket
+
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
diff --git a/contrib/python/docker/docker/types/healthcheck.py b/contrib/python/docker/docker/types/healthcheck.py
new file mode 100644
index 0000000000..dfc88a9771
--- /dev/null
+++ b/contrib/python/docker/docker/types/healthcheck.py
@@ -0,0 +1,88 @@
+from .base import DictType
+
+
+class Healthcheck(DictType):
+ """
+ Defines a healthcheck configuration for a container or service.
+
+ Args:
+ test (:py:class:`list` or str): Test to perform to determine
+ container health. Possible values:
+
+ - Empty list: Inherit healthcheck from parent image
+ - ``["NONE"]``: Disable healthcheck
+ - ``["CMD", args...]``: exec arguments directly.
+ - ``["CMD-SHELL", command]``: Run command in the system's
+ default shell.
+
+ If a string is provided, it will be used as a ``CMD-SHELL``
+ command.
+ interval (int): The time to wait between checks in nanoseconds. It
+ should be 0 or at least 1000000 (1 ms).
+ timeout (int): The time to wait before considering the check to
+ have hung. It should be 0 or at least 1000000 (1 ms).
+ retries (int): The number of consecutive failures needed to
+ consider a container as unhealthy.
+ start_period (int): Start period for the container to
+ initialize before starting health-retries countdown in
+ nanoseconds. It should be 0 or at least 1000000 (1 ms).
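+
+ Example (illustrative sketch; the command and timings are
+ assumptions):
+
+ >>> healthcheck = docker.types.Healthcheck(
+ ... test="curl -f http://localhost/ || exit 1",
+ ... interval=30 * 1000000000, # 30 seconds, in nanoseconds
+ ... timeout=5 * 1000000000,
+ ... retries=3
+ ... )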
+ """
+ def __init__(self, **kwargs):
+ test = kwargs.get('test', kwargs.get('Test'))
+ if isinstance(test, str):
+ test = ["CMD-SHELL", test]
+
+ interval = kwargs.get('interval', kwargs.get('Interval'))
+ timeout = kwargs.get('timeout', kwargs.get('Timeout'))
+ retries = kwargs.get('retries', kwargs.get('Retries'))
+ start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
+
+ super().__init__({
+ 'Test': test,
+ 'Interval': interval,
+ 'Timeout': timeout,
+ 'Retries': retries,
+ 'StartPeriod': start_period
+ })
+
+ @property
+ def test(self):
+ return self['Test']
+
+ @test.setter
+ def test(self, value):
+ if isinstance(value, str):
+ value = ["CMD-SHELL", value]
+ self['Test'] = value
+
+ @property
+ def interval(self):
+ return self['Interval']
+
+ @interval.setter
+ def interval(self, value):
+ self['Interval'] = value
+
+ @property
+ def timeout(self):
+ return self['Timeout']
+
+ @timeout.setter
+ def timeout(self, value):
+ self['Timeout'] = value
+
+ @property
+ def retries(self):
+ return self['Retries']
+
+ @retries.setter
+ def retries(self, value):
+ self['Retries'] = value
+
+ @property
+ def start_period(self):
+ return self['StartPeriod']
+
+ @start_period.setter
+ def start_period(self, value):
+ self['StartPeriod'] = value
diff --git a/contrib/python/docker/docker/types/networks.py b/contrib/python/docker/docker/types/networks.py
new file mode 100644
index 0000000000..ed1ced13ed
--- /dev/null
+++ b/contrib/python/docker/docker/types/networks.py
@@ -0,0 +1,128 @@
+from .. import errors
+from ..utils import normalize_links, version_lt
+
+
+class EndpointConfig(dict):
+ def __init__(self, version, aliases=None, links=None, ipv4_address=None,
+ ipv6_address=None, link_local_ips=None, driver_opt=None,
+ mac_address=None):
+ if version_lt(version, '1.22'):
+ raise errors.InvalidVersion(
+ 'Endpoint config is not supported for API version < 1.22'
+ )
+
+ if aliases:
+ self["Aliases"] = aliases
+
+ if links:
+ self["Links"] = normalize_links(links)
+
+ ipam_config = {}
+ if ipv4_address:
+ ipam_config['IPv4Address'] = ipv4_address
+
+ if ipv6_address:
+ ipam_config['IPv6Address'] = ipv6_address
+
+ if mac_address:
+ if version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'mac_address is not supported for API version < 1.25'
+ )
+ self['MacAddress'] = mac_address
+
+ if link_local_ips is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'link_local_ips is not supported for API version < 1.24'
+ )
+ ipam_config['LinkLocalIPs'] = link_local_ips
+
+ if ipam_config:
+ self['IPAMConfig'] = ipam_config
+
+ if driver_opt:
+ if version_lt(version, '1.32'):
+ raise errors.InvalidVersion(
+ 'DriverOpts is not supported for API version < 1.32'
+ )
+ if not isinstance(driver_opt, dict):
+ raise TypeError('driver_opt must be a dictionary')
+ self['DriverOpts'] = driver_opt
+
+
+class NetworkingConfig(dict):
+ def __init__(self, endpoints_config=None):
+ if endpoints_config:
+ self["EndpointsConfig"] = endpoints_config
+
+
+class IPAMConfig(dict):
+ """
+ Create an IPAM (IP Address Management) config dictionary to be used with
+ :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
+
+ Args:
+
+ driver (str): The IPAM driver to use. Defaults to ``default``.
+ pool_configs (:py:class:`list`): A list of pool configurations
+ (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
+ options (dict): Driver options as a key-value dictionary.
+ Defaults to ``None``.
+
+ Example:
+
+ >>> ipam_config = docker.types.IPAMConfig(driver='default')
+ >>> network = client.create_network('network1', ipam=ipam_config)
+
+ """
+ def __init__(self, driver='default', pool_configs=None, options=None):
+ self.update({
+ 'Driver': driver,
+ 'Config': pool_configs or []
+ })
+
+ if options:
+ if not isinstance(options, dict):
+ raise TypeError('IPAMConfig options must be a dictionary')
+ self['Options'] = options
+
+
+class IPAMPool(dict):
+ """
+ Create an IPAM pool config dictionary to be added to the
+ ``pool_configs`` parameter of
+ :py:class:`~docker.types.IPAMConfig`.
+
+ Args:
+
+ subnet (str): Custom subnet for this IPAM pool using the CIDR
+ notation. Defaults to ``None``.
+ iprange (str): Custom IP range for endpoints in this IPAM pool using
+ the CIDR notation. Defaults to ``None``.
+ gateway (str): Custom IP address for the pool's gateway.
+ aux_addresses (dict): A dictionary of ``key -> ip_address``
+ relationships specifying auxiliary addresses that need to be
+ allocated by the IPAM driver.
+
+ Example:
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='124.42.0.0/16',
+ iprange='124.42.0.0/24',
+ gateway='124.42.0.254',
+ aux_addresses={
+ 'reserved1': '124.42.1.1'
+ }
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool])
+ """
+ def __init__(self, subnet=None, iprange=None, gateway=None,
+ aux_addresses=None):
+ self.update({
+ 'Subnet': subnet,
+ 'IPRange': iprange,
+ 'Gateway': gateway,
+ 'AuxiliaryAddresses': aux_addresses
+ })
diff --git a/contrib/python/docker/docker/types/services.py b/contrib/python/docker/docker/types/services.py
new file mode 100644
index 0000000000..821115411c
--- /dev/null
+++ b/contrib/python/docker/docker/types/services.py
@@ -0,0 +1,867 @@
+from .. import errors
+from ..constants import IS_WINDOWS_PLATFORM
+from ..utils import (
+ check_resource,
+ convert_service_networks,
+ format_environment,
+ format_extra_hosts,
+ parse_bytes,
+ split_command,
+)
+
+
+class TaskTemplate(dict):
+ """
+ Describe the task specification to be used when creating or updating a
+ service.
+
+ Args:
+
+ container_spec (ContainerSpec): Container settings for containers
+ started as part of this task.
+ log_driver (DriverConfig): Log configuration for containers created as
+ part of the service.
+ resources (Resources): Resource requirements which apply to each
+ individual container created as part of the service.
+ restart_policy (RestartPolicy): Specification for the restart policy
+ which applies to containers created as part of this service.
+ placement (Placement): Placement instructions for the scheduler.
+ If a list is passed instead, it is assumed to be a list of
+ constraints as part of a :py:class:`Placement` object.
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`NetworkAttachmentConfig` to attach the service to.
+ force_update (int): A counter that triggers an update even if no
+ relevant parameters have been changed.
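+
+ Example (illustrative sketch; the image and policy are assumptions):
+
+ >>> spec = docker.types.ContainerSpec(image='busybox', command='true')
+ >>> task_template = docker.types.TaskTemplate(
+ ... container_spec=spec,
+ ... restart_policy=docker.types.RestartPolicy(condition='on-failure')
+ ... )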
+ """
+
+ def __init__(self, container_spec, resources=None, restart_policy=None,
+ placement=None, log_driver=None, networks=None,
+ force_update=None):
+ self['ContainerSpec'] = container_spec
+ if resources:
+ self['Resources'] = resources
+ if restart_policy:
+ self['RestartPolicy'] = restart_policy
+ if placement:
+ if isinstance(placement, list):
+ placement = Placement(constraints=placement)
+ self['Placement'] = placement
+ if log_driver:
+ self['LogDriver'] = log_driver
+ if networks:
+ self['Networks'] = convert_service_networks(networks)
+
+ if force_update is not None:
+ if not isinstance(force_update, int):
+ raise TypeError('force_update must be an integer')
+ self['ForceUpdate'] = force_update
+
+ @property
+ def container_spec(self):
+ return self.get('ContainerSpec')
+
+ @property
+ def resources(self):
+ return self.get('Resources')
+
+ @property
+ def restart_policy(self):
+ return self.get('RestartPolicy')
+
+ @property
+ def placement(self):
+ return self.get('Placement')
+
+
+class ContainerSpec(dict):
+ """
+ Describes the behavior of containers that are part of a task, and is used
+ when declaring a :py:class:`~docker.types.TaskTemplate`.
+
+ Args:
+
+ image (string): The image name to use for the container.
+ command (string or list): The command to be run in the image.
+ args (:py:class:`list`): Arguments to the command.
+ hostname (string): The hostname to set on the container.
+ env (dict): Environment variables.
+ workdir (string): The working directory for commands to run in.
+ user (string): The user inside the container.
+ labels (dict): A map of labels to associate with the service.
+ mounts (:py:class:`list`): A list of specifications for mounts to be
+ added to containers created as part of the service. See the
+ :py:class:`~docker.types.Mount` class for details.
+ stop_grace_period (int): Amount of time to wait for the container to
+ terminate before forcefully killing it.
+ secrets (:py:class:`list`): List of :py:class:`SecretReference` to be
+ made available inside the containers.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's ``hosts`` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference` that
+ will be exposed to the service.
+ privileges (Privileges): Security options for the service's containers.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
+ init (boolean): Run an init inside the container that forwards signals
+ and reaps processes.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to the
+ default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
+ the default set for the container.
+ sysctls (:py:class:`dict`): A dict of sysctl values to add to
+ the container
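+
+ Example (illustrative sketch; the image, variables and mount are
+ assumptions):
+
+ >>> spec = docker.types.ContainerSpec(
+ ... image='nginx:alpine', env={'DEBUG': '1'},
+ ... mounts=['web-content:/usr/share/nginx/html:ro']
+ ... )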
+ """
+
+ def __init__(self, image, command=None, args=None, hostname=None, env=None,
+ workdir=None, user=None, labels=None, mounts=None,
+ stop_grace_period=None, secrets=None, tty=None, groups=None,
+ open_stdin=None, read_only=None, stop_signal=None,
+ healthcheck=None, hosts=None, dns_config=None, configs=None,
+ privileges=None, isolation=None, init=None, cap_add=None,
+ cap_drop=None, sysctls=None):
+ self['Image'] = image
+
+ if isinstance(command, str):
+ command = split_command(command)
+ self['Command'] = command
+ self['Args'] = args
+
+ if hostname is not None:
+ self['Hostname'] = hostname
+ if env is not None:
+ if isinstance(env, dict):
+ self['Env'] = format_environment(env)
+ else:
+ self['Env'] = env
+ if workdir is not None:
+ self['Dir'] = workdir
+ if user is not None:
+ self['User'] = user
+ if groups is not None:
+ self['Groups'] = groups
+ if stop_signal is not None:
+ self['StopSignal'] = stop_signal
+ if stop_grace_period is not None:
+ self['StopGracePeriod'] = stop_grace_period
+ if labels is not None:
+ self['Labels'] = labels
+ if hosts is not None:
+ self['Hosts'] = format_extra_hosts(hosts, task=True)
+
+ if mounts is not None:
+ parsed_mounts = []
+ for mount in mounts:
+ if isinstance(mount, str):
+ parsed_mounts.append(Mount.parse_mount_string(mount))
+ else:
+ # If mount already parsed
+ parsed_mounts.append(mount)
+ self['Mounts'] = parsed_mounts
+
+ if secrets is not None:
+ if not isinstance(secrets, list):
+ raise TypeError('secrets must be a list')
+ self['Secrets'] = secrets
+
+ if configs is not None:
+ if not isinstance(configs, list):
+ raise TypeError('configs must be a list')
+ self['Configs'] = configs
+
+ if dns_config is not None:
+ self['DNSConfig'] = dns_config
+ if privileges is not None:
+ self['Privileges'] = privileges
+ if healthcheck is not None:
+ self['Healthcheck'] = healthcheck
+
+ if tty is not None:
+ self['TTY'] = tty
+ if open_stdin is not None:
+ self['OpenStdin'] = open_stdin
+ if read_only is not None:
+ self['ReadOnly'] = read_only
+
+ if isolation is not None:
+ self['Isolation'] = isolation
+
+ if init is not None:
+ self['Init'] = init
+
+ if cap_add is not None:
+ if not isinstance(cap_add, list):
+ raise TypeError('cap_add must be a list')
+
+ self['CapabilityAdd'] = cap_add
+
+ if cap_drop is not None:
+ if not isinstance(cap_drop, list):
+ raise TypeError('cap_drop must be a list')
+
+ self['CapabilityDrop'] = cap_drop
+
+ if sysctls is not None:
+ if not isinstance(sysctls, dict):
+ raise TypeError('sysctls must be a dict')
+
+ self['Sysctls'] = sysctls
+
+
+class Mount(dict):
+ """
+ Describes a mounted folder's configuration inside a container. A list of
+ :py:class:`Mount` would be used as part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ target (string): Container path.
+ source (string): Mount source (e.g. a volume name or a host path).
+ type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
+ ``npipe``). Default: ``volume``.
+ read_only (bool): Whether the mount should be read-only.
+ consistency (string): The consistency requirement for the mount. One of
+ ``default``, ``consistent``, ``cached``, ``delegated``.
+ propagation (string): A propagation mode with the value ``[r]private``,
+ ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
+ no_copy (bool): Set to ``True`` to disable copying data from the
+ target into the newly created volume. Default: ``False``. Only
+ valid for the ``volume`` type.
+ labels (dict): User-defined name and labels for the volume. Only valid
+ for the ``volume`` type.
+ driver_config (DriverConfig): Volume driver configuration. Only valid
+ for the ``volume`` type.
+ tmpfs_size (int or string): The size for the tmpfs mount in bytes.
+ tmpfs_mode (int): The permission mode for the tmpfs mount.
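+
+ Example (illustrative sketch; the volume name and label are
+ assumptions):
+
+ >>> mount = docker.types.Mount(
+ ... target='/data', source='my-volume', type='volume',
+ ... read_only=True, labels={'team': 'infra'}
+ ... )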
+ """
+
+ def __init__(self, target, source, type='volume', read_only=False,
+ consistency=None, propagation=None, no_copy=False,
+ labels=None, driver_config=None, tmpfs_size=None,
+ tmpfs_mode=None):
+ self['Target'] = target
+ self['Source'] = source
+ if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
+ raise errors.InvalidArgument(
+ f'Unsupported mount type: "{type}"'
+ )
+ self['Type'] = type
+ self['ReadOnly'] = read_only
+
+ if consistency:
+ self['Consistency'] = consistency
+
+ if type == 'bind':
+ if propagation is not None:
+ self['BindOptions'] = {
+ 'Propagation': propagation
+ }
+ if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the bind '
+ 'type mount.'
+ )
+ elif type == 'volume':
+ volume_opts = {}
+ if no_copy:
+ volume_opts['NoCopy'] = True
+ if labels:
+ volume_opts['Labels'] = labels
+ if driver_config:
+ volume_opts['DriverConfig'] = driver_config
+ if volume_opts:
+ self['VolumeOptions'] = volume_opts
+ if any([propagation, tmpfs_size, tmpfs_mode]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the volume '
+ 'type mount.'
+ )
+ elif type == 'tmpfs':
+ tmpfs_opts = {}
+ if tmpfs_mode:
+ if not isinstance(tmpfs_mode, int):
+ raise errors.InvalidArgument(
+ 'tmpfs_mode must be an integer'
+ )
+ tmpfs_opts['Mode'] = tmpfs_mode
+ if tmpfs_size:
+ tmpfs_opts['SizeBytes'] = parse_bytes(tmpfs_size)
+ if tmpfs_opts:
+ self['TmpfsOptions'] = tmpfs_opts
+ if any([propagation, labels, driver_config, no_copy]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the tmpfs '
+ 'type mount.'
+ )
+
+ @classmethod
+ def parse_mount_string(cls, string):
+ parts = string.split(':')
+ if len(parts) > 3:
+ raise errors.InvalidArgument(
+ f'Invalid mount format "{string}"'
+ )
+ if len(parts) == 1:
+ return cls(target=parts[0], source=None)
+ else:
+ target = parts[1]
+ source = parts[0]
+ mount_type = 'volume'
+ if source.startswith('/') or (
+ IS_WINDOWS_PLATFORM and source[0].isalpha() and
+ source[1] == ':'
+ ):
+ # FIXME: That windows condition will fail earlier since we
+ # split on ':'. We should look into doing a smarter split
+ # if we detect we are on Windows.
+ mount_type = 'bind'
+ read_only = not (len(parts) == 2 or parts[2] == 'rw')
+ return cls(target, source, read_only=read_only, type=mount_type)
+
+
+class Resources(dict):
+ """
+ Configures resource allocation for containers when made part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ cpu_limit (int): CPU limit in nano-CPU units (10^9 equals one CPU).
+ mem_limit (int): Memory limit in Bytes.
+ cpu_reservation (int): CPU reservation in nano-CPU units (10^9
+ equals one CPU).
+ mem_reservation (int): Memory reservation in Bytes.
+ generic_resources (dict or :py:class:`list`): Node level generic
+ resources, for example a GPU, using the following format:
+ ``{ resource_name: resource_value }``. Alternatively, a list of
+ resource specifications as defined by the Engine API.
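+
+ Example (illustrative sketch; the limits are assumptions):
+
+ >>> resources = docker.types.Resources(
+ ... cpu_limit=500000000, # half a CPU, in nano-CPU units
+ ... mem_limit=64 * 1024 * 1024 # 64 MiB
+ ... )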
+ """
+
+ def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
+ mem_reservation=None, generic_resources=None):
+ limits = {}
+ reservation = {}
+ if cpu_limit is not None:
+ limits['NanoCPUs'] = cpu_limit
+ if mem_limit is not None:
+ limits['MemoryBytes'] = mem_limit
+ if cpu_reservation is not None:
+ reservation['NanoCPUs'] = cpu_reservation
+ if mem_reservation is not None:
+ reservation['MemoryBytes'] = mem_reservation
+ if generic_resources is not None:
+ reservation['GenericResources'] = (
+ _convert_generic_resources_dict(generic_resources)
+ )
+ if limits:
+ self['Limits'] = limits
+ if reservation:
+ self['Reservations'] = reservation
+
+
+def _convert_generic_resources_dict(generic_resources):
+ if isinstance(generic_resources, list):
+ return generic_resources
+ if not isinstance(generic_resources, dict):
+ raise errors.InvalidArgument(
+ 'generic_resources must be a dict or a list '
+ f'(found {type(generic_resources)})'
+ )
+ resources = []
+ for kind, value in generic_resources.items():
+ resource_type = None
+ if isinstance(value, int):
+ resource_type = 'DiscreteResourceSpec'
+ elif isinstance(value, str):
+ resource_type = 'NamedResourceSpec'
+ else:
+ kv = {kind: value}
+ raise errors.InvalidArgument(
+ f'Unsupported generic resource reservation type: {kv}'
+ )
+ resources.append({
+ resource_type: {'Kind': kind, 'Value': value}
+ })
+ return resources
+
+
+class UpdateConfig(dict):
+ """
+ Used to specify the way container updates should be performed by a service.
+
+ Args:
+
+ parallelism (int): Maximum number of tasks to be updated in one
+ iteration (0 means unlimited parallelism). Default: 0.
+ delay (int): Amount of time between updates, in nanoseconds.
+ failure_action (string): Action to take if an updated task fails to
+ run, or stops running during the update. Acceptable values are
+ ``continue``, ``pause``, as well as ``rollback`` since API v1.28.
+ Default: ``continue``
+ monitor (int): Amount of time to monitor each updated task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ an update before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+ order (string): Specifies the order of operations when rolling out an
+ updated task. Either ``start-first`` or ``stop-first`` are accepted.
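+
+ Example (illustrative sketch; the values are assumptions):
+
+ >>> update_config = docker.types.UpdateConfig(
+ ... parallelism=2,
+ ... delay=10 * 1000000000, # 10 seconds, in nanoseconds
+ ... failure_action='rollback', order='start-first'
+ ... )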
+ """
+
+ def __init__(self, parallelism=0, delay=None, failure_action='continue',
+ monitor=None, max_failure_ratio=None, order=None):
+ self['Parallelism'] = parallelism
+ if delay is not None:
+ self['Delay'] = delay
+ if failure_action not in ('pause', 'continue', 'rollback'):
+ raise errors.InvalidArgument(
+ 'failure_action must be one of `pause`, `continue`, `rollback`'
+ )
+ self['FailureAction'] = failure_action
+
+ if monitor is not None:
+ if not isinstance(monitor, int):
+ raise TypeError('monitor must be an integer')
+ self['Monitor'] = monitor
+
+ if max_failure_ratio is not None:
+ if not isinstance(max_failure_ratio, (float, int)):
+ raise TypeError('max_failure_ratio must be a float')
+ if max_failure_ratio > 1 or max_failure_ratio < 0:
+ raise errors.InvalidArgument(
+ 'max_failure_ratio must be a number between 0 and 1'
+ )
+ self['MaxFailureRatio'] = max_failure_ratio
+
+ if order is not None:
+ if order not in ('start-first', 'stop-first'):
+ raise errors.InvalidArgument(
+ 'order must be either `start-first` or `stop-first`'
+ )
+ self['Order'] = order
+
+
+class RollbackConfig(UpdateConfig):
+ """
+ Used to specify the way container rollbacks should be performed by a
+ service.
+
+ Args:
+ parallelism (int): Maximum number of tasks to be rolled back in one
+ iteration (0 means unlimited parallelism). Default: 0
+ delay (int): Amount of time between rollbacks, in nanoseconds.
+ failure_action (string): Action to take if a rolled back task fails to
+ run, or stops running during the rollback. Acceptable values are
+ ``continue``, ``pause`` or ``rollback``.
+ Default: ``continue``
+ monitor (int): Amount of time to monitor each rolled back task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ a rollback before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+ order (string): Specifies the order of operations when rolling out a
+ rolled back task. Either ``start-first`` or ``stop-first`` are
+ accepted.
+ """
+ pass
+
+
+class RestartConditionTypesEnum:
+ _values = (
+ 'none',
+ 'on-failure',
+ 'any',
+ )
+ NONE, ON_FAILURE, ANY = _values
+
+
+class RestartPolicy(dict):
+ """
+ Used when creating a :py:class:`~docker.types.ContainerSpec`,
+ dictates whether a container should restart after stopping or failing.
+
+ Args:
+
+ condition (string): Condition for restart (``none``, ``on-failure``,
+ or ``any``). Default: ``none``.
+ delay (int): Delay between restart attempts. Default: 0
+ max_attempts (int): Maximum attempts to restart a given container
+ before giving up. Default value is 0, which is ignored.
+ window (int): Time window used to evaluate the restart policy. Default
+ value is 0, which is unbounded.
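+
+ Example (illustrative sketch; the values are assumptions):
+
+ >>> policy = docker.types.RestartPolicy(
+ ... condition=docker.types.RestartPolicy.condition_types.ON_FAILURE,
+ ... delay=5 * 1000000000, # 5 seconds, in nanoseconds
+ ... max_attempts=3
+ ... )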
+ """
+
+ condition_types = RestartConditionTypesEnum
+
+ def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
+ max_attempts=0, window=0):
+ if condition not in self.condition_types._values:
+ raise TypeError(
+ f'Invalid RestartPolicy condition {condition}'
+ )
+
+ self['Condition'] = condition
+ self['Delay'] = delay
+ self['MaxAttempts'] = max_attempts
+ self['Window'] = window
+
+
+class DriverConfig(dict):
+ """
+ Indicates which driver to use, as well as its configuration. Can be used
+ as ``log_driver`` in a :py:class:`~docker.types.TaskTemplate`,
+ for the ``driver_config`` in a volume :py:class:`~docker.types.Mount`, or
+ as the driver object in
+ :py:meth:`create_secret`.
+
+ Args:
+
+ name (string): Name of the driver to use.
+ options (dict): Driver-specific options. Default: ``None``.
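+
+ Example (illustrative sketch; the driver name and options are
+ assumptions):
+
+ >>> log_driver = docker.types.DriverConfig(
+ ... name='json-file', options={'max-size': '10m'}
+ ... )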
+ """
+
+ def __init__(self, name, options=None):
+ self['Name'] = name
+ if options:
+ self['Options'] = options
+
+
+class EndpointSpec(dict):
+ """
+ Describes properties to access and load-balance a service.
+
+ Args:
+
+ mode (string): The mode of resolution to use for internal load
+ balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
+ ``'vip'`` if not provided.
+ ports (dict): Exposed ports that this service is accessible on from the
+ outside, in the form of ``{ published_port: target_port }`` or
+ ``{ published_port: <port_config_tuple> }``. Port config tuple format
+ is ``(target_port [, protocol [, publish_mode]])``.
+ Ports can only be provided if the ``vip`` resolution mode is used.
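+
+ Example (illustrative sketch; the port numbers are assumptions):
+
+ >>> endpoint_spec = docker.types.EndpointSpec(mode='vip', ports={
+ ... 8080: (80, 'tcp', 'ingress')
+ ... })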
+ """
+
+ def __init__(self, mode=None, ports=None):
+ if ports:
+ self['Ports'] = convert_service_ports(ports)
+ if mode:
+ self['Mode'] = mode
+
+
+def convert_service_ports(ports):
+ if isinstance(ports, list):
+ return ports
+ if not isinstance(ports, dict):
+ raise TypeError(
+ 'Invalid type for ports, expected dict or list'
+ )
+
+ result = []
+ for k, v in ports.items():
+ port_spec = {
+ 'Protocol': 'tcp',
+ 'PublishedPort': k
+ }
+
+ if isinstance(v, tuple):
+ port_spec['TargetPort'] = v[0]
+ if len(v) >= 2 and v[1] is not None:
+ port_spec['Protocol'] = v[1]
+ if len(v) == 3:
+ port_spec['PublishMode'] = v[2]
+ if len(v) > 3:
+ raise ValueError(
+ 'Service port configuration can have at most 3 elements: '
+ '(target_port, protocol, mode)'
+ )
+ else:
+ port_spec['TargetPort'] = v
+
+ result.append(port_spec)
+ return result
+
+
+class ServiceMode(dict):
+ """
+ Indicate whether a service or a job should be deployed as a replicated
+ or global service, and associated parameters.
+
+ Args:
+ mode (string): Can be either ``replicated``, ``global``,
+ ``replicated-job`` or ``global-job``
+ replicas (int): Number of replicas. For replicated services only.
+ concurrency (int): Number of concurrent jobs. For replicated job
+ services only.
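+
+ Example (illustrative sketch):
+
+ >>> mode = docker.types.ServiceMode('replicated', replicas=3)
+ >>> mode.replicas
+ 3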
+ """
+
+ def __init__(self, mode, replicas=None, concurrency=None):
+ replicated_modes = ('replicated', 'replicated-job')
+ supported_modes = replicated_modes + ('global', 'global-job')
+
+ if mode not in supported_modes:
+ raise errors.InvalidArgument(
+ 'mode must be either "replicated", "global", "replicated-job"'
+ ' or "global-job"'
+ )
+
+ if mode not in replicated_modes:
+ if replicas is not None:
+ raise errors.InvalidArgument(
+ 'replicas can only be used for "replicated" or'
+ ' "replicated-job" mode'
+ )
+
+ if concurrency is not None:
+ raise errors.InvalidArgument(
+ 'concurrency can only be used for "replicated-job" mode'
+ )
+
+ service_mode = self._convert_mode(mode)
+ self.mode = service_mode
+ self[service_mode] = {}
+
+ if replicas is not None:
+ if mode == 'replicated':
+ self[service_mode]['Replicas'] = replicas
+
+ if mode == 'replicated-job':
+ self[service_mode]['MaxConcurrent'] = concurrency or 1
+ self[service_mode]['TotalCompletions'] = replicas
+
+ @staticmethod
+ def _convert_mode(original_mode):
+ if original_mode == 'global-job':
+ return 'GlobalJob'
+
+ if original_mode == 'replicated-job':
+ return 'ReplicatedJob'
+
+ return original_mode
+
+ @property
+ def replicas(self):
+ if 'replicated' in self:
+ return self['replicated'].get('Replicas')
+
+ if 'ReplicatedJob' in self:
+ return self['ReplicatedJob'].get('TotalCompletions')
+
+ return None
+
+
+class SecretReference(dict):
+ """
+ Secret reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a secret is made accessible inside the service's
+ containers.
+
+ Args:
+ secret_id (string): Secret's ID
+ secret_name (string): Secret's name as defined at its creation.
+ filename (string): Name of the file containing the secret. Defaults
+ to the secret's name if not specified.
+ uid (string): UID of the secret file's owner. Default: 0
+ gid (string): GID of the secret file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
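+
+ Example (illustrative sketch; the ID and name are assumptions):
+
+ >>> secret_ref = docker.types.SecretReference(
+ ... secret_id='ponpkfn0cwh1gzwhdevm3gdmo',
+ ... secret_name='db_password'
+ ... )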
+ """
+ @check_resource('secret_id')
+ def __init__(self, secret_id, secret_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['SecretName'] = secret_name
+ self['SecretID'] = secret_id
+ self['File'] = {
+ 'Name': filename or secret_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
+class ConfigReference(dict):
+ """
+ Config reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a config is made accessible inside the service's
+ containers.
+
+ Args:
+ config_id (string): Config's ID
+ config_name (string): Config's name as defined at its creation.
+ filename (string): Name of the file containing the config. Defaults
+ to the config's name if not specified.
+ uid (string): UID of the config file's owner. Default: 0
+ gid (string): GID of the config file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('config_id')
+ def __init__(self, config_id, config_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['ConfigName'] = config_name
+ self['ConfigID'] = config_id
+ self['File'] = {
+ 'Name': filename or config_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
+class Placement(dict):
+ """
+ Placement constraints to be used as part of a :py:class:`TaskTemplate`
+
+ Args:
+ constraints (:py:class:`list` of str): A list of constraints
+ preferences (:py:class:`list` of tuple): Preferences provide a way
+ to make the scheduler aware of factors such as topology. They
+ are provided in order from highest to lowest precedence and
+ are expressed as ``(strategy, descriptor)`` tuples. See
+ :py:class:`PlacementPreference` for details.
+ maxreplicas (int): Maximum number of replicas per node
+ platforms (:py:class:`list` of tuple): A list of platforms
+ expressed as ``(arch, os)`` tuples
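+
+ Example (illustrative sketch; the constraint and label are
+ assumptions):
+
+ >>> placement = docker.types.Placement(
+ ... constraints=['node.role == worker'],
+ ... preferences=[('spread', 'node.labels.datacenter')]
+ ... )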
+ """
+
+ def __init__(self, constraints=None, preferences=None, platforms=None,
+ maxreplicas=None):
+ if constraints is not None:
+ self['Constraints'] = constraints
+ if preferences is not None:
+ self['Preferences'] = []
+ for pref in preferences:
+ if isinstance(pref, tuple):
+ pref = PlacementPreference(*pref)
+ self['Preferences'].append(pref)
+ if maxreplicas is not None:
+ self['MaxReplicas'] = maxreplicas
+ if platforms:
+ self['Platforms'] = []
+ for plat in platforms:
+ self['Platforms'].append({
+ 'Architecture': plat[0], 'OS': plat[1]
+ })
+
+
+class PlacementPreference(dict):
+ """
+ Placement preference to be used as an element in the list of
+ preferences for :py:class:`Placement` objects.
+
+ Args:
+ strategy (string): The placement strategy to implement. Currently,
+ the only supported strategy is ``spread``.
+ descriptor (string): A label descriptor. For the spread strategy,
+ the scheduler will try to spread tasks evenly over groups of
+ nodes identified by this label.
+ """
+
+ def __init__(self, strategy, descriptor):
+ if strategy != 'spread':
+ raise errors.InvalidArgument(
+ f'PlacementPreference strategy value is invalid ({strategy}): '
+ 'must be "spread".'
+ )
+ self['Spread'] = {'SpreadDescriptor': descriptor}
+
+
+class DNSConfig(dict):
+ """
+ Specification for DNS related configurations in resolver configuration
+ file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ nameservers (:py:class:`list`): The IP addresses of the name
+ servers.
+ search (:py:class:`list`): A search list for host-name lookup.
+ options (:py:class:`list`): A list of internal resolver variables
+ to be modified (e.g., ``debug``, ``ndots:3``, etc.).
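+
+ Example (illustrative sketch; the addresses are assumptions):
+
+ >>> dns_config = docker.types.DNSConfig(
+ ... nameservers=['8.8.8.8'], search=['example.com']
+ ... )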
+ """
+
+ def __init__(self, nameservers=None, search=None, options=None):
+ self['Nameservers'] = nameservers
+ self['Search'] = search
+ self['Options'] = options
+
+
+class Privileges(dict):
+ r"""
+ Security options for a service's containers.
+ Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ credentialspec_file (str): Load credential spec from this file.
+ The file is read by the daemon, and must be present in the
+ CredentialSpecs subdirectory in the docker data directory,
+ which defaults to ``C:\ProgramData\Docker\`` on Windows.
+ Can not be combined with credentialspec_registry.
+
+ credentialspec_registry (str): Load credential spec from this value
+ in the Windows registry. The specified registry value must be
+ located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion
+ \Virtualization\Containers\CredentialSpecs``.
+ Can not be combined with credentialspec_file.
+
+ selinux_disable (boolean): Disable SELinux
+ selinux_user (string): SELinux user label
+ selinux_role (string): SELinux role label
+ selinux_type (string): SELinux type label
+ selinux_level (string): SELinux level label
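+
+ Example (illustrative sketch):
+
+ >>> privileges = docker.types.Privileges(selinux_disable=True)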
+ """
+
+ def __init__(self, credentialspec_file=None, credentialspec_registry=None,
+ selinux_disable=None, selinux_user=None, selinux_role=None,
+ selinux_type=None, selinux_level=None):
+ credential_spec = {}
+ if credentialspec_registry is not None:
+ credential_spec['Registry'] = credentialspec_registry
+ if credentialspec_file is not None:
+ credential_spec['File'] = credentialspec_file
+
+ if len(credential_spec) > 1:
+ raise errors.InvalidArgument(
+ 'credentialspec_file and credentialspec_registry are mutually'
+ ' exclusive'
+ )
+
+ selinux_context = {
+ 'Disable': selinux_disable,
+ 'User': selinux_user,
+ 'Role': selinux_role,
+ 'Type': selinux_type,
+ 'Level': selinux_level,
+ }
+
+ if len(credential_spec) > 0:
+ self['CredentialSpec'] = credential_spec
+
+ # Note: selinux_context always contains all five keys, so this
+ # check is always true and SELinuxContext is always set; None
+ # values are passed through to the engine unchanged.
+ if len(selinux_context) > 0:
+ self['SELinuxContext'] = selinux_context
+
+
+class NetworkAttachmentConfig(dict):
+ """
+ Network attachment options for a service.
+
+ Args:
+ target (str): The target network for attachment.
+ Can be a network name or ID.
+ aliases (:py:class:`list`): A list of discoverable alternate names
+ for the service.
+ options (:py:class:`dict`): Driver attachment options for the
+ network target.
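+
+ Example (illustrative sketch; the network name and alias are
+ assumptions):
+
+ >>> attachment = docker.types.NetworkAttachmentConfig(
+ ... 'backend-net', aliases=['db']
+ ... )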
+ """
+
+ def __init__(self, target, aliases=None, options=None):
+ self['Target'] = target
+ self['Aliases'] = aliases
+ self['DriverOpts'] = options
diff --git a/contrib/python/docker/docker/types/swarm.py b/contrib/python/docker/docker/types/swarm.py
new file mode 100644
index 0000000000..9687a82d82
--- /dev/null
+++ b/contrib/python/docker/docker/types/swarm.py
@@ -0,0 +1,119 @@
+from ..errors import InvalidVersion
+from ..utils import version_lt
+
+
+class SwarmSpec(dict):
+ """
+ Describe a Swarm's configuration and options. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
+ to instantiate.
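+
+ Example (illustrative sketch; assumes an ``APIClient`` instance
+ named ``client``):
+
+ >>> spec = client.create_swarm_spec(
+ ... snapshot_interval=5000, log_entries_for_slow_followers=1200
+ ... )
+ >>> client.init_swarm(advertise_addr='eth0', swarm_spec=spec)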
+ """
+ def __init__(self, version, task_history_retention_limit=None,
+ snapshot_interval=None, keep_old_snapshots=None,
+ log_entries_for_slow_followers=None, heartbeat_tick=None,
+ election_tick=None, dispatcher_heartbeat_period=None,
+ node_cert_expiry=None, external_cas=None, name=None,
+ labels=None, signing_ca_cert=None, signing_ca_key=None,
+ ca_force_rotate=None, autolock_managers=None,
+ log_driver=None):
+ if task_history_retention_limit is not None:
+ self['Orchestration'] = {
+ 'TaskHistoryRetentionLimit': task_history_retention_limit
+ }
+ if any([snapshot_interval,
+ keep_old_snapshots,
+ log_entries_for_slow_followers,
+ heartbeat_tick,
+ election_tick]):
+ self['Raft'] = {
+ 'SnapshotInterval': snapshot_interval,
+ 'KeepOldSnapshots': keep_old_snapshots,
+ 'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
+ 'HeartbeatTick': heartbeat_tick,
+ 'ElectionTick': election_tick
+ }
+
+ if dispatcher_heartbeat_period:
+ self['Dispatcher'] = {
+ 'HeartbeatPeriod': dispatcher_heartbeat_period
+ }
+
+ ca_config = {}
+ if node_cert_expiry is not None:
+ ca_config['NodeCertExpiry'] = node_cert_expiry
+ if external_cas:
+ if version_lt(version, '1.25'):
+ if len(external_cas) > 1:
+ raise InvalidVersion(
+ 'Support for multiple external CAs is not available '
+ 'for API version < 1.25'
+ )
+ ca_config['ExternalCA'] = external_cas[0]
+ else:
+ ca_config['ExternalCAs'] = external_cas
+ if signing_ca_key:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_key is not supported in API version < 1.30'
+ )
+ ca_config['SigningCAKey'] = signing_ca_key
+ if signing_ca_cert:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_cert is not supported in API version < 1.30'
+ )
+ ca_config['SigningCACert'] = signing_ca_cert
+ if ca_force_rotate is not None:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'force_rotate is not supported in API version < 1.30'
+ )
+ ca_config['ForceRotate'] = ca_force_rotate
+ if ca_config:
+ self['CAConfig'] = ca_config
+
+ if autolock_managers is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'autolock_managers is not supported in API version < 1.25'
+ )
+
+ self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers}
+
+ if log_driver is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'log_driver is not supported in API version < 1.25'
+ )
+
+ self['TaskDefaults'] = {'LogDriver': log_driver}
+
+ if name is not None:
+ self['Name'] = name
+ if labels is not None:
+ self['Labels'] = labels
+
+
+class SwarmExternalCA(dict):
+ """
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+
+ Args:
+ url (string): URL where certificate signing requests should be
+ sent.
+ protocol (string): Protocol for communication with the external CA.
+ options (dict): An object with key/value pairs that are interpreted
+ as protocol-specific options for the external CA driver.
+ ca_cert (string): The root CA certificate (in PEM format) this
+ external CA uses to issue TLS certificates (assumed to be to
+ the current swarm root CA certificate if not provided).
+ """
+ def __init__(self, url, protocol=None, options=None, ca_cert=None):
+ self['URL'] = url
+ self['Protocol'] = protocol
+ self['Options'] = options
+ self['CACert'] = ca_cert
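+
+
+# A minimal usage sketch (illustrative, not part of the module): specs are
+# normally built through the API client so that the client's API version is
+# threaded into the version checks above.
+#
+#   import docker
+#   client = docker.APIClient()  # assumes a reachable Docker daemon
+#   spec = client.create_swarm_spec(snapshot_interval=5000,
+#                                   log_entries_for_slow_followers=1200)
+#   client.init_swarm(advertise_addr='eth0', swarm_spec=spec)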
diff --git a/contrib/python/docker/docker/utils/__init__.py b/contrib/python/docker/docker/utils/__init__.py
new file mode 100644
index 0000000000..c086a9f073
--- /dev/null
+++ b/contrib/python/docker/docker/utils/__init__.py
@@ -0,0 +1,28 @@
+from .build import create_archive, exclude_paths, match_tag, mkbuildcontext, tar
+from .decorators import check_resource, minimum_version, update_headers
+from .utils import (
+ compare_version,
+ convert_filters,
+ convert_port_bindings,
+ convert_service_networks,
+ convert_volume_binds,
+ create_host_config,
+ create_ipam_config,
+ create_ipam_pool,
+ datetime_to_timestamp,
+ decode_json_header,
+ format_environment,
+ format_extra_hosts,
+ kwargs_from_env,
+ normalize_links,
+ parse_bytes,
+ parse_devices,
+ parse_env_file,
+ parse_host,
+ parse_repository_tag,
+ split_command,
+ version_gte,
+ version_lt,
+)
diff --git a/contrib/python/docker/docker/utils/build.py b/contrib/python/docker/docker/utils/build.py
new file mode 100644
index 0000000000..b841391044
--- /dev/null
+++ b/contrib/python/docker/docker/utils/build.py
@@ -0,0 +1,260 @@
+import io
+import os
+import re
+import tarfile
+import tempfile
+
+from ..constants import IS_WINDOWS_PLATFORM
+from .fnmatch import fnmatch
+
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+_TAG = re.compile(
+ r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*"
+ r"(?::[0-9]+)?(/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*"
+ r"(:[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})?$"
+)
+
+
+def match_tag(tag: str) -> bool:
+ return bool(_TAG.match(tag))
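+
+
+# A few example inputs, assuming the regex above:
+#   match_tag("ubuntu")                              -> True
+#   match_tag("registry.example.com:5000/app:v1.0")  -> True
+#   match_tag("Ubuntu")                              -> False (repos are lowercase)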
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+ dockerfile = dockerfile or (None, None)
+ extra_files = []
+ if dockerfile[1] is not None:
+ dockerignore_contents = '\n'.join(
+ (exclude or ['.dockerignore']) + [dockerfile[0]]
+ )
+ extra_files = [
+ ('.dockerignore', dockerignore_contents),
+ dockerfile,
+ ]
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
+ root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+ a set of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ patterns.append(f"!{dockerfile}")
+ pm = PatternMatcher(patterns)
+ return set(pm.walk(root))
+
+
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False,
+ extra_files=None):
+ extra_files = extra_files or []
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ extra_names = {e[0] for e in extra_files}
+ for path in files:
+ if path in extra_names:
+ # Extra files override context files with the same name
+ continue
+ full_path = os.path.join(root, path)
+
+ i = t.gettarinfo(full_path, arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
+
+ # Workaround https://bugs.python.org/issue32713
+ if i.mtime < 0 or i.mtime > 8**11 - 1:
+ i.mtime = int(i.mtime)
+
+ if IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+
+ if i.isfile():
+ try:
+ with open(full_path, 'rb') as f:
+ t.addfile(i, f)
+ except OSError as oe:
+ raise OSError(
+ f'Can not read file in context: {full_path}'
+ ) from oe
+ else:
+ # Directories, FIFOs, symlinks... don't need to be read.
+ t.addfile(i, None)
+
+ for name, contents in extra_files:
+ info = tarfile.TarInfo(name)
+ contents_encoded = contents.encode('utf-8')
+ info.size = len(contents_encoded)
+ t.addfile(info, io.BytesIO(contents_encoded))
+
+ t.close()
+ fileobj.seek(0)
+ return fileobj
+
+
+def mkbuildcontext(dockerfile):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+ if isinstance(dockerfile, io.StringIO):
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
+ elif isinstance(dockerfile, io.BytesIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ else:
+ dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+ t.addfile(dfinfo, dockerfile)
+ t.close()
+ f.seek(0)
+ return f
+
+
+def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+
+def normalize_slashes(p):
+ if IS_WINDOWS_PLATFORM:
+ return '/'.join(split_path(p))
+ return p
+
+
+def walk(root, patterns, default=True):
+ pm = PatternMatcher(patterns)
+ return pm.walk(root)
+
+
+# Heavily based on
+# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
+class PatternMatcher:
+ def __init__(self, patterns):
+ self.patterns = list(filter(
+ lambda p: p.dirs, [Pattern(p) for p in patterns]
+ ))
+ self.patterns.append(Pattern('!.dockerignore'))
+
+ def matches(self, filepath):
+ matched = False
+ parent_path = os.path.dirname(filepath)
+ parent_path_dirs = split_path(parent_path)
+
+ for pattern in self.patterns:
+ negative = pattern.exclusion
+ match = pattern.match(filepath)
+ if not match and parent_path != '':
+ if len(pattern.dirs) <= len(parent_path_dirs):
+ match = pattern.match(
+ os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
+ )
+
+ if match:
+ matched = not negative
+
+ return matched
+
+ def walk(self, root):
+ def rec_walk(current_dir):
+ for f in os.listdir(current_dir):
+ fpath = os.path.join(
+ os.path.relpath(current_dir, root), f
+ )
+ if fpath.startswith(f".{os.path.sep}"):
+ fpath = fpath[2:]
+ match = self.matches(fpath)
+ if not match:
+ yield fpath
+
+ cur = os.path.join(root, fpath)
+ if not os.path.isdir(cur) or os.path.islink(cur):
+ continue
+
+ if match:
+ # If we want to skip this file and it's a directory
+ # then we should first check to see if there's an
+ # excludes pattern (e.g. !dir/file) that starts with this
+ # dir. If so then we can't skip this dir.
+ skip = True
+
+ for pat in self.patterns:
+ if not pat.exclusion:
+ continue
+ if pat.cleaned_pattern.startswith(
+ normalize_slashes(fpath)):
+ skip = False
+ break
+ if skip:
+ continue
+ yield from rec_walk(cur)
+
+ return rec_walk(root)
+
+
+class Pattern:
+ def __init__(self, pattern_str):
+ self.exclusion = False
+ if pattern_str.startswith('!'):
+ self.exclusion = True
+ pattern_str = pattern_str[1:]
+
+ self.dirs = self.normalize(pattern_str)
+ self.cleaned_pattern = '/'.join(self.dirs)
+
+ @classmethod
+ def normalize(cls, p):
+
+ # Remove trailing spaces
+ p = p.strip()
+
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.Clean".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ def match(self, filepath):
+ return fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
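+
+
+# Sketch of how include/exclude patterns compose (hypothetical patterns,
+# mirroring .dockerignore semantics):
+#
+#   pm = PatternMatcher(['*.pyc', '!keep.pyc'])
+#   pm.matches('foo.pyc')   # True  -> excluded from the build context
+#   pm.matches('keep.pyc')  # False -> re-included by the ! pattern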
diff --git a/contrib/python/docker/docker/utils/config.py b/contrib/python/docker/docker/utils/config.py
new file mode 100644
index 0000000000..8e24959a5d
--- /dev/null
+++ b/contrib/python/docker/docker/utils/config.py
@@ -0,0 +1,66 @@
+import json
+import logging
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+
+DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
+LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+log = logging.getLogger(__name__)
+
+
+def find_config_file(config_path=None):
+ paths = list(filter(None, [
+ config_path, # 1
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
+
+ log.debug(f"Trying paths: {repr(paths)}")
+
+ for path in paths:
+ if os.path.exists(path):
+ log.debug(f"Found file at path: {path}")
+ return path
+
+ log.debug("No config file found")
+
+ return None
+
+
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+ """
+ Get the user's home directory, using the same logic as the Docker Engine
+ client - %USERPROFILE% on Windows, $HOME (falling back to the uid's entry
+ in the password database) on POSIX.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
+def load_general_config(config_path=None):
+ config_file = find_config_file(config_path)
+
+ if not config_file:
+ return {}
+
+ try:
+ with open(config_file) as f:
+ return json.load(f)
+ except (OSError, ValueError) as e:
+ # In the case of a legacy `.dockercfg` file, we won't
+ # be able to load any JSON data.
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/contrib/python/docker/docker/utils/decorators.py b/contrib/python/docker/docker/utils/decorators.py
new file mode 100644
index 0000000000..5aab98cd46
--- /dev/null
+++ b/contrib/python/docker/docker/utils/decorators.py
@@ -0,0 +1,45 @@
+import functools
+
+from .. import errors
+from . import utils
+
+
+def check_resource(resource_name):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None and kwargs.get(resource_name):
+ resource_id = kwargs.pop(resource_name)
+ if isinstance(resource_id, dict):
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
+ if not resource_id:
+ raise errors.NullResource(
+ 'Resource ID was not provided'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
+ return decorator
+
+
+def minimum_version(version):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if utils.version_lt(self._version, version):
+ raise errors.InvalidVersion(
+ f'{f.__name__} is not available for version < {version}',
+ )
+ return f(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def update_headers(f):
+ def inner(self, *args, **kwargs):
+ if 'HttpHeaders' in self._general_configs:
+ if not kwargs.get('headers'):
+ kwargs['headers'] = self._general_configs['HttpHeaders']
+ else:
+ kwargs['headers'].update(self._general_configs['HttpHeaders'])
+ return f(self, *args, **kwargs)
+ return inner
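+
+
+# Sketch of how these decorators stack inside the API mixins (hypothetical
+# method, for illustration only):
+#
+#   class ExampleApiMixin:
+#       @update_headers
+#       @minimum_version('1.25')
+#       @check_resource('container')
+#       def example_op(self, container, **kwargs):
+#           ...
+#
+# check_resource resolves dicts like {'Id': ...} to a plain id,
+# minimum_version raises InvalidVersion against self._version, and
+# update_headers merges HttpHeaders from the loaded general config.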
diff --git a/contrib/python/docker/docker/utils/fnmatch.py b/contrib/python/docker/docker/utils/fnmatch.py
new file mode 100644
index 0000000000..be745381e4
--- /dev/null
+++ b/contrib/python/docker/docker/utils/fnmatch.py
@@ -0,0 +1,115 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case in account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized
+ if the operating system requires it.
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ name = name.lower()
+ pat = pat.lower()
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+ i, n = 0, len(pat)
+ res = '^'
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ # is some flavor of "**"
+ i = i + 1
+ # Treat **/ as ** so eat the "/"
+ if i < n and pat[i] == '/':
+ i = i + 1
+ if i >= n:
+ # is "**EOF" - to align with .gitignore just accept all
+ res = f"{res}.*"
+ else:
+ # is "**"
+ # Note that this allows for any # of /'s (even 0) because
+ # the .* will eat everything, even /'s
+ res = f"{res}(.*/)?"
+ else:
+ # is "*" so map it to anything but "/"
+ res = f"{res}[^/]*"
+ elif c == '?':
+ # "?" is any char except "/"
+ res = f"{res}[^/]"
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = f"{res}\\["
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = f"^{stuff[1:]}"
+ elif stuff[0] == '^':
+ stuff = f"\\{stuff}"
+ res = f'{res}[{stuff}]'
+ else:
+ res = res + re.escape(c)
+
+ return f"{res}$"
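+
+
+# Example translations, assuming the rules above:
+#   translate('*.py')    == r'^[^/]*\.py$'
+#   translate('**/*.py') == r'^(.*/)?[^/]*\.py$'
+#   fnmatch('docs/readme.md', '**')  # True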
diff --git a/contrib/python/docker/docker/utils/json_stream.py b/contrib/python/docker/docker/utils/json_stream.py
new file mode 100644
index 0000000000..41d25920ce
--- /dev/null
+++ b/contrib/python/docker/docker/utils/json_stream.py
@@ -0,0 +1,74 @@
+import json
+import json.decoder
+
+from ..errors import StreamParseError
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """
+ Given a stream of bytes or text, if any of the items in the stream
+ are bytes, convert them to text.
+ This function can be removed once we return text streams
+ instead of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, str):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator='\n'):
+ index = buffer.find(str(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = ''
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e) from e
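+
+
+# Usage sketch (hypothetical chunks): objects may be split across reads,
+# and json_stream() reassembles them:
+#
+#   chunks = ['{"status": "Pulling"}{"id"', ': "abc"}']
+#   list(json_stream(chunks))  # [{'status': 'Pulling'}, {'id': 'abc'}]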
diff --git a/contrib/python/docker/docker/utils/ports.py b/contrib/python/docker/docker/utils/ports.py
new file mode 100644
index 0000000000..9fd6e8f6b8
--- /dev/null
+++ b/contrib/python/docker/docker/utils/ports.py
@@ -0,0 +1,83 @@
+import re
+
+PORT_SPEC = re.compile(
+ "^" # Match full string
+ "(" # External part
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ ")?"
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp|sctp))?" # Protocol
+ "$" # Match full string
+)
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def _raise_invalid_port(port):
+ raise ValueError(f'Invalid port "{port}", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]')
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+ if not start:
+ return start
+ if not end:
+ return [start + proto]
+ if randomly_available_port:
+ return [f"{start}-{end}{proto}"]
+ return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
+ port = str(port)
+ match = PORT_SPEC.match(port)
+ if match is None:
+ _raise_invalid_port(port)
+ parts = match.groupdict()
+
+ host = parts['host']
+ proto = parts['proto'] or ''
+ internal = port_range(parts['int'], parts['int_end'], proto)
+ external = port_range(
+ parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+ if host is None:
+ if external is not None and len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, external
+ else:
+ if not external:
+ external = [None] * len(internal)
+ elif len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, [(host, ext_port) for ext_port in external]
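+
+
+# Examples, following the grammar in PORT_SPEC:
+#   split_port('80/tcp')                == (['80/tcp'], None)
+#   split_port('8080:80')               == (['80'], ['8080'])
+#   split_port('127.0.0.1:8080:80/udp') == (['80/udp'], [('127.0.0.1', '8080')])
+#   split_port('8000-8002:80-82')       == (['80', '81', '82'],
+#                                           ['8000', '8001', '8002'])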
diff --git a/contrib/python/docker/docker/utils/proxy.py b/contrib/python/docker/docker/utils/proxy.py
new file mode 100644
index 0000000000..e7164b6cea
--- /dev/null
+++ b/contrib/python/docker/docker/utils/proxy.py
@@ -0,0 +1,77 @@
+from .utils import format_environment
+
+
+class ProxyConfig(dict):
+ '''
+ Hold the client's proxy configuration
+ '''
+ @property
+ def http(self):
+ return self.get('http')
+
+ @property
+ def https(self):
+ return self.get('https')
+
+ @property
+ def ftp(self):
+ return self.get('ftp')
+
+ @property
+ def no_proxy(self):
+ return self.get('no_proxy')
+
+ @staticmethod
+ def from_dict(config):
+ '''
+ Instantiate a new ProxyConfig from a dictionary that represents a
+ client configuration, as described in `the documentation`_.
+
+ .. _the documentation:
+ https://docs.docker.com/network/proxy/#configure-the-docker-client
+ '''
+ return ProxyConfig(
+ http=config.get('httpProxy'),
+ https=config.get('httpsProxy'),
+ ftp=config.get('ftpProxy'),
+ no_proxy=config.get('noProxy'),
+ )
+
+ def get_environment(self):
+ '''
+ Return a dictionary representing the environment variables used to
+ set the proxy settings.
+ '''
+ env = {}
+ if self.http:
+ env['http_proxy'] = env['HTTP_PROXY'] = self.http
+ if self.https:
+ env['https_proxy'] = env['HTTPS_PROXY'] = self.https
+ if self.ftp:
+ env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
+ if self.no_proxy:
+ env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
+ return env
+
+ def inject_proxy_environment(self, environment):
+ '''
+ Given a list of strings representing environment variables, prepend the
+ environment variables corresponding to the proxy settings.
+ '''
+ if not self:
+ return environment
+
+ proxy_env = format_environment(self.get_environment())
+ if not environment:
+ return proxy_env
+ # It is important to prepend our variables, because we want the
+ # variables defined in "environment" to take precedence.
+ return proxy_env + environment
+
+ def __str__(self):
+ return (
+ 'ProxyConfig('
+ f'http={self.http}, https={self.https}, '
+ f'ftp={self.ftp}, no_proxy={self.no_proxy}'
+ ')'
+ )
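+
+
+# Usage sketch (hypothetical values):
+#
+#   cfg = ProxyConfig.from_dict({'httpProxy': 'http://proxy:3128'})
+#   cfg.get_environment()
+#   # {'http_proxy': 'http://proxy:3128', 'HTTP_PROXY': 'http://proxy:3128'}
+#   cfg.inject_proxy_environment(['FOO=bar'])
+#   # ['http_proxy=http://proxy:3128', 'HTTP_PROXY=http://proxy:3128', 'FOO=bar']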
diff --git a/contrib/python/docker/docker/utils/socket.py b/contrib/python/docker/docker/utils/socket.py
new file mode 100644
index 0000000000..c7cb584d4f
--- /dev/null
+++ b/contrib/python/docker/docker/utils/socket.py
@@ -0,0 +1,187 @@
+import errno
+import os
+import select
+import socket as pysocket
+import struct
+
+try:
+ from ..transport import NpipeSocket
+except ImportError:
+ NpipeSocket = type(None)
+
+
+STDOUT = 1
+STDERR = 2
+
+
+class SocketError(Exception):
+ pass
+
+
+# NpipeSockets have their own error types
+# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
+NPIPE_ENDED = 109
+
+
+def read(socket, n=4096):
+ """
+ Reads at most n bytes from socket
+ """
+
+ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
+
+ if not isinstance(socket, NpipeSocket):
+ if not hasattr(select, "poll"):
+ # select() is limited to FD_SETSIZE (commonly 1024) descriptors
+ select.select([socket], [], [])
+ else:
+ poll = select.poll()
+ poll.register(socket, select.POLLIN | select.POLLPRI)
+ poll.poll()
+
+ try:
+ if hasattr(socket, 'recv'):
+ return socket.recv(n)
+ if isinstance(socket, pysocket.SocketIO):
+ return socket.read(n)
+ return os.read(socket.fileno(), n)
+ except OSError as e:
+ if e.errno not in recoverable_errors:
+ raise
+ except Exception as e:
+ is_pipe_ended = (isinstance(socket, NpipeSocket) and
+ len(e.args) > 0 and
+ e.args[0] == NPIPE_ENDED)
+ if is_pipe_ended:
+ # npipes don't support duplex sockets, so we interpret
+ # a PIPE_ENDED error as a close operation (0-length read).
+ return ''
+ raise
+
+
+def read_exactly(socket, n):
+ """
+ Reads exactly n bytes from socket
+ Raises SocketError if there isn't enough data
+ """
+ data = b""
+ while len(data) < n:
+ next_data = read(socket, n - len(data))
+ if not next_data:
+ raise SocketError("Unexpected EOF")
+ data += next_data
+ return data
+
+
+def next_frame_header(socket):
+ """
+ Returns the stream and size of the next frame of data waiting to be read
+ from socket, according to the protocol defined here:
+
+ https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
+ """
+ try:
+ data = read_exactly(socket, 8)
+ except SocketError:
+ return (-1, -1)
+
+ stream, actual = struct.unpack('>BxxxL', data)
+ return (stream, actual)
+
+
+def frames_iter(socket, tty):
+ """
+ Return a generator of frames read from socket. A frame is a tuple where
+ the first item is the stream number and the second item is a chunk of data.
+
+ If the tty setting is enabled, the streams are multiplexed into the stdout
+ stream.
+ """
+ if tty:
+ return ((STDOUT, frame) for frame in frames_iter_tty(socket))
+ else:
+ return frames_iter_no_tty(socket)
+
+
+def frames_iter_no_tty(socket):
+ """
+ Returns a generator of data read from the socket when the tty setting is
+ not enabled.
+ """
+ while True:
+ (stream, n) = next_frame_header(socket)
+ if n < 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ if result is None:
+ continue
+ data_length = len(result)
+ if data_length == 0:
+ # We have reached EOF
+ return
+ n -= data_length
+ yield (stream, result)
+
+
+def frames_iter_tty(socket):
+ """
+ Return a generator of data read from the socket when the tty setting is
+ enabled.
+ """
+ while True:
+ result = read(socket)
+ if len(result) == 0:
+ # We have reached EOF
+ return
+ yield result
+
+
+def consume_socket_output(frames, demux=False):
+ """
+ Iterate through frames read from the socket and return the result.
+
+ Args:
+ frames: an iterable of frames read from the socket.
+ demux (bool):
+ If False, stdout and stderr are multiplexed, and the result is the
+ concatenation of all the frames. If True, the streams are
+ demultiplexed, and the result is a 2-tuple where each item is the
+ concatenation of frames belonging to the same stream.
+ """
+ if demux is False:
+ # If the streams are multiplexed, the generator yields byte strings
+ # that we just need to concatenate.
+ return b"".join(frames)
+
+ # If the streams are demultiplexed, the generator yields tuples
+ # (stdout, stderr)
+ out = [None, None]
+ for frame in frames:
+ # It is guaranteed that for each frame, one and only one stream
+ # is not None.
+ assert frame != (None, None)
+ if frame[0] is not None:
+ if out[0] is None:
+ out[0] = frame[0]
+ else:
+ out[0] += frame[0]
+ else:
+ if out[1] is None:
+ out[1] = frame[1]
+ else:
+ out[1] += frame[1]
+ return tuple(out)
+
+
+def demux_adaptor(stream_id, data):
+ """
+ Utility to demultiplex stdout and stderr when reading frames from the
+ socket.
+ """
+ if stream_id == STDOUT:
+ return (data, None)
+ elif stream_id == STDERR:
+ return (None, data)
+ else:
+ raise ValueError(f'{stream_id} is not a valid stream')
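+
+
+# Demultiplexing sketch (hypothetical frames): pairing frames from the
+# socket with demux_adaptor() yields (stdout, stderr) tuples, which
+# consume_socket_output() folds into a single 2-tuple:
+#
+#   frames = [(b'out1', None), (None, b'err1'), (b'out2', None)]
+#   consume_socket_output(frames, demux=True)  # (b'out1out2', b'err1')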
diff --git a/contrib/python/docker/docker/utils/utils.py b/contrib/python/docker/docker/utils/utils.py
new file mode 100644
index 0000000000..f36a3afb89
--- /dev/null
+++ b/contrib/python/docker/docker/utils/utils.py
@@ -0,0 +1,517 @@
+import base64
+import collections
+import json
+import os
+import os.path
+import shlex
+import string
+from datetime import datetime, timezone
+from functools import lru_cache
+from itertools import zip_longest
+from urllib.parse import urlparse, urlunparse
+
+from .. import errors
+from ..constants import (
+ BYTE_UNITS,
+ DEFAULT_HTTP_HOST,
+ DEFAULT_NPIPE,
+ DEFAULT_UNIX_SOCKET,
+)
+from ..tls import TLSConfig
+
+URLComponents = collections.namedtuple(
+ 'URLComponents',
+ 'scheme netloc url params query fragment',
+)
+
+
+def create_ipam_pool(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_pool has been removed. Please use a '
+ 'docker.types.IPAMPool object instead.'
+ )
+
+
+def create_ipam_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_config has been removed. Please use a '
+ 'docker.types.IPAMConfig object instead.'
+ )
+
+
+def decode_json_header(header):
+ data = base64.b64decode(header)
+ data = data.decode('utf-8')
+ return json.loads(data)
+
+
+@lru_cache(maxsize=None)
+def compare_version(v1, v2):
+ """Compare docker versions
+
+ >>> v1 = '1.9'
+ >>> v2 = '1.10'
+ >>> compare_version(v1, v2)
+ 1
+ >>> compare_version(v2, v1)
+ -1
+ >>> compare_version(v2, v2)
+ 0
+ """
+ if v1 == v2:
+ return 0
+ # Split into `sys.version_info` like tuples.
+ s1 = tuple(int(p) for p in v1.split('.'))
+ s2 = tuple(int(p) for p in v2.split('.'))
+ # Compare each component, padding with 0 if necessary.
+ for c1, c2 in zip_longest(s1, s2, fillvalue=0):
+ if c1 == c2:
+ continue
+ elif c1 > c2:
+ return -1
+ else:
+ return 1
+ return 0
+
+
+def version_lt(v1, v2):
+ return compare_version(v1, v2) > 0
+
+
+def version_gte(v1, v2):
+ return not version_lt(v1, v2)
+
+
+def _convert_port_binding(binding):
+ result = {'HostIp': '', 'HostPort': ''}
+ if isinstance(binding, tuple):
+ if len(binding) == 2:
+ result['HostPort'] = binding[1]
+ result['HostIp'] = binding[0]
+ elif isinstance(binding[0], str):
+ result['HostIp'] = binding[0]
+ else:
+ result['HostPort'] = binding[0]
+ elif isinstance(binding, dict):
+ if 'HostPort' in binding:
+ result['HostPort'] = binding['HostPort']
+ if 'HostIp' in binding:
+ result['HostIp'] = binding['HostIp']
+ else:
+ raise ValueError(binding)
+ else:
+ result['HostPort'] = binding
+
+ if result['HostPort'] is None:
+ result['HostPort'] = ''
+ else:
+ result['HostPort'] = str(result['HostPort'])
+
+ return result
+
+
+def convert_port_bindings(port_bindings):
+ result = {}
+ for k, v in iter(port_bindings.items()):
+ key = str(k)
+ if '/' not in key:
+ key += '/tcp'
+ if isinstance(v, list):
+ result[key] = [_convert_port_binding(binding) for binding in v]
+ else:
+ result[key] = [_convert_port_binding(v)]
+ return result
+
+
+def convert_volume_binds(binds):
+ if isinstance(binds, list):
+ return binds
+
+ result = []
+ for k, v in binds.items():
+ if isinstance(k, bytes):
+ k = k.decode('utf-8')
+
+ if isinstance(v, dict):
+ if 'ro' in v and 'mode' in v:
+ raise ValueError(
+ f'Binding cannot contain both "ro" and "mode": {v!r}'
+ )
+
+ bind = v['bind']
+ if isinstance(bind, bytes):
+ bind = bind.decode('utf-8')
+
+ if 'ro' in v:
+ mode = 'ro' if v['ro'] else 'rw'
+ elif 'mode' in v:
+ mode = v['mode']
+ else:
+ mode = 'rw'
+
+ # NOTE: this is only relevant for Linux hosts
+ # (doesn't apply in Docker Desktop)
+ propagation_modes = [
+ 'rshared',
+ 'shared',
+ 'rslave',
+ 'slave',
+ 'rprivate',
+ 'private',
+ ]
+ if 'propagation' in v and v['propagation'] in propagation_modes:
+ if mode:
+ mode = f"{mode},{v['propagation']}"
+ else:
+ mode = v['propagation']
+
+ result.append(
+ f'{k}:{bind}:{mode}'
+ )
+ else:
+ if isinstance(v, bytes):
+ v = v.decode('utf-8')
+ result.append(
+ f'{k}:{v}:rw'
+ )
+ return result
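+
+
+# Example, assuming the dict form handled above:
+#   convert_volume_binds({'/host/data': {'bind': '/data', 'mode': 'ro'}})
+#   == ['/host/data:/data:ro']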
+
+
+def convert_tmpfs_mounts(tmpfs):
+ if isinstance(tmpfs, dict):
+ return tmpfs
+
+ if not isinstance(tmpfs, list):
+ raise ValueError(
+ 'Expected tmpfs value to be either a list or a dict, '
+ f'found: {type(tmpfs).__name__}'
+ )
+
+ result = {}
+ for mount in tmpfs:
+ if isinstance(mount, str):
+ if ":" in mount:
+ name, options = mount.split(":", 1)
+ else:
+ name = mount
+ options = ""
+
+ else:
+ raise ValueError(
+ "Expected item in tmpfs list to be a string, "
+ f"found: {type(mount).__name__}"
+ )
+
+ result[name] = options
+ return result
+
+
+def convert_service_networks(networks):
+ if not networks:
+ return networks
+ if not isinstance(networks, list):
+ raise TypeError('networks parameter must be a list.')
+
+ result = []
+ for n in networks:
+ if isinstance(n, str):
+ n = {'Target': n}
+ result.append(n)
+ return result
+
+
+def parse_repository_tag(repo_name):
+ parts = repo_name.rsplit('@', 1)
+ if len(parts) == 2:
+ return tuple(parts)
+ parts = repo_name.rsplit(':', 1)
+ if len(parts) == 2 and '/' not in parts[1]:
+ return tuple(parts)
+ return repo_name, None
+
+
+def parse_host(addr, is_win32=False, tls=False):
+ # Sensible defaults
+ if not addr and is_win32:
+ return DEFAULT_NPIPE
+ if not addr or addr.strip() == 'unix://':
+ return DEFAULT_UNIX_SOCKET
+
+ addr = addr.strip()
+
+ parsed_url = urlparse(addr)
+ proto = parsed_url.scheme
+ if not proto or any(x not in f"{string.ascii_letters}+" for x in proto):
+ # https://bugs.python.org/issue754016
+ parsed_url = urlparse(f"//{addr}", 'tcp')
+ proto = 'tcp'
+
+ if proto == 'fd':
+ raise errors.DockerException('fd protocol is not implemented')
+
+ # These protos are valid aliases for our library but not for the
+ # official spec
+ if proto == 'http' or proto == 'https':
+ tls = proto == 'https'
+ proto = 'tcp'
+ elif proto == 'http+unix':
+ proto = 'unix'
+
+ if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
+ raise errors.DockerException(
+ f"Invalid bind address protocol: {addr}"
+ )
+
+ if proto == 'tcp' and not parsed_url.netloc:
+ # "tcp://" is exceptionally disallowed by convention;
+ # omitting a hostname for other protocols is fine
+ raise errors.DockerException(
+ f'Invalid bind address format: {addr}'
+ )
+
+ if any([
+ parsed_url.params, parsed_url.query, parsed_url.fragment,
+ parsed_url.password
+ ]):
+ raise errors.DockerException(
+ f'Invalid bind address format: {addr}'
+ )
+
+ if parsed_url.path and proto == 'ssh':
+ raise errors.DockerException(
+ f'Invalid bind address format: no path allowed for this protocol: {addr}'
+ )
+ else:
+ path = parsed_url.path
+ if proto == 'unix' and parsed_url.hostname is not None:
+ # For legacy reasons, we consider unix://path
+ # to be valid and equivalent to unix:///path
+ path = f"{parsed_url.hostname}/{path}"
+
+ netloc = parsed_url.netloc
+ if proto in ('tcp', 'ssh'):
+ port = parsed_url.port or 0
+ if port <= 0:
+ if proto != 'ssh':
+ raise errors.DockerException(
+ f'Invalid bind address format: port is required: {addr}'
+ )
+ port = 22
+ netloc = f'{parsed_url.netloc}:{port}'
+
+ if not parsed_url.hostname:
+ netloc = f'{DEFAULT_HTTP_HOST}:{port}'
+
+ # Rewrite schemes to fit library internals (requests adapters)
+ if proto == 'tcp':
+ proto = f"http{'s' if tls else ''}"
+ elif proto == 'unix':
+ proto = 'http+unix'
+
+ if proto in ('http+unix', 'npipe'):
+ return f"{proto}://{path}".rstrip('/')
+
+ return urlunparse(URLComponents(
+ scheme=proto,
+ netloc=netloc,
+ url=path,
+ params='',
+ query='',
+ fragment='',
+ )).rstrip('/')
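+
+
+# Examples, following the rewriting rules above:
+#   parse_host('tcp://127.0.0.1:2376', tls=True)  == 'https://127.0.0.1:2376'
+#   parse_host('unix:///var/run/docker.sock')     == 'http+unix:///var/run/docker.sock'
+#   parse_host('ssh://user@host')                 == 'ssh://user@host:22'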
+
+
+def parse_devices(devices):
+ device_list = []
+ for device in devices:
+ if isinstance(device, dict):
+ device_list.append(device)
+ continue
+ if not isinstance(device, str):
+ raise errors.DockerException(
+ f'Invalid device type {type(device)}'
+ )
+ device_mapping = device.split(':')
+ if device_mapping:
+ path_on_host = device_mapping[0]
+ if len(device_mapping) > 1:
+ path_in_container = device_mapping[1]
+ else:
+ path_in_container = path_on_host
+ if len(device_mapping) > 2:
+ permissions = device_mapping[2]
+ else:
+ permissions = 'rwm'
+ device_list.append({
+ 'PathOnHost': path_on_host,
+ 'PathInContainer': path_in_container,
+ 'CgroupPermissions': permissions
+ })
+ return device_list
+
+
+def kwargs_from_env(environment=None):
+ if not environment:
+ environment = os.environ
+ host = environment.get('DOCKER_HOST')
+
+ # empty string for cert path is the same as unset.
+ cert_path = environment.get('DOCKER_CERT_PATH') or None
+
+ # empty string for tls verify counts as "false".
+ # Any other value counts as "true"; unset counts as "false".
+ tls_verify = environment.get('DOCKER_TLS_VERIFY')
+ if tls_verify == '':
+ tls_verify = False
+ else:
+ tls_verify = tls_verify is not None
+ enable_tls = cert_path or tls_verify
+
+ params = {}
+
+ if host:
+ params['base_url'] = host
+
+ if not enable_tls:
+ return params
+
+ if not cert_path:
+ cert_path = os.path.join(os.path.expanduser('~'), '.docker')
+
+ params['tls'] = TLSConfig(
+ client_cert=(os.path.join(cert_path, 'cert.pem'),
+ os.path.join(cert_path, 'key.pem')),
+ ca_cert=os.path.join(cert_path, 'ca.pem'),
+ verify=tls_verify,
+ )
+
+ return params
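+
+
+# Sketch of the resulting kwargs (hypothetical environment):
+#   kwargs_from_env({'DOCKER_HOST': 'tcp://10.0.0.1:2376',
+#                    'DOCKER_TLS_VERIFY': '1',
+#                    'DOCKER_CERT_PATH': '/certs'})
+#   # {'base_url': 'tcp://10.0.0.1:2376', 'tls': TLSConfig(...)}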
+
+
+def convert_filters(filters):
+ result = {}
+ for k, v in iter(filters.items()):
+ if isinstance(v, bool):
+ v = 'true' if v else 'false'
+ if not isinstance(v, list):
+ v = [v, ]
+ result[k] = [
+ str(item) if not isinstance(item, str) else item
+ for item in v
+ ]
+ return json.dumps(result)
+
+
+def datetime_to_timestamp(dt):
+ """Convert a datetime to a Unix timestamp"""
+ delta = dt.astimezone(timezone.utc) - datetime(1970, 1, 1, tzinfo=timezone.utc)
+ return delta.seconds + delta.days * 24 * 3600
+
+
+def parse_bytes(s):
+ if isinstance(s, (int, float,)):
+ return s
+ if len(s) == 0:
+ return 0
+
+ if s[-2:-1].isalpha() and s[-1].isalpha():
+ if s[-1] == "b" or s[-1] == "B":
+ s = s[:-1]
+ units = BYTE_UNITS
+ suffix = s[-1].lower()
+
+ # Check if the variable is a string representation of an int
+ # without a units part; assume the unit is bytes.
+ if suffix.isdigit():
+ digits_part = s
+ suffix = 'b'
+ else:
+ digits_part = s[:-1]
+
+ if suffix in units.keys() or suffix.isdigit():
+ try:
+ digits = float(digits_part)
+ except ValueError as ve:
+ raise errors.DockerException(
+ 'Failed converting the string value for memory '
+ f'({digits_part}) to an integer.'
+ ) from ve
+
+ # Reconvert to long for the final result
+ s = int(digits * units[suffix])
+ else:
+ raise errors.DockerException(
+ f'The specified value for memory ({s}) should specify the units. '
+ 'The suffix should be one of the `b`, `k`, `m` or `g` characters'
+ )
+
+ return s
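+
+
+# Examples, assuming BYTE_UNITS maps b/k/m/g to powers of 1024:
+#   parse_bytes('128m')  # 134217728
+#   parse_bytes('1g')    # 1073741824
+#   parse_bytes('512')   # 512 (no suffix -> bytes)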
+
+
+def normalize_links(links):
+ if isinstance(links, dict):
+ links = iter(links.items())
+
+ return [f'{k}:{v}' if v else k for k, v in sorted(links)]
+
+
+def parse_env_file(env_file):
+ """
+ Reads a line-separated environment file.
+ The format of each line should be "key=value".
+ """
+ environment = {}
+
+ with open(env_file) as f:
+ for line in f:
+
+ if line[0] == '#':
+ continue
+
+ line = line.strip()
+ if not line:
+ continue
+
+ parse_line = line.split('=', 1)
+ if len(parse_line) == 2:
+ k, v = parse_line
+ environment[k] = v
+ else:
+ raise errors.DockerException(
+ f'Invalid line in environment file {env_file}:\n{line}')
+
+ return environment
+
+
+def split_command(command):
+ return shlex.split(command)
+
+
+def format_environment(environment):
+ def format_env(key, value):
+ if value is None:
+ return key
+ if isinstance(value, bytes):
+ value = value.decode('utf-8')
+
+ return f'{key}={value}'
+ return [format_env(*var) for var in iter(environment.items())]
+
+
+def format_extra_hosts(extra_hosts, task=False):
+ # Use format dictated by Swarm API if container is part of a task
+ if task:
+ return [
+ f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
+ ]
+
+ return [
+ f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
+ ]
+
+
+def create_host_config(self, *args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_host_config has been removed. Please use a '
+ 'docker.types.HostConfig object instead.'
+ )
diff --git a/contrib/python/docker/docker/version.py b/contrib/python/docker/docker/version.py
new file mode 100644
index 0000000000..72b12b84df
--- /dev/null
+++ b/contrib/python/docker/docker/version.py
@@ -0,0 +1,8 @@
+try:
+ from ._version import __version__
+except ImportError:
+ from importlib.metadata import PackageNotFoundError, version
+ try:
+ __version__ = version('docker')
+ except PackageNotFoundError:
+ __version__ = '0.0.0'
diff --git a/contrib/python/docker/ya.make b/contrib/python/docker/ya.make
new file mode 100644
index 0000000000..5dbd794e30
--- /dev/null
+++ b/contrib/python/docker/ya.make
@@ -0,0 +1,99 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(7.1.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/requests
+ contrib/python/urllib3
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ docker.transport.npipeconn
+ docker.transport.npipesocket
+ docker.transport.sshconn
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ docker/__init__.py
+ docker/_version.py
+ docker/api/__init__.py
+ docker/api/build.py
+ docker/api/client.py
+ docker/api/config.py
+ docker/api/container.py
+ docker/api/daemon.py
+ docker/api/exec_api.py
+ docker/api/image.py
+ docker/api/network.py
+ docker/api/plugin.py
+ docker/api/secret.py
+ docker/api/service.py
+ docker/api/swarm.py
+ docker/api/volume.py
+ docker/auth.py
+ docker/client.py
+ docker/constants.py
+ docker/context/__init__.py
+ docker/context/api.py
+ docker/context/config.py
+ docker/context/context.py
+ docker/credentials/__init__.py
+ docker/credentials/constants.py
+ docker/credentials/errors.py
+ docker/credentials/store.py
+ docker/credentials/utils.py
+ docker/errors.py
+ docker/models/__init__.py
+ docker/models/configs.py
+ docker/models/containers.py
+ docker/models/images.py
+ docker/models/networks.py
+ docker/models/nodes.py
+ docker/models/plugins.py
+ docker/models/resource.py
+ docker/models/secrets.py
+ docker/models/services.py
+ docker/models/swarm.py
+ docker/models/volumes.py
+ docker/tls.py
+ docker/transport/__init__.py
+ docker/transport/basehttpadapter.py
+ docker/transport/npipeconn.py
+ docker/transport/npipesocket.py
+ docker/transport/sshconn.py
+ docker/transport/unixconn.py
+ docker/types/__init__.py
+ docker/types/base.py
+ docker/types/containers.py
+ docker/types/daemon.py
+ docker/types/healthcheck.py
+ docker/types/networks.py
+ docker/types/services.py
+ docker/types/swarm.py
+ docker/utils/__init__.py
+ docker/utils/build.py
+ docker/utils/config.py
+ docker/utils/decorators.py
+ docker/utils/fnmatch.py
+ docker/utils/json_stream.py
+ docker/utils/ports.py
+ docker/utils/proxy.py
+ docker/utils/socket.py
+ docker/utils/utils.py
+ docker/version.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/docker/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()