path: root/contrib/tools/python3/Lib/asyncio
author    AlexSm <alex@ydb.tech>  2024-03-05 10:40:59 +0100
committer GitHub <noreply@github.com>  2024-03-05 12:40:59 +0300
commit    1ac13c847b5358faba44dbb638a828e24369467b (patch)
tree      07672b4dd3604ad3dee540a02c6494cb7d10dc3d /contrib/tools/python3/Lib/asyncio
parent    ffcca3e7f7958ddc6487b91d3df8c01054bd0638 (diff)
download  ydb-1ac13c847b5358faba44dbb638a828e24369467b.tar.gz
Library import 16 (#2433)
Co-authored-by: robot-piglet <robot-piglet@yandex-team.com>
Co-authored-by: deshevoy <deshevoy@yandex-team.com>
Co-authored-by: robot-contrib <robot-contrib@yandex-team.com>
Co-authored-by: thegeorg <thegeorg@yandex-team.com>
Co-authored-by: robot-ya-builder <robot-ya-builder@yandex-team.com>
Co-authored-by: svidyuk <svidyuk@yandex-team.com>
Co-authored-by: shadchin <shadchin@yandex-team.com>
Co-authored-by: robot-ratatosk <robot-ratatosk@yandex-team.com>
Co-authored-by: innokentii <innokentii@yandex-team.com>
Co-authored-by: arkady-e1ppa <arkady-e1ppa@yandex-team.com>
Co-authored-by: snermolaev <snermolaev@yandex-team.com>
Co-authored-by: dimdim11 <dimdim11@yandex-team.com>
Co-authored-by: kickbutt <kickbutt@yandex-team.com>
Co-authored-by: abdullinsaid <abdullinsaid@yandex-team.com>
Co-authored-by: korsunandrei <korsunandrei@yandex-team.com>
Co-authored-by: petrk <petrk@yandex-team.com>
Co-authored-by: miroslav2 <miroslav2@yandex-team.com>
Co-authored-by: serjflint <serjflint@yandex-team.com>
Co-authored-by: akhropov <akhropov@yandex-team.com>
Co-authored-by: prettyboy <prettyboy@yandex-team.com>
Co-authored-by: ilikepugs <ilikepugs@yandex-team.com>
Co-authored-by: hiddenpath <hiddenpath@yandex-team.com>
Co-authored-by: mikhnenko <mikhnenko@yandex-team.com>
Co-authored-by: spreis <spreis@yandex-team.com>
Co-authored-by: andreyshspb <andreyshspb@yandex-team.com>
Co-authored-by: dimaandreev <dimaandreev@yandex-team.com>
Co-authored-by: rashid <rashid@yandex-team.com>
Co-authored-by: robot-ydb-importer <robot-ydb-importer@yandex-team.com>
Co-authored-by: r-vetrov <r-vetrov@yandex-team.com>
Co-authored-by: ypodlesov <ypodlesov@yandex-team.com>
Co-authored-by: zaverden <zaverden@yandex-team.com>
Co-authored-by: vpozdyayev <vpozdyayev@yandex-team.com>
Co-authored-by: robot-cozmo <robot-cozmo@yandex-team.com>
Co-authored-by: v-korovin <v-korovin@yandex-team.com>
Co-authored-by: arikon <arikon@yandex-team.com>
Co-authored-by: khoden <khoden@yandex-team.com>
Co-authored-by: psydmm <psydmm@yandex-team.com>
Co-authored-by: robot-javacom <robot-javacom@yandex-team.com>
Co-authored-by: dtorilov <dtorilov@yandex-team.com>
Co-authored-by: sennikovmv <sennikovmv@yandex-team.com>
Co-authored-by: hcpp <hcpp@ydb.tech>
Diffstat (limited to 'contrib/tools/python3/Lib/asyncio')
-rw-r--r--  contrib/tools/python3/Lib/asyncio/__init__.py  47
-rw-r--r--  contrib/tools/python3/Lib/asyncio/__main__.py  125
-rw-r--r--  contrib/tools/python3/Lib/asyncio/base_events.py  2010
-rw-r--r--  contrib/tools/python3/Lib/asyncio/base_futures.py  67
-rw-r--r--  contrib/tools/python3/Lib/asyncio/base_subprocess.py  285
-rw-r--r--  contrib/tools/python3/Lib/asyncio/base_tasks.py  94
-rw-r--r--  contrib/tools/python3/Lib/asyncio/constants.py  41
-rw-r--r--  contrib/tools/python3/Lib/asyncio/coroutines.py  109
-rw-r--r--  contrib/tools/python3/Lib/asyncio/events.py  868
-rw-r--r--  contrib/tools/python3/Lib/asyncio/exceptions.py  62
-rw-r--r--  contrib/tools/python3/Lib/asyncio/format_helpers.py  76
-rw-r--r--  contrib/tools/python3/Lib/asyncio/futures.py  428
-rw-r--r--  contrib/tools/python3/Lib/asyncio/locks.py  586
-rw-r--r--  contrib/tools/python3/Lib/asyncio/log.py  7
-rw-r--r--  contrib/tools/python3/Lib/asyncio/mixins.py  21
-rw-r--r--  contrib/tools/python3/Lib/asyncio/proactor_events.py  895
-rw-r--r--  contrib/tools/python3/Lib/asyncio/protocols.py  216
-rw-r--r--  contrib/tools/python3/Lib/asyncio/queues.py  244
-rw-r--r--  contrib/tools/python3/Lib/asyncio/runners.py  215
-rw-r--r--  contrib/tools/python3/Lib/asyncio/selector_events.py  1321
-rw-r--r--  contrib/tools/python3/Lib/asyncio/sslproto.py  926
-rw-r--r--  contrib/tools/python3/Lib/asyncio/staggered.py  149
-rw-r--r--  contrib/tools/python3/Lib/asyncio/streams.py  770
-rw-r--r--  contrib/tools/python3/Lib/asyncio/subprocess.py  229
-rw-r--r--  contrib/tools/python3/Lib/asyncio/taskgroups.py  240
-rw-r--r--  contrib/tools/python3/Lib/asyncio/tasks.py  1065
-rw-r--r--  contrib/tools/python3/Lib/asyncio/threads.py  25
-rw-r--r--  contrib/tools/python3/Lib/asyncio/timeouts.py  168
-rw-r--r--  contrib/tools/python3/Lib/asyncio/transports.py  335
-rw-r--r--  contrib/tools/python3/Lib/asyncio/trsock.py  98
-rw-r--r--  contrib/tools/python3/Lib/asyncio/unix_events.py  1500
-rw-r--r--  contrib/tools/python3/Lib/asyncio/windows_events.py  896
-rw-r--r--  contrib/tools/python3/Lib/asyncio/windows_utils.py  173
33 files changed, 14291 insertions, 0 deletions
diff --git a/contrib/tools/python3/Lib/asyncio/__init__.py b/contrib/tools/python3/Lib/asyncio/__init__.py
new file mode 100644
index 0000000000..03165a425e
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/__init__.py
@@ -0,0 +1,47 @@
+"""The asyncio package, tracking PEP 3156."""
+
+# flake8: noqa
+
+import sys
+
+# This relies on each of the submodules having an __all__ variable.
+from .base_events import *
+from .coroutines import *
+from .events import *
+from .exceptions import *
+from .futures import *
+from .locks import *
+from .protocols import *
+from .runners import *
+from .queues import *
+from .streams import *
+from .subprocess import *
+from .tasks import *
+from .taskgroups import *
+from .timeouts import *
+from .threads import *
+from .transports import *
+
+__all__ = (base_events.__all__ +
+           coroutines.__all__ +
+           events.__all__ +
+           exceptions.__all__ +
+           futures.__all__ +
+           locks.__all__ +
+           protocols.__all__ +
+           runners.__all__ +
+           queues.__all__ +
+           streams.__all__ +
+           subprocess.__all__ +
+           tasks.__all__ +
+           taskgroups.__all__ +
+           threads.__all__ +
+           timeouts.__all__ +
+           transports.__all__)
+
+if sys.platform == 'win32':  # pragma: no cover
+    from .windows_events import *
+    __all__ += windows_events.__all__
+else:
+    from .unix_events import *  # pragma: no cover
+    __all__ += unix_events.__all__
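The star-imports and the `__all__` concatenation above make the package's public surface exactly the union of its submodules' `__all__` tuples. A minimal sanity check of that invariant (assuming Python 3.11+, where taskgroups exist):

    import asyncio

    # Every name re-exported above is listed in asyncio.__all__.
    assert 'run' in asyncio.__all__        # contributed by runners.__all__
    assert 'TaskGroup' in asyncio.__all__  # contributed by taskgroups.__all__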
diff --git a/contrib/tools/python3/Lib/asyncio/__main__.py b/contrib/tools/python3/Lib/asyncio/__main__.py
new file mode 100644
index 0000000000..18bb87a5bc
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/__main__.py
@@ -0,0 +1,125 @@
+import ast
+import asyncio
+import code
+import concurrent.futures
+import inspect
+import sys
+import threading
+import types
+import warnings
+
+from . import futures
+
+
+class AsyncIOInteractiveConsole(code.InteractiveConsole):
+
+    def __init__(self, locals, loop):
+        super().__init__(locals)
+        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
+
+        self.loop = loop
+
+    def runcode(self, code):
+        future = concurrent.futures.Future()
+
+        def callback():
+            global repl_future
+            global repl_future_interrupted
+
+            repl_future = None
+            repl_future_interrupted = False
+
+            func = types.FunctionType(code, self.locals)
+            try:
+                coro = func()
+            except SystemExit:
+                raise
+            except KeyboardInterrupt as ex:
+                repl_future_interrupted = True
+                future.set_exception(ex)
+                return
+            except BaseException as ex:
+                future.set_exception(ex)
+                return
+
+            if not inspect.iscoroutine(coro):
+                future.set_result(coro)
+                return
+
+            try:
+                repl_future = self.loop.create_task(coro)
+                futures._chain_future(repl_future, future)
+            except BaseException as exc:
+                future.set_exception(exc)
+
+        loop.call_soon_threadsafe(callback)
+
+        try:
+            return future.result()
+        except SystemExit:
+            raise
+        except BaseException:
+            if repl_future_interrupted:
+                self.write("\nKeyboardInterrupt\n")
+            else:
+                self.showtraceback()
+
+
+class REPLThread(threading.Thread):
+
+    def run(self):
+        try:
+            banner = (
+                f'asyncio REPL {sys.version} on {sys.platform}\n'
+                f'Use "await" directly instead of "asyncio.run()".\n'
+                f'Type "help", "copyright", "credits" or "license" '
+                f'for more information.\n'
+                f'{getattr(sys, "ps1", ">>> ")}import asyncio'
+            )
+
+            console.interact(
+                banner=banner,
+                exitmsg='exiting asyncio REPL...')
+        finally:
+            warnings.filterwarnings(
+                'ignore',
+                message=r'^coroutine .* was never awaited$',
+                category=RuntimeWarning)
+
+            loop.call_soon_threadsafe(loop.stop)
+
+
+if __name__ == '__main__':
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    repl_locals = {'asyncio': asyncio}
+    for key in {'__name__', '__package__',
+                '__loader__', '__spec__',
+                '__builtins__', '__file__'}:
+        repl_locals[key] = locals()[key]
+
+    console = AsyncIOInteractiveConsole(repl_locals, loop)
+
+    repl_future = None
+    repl_future_interrupted = False
+
+    try:
+        import readline  # NoQA
+    except ImportError:
+        pass
+
+    repl_thread = REPLThread()
+    repl_thread.daemon = True
+    repl_thread.start()
+
+    while True:
+        try:
+            loop.run_forever()
+        except KeyboardInterrupt:
+            if repl_future and not repl_future.done():
+                repl_future.cancel()
+                repl_future_interrupted = True
+            continue
+        else:
+            break
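This module is what runs for `python -m asyncio`: input is compiled with PyCF_ALLOW_TOP_LEVEL_AWAIT, the resulting coroutines are driven by the event loop on the main thread, and the interactive console itself runs on REPLThread. A sample session (banner abbreviated; the version line depends on the interpreter):

    $ python -m asyncio
    asyncio REPL 3.12.x (main, ...) on linux
    Use "await" directly instead of "asyncio.run()".
    >>> import asyncio
    >>> await asyncio.sleep(0.1, result='hello')
    'hello'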
diff --git a/contrib/tools/python3/Lib/asyncio/base_events.py b/contrib/tools/python3/Lib/asyncio/base_events.py
new file mode 100644
index 0000000000..c16c445bde
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/base_events.py
@@ -0,0 +1,2010 @@
+"""Base implementation of event loop.
+
+The event loop can be broken up into a multiplexer (the part
+responsible for notifying us of I/O events) and the event loop proper,
+which wraps a multiplexer with functionality for scheduling callbacks,
+immediately or at a given time in the future.
+
+Whenever a public API takes a callback, subsequent positional
+arguments will be passed to the callback if/when it is called. This
+avoids the proliferation of trivial lambdas implementing closures.
+Keyword arguments for the callback are not supported; this is a
+conscious design decision, leaving the door open for keyword arguments
+to modify the meaning of the API call itself.
+"""
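A minimal sketch of the convention this docstring describes: positional arguments after the callback are forwarded as-is, and keyword arguments must be bound in advance, for example with functools.partial:

    import asyncio
    import functools

    def log(msg, *, level='INFO'):
        print(f'{level}: {msg}')

    async def main():
        loop = asyncio.get_running_loop()
        loop.call_soon(log, 'positional args are forwarded')
        # Keyword arguments are reserved for the API itself; bind them first.
        loop.call_soon(functools.partial(log, 'bound kwargs', level='DEBUG'))
        await asyncio.sleep(0)  # yield once so the callbacks run

    asyncio.run(main())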
+
+import collections
+import collections.abc
+import concurrent.futures
+import errno
+import functools
+import heapq
+import itertools
+import os
+import socket
+import stat
+import subprocess
+import threading
+import time
+import traceback
+import sys
+import warnings
+import weakref
+
+try:
+ import ssl
+except ImportError: # pragma: no cover
+ ssl = None
+
+from . import constants
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from . import protocols
+from . import sslproto
+from . import staggered
+from . import tasks
+from . import transports
+from . import trsock
+from .log import logger
+
+
+__all__ = 'BaseEventLoop','Server',
+
+
+# Minimum number of _scheduled timer handles before cleanup of
+# cancelled handles is performed.
+_MIN_SCHEDULED_TIMER_HANDLES = 100
+
+# Minimum fraction of _scheduled timer handles that are cancelled
+# before cleanup of cancelled handles is performed.
+_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
+
+
+_HAS_IPv6 = hasattr(socket, 'AF_INET6')
+
+# Maximum timeout passed to select to avoid OS limitations
+MAXIMUM_SELECT_TIMEOUT = 24 * 3600
+
+
+def _format_handle(handle):
+ cb = handle._callback
+ if isinstance(getattr(cb, '__self__', None), tasks.Task):
+ # format the task
+ return repr(cb.__self__)
+ else:
+ return str(handle)
+
+
+def _format_pipe(fd):
+ if fd == subprocess.PIPE:
+ return '<pipe>'
+ elif fd == subprocess.STDOUT:
+ return '<stdout>'
+ else:
+ return repr(fd)
+
+
+def _set_reuseport(sock):
+ if not hasattr(socket, 'SO_REUSEPORT'):
+ raise ValueError('reuse_port not supported by socket module')
+ else:
+ try:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except OSError:
+ raise ValueError('reuse_port not supported by socket module, '
+ 'SO_REUSEPORT defined but not implemented.')
+
+
+def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
+ # Try to skip getaddrinfo if "host" is already an IP. Users might have
+ # handled name resolution in their own code and pass in resolved IPs.
+ if not hasattr(socket, 'inet_pton'):
+ return
+
+ if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
+ host is None:
+ return None
+
+ if type == socket.SOCK_STREAM:
+ proto = socket.IPPROTO_TCP
+ elif type == socket.SOCK_DGRAM:
+ proto = socket.IPPROTO_UDP
+ else:
+ return None
+
+ if port is None:
+ port = 0
+ elif isinstance(port, bytes) and port == b'':
+ port = 0
+ elif isinstance(port, str) and port == '':
+ port = 0
+ else:
+ # If port's a service name like "http", don't skip getaddrinfo.
+ try:
+ port = int(port)
+ except (TypeError, ValueError):
+ return None
+
+ if family == socket.AF_UNSPEC:
+ afs = [socket.AF_INET]
+ if _HAS_IPv6:
+ afs.append(socket.AF_INET6)
+ else:
+ afs = [family]
+
+ if isinstance(host, bytes):
+ host = host.decode('idna')
+ if '%' in host:
+ # Linux's inet_pton doesn't accept an IPv6 zone index after host,
+ # like '::1%lo0'.
+ return None
+
+ for af in afs:
+ try:
+ socket.inet_pton(af, host)
+ # The host has already been resolved.
+ if _HAS_IPv6 and af == socket.AF_INET6:
+ return af, type, proto, '', (host, port, flowinfo, scopeid)
+ else:
+ return af, type, proto, '', (host, port)
+ except OSError:
+ pass
+
+ # "host" is not an IP address.
+ return None
+
+
+def _interleave_addrinfos(addrinfos, first_address_family_count=1):
+ """Interleave list of addrinfo tuples by family."""
+ # Group addresses by family
+ addrinfos_by_family = collections.OrderedDict()
+ for addr in addrinfos:
+ family = addr[0]
+ if family not in addrinfos_by_family:
+ addrinfos_by_family[family] = []
+ addrinfos_by_family[family].append(addr)
+ addrinfos_lists = list(addrinfos_by_family.values())
+
+ reordered = []
+ if first_address_family_count > 1:
+ reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
+ del addrinfos_lists[0][:first_address_family_count - 1]
+ reordered.extend(
+ a for a in itertools.chain.from_iterable(
+ itertools.zip_longest(*addrinfos_lists)
+ ) if a is not None)
+ return reordered
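For illustration only (this is a private helper): given hypothetical getaddrinfo()-style entries with both IPv6 results first, interleaving reorders them so address families alternate, which keeps a stalled IPv6 attempt from delaying IPv4 during Happy Eyeballs:

    from socket import AF_INET, AF_INET6, SOCK_STREAM

    # Hypothetical resolved entries: two IPv6 followed by two IPv4.
    infos = [(AF_INET6, SOCK_STREAM, 6, '', ('2001:db8::1', 80, 0, 0)),
             (AF_INET6, SOCK_STREAM, 6, '', ('2001:db8::2', 80, 0, 0)),
             (AF_INET, SOCK_STREAM, 6, '', ('192.0.2.1', 80)),
             (AF_INET, SOCK_STREAM, 6, '', ('192.0.2.2', 80))]
    # _interleave_addrinfos(infos) returns them in the order
    # 2001:db8::1, 192.0.2.1, 2001:db8::2, 192.0.2.2.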
+
+
+def _run_until_complete_cb(fut):
+ if not fut.cancelled():
+ exc = fut.exception()
+ if isinstance(exc, (SystemExit, KeyboardInterrupt)):
+ # Issue #22429: run_forever() already finished, no need to
+ # stop it.
+ return
+ futures._get_loop(fut).stop()
+
+
+if hasattr(socket, 'TCP_NODELAY'):
+ def _set_nodelay(sock):
+ if (sock.family in {socket.AF_INET, socket.AF_INET6} and
+ sock.type == socket.SOCK_STREAM and
+ sock.proto == socket.IPPROTO_TCP):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+else:
+ def _set_nodelay(sock):
+ pass
+
+
+def _check_ssl_socket(sock):
+ if ssl is not None and isinstance(sock, ssl.SSLSocket):
+ raise TypeError("Socket cannot be of type SSLSocket")
+
+
+class _SendfileFallbackProtocol(protocols.Protocol):
+ def __init__(self, transp):
+ if not isinstance(transp, transports._FlowControlMixin):
+ raise TypeError("transport should be _FlowControlMixin instance")
+ self._transport = transp
+ self._proto = transp.get_protocol()
+ self._should_resume_reading = transp.is_reading()
+ self._should_resume_writing = transp._protocol_paused
+ transp.pause_reading()
+ transp.set_protocol(self)
+ if self._should_resume_writing:
+ self._write_ready_fut = self._transport._loop.create_future()
+ else:
+ self._write_ready_fut = None
+
+ async def drain(self):
+ if self._transport.is_closing():
+ raise ConnectionError("Connection closed by peer")
+ fut = self._write_ready_fut
+ if fut is None:
+ return
+ await fut
+
+ def connection_made(self, transport):
+ raise RuntimeError("Invalid state: "
+ "connection should have been established already.")
+
+ def connection_lost(self, exc):
+ if self._write_ready_fut is not None:
+ # Never happens if peer disconnects after sending the whole content
+ # Thus disconnection is always an exception from user perspective
+ if exc is None:
+ self._write_ready_fut.set_exception(
+ ConnectionError("Connection is closed by peer"))
+ else:
+ self._write_ready_fut.set_exception(exc)
+ self._proto.connection_lost(exc)
+
+ def pause_writing(self):
+ if self._write_ready_fut is not None:
+ return
+ self._write_ready_fut = self._transport._loop.create_future()
+
+ def resume_writing(self):
+ if self._write_ready_fut is None:
+ return
+ self._write_ready_fut.set_result(False)
+ self._write_ready_fut = None
+
+ def data_received(self, data):
+ raise RuntimeError("Invalid state: reading should be paused")
+
+ def eof_received(self):
+ raise RuntimeError("Invalid state: reading should be paused")
+
+ async def restore(self):
+ self._transport.set_protocol(self._proto)
+ if self._should_resume_reading:
+ self._transport.resume_reading()
+ if self._write_ready_fut is not None:
+ # Cancel the future.
+ # Basically it has no effect because protocol is switched back,
+ # no code should wait for it anymore.
+ self._write_ready_fut.cancel()
+ if self._should_resume_writing:
+ self._proto.resume_writing()
+
+
+class Server(events.AbstractServer):
+
+ def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
+ ssl_handshake_timeout, ssl_shutdown_timeout=None):
+ self._loop = loop
+ self._sockets = sockets
+ self._active_count = 0
+ self._waiters = []
+ self._protocol_factory = protocol_factory
+ self._backlog = backlog
+ self._ssl_context = ssl_context
+ self._ssl_handshake_timeout = ssl_handshake_timeout
+ self._ssl_shutdown_timeout = ssl_shutdown_timeout
+ self._serving = False
+ self._serving_forever_fut = None
+
+ def __repr__(self):
+ return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
+
+ def _attach(self):
+ assert self._sockets is not None
+ self._active_count += 1
+
+ def _detach(self):
+ assert self._active_count > 0
+ self._active_count -= 1
+ if self._active_count == 0 and self._sockets is None:
+ self._wakeup()
+
+ def _wakeup(self):
+ waiters = self._waiters
+ self._waiters = None
+ for waiter in waiters:
+ if not waiter.done():
+ waiter.set_result(None)
+
+ def _start_serving(self):
+ if self._serving:
+ return
+ self._serving = True
+ for sock in self._sockets:
+ sock.listen(self._backlog)
+ self._loop._start_serving(
+ self._protocol_factory, sock, self._ssl_context,
+ self, self._backlog, self._ssl_handshake_timeout,
+ self._ssl_shutdown_timeout)
+
+ def get_loop(self):
+ return self._loop
+
+ def is_serving(self):
+ return self._serving
+
+ @property
+ def sockets(self):
+ if self._sockets is None:
+ return ()
+ return tuple(trsock.TransportSocket(s) for s in self._sockets)
+
+ def close(self):
+ sockets = self._sockets
+ if sockets is None:
+ return
+ self._sockets = None
+
+ for sock in sockets:
+ self._loop._stop_serving(sock)
+
+ self._serving = False
+
+ if (self._serving_forever_fut is not None and
+ not self._serving_forever_fut.done()):
+ self._serving_forever_fut.cancel()
+ self._serving_forever_fut = None
+
+ if self._active_count == 0:
+ self._wakeup()
+
+ async def start_serving(self):
+ self._start_serving()
+ # Skip one loop iteration so that all 'loop.add_reader'
+ # go through.
+ await tasks.sleep(0)
+
+ async def serve_forever(self):
+ if self._serving_forever_fut is not None:
+ raise RuntimeError(
+ f'server {self!r} is already being awaited on serve_forever()')
+ if self._sockets is None:
+ raise RuntimeError(f'server {self!r} is closed')
+
+ self._start_serving()
+ self._serving_forever_fut = self._loop.create_future()
+
+ try:
+ await self._serving_forever_fut
+ except exceptions.CancelledError:
+ try:
+ self.close()
+ await self.wait_closed()
+ finally:
+ raise
+ finally:
+ self._serving_forever_fut = None
+
+ async def wait_closed(self):
+ """Wait until server is closed and all connections are dropped.
+
+ - If the server is not closed, wait.
+ - If it is closed, but there are still active connections, wait.
+
+ Anyone waiting here will be unblocked once both conditions
+ (server is closed and all connections have been dropped)
+ have become true, in either order.
+
+ Historical note: In 3.11 and before, this was broken, returning
+ immediately if the server was already closed, even if there
+ were still active connections. An attempted fix in 3.12.0 was
+ still broken, returning immediately if the server was still
+ open and there were no active connections. Hopefully in 3.12.1
+ we have it right.
+ """
+ # Waiters are unblocked by self._wakeup(), which is called
+ # from two places: self.close() and self._detach(), but only
+ # when both conditions have become true. To signal that this
+ # has happened, self._wakeup() sets self._waiters to None.
+ if self._waiters is None:
+ return
+ waiter = self._loop.create_future()
+ self._waiters.append(waiter)
+ await waiter
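A typical shutdown sequence built on the two conditions above, as a small self-contained sketch:

    import asyncio

    async def main():
        async def handle(reader, writer):
            writer.close()
            await writer.wait_closed()

        server = await asyncio.start_server(handle, '127.0.0.1', 0)
        server.close()
        await server.wait_closed()  # unblocks once the server is closed
                                    # and all connections have dropped

    asyncio.run(main())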
+
+
+class BaseEventLoop(events.AbstractEventLoop):
+
+ def __init__(self):
+ self._timer_cancelled_count = 0
+ self._closed = False
+ self._stopping = False
+ self._ready = collections.deque()
+ self._scheduled = []
+ self._default_executor = None
+ self._internal_fds = 0
+ # Identifier of the thread running the event loop, or None if the
+ # event loop is not running
+ self._thread_id = None
+ self._clock_resolution = time.get_clock_info('monotonic').resolution
+ self._exception_handler = None
+ self.set_debug(coroutines._is_debug_mode())
+ # In debug mode, if the execution of a callback or a step of a task
+ # exceeds this duration in seconds, the slow callback/task is logged.
+ self.slow_callback_duration = 0.1
+ self._current_handle = None
+ self._task_factory = None
+ self._coroutine_origin_tracking_enabled = False
+ self._coroutine_origin_tracking_saved_depth = None
+
+ # A weak set of all asynchronous generators that are
+ # being iterated by the loop.
+ self._asyncgens = weakref.WeakSet()
+ # Set to True when `loop.shutdown_asyncgens` is called.
+ self._asyncgens_shutdown_called = False
+ # Set to True when `loop.shutdown_default_executor` is called.
+ self._executor_shutdown_called = False
+
+ def __repr__(self):
+ return (
+ f'<{self.__class__.__name__} running={self.is_running()} '
+ f'closed={self.is_closed()} debug={self.get_debug()}>'
+ )
+
+ def create_future(self):
+ """Create a Future object attached to the loop."""
+ return futures.Future(loop=self)
+
+ def create_task(self, coro, *, name=None, context=None):
+ """Schedule a coroutine object.
+
+ Return a task object.
+ """
+ self._check_closed()
+ if self._task_factory is None:
+ task = tasks.Task(coro, loop=self, name=name, context=context)
+ if task._source_traceback:
+ del task._source_traceback[-1]
+ else:
+ if context is None:
+ # Use legacy API if context is not needed
+ task = self._task_factory(self, coro)
+ else:
+ task = self._task_factory(self, coro, context=context)
+
+ tasks._set_task_name(task, name)
+
+ return task
+
+ def set_task_factory(self, factory):
+ """Set a task factory that will be used by loop.create_task().
+
+ If factory is None the default task factory will be set.
+
+ If factory is a callable, it should have a signature matching
+ '(loop, coro)', where 'loop' will be a reference to the active
+ event loop, 'coro' will be a coroutine object. The callable
+ must return a Future.
+ """
+ if factory is not None and not callable(factory):
+ raise TypeError('task factory must be a callable or None')
+ self._task_factory = factory
+
+ def get_task_factory(self):
+ """Return a task factory, or None if the default one is in use."""
+ return self._task_factory
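A sketch of a custom factory honoring the '(loop, coro)' contract; the name tagging is purely illustrative, and the context parameter mirrors the modern calling convention above:

    import asyncio

    def naming_factory(loop, coro, context=None):
        task = asyncio.Task(coro, loop=loop, context=context)
        task.set_name('factory-' + task.get_name())
        return task

    async def main():
        asyncio.get_running_loop().set_task_factory(naming_factory)
        task = asyncio.create_task(asyncio.sleep(0))
        print(task.get_name())  # e.g. 'factory-Task-2'
        await task

    asyncio.run(main())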
+
+ def _make_socket_transport(self, sock, protocol, waiter=None, *,
+ extra=None, server=None):
+ """Create socket transport."""
+ raise NotImplementedError
+
+ def _make_ssl_transport(
+ self, rawsock, protocol, sslcontext, waiter=None,
+ *, server_side=False, server_hostname=None,
+ extra=None, server=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ call_connection_made=True):
+ """Create SSL transport."""
+ raise NotImplementedError
+
+ def _make_datagram_transport(self, sock, protocol,
+ address=None, waiter=None, extra=None):
+ """Create datagram transport."""
+ raise NotImplementedError
+
+ def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+ extra=None):
+ """Create read pipe transport."""
+ raise NotImplementedError
+
+ def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+ extra=None):
+ """Create write pipe transport."""
+ raise NotImplementedError
+
+ async def _make_subprocess_transport(self, protocol, args, shell,
+ stdin, stdout, stderr, bufsize,
+ extra=None, **kwargs):
+ """Create subprocess transport."""
+ raise NotImplementedError
+
+ def _write_to_self(self):
+ """Write a byte to self-pipe, to wake up the event loop.
+
+ This may be called from a different thread.
+
+ The subclass is responsible for implementing the self-pipe.
+ """
+ raise NotImplementedError
+
+ def _process_events(self, event_list):
+ """Process selector events."""
+ raise NotImplementedError
+
+ def _check_closed(self):
+ if self._closed:
+ raise RuntimeError('Event loop is closed')
+
+ def _check_default_executor(self):
+ if self._executor_shutdown_called:
+ raise RuntimeError('Executor shutdown has been called')
+
+ def _asyncgen_finalizer_hook(self, agen):
+ self._asyncgens.discard(agen)
+ if not self.is_closed():
+ self.call_soon_threadsafe(self.create_task, agen.aclose())
+
+ def _asyncgen_firstiter_hook(self, agen):
+ if self._asyncgens_shutdown_called:
+ warnings.warn(
+ f"asynchronous generator {agen!r} was scheduled after "
+ f"loop.shutdown_asyncgens() call",
+ ResourceWarning, source=self)
+
+ self._asyncgens.add(agen)
+
+ async def shutdown_asyncgens(self):
+ """Shutdown all active asynchronous generators."""
+ self._asyncgens_shutdown_called = True
+
+ if not len(self._asyncgens):
+ # No asynchronous generators are alive; nothing to close.
+ return
+
+ closing_agens = list(self._asyncgens)
+ self._asyncgens.clear()
+
+ results = await tasks.gather(
+ *[ag.aclose() for ag in closing_agens],
+ return_exceptions=True)
+
+ for result, agen in zip(results, closing_agens):
+ if isinstance(result, Exception):
+ self.call_exception_handler({
+ 'message': f'an error occurred during closing of '
+ f'asynchronous generator {agen!r}',
+ 'exception': result,
+ 'asyncgen': agen
+ })
+
+ async def shutdown_default_executor(self, timeout=None):
+ """Schedule the shutdown of the default executor.
+
+ The timeout parameter specifies the amount of time the executor will
+ be given to finish joining. The default value is None, which means
+ that the executor will be given an unlimited amount of time.
+ """
+ self._executor_shutdown_called = True
+ if self._default_executor is None:
+ return
+ future = self.create_future()
+ thread = threading.Thread(target=self._do_shutdown, args=(future,))
+ thread.start()
+ try:
+ await future
+ finally:
+ thread.join(timeout)
+
+ if thread.is_alive():
+ warnings.warn("The executor did not finish joining "
+ f"its threads within {timeout} seconds.",
+ RuntimeWarning, stacklevel=2)
+ self._default_executor.shutdown(wait=False)
+
+ def _do_shutdown(self, future):
+ try:
+ self._default_executor.shutdown(wait=True)
+ if not self.is_closed():
+ self.call_soon_threadsafe(future.set_result, None)
+ except Exception as ex:
+ if not self.is_closed():
+ self.call_soon_threadsafe(future.set_exception, ex)
+
+ def _check_running(self):
+ if self.is_running():
+ raise RuntimeError('This event loop is already running')
+ if events._get_running_loop() is not None:
+ raise RuntimeError(
+ 'Cannot run the event loop while another loop is running')
+
+ def run_forever(self):
+ """Run until stop() is called."""
+ self._check_closed()
+ self._check_running()
+ self._set_coroutine_origin_tracking(self._debug)
+
+ old_agen_hooks = sys.get_asyncgen_hooks()
+ try:
+ self._thread_id = threading.get_ident()
+ sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
+ finalizer=self._asyncgen_finalizer_hook)
+
+ events._set_running_loop(self)
+ while True:
+ self._run_once()
+ if self._stopping:
+ break
+ finally:
+ self._stopping = False
+ self._thread_id = None
+ events._set_running_loop(None)
+ self._set_coroutine_origin_tracking(False)
+ sys.set_asyncgen_hooks(*old_agen_hooks)
+
+ def run_until_complete(self, future):
+ """Run until the Future is done.
+
+ If the argument is a coroutine, it is wrapped in a Task.
+
+ WARNING: It would be disastrous to call run_until_complete()
+ with the same coroutine twice -- it would wrap it in two
+ different Tasks and that can't be good.
+
+ Return the Future's result, or raise its exception.
+ """
+ self._check_closed()
+ self._check_running()
+
+ new_task = not futures.isfuture(future)
+ future = tasks.ensure_future(future, loop=self)
+ if new_task:
+ # An exception is raised if the future didn't complete, so there
+ # is no need to log the "destroy pending task" message
+ future._log_destroy_pending = False
+
+ future.add_done_callback(_run_until_complete_cb)
+ try:
+ self.run_forever()
+ except:
+ if new_task and future.done() and not future.cancelled():
+ # The coroutine raised a BaseException. Consume the exception
+ # to not log a warning, the caller doesn't have access to the
+ # local task.
+ future.exception()
+ raise
+ finally:
+ future.remove_done_callback(_run_until_complete_cb)
+ if not future.done():
+ raise RuntimeError('Event loop stopped before Future completed.')
+
+ return future.result()
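The basic calling pattern as a sketch; per the docstring's warning, the same coroutine object must not be passed twice:

    import asyncio

    async def compute():
        await asyncio.sleep(0.1)
        return 42

    loop = asyncio.new_event_loop()
    try:
        print(loop.run_until_complete(compute()))  # wraps it in a Task; prints 42
    finally:
        loop.close()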
+
+ def stop(self):
+ """Stop running the event loop.
+
+ Every callback already scheduled will still run. This simply informs
+ run_forever to stop looping after a complete iteration.
+ """
+ self._stopping = True
+
+ def close(self):
+ """Close the event loop.
+
+ This clears the queues and shuts down the executor,
+ but does not wait for the executor to finish.
+
+ The event loop must not be running.
+ """
+ if self.is_running():
+ raise RuntimeError("Cannot close a running event loop")
+ if self._closed:
+ return
+ if self._debug:
+ logger.debug("Close %r", self)
+ self._closed = True
+ self._ready.clear()
+ self._scheduled.clear()
+ self._executor_shutdown_called = True
+ executor = self._default_executor
+ if executor is not None:
+ self._default_executor = None
+ executor.shutdown(wait=False)
+
+ def is_closed(self):
+ """Returns True if the event loop was closed."""
+ return self._closed
+
+ def __del__(self, _warn=warnings.warn):
+ if not self.is_closed():
+ _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
+ if not self.is_running():
+ self.close()
+
+ def is_running(self):
+ """Returns True if the event loop is running."""
+ return (self._thread_id is not None)
+
+ def time(self):
+ """Return the time according to the event loop's clock.
+
+ This is a float expressed in seconds since an epoch, but the
+ epoch, precision, accuracy and drift are unspecified and may
+ differ per event loop.
+ """
+ return time.monotonic()
+
+ def call_later(self, delay, callback, *args, context=None):
+ """Arrange for a callback to be called at a given time.
+
+ Return a Handle: an opaque object with a cancel() method that
+ can be used to cancel the call.
+
+ The delay can be an int or float, expressed in seconds. It is
+ always relative to the current time.
+
+ Each callback will be called exactly once. If two callbacks
+ are scheduled for exactly the same time, it is undefined which
+ will be called first.
+
+ Any positional arguments after the callback will be passed to
+ the callback when it is called.
+ """
+ if delay is None:
+ raise TypeError('delay must not be None')
+ timer = self.call_at(self.time() + delay, callback, *args,
+ context=context)
+ if timer._source_traceback:
+ del timer._source_traceback[-1]
+ return timer
+
+ def call_at(self, when, callback, *args, context=None):
+ """Like call_later(), but uses an absolute time.
+
+ Absolute time corresponds to the event loop's time() method.
+ """
+ if when is None:
+ raise TypeError("when cannot be None")
+ self._check_closed()
+ if self._debug:
+ self._check_thread()
+ self._check_callback(callback, 'call_at')
+ timer = events.TimerHandle(when, callback, args, self, context)
+ if timer._source_traceback:
+ del timer._source_traceback[-1]
+ heapq.heappush(self._scheduled, timer)
+ timer._scheduled = True
+ return timer
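Both schedulers in one sketch: call_later() takes a relative delay, call_at() an absolute deadline on the loop's clock, and each returns a TimerHandle whose cancel() prevents the call:

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()
        loop.call_later(0.05, print, 'fires once')
        handle = loop.call_at(loop.time() + 10, print, 'never fires')
        handle.cancel()  # cancelled handles are purged from the heap lazily
        await asyncio.sleep(0.1)

    asyncio.run(main())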
+
+ def call_soon(self, callback, *args, context=None):
+ """Arrange for a callback to be called as soon as possible.
+
+ This operates as a FIFO queue: callbacks are called in the
+ order in which they are registered. Each callback will be
+ called exactly once.
+
+ Any positional arguments after the callback will be passed to
+ the callback when it is called.
+ """
+ self._check_closed()
+ if self._debug:
+ self._check_thread()
+ self._check_callback(callback, 'call_soon')
+ handle = self._call_soon(callback, args, context)
+ if handle._source_traceback:
+ del handle._source_traceback[-1]
+ return handle
+
+ def _check_callback(self, callback, method):
+ if (coroutines.iscoroutine(callback) or
+ coroutines.iscoroutinefunction(callback)):
+ raise TypeError(
+ f"coroutines cannot be used with {method}()")
+ if not callable(callback):
+ raise TypeError(
+ f'a callable object was expected by {method}(), '
+ f'got {callback!r}')
+
+ def _call_soon(self, callback, args, context):
+ handle = events.Handle(callback, args, self, context)
+ if handle._source_traceback:
+ del handle._source_traceback[-1]
+ self._ready.append(handle)
+ return handle
+
+ def _check_thread(self):
+ """Check that the current thread is the thread running the event loop.
+
+ Non-thread-safe methods of this class make this assumption and will
+ likely behave incorrectly when the assumption is violated.
+
+ Should only be called when (self._debug == True). The caller is
+ responsible for checking this condition for performance reasons.
+ """
+ if self._thread_id is None:
+ return
+ thread_id = threading.get_ident()
+ if thread_id != self._thread_id:
+ raise RuntimeError(
+ "Non-thread-safe operation invoked on an event loop other "
+ "than the current one")
+
+ def call_soon_threadsafe(self, callback, *args, context=None):
+ """Like call_soon(), but thread-safe."""
+ self._check_closed()
+ if self._debug:
+ self._check_callback(callback, 'call_soon_threadsafe')
+ handle = self._call_soon(callback, args, context)
+ if handle._source_traceback:
+ del handle._source_traceback[-1]
+ self._write_to_self()
+ return handle
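Unlike call_soon(), this may be invoked from a foreign thread; the _write_to_self() call pokes the self-pipe so a loop blocked in its selector wakes up. A minimal sketch:

    import asyncio
    import threading

    async def main():
        loop = asyncio.get_running_loop()
        done = asyncio.Event()
        # done.set ultimately runs in the loop thread, so no locking is needed.
        threading.Thread(
            target=loop.call_soon_threadsafe, args=(done.set,)).start()
        await done.wait()

    asyncio.run(main())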
+
+ def run_in_executor(self, executor, func, *args):
+ self._check_closed()
+ if self._debug:
+ self._check_callback(func, 'run_in_executor')
+ if executor is None:
+ executor = self._default_executor
+ # Only check when the default executor is being used
+ self._check_default_executor()
+ if executor is None:
+ executor = concurrent.futures.ThreadPoolExecutor(
+ thread_name_prefix='asyncio'
+ )
+ self._default_executor = executor
+ return futures.wrap_future(
+ executor.submit(func, *args), loop=self)
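Passing None as the executor selects (and lazily creates) the default ThreadPoolExecutor, so blocking work runs off the loop thread. A sketch:

    import asyncio

    def blocking_work():
        return sum(range(1_000_000))  # stand-in for a blocking call

    async def main():
        loop = asyncio.get_running_loop()
        print(await loop.run_in_executor(None, blocking_work))

    asyncio.run(main())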
+
+ def set_default_executor(self, executor):
+ if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
+ raise TypeError('executor must be ThreadPoolExecutor instance')
+ self._default_executor = executor
+
+ def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
+ msg = [f"{host}:{port!r}"]
+ if family:
+ msg.append(f'family={family!r}')
+ if type:
+ msg.append(f'type={type!r}')
+ if proto:
+ msg.append(f'proto={proto!r}')
+ if flags:
+ msg.append(f'flags={flags!r}')
+ msg = ', '.join(msg)
+ logger.debug('Get address info %s', msg)
+
+ t0 = self.time()
+ addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
+ dt = self.time() - t0
+
+ msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
+ if dt >= self.slow_callback_duration:
+ logger.info(msg)
+ else:
+ logger.debug(msg)
+ return addrinfo
+
+ async def getaddrinfo(self, host, port, *,
+ family=0, type=0, proto=0, flags=0):
+ if self._debug:
+ getaddr_func = self._getaddrinfo_debug
+ else:
+ getaddr_func = socket.getaddrinfo
+
+ return await self.run_in_executor(
+ None, getaddr_func, host, port, family, type, proto, flags)
+
+ async def getnameinfo(self, sockaddr, flags=0):
+ return await self.run_in_executor(
+ None, socket.getnameinfo, sockaddr, flags)
+
+ async def sock_sendfile(self, sock, file, offset=0, count=None,
+ *, fallback=True):
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ _check_ssl_socket(sock)
+ self._check_sendfile_params(sock, file, offset, count)
+ try:
+ return await self._sock_sendfile_native(sock, file,
+ offset, count)
+ except exceptions.SendfileNotAvailableError as exc:
+ if not fallback:
+ raise
+ return await self._sock_sendfile_fallback(sock, file,
+ offset, count)
+
+ async def _sock_sendfile_native(self, sock, file, offset, count):
+ # NB: sendfile syscall is not supported for SSL sockets and
+ # non-mmap files even if sendfile is supported by OS
+ raise exceptions.SendfileNotAvailableError(
+ f"syscall sendfile is not available for socket {sock!r} "
+ f"and file {file!r} combination")
+
+ async def _sock_sendfile_fallback(self, sock, file, offset, count):
+ if offset:
+ file.seek(offset)
+ blocksize = (
+ min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
+ if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
+ )
+ buf = bytearray(blocksize)
+ total_sent = 0
+ try:
+ while True:
+ if count:
+ blocksize = min(count - total_sent, blocksize)
+ if blocksize <= 0:
+ break
+ view = memoryview(buf)[:blocksize]
+ read = await self.run_in_executor(None, file.readinto, view)
+ if not read:
+ break # EOF
+ await self.sock_sendall(sock, view[:read])
+ total_sent += read
+ return total_sent
+ finally:
+ if total_sent > 0 and hasattr(file, 'seek'):
+ file.seek(offset + total_sent)
+
+ def _check_sendfile_params(self, sock, file, offset, count):
+ if 'b' not in getattr(file, 'mode', 'b'):
+ raise ValueError("file should be opened in binary mode")
+ if not sock.type == socket.SOCK_STREAM:
+ raise ValueError("only SOCK_STREAM type sockets are supported")
+ if count is not None:
+ if not isinstance(count, int):
+ raise TypeError(
+ "count must be a positive integer (got {!r})".format(count))
+ if count <= 0:
+ raise ValueError(
+ "count must be a positive integer (got {!r})".format(count))
+ if not isinstance(offset, int):
+ raise TypeError(
+ "offset must be a non-negative integer (got {!r})".format(
+ offset))
+ if offset < 0:
+ raise ValueError(
+ "offset must be a non-negative integer (got {!r})".format(
+ offset))
+
+ async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
+ """Create, bind and connect one socket."""
+ my_exceptions = []
+ exceptions.append(my_exceptions)
+ family, type_, proto, _, address = addr_info
+ sock = None
+ try:
+ sock = socket.socket(family=family, type=type_, proto=proto)
+ sock.setblocking(False)
+ if local_addr_infos is not None:
+ for lfamily, _, _, _, laddr in local_addr_infos:
+ # skip local addresses of different family
+ if lfamily != family:
+ continue
+ try:
+ sock.bind(laddr)
+ break
+ except OSError as exc:
+ msg = (
+ f'error while attempting to bind on '
+ f'address {laddr!r}: '
+ f'{exc.strerror.lower()}'
+ )
+ exc = OSError(exc.errno, msg)
+ my_exceptions.append(exc)
+ else: # all bind attempts failed
+ if my_exceptions:
+ raise my_exceptions.pop()
+ else:
+ raise OSError(f"no matching local address with {family=} found")
+ await self.sock_connect(sock, address)
+ return sock
+ except OSError as exc:
+ my_exceptions.append(exc)
+ if sock is not None:
+ sock.close()
+ raise
+ except:
+ if sock is not None:
+ sock.close()
+ raise
+ finally:
+ exceptions = my_exceptions = None
+
+ async def create_connection(
+ self, protocol_factory, host=None, port=None,
+ *, ssl=None, family=0,
+ proto=0, flags=0, sock=None,
+ local_addr=None, server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ happy_eyeballs_delay=None, interleave=None,
+ all_errors=False):
+ """Connect to a TCP server.
+
+ Create a streaming transport connection to a given internet host and
+ port: socket family AF_INET or socket.AF_INET6 depending on host (or
+ family if specified), socket type SOCK_STREAM. protocol_factory must be
+ a callable returning a protocol instance.
+
+ This method is a coroutine which will try to establish the connection
+ in the background. When successful, the coroutine returns a
+ (transport, protocol) pair.
+ """
+ if server_hostname is not None and not ssl:
+ raise ValueError('server_hostname is only meaningful with ssl')
+
+ if server_hostname is None and ssl:
+ # Use host as default for server_hostname. It is an error
+ # if host is empty or not set, e.g. when an
+ # already-connected socket was passed or when only a port
+ # is given. To avoid this error, you can pass
+ # server_hostname='' -- this will bypass the hostname
+ # check. (This also means that if host is a numeric
+ # IP/IPv6 address, we will attempt to verify that exact
+ # address; this will probably fail, but it is possible to
+ # create a certificate for a specific IP address, so we
+ # don't judge it here.)
+ if not host:
+ raise ValueError('You must set server_hostname '
+ 'when using ssl without a host')
+ server_hostname = host
+
+ if ssl_handshake_timeout is not None and not ssl:
+ raise ValueError(
+ 'ssl_handshake_timeout is only meaningful with ssl')
+
+ if ssl_shutdown_timeout is not None and not ssl:
+ raise ValueError(
+ 'ssl_shutdown_timeout is only meaningful with ssl')
+
+ if sock is not None:
+ _check_ssl_socket(sock)
+
+ if happy_eyeballs_delay is not None and interleave is None:
+ # If using happy eyeballs, default to interleave addresses by family
+ interleave = 1
+
+ if host is not None or port is not None:
+ if sock is not None:
+ raise ValueError(
+ 'host/port and sock can not be specified at the same time')
+
+ infos = await self._ensure_resolved(
+ (host, port), family=family,
+ type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
+ if not infos:
+ raise OSError('getaddrinfo() returned empty list')
+
+ if local_addr is not None:
+ laddr_infos = await self._ensure_resolved(
+ local_addr, family=family,
+ type=socket.SOCK_STREAM, proto=proto,
+ flags=flags, loop=self)
+ if not laddr_infos:
+ raise OSError('getaddrinfo() returned empty list')
+ else:
+ laddr_infos = None
+
+ if interleave:
+ infos = _interleave_addrinfos(infos, interleave)
+
+ exceptions = []
+ if happy_eyeballs_delay is None:
+ # not using happy eyeballs
+ for addrinfo in infos:
+ try:
+ sock = await self._connect_sock(
+ exceptions, addrinfo, laddr_infos)
+ break
+ except OSError:
+ continue
+ else: # using happy eyeballs
+ sock, _, _ = await staggered.staggered_race(
+ (functools.partial(self._connect_sock,
+ exceptions, addrinfo, laddr_infos)
+ for addrinfo in infos),
+ happy_eyeballs_delay, loop=self)
+
+ if sock is None:
+ exceptions = [exc for sub in exceptions for exc in sub]
+ try:
+ if all_errors:
+ raise ExceptionGroup("create_connection failed", exceptions)
+ if len(exceptions) == 1:
+ raise exceptions[0]
+ else:
+ # If they all have the same str(), raise one.
+ model = str(exceptions[0])
+ if all(str(exc) == model for exc in exceptions):
+ raise exceptions[0]
+ # Raise a combined exception so the user can see all
+ # the various error messages.
+ raise OSError('Multiple exceptions: {}'.format(
+ ', '.join(str(exc) for exc in exceptions)))
+ finally:
+ exceptions = None
+
+ else:
+ if sock is None:
+ raise ValueError(
+ 'host and port were not specified and no sock specified')
+ if sock.type != socket.SOCK_STREAM:
+ # We allow AF_INET, AF_INET6, AF_UNIX as long as they
+ # are SOCK_STREAM.
+ # We support passing AF_UNIX sockets even though we have
+ # a dedicated API for that: create_unix_connection.
+ # Disallowing AF_UNIX in this method, breaks backwards
+ # compatibility.
+ raise ValueError(
+ f'A Stream Socket was expected, got {sock!r}')
+
+ transport, protocol = await self._create_connection_transport(
+ sock, protocol_factory, ssl, server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ if self._debug:
+ # Get the socket from the transport because SSL transport closes
+ # the old socket and creates a new SSL socket
+ sock = transport.get_extra_info('socket')
+ logger.debug("%r connected to %s:%r: (%r, %r)",
+ sock, host, port, transport, protocol)
+ return transport, protocol
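A client-side sketch; the endpoint is a placeholder and assumes something is listening on 127.0.0.1:8888. The loop resolves the address, connects, and wires the transport to a fresh protocol instance:

    import asyncio

    class Greeter(asyncio.Protocol):
        def connection_made(self, transport):
            transport.write(b'hello\r\n')
            transport.close()

    async def main():
        loop = asyncio.get_running_loop()
        transport, protocol = await loop.create_connection(
            Greeter, '127.0.0.1', 8888)  # placeholder endpoint

    asyncio.run(main())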
+
+ async def _create_connection_transport(
+ self, sock, protocol_factory, ssl,
+ server_hostname, server_side=False,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+
+ sock.setblocking(False)
+
+ protocol = protocol_factory()
+ waiter = self.create_future()
+ if ssl:
+ sslcontext = None if isinstance(ssl, bool) else ssl
+ transport = self._make_ssl_transport(
+ sock, protocol, sslcontext, waiter,
+ server_side=server_side, server_hostname=server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ else:
+ transport = self._make_socket_transport(sock, protocol, waiter)
+
+ try:
+ await waiter
+ except:
+ transport.close()
+ raise
+
+ return transport, protocol
+
+ async def sendfile(self, transport, file, offset=0, count=None,
+ *, fallback=True):
+ """Send a file to transport.
+
+ Return the total number of bytes which were sent.
+
+ The method uses high-performance os.sendfile if available.
+
+ file must be a regular file object opened in binary mode.
+
+ offset tells from where to start reading the file. If specified,
+ count is the total number of bytes to transmit as opposed to
+ sending the file until EOF is reached. File position is updated on
+ return or also in case of error in which case file.tell()
+ can be used to figure out the number of bytes
+ which were sent.
+
+ fallback set to True makes asyncio manually read and send
+ the file when the platform does not support the sendfile syscall
+ (e.g. Windows or SSL socket on Unix).
+
+ Raise SendfileNotAvailableError if the system does not support
+ sendfile syscall and fallback is False.
+ """
+ if transport.is_closing():
+ raise RuntimeError("Transport is closing")
+ mode = getattr(transport, '_sendfile_compatible',
+ constants._SendfileMode.UNSUPPORTED)
+ if mode is constants._SendfileMode.UNSUPPORTED:
+ raise RuntimeError(
+ f"sendfile is not supported for transport {transport!r}")
+ if mode is constants._SendfileMode.TRY_NATIVE:
+ try:
+ return await self._sendfile_native(transport, file,
+ offset, count)
+ except exceptions.SendfileNotAvailableError as exc:
+ if not fallback:
+ raise
+
+ if not fallback:
+ raise RuntimeError(
+ f"fallback is disabled and native sendfile is not "
+ f"supported for transport {transport!r}")
+
+ return await self._sendfile_fallback(transport, file,
+ offset, count)
+
+ async def _sendfile_native(self, transp, file, offset, count):
+ raise exceptions.SendfileNotAvailableError(
+ "sendfile syscall is not supported")
+
+ async def _sendfile_fallback(self, transp, file, offset, count):
+ if offset:
+ file.seek(offset)
+ blocksize = min(count, 16384) if count else 16384
+ buf = bytearray(blocksize)
+ total_sent = 0
+ proto = _SendfileFallbackProtocol(transp)
+ try:
+ while True:
+ if count:
+ blocksize = min(count - total_sent, blocksize)
+ if blocksize <= 0:
+ return total_sent
+ view = memoryview(buf)[:blocksize]
+ read = await self.run_in_executor(None, file.readinto, view)
+ if not read:
+ return total_sent # EOF
+ await proto.drain()
+ transp.write(view[:read])
+ total_sent += read
+ finally:
+ if total_sent > 0 and hasattr(file, 'seek'):
+ file.seek(offset + total_sent)
+ await proto.restore()
+
+ async def start_tls(self, transport, protocol, sslcontext, *,
+ server_side=False,
+ server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ """Upgrade transport to TLS.
+
+ Return a new transport that *protocol* should start using
+ immediately.
+ """
+ if ssl is None:
+ raise RuntimeError('Python ssl module is not available')
+
+ if not isinstance(sslcontext, ssl.SSLContext):
+ raise TypeError(
+ f'sslcontext is expected to be an instance of ssl.SSLContext, '
+ f'got {sslcontext!r}')
+
+ if not getattr(transport, '_start_tls_compatible', False):
+ raise TypeError(
+ f'transport {transport!r} is not supported by start_tls()')
+
+ waiter = self.create_future()
+ ssl_protocol = sslproto.SSLProtocol(
+ self, protocol, sslcontext, waiter,
+ server_side, server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout,
+ call_connection_made=False)
+
+ # Pause early so that "ssl_protocol.data_received()" doesn't
+ # have a chance to get called before "ssl_protocol.connection_made()".
+ transport.pause_reading()
+
+ transport.set_protocol(ssl_protocol)
+ conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
+ resume_cb = self.call_soon(transport.resume_reading)
+
+ try:
+ await waiter
+ except BaseException:
+ transport.close()
+ conmade_cb.cancel()
+ resume_cb.cancel()
+ raise
+
+ return ssl_protocol._app_transport
+
+ async def create_datagram_endpoint(self, protocol_factory,
+ local_addr=None, remote_addr=None, *,
+ family=0, proto=0, flags=0,
+ reuse_port=None,
+ allow_broadcast=None, sock=None):
+ """Create datagram connection."""
+ if sock is not None:
+ if sock.type == socket.SOCK_STREAM:
+ raise ValueError(
+ f'A datagram socket was expected, got {sock!r}')
+ if (local_addr or remote_addr or
+ family or proto or flags or
+ reuse_port or allow_broadcast):
+ # show the problematic kwargs in exception msg
+ opts = dict(local_addr=local_addr, remote_addr=remote_addr,
+ family=family, proto=proto, flags=flags,
+ reuse_port=reuse_port,
+ allow_broadcast=allow_broadcast)
+ problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
+ raise ValueError(
+ f'socket modifier keyword arguments can not be used '
+ f'when sock is specified. ({problems})')
+ sock.setblocking(False)
+ r_addr = None
+ else:
+ if not (local_addr or remote_addr):
+ if family == 0:
+ raise ValueError('unexpected address family')
+ addr_pairs_info = (((family, proto), (None, None)),)
+ elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
+ for addr in (local_addr, remote_addr):
+ if addr is not None and not isinstance(addr, str):
+ raise TypeError('string is expected')
+
+ if local_addr and local_addr[0] not in (0, '\x00'):
+ try:
+ if stat.S_ISSOCK(os.stat(local_addr).st_mode):
+ os.remove(local_addr)
+ except FileNotFoundError:
+ pass
+ except OSError as err:
+ # Directory may have permissions only to create socket.
+ logger.error('Unable to check or remove stale UNIX '
+ 'socket %r: %r',
+ local_addr, err)
+
+ addr_pairs_info = (((family, proto),
+ (local_addr, remote_addr)), )
+ else:
+ # join address by (family, protocol)
+ addr_infos = {} # Using order preserving dict
+ for idx, addr in ((0, local_addr), (1, remote_addr)):
+ if addr is not None:
+ if not (isinstance(addr, tuple) and len(addr) == 2):
+ raise TypeError('2-tuple is expected')
+
+ infos = await self._ensure_resolved(
+ addr, family=family, type=socket.SOCK_DGRAM,
+ proto=proto, flags=flags, loop=self)
+ if not infos:
+ raise OSError('getaddrinfo() returned empty list')
+
+ for fam, _, pro, _, address in infos:
+ key = (fam, pro)
+ if key not in addr_infos:
+ addr_infos[key] = [None, None]
+ addr_infos[key][idx] = address
+
+ # each addr has to have info for each (family, proto) pair
+ addr_pairs_info = [
+ (key, addr_pair) for key, addr_pair in addr_infos.items()
+ if not ((local_addr and addr_pair[0] is None) or
+ (remote_addr and addr_pair[1] is None))]
+
+ if not addr_pairs_info:
+ raise ValueError('can not get address information')
+
+ exceptions = []
+
+ for ((family, proto),
+ (local_address, remote_address)) in addr_pairs_info:
+ sock = None
+ r_addr = None
+ try:
+ sock = socket.socket(
+ family=family, type=socket.SOCK_DGRAM, proto=proto)
+ if reuse_port:
+ _set_reuseport(sock)
+ if allow_broadcast:
+ sock.setsockopt(
+ socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ sock.setblocking(False)
+
+ if local_addr:
+ sock.bind(local_address)
+ if remote_addr:
+ if not allow_broadcast:
+ await self.sock_connect(sock, remote_address)
+ r_addr = remote_address
+ except OSError as exc:
+ if sock is not None:
+ sock.close()
+ exceptions.append(exc)
+ except:
+ if sock is not None:
+ sock.close()
+ raise
+ else:
+ break
+ else:
+ raise exceptions[0]
+
+ protocol = protocol_factory()
+ waiter = self.create_future()
+ transport = self._make_datagram_transport(
+ sock, protocol, r_addr, waiter)
+ if self._debug:
+ if local_addr:
+ logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
+ "created: (%r, %r)",
+ local_addr, remote_addr, transport, protocol)
+ else:
+ logger.debug("Datagram endpoint remote_addr=%r created: "
+ "(%r, %r)",
+ remote_addr, transport, protocol)
+
+ try:
+ await waiter
+ except:
+ transport.close()
+ raise
+
+ return transport, protocol
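A UDP sketch with a placeholder remote address; since UDP is connectionless, the send succeeds whether or not a peer replies:

    import asyncio

    class Pinger(asyncio.DatagramProtocol):
        def connection_made(self, transport):
            transport.sendto(b'ping')

        def datagram_received(self, data, addr):
            print('received', data, 'from', addr)

    async def main():
        loop = asyncio.get_running_loop()
        transport, _ = await loop.create_datagram_endpoint(
            Pinger, remote_addr=('127.0.0.1', 9999))  # placeholder peer
        await asyncio.sleep(0.1)
        transport.close()

    asyncio.run(main())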
+
+ async def _ensure_resolved(self, address, *,
+ family=0, type=socket.SOCK_STREAM,
+ proto=0, flags=0, loop):
+ host, port = address[:2]
+ info = _ipaddr_info(host, port, family, type, proto, *address[2:])
+ if info is not None:
+ # "host" is already a resolved IP.
+ return [info]
+ else:
+ return await loop.getaddrinfo(host, port, family=family, type=type,
+ proto=proto, flags=flags)
+
+ async def _create_server_getaddrinfo(self, host, port, family, flags):
+ infos = await self._ensure_resolved((host, port), family=family,
+ type=socket.SOCK_STREAM,
+ flags=flags, loop=self)
+ if not infos:
+ raise OSError(f'getaddrinfo({host!r}) returned empty list')
+ return infos
+
+ async def create_server(
+ self, protocol_factory, host=None, port=None,
+ *,
+ family=socket.AF_UNSPEC,
+ flags=socket.AI_PASSIVE,
+ sock=None,
+ backlog=100,
+ ssl=None,
+ reuse_address=None,
+ reuse_port=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ start_serving=True):
+ """Create a TCP server.
+
+ The host parameter can be a string, in that case the TCP server is
+ bound to host and port.
+
+ The host parameter can also be a sequence of strings and in that case
+ the TCP server is bound to all hosts of the sequence. If a host
+ appears multiple times (possibly indirectly e.g. when hostnames
+ resolve to the same IP address), the server is only bound once to that
+ host.
+
+ Return a Server object which can be used to stop the service.
+
+ This method is a coroutine.
+ """
+ if isinstance(ssl, bool):
+ raise TypeError('ssl argument must be an SSLContext or None')
+
+ if ssl_handshake_timeout is not None and ssl is None:
+ raise ValueError(
+ 'ssl_handshake_timeout is only meaningful with ssl')
+
+ if ssl_shutdown_timeout is not None and ssl is None:
+ raise ValueError(
+ 'ssl_shutdown_timeout is only meaningful with ssl')
+
+ if sock is not None:
+ _check_ssl_socket(sock)
+
+ if host is not None or port is not None:
+ if sock is not None:
+ raise ValueError(
+ 'host/port and sock can not be specified at the same time')
+
+ if reuse_address is None:
+ reuse_address = os.name == "posix" and sys.platform != "cygwin"
+ sockets = []
+ if host == '':
+ hosts = [None]
+ elif (isinstance(host, str) or
+ not isinstance(host, collections.abc.Iterable)):
+ hosts = [host]
+ else:
+ hosts = host
+
+ fs = [self._create_server_getaddrinfo(host, port, family=family,
+ flags=flags)
+ for host in hosts]
+ infos = await tasks.gather(*fs)
+ infos = set(itertools.chain.from_iterable(infos))
+
+ completed = False
+ try:
+ for res in infos:
+ af, socktype, proto, canonname, sa = res
+ try:
+ sock = socket.socket(af, socktype, proto)
+ except socket.error:
+ # Assume it's a bad family/type/protocol combination.
+ if self._debug:
+ logger.warning('create_server() failed to create '
+ 'socket.socket(%r, %r, %r)',
+ af, socktype, proto, exc_info=True)
+ continue
+ sockets.append(sock)
+ if reuse_address:
+ sock.setsockopt(
+ socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
+ if reuse_port:
+ _set_reuseport(sock)
+ # Disable IPv4/IPv6 dual stack support (enabled by
+ # default on Linux) which makes a single socket
+ # listen on both address families.
+ if (_HAS_IPv6 and
+ af == socket.AF_INET6 and
+ hasattr(socket, 'IPPROTO_IPV6')):
+ sock.setsockopt(socket.IPPROTO_IPV6,
+ socket.IPV6_V6ONLY,
+ True)
+ try:
+ sock.bind(sa)
+ except OSError as err:
+ msg = ('error while attempting '
+ 'to bind on address %r: %s'
+ % (sa, err.strerror.lower()))
+ if err.errno == errno.EADDRNOTAVAIL:
+ # Assume the family is not enabled (bpo-30945)
+ sockets.pop()
+ sock.close()
+ if self._debug:
+ logger.warning(msg)
+ continue
+ raise OSError(err.errno, msg) from None
+
+ if not sockets:
+ raise OSError('could not bind on any address out of %r'
+ % ([info[4] for info in infos],))
+
+ completed = True
+ finally:
+ if not completed:
+ for sock in sockets:
+ sock.close()
+ else:
+ if sock is None:
+ raise ValueError('Neither host/port nor sock were specified')
+ if sock.type != socket.SOCK_STREAM:
+ raise ValueError(f'A Stream Socket was expected, got {sock!r}')
+ sockets = [sock]
+
+ for sock in sockets:
+ sock.setblocking(False)
+
+ server = Server(self, sockets, protocol_factory,
+ ssl, backlog, ssl_handshake_timeout,
+ ssl_shutdown_timeout)
+ if start_serving:
+ server._start_serving()
+ # Skip one loop iteration so that all 'loop.add_reader'
+ # calls go through.
+ await tasks.sleep(0)
+
+ if self._debug:
+ logger.info("%r is serving", server)
+ return server
+
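
The flow above — resolve, bind each address, then wrap the sockets in a Server — is what a typical call exercises. A minimal usage sketch (the echo protocol, address and port are illustrative assumptions, not part of this diff):

    import asyncio

    class EchoProtocol(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport

        def data_received(self, data):
            # Echo every received chunk straight back to the peer.
            self.transport.write(data)

    async def main():
        loop = asyncio.get_running_loop()
        server = await loop.create_server(EchoProtocol, '127.0.0.1', 8888)
        async with server:            # closes the server on exit
            await server.serve_forever()

    asyncio.run(main())
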
+ async def connect_accepted_socket(
+ self, protocol_factory, sock,
+ *, ssl=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ if sock.type != socket.SOCK_STREAM:
+ raise ValueError(f'A Stream Socket was expected, got {sock!r}')
+
+ if ssl_handshake_timeout is not None and not ssl:
+ raise ValueError(
+ 'ssl_handshake_timeout is only meaningful with ssl')
+
+ if ssl_shutdown_timeout is not None and not ssl:
+ raise ValueError(
+ 'ssl_shutdown_timeout is only meaningful with ssl')
+
+ if sock is not None:
+ _check_ssl_socket(sock)
+
+ transport, protocol = await self._create_connection_transport(
+ sock, protocol_factory, ssl, '', server_side=True,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ if self._debug:
+ # Get the socket from the transport because SSL transport closes
+ # the old socket and creates a new SSL socket
+ sock = transport.get_extra_info('socket')
+ logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
+ return transport, protocol
+
+ async def connect_read_pipe(self, protocol_factory, pipe):
+ protocol = protocol_factory()
+ waiter = self.create_future()
+ transport = self._make_read_pipe_transport(pipe, protocol, waiter)
+
+ try:
+ await waiter
+ except:
+ transport.close()
+ raise
+
+ if self._debug:
+ logger.debug('Read pipe %r connected: (%r, %r)',
+ pipe.fileno(), transport, protocol)
+ return transport, protocol
+
+ async def connect_write_pipe(self, protocol_factory, pipe):
+ protocol = protocol_factory()
+ waiter = self.create_future()
+ transport = self._make_write_pipe_transport(pipe, protocol, waiter)
+
+ try:
+ await waiter
+ except:
+ transport.close()
+ raise
+
+ if self._debug:
+ logger.debug('Write pipe %r connected: (%r, %r)',
+ pipe.fileno(), transport, protocol)
+ return transport, protocol
+
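
A Unix-only sketch of connect_read_pipe() above, fed from an os.pipe() pair; the protocol name and payload are illustrative:

    import asyncio
    import os

    async def main():
        loop = asyncio.get_running_loop()
        rfd, wfd = os.pipe()
        done = loop.create_future()

        class PipeReader(asyncio.Protocol):
            def data_received(self, data):
                done.set_result(data)

        # The transport takes ownership of the file object wrapping rfd.
        transport, _ = await loop.connect_read_pipe(
            PipeReader, os.fdopen(rfd, 'rb'))
        os.write(wfd, b'ping')
        print(await done)             # b'ping'
        transport.close()
        os.close(wfd)

    asyncio.run(main())
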
+ def _log_subprocess(self, msg, stdin, stdout, stderr):
+ info = [msg]
+ if stdin is not None:
+ info.append(f'stdin={_format_pipe(stdin)}')
+ if stdout is not None and stderr == subprocess.STDOUT:
+ info.append(f'stdout=stderr={_format_pipe(stdout)}')
+ else:
+ if stdout is not None:
+ info.append(f'stdout={_format_pipe(stdout)}')
+ if stderr is not None:
+ info.append(f'stderr={_format_pipe(stderr)}')
+ logger.debug(' '.join(info))
+
+ async def subprocess_shell(self, protocol_factory, cmd, *,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=False,
+ shell=True, bufsize=0,
+ encoding=None, errors=None, text=None,
+ **kwargs):
+ if not isinstance(cmd, (bytes, str)):
+ raise ValueError("cmd must be a string")
+ if universal_newlines:
+ raise ValueError("universal_newlines must be False")
+ if not shell:
+ raise ValueError("shell must be True")
+ if bufsize != 0:
+ raise ValueError("bufsize must be 0")
+ if text:
+ raise ValueError("text must be False")
+ if encoding is not None:
+ raise ValueError("encoding must be None")
+ if errors is not None:
+ raise ValueError("errors must be None")
+
+ protocol = protocol_factory()
+ debug_log = None
+ if self._debug:
+ # don't log parameters: they may contain sensitive information
+ # (password) and may be too long
+ debug_log = 'run shell command %r' % cmd
+ self._log_subprocess(debug_log, stdin, stdout, stderr)
+ transport = await self._make_subprocess_transport(
+ protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
+ if self._debug and debug_log is not None:
+ logger.info('%s: %r', debug_log, transport)
+ return transport, protocol
+
+ async def subprocess_exec(self, protocol_factory, program, *args,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=False,
+ shell=False, bufsize=0,
+ encoding=None, errors=None, text=None,
+ **kwargs):
+ if universal_newlines:
+ raise ValueError("universal_newlines must be False")
+ if shell:
+ raise ValueError("shell must be False")
+ if bufsize != 0:
+ raise ValueError("bufsize must be 0")
+ if text:
+ raise ValueError("text must be False")
+ if encoding is not None:
+ raise ValueError("encoding must be None")
+ if errors is not None:
+ raise ValueError("errors must be None")
+
+ popen_args = (program,) + args
+ protocol = protocol_factory()
+ debug_log = None
+ if self._debug:
+ # don't log parameters: they may contain sensitive information
+ # (password) and may be too long
+ debug_log = f'execute program {program!r}'
+ self._log_subprocess(debug_log, stdin, stdout, stderr)
+ transport = await self._make_subprocess_transport(
+ protocol, popen_args, False, stdin, stdout, stderr,
+ bufsize, **kwargs)
+ if self._debug and debug_log is not None:
+ logger.info('%s: %r', debug_log, transport)
+ return transport, protocol
+
+ def get_exception_handler(self):
+ """Return an exception handler, or None if the default one is in use.
+ """
+ return self._exception_handler
+
+ def set_exception_handler(self, handler):
+ """Set handler as the new event loop exception handler.
+
+ If handler is None, the default exception handler will
+ be set.
+
+ If handler is a callable object, it should have a
+ signature matching '(loop, context)', where 'loop'
+ will be a reference to the active event loop, 'context'
+ will be a dict object (see `call_exception_handler()`
+ documentation for details about context).
+ """
+ if handler is not None and not callable(handler):
+ raise TypeError(f'A callable object or None is expected, '
+ f'got {handler!r}')
+ self._exception_handler = handler
+
+ def default_exception_handler(self, context):
+ """Default exception handler.
+
+ This is called when an exception occurs and no exception
+ handler is set, and can be called by a custom exception
+ handler that wants to defer to the default behavior.
+
+ This default handler logs the error message and other
+ context-dependent information. In debug mode, a truncated
+ stack trace is also appended showing where the given object
+ (e.g. a handle or future or task) was created, if any.
+
+ The context parameter has the same meaning as in
+ `call_exception_handler()`.
+ """
+ message = context.get('message')
+ if not message:
+ message = 'Unhandled exception in event loop'
+
+ exception = context.get('exception')
+ if exception is not None:
+ exc_info = (type(exception), exception, exception.__traceback__)
+ else:
+ exc_info = False
+
+ if ('source_traceback' not in context and
+ self._current_handle is not None and
+ self._current_handle._source_traceback):
+ context['handle_traceback'] = \
+ self._current_handle._source_traceback
+
+ log_lines = [message]
+ for key in sorted(context):
+ if key in {'message', 'exception'}:
+ continue
+ value = context[key]
+ if key == 'source_traceback':
+ tb = ''.join(traceback.format_list(value))
+ value = 'Object created at (most recent call last):\n'
+ value += tb.rstrip()
+ elif key == 'handle_traceback':
+ tb = ''.join(traceback.format_list(value))
+ value = 'Handle created at (most recent call last):\n'
+ value += tb.rstrip()
+ else:
+ value = repr(value)
+ log_lines.append(f'{key}: {value}')
+
+ logger.error('\n'.join(log_lines), exc_info=exc_info)
+
+ def call_exception_handler(self, context):
+ """Call the current event loop's exception handler.
+
+ The context argument is a dict containing the following keys:
+
+ - 'message': Error message;
+ - 'exception' (optional): Exception object;
+ - 'future' (optional): Future instance;
+ - 'task' (optional): Task instance;
+ - 'handle' (optional): Handle instance;
+ - 'protocol' (optional): Protocol instance;
+ - 'transport' (optional): Transport instance;
+ - 'socket' (optional): Socket instance;
+ - 'asyncgen' (optional): Asynchronous generator that caused
+ the exception.
+
+ New keys may be introduced in the future.
+
+ Note: do not override this method in an event loop subclass.
+ For custom exception handling, use the
+ `set_exception_handler()` method.
+ """
+ if self._exception_handler is None:
+ try:
+ self.default_exception_handler(context)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException:
+ # Second protection layer for unexpected errors
+ # in the default implementation, as well as for subclassed
+ # event loops with overridden "default_exception_handler".
+ logger.error('Exception in default exception handler',
+ exc_info=True)
+ else:
+ try:
+ ctx = None
+ thing = context.get("task")
+ if thing is None:
+ # Even though Futures don't have a context,
+ # Task is a subclass of Future,
+ # and sometimes the 'future' key holds a Task.
+ thing = context.get("future")
+ if thing is None:
+ # Handles also have a context.
+ thing = context.get("handle")
+ if thing is not None and hasattr(thing, "get_context"):
+ ctx = thing.get_context()
+ if ctx is not None and hasattr(ctx, "run"):
+ ctx.run(self._exception_handler, self, context)
+ else:
+ self._exception_handler(self, context)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ # Exception in the user set custom exception handler.
+ try:
+ # Let's try default handler.
+ self.default_exception_handler({
+ 'message': 'Unhandled error in exception handler',
+ 'exception': exc,
+ 'context': context,
+ })
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException:
+ # Guard 'default_exception_handler' in case it is
+ # overridden.
+ logger.error('Exception in default exception handler '
+ 'while handling an unexpected error '
+ 'in custom exception handler',
+ exc_info=True)
+
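
A short sketch of routing callback errors through a custom handler that defers to default_exception_handler(); the handler name is illustrative:

    import asyncio

    def on_error(loop, context):
        # Add our own marker, then reuse the default formatting/logging.
        print('handled:', context['message'])
        loop.default_exception_handler(context)

    async def main():
        loop = asyncio.get_running_loop()
        loop.set_exception_handler(on_error)
        loop.call_soon(lambda: 1 / 0)    # callback error -> handler
        await asyncio.sleep(0)

    asyncio.run(main())
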
+ def _add_callback(self, handle):
+ """Add a Handle to _ready."""
+ if not handle._cancelled:
+ self._ready.append(handle)
+
+ def _add_callback_signalsafe(self, handle):
+ """Like _add_callback() but called from a signal handler."""
+ self._add_callback(handle)
+ self._write_to_self()
+
+ def _timer_handle_cancelled(self, handle):
+ """Notification that a TimerHandle has been cancelled."""
+ if handle._scheduled:
+ self._timer_cancelled_count += 1
+
+ def _run_once(self):
+ """Run one full iteration of the event loop.
+
+ This calls all currently ready callbacks, polls for I/O,
+ schedules the resulting callbacks, and finally schedules
+ 'call_later' callbacks.
+ """
+
+ sched_count = len(self._scheduled)
+ if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
+ self._timer_cancelled_count / sched_count >
+ _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
+ # Remove delayed calls that were cancelled if their number
+ # is too high
+ new_scheduled = []
+ for handle in self._scheduled:
+ if handle._cancelled:
+ handle._scheduled = False
+ else:
+ new_scheduled.append(handle)
+
+ heapq.heapify(new_scheduled)
+ self._scheduled = new_scheduled
+ self._timer_cancelled_count = 0
+ else:
+ # Remove delayed calls that were cancelled from head of queue.
+ while self._scheduled and self._scheduled[0]._cancelled:
+ self._timer_cancelled_count -= 1
+ handle = heapq.heappop(self._scheduled)
+ handle._scheduled = False
+
+ timeout = None
+ if self._ready or self._stopping:
+ timeout = 0
+ elif self._scheduled:
+ # Compute the desired timeout.
+ when = self._scheduled[0]._when
+ timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
+
+ event_list = self._selector.select(timeout)
+ self._process_events(event_list)
+ # Needed to break cycles when an exception occurs.
+ event_list = None
+
+ # Handle 'later' callbacks that are ready.
+ end_time = self.time() + self._clock_resolution
+ while self._scheduled:
+ handle = self._scheduled[0]
+ if handle._when >= end_time:
+ break
+ handle = heapq.heappop(self._scheduled)
+ handle._scheduled = False
+ self._ready.append(handle)
+
+ # This is the only place where callbacks are actually *called*.
+ # All other places just add them to ready.
+ # Note: We run all currently scheduled callbacks, but not any
+ # callbacks scheduled by callbacks run this time around --
+ # they will be run the next time (after another I/O poll).
+ # Use an idiom that is thread-safe without using locks.
+ ntodo = len(self._ready)
+ for i in range(ntodo):
+ handle = self._ready.popleft()
+ if handle._cancelled:
+ continue
+ if self._debug:
+ try:
+ self._current_handle = handle
+ t0 = self.time()
+ handle._run()
+ dt = self.time() - t0
+ if dt >= self.slow_callback_duration:
+ logger.warning('Executing %s took %.3f seconds',
+ _format_handle(handle), dt)
+ finally:
+ self._current_handle = None
+ else:
+ handle._run()
+ handle = None # Needed to break cycles when an exception occurs.
+
+ def _set_coroutine_origin_tracking(self, enabled):
+ if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
+ return
+
+ if enabled:
+ self._coroutine_origin_tracking_saved_depth = (
+ sys.get_coroutine_origin_tracking_depth())
+ sys.set_coroutine_origin_tracking_depth(
+ constants.DEBUG_STACK_DEPTH)
+ else:
+ sys.set_coroutine_origin_tracking_depth(
+ self._coroutine_origin_tracking_saved_depth)
+
+ self._coroutine_origin_tracking_enabled = enabled
+
+ def get_debug(self):
+ return self._debug
+
+ def set_debug(self, enabled):
+ self._debug = enabled
+
+ if self.is_running():
+ self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
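
The slow-callback warning emitted by _run_once() in debug mode can be provoked with a sketch like this (the threshold and delay are arbitrary; the default slow_callback_duration is 0.1 seconds):

    import asyncio
    import logging
    import time

    logging.basicConfig(level=logging.WARNING)

    async def main():
        loop = asyncio.get_running_loop()
        loop.set_debug(True)
        loop.slow_callback_duration = 0.05
        loop.call_soon(time.sleep, 0.2)   # deliberately block the loop
        await asyncio.sleep(0.3)          # "Executing ... took 0.200 seconds"

    asyncio.run(main())
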
diff --git a/contrib/tools/python3/Lib/asyncio/base_futures.py b/contrib/tools/python3/Lib/asyncio/base_futures.py
new file mode 100644
index 0000000000..7987963bd9
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/base_futures.py
@@ -0,0 +1,67 @@
+__all__ = ()
+
+import reprlib
+
+from . import format_helpers
+
+# States for Future.
+_PENDING = 'PENDING'
+_CANCELLED = 'CANCELLED'
+_FINISHED = 'FINISHED'
+
+
+def isfuture(obj):
+ """Check for a Future.
+
+ This returns True when obj is a Future instance or is advertising
+ itself as duck-type compatible by setting _asyncio_future_blocking.
+ See comment in Future for more details.
+ """
+ return (hasattr(obj.__class__, '_asyncio_future_blocking') and
+ obj._asyncio_future_blocking is not None)
+
+
+def _format_callbacks(cb):
+ """helper function for Future.__repr__"""
+ size = len(cb)
+ if not size:
+ cb = ''
+
+ def format_cb(callback):
+ return format_helpers._format_callback_source(callback, ())
+
+ if size == 1:
+ cb = format_cb(cb[0][0])
+ elif size == 2:
+ cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
+ elif size > 2:
+ cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
+ size - 2,
+ format_cb(cb[-1][0]))
+ return f'cb=[{cb}]'
+
+
+def _future_repr_info(future):
+ # (Future) -> str
+ """helper function for Future.__repr__"""
+ info = [future._state.lower()]
+ if future._state == _FINISHED:
+ if future._exception is not None:
+ info.append(f'exception={future._exception!r}')
+ else:
+ # use reprlib to limit the length of the output, especially
+ # for very long strings
+ result = reprlib.repr(future._result)
+ info.append(f'result={result}')
+ if future._callbacks:
+ info.append(_format_callbacks(future._callbacks))
+ if future._source_traceback:
+ frame = future._source_traceback[-1]
+ info.append(f'created at {frame[0]}:{frame[1]}')
+ return info
+
+
+@reprlib.recursive_repr()
+def _future_repr(future):
+ info = ' '.join(_future_repr_info(future))
+ return f'<{future.__class__.__name__} {info}>'
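
The duck-typing branch of isfuture() can be shown with a hypothetical third-party class that never subclasses Future:

    import asyncio

    class DuckFuture:
        # The attribute only has to exist on the class and be
        # non-None on the instance to pass the isfuture() check.
        _asyncio_future_blocking = False

    print(asyncio.isfuture(DuckFuture()))   # True
    print(asyncio.isfuture(object()))       # False
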
diff --git a/contrib/tools/python3/Lib/asyncio/base_subprocess.py b/contrib/tools/python3/Lib/asyncio/base_subprocess.py
new file mode 100644
index 0000000000..4c9b0dd565
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/base_subprocess.py
@@ -0,0 +1,285 @@
+import collections
+import subprocess
+import warnings
+
+from . import protocols
+from . import transports
+from .log import logger
+
+
+class BaseSubprocessTransport(transports.SubprocessTransport):
+
+ def __init__(self, loop, protocol, args, shell,
+ stdin, stdout, stderr, bufsize,
+ waiter=None, extra=None, **kwargs):
+ super().__init__(extra)
+ self._closed = False
+ self._protocol = protocol
+ self._loop = loop
+ self._proc = None
+ self._pid = None
+ self._returncode = None
+ self._exit_waiters = []
+ self._pending_calls = collections.deque()
+ self._pipes = {}
+ self._finished = False
+
+ if stdin == subprocess.PIPE:
+ self._pipes[0] = None
+ if stdout == subprocess.PIPE:
+ self._pipes[1] = None
+ if stderr == subprocess.PIPE:
+ self._pipes[2] = None
+
+ # Create the child process: set the _proc attribute
+ try:
+ self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
+ stderr=stderr, bufsize=bufsize, **kwargs)
+ except:
+ self.close()
+ raise
+
+ self._pid = self._proc.pid
+ self._extra['subprocess'] = self._proc
+
+ if self._loop.get_debug():
+ if isinstance(args, (bytes, str)):
+ program = args
+ else:
+ program = args[0]
+ logger.debug('process %r created: pid %s',
+ program, self._pid)
+
+ self._loop.create_task(self._connect_pipes(waiter))
+
+ def __repr__(self):
+ info = [self.__class__.__name__]
+ if self._closed:
+ info.append('closed')
+ if self._pid is not None:
+ info.append(f'pid={self._pid}')
+ if self._returncode is not None:
+ info.append(f'returncode={self._returncode}')
+ elif self._pid is not None:
+ info.append('running')
+ else:
+ info.append('not started')
+
+ stdin = self._pipes.get(0)
+ if stdin is not None:
+ info.append(f'stdin={stdin.pipe}')
+
+ stdout = self._pipes.get(1)
+ stderr = self._pipes.get(2)
+ if stdout is not None and stderr is stdout:
+ info.append(f'stdout=stderr={stdout.pipe}')
+ else:
+ if stdout is not None:
+ info.append(f'stdout={stdout.pipe}')
+ if stderr is not None:
+ info.append(f'stderr={stderr.pipe}')
+
+ return '<{}>'.format(' '.join(info))
+
+ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+ raise NotImplementedError
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closed
+
+ def close(self):
+ if self._closed:
+ return
+ self._closed = True
+
+ for proto in self._pipes.values():
+ if proto is None:
+ continue
+ proto.pipe.close()
+
+ if (self._proc is not None and
+ # has the child process finished?
+ self._returncode is None and
+ # the child process has finished, but the
+ # transport hasn't been notified yet?
+ self._proc.poll() is None):
+
+ if self._loop.get_debug():
+ logger.warning('Close running child process: kill %r', self)
+
+ try:
+ self._proc.kill()
+ except ProcessLookupError:
+ pass
+
+ # Don't clear the _proc reference yet: _post_init() may still run
+
+ def __del__(self, _warn=warnings.warn):
+ if not self._closed:
+ _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+ self.close()
+
+ def get_pid(self):
+ return self._pid
+
+ def get_returncode(self):
+ return self._returncode
+
+ def get_pipe_transport(self, fd):
+ if fd in self._pipes:
+ return self._pipes[fd].pipe
+ else:
+ return None
+
+ def _check_proc(self):
+ if self._proc is None:
+ raise ProcessLookupError()
+
+ def send_signal(self, signal):
+ self._check_proc()
+ self._proc.send_signal(signal)
+
+ def terminate(self):
+ self._check_proc()
+ self._proc.terminate()
+
+ def kill(self):
+ self._check_proc()
+ self._proc.kill()
+
+ async def _connect_pipes(self, waiter):
+ try:
+ proc = self._proc
+ loop = self._loop
+
+ if proc.stdin is not None:
+ _, pipe = await loop.connect_write_pipe(
+ lambda: WriteSubprocessPipeProto(self, 0),
+ proc.stdin)
+ self._pipes[0] = pipe
+
+ if proc.stdout is not None:
+ _, pipe = await loop.connect_read_pipe(
+ lambda: ReadSubprocessPipeProto(self, 1),
+ proc.stdout)
+ self._pipes[1] = pipe
+
+ if proc.stderr is not None:
+ _, pipe = await loop.connect_read_pipe(
+ lambda: ReadSubprocessPipeProto(self, 2),
+ proc.stderr)
+ self._pipes[2] = pipe
+
+ assert self._pending_calls is not None
+
+ loop.call_soon(self._protocol.connection_made, self)
+ for callback, data in self._pending_calls:
+ loop.call_soon(callback, *data)
+ self._pending_calls = None
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ if waiter is not None and not waiter.cancelled():
+ waiter.set_exception(exc)
+ else:
+ if waiter is not None and not waiter.cancelled():
+ waiter.set_result(None)
+
+ def _call(self, cb, *data):
+ if self._pending_calls is not None:
+ self._pending_calls.append((cb, data))
+ else:
+ self._loop.call_soon(cb, *data)
+
+ def _pipe_connection_lost(self, fd, exc):
+ self._call(self._protocol.pipe_connection_lost, fd, exc)
+ self._try_finish()
+
+ def _pipe_data_received(self, fd, data):
+ self._call(self._protocol.pipe_data_received, fd, data)
+
+ def _process_exited(self, returncode):
+ assert returncode is not None, returncode
+ assert self._returncode is None, self._returncode
+ if self._loop.get_debug():
+ logger.info('%r exited with return code %r', self, returncode)
+ self._returncode = returncode
+ if self._proc.returncode is None:
+ # asyncio uses a child watcher: copy the status into the Popen
+ # object. On Python 3.6, it is required to avoid a ResourceWarning.
+ self._proc.returncode = returncode
+ self._call(self._protocol.process_exited)
+
+ self._try_finish()
+
+ async def _wait(self):
+ """Wait until the process exit and return the process return code.
+
+ This method is a coroutine."""
+ if self._returncode is not None:
+ return self._returncode
+
+ waiter = self._loop.create_future()
+ self._exit_waiters.append(waiter)
+ return await waiter
+
+ def _try_finish(self):
+ assert not self._finished
+ if self._returncode is None:
+ return
+ if all(p is not None and p.disconnected
+ for p in self._pipes.values()):
+ self._finished = True
+ self._call(self._call_connection_lost, None)
+
+ def _call_connection_lost(self, exc):
+ try:
+ self._protocol.connection_lost(exc)
+ finally:
+ # wake up futures waiting for wait()
+ for waiter in self._exit_waiters:
+ if not waiter.cancelled():
+ waiter.set_result(self._returncode)
+ self._exit_waiters = None
+ self._loop = None
+ self._proc = None
+ self._protocol = None
+
+
+class WriteSubprocessPipeProto(protocols.BaseProtocol):
+
+ def __init__(self, proc, fd):
+ self.proc = proc
+ self.fd = fd
+ self.pipe = None
+ self.disconnected = False
+
+ def connection_made(self, transport):
+ self.pipe = transport
+
+ def __repr__(self):
+ return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'
+
+ def connection_lost(self, exc):
+ self.disconnected = True
+ self.proc._pipe_connection_lost(self.fd, exc)
+ self.proc = None
+
+ def pause_writing(self):
+ self.proc._protocol.pause_writing()
+
+ def resume_writing(self):
+ self.proc._protocol.resume_writing()
+
+
+class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
+ protocols.Protocol):
+
+ def data_received(self, data):
+ self.proc._pipe_data_received(self.fd, data)
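
BaseSubprocessTransport above drives the SubprocessProtocol callbacks; a minimal output-collecting sketch (the child command, a python -c one-liner, is illustrative):

    import asyncio
    import sys

    class Collect(asyncio.SubprocessProtocol):
        def __init__(self, done):
            self.done = done
            self.chunks = []

        def pipe_data_received(self, fd, data):
            # fd 1 is the child's stdout (index 1 in _pipes above).
            self.chunks.append(data)

        def process_exited(self):
            self.done.set_result(b''.join(self.chunks))

    async def main():
        loop = asyncio.get_running_loop()
        done = loop.create_future()
        transport, _ = await loop.subprocess_exec(
            lambda: Collect(done),
            sys.executable, '-c', 'print("hi")')
        print(await done)    # b'hi\n'
        transport.close()

    asyncio.run(main())
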
diff --git a/contrib/tools/python3/Lib/asyncio/base_tasks.py b/contrib/tools/python3/Lib/asyncio/base_tasks.py
new file mode 100644
index 0000000000..c907b68341
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/base_tasks.py
@@ -0,0 +1,94 @@
+import linecache
+import reprlib
+import traceback
+
+from . import base_futures
+from . import coroutines
+
+
+def _task_repr_info(task):
+ info = base_futures._future_repr_info(task)
+
+ if task.cancelling() and not task.done():
+ # replace status
+ info[0] = 'cancelling'
+
+ info.insert(1, 'name=%r' % task.get_name())
+
+ if task._fut_waiter is not None:
+ info.insert(2, f'wait_for={task._fut_waiter!r}')
+
+ if task._coro:
+ coro = coroutines._format_coroutine(task._coro)
+ info.insert(2, f'coro=<{coro}>')
+
+ return info
+
+
+@reprlib.recursive_repr()
+def _task_repr(task):
+ info = ' '.join(_task_repr_info(task))
+ return f'<{task.__class__.__name__} {info}>'
+
+
+def _task_get_stack(task, limit):
+ frames = []
+ if hasattr(task._coro, 'cr_frame'):
+ # case 1: 'async def' coroutines
+ f = task._coro.cr_frame
+ elif hasattr(task._coro, 'gi_frame'):
+ # case 2: legacy coroutines
+ f = task._coro.gi_frame
+ elif hasattr(task._coro, 'ag_frame'):
+ # case 3: async generators
+ f = task._coro.ag_frame
+ else:
+ # case 4: unknown objects
+ f = None
+ if f is not None:
+ while f is not None:
+ if limit is not None:
+ if limit <= 0:
+ break
+ limit -= 1
+ frames.append(f)
+ f = f.f_back
+ frames.reverse()
+ elif task._exception is not None:
+ tb = task._exception.__traceback__
+ while tb is not None:
+ if limit is not None:
+ if limit <= 0:
+ break
+ limit -= 1
+ frames.append(tb.tb_frame)
+ tb = tb.tb_next
+ return frames
+
+
+def _task_print_stack(task, limit, file):
+ extracted_list = []
+ checked = set()
+ for f in task.get_stack(limit=limit):
+ lineno = f.f_lineno
+ co = f.f_code
+ filename = co.co_filename
+ name = co.co_name
+ if filename not in checked:
+ checked.add(filename)
+ linecache.checkcache(filename)
+ line = linecache.getline(filename, lineno, f.f_globals)
+ extracted_list.append((filename, lineno, name, line))
+
+ exc = task._exception
+ if not extracted_list:
+ print(f'No stack for {task!r}', file=file)
+ elif exc is not None:
+ print(f'Traceback for {task!r} (most recent call last):', file=file)
+ else:
+ print(f'Stack for {task!r} (most recent call last):', file=file)
+
+ traceback.print_list(extracted_list, file=file)
+ if exc is not None:
+ for line in traceback.format_exception_only(exc.__class__, exc):
+ print(line, file=file, end='')
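
These helpers back Task.get_stack() and Task.print_stack(); a quick sketch of the suspended-coroutine case:

    import asyncio

    async def worker():
        await asyncio.sleep(60)

    async def main():
        task = asyncio.create_task(worker())
        await asyncio.sleep(0)   # let worker() start and suspend
        task.print_stack()       # "Stack for <Task ...> (most recent call last):"
        task.cancel()

    asyncio.run(main())
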
diff --git a/contrib/tools/python3/Lib/asyncio/constants.py b/contrib/tools/python3/Lib/asyncio/constants.py
new file mode 100644
index 0000000000..b60c1e4236
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/constants.py
@@ -0,0 +1,41 @@
+# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
+# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
+# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc. http://magic.io
+
+import enum
+
+# After the connection is lost, log warnings after this many write()s.
+LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
+
+# Seconds to wait before retrying accept().
+ACCEPT_RETRY_DELAY = 1
+
+# Number of stack entries to capture in debug mode.
+# The larger the number, the slower the operation in debug mode
+# (see extract_stack() in format_helpers.py).
+DEBUG_STACK_DEPTH = 10
+
+# Number of seconds to wait for SSL handshake to complete
+# The default timeout matches that of Nginx.
+SSL_HANDSHAKE_TIMEOUT = 60.0
+
+# Number of seconds to wait for SSL shutdown to complete
+# The default timeout mimics lingering_time
+SSL_SHUTDOWN_TIMEOUT = 30.0
+
+# Used in sendfile fallback code. We use fallback for platforms
+# that don't support sendfile, or for TLS connections.
+SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256
+
+FLOW_CONTROL_HIGH_WATER_SSL_READ = 256 # KiB
+FLOW_CONTROL_HIGH_WATER_SSL_WRITE = 512 # KiB
+
+# Default timeout for joining the threads in the threadpool
+THREAD_JOIN_TIMEOUT = 300
+
+# The enum should be here to break circular dependencies between
+# base_events and sslproto
+class _SendfileMode(enum.Enum):
+ UNSUPPORTED = enum.auto()
+ TRY_NATIVE = enum.auto()
+ FALLBACK = enum.auto()
diff --git a/contrib/tools/python3/Lib/asyncio/coroutines.py b/contrib/tools/python3/Lib/asyncio/coroutines.py
new file mode 100644
index 0000000000..ab4f30eb51
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/coroutines.py
@@ -0,0 +1,109 @@
+__all__ = 'iscoroutinefunction', 'iscoroutine'
+
+import collections.abc
+import inspect
+import os
+import sys
+import types
+
+
+def _is_debug_mode():
+ # See: https://docs.python.org/3/library/asyncio-dev.html#asyncio-debug-mode.
+ return sys.flags.dev_mode or (not sys.flags.ignore_environment and
+ bool(os.environ.get('PYTHONASYNCIODEBUG')))
+
+
+# A marker for iscoroutinefunction.
+_is_coroutine = object()
+
+
+def iscoroutinefunction(func):
+ """Return True if func is a decorated coroutine function."""
+ return (inspect.iscoroutinefunction(func) or
+ getattr(func, '_is_coroutine', None) is _is_coroutine)
+
+
+# Prioritize native coroutine check to speed-up
+# asyncio.iscoroutine.
+_COROUTINE_TYPES = (types.CoroutineType, collections.abc.Coroutine)
+_iscoroutine_typecache = set()
+
+
+def iscoroutine(obj):
+ """Return True if obj is a coroutine object."""
+ if type(obj) in _iscoroutine_typecache:
+ return True
+
+ if isinstance(obj, _COROUTINE_TYPES):
+ # Just in case, we don't want to cache more than 100
+ # positive types. That shouldn't ever happen, unless
+ # someone is stressing the system on purpose.
+ if len(_iscoroutine_typecache) < 100:
+ _iscoroutine_typecache.add(type(obj))
+ return True
+ else:
+ return False
+
+
+def _format_coroutine(coro):
+ assert iscoroutine(coro)
+
+ def get_name(coro):
+ # Coroutines compiled with Cython sometimes don't have
+ # proper __qualname__ or __name__. While that is a bug
+ # in Cython, asyncio shouldn't crash with an AttributeError
+ # in its __repr__ functions.
+ if hasattr(coro, '__qualname__') and coro.__qualname__:
+ coro_name = coro.__qualname__
+ elif hasattr(coro, '__name__') and coro.__name__:
+ coro_name = coro.__name__
+ else:
+ # Stop masking Cython bugs, expose them in a friendly way.
+ coro_name = f'<{type(coro).__name__} without __name__>'
+ return f'{coro_name}()'
+
+ def is_running(coro):
+ try:
+ return coro.cr_running
+ except AttributeError:
+ try:
+ return coro.gi_running
+ except AttributeError:
+ return False
+
+ coro_code = None
+ if hasattr(coro, 'cr_code') and coro.cr_code:
+ coro_code = coro.cr_code
+ elif hasattr(coro, 'gi_code') and coro.gi_code:
+ coro_code = coro.gi_code
+
+ coro_name = get_name(coro)
+
+ if not coro_code:
+ # Built-in types might not have __qualname__ or __name__.
+ if is_running(coro):
+ return f'{coro_name} running'
+ else:
+ return coro_name
+
+ coro_frame = None
+ if hasattr(coro, 'gi_frame') and coro.gi_frame:
+ coro_frame = coro.gi_frame
+ elif hasattr(coro, 'cr_frame') and coro.cr_frame:
+ coro_frame = coro.cr_frame
+
+ # If Cython's coroutine has a fake code object without proper
+ # co_filename -- expose that.
+ filename = coro_code.co_filename or '<empty co_filename>'
+
+ lineno = 0
+
+ if coro_frame is not None:
+ lineno = coro_frame.f_lineno
+ coro_repr = f'{coro_name} running at {filename}:{lineno}'
+
+ else:
+ lineno = coro_code.co_firstlineno
+ coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
+
+ return coro_repr
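
A quick sketch of the two public predicates exported above:

    import asyncio

    async def f():
        pass

    print(asyncio.iscoroutinefunction(f))   # True
    coro = f()
    print(asyncio.iscoroutine(coro))        # True
    print(asyncio.iscoroutine(f))           # False: f is a function, not a coroutine object
    coro.close()                            # avoid a "never awaited" warning
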
diff --git a/contrib/tools/python3/Lib/asyncio/events.py b/contrib/tools/python3/Lib/asyncio/events.py
new file mode 100644
index 0000000000..016852880c
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/events.py
@@ -0,0 +1,868 @@
+"""Event loop and event loop policy."""
+
+# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
+# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
+# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc. http://magic.io
+
+__all__ = (
+ 'AbstractEventLoopPolicy',
+ 'AbstractEventLoop', 'AbstractServer',
+ 'Handle', 'TimerHandle',
+ 'get_event_loop_policy', 'set_event_loop_policy',
+ 'get_event_loop', 'set_event_loop', 'new_event_loop',
+ 'get_child_watcher', 'set_child_watcher',
+ '_set_running_loop', 'get_running_loop',
+ '_get_running_loop',
+)
+
+import contextvars
+import os
+import signal
+import socket
+import subprocess
+import sys
+import threading
+
+from . import format_helpers
+
+
+class Handle:
+ """Object returned by callback registration methods."""
+
+ __slots__ = ('_callback', '_args', '_cancelled', '_loop',
+ '_source_traceback', '_repr', '__weakref__',
+ '_context')
+
+ def __init__(self, callback, args, loop, context=None):
+ if context is None:
+ context = contextvars.copy_context()
+ self._context = context
+ self._loop = loop
+ self._callback = callback
+ self._args = args
+ self._cancelled = False
+ self._repr = None
+ if self._loop.get_debug():
+ self._source_traceback = format_helpers.extract_stack(
+ sys._getframe(1))
+ else:
+ self._source_traceback = None
+
+ def _repr_info(self):
+ info = [self.__class__.__name__]
+ if self._cancelled:
+ info.append('cancelled')
+ if self._callback is not None:
+ info.append(format_helpers._format_callback_source(
+ self._callback, self._args))
+ if self._source_traceback:
+ frame = self._source_traceback[-1]
+ info.append(f'created at {frame[0]}:{frame[1]}')
+ return info
+
+ def __repr__(self):
+ if self._repr is not None:
+ return self._repr
+ info = self._repr_info()
+ return '<{}>'.format(' '.join(info))
+
+ def get_context(self):
+ return self._context
+
+ def cancel(self):
+ if not self._cancelled:
+ self._cancelled = True
+ if self._loop.get_debug():
+ # Keep a representation in debug mode to keep callback and
+ # parameters. For example, to log the warning
+ # "Executing <Handle...> took 2.5 second"
+ self._repr = repr(self)
+ self._callback = None
+ self._args = None
+
+ def cancelled(self):
+ return self._cancelled
+
+ def _run(self):
+ try:
+ self._context.run(self._callback, *self._args)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ cb = format_helpers._format_callback_source(
+ self._callback, self._args)
+ msg = f'Exception in callback {cb}'
+ context = {
+ 'message': msg,
+ 'exception': exc,
+ 'handle': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ self = None # Needed to break cycles when an exception occurs.
+
+
+class TimerHandle(Handle):
+ """Object returned by timed callback registration methods."""
+
+ __slots__ = ['_scheduled', '_when']
+
+ def __init__(self, when, callback, args, loop, context=None):
+ super().__init__(callback, args, loop, context)
+ if self._source_traceback:
+ del self._source_traceback[-1]
+ self._when = when
+ self._scheduled = False
+
+ def _repr_info(self):
+ info = super()._repr_info()
+ pos = 2 if self._cancelled else 1
+ info.insert(pos, f'when={self._when}')
+ return info
+
+ def __hash__(self):
+ return hash(self._when)
+
+ def __lt__(self, other):
+ if isinstance(other, TimerHandle):
+ return self._when < other._when
+ return NotImplemented
+
+ def __le__(self, other):
+ if isinstance(other, TimerHandle):
+ return self._when < other._when or self.__eq__(other)
+ return NotImplemented
+
+ def __gt__(self, other):
+ if isinstance(other, TimerHandle):
+ return self._when > other._when
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, TimerHandle):
+ return self._when > other._when or self.__eq__(other)
+ return NotImplemented
+
+ def __eq__(self, other):
+ if isinstance(other, TimerHandle):
+ return (self._when == other._when and
+ self._callback == other._callback and
+ self._args == other._args and
+ self._cancelled == other._cancelled)
+ return NotImplemented
+
+ def cancel(self):
+ if not self._cancelled:
+ self._loop._timer_handle_cancelled(self)
+ super().cancel()
+
+ def when(self):
+ """Return a scheduled callback time.
+
+ The time is an absolute timestamp, using the same time
+ reference as loop.time().
+ """
+ return self._when
+
+
+class AbstractServer:
+ """Abstract server returned by create_server()."""
+
+ def close(self):
+ """Stop serving. This leaves existing connections open."""
+ raise NotImplementedError
+
+ def get_loop(self):
+ """Get the event loop the Server object is attached to."""
+ raise NotImplementedError
+
+ def is_serving(self):
+ """Return True if the server is accepting connections."""
+ raise NotImplementedError
+
+ async def start_serving(self):
+ """Start accepting connections.
+
+ This method is idempotent, so it can be called when
+ the server is already serving.
+ """
+ raise NotImplementedError
+
+ async def serve_forever(self):
+ """Start accepting connections until the coroutine is cancelled.
+
+ The server is closed when the coroutine is cancelled.
+ """
+ raise NotImplementedError
+
+ async def wait_closed(self):
+ """Coroutine to wait until service is closed."""
+ raise NotImplementedError
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *exc):
+ self.close()
+ await self.wait_closed()
+
+
+class AbstractEventLoop:
+ """Abstract event loop."""
+
+ # Running and stopping the event loop.
+
+ def run_forever(self):
+ """Run the event loop until stop() is called."""
+ raise NotImplementedError
+
+ def run_until_complete(self, future):
+ """Run the event loop until a Future is done.
+
+ Return the Future's result, or raise its exception.
+ """
+ raise NotImplementedError
+
+ def stop(self):
+ """Stop the event loop as soon as reasonable.
+
+ Exactly how soon that is may depend on the implementation, but
+ no more I/O callbacks should be scheduled.
+ """
+ raise NotImplementedError
+
+ def is_running(self):
+ """Return whether the event loop is currently running."""
+ raise NotImplementedError
+
+ def is_closed(self):
+ """Returns True if the event loop was closed."""
+ raise NotImplementedError
+
+ def close(self):
+ """Close the loop.
+
+ The loop should not be running.
+
+ This is idempotent and irreversible.
+
+ No other methods should be called after this one.
+ """
+ raise NotImplementedError
+
+ async def shutdown_asyncgens(self):
+ """Shutdown all active asynchronous generators."""
+ raise NotImplementedError
+
+ async def shutdown_default_executor(self):
+ """Schedule the shutdown of the default executor."""
+ raise NotImplementedError
+
+ # Methods scheduling callbacks. All these return Handles.
+
+ def _timer_handle_cancelled(self, handle):
+ """Notification that a TimerHandle has been cancelled."""
+ raise NotImplementedError
+
+ def call_soon(self, callback, *args, context=None):
+ return self.call_later(0, callback, *args, context=context)
+
+ def call_later(self, delay, callback, *args, context=None):
+ raise NotImplementedError
+
+ def call_at(self, when, callback, *args, context=None):
+ raise NotImplementedError
+
+ def time(self):
+ raise NotImplementedError
+
+ def create_future(self):
+ raise NotImplementedError
+
+ # Method scheduling a coroutine object: create a task.
+
+ def create_task(self, coro, *, name=None, context=None):
+ raise NotImplementedError
+
+ # Methods for interacting with threads.
+
+ def call_soon_threadsafe(self, callback, *args, context=None):
+ raise NotImplementedError
+
+ def run_in_executor(self, executor, func, *args):
+ raise NotImplementedError
+
+ def set_default_executor(self, executor):
+ raise NotImplementedError
+
+ # Network I/O methods returning Futures.
+
+ async def getaddrinfo(self, host, port, *,
+ family=0, type=0, proto=0, flags=0):
+ raise NotImplementedError
+
+ async def getnameinfo(self, sockaddr, flags=0):
+ raise NotImplementedError
+
+ async def create_connection(
+ self, protocol_factory, host=None, port=None,
+ *, ssl=None, family=0, proto=0,
+ flags=0, sock=None, local_addr=None,
+ server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ happy_eyeballs_delay=None, interleave=None):
+ raise NotImplementedError
+
+ async def create_server(
+ self, protocol_factory, host=None, port=None,
+ *, family=socket.AF_UNSPEC,
+ flags=socket.AI_PASSIVE, sock=None, backlog=100,
+ ssl=None, reuse_address=None, reuse_port=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ start_serving=True):
+ """A coroutine which creates a TCP server bound to host and port.
+
+ The return value is a Server object which can be used to stop
+ the service.
+
+ If host is an empty string or None, all interfaces are assumed
+ and a list of multiple sockets will be returned (most likely
+ one for IPv4 and another one for IPv6). The host parameter can also be
+ a sequence (e.g. list) of hosts to bind to.
+
+ family can be set to either AF_INET or AF_INET6 to force the
+ socket to use IPv4 or IPv6. If not set it will be determined
+ from host (defaults to AF_UNSPEC).
+
+ flags is a bitmask for getaddrinfo().
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+
+ backlog is the maximum number of queued connections passed to
+ listen() (defaults to 100).
+
+ ssl can be set to an SSLContext to enable SSL over the
+ accepted connections.
+
+ reuse_address tells the kernel to reuse a local socket in
+ TIME_WAIT state, without waiting for its natural timeout to
+ expire. If not specified, it will automatically be set to True on
+ UNIX.
+
+ reuse_port tells the kernel to allow this endpoint to be bound to
+ the same port as other existing endpoints are bound to, so long as
+ they all set this flag when being created. This option is not
+ supported on Windows.
+
+ ssl_handshake_timeout is the time in seconds that an SSL server
+ will wait for completion of the SSL handshake before aborting the
+ connection. Default is 60s.
+
+ ssl_shutdown_timeout is the time in seconds that an SSL server
+ will wait for completion of the SSL shutdown procedure
+ before aborting the connection. Default is 30s.
+
+ start_serving set to True (default) causes the created server
+ to start accepting connections immediately. When set to False,
+ the user should await Server.start_serving() or Server.serve_forever()
+ to make the server start accepting connections.
+ """
+ raise NotImplementedError
+
+ async def sendfile(self, transport, file, offset=0, count=None,
+ *, fallback=True):
+ """Send a file through a transport.
+
+ Return an amount of sent bytes.
+ """
+ raise NotImplementedError
+
+ async def start_tls(self, transport, protocol, sslcontext, *,
+ server_side=False,
+ server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ """Upgrade a transport to TLS.
+
+ Return a new transport that *protocol* should start using
+ immediately.
+ """
+ raise NotImplementedError
+
+ async def create_unix_connection(
+ self, protocol_factory, path=None, *,
+ ssl=None, sock=None,
+ server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ raise NotImplementedError
+
+ async def create_unix_server(
+ self, protocol_factory, path=None, *,
+ sock=None, backlog=100, ssl=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ start_serving=True):
+ """A coroutine which creates a UNIX Domain Socket server.
+
+ The return value is a Server object, which can be used to stop
+ the service.
+
+ path is a str, representing a file system path to bind the
+ server socket to.
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+
+ backlog is the maximum number of queued connections passed to
+ listen() (defaults to 100).
+
+ ssl can be set to an SSLContext to enable SSL over the
+ accepted connections.
+
+ ssl_handshake_timeout is the time in seconds that an SSL server
+ will wait for the SSL handshake to complete (defaults to 60s).
+
+ ssl_shutdown_timeout is the time in seconds that an SSL server
+ will wait for the SSL shutdown to finish (defaults to 30s).
+
+ start_serving set to True (default) causes the created server
+ to start accepting connections immediately. When set to False,
+ the user should await Server.start_serving() or Server.serve_forever()
+ to make the server start accepting connections.
+ """
+ raise NotImplementedError
+
+ async def connect_accepted_socket(
+ self, protocol_factory, sock,
+ *, ssl=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ """Handle an accepted connection.
+
+ This is used by servers that accept connections outside of
+ asyncio, but use asyncio to handle connections.
+
+ This method is a coroutine. When completed, the coroutine
+ returns a (transport, protocol) pair.
+ """
+ raise NotImplementedError
+
+ async def create_datagram_endpoint(self, protocol_factory,
+ local_addr=None, remote_addr=None, *,
+ family=0, proto=0, flags=0,
+ reuse_address=None, reuse_port=None,
+ allow_broadcast=None, sock=None):
+ """A coroutine which creates a datagram endpoint.
+
+ This method will try to establish the endpoint in the background.
+ When successful, the coroutine returns a (transport, protocol) pair.
+
+ protocol_factory must be a callable returning a protocol instance.
+
+ The socket family will be AF_INET, AF_INET6 or AF_UNIX, depending on
+ host (or on family, if specified); the socket type will be SOCK_DGRAM.
+
+ reuse_address tells the kernel to reuse a local socket in
+ TIME_WAIT state, without waiting for its natural timeout to
+ expire. If not specified it will automatically be set to True on
+ UNIX.
+
+ reuse_port tells the kernel to allow this endpoint to be bound to
+ the same port as other existing endpoints are bound to, so long as
+ they all set this flag when being created. This option is not
+ supported on Windows and some UNIXes. If the
+ :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
+ capability is unsupported.
+
+ allow_broadcast tells the kernel to allow this endpoint to send
+ messages to the broadcast address.
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+ """
+ raise NotImplementedError
+
+ # Pipes and subprocesses.
+
+ async def connect_read_pipe(self, protocol_factory, pipe):
+ """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+ protocol_factory should instantiate an object with the Protocol interface.
+ pipe is a file-like object.
+ Return pair (transport, protocol), where transport supports the
+ ReadTransport interface."""
+ # The reason to accept a file-like object instead of just a file
+ # descriptor is that we need to own the pipe and close it when the
+ # transport is finished. Passing f.fileno() can lead to complicated
+ # errors if the fd is closed in the pipe transport and then f is
+ # closed as well, or vice versa.
+ raise NotImplementedError
+
+ async def connect_write_pipe(self, protocol_factory, pipe):
+ """Register write pipe in event loop.
+
+ protocol_factory should instantiate an object with the BaseProtocol
+ interface. pipe is a file-like object already switched to non-blocking
+ mode. Return pair (transport, protocol), where transport supports the
+ WriteTransport interface."""
+ # The reason to accept a file-like object instead of just a file
+ # descriptor is that we need to own the pipe and close it when the
+ # transport is finished. Passing f.fileno() can lead to complicated
+ # errors if the fd is closed in the pipe transport and then f is
+ # closed as well, or vice versa.
+ raise NotImplementedError
+
+ async def subprocess_shell(self, protocol_factory, cmd, *,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **kwargs):
+ raise NotImplementedError
+
+ async def subprocess_exec(self, protocol_factory, *args,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **kwargs):
+ raise NotImplementedError
+
+ # Ready-based callback registration methods.
+ # The add_*() methods return None.
+ # The remove_*() methods return True if something was removed,
+ # False if there was nothing to delete.
+
+ def add_reader(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_reader(self, fd):
+ raise NotImplementedError
+
+ def add_writer(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_writer(self, fd):
+ raise NotImplementedError
+
+ # Completion based I/O methods returning Futures.
+
+ async def sock_recv(self, sock, nbytes):
+ raise NotImplementedError
+
+ async def sock_recv_into(self, sock, buf):
+ raise NotImplementedError
+
+ async def sock_recvfrom(self, sock, bufsize):
+ raise NotImplementedError
+
+ async def sock_recvfrom_into(self, sock, buf, nbytes=0):
+ raise NotImplementedError
+
+ async def sock_sendall(self, sock, data):
+ raise NotImplementedError
+
+ async def sock_sendto(self, sock, data, address):
+ raise NotImplementedError
+
+ async def sock_connect(self, sock, address):
+ raise NotImplementedError
+
+ async def sock_accept(self, sock):
+ raise NotImplementedError
+
+ async def sock_sendfile(self, sock, file, offset=0, count=None,
+ *, fallback=None):
+ raise NotImplementedError
+
+ # Signal handling.
+
+ def add_signal_handler(self, sig, callback, *args):
+ raise NotImplementedError
+
+ def remove_signal_handler(self, sig):
+ raise NotImplementedError
+
+ # Task factory.
+
+ def set_task_factory(self, factory):
+ raise NotImplementedError
+
+ def get_task_factory(self):
+ raise NotImplementedError
+
+ # Error handlers.
+
+ def get_exception_handler(self):
+ raise NotImplementedError
+
+ def set_exception_handler(self, handler):
+ raise NotImplementedError
+
+ def default_exception_handler(self, context):
+ raise NotImplementedError
+
+ def call_exception_handler(self, context):
+ raise NotImplementedError
+
+ # Debug flag management.
+
+ def get_debug(self):
+ raise NotImplementedError
+
+ def set_debug(self, enabled):
+ raise NotImplementedError
+
+
+class AbstractEventLoopPolicy:
+ """Abstract policy for accessing the event loop."""
+
+ def get_event_loop(self):
+ """Get the event loop for the current context.
+
+ Returns an event loop object implementing the AbstractEventLoop interface,
+ or raises an exception in case no event loop has been set for the
+ current context and the current policy does not specify to create one.
+
+ It should never return None."""
+ raise NotImplementedError
+
+ def set_event_loop(self, loop):
+ """Set the event loop for the current context to loop."""
+ raise NotImplementedError
+
+ def new_event_loop(self):
+ """Create and return a new event loop object according to this
+ policy's rules. If there's a need to set this loop as the event loop for
+ the current context, set_event_loop must be called explicitly."""
+ raise NotImplementedError
+
+ # Child processes handling (Unix only).
+
+ def get_child_watcher(self):
+ "Get the watcher for child processes."
+ raise NotImplementedError
+
+ def set_child_watcher(self, watcher):
+ """Set the watcher for child processes."""
+ raise NotImplementedError
+
+
+class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
+ """Default policy implementation for accessing the event loop.
+
+ In this policy, each thread has its own event loop. However, we
+ only automatically create an event loop by default for the main
+ thread; other threads by default have no event loop.
+
+ Other policies may have different rules (e.g. a single global
+ event loop, or automatically creating an event loop per thread, or
+ using some other notion of context to which an event loop is
+ associated).
+ """
+
+ _loop_factory = None
+
+ class _Local(threading.local):
+ _loop = None
+ _set_called = False
+
+ def __init__(self):
+ self._local = self._Local()
+
+ def get_event_loop(self):
+ """Get the event loop for the current context.
+
+ Returns an instance of EventLoop or raises an exception.
+ """
+ if (self._local._loop is None and
+ not self._local._set_called and
+ threading.current_thread() is threading.main_thread()):
+ stacklevel = 2
+ try:
+ f = sys._getframe(1)
+ except AttributeError:
+ pass
+ else:
+ # Move up the call stack so that the warning is attached
+ # to the line outside asyncio itself.
+ while f:
+ module = f.f_globals.get('__name__')
+ if not (module == 'asyncio' or module.startswith('asyncio.')):
+ break
+ f = f.f_back
+ stacklevel += 1
+ import warnings
+ warnings.warn('There is no current event loop',
+ DeprecationWarning, stacklevel=stacklevel)
+ self.set_event_loop(self.new_event_loop())
+
+ if self._local._loop is None:
+ raise RuntimeError('There is no current event loop in thread %r.'
+ % threading.current_thread().name)
+
+ return self._local._loop
+
+ def set_event_loop(self, loop):
+ """Set the event loop."""
+ self._local._set_called = True
+ if loop is not None and not isinstance(loop, AbstractEventLoop):
+ raise TypeError(f"loop must be an instance of AbstractEventLoop or None, not '{type(loop).__name__}'")
+ self._local._loop = loop
+
+ def new_event_loop(self):
+ """Create a new event loop.
+
+ You must call set_event_loop() to make this the current event
+ loop.
+ """
+ return self._loop_factory()
+
+
+# Event loop policy. The policy itself is always global, even if the
+# policy's rules say that there is an event loop per thread (or other
+# notion of context). The default policy is installed by the first
+# call to get_event_loop_policy().
+_event_loop_policy = None
+
+# Lock for protecting the on-the-fly creation of the event loop policy.
+_lock = threading.Lock()
+
+
+# A TLS for the running event loop, used by _get_running_loop.
+class _RunningLoop(threading.local):
+ loop_pid = (None, None)
+
+
+_running_loop = _RunningLoop()
+
+
+def get_running_loop():
+ """Return the running event loop. Raise a RuntimeError if there is none.
+
+ This function is thread-specific.
+ """
+ # NOTE: this function is implemented in C (see _asynciomodule.c)
+ loop = _get_running_loop()
+ if loop is None:
+ raise RuntimeError('no running event loop')
+ return loop
+
+
+def _get_running_loop():
+ """Return the running event loop or None.
+
+ This is a low-level function intended to be used by event loops.
+ This function is thread-specific.
+ """
+ # NOTE: this function is implemented in C (see _asynciomodule.c)
+ running_loop, pid = _running_loop.loop_pid
+ if running_loop is not None and pid == os.getpid():
+ return running_loop
+
+
+def _set_running_loop(loop):
+ """Set the running event loop.
+
+ This is a low-level function intended to be used by event loops.
+ This function is thread-specific.
+ """
+ # NOTE: this function is implemented in C (see _asynciomodule.c)
+ _running_loop.loop_pid = (loop, os.getpid())
+
+
+def _init_event_loop_policy():
+ global _event_loop_policy
+ with _lock:
+ if _event_loop_policy is None: # pragma: no branch
+ from . import DefaultEventLoopPolicy
+ _event_loop_policy = DefaultEventLoopPolicy()
+
+
+def get_event_loop_policy():
+ """Get the current event loop policy."""
+ if _event_loop_policy is None:
+ _init_event_loop_policy()
+ return _event_loop_policy
+
+
+def set_event_loop_policy(policy):
+ """Set the current event loop policy.
+
+ If policy is None, the default policy is restored."""
+ global _event_loop_policy
+ if policy is not None and not isinstance(policy, AbstractEventLoopPolicy):
+ raise TypeError(f"policy must be an instance of AbstractEventLoopPolicy or None, not '{type(policy).__name__}'")
+ _event_loop_policy = policy
+
+
+def get_event_loop():
+ """Return an asyncio event loop.
+
+ When called from a coroutine or a callback (e.g. scheduled with call_soon
+ or similar API), this function will always return the running event loop.
+
+ If there is no running event loop set, the function will return
+ the result of `get_event_loop_policy().get_event_loop()` call.
+ """
+ # NOTE: this function is implemented in C (see _asynciomodule.c)
+ current_loop = _get_running_loop()
+ if current_loop is not None:
+ return current_loop
+ return get_event_loop_policy().get_event_loop()
+
+
+def set_event_loop(loop):
+ """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
+ get_event_loop_policy().set_event_loop(loop)
+
+
+def new_event_loop():
+ """Equivalent to calling get_event_loop_policy().new_event_loop()."""
+ return get_event_loop_policy().new_event_loop()
+
+
+def get_child_watcher():
+ """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
+ return get_event_loop_policy().get_child_watcher()
+
+
+def set_child_watcher(watcher):
+ """Equivalent to calling
+ get_event_loop_policy().set_child_watcher(watcher)."""
+ return get_event_loop_policy().set_child_watcher(watcher)
+
+
+# Alias pure-Python implementations for testing purposes.
+_py__get_running_loop = _get_running_loop
+_py__set_running_loop = _set_running_loop
+_py_get_running_loop = get_running_loop
+_py_get_event_loop = get_event_loop
+
+
+try:
+ # get_event_loop() is one of the most frequently called
+ # functions in asyncio. Pure Python implementation is
+ # about 4 times slower than C-accelerated.
+ from _asyncio import (_get_running_loop, _set_running_loop,
+ get_running_loop, get_event_loop)
+except ImportError:
+ pass
+else:
+ # Alias C implementations for testing purposes.
+ _c__get_running_loop = _get_running_loop
+ _c__set_running_loop = _set_running_loop
+ _c_get_running_loop = get_running_loop
+ _c_get_event_loop = get_event_loop
+
+
+if hasattr(os, 'fork'):
+ def on_fork():
+ # Reset the loop and wakeupfd in the forked child process.
+ if _event_loop_policy is not None:
+ _event_loop_policy._local = BaseDefaultEventLoopPolicy._Local()
+ _set_running_loop(None)
+ signal.set_wakeup_fd(-1)
+
+ os.register_at_fork(after_in_child=on_fork)
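
The split between the running-loop accessors and the policy fallback can be seen in a sketch:

    import asyncio

    async def main():
        # Inside a coroutine both accessors return the running loop.
        assert asyncio.get_running_loop() is asyncio.get_event_loop()

    asyncio.run(main())

    # With no running loop, get_running_loop() raises, while
    # get_event_loop() falls back to the policy (and may warn).
    try:
        asyncio.get_running_loop()
    except RuntimeError as exc:
        print(exc)    # "no running event loop"
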
diff --git a/contrib/tools/python3/Lib/asyncio/exceptions.py b/contrib/tools/python3/Lib/asyncio/exceptions.py
new file mode 100644
index 0000000000..5ece595aad
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/exceptions.py
@@ -0,0 +1,62 @@
+"""asyncio exceptions."""
+
+
+__all__ = ('BrokenBarrierError',
+ 'CancelledError', 'InvalidStateError', 'TimeoutError',
+ 'IncompleteReadError', 'LimitOverrunError',
+ 'SendfileNotAvailableError')
+
+
+class CancelledError(BaseException):
+ """The Future or Task was cancelled."""
+
+
+TimeoutError = TimeoutError # make local alias for the standard exception
+
+
+class InvalidStateError(Exception):
+ """The operation is not allowed in this state."""
+
+
+class SendfileNotAvailableError(RuntimeError):
+ """Sendfile syscall is not available.
+
+ Raised if the OS does not support the sendfile syscall for the
+ given socket or file type.
+ """
+
+
+class IncompleteReadError(EOFError):
+ """
+ Incomplete read error. Attributes:
+
+ - partial: bytes read before the end of the stream was reached
+ - expected: total number of expected bytes (or None if unknown)
+ """
+ def __init__(self, partial, expected):
+ r_expected = 'undefined' if expected is None else repr(expected)
+ super().__init__(f'{len(partial)} bytes read on a total of '
+ f'{r_expected} expected bytes')
+ self.partial = partial
+ self.expected = expected
+
+ def __reduce__(self):
+ return type(self), (self.partial, self.expected)
+
+
+class LimitOverrunError(Exception):
+ """Reached the buffer limit while looking for a separator.
+
+ Attributes:
+ - consumed: total number of bytes to be consumed.
+ """
+ def __init__(self, message, consumed):
+ super().__init__(message)
+ self.consumed = consumed
+
+ def __reduce__(self):
+ return type(self), (self.args[0], self.consumed)
+
+
+class BrokenBarrierError(RuntimeError):
+ """Barrier is broken by barrier.abort() call."""
diff --git a/contrib/tools/python3/Lib/asyncio/format_helpers.py b/contrib/tools/python3/Lib/asyncio/format_helpers.py
new file mode 100644
index 0000000000..27d11fd4fa
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/format_helpers.py
@@ -0,0 +1,76 @@
+import functools
+import inspect
+import reprlib
+import sys
+import traceback
+
+from . import constants
+
+
+def _get_function_source(func):
+ func = inspect.unwrap(func)
+ if inspect.isfunction(func):
+ code = func.__code__
+ return (code.co_filename, code.co_firstlineno)
+ if isinstance(func, functools.partial):
+ return _get_function_source(func.func)
+ if isinstance(func, functools.partialmethod):
+ return _get_function_source(func.func)
+ return None
+
+
+def _format_callback_source(func, args):
+ func_repr = _format_callback(func, args, None)
+ source = _get_function_source(func)
+ if source:
+ func_repr += f' at {source[0]}:{source[1]}'
+ return func_repr
+
+
+def _format_args_and_kwargs(args, kwargs):
+ """Format function arguments and keyword arguments.
+
+ Special case for a single parameter: ('hello',) is formatted as ('hello').
+ """
+ # use reprlib to limit the length of the output
+ items = []
+ if args:
+ items.extend(reprlib.repr(arg) for arg in args)
+ if kwargs:
+ items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
+ return '({})'.format(', '.join(items))
+
+
+def _format_callback(func, args, kwargs, suffix=''):
+ if isinstance(func, functools.partial):
+ suffix = _format_args_and_kwargs(args, kwargs) + suffix
+ return _format_callback(func.func, func.args, func.keywords, suffix)
+
+ if hasattr(func, '__qualname__') and func.__qualname__:
+ func_repr = func.__qualname__
+ elif hasattr(func, '__name__') and func.__name__:
+ func_repr = func.__name__
+ else:
+ func_repr = repr(func)
+
+ func_repr += _format_args_and_kwargs(args, kwargs)
+ if suffix:
+ func_repr += suffix
+ return func_repr
+
+
+def extract_stack(f=None, limit=None):
+ """Replacement for traceback.extract_stack() that only does the
+ necessary work for asyncio debug mode.
+ """
+ if f is None:
+ f = sys._getframe().f_back
+ if limit is None:
+ # Limit the amount of work to a reasonable amount, as extract_stack()
+ # can be called for each coroutine and future in debug mode.
+ limit = constants.DEBUG_STACK_DEPTH
+ stack = traceback.StackSummary.extract(traceback.walk_stack(f),
+ limit=limit,
+ lookup_lines=False)
+ stack.reverse()
+ return stack
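
These are private helpers, but their output is easy to picture; a sketch (the string in the comment is indicative, the path and line number will differ):

    import functools
    from asyncio import format_helpers

    def handler(a, b=0):
        pass

    cb = functools.partial(handler, 1, b=2)
    # Formats the callback with its bound arguments plus source location,
    # e.g. "handler(1, b=2)() at /path/to/module.py:4".
    print(format_helpers._format_callback_source(cb, ()))
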
diff --git a/contrib/tools/python3/Lib/asyncio/futures.py b/contrib/tools/python3/Lib/asyncio/futures.py
new file mode 100644
index 0000000000..97fc4e3fcb
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/futures.py
@@ -0,0 +1,428 @@
+"""A Future class similar to the one in PEP 3148."""
+
+__all__ = (
+ 'Future', 'wrap_future', 'isfuture',
+)
+
+import concurrent.futures
+import contextvars
+import logging
+import sys
+from types import GenericAlias
+
+from . import base_futures
+from . import events
+from . import exceptions
+from . import format_helpers
+
+
+isfuture = base_futures.isfuture
+
+
+_PENDING = base_futures._PENDING
+_CANCELLED = base_futures._CANCELLED
+_FINISHED = base_futures._FINISHED
+
+
+STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
+
+
+class Future:
+ """This class is *almost* compatible with concurrent.futures.Future.
+
+ Differences:
+
+ - This class is not thread-safe.
+
+ - result() and exception() do not take a timeout argument and
+ raise an exception when the future isn't done yet.
+
+ - Callbacks registered with add_done_callback() are always called
+ via the event loop's call_soon().
+
+ - This class is not compatible with the wait() and as_completed()
+ methods in the concurrent.futures package.
+
+ (In Python 3.4 or later we may be able to unify the implementations.)
+ """
+
+ # Class variables serving as defaults for instance variables.
+ _state = _PENDING
+ _result = None
+ _exception = None
+ _loop = None
+ _source_traceback = None
+ _cancel_message = None
+ # A saved CancelledError for later chaining as an exception context.
+ _cancelled_exc = None
+
+ # This field is used for a dual purpose:
+ # - Its presence is a marker to declare that a class implements
+ # the Future protocol (i.e. is intended to be duck-type compatible).
+ # The value must also be not-None, to enable a subclass to declare
+ # that it is not compatible by setting this to None.
+ # - It is set by __iter__() below so that Task._step() can tell
+ # the difference between
+ # `await Future()` or `yield from Future()` (correct) vs.
+ # `yield Future()` (incorrect).
+ _asyncio_future_blocking = False
+
+ __log_traceback = False
+
+ def __init__(self, *, loop=None):
+ """Initialize the future.
+
+ The optional loop argument allows explicitly setting the event
+ loop object used by the future. If it's not provided, the future uses
+ the default event loop.
+ """
+ if loop is None:
+ self._loop = events.get_event_loop()
+ else:
+ self._loop = loop
+ self._callbacks = []
+ if self._loop.get_debug():
+ self._source_traceback = format_helpers.extract_stack(
+ sys._getframe(1))
+
+ def __repr__(self):
+ return base_futures._future_repr(self)
+
+ def __del__(self):
+ if not self.__log_traceback:
+ # set_exception() was not called, or result() or exception()
+ # has consumed the exception
+ return
+ exc = self._exception
+ context = {
+ 'message':
+ f'{self.__class__.__name__} exception was never retrieved',
+ 'exception': exc,
+ 'future': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+ @property
+ def _log_traceback(self):
+ return self.__log_traceback
+
+ @_log_traceback.setter
+ def _log_traceback(self, val):
+ if val:
+ raise ValueError('_log_traceback can only be set to False')
+ self.__log_traceback = False
+
+ def get_loop(self):
+ """Return the event loop the Future is bound to."""
+ loop = self._loop
+ if loop is None:
+ raise RuntimeError("Future object is not initialized.")
+ return loop
+
+ def _make_cancelled_error(self):
+ """Create the CancelledError to raise if the Future is cancelled.
+
+ This should only be called once when handling a cancellation since
+ it erases the saved context exception value.
+ """
+ if self._cancelled_exc is not None:
+ exc = self._cancelled_exc
+ self._cancelled_exc = None
+ return exc
+
+ if self._cancel_message is None:
+ exc = exceptions.CancelledError()
+ else:
+ exc = exceptions.CancelledError(self._cancel_message)
+ exc.__context__ = self._cancelled_exc
+ # Remove the reference since we don't need this anymore.
+ self._cancelled_exc = None
+ return exc
+
+ def cancel(self, msg=None):
+ """Cancel the future and schedule callbacks.
+
+ If the future is already done or cancelled, return False. Otherwise,
+ change the future's state to cancelled, schedule the callbacks and
+ return True.
+ """
+ self.__log_traceback = False
+ if self._state != _PENDING:
+ return False
+ self._state = _CANCELLED
+ self._cancel_message = msg
+ self.__schedule_callbacks()
+ return True
+
+ def __schedule_callbacks(self):
+ """Internal: Ask the event loop to call all callbacks.
+
+ The callbacks are scheduled to be called as soon as possible. Also
+ clears the callback list.
+ """
+ callbacks = self._callbacks[:]
+ if not callbacks:
+ return
+
+ self._callbacks[:] = []
+ for callback, ctx in callbacks:
+ self._loop.call_soon(callback, self, context=ctx)
+
+ def cancelled(self):
+ """Return True if the future was cancelled."""
+ return self._state == _CANCELLED
+
+ # Don't implement running(); see http://bugs.python.org/issue18699
+
+ def done(self):
+ """Return True if the future is done.
+
+ Done means either that a result or exception is available, or that the
+ future was cancelled.
+ """
+ return self._state != _PENDING
+
+ def result(self):
+ """Return the result this future represents.
+
+ If the future has been cancelled, raises CancelledError. If the
+ future's result isn't yet available, raises InvalidStateError. If
+ the future is done and has an exception set, this exception is raised.
+ """
+ if self._state == _CANCELLED:
+ exc = self._make_cancelled_error()
+ raise exc
+ if self._state != _FINISHED:
+ raise exceptions.InvalidStateError('Result is not ready.')
+ self.__log_traceback = False
+ if self._exception is not None:
+ raise self._exception.with_traceback(self._exception_tb)
+ return self._result
+
+ def exception(self):
+ """Return the exception that was set on this future.
+
+ The exception (or None if no exception was set) is returned only if
+ the future is done. If the future has been cancelled, raises
+ CancelledError. If the future isn't done yet, raises
+ InvalidStateError.
+ """
+ if self._state == _CANCELLED:
+ exc = self._make_cancelled_error()
+ raise exc
+ if self._state != _FINISHED:
+ raise exceptions.InvalidStateError('Exception is not set.')
+ self.__log_traceback = False
+ return self._exception
+
+ def add_done_callback(self, fn, *, context=None):
+ """Add a callback to be run when the future becomes done.
+
+ The callback is called with a single argument - the future object. If
+ the future is already done when this is called, the callback is
+ scheduled with call_soon.
+ """
+ if self._state != _PENDING:
+ self._loop.call_soon(fn, self, context=context)
+ else:
+ if context is None:
+ context = contextvars.copy_context()
+ self._callbacks.append((fn, context))
+
+ # New method not in PEP 3148.
+
+ def remove_done_callback(self, fn):
+ """Remove all instances of a callback from the "call when done" list.
+
+ Returns the number of callbacks removed.
+ """
+ filtered_callbacks = [(f, ctx)
+ for (f, ctx) in self._callbacks
+ if f != fn]
+ removed_count = len(self._callbacks) - len(filtered_callbacks)
+ if removed_count:
+ self._callbacks[:] = filtered_callbacks
+ return removed_count
+
+ # So-called internal methods (note: no set_running_or_notify_cancel()).
+
+ def set_result(self, result):
+ """Mark the future done and set its result.
+
+ If the future is already done when this method is called, raises
+ InvalidStateError.
+ """
+ if self._state != _PENDING:
+ raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
+ self._result = result
+ self._state = _FINISHED
+ self.__schedule_callbacks()
+
+ def set_exception(self, exception):
+ """Mark the future done and set an exception.
+
+ If the future is already done when this method is called, raises
+ InvalidStateError.
+ """
+ if self._state != _PENDING:
+ raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
+ if isinstance(exception, type):
+ exception = exception()
+ if type(exception) is StopIteration:
+ raise TypeError("StopIteration interacts badly with generators "
+ "and cannot be raised into a Future")
+ self._exception = exception
+ self._exception_tb = exception.__traceback__
+ self._state = _FINISHED
+ self.__schedule_callbacks()
+ self.__log_traceback = True
+
+ def __await__(self):
+ if not self.done():
+ self._asyncio_future_blocking = True
+ yield self # This tells Task to wait for completion.
+ if not self.done():
+ raise RuntimeError("await wasn't used with future")
+ return self.result() # May raise too.
+
+ __iter__ = __await__ # make compatible with 'yield from'.
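
A minimal sketch of the life cycle described by the class docstring above: create a pending future, complete it from a call_soon callback, and await it (illustrative only):

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()
        fut = loop.create_future()        # pending, bound to the running loop
        loop.call_soon(fut.set_result, 42)
        # __await__ yields the future to the Task, which resumes us
        # once set_result() has scheduled the done callbacks.
        assert await fut == 42

    asyncio.run(main())
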
+
+
+# Needed for testing purposes.
+_PyFuture = Future
+
+
+def _get_loop(fut):
+ # Tries to call Future.get_loop() if it's available.
+ # Otherwise it falls back to using the old '_loop' property.
+ try:
+ get_loop = fut.get_loop
+ except AttributeError:
+ pass
+ else:
+ return get_loop()
+ return fut._loop
+
+
+def _set_result_unless_cancelled(fut, result):
+ """Helper setting the result only if the future was not cancelled."""
+ if fut.cancelled():
+ return
+ fut.set_result(result)
+
+
+def _convert_future_exc(exc):
+ exc_class = type(exc)
+ if exc_class is concurrent.futures.CancelledError:
+ return exceptions.CancelledError(*exc.args)
+ elif exc_class is concurrent.futures.TimeoutError:
+ return exceptions.TimeoutError(*exc.args)
+ elif exc_class is concurrent.futures.InvalidStateError:
+ return exceptions.InvalidStateError(*exc.args)
+ else:
+ return exc
+
+
+def _set_concurrent_future_state(concurrent, source):
+ """Copy state from a future to a concurrent.futures.Future."""
+ assert source.done()
+ if source.cancelled():
+ concurrent.cancel()
+ if not concurrent.set_running_or_notify_cancel():
+ return
+ exception = source.exception()
+ if exception is not None:
+ concurrent.set_exception(_convert_future_exc(exception))
+ else:
+ result = source.result()
+ concurrent.set_result(result)
+
+
+def _copy_future_state(source, dest):
+ """Internal helper to copy state from another Future.
+
+ The other Future may be a concurrent.futures.Future.
+ """
+ assert source.done()
+ if dest.cancelled():
+ return
+ assert not dest.done()
+ if source.cancelled():
+ dest.cancel()
+ else:
+ exception = source.exception()
+ if exception is not None:
+ dest.set_exception(_convert_future_exc(exception))
+ else:
+ result = source.result()
+ dest.set_result(result)
+
+
+def _chain_future(source, destination):
+ """Chain two futures so that when one completes, so does the other.
+
+ The result (or exception) of source will be copied to destination.
+ If destination is cancelled, source gets cancelled too.
+ Compatible with both asyncio.Future and concurrent.futures.Future.
+ """
+ if not isfuture(source) and not isinstance(source,
+ concurrent.futures.Future):
+ raise TypeError('A future is required for source argument')
+ if not isfuture(destination) and not isinstance(destination,
+ concurrent.futures.Future):
+ raise TypeError('A future is required for destination argument')
+ source_loop = _get_loop(source) if isfuture(source) else None
+ dest_loop = _get_loop(destination) if isfuture(destination) else None
+
+ def _set_state(future, other):
+ if isfuture(future):
+ _copy_future_state(other, future)
+ else:
+ _set_concurrent_future_state(future, other)
+
+ def _call_check_cancel(destination):
+ if destination.cancelled():
+ if source_loop is None or source_loop is dest_loop:
+ source.cancel()
+ else:
+ source_loop.call_soon_threadsafe(source.cancel)
+
+ def _call_set_state(source):
+ if (destination.cancelled() and
+ dest_loop is not None and dest_loop.is_closed()):
+ return
+ if dest_loop is None or dest_loop is source_loop:
+ _set_state(destination, source)
+ else:
+ if dest_loop.is_closed():
+ return
+ dest_loop.call_soon_threadsafe(_set_state, destination, source)
+
+ destination.add_done_callback(_call_check_cancel)
+ source.add_done_callback(_call_set_state)
+
+
+def wrap_future(future, *, loop=None):
+ """Wrap concurrent.futures.Future object."""
+ if isfuture(future):
+ return future
+ assert isinstance(future, concurrent.futures.Future), \
+ f'concurrent.futures.Future is expected, got {future!r}'
+ if loop is None:
+ loop = events.get_event_loop()
+ new_future = loop.create_future()
+ _chain_future(future, new_future)
+ return new_future
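
A sketch of wrap_future() bridging a thread-pool result into a coroutine via _chain_future(); `blocking_work` is a placeholder:

    import asyncio
    import concurrent.futures

    def blocking_work():
        return sum(range(1000))

    async def main():
        with concurrent.futures.ThreadPoolExecutor() as pool:
            cf = pool.submit(blocking_work)         # concurrent.futures.Future
            print(await asyncio.wrap_future(cf))    # chained asyncio.Future

    asyncio.run(main())
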
+
+
+try:
+ import _asyncio
+except ImportError:
+ pass
+else:
+ # _CFuture is needed for tests.
+ Future = _CFuture = _asyncio.Future
diff --git a/contrib/tools/python3/Lib/asyncio/locks.py b/contrib/tools/python3/Lib/asyncio/locks.py
new file mode 100644
index 0000000000..ce5d8d5bfb
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/locks.py
@@ -0,0 +1,586 @@
+"""Synchronization primitives."""
+
+__all__ = ('Lock', 'Event', 'Condition', 'Semaphore',
+ 'BoundedSemaphore', 'Barrier')
+
+import collections
+import enum
+
+from . import exceptions
+from . import mixins
+
+class _ContextManagerMixin:
+ async def __aenter__(self):
+ await self.acquire()
+ # We have no use for the "as ..." clause in the with
+ # statement for locks.
+ return None
+
+ async def __aexit__(self, exc_type, exc, tb):
+ self.release()
+
+
+class Lock(_ContextManagerMixin, mixins._LoopBoundMixin):
+ """Primitive lock objects.
+
+ A primitive lock is a synchronization primitive that is not owned
+ by a particular coroutine when locked. A primitive lock is in one
+ of two states, 'locked' or 'unlocked'.
+
+ It is created in the unlocked state. It has two basic methods,
+ acquire() and release(). When the state is unlocked, acquire()
+ changes the state to locked and returns immediately. When the
+ state is locked, acquire() blocks until a call to release() in
+ another coroutine changes it to unlocked, then the acquire() call
+ resets it to locked and returns. The release() method should only
+ be called in the locked state; it changes the state to unlocked
+ and returns immediately. If an attempt is made to release an
+ unlocked lock, a RuntimeError will be raised.
+
+ When more than one coroutine is blocked in acquire() waiting for
+ the state to turn to unlocked, only one coroutine proceeds when a
+ release() call resets the state to unlocked; the first coroutine
+ that blocked in acquire() is the one allowed to proceed.
+
+ acquire() is a coroutine and should be called with 'await'.
+
+ Locks also support the asynchronous context management protocol.
+ The 'async with lock' statement should be used.
+
+ Usage:
+
+ lock = Lock()
+ ...
+ await lock.acquire()
+ try:
+ ...
+ finally:
+ lock.release()
+
+ Context manager usage:
+
+ lock = Lock()
+ ...
+ async with lock:
+ ...
+
+ Lock objects can be tested for locking state:
+
+ if not lock.locked():
+ await lock.acquire()
+ else:
+ # lock is acquired
+ ...
+
+ """
+
+ def __init__(self):
+ self._waiters = None
+ self._locked = False
+
+ def __repr__(self):
+ res = super().__repr__()
+ extra = 'locked' if self._locked else 'unlocked'
+ if self._waiters:
+ extra = f'{extra}, waiters:{len(self._waiters)}'
+ return f'<{res[1:-1]} [{extra}]>'
+
+ def locked(self):
+ """Return True if lock is acquired."""
+ return self._locked
+
+ async def acquire(self):
+ """Acquire a lock.
+
+ This method blocks until the lock is unlocked, then sets it to
+ locked and returns True.
+ """
+ if (not self._locked and (self._waiters is None or
+ all(w.cancelled() for w in self._waiters))):
+ self._locked = True
+ return True
+
+ if self._waiters is None:
+ self._waiters = collections.deque()
+ fut = self._get_loop().create_future()
+ self._waiters.append(fut)
+
+ # The finally block must run before the CancelledError
+ # handling as we don't want CancelledError to call
+ # _wake_up_first() and attempt to wake up itself.
+ try:
+ try:
+ await fut
+ finally:
+ self._waiters.remove(fut)
+ except exceptions.CancelledError:
+ if not self._locked:
+ self._wake_up_first()
+ raise
+
+ self._locked = True
+ return True
+
+ def release(self):
+ """Release a lock.
+
+ When the lock is locked, reset it to unlocked, and return.
+ If any other coroutines are blocked waiting for the lock to become
+ unlocked, allow exactly one of them to proceed.
+
+ When invoked on an unlocked lock, a RuntimeError is raised.
+
+ There is no return value.
+ """
+ if self._locked:
+ self._locked = False
+ self._wake_up_first()
+ else:
+ raise RuntimeError('Lock is not acquired.')
+
+ def _wake_up_first(self):
+ """Wake up the first waiter if it isn't done."""
+ if not self._waiters:
+ return
+ try:
+ fut = next(iter(self._waiters))
+ except StopIteration:
+ return
+
+ # .done() necessarily means that a waiter will wake up later on and
+ # either take the lock, or, if it was cancelled and lock wasn't
+ # taken already, will hit this again and wake up a new waiter.
+ if not fut.done():
+ fut.set_result(True)
+
+
+class Event(mixins._LoopBoundMixin):
+ """Asynchronous equivalent to threading.Event.
+
+ Class implementing event objects. An event manages a flag that can be set
+ to true with the set() method and reset to false with the clear() method.
+ The wait() method blocks until the flag is true. The flag is initially
+ false.
+ """
+
+ def __init__(self):
+ self._waiters = collections.deque()
+ self._value = False
+
+ def __repr__(self):
+ res = super().__repr__()
+ extra = 'set' if self._value else 'unset'
+ if self._waiters:
+ extra = f'{extra}, waiters:{len(self._waiters)}'
+ return f'<{res[1:-1]} [{extra}]>'
+
+ def is_set(self):
+ """Return True if and only if the internal flag is true."""
+ return self._value
+
+ def set(self):
+ """Set the internal flag to true. All coroutines waiting for it to
+ become true are awakened. Coroutines that call wait() once the flag
+ is true will not block at all.
+ """
+ if not self._value:
+ self._value = True
+
+ for fut in self._waiters:
+ if not fut.done():
+ fut.set_result(True)
+
+ def clear(self):
+ """Reset the internal flag to false. Subsequently, coroutines calling
+ wait() will block until set() is called to set the internal flag
+ to true again."""
+ self._value = False
+
+ async def wait(self):
+ """Block until the internal flag is true.
+
+ If the internal flag is true on entry, return True
+ immediately. Otherwise, block until another coroutine calls
+ set() to set the flag to true, then return True.
+ """
+ if self._value:
+ return True
+
+ fut = self._get_loop().create_future()
+ self._waiters.append(fut)
+ try:
+ await fut
+ return True
+ finally:
+ self._waiters.remove(fut)
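
A sketch of the set()/wait() handshake described above (illustrative only):

    import asyncio

    async def waiter(event):
        await event.wait()          # suspends until the flag becomes true
        print('released')

    async def main():
        event = asyncio.Event()
        task = asyncio.create_task(waiter(event))
        await asyncio.sleep(0)      # let the waiter block in wait()
        event.set()                 # wakes every pending waiter
        await task

    asyncio.run(main())
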
+
+
+class Condition(_ContextManagerMixin, mixins._LoopBoundMixin):
+ """Asynchronous equivalent to threading.Condition.
+
+ This class implements condition variable objects. A condition variable
+ allows one or more coroutines to wait until they are notified by another
+ coroutine.
+
+ A new Lock object is created and used as the underlying lock.
+ """
+
+ def __init__(self, lock=None):
+ if lock is None:
+ lock = Lock()
+
+ self._lock = lock
+ # Export the lock's locked(), acquire() and release() methods.
+ self.locked = lock.locked
+ self.acquire = lock.acquire
+ self.release = lock.release
+
+ self._waiters = collections.deque()
+
+ def __repr__(self):
+ res = super().__repr__()
+ extra = 'locked' if self.locked() else 'unlocked'
+ if self._waiters:
+ extra = f'{extra}, waiters:{len(self._waiters)}'
+ return f'<{res[1:-1]} [{extra}]>'
+
+ async def wait(self):
+ """Wait until notified.
+
+ If the calling coroutine has not acquired the lock when this
+ method is called, a RuntimeError is raised.
+
+ This method releases the underlying lock, and then blocks
+ until it is awakened by a notify() or notify_all() call for
+ the same condition variable in another coroutine. Once
+ awakened, it re-acquires the lock and returns True.
+ """
+ if not self.locked():
+ raise RuntimeError('cannot wait on un-acquired lock')
+
+ self.release()
+ try:
+ fut = self._get_loop().create_future()
+ self._waiters.append(fut)
+ try:
+ await fut
+ return True
+ finally:
+ self._waiters.remove(fut)
+
+ finally:
+ # Must reacquire lock even if wait is cancelled
+ cancelled = False
+ while True:
+ try:
+ await self.acquire()
+ break
+ except exceptions.CancelledError:
+ cancelled = True
+
+ if cancelled:
+ raise exceptions.CancelledError
+
+ async def wait_for(self, predicate):
+ """Wait until a predicate becomes true.
+
+ The predicate should be a callable whose result will be
+ interpreted as a boolean value. The final predicate value is
+ the return value.
+ """
+ result = predicate()
+ while not result:
+ await self.wait()
+ result = predicate()
+ return result
+
+ def notify(self, n=1):
+ """By default, wake up one coroutine waiting on this condition, if any.
+ If the calling coroutine has not acquired the lock when this method
+ is called, a RuntimeError is raised.
+
+ This method wakes up at most n of the coroutines waiting for the
+ condition variable; it is a no-op if no coroutines are waiting.
+
+ Note: an awakened coroutine does not actually return from its
+ wait() call until it can reacquire the lock. Since notify() does
+ not release the lock, its caller should.
+ """
+ if not self.locked():
+ raise RuntimeError('cannot notify on un-acquired lock')
+
+ idx = 0
+ for fut in self._waiters:
+ if idx >= n:
+ break
+
+ if not fut.done():
+ idx += 1
+ fut.set_result(False)
+
+ def notify_all(self):
+ """Wake up all threads waiting on this condition. This method acts
+ like notify(), but wakes up all waiting threads instead of one. If the
+ calling thread has not acquired the lock when this method is called,
+ a RuntimeError is raised.
+ """
+ self.notify(len(self._waiters))
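
A sketch of the wait_for()/notify() protocol described above, using a single producer and consumer (illustrative only):

    import asyncio

    async def consumer(cond, items):
        async with cond:
            await cond.wait_for(lambda: items)  # lock released while waiting
            return items.pop()

    async def main():
        cond = asyncio.Condition()
        items = []
        task = asyncio.create_task(consumer(cond, items))
        await asyncio.sleep(0)
        async with cond:
            items.append('job')
            cond.notify()           # consumer resumes after we drop the lock
        print(await task)

    asyncio.run(main())
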
+
+
+class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin):
+ """A Semaphore implementation.
+
+ A semaphore manages an internal counter which is decremented by each
+ acquire() call and incremented by each release() call. The counter
+ can never go below zero; when acquire() finds that it is zero, it blocks,
+ waiting until some other coroutine calls release().
+
+ Semaphores also support the context management protocol.
+
+ The optional argument gives the initial value for the internal
+ counter; it defaults to 1. If the value given is less than 0,
+ ValueError is raised.
+ """
+
+ def __init__(self, value=1):
+ if value < 0:
+ raise ValueError("Semaphore initial value must be >= 0")
+ self._waiters = None
+ self._value = value
+
+ def __repr__(self):
+ res = super().__repr__()
+ extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
+ if self._waiters:
+ extra = f'{extra}, waiters:{len(self._waiters)}'
+ return f'<{res[1:-1]} [{extra}]>'
+
+ def locked(self):
+ """Returns True if semaphore cannot be acquired immediately."""
+ return self._value == 0 or (
+ any(not w.cancelled() for w in (self._waiters or ())))
+
+ async def acquire(self):
+ """Acquire a semaphore.
+
+ If the internal counter is larger than zero on entry,
+ decrement it by one and return True immediately. If it is
+ zero on entry, block, waiting until some other coroutine has
+ called release() to make it larger than 0, and then return
+ True.
+ """
+ if not self.locked():
+ self._value -= 1
+ return True
+
+ if self._waiters is None:
+ self._waiters = collections.deque()
+ fut = self._get_loop().create_future()
+ self._waiters.append(fut)
+
+ # The finally block must run before the CancelledError
+ # handling as we don't want CancelledError to call
+ # _wake_up_next() and attempt to wake up itself.
+ try:
+ try:
+ await fut
+ finally:
+ self._waiters.remove(fut)
+ except exceptions.CancelledError:
+ if not fut.cancelled():
+ self._value += 1
+ self._wake_up_next()
+ raise
+
+ if self._value > 0:
+ self._wake_up_next()
+ return True
+
+ def release(self):
+ """Release a semaphore, incrementing the internal counter by one.
+
+ When it was zero on entry and another coroutine is waiting for it to
+ become larger than zero again, wake up that coroutine.
+ """
+ self._value += 1
+ self._wake_up_next()
+
+ def _wake_up_next(self):
+ """Wake up the first waiter that isn't done."""
+ if not self._waiters:
+ return
+
+ for fut in self._waiters:
+ if not fut.done():
+ self._value -= 1
+ fut.set_result(True)
+ return
+
+
+class BoundedSemaphore(Semaphore):
+ """A bounded semaphore implementation.
+
+ This raises ValueError in release() if it would increase the value
+ above the initial value.
+ """
+
+ def __init__(self, value=1):
+ self._bound_value = value
+ super().__init__(value)
+
+ def release(self):
+ if self._value >= self._bound_value:
+ raise ValueError('BoundedSemaphore released too many times')
+ super().release()
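
A sketch of a semaphore bounding concurrency, per the counter semantics above (illustrative only):

    import asyncio

    async def fetch(sem, i):
        async with sem:             # at most two coroutines run this block
            await asyncio.sleep(0.1)
            return i

    async def main():
        sem = asyncio.Semaphore(2)
        print(await asyncio.gather(*(fetch(sem, i) for i in range(5))))

    asyncio.run(main())
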
+
+
+
+class _BarrierState(enum.Enum):
+ FILLING = 'filling'
+ DRAINING = 'draining'
+ RESETTING = 'resetting'
+ BROKEN = 'broken'
+
+
+class Barrier(mixins._LoopBoundMixin):
+ """Asyncio equivalent to threading.Barrier
+
+ Implements a Barrier primitive.
+ Useful for synchronizing a fixed number of tasks at known synchronization
+ points. Tasks block on 'wait()' and are simultaneously awoken once they
+ have all made their call.
+ """
+
+ def __init__(self, parties):
+ """Create a barrier, initialised to 'parties' tasks."""
+ if parties < 1:
+ raise ValueError('parties must be > 0')
+
+ self._cond = Condition() # notify all tasks when state changes
+
+ self._parties = parties
+ self._state = _BarrierState.FILLING
+ self._count = 0 # count tasks in Barrier
+
+ def __repr__(self):
+ res = super().__repr__()
+ extra = f'{self._state.value}'
+ if not self.broken:
+ extra += f', waiters:{self.n_waiting}/{self.parties}'
+ return f'<{res[1:-1]} [{extra}]>'
+
+ async def __aenter__(self):
+ # Wait until the barrier has been reached by 'parties' tasks;
+ # once draining starts, release and return this task's index.
+ return await self.wait()
+
+ async def __aexit__(self, *args):
+ pass
+
+ async def wait(self):
+ """Wait for the barrier.
+
+ When the specified number of tasks have started waiting, they are all
+ simultaneously awoken.
+ Returns a unique index number in the range 0 to 'parties-1'.
+ """
+ async with self._cond:
+ await self._block() # Block while the barrier drains or resets.
+ try:
+ index = self._count
+ self._count += 1
+ if index + 1 == self._parties:
+ # We release the barrier
+ await self._release()
+ else:
+ await self._wait()
+ return index
+ finally:
+ self._count -= 1
+ # Wake up any tasks waiting for barrier to drain.
+ self._exit()
+
+ async def _block(self):
+ # Block until the barrier is ready for us,
+ # or raise an exception if it is broken.
+ #
+ # If it is draining or resetting, wait until that is done,
+ # unless a CancelledError occurs.
+ await self._cond.wait_for(
+ lambda: self._state not in (
+ _BarrierState.DRAINING, _BarrierState.RESETTING
+ )
+ )
+
+ # see if the barrier is in a broken state
+ if self._state is _BarrierState.BROKEN:
+ raise exceptions.BrokenBarrierError("Barrier aborted")
+
+ async def _release(self):
+ # Release the tasks waiting in the barrier.
+
+ # Enter draining state.
+ # Next waiting tasks will be blocked until the end of draining.
+ self._state = _BarrierState.DRAINING
+ self._cond.notify_all()
+
+ async def _wait(self):
+ # Wait in the barrier until we are released. Raise an exception
+ # if the barrier is reset or broken.
+
+ # wait for end of filling
+ # unless a CancelledError occurs
+ await self._cond.wait_for(lambda: self._state is not _BarrierState.FILLING)
+
+ if self._state in (_BarrierState.BROKEN, _BarrierState.RESETTING):
+ raise exceptions.BrokenBarrierError("Abort or reset of barrier")
+
+ def _exit(self):
+ # If we are the last task to exit the barrier, signal any tasks
+ # waiting for the barrier to drain.
+ if self._count == 0:
+ if self._state in (_BarrierState.RESETTING, _BarrierState.DRAINING):
+ self._state = _BarrierState.FILLING
+ self._cond.notify_all()
+
+ async def reset(self):
+ """Reset the barrier to the initial state.
+
+ Any tasks currently waiting will get the BrokenBarrierError
+ exception raised.
+ """
+ async with self._cond:
+ if self._count > 0:
+ if self._state is not _BarrierState.RESETTING:
+ # reset the barrier, waking up tasks
+ self._state = _BarrierState.RESETTING
+ else:
+ self._state = _BarrierState.FILLING
+ self._cond.notify_all()
+
+ async def abort(self):
+ """Place the barrier into a 'broken' state.
+
+ Useful in case of error. Any currently waiting tasks and tasks
+ attempting to 'wait()' will have BrokenBarrierError raised.
+ """
+ async with self._cond:
+ self._state = _BarrierState.BROKEN
+ self._cond.notify_all()
+
+ @property
+ def parties(self):
+ """Return the number of tasks required to trip the barrier."""
+ return self._parties
+
+ @property
+ def n_waiting(self):
+ """Return the number of tasks currently waiting at the barrier."""
+ if self._state is _BarrierState.FILLING:
+ return self._count
+ return 0
+
+ @property
+ def broken(self):
+ """Return True if the barrier is in a broken state."""
+ return self._state is _BarrierState.BROKEN
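
A sketch of three tasks meeting at the barrier, per the wait() semantics above (illustrative only):

    import asyncio

    async def worker(barrier, n):
        print(f'worker {n} arrived')
        index = await barrier.wait()   # blocks until all parties arrive
        if index == 0:                 # exactly one task receives index 0
            print('barrier passed')

    async def main():
        barrier = asyncio.Barrier(3)
        await asyncio.gather(*(worker(barrier, n) for n in range(3)))

    asyncio.run(main())
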
diff --git a/contrib/tools/python3/Lib/asyncio/log.py b/contrib/tools/python3/Lib/asyncio/log.py
new file mode 100644
index 0000000000..23a7074afb
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/log.py
@@ -0,0 +1,7 @@
+"""Logging configuration."""
+
+import logging
+
+
+# Name the logger after the package.
+logger = logging.getLogger(__package__)
diff --git a/contrib/tools/python3/Lib/asyncio/mixins.py b/contrib/tools/python3/Lib/asyncio/mixins.py
new file mode 100644
index 0000000000..c6bf97329e
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/mixins.py
@@ -0,0 +1,21 @@
+"""Event loop mixins."""
+
+import threading
+from . import events
+
+_global_lock = threading.Lock()
+
+
+class _LoopBoundMixin:
+ _loop = None
+
+ def _get_loop(self):
+ loop = events._get_running_loop()
+
+ if self._loop is None:
+ with _global_lock:
+ if self._loop is None:
+ self._loop = loop
+ if loop is not self._loop:
+ raise RuntimeError(f'{self!r} is bound to a different event loop')
+ return loop
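
The binding enforced by _LoopBoundMixin is visible from user code; a sketch of the error it guards against (illustrative only):

    import asyncio

    lock = asyncio.Lock()              # no loop bound yet

    async def use_lock():
        async with lock:
            pass

    asyncio.run(use_lock())            # first use binds `lock` to this loop
    try:
        asyncio.run(use_lock())        # a second, different loop
    except RuntimeError as exc:
        print(exc)                     # "... is bound to a different event loop"
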
diff --git a/contrib/tools/python3/Lib/asyncio/proactor_events.py b/contrib/tools/python3/Lib/asyncio/proactor_events.py
new file mode 100644
index 0000000000..1e2a730cf3
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/proactor_events.py
@@ -0,0 +1,895 @@
+"""Event loop using a proactor and related classes.
+
+A proactor is a "notify-on-completion" multiplexer. Currently a
+proactor is only implemented on Windows with IOCP.
+"""
+
+__all__ = 'BaseProactorEventLoop',
+
+import io
+import os
+import socket
+import warnings
+import signal
+import threading
+import collections
+
+from . import base_events
+from . import constants
+from . import futures
+from . import exceptions
+from . import protocols
+from . import sslproto
+from . import transports
+from . import trsock
+from .log import logger
+
+
+def _set_socket_extra(transport, sock):
+ transport._extra['socket'] = trsock.TransportSocket(sock)
+
+ try:
+ transport._extra['sockname'] = sock.getsockname()
+ except socket.error:
+ if transport._loop.get_debug():
+ logger.warning(
+ "getsockname() failed on %r", sock, exc_info=True)
+
+ if 'peername' not in transport._extra:
+ try:
+ transport._extra['peername'] = sock.getpeername()
+ except socket.error:
+ # UDP sockets may not have a peer name
+ transport._extra['peername'] = None
+
+
+class _ProactorBasePipeTransport(transports._FlowControlMixin,
+ transports.BaseTransport):
+ """Base class for pipe and socket transports."""
+
+ def __init__(self, loop, sock, protocol, waiter=None,
+ extra=None, server=None):
+ super().__init__(extra, loop)
+ self._set_extra(sock)
+ self._sock = sock
+ self.set_protocol(protocol)
+ self._server = server
+ self._buffer = None # None or bytearray.
+ self._read_fut = None
+ self._write_fut = None
+ self._pending_write = 0
+ self._conn_lost = 0
+ self._closing = False # Set when close() called.
+ self._called_connection_lost = False
+ self._eof_written = False
+ if self._server is not None:
+ self._server._attach()
+ self._loop.call_soon(self._protocol.connection_made, self)
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(futures._set_result_unless_cancelled,
+ waiter, None)
+
+ def __repr__(self):
+ info = [self.__class__.__name__]
+ if self._sock is None:
+ info.append('closed')
+ elif self._closing:
+ info.append('closing')
+ if self._sock is not None:
+ info.append(f'fd={self._sock.fileno()}')
+ if self._read_fut is not None:
+ info.append(f'read={self._read_fut!r}')
+ if self._write_fut is not None:
+ info.append(f'write={self._write_fut!r}')
+ if self._buffer:
+ info.append(f'write_bufsize={len(self._buffer)}')
+ if self._eof_written:
+ info.append('EOF written')
+ return '<{}>'.format(' '.join(info))
+
+ def _set_extra(self, sock):
+ self._extra['pipe'] = sock
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closing
+
+ def close(self):
+ if self._closing:
+ return
+ self._closing = True
+ self._conn_lost += 1
+ if not self._buffer and self._write_fut is None:
+ self._loop.call_soon(self._call_connection_lost, None)
+ if self._read_fut is not None:
+ self._read_fut.cancel()
+ self._read_fut = None
+
+ def __del__(self, _warn=warnings.warn):
+ if self._sock is not None:
+ _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+ self._sock.close()
+
+ def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+ try:
+ if isinstance(exc, OSError):
+ if self._loop.get_debug():
+ logger.debug("%r: %s", self, message, exc_info=True)
+ else:
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+ finally:
+ self._force_close(exc)
+
+ def _force_close(self, exc):
+ if self._empty_waiter is not None and not self._empty_waiter.done():
+ if exc is None:
+ self._empty_waiter.set_result(None)
+ else:
+ self._empty_waiter.set_exception(exc)
+ if self._closing and self._called_connection_lost:
+ return
+ self._closing = True
+ self._conn_lost += 1
+ if self._write_fut:
+ self._write_fut.cancel()
+ self._write_fut = None
+ if self._read_fut:
+ self._read_fut.cancel()
+ self._read_fut = None
+ self._pending_write = 0
+ self._buffer = None
+ self._loop.call_soon(self._call_connection_lost, exc)
+
+ def _call_connection_lost(self, exc):
+ if self._called_connection_lost:
+ return
+ try:
+ self._protocol.connection_lost(exc)
+ finally:
+ # XXX If there is a pending overlapped read on the other
+ # end then it may fail with ERROR_NETNAME_DELETED if we
+ # just close our end. First calling shutdown() seems to
+ # cure it, but maybe using DisconnectEx() would be better.
+ if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1:
+ self._sock.shutdown(socket.SHUT_RDWR)
+ self._sock.close()
+ self._sock = None
+ server = self._server
+ if server is not None:
+ server._detach()
+ self._server = None
+ self._called_connection_lost = True
+
+ def get_write_buffer_size(self):
+ size = self._pending_write
+ if self._buffer is not None:
+ size += len(self._buffer)
+ return size
+
+
+class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
+ transports.ReadTransport):
+ """Transport for read pipes."""
+
+ def __init__(self, loop, sock, protocol, waiter=None,
+ extra=None, server=None, buffer_size=65536):
+ self._pending_data_length = -1
+ self._paused = True
+ super().__init__(loop, sock, protocol, waiter, extra, server)
+
+ self._data = bytearray(buffer_size)
+ self._loop.call_soon(self._loop_reading)
+ self._paused = False
+
+ def is_reading(self):
+ return not self._paused and not self._closing
+
+ def pause_reading(self):
+ if self._closing or self._paused:
+ return
+ self._paused = True
+
+ # bpo-33694: Don't cancel self._read_fut because cancelling an
+ # overlapped WSASend() silently loses data with the current proactor
+ # implementation.
+ #
+ # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
+ # completed (even if HasOverlappedIoCompleted() returns 0), but
+ # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
+ # error. Once the overlapped is ignored, the IOCP loop will ignore the
+ # completion I/O event and so will not read the result of the overlapped
+ # WSARecv().
+
+ if self._loop.get_debug():
+ logger.debug("%r pauses reading", self)
+
+ def resume_reading(self):
+ if self._closing or not self._paused:
+ return
+
+ self._paused = False
+ if self._read_fut is None:
+ self._loop.call_soon(self._loop_reading, None)
+
+ length = self._pending_data_length
+ self._pending_data_length = -1
+ if length > -1:
+ # Call the protocol method after calling _loop_reading(),
+ # since the protocol can decide to pause reading again.
+ self._loop.call_soon(self._data_received, self._data[:length], length)
+
+ if self._loop.get_debug():
+ logger.debug("%r resumes reading", self)
+
+ def _eof_received(self):
+ if self._loop.get_debug():
+ logger.debug("%r received EOF", self)
+
+ try:
+ keep_open = self._protocol.eof_received()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal error: protocol.eof_received() call failed.')
+ return
+
+ if not keep_open:
+ self.close()
+
+ def _data_received(self, data, length):
+ if self._paused:
+ # Don't call any protocol method while reading is paused.
+ # The protocol will be called on resume_reading().
+ assert self._pending_data_length == -1
+ self._pending_data_length = length
+ return
+
+ if length == 0:
+ self._eof_received()
+ return
+
+ if isinstance(self._protocol, protocols.BufferedProtocol):
+ try:
+ protocols._feed_data_to_buffered_proto(self._protocol, data)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(exc,
+ 'Fatal error: protocol.buffer_updated() '
+ 'call failed.')
+ return
+ else:
+ self._protocol.data_received(data)
+
+ def _loop_reading(self, fut=None):
+ length = -1
+ data = None
+ try:
+ if fut is not None:
+ assert self._read_fut is fut or (self._read_fut is None and
+ self._closing)
+ self._read_fut = None
+ if fut.done():
+ # deliver data later in "finally" clause
+ length = fut.result()
+ if length == 0:
+ # we got end-of-file so no need to reschedule a new read
+ return
+
+ # Copy into an immutable bytes object so that upstream
+ # protocols are not affected when the read buffer is reused.
+ data = bytes(memoryview(self._data)[:length])
+ else:
+ # the future will be replaced by next proactor.recv call
+ fut.cancel()
+
+ if self._closing:
+ # since close() has been called we ignore any read data
+ return
+
+ # bpo-33694: buffer_updated() has currently no fast path because of
+ # a data loss issue caused by overlapped WSASend() cancellation.
+
+ if not self._paused:
+ # reschedule a new read
+ self._read_fut = self._loop._proactor.recv_into(self._sock, self._data)
+ except ConnectionAbortedError as exc:
+ if not self._closing:
+ self._fatal_error(exc, 'Fatal read error on pipe transport')
+ elif self._loop.get_debug():
+ logger.debug("Read error on pipe transport while closing",
+ exc_info=True)
+ except ConnectionResetError as exc:
+ self._force_close(exc)
+ except OSError as exc:
+ self._fatal_error(exc, 'Fatal read error on pipe transport')
+ except exceptions.CancelledError:
+ if not self._closing:
+ raise
+ else:
+ if not self._paused:
+ self._read_fut.add_done_callback(self._loop_reading)
+ finally:
+ if length > -1:
+ self._data_received(data, length)
+
+
+class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
+ transports.WriteTransport):
+ """Transport for write pipes."""
+
+ _start_tls_compatible = True
+
+ def __init__(self, *args, **kw):
+ super().__init__(*args, **kw)
+ self._empty_waiter = None
+
+ def write(self, data):
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError(
+ f"data argument must be a bytes-like object, "
+ f"not {type(data).__name__}")
+ if self._eof_written:
+ raise RuntimeError('write_eof() already called')
+ if self._empty_waiter is not None:
+ raise RuntimeError('unable to write; sendfile is in progress')
+
+ if not data:
+ return
+
+ if self._conn_lost:
+ if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+ logger.warning('socket.send() raised exception.')
+ self._conn_lost += 1
+ return
+
+ # Observable states:
+ # 1. IDLE: _write_fut and _buffer both None
+ # 2. WRITING: _write_fut set; _buffer None
+ # 3. BACKED UP: _write_fut set; _buffer a bytearray
+ # We always copy the data, so the caller can't modify it
+ # while we're still waiting for the I/O to happen.
+ if self._write_fut is None: # IDLE -> WRITING
+ assert self._buffer is None
+ # Pass a copy, except if it's already immutable.
+ self._loop_writing(data=bytes(data))
+ elif not self._buffer: # WRITING -> BACKED UP
+ # Make a mutable copy which we can extend.
+ self._buffer = bytearray(data)
+ self._maybe_pause_protocol()
+ else: # BACKED UP
+ # Append to buffer (also copies).
+ self._buffer.extend(data)
+ self._maybe_pause_protocol()
+
+ def _loop_writing(self, f=None, data=None):
+ try:
+ if f is not None and self._write_fut is None and self._closing:
+ # XXX most likely self._force_close() has been called, and
+ # it has set self._write_fut to None.
+ return
+ assert f is self._write_fut
+ self._write_fut = None
+ self._pending_write = 0
+ if f:
+ f.result()
+ if data is None:
+ data = self._buffer
+ self._buffer = None
+ if not data:
+ if self._closing:
+ self._loop.call_soon(self._call_connection_lost, None)
+ if self._eof_written:
+ self._sock.shutdown(socket.SHUT_WR)
+ # Now that we've reduced the buffer size, tell the
+ # protocol to resume writing if it was paused. Note that
+ # we do this last since the callback is called immediately
+ # and it may add more data to the buffer (even causing the
+ # protocol to be paused again).
+ self._maybe_resume_protocol()
+ else:
+ self._write_fut = self._loop._proactor.send(self._sock, data)
+ if not self._write_fut.done():
+ assert self._pending_write == 0
+ self._pending_write = len(data)
+ self._write_fut.add_done_callback(self._loop_writing)
+ self._maybe_pause_protocol()
+ else:
+ self._write_fut.add_done_callback(self._loop_writing)
+ if self._empty_waiter is not None and self._write_fut is None:
+ self._empty_waiter.set_result(None)
+ except ConnectionResetError as exc:
+ self._force_close(exc)
+ except OSError as exc:
+ self._fatal_error(exc, 'Fatal write error on pipe transport')
+
+ def can_write_eof(self):
+ return True
+
+ def write_eof(self):
+ self.close()
+
+ def abort(self):
+ self._force_close(None)
+
+ def _make_empty_waiter(self):
+ if self._empty_waiter is not None:
+ raise RuntimeError("Empty waiter is already set")
+ self._empty_waiter = self._loop.create_future()
+ if self._write_fut is None:
+ self._empty_waiter.set_result(None)
+ return self._empty_waiter
+
+ def _reset_empty_waiter(self):
+ self._empty_waiter = None
+
+
+class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
+ def __init__(self, *args, **kw):
+ super().__init__(*args, **kw)
+ self._read_fut = self._loop._proactor.recv(self._sock, 16)
+ self._read_fut.add_done_callback(self._pipe_closed)
+
+ def _pipe_closed(self, fut):
+ if fut.cancelled():
+ # the transport has been closed
+ return
+ assert fut.result() == b''
+ if self._closing:
+ assert self._read_fut is None
+ return
+ assert fut is self._read_fut, (fut, self._read_fut)
+ self._read_fut = None
+ if self._write_fut is not None:
+ self._force_close(BrokenPipeError())
+ else:
+ self.close()
+
+
+class _ProactorDatagramTransport(_ProactorBasePipeTransport,
+ transports.DatagramTransport):
+ max_size = 256 * 1024
+ def __init__(self, loop, sock, protocol, address=None,
+ waiter=None, extra=None):
+ self._address = address
+ self._empty_waiter = None
+ self._buffer_size = 0
+ # We don't need to call _protocol.connection_made() since our base
+ # constructor does it for us.
+ super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
+
+ # The base constructor sets _buffer = None, so we set it here
+ self._buffer = collections.deque()
+ self._loop.call_soon(self._loop_reading)
+
+ def _set_extra(self, sock):
+ _set_socket_extra(self, sock)
+
+ def get_write_buffer_size(self):
+ return self._buffer_size
+
+ def abort(self):
+ self._force_close(None)
+
+ def sendto(self, data, addr=None):
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError(
+ f'data argument must be a bytes-like object, '
+ f'not {type(data).__name__}')
+
+ if not data:
+ return
+
+ if self._address is not None and addr not in (None, self._address):
+ raise ValueError(
+ f'Invalid address: must be None or {self._address}')
+
+ if self._conn_lost and self._address:
+ if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+ logger.warning('socket.sendto() raised exception.')
+ self._conn_lost += 1
+ return
+
+ # Ensure that what we buffer is immutable.
+ self._buffer.append((bytes(data), addr))
+ self._buffer_size += len(data)
+
+ if self._write_fut is None:
+ # No current write operations are active, kick one off
+ self._loop_writing()
+ # else: A write operation is already kicked off
+
+ self._maybe_pause_protocol()
+
+ def _loop_writing(self, fut=None):
+ try:
+ if self._conn_lost:
+ return
+
+ assert fut is self._write_fut
+ self._write_fut = None
+ if fut:
+ # We are in a _loop_writing() done callback, get the result
+ fut.result()
+
+ if not self._buffer or (self._conn_lost and self._address):
+ # The connection has been closed
+ if self._closing:
+ self._loop.call_soon(self._call_connection_lost, None)
+ return
+
+ data, addr = self._buffer.popleft()
+ self._buffer_size -= len(data)
+ if self._address is not None:
+ self._write_fut = self._loop._proactor.send(self._sock,
+ data)
+ else:
+ self._write_fut = self._loop._proactor.sendto(self._sock,
+ data,
+ addr=addr)
+ except OSError as exc:
+ self._protocol.error_received(exc)
+ except Exception as exc:
+ self._fatal_error(exc, 'Fatal write error on datagram transport')
+ else:
+ self._write_fut.add_done_callback(self._loop_writing)
+ self._maybe_resume_protocol()
+
+ def _loop_reading(self, fut=None):
+ data = None
+ try:
+ if self._conn_lost:
+ return
+
+ assert self._read_fut is fut or (self._read_fut is None and
+ self._closing)
+
+ self._read_fut = None
+ if fut is not None:
+ res = fut.result()
+
+ if self._closing:
+ # since close() has been called we ignore any read data
+ data = None
+ return
+
+ if self._address is not None:
+ data, addr = res, self._address
+ else:
+ data, addr = res
+
+ if self._conn_lost:
+ return
+ if self._address is not None:
+ self._read_fut = self._loop._proactor.recv(self._sock,
+ self.max_size)
+ else:
+ self._read_fut = self._loop._proactor.recvfrom(self._sock,
+ self.max_size)
+ except OSError as exc:
+ self._protocol.error_received(exc)
+ except exceptions.CancelledError:
+ if not self._closing:
+ raise
+ else:
+ if self._read_fut is not None:
+ self._read_fut.add_done_callback(self._loop_reading)
+ finally:
+ if data:
+ self._protocol.datagram_received(data, addr)
+
+
+class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
+ _ProactorBaseWritePipeTransport,
+ transports.Transport):
+ """Transport for duplex pipes."""
+
+ def can_write_eof(self):
+ return False
+
+ def write_eof(self):
+ raise NotImplementedError
+
+
+class _ProactorSocketTransport(_ProactorReadPipeTransport,
+ _ProactorBaseWritePipeTransport,
+ transports.Transport):
+ """Transport for connected sockets."""
+
+ _sendfile_compatible = constants._SendfileMode.TRY_NATIVE
+
+ def __init__(self, loop, sock, protocol, waiter=None,
+ extra=None, server=None):
+ super().__init__(loop, sock, protocol, waiter, extra, server)
+ base_events._set_nodelay(sock)
+
+ def _set_extra(self, sock):
+ _set_socket_extra(self, sock)
+
+ def can_write_eof(self):
+ return True
+
+ def write_eof(self):
+ if self._closing or self._eof_written:
+ return
+ self._eof_written = True
+ if self._write_fut is None:
+ self._sock.shutdown(socket.SHUT_WR)
+
+
+class BaseProactorEventLoop(base_events.BaseEventLoop):
+
+ def __init__(self, proactor):
+ super().__init__()
+ logger.debug('Using proactor: %s', proactor.__class__.__name__)
+ self._proactor = proactor
+ self._selector = proactor # convenient alias
+ self._self_reading_future = None
+ self._accept_futures = {} # socket file descriptor => Future
+ proactor.set_loop(self)
+ self._make_self_pipe()
+ if threading.current_thread() is threading.main_thread():
+ # The wakeup fd can only be installed from the main thread.
+ signal.set_wakeup_fd(self._csock.fileno())
+
+ def _make_socket_transport(self, sock, protocol, waiter=None,
+ extra=None, server=None):
+ return _ProactorSocketTransport(self, sock, protocol, waiter,
+ extra, server)
+
+ def _make_ssl_transport(
+ self, rawsock, protocol, sslcontext, waiter=None,
+ *, server_side=False, server_hostname=None,
+ extra=None, server=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ ssl_protocol = sslproto.SSLProtocol(
+ self, protocol, sslcontext, waiter,
+ server_side, server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ _ProactorSocketTransport(self, rawsock, ssl_protocol,
+ extra=extra, server=server)
+ return ssl_protocol._app_transport
+
+ def _make_datagram_transport(self, sock, protocol,
+ address=None, waiter=None, extra=None):
+ return _ProactorDatagramTransport(self, sock, protocol, address,
+ waiter, extra)
+
+ def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
+ extra=None):
+ return _ProactorDuplexPipeTransport(self,
+ sock, protocol, waiter, extra)
+
+ def _make_read_pipe_transport(self, sock, protocol, waiter=None,
+ extra=None):
+ return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
+
+ def _make_write_pipe_transport(self, sock, protocol, waiter=None,
+ extra=None):
+ # We want connection_lost() to be called when the other end closes.
+ return _ProactorWritePipeTransport(self,
+ sock, protocol, waiter, extra)
+
+ def close(self):
+ if self.is_running():
+ raise RuntimeError("Cannot close a running event loop")
+ if self.is_closed():
+ return
+
+ if threading.current_thread() is threading.main_thread():
+ signal.set_wakeup_fd(-1)
+ # Call these methods before closing the event loop (before calling
+ # BaseEventLoop.close), because they can schedule callbacks with
+ # call_soon(), which is forbidden when the event loop is closed.
+ self._stop_accept_futures()
+ self._close_self_pipe()
+ self._proactor.close()
+ self._proactor = None
+ self._selector = None
+
+ # Close the event loop
+ super().close()
+
+ async def sock_recv(self, sock, n):
+ return await self._proactor.recv(sock, n)
+
+ async def sock_recv_into(self, sock, buf):
+ return await self._proactor.recv_into(sock, buf)
+
+ async def sock_recvfrom(self, sock, bufsize):
+ return await self._proactor.recvfrom(sock, bufsize)
+
+ async def sock_recvfrom_into(self, sock, buf, nbytes=0):
+ if not nbytes:
+ nbytes = len(buf)
+
+ return await self._proactor.recvfrom_into(sock, buf, nbytes)
+
+ async def sock_sendall(self, sock, data):
+ return await self._proactor.send(sock, data)
+
+ async def sock_sendto(self, sock, data, address):
+ return await self._proactor.sendto(sock, data, 0, address)
+
+ async def sock_connect(self, sock, address):
+ return await self._proactor.connect(sock, address)
+
+ async def sock_accept(self, sock):
+ return await self._proactor.accept(sock)
+
+ async def _sock_sendfile_native(self, sock, file, offset, count):
+ try:
+ fileno = file.fileno()
+ except (AttributeError, io.UnsupportedOperation) as err:
+ raise exceptions.SendfileNotAvailableError("not a regular file")
+ try:
+ fsize = os.fstat(fileno).st_size
+ except OSError:
+ raise exceptions.SendfileNotAvailableError("not a regular file")
+ blocksize = count if count else fsize
+ if not blocksize:
+ return 0 # empty file
+
+ blocksize = min(blocksize, 0xffff_ffff)
+ end_pos = min(offset + count, fsize) if count else fsize
+ offset = min(offset, fsize)
+ total_sent = 0
+ try:
+ while True:
+ blocksize = min(end_pos - offset, blocksize)
+ if blocksize <= 0:
+ return total_sent
+ await self._proactor.sendfile(sock, file, offset, blocksize)
+ offset += blocksize
+ total_sent += blocksize
+ finally:
+ if total_sent > 0:
+ file.seek(offset)
+
+ async def _sendfile_native(self, transp, file, offset, count):
+ resume_reading = transp.is_reading()
+ transp.pause_reading()
+ await transp._make_empty_waiter()
+ try:
+ return await self.sock_sendfile(transp._sock, file, offset, count,
+ fallback=False)
+ finally:
+ transp._reset_empty_waiter()
+ if resume_reading:
+ transp.resume_reading()
+
+ def _close_self_pipe(self):
+ if self._self_reading_future is not None:
+ self._self_reading_future.cancel()
+ self._self_reading_future = None
+ self._ssock.close()
+ self._ssock = None
+ self._csock.close()
+ self._csock = None
+ self._internal_fds -= 1
+
+ def _make_self_pipe(self):
+ # A self-socket, really. :-)
+ self._ssock, self._csock = socket.socketpair()
+ self._ssock.setblocking(False)
+ self._csock.setblocking(False)
+ self._internal_fds += 1
+
+ def _loop_self_reading(self, f=None):
+ try:
+ if f is not None:
+ f.result() # may raise
+ if self._self_reading_future is not f:
+ # When we scheduled this Future, we assigned it to
+ # _self_reading_future. If it's not there now, something has
+ # tried to cancel the loop while this callback was still in the
+ # queue (see windows_events.ProactorEventLoop.run_forever). In
+ # that case stop here instead of continuing to schedule a new
+ # iteration.
+ return
+ f = self._proactor.recv(self._ssock, 4096)
+ except exceptions.CancelledError:
+ # _close_self_pipe() has been called, stop waiting for data
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self.call_exception_handler({
+ 'message': 'Error on reading from the event loop self pipe',
+ 'exception': exc,
+ 'loop': self,
+ })
+ else:
+ self._self_reading_future = f
+ f.add_done_callback(self._loop_self_reading)
+
+ def _write_to_self(self):
+ # This may be called from a different thread, possibly after
+ # _close_self_pipe() has been called or even while it is
+ # running. Guard for self._csock being None or closed. When
+ # a socket is closed, send() raises OSError (with errno set to
+ # EBADF, but let's not rely on the exact error code).
+ csock = self._csock
+ if csock is None:
+ return
+
+ try:
+ csock.send(b'\0')
+ except OSError:
+ if self._debug:
+ logger.debug("Fail to write a null byte into the "
+ "self-pipe socket",
+ exc_info=True)
+
+ def _start_serving(self, protocol_factory, sock,
+ sslcontext=None, server=None, backlog=100,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+
+ def loop(f=None):
+ try:
+ if f is not None:
+ conn, addr = f.result()
+ if self._debug:
+ logger.debug("%r got a new connection from %r: %r",
+ server, addr, conn)
+ protocol = protocol_factory()
+ if sslcontext is not None:
+ self._make_ssl_transport(
+ conn, protocol, sslcontext, server_side=True,
+ extra={'peername': addr}, server=server,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ else:
+ self._make_socket_transport(
+ conn, protocol,
+ extra={'peername': addr}, server=server)
+ if self.is_closed():
+ return
+ f = self._proactor.accept(sock)
+ except OSError as exc:
+ if sock.fileno() != -1:
+ self.call_exception_handler({
+ 'message': 'Accept failed on a socket',
+ 'exception': exc,
+ 'socket': trsock.TransportSocket(sock),
+ })
+ sock.close()
+ elif self._debug:
+ logger.debug("Accept failed on socket %r",
+ sock, exc_info=True)
+ except exceptions.CancelledError:
+ sock.close()
+ else:
+ self._accept_futures[sock.fileno()] = f
+ f.add_done_callback(loop)
+
+ self.call_soon(loop)
+
+ def _process_events(self, event_list):
+ # Events are processed in the IocpProactor._poll() method
+ pass
+
+ def _stop_accept_futures(self):
+ for future in self._accept_futures.values():
+ future.cancel()
+ self._accept_futures.clear()
+
+ def _stop_serving(self, sock):
+ future = self._accept_futures.pop(sock.fileno(), None)
+ if future:
+ future.cancel()
+ self._proactor._stop_serving(sock)
+ sock.close()
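
A minimal usage sketch of the sock_*() coroutines above (the echo server on 127.0.0.1:8888 is assumed purely for illustration; any event loop implementing sock_connect/sock_sendall/sock_recv behaves the same way):

    import asyncio
    import socket

    async def main():
        loop = asyncio.get_running_loop()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(False)  # the sock_*() coroutines require a non-blocking socket
        await loop.sock_connect(sock, ('127.0.0.1', 8888))
        await loop.sock_sendall(sock, b'ping')
        reply = await loop.sock_recv(sock, 4096)  # receive at most 4096 bytes
        sock.close()
        return reply

    asyncio.run(main())
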
diff --git a/contrib/tools/python3/Lib/asyncio/protocols.py b/contrib/tools/python3/Lib/asyncio/protocols.py
new file mode 100644
index 0000000000..09987b164c
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/protocols.py
@@ -0,0 +1,216 @@
+"""Abstract Protocol base classes."""
+
+__all__ = (
+ 'BaseProtocol', 'Protocol', 'DatagramProtocol',
+ 'SubprocessProtocol', 'BufferedProtocol',
+)
+
+
+class BaseProtocol:
+ """Common base class for protocol interfaces.
+
+ Usually the user implements protocols derived from BaseProtocol,
+ such as Protocol or SubprocessProtocol.
+
+ The only case when BaseProtocol should be implemented directly is
+ a write-only transport, such as a write pipe.
+ """
+
+ __slots__ = ()
+
+ def connection_made(self, transport):
+ """Called when a connection is made.
+
+ The argument is the transport representing the pipe connection.
+ To receive data, wait for data_received() calls.
+ When the connection is closed, connection_lost() is called.
+ """
+
+ def connection_lost(self, exc):
+ """Called when the connection is lost or closed.
+
+ The argument is an exception object or None (the latter
+ meaning a regular EOF was received, or the connection was
+ aborted or closed).
+ """
+
+ def pause_writing(self):
+ """Called when the transport's buffer goes over the high-water mark.
+
+ Pause and resume calls are paired -- pause_writing() is called
+ once when the buffer goes strictly over the high-water mark
+ (even if subsequent writes increase the buffer size even
+ more), and eventually resume_writing() is called once when the
+ buffer size reaches the low-water mark.
+
+ Note that if the buffer size equals the high-water mark,
+ pause_writing() is not called -- it must go strictly over.
+ Conversely, resume_writing() is called when the buffer size is
+ equal to or lower than the low-water mark. These end conditions
+ are important to ensure that things go as expected when either
+ mark is zero.
+
+ NOTE: This is the only Protocol callback that is not called
+ through EventLoop.call_soon() -- if it were, it would have no
+ effect when it's most needed (when the app keeps writing
+ without yielding until pause_writing() is called).
+ """
+
+ def resume_writing(self):
+ """Called when the transport's buffer drains below the low-water mark.
+
+ See pause_writing() for details.
+ """
+
+
+class Protocol(BaseProtocol):
+ """Interface for stream protocol.
+
+ The user should implement this interface. They can inherit from
+ this class but don't need to. The implementations here do
+ nothing (they don't raise exceptions).
+
+ When the user wants to request a transport, they pass a protocol
+ factory to a utility function (e.g., EventLoop.create_connection()).
+
+ When the connection is made successfully, connection_made() is
+ called with a suitable transport object. Then data_received()
+ will be called 0 or more times with data (bytes) received from the
+ transport; finally, connection_lost() will be called exactly once
+ with either an exception object or None as an argument.
+
+ State machine of calls:
+
+ start -> CM [-> DR*] [-> ER?] -> CL -> end
+
+ * CM: connection_made()
+ * DR: data_received()
+ * ER: eof_received()
+ * CL: connection_lost()
+ """
+
+ __slots__ = ()
+
+ def data_received(self, data):
+ """Called when some data is received.
+
+ The argument is a bytes object.
+ """
+
+ def eof_received(self):
+ """Called when the other end calls write_eof() or equivalent.
+
+ If this returns a false value (including None), the transport
+ will close itself. If it returns a true value, closing the
+ transport is up to the protocol.
+ """
+
+
+class BufferedProtocol(BaseProtocol):
+ """Interface for stream protocol with manual buffer control.
+
+ Event loop methods, such as `create_server` and `create_connection`,
+ accept factories that return protocols implementing this interface.
+
+ The idea of BufferedProtocol is that it allows the protocol to
+ manually allocate and control the receive buffer. Event loops can
+ then use the buffer provided by the protocol to avoid unnecessary
+ data copies. This can result in a noticeable performance improvement
+ for protocols that receive large amounts of data. Sophisticated
+ protocols can allocate the buffer only once at creation time.
+
+ State machine of calls:
+
+ start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
+
+ * CM: connection_made()
+ * GB: get_buffer()
+ * BU: buffer_updated()
+ * ER: eof_received()
+ * CL: connection_lost()
+ """
+
+ __slots__ = ()
+
+ def get_buffer(self, sizehint):
+ """Called to allocate a new receive buffer.
+
+ *sizehint* is a recommended minimal size for the returned
+ buffer. When set to -1, the buffer size can be arbitrary.
+
+ Must return an object that implements the
+ :ref:`buffer protocol <bufferobjects>`.
+ It is an error to return a zero-sized buffer.
+ """
+
+ def buffer_updated(self, nbytes):
+ """Called when the buffer was updated with the received data.
+
+ *nbytes* is the total number of bytes that were written to
+ the buffer.
+ """
+
+ def eof_received(self):
+ """Called when the other end calls write_eof() or equivalent.
+
+ If this returns a false value (including None), the transport
+ will close itself. If it returns a true value, closing the
+ transport is up to the protocol.
+ """
+
+
+class DatagramProtocol(BaseProtocol):
+ """Interface for datagram protocol."""
+
+ __slots__ = ()
+
+ def datagram_received(self, data, addr):
+ """Called when some datagram is received."""
+
+ def error_received(self, exc):
+ """Called when a send or receive operation raises an OSError.
+
+ (Other than BlockingIOError or InterruptedError.)
+ """
+
+
+class SubprocessProtocol(BaseProtocol):
+ """Interface for protocol for subprocess calls."""
+
+ __slots__ = ()
+
+ def pipe_data_received(self, fd, data):
+ """Called when the subprocess writes data into stdout/stderr pipe.
+
+ fd is the int file descriptor.
+ data is a bytes object.
+ """
+
+ def pipe_connection_lost(self, fd, exc):
+ """Called when a file descriptor associated with the child process is
+ closed.
+
+ fd is the int file descriptor that was closed.
+ """
+
+ def process_exited(self):
+ """Called when subprocess has exited."""
+
+
+def _feed_data_to_buffered_proto(proto, data):
+ data_len = len(data)
+ while data_len:
+ buf = proto.get_buffer(data_len)
+ buf_len = len(buf)
+ if not buf_len:
+ raise RuntimeError('get_buffer() returned an empty buffer')
+
+ if buf_len >= data_len:
+ buf[:data_len] = data
+ proto.buffer_updated(data_len)
+ return
+ else:
+ buf[:buf_len] = data[:buf_len]
+ proto.buffer_updated(buf_len)
+ data = data[buf_len:]
+ data_len = len(data)
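
A minimal sketch of the get_buffer()/buffer_updated() contract described above, using the module-private helper _feed_data_to_buffered_proto defined just before this point to stand in for the event loop (the EchoBuffered class and the 16-byte buffer size are illustrative assumptions):

    from asyncio import protocols

    class EchoBuffered(protocols.BufferedProtocol):
        def __init__(self):
            self._buf = bytearray(16)   # fixed receive buffer, allocated once
            self.received = bytearray()

        def get_buffer(self, sizehint):
            return self._buf            # any buffer-protocol object; must be non-empty

        def buffer_updated(self, nbytes):
            self.received += self._buf[:nbytes]

    proto = EchoBuffered()
    # 40 bytes do not fit in one 16-byte buffer, so the helper calls
    # get_buffer()/buffer_updated() repeatedly until all data is delivered.
    protocols._feed_data_to_buffered_proto(proto, b'x' * 40)
    assert bytes(proto.received) == b'x' * 40
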
diff --git a/contrib/tools/python3/Lib/asyncio/queues.py b/contrib/tools/python3/Lib/asyncio/queues.py
new file mode 100644
index 0000000000..a9656a6df5
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/queues.py
@@ -0,0 +1,244 @@
+__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
+
+import collections
+import heapq
+from types import GenericAlias
+
+from . import locks
+from . import mixins
+
+
+class QueueEmpty(Exception):
+ """Raised when Queue.get_nowait() is called on an empty Queue."""
+ pass
+
+
+class QueueFull(Exception):
+ """Raised when the Queue.put_nowait() method is called on a full Queue."""
+ pass
+
+
+class Queue(mixins._LoopBoundMixin):
+ """A queue, useful for coordinating producer and consumer coroutines.
+
+ If maxsize is less than or equal to zero, the queue size is infinite. If it
+ is an integer greater than 0, then "await put()" will block when the
+ queue reaches maxsize, until an item is removed by get().
+
+ Unlike the standard library Queue, you can reliably know this Queue's size
+ with qsize(), since your single-threaded asyncio application won't be
+ interrupted between calling qsize() and doing an operation on the Queue.
+ """
+
+ def __init__(self, maxsize=0):
+ self._maxsize = maxsize
+
+ # Futures.
+ self._getters = collections.deque()
+ # Futures.
+ self._putters = collections.deque()
+ self._unfinished_tasks = 0
+ self._finished = locks.Event()
+ self._finished.set()
+ self._init(maxsize)
+
+ # These three are overridable in subclasses.
+
+ def _init(self, maxsize):
+ self._queue = collections.deque()
+
+ def _get(self):
+ return self._queue.popleft()
+
+ def _put(self, item):
+ self._queue.append(item)
+
+ # End of the overridable methods.
+
+ def _wakeup_next(self, waiters):
+ # Wake up the next waiter (if any) that isn't cancelled.
+ while waiters:
+ waiter = waiters.popleft()
+ if not waiter.done():
+ waiter.set_result(None)
+ break
+
+ def __repr__(self):
+ return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
+
+ def __str__(self):
+ return f'<{type(self).__name__} {self._format()}>'
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+ def _format(self):
+ result = f'maxsize={self._maxsize!r}'
+ if getattr(self, '_queue', None):
+ result += f' _queue={list(self._queue)!r}'
+ if self._getters:
+ result += f' _getters[{len(self._getters)}]'
+ if self._putters:
+ result += f' _putters[{len(self._putters)}]'
+ if self._unfinished_tasks:
+ result += f' tasks={self._unfinished_tasks}'
+ return result
+
+ def qsize(self):
+ """Number of items in the queue."""
+ return len(self._queue)
+
+ @property
+ def maxsize(self):
+ """Number of items allowed in the queue."""
+ return self._maxsize
+
+ def empty(self):
+ """Return True if the queue is empty, False otherwise."""
+ return not self._queue
+
+ def full(self):
+ """Return True if there are maxsize items in the queue.
+
+ Note: if the Queue was initialized with maxsize=0 (the default),
+ then full() is never True.
+ """
+ if self._maxsize <= 0:
+ return False
+ else:
+ return self.qsize() >= self._maxsize
+
+ async def put(self, item):
+ """Put an item into the queue.
+
+ If the queue is full, wait until a free slot is available
+ before adding the item.
+ """
+ while self.full():
+ putter = self._get_loop().create_future()
+ self._putters.append(putter)
+ try:
+ await putter
+ except:
+ putter.cancel() # Just in case putter is not done yet.
+ try:
+ # Clean self._putters from canceled putters.
+ self._putters.remove(putter)
+ except ValueError:
+ # The putter could be removed from self._putters by a
+ # previous get_nowait call.
+ pass
+ if not self.full() and not putter.cancelled():
+ # We were woken up by get_nowait(), but can't take
+ # the call. Wake up the next in line.
+ self._wakeup_next(self._putters)
+ raise
+ return self.put_nowait(item)
+
+ def put_nowait(self, item):
+ """Put an item into the queue without blocking.
+
+ If no free slot is immediately available, raise QueueFull.
+ """
+ if self.full():
+ raise QueueFull
+ self._put(item)
+ self._unfinished_tasks += 1
+ self._finished.clear()
+ self._wakeup_next(self._getters)
+
+ async def get(self):
+ """Remove and return an item from the queue.
+
+ If queue is empty, wait until an item is available.
+ """
+ while self.empty():
+ getter = self._get_loop().create_future()
+ self._getters.append(getter)
+ try:
+ await getter
+ except:
+ getter.cancel() # Just in case getter is not done yet.
+ try:
+ # Clean self._getters from canceled getters.
+ self._getters.remove(getter)
+ except ValueError:
+ # The getter could be removed from self._getters by a
+ # previous put_nowait call.
+ pass
+ if not self.empty() and not getter.cancelled():
+ # We were woken up by put_nowait(), but can't take
+ # the call. Wake up the next in line.
+ self._wakeup_next(self._getters)
+ raise
+ return self.get_nowait()
+
+ def get_nowait(self):
+ """Remove and return an item from the queue.
+
+ Return an item if one is immediately available, else raise QueueEmpty.
+ """
+ if self.empty():
+ raise QueueEmpty
+ item = self._get()
+ self._wakeup_next(self._putters)
+ return item
+
+ def task_done(self):
+ """Indicate that a formerly enqueued task is complete.
+
+ Used by queue consumers. For each get() used to fetch a task,
+ a subsequent call to task_done() tells the queue that the processing
+ on the task is complete.
+
+ If a join() is currently blocking, it will resume when all items have
+ been processed (meaning that a task_done() call was received for every
+ item that had been put() into the queue).
+
+ Raises ValueError if called more times than there were items placed in
+ the queue.
+ """
+ if self._unfinished_tasks <= 0:
+ raise ValueError('task_done() called too many times')
+ self._unfinished_tasks -= 1
+ if self._unfinished_tasks == 0:
+ self._finished.set()
+
+ async def join(self):
+ """Block until all items in the queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the
+ queue. The count goes down whenever a consumer calls task_done() to
+ indicate that the item was retrieved and all work on it is complete.
+ When the count of unfinished tasks drops to zero, join() unblocks.
+ """
+ if self._unfinished_tasks > 0:
+ await self._finished.wait()
+
+
+class PriorityQueue(Queue):
+ """A subclass of Queue; retrieves entries in priority order (lowest first).
+
+ Entries are typically tuples of the form: (priority number, data).
+ """
+
+ def _init(self, maxsize):
+ self._queue = []
+
+ def _put(self, item, heappush=heapq.heappush):
+ heappush(self._queue, item)
+
+ def _get(self, heappop=heapq.heappop):
+ return heappop(self._queue)
+
+
+class LifoQueue(Queue):
+ """A subclass of Queue that retrieves most recently added entries first."""
+
+ def _init(self, maxsize):
+ self._queue = []
+
+ def _put(self, item):
+ self._queue.append(item)
+
+ def _get(self):
+ return self._queue.pop()
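
A short producer/consumer sketch of the put()/get()/task_done()/join() machinery above (the item count and maxsize are arbitrary choices for illustration):

    import asyncio

    async def main():
        queue = asyncio.Queue(maxsize=2)   # put() blocks once two items are pending

        async def producer():
            for i in range(5):
                await queue.put(i)
            await queue.join()             # wait until every item is task_done()

        async def consumer():
            while True:
                item = await queue.get()
                print('consumed', item)
                queue.task_done()

        consumer_task = asyncio.create_task(consumer())
        await producer()
        consumer_task.cancel()             # join() returned, so all items are processed

    asyncio.run(main())
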
diff --git a/contrib/tools/python3/Lib/asyncio/runners.py b/contrib/tools/python3/Lib/asyncio/runners.py
new file mode 100644
index 0000000000..1b89236599
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/runners.py
@@ -0,0 +1,215 @@
+__all__ = ('Runner', 'run')
+
+import contextvars
+import enum
+import functools
+import threading
+import signal
+from . import coroutines
+from . import events
+from . import exceptions
+from . import tasks
+from . import constants
+
+class _State(enum.Enum):
+ CREATED = "created"
+ INITIALIZED = "initialized"
+ CLOSED = "closed"
+
+
+class Runner:
+ """A context manager that controls event loop life cycle.
+
+ The context manager always creates a new event loop,
+ allows running async functions inside it,
+ and properly finalizes the loop at the context manager exit.
+
+ If debug is True, the event loop will be run in debug mode.
+ If loop_factory is passed, it is used for new event loop creation.
+
+ asyncio.run(main(), debug=True)
+
+ is a shortcut for
+
+ with asyncio.Runner(debug=True) as runner:
+ runner.run(main())
+
+ The run() method can be called multiple times within the runner's context.
+
+ This can be useful for interactive consoles (e.g. IPython),
+ unittest runners, console tools -- anywhere async code is called
+ from an existing sync framework and the preferred single
+ asyncio.run() call doesn't work.
+
+ """
+
+ # Note: the class is final, it is not intended for inheritance.
+
+ def __init__(self, *, debug=None, loop_factory=None):
+ self._state = _State.CREATED
+ self._debug = debug
+ self._loop_factory = loop_factory
+ self._loop = None
+ self._context = None
+ self._interrupt_count = 0
+ self._set_event_loop = False
+
+ def __enter__(self):
+ self._lazy_init()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+ def close(self):
+ """Shutdown and close event loop."""
+ if self._state is not _State.INITIALIZED:
+ return
+ try:
+ loop = self._loop
+ _cancel_all_tasks(loop)
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ loop.run_until_complete(
+ loop.shutdown_default_executor(constants.THREAD_JOIN_TIMEOUT))
+ finally:
+ if self._set_event_loop:
+ events.set_event_loop(None)
+ loop.close()
+ self._loop = None
+ self._state = _State.CLOSED
+
+ def get_loop(self):
+ """Return embedded event loop."""
+ self._lazy_init()
+ return self._loop
+
+ def run(self, coro, *, context=None):
+ """Run a coroutine inside the embedded event loop."""
+ if not coroutines.iscoroutine(coro):
+ raise ValueError("a coroutine was expected, got {!r}".format(coro))
+
+ if events._get_running_loop() is not None:
+ # fail fast with short traceback
+ raise RuntimeError(
+ "Runner.run() cannot be called from a running event loop")
+
+ self._lazy_init()
+
+ if context is None:
+ context = self._context
+ task = self._loop.create_task(coro, context=context)
+
+ if (threading.current_thread() is threading.main_thread()
+ and signal.getsignal(signal.SIGINT) is signal.default_int_handler
+ ):
+ sigint_handler = functools.partial(self._on_sigint, main_task=task)
+ try:
+ signal.signal(signal.SIGINT, sigint_handler)
+ except ValueError:
+ # `signal.signal` may throw if `threading.main_thread` does
+ # not support signals (e.g. embedded interpreter with signals
+ # not registered - see gh-91880)
+ sigint_handler = None
+ else:
+ sigint_handler = None
+
+ self._interrupt_count = 0
+ try:
+ return self._loop.run_until_complete(task)
+ except exceptions.CancelledError:
+ if self._interrupt_count > 0:
+ uncancel = getattr(task, "uncancel", None)
+ if uncancel is not None and uncancel() == 0:
+ raise KeyboardInterrupt()
+ raise # CancelledError
+ finally:
+ if (sigint_handler is not None
+ and signal.getsignal(signal.SIGINT) is sigint_handler
+ ):
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+
+ def _lazy_init(self):
+ if self._state is _State.CLOSED:
+ raise RuntimeError("Runner is closed")
+ if self._state is _State.INITIALIZED:
+ return
+ if self._loop_factory is None:
+ self._loop = events.new_event_loop()
+ if not self._set_event_loop:
+ # Call set_event_loop only once to avoid calling
+ # attach_loop multiple times on child watchers
+ events.set_event_loop(self._loop)
+ self._set_event_loop = True
+ else:
+ self._loop = self._loop_factory()
+ if self._debug is not None:
+ self._loop.set_debug(self._debug)
+ self._context = contextvars.copy_context()
+ self._state = _State.INITIALIZED
+
+ def _on_sigint(self, signum, frame, main_task):
+ self._interrupt_count += 1
+ if self._interrupt_count == 1 and not main_task.done():
+ main_task.cancel()
+ # wake up the loop if it is blocked in select() with a long timeout
+ self._loop.call_soon_threadsafe(lambda: None)
+ return
+ raise KeyboardInterrupt()
+
+
+def run(main, *, debug=None, loop_factory=None):
+ """Execute the coroutine and return the result.
+
+ This function runs the passed coroutine, taking care of
+ managing the asyncio event loop, finalizing asynchronous
+ generators and closing the default executor.
+
+ This function cannot be called when another asyncio event loop is
+ running in the same thread.
+
+ If debug is True, the event loop will be run in debug mode.
+
+ This function always creates a new event loop and closes it at the end.
+ It should be used as a main entry point for asyncio programs, and should
+ ideally only be called once.
+
+ The executor is given a timeout duration of 5 minutes to shut down.
+ If the executor hasn't finished within that duration, a warning is
+ emitted and the executor is closed.
+
+ Example:
+
+ async def main():
+ await asyncio.sleep(1)
+ print('hello')
+
+ asyncio.run(main())
+ """
+ if events._get_running_loop() is not None:
+ # fail fast with short traceback
+ raise RuntimeError(
+ "asyncio.run() cannot be called from a running event loop")
+
+ with Runner(debug=debug, loop_factory=loop_factory) as runner:
+ return runner.run(main)
+
+
+def _cancel_all_tasks(loop):
+ to_cancel = tasks.all_tasks(loop)
+ if not to_cancel:
+ return
+
+ for task in to_cancel:
+ task.cancel()
+
+ loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
+
+ for task in to_cancel:
+ if task.cancelled():
+ continue
+ if task.exception() is not None:
+ loop.call_exception_handler({
+ 'message': 'unhandled exception during asyncio.run() shutdown',
+ 'exception': task.exception(),
+ 'task': task,
+ })
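
A sketch of the multiple-run() pattern the Runner docstring describes (requires Python 3.11+, where asyncio.Runner was added):

    import asyncio

    async def add(a, b):
        await asyncio.sleep(0)
        return a + b

    # One embedded loop, several top-level runs: the pattern a single
    # asyncio.run() call cannot express.
    with asyncio.Runner(debug=True) as runner:
        print(runner.run(add(1, 2)))
        print(runner.run(add(3, 4)))
        print(runner.get_loop())  # the same loop both calls ran on
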
diff --git a/contrib/tools/python3/Lib/asyncio/selector_events.py b/contrib/tools/python3/Lib/asyncio/selector_events.py
new file mode 100644
index 0000000000..790711f834
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/selector_events.py
@@ -0,0 +1,1321 @@
+"""Event loop using a selector and related classes.
+
+A selector is a "notify-when-ready" multiplexer. For a subclass which
+also includes support for signal handling, see the unix_events sub-module.
+"""
+
+__all__ = 'BaseSelectorEventLoop',
+
+import collections
+import errno
+import functools
+import itertools
+import os
+import selectors
+import socket
+import warnings
+import weakref
+try:
+ import ssl
+except ImportError: # pragma: no cover
+ ssl = None
+
+from . import base_events
+from . import constants
+from . import events
+from . import futures
+from . import protocols
+from . import sslproto
+from . import transports
+from . import trsock
+from .log import logger
+
+_HAS_SENDMSG = hasattr(socket.socket, 'sendmsg')
+
+if _HAS_SENDMSG:
+ try:
+ SC_IOV_MAX = os.sysconf('SC_IOV_MAX')
+ except OSError:
+ # Fallback to send
+ _HAS_SENDMSG = False
+
+def _test_selector_event(selector, fd, event):
+ # Test if the selector is monitoring 'event' events
+ # for the file descriptor 'fd'.
+ try:
+ key = selector.get_key(fd)
+ except KeyError:
+ return False
+ else:
+ return bool(key.events & event)
+
+
+class BaseSelectorEventLoop(base_events.BaseEventLoop):
+ """Selector event loop.
+
+ See events.EventLoop for API specification.
+ """
+
+ def __init__(self, selector=None):
+ super().__init__()
+
+ if selector is None:
+ selector = selectors.DefaultSelector()
+ logger.debug('Using selector: %s', selector.__class__.__name__)
+ self._selector = selector
+ self._make_self_pipe()
+ self._transports = weakref.WeakValueDictionary()
+
+ def _make_socket_transport(self, sock, protocol, waiter=None, *,
+ extra=None, server=None):
+ self._ensure_fd_no_transport(sock)
+ return _SelectorSocketTransport(self, sock, protocol, waiter,
+ extra, server)
+
+ def _make_ssl_transport(
+ self, rawsock, protocol, sslcontext, waiter=None,
+ *, server_side=False, server_hostname=None,
+ extra=None, server=None,
+ ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT,
+ ssl_shutdown_timeout=constants.SSL_SHUTDOWN_TIMEOUT,
+ ):
+ self._ensure_fd_no_transport(rawsock)
+ ssl_protocol = sslproto.SSLProtocol(
+ self, protocol, sslcontext, waiter,
+ server_side, server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout
+ )
+ _SelectorSocketTransport(self, rawsock, ssl_protocol,
+ extra=extra, server=server)
+ return ssl_protocol._app_transport
+
+ def _make_datagram_transport(self, sock, protocol,
+ address=None, waiter=None, extra=None):
+ self._ensure_fd_no_transport(sock)
+ return _SelectorDatagramTransport(self, sock, protocol,
+ address, waiter, extra)
+
+ def close(self):
+ if self.is_running():
+ raise RuntimeError("Cannot close a running event loop")
+ if self.is_closed():
+ return
+ self._close_self_pipe()
+ super().close()
+ if self._selector is not None:
+ self._selector.close()
+ self._selector = None
+
+ def _close_self_pipe(self):
+ self._remove_reader(self._ssock.fileno())
+ self._ssock.close()
+ self._ssock = None
+ self._csock.close()
+ self._csock = None
+ self._internal_fds -= 1
+
+ def _make_self_pipe(self):
+ # A self-socket, really. :-)
+ self._ssock, self._csock = socket.socketpair()
+ self._ssock.setblocking(False)
+ self._csock.setblocking(False)
+ self._internal_fds += 1
+ self._add_reader(self._ssock.fileno(), self._read_from_self)
+
+ def _process_self_data(self, data):
+ pass
+
+ def _read_from_self(self):
+ while True:
+ try:
+ data = self._ssock.recv(4096)
+ if not data:
+ break
+ self._process_self_data(data)
+ except InterruptedError:
+ continue
+ except BlockingIOError:
+ break
+
+ def _write_to_self(self):
+ # This may be called from a different thread, possibly after
+ # _close_self_pipe() has been called or even while it is
+ # running. Guard for self._csock being None or closed. When
+ # a socket is closed, send() raises OSError (with errno set to
+ # EBADF, but let's not rely on the exact error code).
+ csock = self._csock
+ if csock is None:
+ return
+
+ try:
+ csock.send(b'\0')
+ except OSError:
+ if self._debug:
+ logger.debug("Fail to write a null byte into the "
+ "self-pipe socket",
+ exc_info=True)
+
+ def _start_serving(self, protocol_factory, sock,
+ sslcontext=None, server=None, backlog=100,
+ ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT,
+ ssl_shutdown_timeout=constants.SSL_SHUTDOWN_TIMEOUT):
+ self._add_reader(sock.fileno(), self._accept_connection,
+ protocol_factory, sock, sslcontext, server, backlog,
+ ssl_handshake_timeout, ssl_shutdown_timeout)
+
+ def _accept_connection(
+ self, protocol_factory, sock,
+ sslcontext=None, server=None, backlog=100,
+ ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT,
+ ssl_shutdown_timeout=constants.SSL_SHUTDOWN_TIMEOUT):
+ # This method is only called once for each event loop tick where the
+ # listening socket has triggered an EVENT_READ. There may be multiple
+ # connections waiting for an .accept() so it is called in a loop.
+ # See https://bugs.python.org/issue27906 for more details.
+ for _ in range(backlog):
+ try:
+ conn, addr = sock.accept()
+ if self._debug:
+ logger.debug("%r got a new connection from %r: %r",
+ server, addr, conn)
+ conn.setblocking(False)
+ except (BlockingIOError, InterruptedError, ConnectionAbortedError):
+ # Early exit because the socket accept buffer is empty.
+ return None
+ except OSError as exc:
+ # There's nowhere to send the error, so just log it.
+ if exc.errno in (errno.EMFILE, errno.ENFILE,
+ errno.ENOBUFS, errno.ENOMEM):
+ # Some platforms (e.g. Linux) keep reporting the FD as
+ # ready, so we remove the read handler temporarily.
+ # We'll try again in a while.
+ self.call_exception_handler({
+ 'message': 'socket.accept() out of system resource',
+ 'exception': exc,
+ 'socket': trsock.TransportSocket(sock),
+ })
+ self._remove_reader(sock.fileno())
+ self.call_later(constants.ACCEPT_RETRY_DELAY,
+ self._start_serving,
+ protocol_factory, sock, sslcontext, server,
+ backlog, ssl_handshake_timeout,
+ ssl_shutdown_timeout)
+ else:
+ raise # The event loop will catch, log and ignore it.
+ else:
+ extra = {'peername': addr}
+ accept = self._accept_connection2(
+ protocol_factory, conn, extra, sslcontext, server,
+ ssl_handshake_timeout, ssl_shutdown_timeout)
+ self.create_task(accept)
+
+ async def _accept_connection2(
+ self, protocol_factory, conn, extra,
+ sslcontext=None, server=None,
+ ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT,
+ ssl_shutdown_timeout=constants.SSL_SHUTDOWN_TIMEOUT):
+ protocol = None
+ transport = None
+ try:
+ protocol = protocol_factory()
+ waiter = self.create_future()
+ if sslcontext:
+ transport = self._make_ssl_transport(
+ conn, protocol, sslcontext, waiter=waiter,
+ server_side=True, extra=extra, server=server,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ else:
+ transport = self._make_socket_transport(
+ conn, protocol, waiter=waiter, extra=extra,
+ server=server)
+
+ try:
+ await waiter
+ except BaseException:
+ transport.close()
+ # gh-109534: When an exception is raised by the SSLProtocol object the
+ # exception set in this future can keep the protocol object alive and
+ # cause a reference cycle.
+ waiter = None
+ raise
+ # It's now up to the protocol to handle the connection.
+
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ if self._debug:
+ context = {
+ 'message':
+ 'Error on transport creation for incoming connection',
+ 'exception': exc,
+ }
+ if protocol is not None:
+ context['protocol'] = protocol
+ if transport is not None:
+ context['transport'] = transport
+ self.call_exception_handler(context)
+
+ def _ensure_fd_no_transport(self, fd):
+ fileno = fd
+ if not isinstance(fileno, int):
+ try:
+ fileno = int(fileno.fileno())
+ except (AttributeError, TypeError, ValueError):
+ # This code matches selectors._fileobj_to_fd function.
+ raise ValueError(f"Invalid file object: {fd!r}") from None
+ try:
+ transport = self._transports[fileno]
+ except KeyError:
+ pass
+ else:
+ if not transport.is_closing():
+ raise RuntimeError(
+ f'File descriptor {fd!r} is used by transport '
+ f'{transport!r}')
+
+ def _add_reader(self, fd, callback, *args):
+ self._check_closed()
+ handle = events.Handle(callback, args, self, None)
+ try:
+ key = self._selector.get_key(fd)
+ except KeyError:
+ self._selector.register(fd, selectors.EVENT_READ,
+ (handle, None))
+ else:
+ mask, (reader, writer) = key.events, key.data
+ self._selector.modify(fd, mask | selectors.EVENT_READ,
+ (handle, writer))
+ if reader is not None:
+ reader.cancel()
+ return handle
+
+ def _remove_reader(self, fd):
+ if self.is_closed():
+ return False
+ try:
+ key = self._selector.get_key(fd)
+ except KeyError:
+ return False
+ else:
+ mask, (reader, writer) = key.events, key.data
+ mask &= ~selectors.EVENT_READ
+ if not mask:
+ self._selector.unregister(fd)
+ else:
+ self._selector.modify(fd, mask, (None, writer))
+
+ if reader is not None:
+ reader.cancel()
+ return True
+ else:
+ return False
+
+ def _add_writer(self, fd, callback, *args):
+ self._check_closed()
+ handle = events.Handle(callback, args, self, None)
+ try:
+ key = self._selector.get_key(fd)
+ except KeyError:
+ self._selector.register(fd, selectors.EVENT_WRITE,
+ (None, handle))
+ else:
+ mask, (reader, writer) = key.events, key.data
+ self._selector.modify(fd, mask | selectors.EVENT_WRITE,
+ (reader, handle))
+ if writer is not None:
+ writer.cancel()
+ return handle
+
+ def _remove_writer(self, fd):
+ """Remove a writer callback."""
+ if self.is_closed():
+ return False
+ try:
+ key = self._selector.get_key(fd)
+ except KeyError:
+ return False
+ else:
+ mask, (reader, writer) = key.events, key.data
+ # Remove both writer and connector.
+ mask &= ~selectors.EVENT_WRITE
+ if not mask:
+ self._selector.unregister(fd)
+ else:
+ self._selector.modify(fd, mask, (reader, None))
+
+ if writer is not None:
+ writer.cancel()
+ return True
+ else:
+ return False
+
+ def add_reader(self, fd, callback, *args):
+ """Add a reader callback."""
+ self._ensure_fd_no_transport(fd)
+ self._add_reader(fd, callback, *args)
+
+ def remove_reader(self, fd):
+ """Remove a reader callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._remove_reader(fd)
+
+ def add_writer(self, fd, callback, *args):
+ """Add a writer callback.."""
+ self._ensure_fd_no_transport(fd)
+ self._add_writer(fd, callback, *args)
+
+ def remove_writer(self, fd):
+ """Remove a writer callback."""
+ self._ensure_fd_no_transport(fd)
+ return self._remove_writer(fd)
+
+ async def sock_recv(self, sock, n):
+ """Receive data from the socket.
+
+ The return value is a bytes object representing the data received.
+ The maximum amount of data to be received at once is
+ specified by n.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ try:
+ return sock.recv(n)
+ except (BlockingIOError, InterruptedError):
+ pass
+ fut = self.create_future()
+ fd = sock.fileno()
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_recv, fut, sock, n)
+ fut.add_done_callback(
+ functools.partial(self._sock_read_done, fd, handle=handle))
+ return await fut
+
+ def _sock_read_done(self, fd, fut, handle=None):
+ if handle is None or not handle.cancelled():
+ self.remove_reader(fd)
+
+ def _sock_recv(self, fut, sock, n):
+ # _sock_recv() can add itself as an I/O callback if the operation can't
+ # be done immediately. Don't use it directly, call sock_recv().
+ if fut.done():
+ return
+ try:
+ data = sock.recv(n)
+ except (BlockingIOError, InterruptedError):
+ return # try again next time
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(data)
+
+ async def sock_recv_into(self, sock, buf):
+ """Receive data from the socket.
+
+ The received data is written into *buf* (a writable buffer).
+ The return value is the number of bytes written.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ try:
+ return sock.recv_into(buf)
+ except (BlockingIOError, InterruptedError):
+ pass
+ fut = self.create_future()
+ fd = sock.fileno()
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_recv_into, fut, sock, buf)
+ fut.add_done_callback(
+ functools.partial(self._sock_read_done, fd, handle=handle))
+ return await fut
+
+ def _sock_recv_into(self, fut, sock, buf):
+ # _sock_recv_into() can add itself as an I/O callback if the operation
+ # can't be done immediately. Don't use it directly, call
+ # sock_recv_into().
+ if fut.done():
+ return
+ try:
+ nbytes = sock.recv_into(buf)
+ except (BlockingIOError, InterruptedError):
+ return # try again next time
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(nbytes)
+
+ async def sock_recvfrom(self, sock, bufsize):
+ """Receive a datagram from a datagram socket.
+
+ The return value is a tuple of (bytes, address) representing the
+ datagram received and the address it came from.
+ The maximum amount of data to be received at once is
+ specified by bufsize.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ try:
+ return sock.recvfrom(bufsize)
+ except (BlockingIOError, InterruptedError):
+ pass
+ fut = self.create_future()
+ fd = sock.fileno()
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_recvfrom, fut, sock, bufsize)
+ fut.add_done_callback(
+ functools.partial(self._sock_read_done, fd, handle=handle))
+ return await fut
+
+ def _sock_recvfrom(self, fut, sock, bufsize):
+ # _sock_recvfrom() can add itself as an I/O callback if the operation
+ # can't be done immediately. Don't use it directly, call
+ # sock_recvfrom().
+ if fut.done():
+ return
+ try:
+ result = sock.recvfrom(bufsize)
+ except (BlockingIOError, InterruptedError):
+ return # try again next time
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(result)
+
+ async def sock_recvfrom_into(self, sock, buf, nbytes=0):
+ """Receive data from the socket.
+
+ The received data is written into *buf* (a writable buffer).
+ The return value is a tuple of (number of bytes written, address).
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ if not nbytes:
+ nbytes = len(buf)
+
+ try:
+ return sock.recvfrom_into(buf, nbytes)
+ except (BlockingIOError, InterruptedError):
+ pass
+ fut = self.create_future()
+ fd = sock.fileno()
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_recvfrom_into, fut, sock, buf,
+ nbytes)
+ fut.add_done_callback(
+ functools.partial(self._sock_read_done, fd, handle=handle))
+ return await fut
+
+ def _sock_recvfrom_into(self, fut, sock, buf, bufsize):
+ # _sock_recvfrom_into() can add itself as an I/O callback if the
+ # operation can't be done immediately. Don't use it directly, call
+ # sock_recvfrom_into().
+ if fut.done():
+ return
+ try:
+ result = sock.recvfrom_into(buf, bufsize)
+ except (BlockingIOError, InterruptedError):
+ return # try again next time
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(result)
+
+ async def sock_sendall(self, sock, data):
+ """Send data to the socket.
+
+ The socket must be connected to a remote socket. This method
+ continues to send data until either all of it has been sent or an
+ error occurs. None is returned on success. On error, an exception is
+ raised, and there is no way to determine how much data, if any, was
+ successfully processed by the receiving end of the connection.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ try:
+ n = sock.send(data)
+ except (BlockingIOError, InterruptedError):
+ n = 0
+
+ if n == len(data):
+ # all data sent
+ return
+
+ fut = self.create_future()
+ fd = sock.fileno()
+ self._ensure_fd_no_transport(fd)
+ # use a trick with a list in a closure to store mutable state
+ handle = self._add_writer(fd, self._sock_sendall, fut, sock,
+ memoryview(data), [n])
+ fut.add_done_callback(
+ functools.partial(self._sock_write_done, fd, handle=handle))
+ return await fut
+
+ def _sock_sendall(self, fut, sock, view, pos):
+ if fut.done():
+ # Future cancellation can be scheduled on previous loop iteration
+ return
+ start = pos[0]
+ try:
+ n = sock.send(view[start:])
+ except (BlockingIOError, InterruptedError):
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ return
+
+ start += n
+
+ if start == len(view):
+ fut.set_result(None)
+ else:
+ pos[0] = start
+
+ async def sock_sendto(self, sock, data, address):
+ """Send data to the socket.
+
+ The socket must be connected to a remote socket. This method continues
+ to send data from data until either all data has been sent or an
+ error occurs. None is returned on success. On error, an exception is
+ raised, and there is no way to determine how much data, if any, was
+ successfully processed by the receiving end of the connection.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ try:
+ return sock.sendto(data, address)
+ except (BlockingIOError, InterruptedError):
+ pass
+
+ fut = self.create_future()
+ fd = sock.fileno()
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_writer(fd, self._sock_sendto, fut, sock, data,
+ address)
+ fut.add_done_callback(
+ functools.partial(self._sock_write_done, fd, handle=handle))
+ return await fut
+
+ def _sock_sendto(self, fut, sock, data, address):
+ if fut.done():
+ # Future cancellation can be scheduled on previous loop iteration
+ return
+ try:
+ n = sock.sendto(data, 0, address)
+ except (BlockingIOError, InterruptedError):
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(n)
+
+ async def sock_connect(self, sock, address):
+ """Connect to a remote socket at address.
+
+ This method is a coroutine.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+
+ if sock.family == socket.AF_INET or (
+ base_events._HAS_IPv6 and sock.family == socket.AF_INET6):
+ resolved = await self._ensure_resolved(
+ address, family=sock.family, type=sock.type, proto=sock.proto,
+ loop=self,
+ )
+ _, _, _, _, address = resolved[0]
+
+ fut = self.create_future()
+ self._sock_connect(fut, sock, address)
+ try:
+ return await fut
+ finally:
+ # Needed to break cycles when an exception occurs.
+ fut = None
+
+ def _sock_connect(self, fut, sock, address):
+ fd = sock.fileno()
+ try:
+ sock.connect(address)
+ except (BlockingIOError, InterruptedError):
+ # Issue #23618: When the C function connect() fails with EINTR, the
+ # connection runs in background. We have to wait until the socket
+ # becomes writable to be notified when the connection succeed or
+ # fails.
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_writer(
+ fd, self._sock_connect_cb, fut, sock, address)
+ fut.add_done_callback(
+ functools.partial(self._sock_write_done, fd, handle=handle))
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(None)
+ finally:
+ fut = None
+
+ def _sock_write_done(self, fd, fut, handle=None):
+ if handle is None or not handle.cancelled():
+ self.remove_writer(fd)
+
+ def _sock_connect_cb(self, fut, sock, address):
+ if fut.done():
+ return
+
+ try:
+ err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ # Jump to any except clause below.
+ raise OSError(err, f'Connect call failed {address}')
+ except (BlockingIOError, InterruptedError):
+ # socket is still registered, the callback will be retried later
+ pass
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result(None)
+ finally:
+ fut = None
+
+ async def sock_accept(self, sock):
+ """Accept a connection.
+
+ The socket must be bound to an address and listening for connections.
+ The return value is a pair (conn, address) where conn is a new socket
+ object usable to send and receive data on the connection, and address
+ is the address bound to the socket on the other end of the connection.
+ """
+ base_events._check_ssl_socket(sock)
+ if self._debug and sock.gettimeout() != 0:
+ raise ValueError("the socket must be non-blocking")
+ fut = self.create_future()
+ self._sock_accept(fut, sock)
+ return await fut
+
+ def _sock_accept(self, fut, sock):
+ fd = sock.fileno()
+ try:
+ conn, address = sock.accept()
+ conn.setblocking(False)
+ except (BlockingIOError, InterruptedError):
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_accept, fut, sock)
+ fut.add_done_callback(
+ functools.partial(self._sock_read_done, fd, handle=handle))
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ fut.set_exception(exc)
+ else:
+ fut.set_result((conn, address))
+
+ async def _sendfile_native(self, transp, file, offset, count):
+ del self._transports[transp._sock_fd]
+ resume_reading = transp.is_reading()
+ transp.pause_reading()
+ await transp._make_empty_waiter()
+ try:
+ return await self.sock_sendfile(transp._sock, file, offset, count,
+ fallback=False)
+ finally:
+ transp._reset_empty_waiter()
+ if resume_reading:
+ transp.resume_reading()
+ self._transports[transp._sock_fd] = transp
+
+ def _process_events(self, event_list):
+ for key, mask in event_list:
+ fileobj, (reader, writer) = key.fileobj, key.data
+ if mask & selectors.EVENT_READ and reader is not None:
+ if reader._cancelled:
+ self._remove_reader(fileobj)
+ else:
+ self._add_callback(reader)
+ if mask & selectors.EVENT_WRITE and writer is not None:
+ if writer._cancelled:
+ self._remove_writer(fileobj)
+ else:
+ self._add_callback(writer)
+
+ def _stop_serving(self, sock):
+ self._remove_reader(sock.fileno())
+ sock.close()
+
+
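
The add_reader()/add_writer() methods above are the low-level FD-watcher API that the selector transports below are built on. A self-contained sketch using a socketpair as the watched descriptor (works on selector-based loops; the proactor loop earlier in this diff does not support add_reader for arbitrary sockets):

    import asyncio
    import socket

    async def main():
        loop = asyncio.get_running_loop()
        rsock, wsock = socket.socketpair()
        ready = loop.create_future()

        def on_readable():
            # Runs in the event loop when rsock becomes readable.
            ready.set_result(rsock.recv(1024))

        loop.add_reader(rsock.fileno(), on_readable)
        wsock.send(b'hello')
        print(await ready)
        loop.remove_reader(rsock.fileno())  # always unregister before closing
        rsock.close()
        wsock.close()

    asyncio.run(main())
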
+class _SelectorTransport(transports._FlowControlMixin,
+ transports.Transport):
+
+ max_size = 256 * 1024 # Buffer size passed to recv().
+
+ # Attribute used in the destructor: it must be set even if the constructor
+ # is not called (see _SelectorSslTransport which may start by raising an
+ # exception)
+ _sock = None
+
+ def __init__(self, loop, sock, protocol, extra=None, server=None):
+ super().__init__(extra, loop)
+ self._extra['socket'] = trsock.TransportSocket(sock)
+ try:
+ self._extra['sockname'] = sock.getsockname()
+ except OSError:
+ self._extra['sockname'] = None
+ if 'peername' not in self._extra:
+ try:
+ self._extra['peername'] = sock.getpeername()
+ except socket.error:
+ self._extra['peername'] = None
+ self._sock = sock
+ self._sock_fd = sock.fileno()
+
+ self._protocol_connected = False
+ self.set_protocol(protocol)
+
+ self._server = server
+ self._buffer = collections.deque()
+ self._conn_lost = 0 # Set when call to connection_lost scheduled.
+ self._closing = False # Set when close() called.
+ self._paused = False # Set when pause_reading() called
+
+ if self._server is not None:
+ self._server._attach()
+ loop._transports[self._sock_fd] = self
+
+ def __repr__(self):
+ info = [self.__class__.__name__]
+ if self._sock is None:
+ info.append('closed')
+ elif self._closing:
+ info.append('closing')
+ info.append(f'fd={self._sock_fd}')
+ # test if the transport was closed
+ if self._loop is not None and not self._loop.is_closed():
+ polling = _test_selector_event(self._loop._selector,
+ self._sock_fd, selectors.EVENT_READ)
+ if polling:
+ info.append('read=polling')
+ else:
+ info.append('read=idle')
+
+ polling = _test_selector_event(self._loop._selector,
+ self._sock_fd,
+ selectors.EVENT_WRITE)
+ if polling:
+ state = 'polling'
+ else:
+ state = 'idle'
+
+ bufsize = self.get_write_buffer_size()
+ info.append(f'write=<{state}, bufsize={bufsize}>')
+ return '<{}>'.format(' '.join(info))
+
+ def abort(self):
+ self._force_close(None)
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+ self._protocol_connected = True
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closing
+
+ def is_reading(self):
+ return not self.is_closing() and not self._paused
+
+ def pause_reading(self):
+ if not self.is_reading():
+ return
+ self._paused = True
+ self._loop._remove_reader(self._sock_fd)
+ if self._loop.get_debug():
+ logger.debug("%r pauses reading", self)
+
+ def resume_reading(self):
+ if self._closing or not self._paused:
+ return
+ self._paused = False
+ self._add_reader(self._sock_fd, self._read_ready)
+ if self._loop.get_debug():
+ logger.debug("%r resumes reading", self)
+
+ def close(self):
+ if self._closing:
+ return
+ self._closing = True
+ self._loop._remove_reader(self._sock_fd)
+ if not self._buffer:
+ self._conn_lost += 1
+ self._loop._remove_writer(self._sock_fd)
+ self._loop.call_soon(self._call_connection_lost, None)
+
+ def __del__(self, _warn=warnings.warn):
+ if self._sock is not None:
+ _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+ self._sock.close()
+
+ def _fatal_error(self, exc, message='Fatal error on transport'):
+ # Should be called from exception handler only.
+ if isinstance(exc, OSError):
+ if self._loop.get_debug():
+ logger.debug("%r: %s", self, message, exc_info=True)
+ else:
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+ self._force_close(exc)
+
+ def _force_close(self, exc):
+ if self._conn_lost:
+ return
+ if self._buffer:
+ self._buffer.clear()
+ self._loop._remove_writer(self._sock_fd)
+ if not self._closing:
+ self._closing = True
+ self._loop._remove_reader(self._sock_fd)
+ self._conn_lost += 1
+ self._loop.call_soon(self._call_connection_lost, exc)
+
+ def _call_connection_lost(self, exc):
+ try:
+ if self._protocol_connected:
+ self._protocol.connection_lost(exc)
+ finally:
+ self._sock.close()
+ self._sock = None
+ self._protocol = None
+ self._loop = None
+ server = self._server
+ if server is not None:
+ server._detach()
+ self._server = None
+
+ def get_write_buffer_size(self):
+ return sum(map(len, self._buffer))
+
+ def _add_reader(self, fd, callback, *args):
+ if not self.is_reading():
+ return
+ self._loop._add_reader(fd, callback, *args)
+
+
+class _SelectorSocketTransport(_SelectorTransport):
+
+ _start_tls_compatible = True
+ _sendfile_compatible = constants._SendfileMode.TRY_NATIVE
+
+ def __init__(self, loop, sock, protocol, waiter=None,
+ extra=None, server=None):
+
+ self._read_ready_cb = None
+ super().__init__(loop, sock, protocol, extra, server)
+ self._eof = False
+ self._empty_waiter = None
+ if _HAS_SENDMSG:
+ self._write_ready = self._write_sendmsg
+ else:
+ self._write_ready = self._write_send
+ # Disable the Nagle algorithm -- small writes will be
+ # sent without waiting for the TCP ACK. This generally
+ # decreases the latency (in some cases significantly).
+ base_events._set_nodelay(self._sock)
+
+ self._loop.call_soon(self._protocol.connection_made, self)
+ # only start reading when connection_made() has been called
+ self._loop.call_soon(self._add_reader,
+ self._sock_fd, self._read_ready)
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(futures._set_result_unless_cancelled,
+ waiter, None)
+
+ def set_protocol(self, protocol):
+ if isinstance(protocol, protocols.BufferedProtocol):
+ self._read_ready_cb = self._read_ready__get_buffer
+ else:
+ self._read_ready_cb = self._read_ready__data_received
+
+ super().set_protocol(protocol)
+
+ def _read_ready(self):
+ self._read_ready_cb()
+
+ def _read_ready__get_buffer(self):
+ if self._conn_lost:
+ return
+
+ try:
+ buf = self._protocol.get_buffer(-1)
+ if not len(buf):
+ raise RuntimeError('get_buffer() returned an empty buffer')
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal error: protocol.get_buffer() call failed.')
+ return
+
+ try:
+ nbytes = self._sock.recv_into(buf)
+ except (BlockingIOError, InterruptedError):
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(exc, 'Fatal read error on socket transport')
+ return
+
+ if not nbytes:
+ self._read_ready__on_eof()
+ return
+
+ try:
+ self._protocol.buffer_updated(nbytes)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal error: protocol.buffer_updated() call failed.')
+
+ def _read_ready__data_received(self):
+ if self._conn_lost:
+ return
+ try:
+ data = self._sock.recv(self.max_size)
+ except (BlockingIOError, InterruptedError):
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(exc, 'Fatal read error on socket transport')
+ return
+
+ if not data:
+ self._read_ready__on_eof()
+ return
+
+ try:
+ self._protocol.data_received(data)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal error: protocol.data_received() call failed.')
+
+ def _read_ready__on_eof(self):
+ if self._loop.get_debug():
+ logger.debug("%r received EOF", self)
+
+ try:
+ keep_open = self._protocol.eof_received()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal error: protocol.eof_received() call failed.')
+ return
+
+ if keep_open:
+ # We're keeping the connection open so the
+ # protocol can write more, but we still can't
+ # receive more, so remove the reader callback.
+ self._loop._remove_reader(self._sock_fd)
+ else:
+ self.close()
+
+ def write(self, data):
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError(f'data argument must be a bytes-like object, '
+ f'not {type(data).__name__!r}')
+ if self._eof:
+ raise RuntimeError('Cannot call write() after write_eof()')
+ if self._empty_waiter is not None:
+ raise RuntimeError('unable to write; sendfile is in progress')
+ if not data:
+ return
+
+ if self._conn_lost:
+ if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+ logger.warning('socket.send() raised exception.')
+ self._conn_lost += 1
+ return
+
+ if not self._buffer:
+ # Optimization: try to send now.
+ try:
+ n = self._sock.send(data)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(exc, 'Fatal write error on socket transport')
+ return
+ else:
+ data = memoryview(data)[n:]
+ if not data:
+ return
+ # Not all was written; register write handler.
+ self._loop._add_writer(self._sock_fd, self._write_ready)
+
+ # Add it to the buffer.
+ self._buffer.append(data)
+ self._maybe_pause_protocol()
+
+ def _get_sendmsg_buffer(self):
+ return itertools.islice(self._buffer, SC_IOV_MAX)
+
+ def _write_sendmsg(self):
+ assert self._buffer, 'Data should not be empty'
+ if self._conn_lost:
+ return
+ try:
+ nbytes = self._sock.sendmsg(self._get_sendmsg_buffer())
+ self._adjust_leftover_buffer(nbytes)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._loop._remove_writer(self._sock_fd)
+ self._buffer.clear()
+ self._fatal_error(exc, 'Fatal write error on socket transport')
+ if self._empty_waiter is not None:
+ self._empty_waiter.set_exception(exc)
+ else:
+ self._maybe_resume_protocol() # May append to buffer.
+ if not self._buffer:
+ self._loop._remove_writer(self._sock_fd)
+ if self._empty_waiter is not None:
+ self._empty_waiter.set_result(None)
+ if self._closing:
+ self._call_connection_lost(None)
+ elif self._eof:
+ self._sock.shutdown(socket.SHUT_WR)
+
+ def _adjust_leftover_buffer(self, nbytes: int) -> None:
+ buffer = self._buffer
+ while nbytes:
+ b = buffer.popleft()
+ b_len = len(b)
+ if b_len <= nbytes:
+ nbytes -= b_len
+ else:
+ buffer.appendleft(b[nbytes:])
+ break
+
+ def _write_send(self):
+ assert self._buffer, 'Data should not be empty'
+ if self._conn_lost:
+ return
+ try:
+ buffer = self._buffer.popleft()
+ n = self._sock.send(buffer)
+ if n != len(buffer):
+ # Not all data was written
+ self._buffer.appendleft(buffer[n:])
+ except (BlockingIOError, InterruptedError):
+ pass
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._loop._remove_writer(self._sock_fd)
+ self._buffer.clear()
+ self._fatal_error(exc, 'Fatal write error on socket transport')
+ if self._empty_waiter is not None:
+ self._empty_waiter.set_exception(exc)
+ else:
+ self._maybe_resume_protocol() # May append to buffer.
+ if not self._buffer:
+ self._loop._remove_writer(self._sock_fd)
+ if self._empty_waiter is not None:
+ self._empty_waiter.set_result(None)
+ if self._closing:
+ self._call_connection_lost(None)
+ elif self._eof:
+ self._sock.shutdown(socket.SHUT_WR)
+
+ def write_eof(self):
+ if self._closing or self._eof:
+ return
+ self._eof = True
+ if not self._buffer:
+ self._sock.shutdown(socket.SHUT_WR)
+
+ def writelines(self, list_of_data):
+ if self._eof:
+ raise RuntimeError('Cannot call writelines() after write_eof()')
+ if self._empty_waiter is not None:
+ raise RuntimeError('unable to writelines; sendfile is in progress')
+ if not list_of_data:
+ return
+ self._buffer.extend([memoryview(data) for data in list_of_data])
+ self._write_ready()
+ # If the entire buffer couldn't be written, register a write handler
+ if self._buffer:
+ self._loop._add_writer(self._sock_fd, self._write_ready)
+
+ def can_write_eof(self):
+ return True
+
+ def _call_connection_lost(self, exc):
+ super()._call_connection_lost(exc)
+ if self._empty_waiter is not None:
+ self._empty_waiter.set_exception(
+ ConnectionError("Connection is closed by peer"))
+
+ def _make_empty_waiter(self):
+ if self._empty_waiter is not None:
+ raise RuntimeError("Empty waiter is already set")
+ self._empty_waiter = self._loop.create_future()
+ if not self._buffer:
+ self._empty_waiter.set_result(None)
+ return self._empty_waiter
+
+ def _reset_empty_waiter(self):
+ self._empty_waiter = None
+
+ def close(self):
+ self._read_ready_cb = None
+ self._write_ready = None
+ super().close()
+
+
+class _SelectorDatagramTransport(_SelectorTransport, transports.DatagramTransport):
+
+ _buffer_factory = collections.deque
+
+ def __init__(self, loop, sock, protocol, address=None,
+ waiter=None, extra=None):
+ super().__init__(loop, sock, protocol, extra)
+ self._address = address
+ self._buffer_size = 0
+ self._loop.call_soon(self._protocol.connection_made, self)
+ # only start reading when connection_made() has been called
+ self._loop.call_soon(self._add_reader,
+ self._sock_fd, self._read_ready)
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(futures._set_result_unless_cancelled,
+ waiter, None)
+
+ def get_write_buffer_size(self):
+ return self._buffer_size
+
+ def _read_ready(self):
+ if self._conn_lost:
+ return
+ try:
+ data, addr = self._sock.recvfrom(self.max_size)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except OSError as exc:
+ self._protocol.error_received(exc)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(exc, 'Fatal read error on datagram transport')
+ else:
+ self._protocol.datagram_received(data, addr)
+
+ def sendto(self, data, addr=None):
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError(f'data argument must be a bytes-like object, '
+ f'not {type(data).__name__!r}')
+ if not data:
+ return
+
+ if self._address:
+ if addr not in (None, self._address):
+ raise ValueError(
+ f'Invalid address: must be None or {self._address}')
+ addr = self._address
+
+ if self._conn_lost and self._address:
+ if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+ logger.warning('socket.send() raised exception.')
+ self._conn_lost += 1
+ return
+
+ if not self._buffer:
+ # Attempt to send it right away first.
+ try:
+ if self._extra['peername']:
+ self._sock.send(data)
+ else:
+ self._sock.sendto(data, addr)
+ return
+ except (BlockingIOError, InterruptedError):
+ self._loop._add_writer(self._sock_fd, self._sendto_ready)
+ except OSError as exc:
+ self._protocol.error_received(exc)
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal write error on datagram transport')
+ return
+
+ # Ensure that what we buffer is immutable.
+ self._buffer.append((bytes(data), addr))
+ self._buffer_size += len(data)
+ self._maybe_pause_protocol()
+
+ def _sendto_ready(self):
+ while self._buffer:
+ data, addr = self._buffer.popleft()
+ self._buffer_size -= len(data)
+ try:
+ if self._extra['peername']:
+ self._sock.send(data)
+ else:
+ self._sock.sendto(data, addr)
+ except (BlockingIOError, InterruptedError):
+ self._buffer.appendleft((data, addr)) # Try again later.
+ self._buffer_size += len(data)
+ break
+ except OSError as exc:
+ self._protocol.error_received(exc)
+ return
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._fatal_error(
+ exc, 'Fatal write error on datagram transport')
+ return
+
+ self._maybe_resume_protocol() # May append to buffer.
+ if not self._buffer:
+ self._loop._remove_writer(self._sock_fd)
+ if self._closing:
+ self._call_connection_lost(None)
diff --git a/contrib/tools/python3/Lib/asyncio/sslproto.py b/contrib/tools/python3/Lib/asyncio/sslproto.py
new file mode 100644
index 0000000000..e51669a2ab
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/sslproto.py
@@ -0,0 +1,926 @@
+# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
+# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
+# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc. http://magic.io
+
+import collections
+import enum
+import warnings
+try:
+ import ssl
+except ImportError: # pragma: no cover
+ ssl = None
+
+from . import constants
+from . import exceptions
+from . import protocols
+from . import transports
+from .log import logger
+
+if ssl is not None:
+ SSLAgainErrors = (ssl.SSLWantReadError, ssl.SSLSyscallError)
+
+
+class SSLProtocolState(enum.Enum):
+ UNWRAPPED = "UNWRAPPED"
+ DO_HANDSHAKE = "DO_HANDSHAKE"
+ WRAPPED = "WRAPPED"
+ FLUSHING = "FLUSHING"
+ SHUTDOWN = "SHUTDOWN"
+
+
+class AppProtocolState(enum.Enum):
+    # This tracks the state of the app protocol (https://git.io/fj59P):
+ #
+ # INIT -cm-> CON_MADE [-dr*->] [-er-> EOF?] -cl-> CON_LOST
+ #
+ # * cm: connection_made()
+ # * dr: data_received()
+ # * er: eof_received()
+ # * cl: connection_lost()
+
+ STATE_INIT = "STATE_INIT"
+ STATE_CON_MADE = "STATE_CON_MADE"
+ STATE_EOF = "STATE_EOF"
+ STATE_CON_LOST = "STATE_CON_LOST"
+
+
+def _create_transport_context(server_side, server_hostname):
+ if server_side:
+ raise ValueError('Server side SSL needs a valid SSLContext')
+
+ # Client side may pass ssl=True to use a default
+ # context; in that case the sslcontext passed is None.
+ # The default is secure for client connections.
+ # Python 3.4+: use up-to-date strong settings.
+ sslcontext = ssl.create_default_context()
+ if not server_hostname:
+ sslcontext.check_hostname = False
+ return sslcontext
+
+
+def add_flowcontrol_defaults(high, low, kb):
+ if high is None:
+ if low is None:
+ hi = kb * 1024
+ else:
+ lo = low
+ hi = 4 * lo
+ else:
+ hi = high
+ if low is None:
+ lo = hi // 4
+ else:
+ lo = low
+
+ if not hi >= lo >= 0:
+ raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
+ (hi, lo))
+
+ return hi, lo
+
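For reference, a quick sketch of how these defaults resolve. The `kb=64` value is illustrative; the real callers pass constants from `asyncio.constants`.

```python
# Illustrative values only.
hi, lo = add_flowcontrol_defaults(None, None, kb=64)
assert (hi, lo) == (64 * 1024, 16 * 1024)   # low-water defaults to high // 4

hi, lo = add_flowcontrol_defaults(None, 1024, kb=64)
assert (hi, lo) == (4 * 1024, 1024)         # high-water defaults to 4 * low
```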
+
+class _SSLProtocolTransport(transports._FlowControlMixin,
+ transports.Transport):
+
+ _start_tls_compatible = True
+ _sendfile_compatible = constants._SendfileMode.FALLBACK
+
+ def __init__(self, loop, ssl_protocol):
+ self._loop = loop
+ self._ssl_protocol = ssl_protocol
+ self._closed = False
+
+ def get_extra_info(self, name, default=None):
+ """Get optional transport information."""
+ return self._ssl_protocol._get_extra_info(name, default)
+
+ def set_protocol(self, protocol):
+ self._ssl_protocol._set_app_protocol(protocol)
+
+ def get_protocol(self):
+ return self._ssl_protocol._app_protocol
+
+ def is_closing(self):
+ return self._closed
+
+ def close(self):
+ """Close the transport.
+
+ Buffered data will be flushed asynchronously. No more data
+ will be received. After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be called
+ with None as its argument.
+ """
+ if not self._closed:
+ self._closed = True
+ self._ssl_protocol._start_shutdown()
+ else:
+ self._ssl_protocol = None
+
+ def __del__(self, _warnings=warnings):
+ if not self._closed:
+ self._closed = True
+ _warnings.warn(
+ "unclosed transport <asyncio._SSLProtocolTransport "
+ "object>", ResourceWarning)
+
+ def is_reading(self):
+ return not self._ssl_protocol._app_reading_paused
+
+ def pause_reading(self):
+ """Pause the receiving end.
+
+ No data will be passed to the protocol's data_received()
+ method until resume_reading() is called.
+ """
+ self._ssl_protocol._pause_reading()
+
+ def resume_reading(self):
+ """Resume the receiving end.
+
+ Data received will once again be passed to the protocol's
+ data_received() method.
+ """
+ self._ssl_protocol._resume_reading()
+
+ def set_write_buffer_limits(self, high=None, low=None):
+ """Set the high- and low-water limits for write flow control.
+
+ These two values control when to call the protocol's
+ pause_writing() and resume_writing() methods. If specified,
+ the low-water limit must be less than or equal to the
+ high-water limit. Neither value can be negative.
+
+ The defaults are implementation-specific. If only the
+ high-water limit is given, the low-water limit defaults to an
+ implementation-specific value less than or equal to the
+ high-water limit. Setting high to zero forces low to zero as
+ well, and causes pause_writing() to be called whenever the
+ buffer becomes non-empty. Setting low to zero causes
+ resume_writing() to be called only once the buffer is empty.
+ Use of zero for either limit is generally sub-optimal as it
+ reduces opportunities for doing I/O and computation
+ concurrently.
+ """
+ self._ssl_protocol._set_write_buffer_limits(high, low)
+ self._ssl_protocol._control_app_writing()
+
+ def get_write_buffer_limits(self):
+ return (self._ssl_protocol._outgoing_low_water,
+ self._ssl_protocol._outgoing_high_water)
+
+ def get_write_buffer_size(self):
+ """Return the current size of the write buffers."""
+ return self._ssl_protocol._get_write_buffer_size()
+
+ def set_read_buffer_limits(self, high=None, low=None):
+ """Set the high- and low-water limits for read flow control.
+
+ These two values control when to call the upstream transport's
+ pause_reading() and resume_reading() methods. If specified,
+ the low-water limit must be less than or equal to the
+ high-water limit. Neither value can be negative.
+
+ The defaults are implementation-specific. If only the
+ high-water limit is given, the low-water limit defaults to an
+ implementation-specific value less than or equal to the
+ high-water limit. Setting high to zero forces low to zero as
+ well, and causes pause_reading() to be called whenever the
+ buffer becomes non-empty. Setting low to zero causes
+ resume_reading() to be called only once the buffer is empty.
+ Use of zero for either limit is generally sub-optimal as it
+ reduces opportunities for doing I/O and computation
+ concurrently.
+ """
+ self._ssl_protocol._set_read_buffer_limits(high, low)
+ self._ssl_protocol._control_ssl_reading()
+
+ def get_read_buffer_limits(self):
+ return (self._ssl_protocol._incoming_low_water,
+ self._ssl_protocol._incoming_high_water)
+
+ def get_read_buffer_size(self):
+ """Return the current size of the read buffer."""
+ return self._ssl_protocol._get_read_buffer_size()
+
+ @property
+ def _protocol_paused(self):
+ # Required for sendfile fallback pause_writing/resume_writing logic
+ return self._ssl_protocol._app_writing_paused
+
+ def write(self, data):
+ """Write some data bytes to the transport.
+
+ This does not block; it buffers the data and arranges for it
+ to be sent out asynchronously.
+ """
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError(f"data: expecting a bytes-like instance, "
+ f"got {type(data).__name__}")
+ if not data:
+ return
+ self._ssl_protocol._write_appdata((data,))
+
+ def writelines(self, list_of_data):
+ """Write a list (or any iterable) of data bytes to the transport.
+
+ The default implementation concatenates the arguments and
+ calls write() on the result.
+ """
+ self._ssl_protocol._write_appdata(list_of_data)
+
+ def write_eof(self):
+ """Close the write end after flushing buffered data.
+
+ This raises :exc:`NotImplementedError` right now.
+ """
+ raise NotImplementedError
+
+ def can_write_eof(self):
+ """Return True if this transport supports write_eof(), False if not."""
+ return False
+
+ def abort(self):
+ """Close the transport immediately.
+
+ Buffered data will be lost. No more data will be received.
+ The protocol's connection_lost() method will (eventually) be
+ called with None as its argument.
+ """
+ self._force_close(None)
+
+ def _force_close(self, exc):
+ self._closed = True
+ if self._ssl_protocol is not None:
+ self._ssl_protocol._abort(exc)
+
+ def _test__append_write_backlog(self, data):
+ # for test only
+ self._ssl_protocol._write_backlog.append(data)
+ self._ssl_protocol._write_buffer_size += len(data)
+
+
+class SSLProtocol(protocols.BufferedProtocol):
+ max_size = 256 * 1024 # Buffer size passed to read()
+
+ _handshake_start_time = None
+ _handshake_timeout_handle = None
+ _shutdown_timeout_handle = None
+
+ def __init__(self, loop, app_protocol, sslcontext, waiter,
+ server_side=False, server_hostname=None,
+ call_connection_made=True,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ if ssl is None:
+ raise RuntimeError("stdlib ssl module not available")
+
+ self._ssl_buffer = bytearray(self.max_size)
+ self._ssl_buffer_view = memoryview(self._ssl_buffer)
+
+ if ssl_handshake_timeout is None:
+ ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
+ elif ssl_handshake_timeout <= 0:
+ raise ValueError(
+ f"ssl_handshake_timeout should be a positive number, "
+ f"got {ssl_handshake_timeout}")
+ if ssl_shutdown_timeout is None:
+ ssl_shutdown_timeout = constants.SSL_SHUTDOWN_TIMEOUT
+ elif ssl_shutdown_timeout <= 0:
+ raise ValueError(
+ f"ssl_shutdown_timeout should be a positive number, "
+ f"got {ssl_shutdown_timeout}")
+
+ if not sslcontext:
+ sslcontext = _create_transport_context(
+ server_side, server_hostname)
+
+ self._server_side = server_side
+ if server_hostname and not server_side:
+ self._server_hostname = server_hostname
+ else:
+ self._server_hostname = None
+ self._sslcontext = sslcontext
+        # SSL-specific extra info. More info is set when the handshake
+        # completes.
+ self._extra = dict(sslcontext=sslcontext)
+
+ # App data write buffering
+ self._write_backlog = collections.deque()
+ self._write_buffer_size = 0
+
+ self._waiter = waiter
+ self._loop = loop
+ self._set_app_protocol(app_protocol)
+ self._app_transport = None
+ self._app_transport_created = False
+ # transport, ex: SelectorSocketTransport
+ self._transport = None
+ self._ssl_handshake_timeout = ssl_handshake_timeout
+ self._ssl_shutdown_timeout = ssl_shutdown_timeout
+ # SSL and state machine
+ self._incoming = ssl.MemoryBIO()
+ self._outgoing = ssl.MemoryBIO()
+ self._state = SSLProtocolState.UNWRAPPED
+ self._conn_lost = 0 # Set when connection_lost called
+ if call_connection_made:
+ self._app_state = AppProtocolState.STATE_INIT
+ else:
+ self._app_state = AppProtocolState.STATE_CON_MADE
+ self._sslobj = self._sslcontext.wrap_bio(
+ self._incoming, self._outgoing,
+ server_side=self._server_side,
+ server_hostname=self._server_hostname)
+
+ # Flow Control
+
+ self._ssl_writing_paused = False
+
+ self._app_reading_paused = False
+
+ self._ssl_reading_paused = False
+ self._incoming_high_water = 0
+ self._incoming_low_water = 0
+ self._set_read_buffer_limits()
+ self._eof_received = False
+
+ self._app_writing_paused = False
+ self._outgoing_high_water = 0
+ self._outgoing_low_water = 0
+ self._set_write_buffer_limits()
+ self._get_app_transport()
+
+ def _set_app_protocol(self, app_protocol):
+ self._app_protocol = app_protocol
+ # Make fast hasattr check first
+ if (hasattr(app_protocol, 'get_buffer') and
+ isinstance(app_protocol, protocols.BufferedProtocol)):
+ self._app_protocol_get_buffer = app_protocol.get_buffer
+ self._app_protocol_buffer_updated = app_protocol.buffer_updated
+ self._app_protocol_is_buffer = True
+ else:
+ self._app_protocol_is_buffer = False
+
+ def _wakeup_waiter(self, exc=None):
+ if self._waiter is None:
+ return
+ if not self._waiter.cancelled():
+ if exc is not None:
+ self._waiter.set_exception(exc)
+ else:
+ self._waiter.set_result(None)
+ self._waiter = None
+
+ def _get_app_transport(self):
+ if self._app_transport is None:
+ if self._app_transport_created:
+ raise RuntimeError('Creating _SSLProtocolTransport twice')
+ self._app_transport = _SSLProtocolTransport(self._loop, self)
+ self._app_transport_created = True
+ return self._app_transport
+
+ def connection_made(self, transport):
+ """Called when the low-level connection is made.
+
+ Start the SSL handshake.
+ """
+ self._transport = transport
+ self._start_handshake()
+
+ def connection_lost(self, exc):
+ """Called when the low-level connection is lost or closed.
+
+ The argument is an exception object or None (the latter
+ meaning a regular EOF is received or the connection was
+ aborted or closed).
+ """
+ self._write_backlog.clear()
+ self._outgoing.read()
+ self._conn_lost += 1
+
+ # Just mark the app transport as closed so that its __dealloc__
+ # doesn't complain.
+ if self._app_transport is not None:
+ self._app_transport._closed = True
+
+ if self._state != SSLProtocolState.DO_HANDSHAKE:
+ if (
+ self._app_state == AppProtocolState.STATE_CON_MADE or
+ self._app_state == AppProtocolState.STATE_EOF
+ ):
+ self._app_state = AppProtocolState.STATE_CON_LOST
+ self._loop.call_soon(self._app_protocol.connection_lost, exc)
+ self._set_state(SSLProtocolState.UNWRAPPED)
+ self._transport = None
+ self._app_transport = None
+ self._app_protocol = None
+ self._wakeup_waiter(exc)
+
+ if self._shutdown_timeout_handle:
+ self._shutdown_timeout_handle.cancel()
+ self._shutdown_timeout_handle = None
+ if self._handshake_timeout_handle:
+ self._handshake_timeout_handle.cancel()
+ self._handshake_timeout_handle = None
+
+ def get_buffer(self, n):
+ want = n
+ if want <= 0 or want > self.max_size:
+ want = self.max_size
+ if len(self._ssl_buffer) < want:
+ self._ssl_buffer = bytearray(want)
+ self._ssl_buffer_view = memoryview(self._ssl_buffer)
+ return self._ssl_buffer_view
+
+ def buffer_updated(self, nbytes):
+ self._incoming.write(self._ssl_buffer_view[:nbytes])
+
+ if self._state == SSLProtocolState.DO_HANDSHAKE:
+ self._do_handshake()
+
+ elif self._state == SSLProtocolState.WRAPPED:
+ self._do_read()
+
+ elif self._state == SSLProtocolState.FLUSHING:
+ self._do_flush()
+
+ elif self._state == SSLProtocolState.SHUTDOWN:
+ self._do_shutdown()
+
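The `get_buffer()`/`buffer_updated()` pair above follows the standard `BufferedProtocol` contract. A minimal, hypothetical subclass shows the calling convention the event loop uses:

```python
import asyncio

class EchoBuffered(asyncio.BufferedProtocol):
    def connection_made(self, transport):
        self._transport = transport
        self._buf = bytearray(4096)

    def get_buffer(self, sizehint):
        # The loop asks for a writable buffer; sizehint may be -1.
        return self._buf

    def buffer_updated(self, nbytes):
        # The loop wrote nbytes into the buffer returned above.
        self._transport.write(bytes(self._buf[:nbytes]))
```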
+ def eof_received(self):
+ """Called when the other end of the low-level stream
+ is half-closed.
+
+ If this returns a false value (including None), the transport
+ will close itself. If it returns a true value, closing the
+ transport is up to the protocol.
+ """
+ self._eof_received = True
+ try:
+ if self._loop.get_debug():
+ logger.debug("%r received EOF", self)
+
+ if self._state == SSLProtocolState.DO_HANDSHAKE:
+ self._on_handshake_complete(ConnectionResetError)
+
+ elif self._state == SSLProtocolState.WRAPPED:
+ self._set_state(SSLProtocolState.FLUSHING)
+ if self._app_reading_paused:
+ return True
+ else:
+ self._do_flush()
+
+ elif self._state == SSLProtocolState.FLUSHING:
+ self._do_write()
+ self._set_state(SSLProtocolState.SHUTDOWN)
+ self._do_shutdown()
+
+ elif self._state == SSLProtocolState.SHUTDOWN:
+ self._do_shutdown()
+
+ except Exception:
+ self._transport.close()
+ raise
+
+ def _get_extra_info(self, name, default=None):
+ if name in self._extra:
+ return self._extra[name]
+ elif self._transport is not None:
+ return self._transport.get_extra_info(name, default)
+ else:
+ return default
+
+ def _set_state(self, new_state):
+ allowed = False
+
+ if new_state == SSLProtocolState.UNWRAPPED:
+ allowed = True
+
+ elif (
+ self._state == SSLProtocolState.UNWRAPPED and
+ new_state == SSLProtocolState.DO_HANDSHAKE
+ ):
+ allowed = True
+
+ elif (
+ self._state == SSLProtocolState.DO_HANDSHAKE and
+ new_state == SSLProtocolState.WRAPPED
+ ):
+ allowed = True
+
+ elif (
+ self._state == SSLProtocolState.WRAPPED and
+ new_state == SSLProtocolState.FLUSHING
+ ):
+ allowed = True
+
+ elif (
+ self._state == SSLProtocolState.FLUSHING and
+ new_state == SSLProtocolState.SHUTDOWN
+ ):
+ allowed = True
+
+ if allowed:
+ self._state = new_state
+
+ else:
+ raise RuntimeError(
+ 'cannot switch state from {} to {}'.format(
+ self._state, new_state))
+
+ # Handshake flow
+
+ def _start_handshake(self):
+ if self._loop.get_debug():
+ logger.debug("%r starts SSL handshake", self)
+ self._handshake_start_time = self._loop.time()
+ else:
+ self._handshake_start_time = None
+
+ self._set_state(SSLProtocolState.DO_HANDSHAKE)
+
+        # start the handshake timeout countdown
+ self._handshake_timeout_handle = \
+ self._loop.call_later(self._ssl_handshake_timeout,
+ lambda: self._check_handshake_timeout())
+
+ self._do_handshake()
+
+ def _check_handshake_timeout(self):
+ if self._state == SSLProtocolState.DO_HANDSHAKE:
+ msg = (
+ f"SSL handshake is taking longer than "
+ f"{self._ssl_handshake_timeout} seconds: "
+ f"aborting the connection"
+ )
+ self._fatal_error(ConnectionAbortedError(msg))
+
+ def _do_handshake(self):
+ try:
+ self._sslobj.do_handshake()
+ except SSLAgainErrors:
+ self._process_outgoing()
+ except ssl.SSLError as exc:
+ self._on_handshake_complete(exc)
+ else:
+ self._on_handshake_complete(None)
+
+ def _on_handshake_complete(self, handshake_exc):
+ if self._handshake_timeout_handle is not None:
+ self._handshake_timeout_handle.cancel()
+ self._handshake_timeout_handle = None
+
+ sslobj = self._sslobj
+ try:
+ if handshake_exc is None:
+ self._set_state(SSLProtocolState.WRAPPED)
+ else:
+ raise handshake_exc
+
+ peercert = sslobj.getpeercert()
+ except Exception as exc:
+ handshake_exc = None
+ self._set_state(SSLProtocolState.UNWRAPPED)
+ if isinstance(exc, ssl.CertificateError):
+ msg = 'SSL handshake failed on verifying the certificate'
+ else:
+ msg = 'SSL handshake failed'
+ self._fatal_error(exc, msg)
+ self._wakeup_waiter(exc)
+ return
+
+ if self._loop.get_debug():
+ dt = self._loop.time() - self._handshake_start_time
+ logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
+
+ # Add extra info that becomes available after handshake.
+ self._extra.update(peercert=peercert,
+ cipher=sslobj.cipher(),
+ compression=sslobj.compression(),
+ ssl_object=sslobj)
+ if self._app_state == AppProtocolState.STATE_INIT:
+ self._app_state = AppProtocolState.STATE_CON_MADE
+ self._app_protocol.connection_made(self._get_app_transport())
+ self._wakeup_waiter()
+ self._do_read()
+
+ # Shutdown flow
+
+ def _start_shutdown(self):
+ if (
+ self._state in (
+ SSLProtocolState.FLUSHING,
+ SSLProtocolState.SHUTDOWN,
+ SSLProtocolState.UNWRAPPED
+ )
+ ):
+ return
+ if self._app_transport is not None:
+ self._app_transport._closed = True
+ if self._state == SSLProtocolState.DO_HANDSHAKE:
+ self._abort(None)
+ else:
+ self._set_state(SSLProtocolState.FLUSHING)
+ self._shutdown_timeout_handle = self._loop.call_later(
+ self._ssl_shutdown_timeout,
+ lambda: self._check_shutdown_timeout()
+ )
+ self._do_flush()
+
+ def _check_shutdown_timeout(self):
+ if (
+ self._state in (
+ SSLProtocolState.FLUSHING,
+ SSLProtocolState.SHUTDOWN
+ )
+ ):
+ self._transport._force_close(
+ exceptions.TimeoutError('SSL shutdown timed out'))
+
+ def _do_flush(self):
+ self._do_read()
+ self._set_state(SSLProtocolState.SHUTDOWN)
+ self._do_shutdown()
+
+ def _do_shutdown(self):
+ try:
+ if not self._eof_received:
+ self._sslobj.unwrap()
+ except SSLAgainErrors:
+ self._process_outgoing()
+ except ssl.SSLError as exc:
+ self._on_shutdown_complete(exc)
+ else:
+ self._process_outgoing()
+ self._call_eof_received()
+ self._on_shutdown_complete(None)
+
+ def _on_shutdown_complete(self, shutdown_exc):
+ if self._shutdown_timeout_handle is not None:
+ self._shutdown_timeout_handle.cancel()
+ self._shutdown_timeout_handle = None
+
+ if shutdown_exc:
+ self._fatal_error(shutdown_exc)
+ else:
+ self._loop.call_soon(self._transport.close)
+
+ def _abort(self, exc):
+ self._set_state(SSLProtocolState.UNWRAPPED)
+ if self._transport is not None:
+ self._transport._force_close(exc)
+
+ # Outgoing flow
+
+ def _write_appdata(self, list_of_data):
+ if (
+ self._state in (
+ SSLProtocolState.FLUSHING,
+ SSLProtocolState.SHUTDOWN,
+ SSLProtocolState.UNWRAPPED
+ )
+ ):
+ if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+ logger.warning('SSL connection is closed')
+ self._conn_lost += 1
+ return
+
+ for data in list_of_data:
+ self._write_backlog.append(data)
+ self._write_buffer_size += len(data)
+
+ try:
+ if self._state == SSLProtocolState.WRAPPED:
+ self._do_write()
+
+ except Exception as ex:
+ self._fatal_error(ex, 'Fatal error on SSL protocol')
+
+ def _do_write(self):
+ try:
+ while self._write_backlog:
+ data = self._write_backlog[0]
+ count = self._sslobj.write(data)
+ data_len = len(data)
+ if count < data_len:
+ self._write_backlog[0] = data[count:]
+ self._write_buffer_size -= count
+ else:
+ del self._write_backlog[0]
+ self._write_buffer_size -= data_len
+ except SSLAgainErrors:
+ pass
+ self._process_outgoing()
+
+ def _process_outgoing(self):
+ if not self._ssl_writing_paused:
+ data = self._outgoing.read()
+ if len(data):
+ self._transport.write(data)
+ self._control_app_writing()
+
+ # Incoming flow
+
+ def _do_read(self):
+ if (
+ self._state not in (
+ SSLProtocolState.WRAPPED,
+ SSLProtocolState.FLUSHING,
+ )
+ ):
+ return
+ try:
+ if not self._app_reading_paused:
+ if self._app_protocol_is_buffer:
+ self._do_read__buffered()
+ else:
+ self._do_read__copied()
+ if self._write_backlog:
+ self._do_write()
+ else:
+ self._process_outgoing()
+ self._control_ssl_reading()
+ except Exception as ex:
+ self._fatal_error(ex, 'Fatal error on SSL protocol')
+
+ def _do_read__buffered(self):
+ offset = 0
+ count = 1
+
+ buf = self._app_protocol_get_buffer(self._get_read_buffer_size())
+ wants = len(buf)
+
+ try:
+ count = self._sslobj.read(wants, buf)
+
+ if count > 0:
+ offset = count
+ while offset < wants:
+ count = self._sslobj.read(wants - offset, buf[offset:])
+ if count > 0:
+ offset += count
+ else:
+ break
+ else:
+ self._loop.call_soon(lambda: self._do_read())
+ except SSLAgainErrors:
+ pass
+ if offset > 0:
+ self._app_protocol_buffer_updated(offset)
+ if not count:
+ # close_notify
+ self._call_eof_received()
+ self._start_shutdown()
+
+ def _do_read__copied(self):
+ chunk = b'1'
+ zero = True
+ one = False
+
+ try:
+ while True:
+ chunk = self._sslobj.read(self.max_size)
+ if not chunk:
+ break
+ if zero:
+ zero = False
+ one = True
+ first = chunk
+ elif one:
+ one = False
+ data = [first, chunk]
+ else:
+ data.append(chunk)
+ except SSLAgainErrors:
+ pass
+ if one:
+ self._app_protocol.data_received(first)
+ elif not zero:
+ self._app_protocol.data_received(b''.join(data))
+ if not chunk:
+ # close_notify
+ self._call_eof_received()
+ self._start_shutdown()
+
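The zero/one flag dance in `_do_read__copied()` avoids allocating a list in the common cases of zero or one decrypted chunks per read pass. An equivalent simplified sketch (illustrative only):

```python
def collect(chunks):
    # No list is built unless a second chunk actually arrives.
    first = None
    data = None
    for chunk in chunks:          # chunks are non-empty bytes objects
        if first is None:
            first = chunk
        elif data is None:
            data = [first, chunk]
        else:
            data.append(chunk)
    if data is not None:
        return b''.join(data)
    return first                  # one chunk, or None if nothing arrived

assert collect([]) is None
assert collect([b'a']) == b'a'
assert collect([b'a', b'b', b'c']) == b'abc'
```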
+ def _call_eof_received(self):
+ try:
+ if self._app_state == AppProtocolState.STATE_CON_MADE:
+ self._app_state = AppProtocolState.STATE_EOF
+ keep_open = self._app_protocol.eof_received()
+ if keep_open:
+ logger.warning('returning true from eof_received() '
+ 'has no effect when using ssl')
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except BaseException as ex:
+ self._fatal_error(ex, 'Error calling eof_received()')
+
+ # Flow control for writes from APP socket
+
+ def _control_app_writing(self):
+ size = self._get_write_buffer_size()
+ if size >= self._outgoing_high_water and not self._app_writing_paused:
+ self._app_writing_paused = True
+ try:
+ self._app_protocol.pause_writing()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except BaseException as exc:
+ self._loop.call_exception_handler({
+ 'message': 'protocol.pause_writing() failed',
+ 'exception': exc,
+ 'transport': self._app_transport,
+ 'protocol': self,
+ })
+ elif size <= self._outgoing_low_water and self._app_writing_paused:
+ self._app_writing_paused = False
+ try:
+ self._app_protocol.resume_writing()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except BaseException as exc:
+ self._loop.call_exception_handler({
+ 'message': 'protocol.resume_writing() failed',
+ 'exception': exc,
+ 'transport': self._app_transport,
+ 'protocol': self,
+ })
+
+ def _get_write_buffer_size(self):
+ return self._outgoing.pending + self._write_buffer_size
+
+ def _set_write_buffer_limits(self, high=None, low=None):
+ high, low = add_flowcontrol_defaults(
+ high, low, constants.FLOW_CONTROL_HIGH_WATER_SSL_WRITE)
+ self._outgoing_high_water = high
+ self._outgoing_low_water = low
+
+ # Flow control for reads to APP socket
+
+ def _pause_reading(self):
+ self._app_reading_paused = True
+
+ def _resume_reading(self):
+ if self._app_reading_paused:
+ self._app_reading_paused = False
+
+ def resume():
+ if self._state == SSLProtocolState.WRAPPED:
+ self._do_read()
+ elif self._state == SSLProtocolState.FLUSHING:
+ self._do_flush()
+ elif self._state == SSLProtocolState.SHUTDOWN:
+ self._do_shutdown()
+ self._loop.call_soon(resume)
+
+ # Flow control for reads from SSL socket
+
+ def _control_ssl_reading(self):
+ size = self._get_read_buffer_size()
+ if size >= self._incoming_high_water and not self._ssl_reading_paused:
+ self._ssl_reading_paused = True
+ self._transport.pause_reading()
+ elif size <= self._incoming_low_water and self._ssl_reading_paused:
+ self._ssl_reading_paused = False
+ self._transport.resume_reading()
+
+ def _set_read_buffer_limits(self, high=None, low=None):
+ high, low = add_flowcontrol_defaults(
+ high, low, constants.FLOW_CONTROL_HIGH_WATER_SSL_READ)
+ self._incoming_high_water = high
+ self._incoming_low_water = low
+
+ def _get_read_buffer_size(self):
+ return self._incoming.pending
+
+ # Flow control for writes to SSL socket
+
+ def pause_writing(self):
+ """Called when the low-level transport's buffer goes over
+ the high-water mark.
+ """
+ assert not self._ssl_writing_paused
+ self._ssl_writing_paused = True
+
+ def resume_writing(self):
+ """Called when the low-level transport's buffer drains below
+ the low-water mark.
+ """
+ assert self._ssl_writing_paused
+ self._ssl_writing_paused = False
+ self._process_outgoing()
+
+ def _fatal_error(self, exc, message='Fatal error on transport'):
+ if self._transport:
+ self._transport._force_close(exc)
+
+ if isinstance(exc, OSError):
+ if self._loop.get_debug():
+ logger.debug("%r: %s", self, message, exc_info=True)
+ elif not isinstance(exc, exceptions.CancelledError):
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self._transport,
+ 'protocol': self,
+ })
diff --git a/contrib/tools/python3/Lib/asyncio/staggered.py b/contrib/tools/python3/Lib/asyncio/staggered.py
new file mode 100644
index 0000000000..451a53a16f
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/staggered.py
@@ -0,0 +1,149 @@
+"""Support for running coroutines in parallel with staggered start times."""
+
+__all__ = 'staggered_race',
+
+import contextlib
+import typing
+
+from . import events
+from . import exceptions as exceptions_mod
+from . import locks
+from . import tasks
+
+
+async def staggered_race(
+ coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]],
+ delay: typing.Optional[float],
+ *,
+ loop: events.AbstractEventLoop = None,
+) -> typing.Tuple[
+ typing.Any,
+ typing.Optional[int],
+ typing.List[typing.Optional[Exception]]
+]:
+ """Run coroutines with staggered start times and take the first to finish.
+
+ This method takes an iterable of coroutine functions. The first one is
+ started immediately. From then on, whenever the immediately preceding one
+    fails (raises an exception), or when *delay* seconds have passed, the next
+    coroutine is started. This continues until one of the coroutines completes
+    successfully, in which case all others are cancelled, or until all
+ coroutines fail.
+
+ The coroutines provided should be well-behaved in the following way:
+
+ * They should only ``return`` if completed successfully.
+
+ * They should always raise an exception if they did not complete
+ successfully. In particular, if they handle cancellation, they should
+ probably reraise, like this::
+
+ try:
+ # do work
+ except asyncio.CancelledError:
+ # undo partially completed work
+ raise
+
+ Args:
+ coro_fns: an iterable of coroutine functions, i.e. callables that
+ return a coroutine object when called. Use ``functools.partial`` or
+ lambdas to pass arguments.
+
+ delay: amount of time, in seconds, between starting coroutines. If
+ ``None``, the coroutines will run sequentially.
+
+ loop: the event loop to use.
+
+ Returns:
+ tuple *(winner_result, winner_index, exceptions)* where
+
+ - *winner_result*: the result of the winning coroutine, or ``None``
+ if no coroutines won.
+
+ - *winner_index*: the index of the winning coroutine in
+ ``coro_fns``, or ``None`` if no coroutines won. If the winning
+ coroutine may return None on success, *winner_index* can be used
+ to definitively determine whether any coroutine won.
+
+ - *exceptions*: list of exceptions returned by the coroutines.
+ ``len(exceptions)`` is equal to the number of coroutines actually
+ started, and the order is the same as in ``coro_fns``. The winning
+ coroutine's entry is ``None``.
+
+ """
+ # TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
+ loop = loop or events.get_running_loop()
+ enum_coro_fns = enumerate(coro_fns)
+ winner_result = None
+ winner_index = None
+ exceptions = []
+ running_tasks = []
+
+ async def run_one_coro(
+ previous_failed: typing.Optional[locks.Event]) -> None:
+ # Wait for the previous task to finish, or for delay seconds
+ if previous_failed is not None:
+ with contextlib.suppress(exceptions_mod.TimeoutError):
+ # Use asyncio.wait_for() instead of asyncio.wait() here, so
+ # that if we get cancelled at this point, Event.wait() is also
+ # cancelled, otherwise there will be a "Task destroyed but it is
+ # pending" later.
+ await tasks.wait_for(previous_failed.wait(), delay)
+ # Get the next coroutine to run
+ try:
+ this_index, coro_fn = next(enum_coro_fns)
+ except StopIteration:
+ return
+ # Start task that will run the next coroutine
+ this_failed = locks.Event()
+ next_task = loop.create_task(run_one_coro(this_failed))
+ running_tasks.append(next_task)
+ assert len(running_tasks) == this_index + 2
+ # Prepare place to put this coroutine's exceptions if not won
+ exceptions.append(None)
+ assert len(exceptions) == this_index + 1
+
+ try:
+ result = await coro_fn()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as e:
+ exceptions[this_index] = e
+ this_failed.set() # Kickstart the next coroutine
+ else:
+ # Store winner's results
+ nonlocal winner_index, winner_result
+ assert winner_index is None
+ winner_index = this_index
+ winner_result = result
+ # Cancel all other tasks. We take care to not cancel the current
+            # task as well. If we did, then since there is no `await` after
+            # this point and CancelledError is usually thrown at an `await`,
+            # we would encounter a curious corner case where the current task
+            # ends up with done() == True, cancelled() == False, and
+            # exception() == asyncio.CancelledError. This behavior is
+            # specified in https://bugs.python.org/issue30048
+ for i, t in enumerate(running_tasks):
+ if i != this_index:
+ t.cancel()
+
+ first_task = loop.create_task(run_one_coro(None))
+ running_tasks.append(first_task)
+ try:
+ # Wait for a growing list of tasks to all finish: poor man's version of
+ # curio's TaskGroup or trio's nursery
+ done_count = 0
+ while done_count != len(running_tasks):
+ done, _ = await tasks.wait(running_tasks)
+ done_count = len(done)
+ # If run_one_coro raises an unhandled exception, it's probably a
+ # programming error, and I want to see it.
+ if __debug__:
+ for d in done:
+ if d.done() and not d.cancelled() and d.exception():
+ raise d.exception()
+ return winner_result, winner_index, exceptions
+ finally:
+ # Make sure no tasks are left running if we leave this function
+ for t in running_tasks:
+ t.cancel()
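A hypothetical usage sketch of the happy-eyeballs-style pattern this helper is designed for; the host names and delay are made up:

```python
import asyncio
import functools
from asyncio.staggered import staggered_race

async def connect(host):
    reader, writer = await asyncio.open_connection(host, 80)
    return writer

async def main():
    winner, index, excs = await staggered_race(
        [functools.partial(connect, 'host-a.example'),
         functools.partial(connect, 'host-b.example')],
        delay=0.3)  # start the second attempt after 0.3 s
    print('winner:', winner, 'index:', index, 'failures:', excs)

# asyncio.run(main())  # would actually attempt the connections
```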
diff --git a/contrib/tools/python3/Lib/asyncio/streams.py b/contrib/tools/python3/Lib/asyncio/streams.py
new file mode 100644
index 0000000000..f310aa2f36
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/streams.py
@@ -0,0 +1,770 @@
+__all__ = (
+ 'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
+ 'open_connection', 'start_server')
+
+import collections
+import socket
+import sys
+import warnings
+import weakref
+
+if hasattr(socket, 'AF_UNIX'):
+ __all__ += ('open_unix_connection', 'start_unix_server')
+
+from . import coroutines
+from . import events
+from . import exceptions
+from . import format_helpers
+from . import protocols
+from .log import logger
+from .tasks import sleep
+
+
+_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
+
+
+async def open_connection(host=None, port=None, *,
+ limit=_DEFAULT_LIMIT, **kwds):
+ """A wrapper for create_connection() returning a (reader, writer) pair.
+
+ The reader returned is a StreamReader instance; the writer is a
+ StreamWriter instance.
+
+ The arguments are all the usual arguments to create_connection()
+ except protocol_factory; most common are positional host and port,
+ with various optional keyword arguments following.
+
+ Additional optional keyword arguments are loop (to set the event loop
+ instance to use) and limit (to set the buffer limit passed to the
+ StreamReader).
+
+ (If you want to customize the StreamReader and/or
+ StreamReaderProtocol classes, just copy the code -- there's
+ really nothing special here except some convenience.)
+ """
+ loop = events.get_running_loop()
+ reader = StreamReader(limit=limit, loop=loop)
+ protocol = StreamReaderProtocol(reader, loop=loop)
+ transport, _ = await loop.create_connection(
+ lambda: protocol, host, port, **kwds)
+ writer = StreamWriter(transport, protocol, reader, loop)
+ return reader, writer
+
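A sketch of the intended call pattern (example.com is a placeholder host):

```python
import asyncio

async def fetch_status_line():
    reader, writer = await asyncio.open_connection('example.com', 80)
    writer.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    await writer.drain()
    status = await reader.readline()  # e.g. b'HTTP/1.0 200 OK\r\n'
    writer.close()
    await writer.wait_closed()
    return status

print(asyncio.run(fetch_status_line()))
```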
+
+async def start_server(client_connected_cb, host=None, port=None, *,
+ limit=_DEFAULT_LIMIT, **kwds):
+ """Start a socket server, call back for each client connected.
+
+ The first parameter, `client_connected_cb`, takes two parameters:
+ client_reader, client_writer. client_reader is a StreamReader
+ object, while client_writer is a StreamWriter object. This
+ parameter can either be a plain callback function or a coroutine;
+ if it is a coroutine, it will be automatically converted into a
+ Task.
+
+    The rest of the arguments are all the usual arguments to
+    loop.create_server() except protocol_factory; most common are
+    positional host and port, with various optional keyword arguments
+    following.
+
+    An additional optional keyword argument is limit (to set the buffer
+    limit passed to the StreamReader).
+
+ The return value is the same as loop.create_server(), i.e. a
+ Server object which can be used to stop the service.
+ """
+ loop = events.get_running_loop()
+
+ def factory():
+ reader = StreamReader(limit=limit, loop=loop)
+ protocol = StreamReaderProtocol(reader, client_connected_cb,
+ loop=loop)
+ return protocol
+
+ return await loop.create_server(factory, host, port, **kwds)
+
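And the matching server-side sketch, a minimal echo server; as the docstring notes, a coroutine callback is wrapped in a Task automatically:

```python
import asyncio

async def handle(reader, writer):
    line = await reader.readline()
    writer.write(line)              # echo one line back
    await writer.drain()
    writer.close()
    await writer.wait_closed()

async def main():
    server = await asyncio.start_server(handle, '127.0.0.1', 8888)
    async with server:
        await server.serve_forever()

# asyncio.run(main())  # blocks until cancelled
```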
+
+if hasattr(socket, 'AF_UNIX'):
+ # UNIX Domain Sockets are supported on this platform
+
+ async def open_unix_connection(path=None, *,
+ limit=_DEFAULT_LIMIT, **kwds):
+ """Similar to `open_connection` but works with UNIX Domain Sockets."""
+ loop = events.get_running_loop()
+
+ reader = StreamReader(limit=limit, loop=loop)
+ protocol = StreamReaderProtocol(reader, loop=loop)
+ transport, _ = await loop.create_unix_connection(
+ lambda: protocol, path, **kwds)
+ writer = StreamWriter(transport, protocol, reader, loop)
+ return reader, writer
+
+ async def start_unix_server(client_connected_cb, path=None, *,
+ limit=_DEFAULT_LIMIT, **kwds):
+ """Similar to `start_server` but works with UNIX Domain Sockets."""
+ loop = events.get_running_loop()
+
+ def factory():
+ reader = StreamReader(limit=limit, loop=loop)
+ protocol = StreamReaderProtocol(reader, client_connected_cb,
+ loop=loop)
+ return protocol
+
+ return await loop.create_unix_server(factory, path, **kwds)
+
+
+class FlowControlMixin(protocols.Protocol):
+ """Reusable flow control logic for StreamWriter.drain().
+
+ This implements the protocol methods pause_writing(),
+ resume_writing() and connection_lost(). If the subclass overrides
+ these it must call the super methods.
+
+ StreamWriter.drain() must wait for _drain_helper() coroutine.
+ """
+
+ def __init__(self, loop=None):
+ if loop is None:
+ self._loop = events.get_event_loop()
+ else:
+ self._loop = loop
+ self._paused = False
+ self._drain_waiters = collections.deque()
+ self._connection_lost = False
+
+ def pause_writing(self):
+ assert not self._paused
+ self._paused = True
+ if self._loop.get_debug():
+ logger.debug("%r pauses writing", self)
+
+ def resume_writing(self):
+ assert self._paused
+ self._paused = False
+ if self._loop.get_debug():
+ logger.debug("%r resumes writing", self)
+
+ for waiter in self._drain_waiters:
+ if not waiter.done():
+ waiter.set_result(None)
+
+ def connection_lost(self, exc):
+ self._connection_lost = True
+ # Wake up the writer(s) if currently paused.
+ if not self._paused:
+ return
+
+ for waiter in self._drain_waiters:
+ if not waiter.done():
+ if exc is None:
+ waiter.set_result(None)
+ else:
+ waiter.set_exception(exc)
+
+ async def _drain_helper(self):
+ if self._connection_lost:
+ raise ConnectionResetError('Connection lost')
+ if not self._paused:
+ return
+ waiter = self._loop.create_future()
+ self._drain_waiters.append(waiter)
+ try:
+ await waiter
+ finally:
+ self._drain_waiters.remove(waiter)
+
+ def _get_close_waiter(self, stream):
+ raise NotImplementedError
+
+
+class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
+ """Helper class to adapt between Protocol and StreamReader.
+
+ (This is a helper class instead of making StreamReader itself a
+ Protocol subclass, because the StreamReader has other potential
+    uses, and to prevent the user of the StreamReader from accidentally
+    calling inappropriate methods of the protocol.)
+ """
+
+ _source_traceback = None
+
+ def __init__(self, stream_reader, client_connected_cb=None, loop=None):
+ super().__init__(loop=loop)
+ if stream_reader is not None:
+ self._stream_reader_wr = weakref.ref(stream_reader)
+ self._source_traceback = stream_reader._source_traceback
+ else:
+ self._stream_reader_wr = None
+ if client_connected_cb is not None:
+ # This is a stream created by the `create_server()` function.
+ # Keep a strong reference to the reader until a connection
+ # is established.
+ self._strong_reader = stream_reader
+ self._reject_connection = False
+ self._stream_writer = None
+ self._task = None
+ self._transport = None
+ self._client_connected_cb = client_connected_cb
+ self._over_ssl = False
+ self._closed = self._loop.create_future()
+
+ @property
+ def _stream_reader(self):
+ if self._stream_reader_wr is None:
+ return None
+ return self._stream_reader_wr()
+
+ def _replace_writer(self, writer):
+ loop = self._loop
+ transport = writer.transport
+ self._stream_writer = writer
+ self._transport = transport
+ self._over_ssl = transport.get_extra_info('sslcontext') is not None
+
+ def connection_made(self, transport):
+ if self._reject_connection:
+ context = {
+ 'message': ('An open stream was garbage collected prior to '
+ 'establishing network connection; '
+ 'call "stream.close()" explicitly.')
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ transport.abort()
+ return
+ self._transport = transport
+ reader = self._stream_reader
+ if reader is not None:
+ reader.set_transport(transport)
+ self._over_ssl = transport.get_extra_info('sslcontext') is not None
+ if self._client_connected_cb is not None:
+ self._stream_writer = StreamWriter(transport, self,
+ reader,
+ self._loop)
+ res = self._client_connected_cb(reader,
+ self._stream_writer)
+ if coroutines.iscoroutine(res):
+ def callback(task):
+ if task.cancelled():
+ transport.close()
+ return
+ exc = task.exception()
+ if exc is not None:
+ self._loop.call_exception_handler({
+ 'message': 'Unhandled exception in client_connected_cb',
+ 'exception': exc,
+ 'transport': transport,
+ })
+ transport.close()
+
+ self._task = self._loop.create_task(res)
+ self._task.add_done_callback(callback)
+
+ self._strong_reader = None
+
+ def connection_lost(self, exc):
+ reader = self._stream_reader
+ if reader is not None:
+ if exc is None:
+ reader.feed_eof()
+ else:
+ reader.set_exception(exc)
+ if not self._closed.done():
+ if exc is None:
+ self._closed.set_result(None)
+ else:
+ self._closed.set_exception(exc)
+ super().connection_lost(exc)
+ self._stream_reader_wr = None
+ self._stream_writer = None
+ self._task = None
+ self._transport = None
+
+ def data_received(self, data):
+ reader = self._stream_reader
+ if reader is not None:
+ reader.feed_data(data)
+
+ def eof_received(self):
+ reader = self._stream_reader
+ if reader is not None:
+ reader.feed_eof()
+ if self._over_ssl:
+ # Prevent a warning in SSLProtocol.eof_received:
+ # "returning true from eof_received()
+ # has no effect when using ssl"
+ return False
+ return True
+
+ def _get_close_waiter(self, stream):
+ return self._closed
+
+ def __del__(self):
+ # Prevent reports about unhandled exceptions.
+ # Better than self._closed._log_traceback = False hack
+ try:
+ closed = self._closed
+ except AttributeError:
+ pass # failed constructor
+ else:
+ if closed.done() and not closed.cancelled():
+ closed.exception()
+
+
+class StreamWriter:
+ """Wraps a Transport.
+
+ This exposes write(), writelines(), [can_]write_eof(),
+ get_extra_info() and close(). It adds drain() which returns an
+ optional Future on which you can wait for flow control. It also
+ adds a transport property which references the Transport
+ directly.
+ """
+
+ def __init__(self, transport, protocol, reader, loop):
+ self._transport = transport
+ self._protocol = protocol
+ # drain() expects that the reader has an exception() method
+ assert reader is None or isinstance(reader, StreamReader)
+ self._reader = reader
+ self._loop = loop
+ self._complete_fut = self._loop.create_future()
+ self._complete_fut.set_result(None)
+
+ def __repr__(self):
+ info = [self.__class__.__name__, f'transport={self._transport!r}']
+ if self._reader is not None:
+ info.append(f'reader={self._reader!r}')
+ return '<{}>'.format(' '.join(info))
+
+ @property
+ def transport(self):
+ return self._transport
+
+ def write(self, data):
+ self._transport.write(data)
+
+ def writelines(self, data):
+ self._transport.writelines(data)
+
+ def write_eof(self):
+ return self._transport.write_eof()
+
+ def can_write_eof(self):
+ return self._transport.can_write_eof()
+
+ def close(self):
+ return self._transport.close()
+
+ def is_closing(self):
+ return self._transport.is_closing()
+
+ async def wait_closed(self):
+ await self._protocol._get_close_waiter(self)
+
+ def get_extra_info(self, name, default=None):
+ return self._transport.get_extra_info(name, default)
+
+ async def drain(self):
+ """Flush the write buffer.
+
+ The intended use is to write
+
+ w.write(data)
+ await w.drain()
+ """
+ if self._reader is not None:
+ exc = self._reader.exception()
+ if exc is not None:
+ raise exc
+ if self._transport.is_closing():
+ # Wait for protocol.connection_lost() call
+ # Raise connection closing error if any,
+ # ConnectionResetError otherwise
+ # Yield to the event loop so connection_lost() may be
+ # called. Without this, _drain_helper() would return
+ # immediately, and code that calls
+ # write(...); await drain()
+ # in a loop would never call connection_lost(), so it
+ # would not see an error when the socket is closed.
+ await sleep(0)
+ await self._protocol._drain_helper()
+
+ async def start_tls(self, sslcontext, *,
+ server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ """Upgrade an existing stream-based connection to TLS."""
+ server_side = self._protocol._client_connected_cb is not None
+ protocol = self._protocol
+ await self.drain()
+ new_transport = await self._loop.start_tls( # type: ignore
+ self._transport, protocol, sslcontext,
+ server_side=server_side, server_hostname=server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ self._transport = new_transport
+ protocol._replace_writer(self)
+
+ def __del__(self):
+ if not self._transport.is_closing():
+ if self._loop.is_closed():
+ warnings.warn("loop is closed", ResourceWarning)
+ else:
+ self.close()
+ warnings.warn(f"unclosed {self!r}", ResourceWarning)
+
+
+class StreamReader:
+
+ _source_traceback = None
+
+ def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
+ # The line length limit is a security feature;
+ # it also doubles as half the buffer limit.
+
+ if limit <= 0:
+ raise ValueError('Limit cannot be <= 0')
+
+ self._limit = limit
+ if loop is None:
+ self._loop = events.get_event_loop()
+ else:
+ self._loop = loop
+ self._buffer = bytearray()
+ self._eof = False # Whether we're done.
+ self._waiter = None # A future used by _wait_for_data()
+ self._exception = None
+ self._transport = None
+ self._paused = False
+ if self._loop.get_debug():
+ self._source_traceback = format_helpers.extract_stack(
+ sys._getframe(1))
+
+ def __repr__(self):
+ info = ['StreamReader']
+ if self._buffer:
+ info.append(f'{len(self._buffer)} bytes')
+ if self._eof:
+ info.append('eof')
+ if self._limit != _DEFAULT_LIMIT:
+ info.append(f'limit={self._limit}')
+ if self._waiter:
+ info.append(f'waiter={self._waiter!r}')
+ if self._exception:
+ info.append(f'exception={self._exception!r}')
+ if self._transport:
+ info.append(f'transport={self._transport!r}')
+ if self._paused:
+ info.append('paused')
+ return '<{}>'.format(' '.join(info))
+
+ def exception(self):
+ return self._exception
+
+ def set_exception(self, exc):
+ self._exception = exc
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ if not waiter.cancelled():
+ waiter.set_exception(exc)
+
+ def _wakeup_waiter(self):
+ """Wakeup read*() functions waiting for data or EOF."""
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ if not waiter.cancelled():
+ waiter.set_result(None)
+
+ def set_transport(self, transport):
+ assert self._transport is None, 'Transport already set'
+ self._transport = transport
+
+ def _maybe_resume_transport(self):
+ if self._paused and len(self._buffer) <= self._limit:
+ self._paused = False
+ self._transport.resume_reading()
+
+ def feed_eof(self):
+ self._eof = True
+ self._wakeup_waiter()
+
+ def at_eof(self):
+ """Return True if the buffer is empty and 'feed_eof' was called."""
+ return self._eof and not self._buffer
+
+ def feed_data(self, data):
+ assert not self._eof, 'feed_data after feed_eof'
+
+ if not data:
+ return
+
+ self._buffer.extend(data)
+ self._wakeup_waiter()
+
+ if (self._transport is not None and
+ not self._paused and
+ len(self._buffer) > 2 * self._limit):
+ try:
+ self._transport.pause_reading()
+ except NotImplementedError:
+ # The transport can't be paused.
+ # We'll just have to buffer all data.
+ # Forget the transport so we don't keep trying.
+ self._transport = None
+ else:
+ self._paused = True
+
+ async def _wait_for_data(self, func_name):
+ """Wait until feed_data() or feed_eof() is called.
+
+        If the stream was paused, automatically resume it.
+ """
+ # StreamReader uses a future to link the protocol feed_data() method
+ # to a read coroutine. Running two read coroutines at the same time
+        # would have unexpected behaviour: it would not be possible to know
+ # which coroutine would get the next data.
+ if self._waiter is not None:
+ raise RuntimeError(
+ f'{func_name}() called while another coroutine is '
+ f'already waiting for incoming data')
+
+ assert not self._eof, '_wait_for_data after EOF'
+
+        # Waiting for data while paused would deadlock, so prevent it.
+ # This is essential for readexactly(n) for case when n > self._limit.
+ if self._paused:
+ self._paused = False
+ self._transport.resume_reading()
+
+ self._waiter = self._loop.create_future()
+ try:
+ await self._waiter
+ finally:
+ self._waiter = None
+
+ async def readline(self):
+ """Read chunk of data from the stream until newline (b'\n') is found.
+
+ On success, return chunk that ends with newline. If only partial
+ line can be read due to EOF, return incomplete line without
+ terminating newline. When EOF was reached while no bytes read, empty
+ bytes object is returned.
+
+ If limit is reached, ValueError will be raised. In that case, if
+ newline was found, complete line including newline will be removed
+ from internal buffer. Else, internal buffer will be cleared. Limit is
+ compared against part of the line without newline.
+
+ If stream was paused, this function will automatically resume it if
+ needed.
+ """
+ sep = b'\n'
+ seplen = len(sep)
+ try:
+ line = await self.readuntil(sep)
+ except exceptions.IncompleteReadError as e:
+ return e.partial
+ except exceptions.LimitOverrunError as e:
+ if self._buffer.startswith(sep, e.consumed):
+ del self._buffer[:e.consumed + seplen]
+ else:
+ self._buffer.clear()
+ self._maybe_resume_transport()
+ raise ValueError(e.args[0])
+ return line
+
+ async def readuntil(self, separator=b'\n'):
+ """Read data from the stream until ``separator`` is found.
+
+ On success, the data and separator will be removed from the
+ internal buffer (consumed). Returned data will include the
+ separator at the end.
+
+ Configured stream limit is used to check result. Limit sets the
+ maximal length of data that can be returned, not counting the
+ separator.
+
+ If an EOF occurs and the complete separator is still not found,
+ an IncompleteReadError exception will be raised, and the internal
+ buffer will be reset. The IncompleteReadError.partial attribute
+ may contain the separator partially.
+
+ If the data cannot be read because of over limit, a
+ LimitOverrunError exception will be raised, and the data
+ will be left in the internal buffer, so it can be read again.
+ """
+ seplen = len(separator)
+ if seplen == 0:
+ raise ValueError('Separator should be at least one-byte string')
+
+ if self._exception is not None:
+ raise self._exception
+
+        # Consume the whole buffer except the last bytes, whose length is
+        # one less than seplen. Let's check the corner cases with
+        # separator='SEPARATOR':
+        # * we have received an almost complete separator (without the last
+        #   byte), i.e. buffer='some textSEPARATO'. In this case we
+        #   can safely consume len(separator) - 1 bytes.
+        # * the last byte of the buffer is the first byte of the separator,
+        #   i.e. buffer='abcdefghijklmnopqrS'. We may safely consume
+        #   everything except that last byte, but this requires analyzing
+        #   the bytes of the buffer that match a partial separator.
+        #   This is slow and/or requires an FSM. For this case our
+        #   implementation is not optimal, since it requires rescanning
+        #   data that is known not to belong to the separator. In the
+        #   real world, the separator will not be long enough for the
+        #   performance problems to be noticeable. Even when reading
+        #   MIME-encoded messages :)
+
+ # `offset` is the number of bytes from the beginning of the buffer
+ # where there is no occurrence of `separator`.
+ offset = 0
+
+ # Loop until we find `separator` in the buffer, exceed the buffer size,
+ # or an EOF has happened.
+ while True:
+ buflen = len(self._buffer)
+
+ # Check if we now have enough data in the buffer for `separator` to
+ # fit.
+ if buflen - offset >= seplen:
+ isep = self._buffer.find(separator, offset)
+
+ if isep != -1:
+ # `separator` is in the buffer. `isep` will be used later
+ # to retrieve the data.
+ break
+
+            # See the comment above for an explanation.
+ offset = buflen + 1 - seplen
+ if offset > self._limit:
+ raise exceptions.LimitOverrunError(
+ 'Separator is not found, and chunk exceed the limit',
+ offset)
+
+            # A complete message (with the full separator) may be present in
+            # the buffer even when the EOF flag is set. This may happen when
+            # the last chunk adds the bytes that complete the separator.
+            # That's why we check for EOF *after* inspecting the buffer.
+ if self._eof:
+ chunk = bytes(self._buffer)
+ self._buffer.clear()
+ raise exceptions.IncompleteReadError(chunk, None)
+
+ # _wait_for_data() will resume reading if stream was paused.
+ await self._wait_for_data('readuntil')
+
+ if isep > self._limit:
+ raise exceptions.LimitOverrunError(
+ 'Separator is found, but chunk is longer than limit', isep)
+
+ chunk = self._buffer[:isep + seplen]
+ del self._buffer[:isep + seplen]
+ self._maybe_resume_transport()
+ return bytes(chunk)
+
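A small usage sketch covering the two failure modes documented above; the `reader` is assumed to be a connected StreamReader:

```python
import asyncio

async def read_frame(reader):
    try:
        frame = await reader.readuntil(b'\r\n')
    except asyncio.IncompleteReadError as e:
        return e.partial    # EOF arrived before the separator
    except asyncio.LimitOverrunError:
        raise ValueError('frame too long')  # data stays in the buffer
    return frame[:-2]       # strip the separator
```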
+ async def read(self, n=-1):
+ """Read up to `n` bytes from the stream.
+
+ If `n` is not provided or set to -1,
+ read until EOF, then return all read bytes.
+ If EOF was received and the internal buffer is empty,
+ return an empty bytes object.
+
+ If `n` is 0, return an empty bytes object immediately.
+
+ If `n` is positive, return at most `n` available bytes
+ as soon as at least 1 byte is available in the internal buffer.
+ If EOF is received before any byte is read, return an empty
+ bytes object.
+
+ The returned value is not limited by the `limit` configured at
+ stream creation.
+
+ If the stream was paused, this function will automatically resume
+ it if needed.
+ """
+
+ if self._exception is not None:
+ raise self._exception
+
+ if n == 0:
+ return b''
+
+ if n < 0:
+ # This used to just loop creating a new waiter hoping to
+ # collect everything in self._buffer, but that would
+ # deadlock if the subprocess sends more than self.limit
+ # bytes. So just call self.read(self._limit) until EOF.
+ blocks = []
+ while True:
+ block = await self.read(self._limit)
+ if not block:
+ break
+ blocks.append(block)
+ return b''.join(blocks)
+
+ if not self._buffer and not self._eof:
+ await self._wait_for_data('read')
+
+ # This will work right even if buffer is less than n bytes
+ data = bytes(memoryview(self._buffer)[:n])
+ del self._buffer[:n]
+
+ self._maybe_resume_transport()
+ return data
+
+ async def readexactly(self, n):
+ """Read exactly `n` bytes.
+
+ Raise an IncompleteReadError if EOF is reached before `n` bytes can be
+ read. The IncompleteReadError.partial attribute of the exception will
+ contain the partial read bytes.
+
+ If n is zero, return an empty bytes object.
+
+ The returned value is not limited by the `limit` configured at
+ stream creation.
+
+ If the stream was paused, this function will automatically resume
+ it if needed.
+ """
+ if n < 0:
+ raise ValueError('readexactly size can not be less than zero')
+
+ if self._exception is not None:
+ raise self._exception
+
+ if n == 0:
+ return b''
+
+ while len(self._buffer) < n:
+ if self._eof:
+ incomplete = bytes(self._buffer)
+ self._buffer.clear()
+ raise exceptions.IncompleteReadError(incomplete, n)
+
+ await self._wait_for_data('readexactly')
+
+ if len(self._buffer) == n:
+ data = bytes(self._buffer)
+ self._buffer.clear()
+ else:
+ data = bytes(memoryview(self._buffer)[:n])
+ del self._buffer[:n]
+ self._maybe_resume_transport()
+ return data
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ val = await self.readline()
+ if val == b'':
+ raise StopAsyncIteration
+ return val
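
A minimal sketch of the readuntil() semantics documented above, feeding a bare StreamReader by hand (illustrative only, not part of the vendored file; constructing a StreamReader directly with no transport is an assumption that holds for this pure-Python implementation):

import asyncio

async def demo():
    reader = asyncio.StreamReader(limit=64)
    reader.feed_data(b'first line\nsecond')
    # Separator present: the chunk is returned including the separator.
    print(await reader.readuntil(b'\n'))      # b'first line\n'
    reader.feed_eof()
    try:
        await reader.readuntil(b'\n')         # EOF before the separator
    except asyncio.IncompleteReadError as e:
        print(e.partial)                      # b'second'; buffer is reset

asyncio.run(demo())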
diff --git a/contrib/tools/python3/Lib/asyncio/subprocess.py b/contrib/tools/python3/Lib/asyncio/subprocess.py
new file mode 100644
index 0000000000..043359bbd0
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/subprocess.py
@@ -0,0 +1,229 @@
+__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
+
+import subprocess
+
+from . import events
+from . import protocols
+from . import streams
+from . import tasks
+from .log import logger
+
+
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+DEVNULL = subprocess.DEVNULL
+
+
+class SubprocessStreamProtocol(streams.FlowControlMixin,
+ protocols.SubprocessProtocol):
+ """Like StreamReaderProtocol, but for a subprocess."""
+
+ def __init__(self, limit, loop):
+ super().__init__(loop=loop)
+ self._limit = limit
+ self.stdin = self.stdout = self.stderr = None
+ self._transport = None
+ self._process_exited = False
+ self._pipe_fds = []
+ self._stdin_closed = self._loop.create_future()
+
+ def __repr__(self):
+ info = [self.__class__.__name__]
+ if self.stdin is not None:
+ info.append(f'stdin={self.stdin!r}')
+ if self.stdout is not None:
+ info.append(f'stdout={self.stdout!r}')
+ if self.stderr is not None:
+ info.append(f'stderr={self.stderr!r}')
+ return '<{}>'.format(' '.join(info))
+
+ def connection_made(self, transport):
+ self._transport = transport
+
+ stdout_transport = transport.get_pipe_transport(1)
+ if stdout_transport is not None:
+ self.stdout = streams.StreamReader(limit=self._limit,
+ loop=self._loop)
+ self.stdout.set_transport(stdout_transport)
+ self._pipe_fds.append(1)
+
+ stderr_transport = transport.get_pipe_transport(2)
+ if stderr_transport is not None:
+ self.stderr = streams.StreamReader(limit=self._limit,
+ loop=self._loop)
+ self.stderr.set_transport(stderr_transport)
+ self._pipe_fds.append(2)
+
+ stdin_transport = transport.get_pipe_transport(0)
+ if stdin_transport is not None:
+ self.stdin = streams.StreamWriter(stdin_transport,
+ protocol=self,
+ reader=None,
+ loop=self._loop)
+
+ def pipe_data_received(self, fd, data):
+ if fd == 1:
+ reader = self.stdout
+ elif fd == 2:
+ reader = self.stderr
+ else:
+ reader = None
+ if reader is not None:
+ reader.feed_data(data)
+
+ def pipe_connection_lost(self, fd, exc):
+ if fd == 0:
+ pipe = self.stdin
+ if pipe is not None:
+ pipe.close()
+ self.connection_lost(exc)
+ if exc is None:
+ self._stdin_closed.set_result(None)
+ else:
+ self._stdin_closed.set_exception(exc)
+ # Since calling `wait_closed()` is not mandatory,
+ # we shouldn't log the traceback if this is not awaited.
+ self._stdin_closed._log_traceback = False
+ return
+ if fd == 1:
+ reader = self.stdout
+ elif fd == 2:
+ reader = self.stderr
+ else:
+ reader = None
+ if reader is not None:
+ if exc is None:
+ reader.feed_eof()
+ else:
+ reader.set_exception(exc)
+
+ if fd in self._pipe_fds:
+ self._pipe_fds.remove(fd)
+ self._maybe_close_transport()
+
+ def process_exited(self):
+ self._process_exited = True
+ self._maybe_close_transport()
+
+ def _maybe_close_transport(self):
+ if len(self._pipe_fds) == 0 and self._process_exited:
+ self._transport.close()
+ self._transport = None
+
+ def _get_close_waiter(self, stream):
+ if stream is self.stdin:
+ return self._stdin_closed
+
+
+class Process:
+ def __init__(self, transport, protocol, loop):
+ self._transport = transport
+ self._protocol = protocol
+ self._loop = loop
+ self.stdin = protocol.stdin
+ self.stdout = protocol.stdout
+ self.stderr = protocol.stderr
+ self.pid = transport.get_pid()
+
+ def __repr__(self):
+ return f'<{self.__class__.__name__} {self.pid}>'
+
+ @property
+ def returncode(self):
+ return self._transport.get_returncode()
+
+ async def wait(self):
+ """Wait until the process exit and return the process return code."""
+ return await self._transport._wait()
+
+ def send_signal(self, signal):
+ self._transport.send_signal(signal)
+
+ def terminate(self):
+ self._transport.terminate()
+
+ def kill(self):
+ self._transport.kill()
+
+ async def _feed_stdin(self, input):
+ debug = self._loop.get_debug()
+ try:
+ if input is not None:
+ self.stdin.write(input)
+ if debug:
+ logger.debug(
+ '%r communicate: feed stdin (%s bytes)', self, len(input))
+
+ await self.stdin.drain()
+ except (BrokenPipeError, ConnectionResetError) as exc:
+ # communicate() ignores BrokenPipeError and ConnectionResetError.
+ # write() and drain() can raise these exceptions.
+ if debug:
+ logger.debug('%r communicate: stdin got %r', self, exc)
+
+ if debug:
+ logger.debug('%r communicate: close stdin', self)
+ self.stdin.close()
+
+ async def _noop(self):
+ return None
+
+ async def _read_stream(self, fd):
+ transport = self._transport.get_pipe_transport(fd)
+ if fd == 2:
+ stream = self.stderr
+ else:
+ assert fd == 1
+ stream = self.stdout
+ if self._loop.get_debug():
+ name = 'stdout' if fd == 1 else 'stderr'
+ logger.debug('%r communicate: read %s', self, name)
+ output = await stream.read()
+ if self._loop.get_debug():
+ name = 'stdout' if fd == 1 else 'stderr'
+ logger.debug('%r communicate: close %s', self, name)
+ transport.close()
+ return output
+
+ async def communicate(self, input=None):
+ if self.stdin is not None:
+ stdin = self._feed_stdin(input)
+ else:
+ stdin = self._noop()
+ if self.stdout is not None:
+ stdout = self._read_stream(1)
+ else:
+ stdout = self._noop()
+ if self.stderr is not None:
+ stderr = self._read_stream(2)
+ else:
+ stderr = self._noop()
+ stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr)
+ await self.wait()
+ return (stdout, stderr)
+
+
+async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
+ limit=streams._DEFAULT_LIMIT, **kwds):
+ loop = events.get_running_loop()
+ protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+ loop=loop)
+ transport, protocol = await loop.subprocess_shell(
+ protocol_factory,
+ cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, **kwds)
+ return Process(transport, protocol, loop)
+
+
+async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
+ stderr=None, limit=streams._DEFAULT_LIMIT,
+ **kwds):
+ loop = events.get_running_loop()
+ protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+ loop=loop)
+ transport, protocol = await loop.subprocess_exec(
+ protocol_factory,
+ program, *args,
+ stdin=stdin, stdout=stdout,
+ stderr=stderr, **kwds)
+ return Process(transport, protocol, loop)
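
A short usage sketch for the two factories above (illustrative only; assumes an `echo` executable on PATH):

import asyncio

async def main():
    proc = await asyncio.create_subprocess_exec(
        'echo', 'hello',
        stdout=asyncio.subprocess.PIPE)
    # communicate() gathers the stdin/stdout/stderr coroutines and waits.
    stdout, _ = await proc.communicate()
    print(proc.returncode, stdout)            # 0 b'hello\n'

asyncio.run(main())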
diff --git a/contrib/tools/python3/Lib/asyncio/taskgroups.py b/contrib/tools/python3/Lib/asyncio/taskgroups.py
new file mode 100644
index 0000000000..d264e51f1f
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/taskgroups.py
@@ -0,0 +1,240 @@
+# Adapted with permission from the EdgeDB project;
+# license: PSFL.
+
+
+__all__ = ("TaskGroup",)
+
+from . import events
+from . import exceptions
+from . import tasks
+
+
+class TaskGroup:
+ """Asynchronous context manager for managing groups of tasks.
+
+ Example use:
+
+ async with asyncio.TaskGroup() as group:
+ task1 = group.create_task(some_coroutine(...))
+ task2 = group.create_task(other_coroutine(...))
+ print("Both tasks have completed now.")
+
+ All tasks are awaited when the context manager exits.
+
+ Any exception other than `asyncio.CancelledError` raised within
+ a task will cancel all remaining tasks; the group then waits for
+ them to exit. The exceptions are then combined and raised as an
+ `ExceptionGroup`.
+ """
+ def __init__(self):
+ self._entered = False
+ self._exiting = False
+ self._aborting = False
+ self._loop = None
+ self._parent_task = None
+ self._parent_cancel_requested = False
+ self._tasks = set()
+ self._errors = []
+ self._base_error = None
+ self._on_completed_fut = None
+
+ def __repr__(self):
+ info = ['']
+ if self._tasks:
+ info.append(f'tasks={len(self._tasks)}')
+ if self._errors:
+ info.append(f'errors={len(self._errors)}')
+ if self._aborting:
+ info.append('cancelling')
+ elif self._entered:
+ info.append('entered')
+
+ info_str = ' '.join(info)
+ return f'<TaskGroup{info_str}>'
+
+ async def __aenter__(self):
+ if self._entered:
+ raise RuntimeError(
+ f"TaskGroup {self!r} has already been entered")
+ if self._loop is None:
+ self._loop = events.get_running_loop()
+ self._parent_task = tasks.current_task(self._loop)
+ if self._parent_task is None:
+ raise RuntimeError(
+ f'TaskGroup {self!r} cannot determine the parent task')
+ self._entered = True
+
+ return self
+
+ async def __aexit__(self, et, exc, tb):
+ self._exiting = True
+
+ if (exc is not None and
+ self._is_base_error(exc) and
+ self._base_error is None):
+ self._base_error = exc
+
+ propagate_cancellation_error = \
+ exc if et is exceptions.CancelledError else None
+ if self._parent_cancel_requested:
+ # If this flag is set we *must* call uncancel().
+ if self._parent_task.uncancel() == 0:
+ # If there are no pending cancellations left,
+ # don't propagate CancelledError.
+ propagate_cancellation_error = None
+
+ if et is not None:
+ if not self._aborting:
+ # Our parent task is being cancelled:
+ #
+ # async with TaskGroup() as g:
+ # g.create_task(...)
+ # await ... # <- CancelledError
+ #
+ # or there's an exception in "async with":
+ #
+ # async with TaskGroup() as g:
+ # g.create_task(...)
+ # 1 / 0
+ #
+ self._abort()
+
+ # We use a while-loop here because "self._on_completed_fut"
+ # can be cancelled multiple times if our parent task
+ # is being cancelled repeatedly (or even once, when
+ # our own cancellation is already in progress).
+ while self._tasks:
+ if self._on_completed_fut is None:
+ self._on_completed_fut = self._loop.create_future()
+
+ try:
+ await self._on_completed_fut
+ except exceptions.CancelledError as ex:
+ if not self._aborting:
+ # Our parent task is being cancelled:
+ #
+ # async def wrapper():
+ # async with TaskGroup() as g:
+ # g.create_task(foo)
+ #
+ # "wrapper" is being cancelled while "foo" is
+ # still running.
+ propagate_cancellation_error = ex
+ self._abort()
+
+ self._on_completed_fut = None
+
+ assert not self._tasks
+
+ if self._base_error is not None:
+ raise self._base_error
+
+ # Propagate CancelledError if there is one, except if there
+ # are other errors -- those have priority.
+ if propagate_cancellation_error and not self._errors:
+ raise propagate_cancellation_error
+
+ if et is not None and et is not exceptions.CancelledError:
+ self._errors.append(exc)
+
+ if self._errors:
+ # Exceptions are heavy objects that can have object
+ # cycles (bad for GC); let's not keep a reference to
+ # a bunch of them.
+ try:
+ me = BaseExceptionGroup('unhandled errors in a TaskGroup', self._errors)
+ raise me from None
+ finally:
+ self._errors = None
+
+ def create_task(self, coro, *, name=None, context=None):
+ """Create a new task in this group and return it.
+
+ Similar to `asyncio.create_task`.
+ """
+ if not self._entered:
+ raise RuntimeError(f"TaskGroup {self!r} has not been entered")
+ if self._exiting and not self._tasks:
+ raise RuntimeError(f"TaskGroup {self!r} is finished")
+ if self._aborting:
+ raise RuntimeError(f"TaskGroup {self!r} is shutting down")
+ if context is None:
+ task = self._loop.create_task(coro)
+ else:
+ task = self._loop.create_task(coro, context=context)
+ tasks._set_task_name(task, name)
+ # optimization: Immediately call the done callback if the task is
+ # already done (e.g. if the coro was able to complete eagerly),
+ # and skip scheduling a done callback
+ if task.done():
+ self._on_task_done(task)
+ else:
+ self._tasks.add(task)
+ task.add_done_callback(self._on_task_done)
+ return task
+
+ # Since Python 3.8 Tasks propagate all exceptions correctly,
+ # except for KeyboardInterrupt and SystemExit which are
+ # still considered special.
+
+ def _is_base_error(self, exc: BaseException) -> bool:
+ assert isinstance(exc, BaseException)
+ return isinstance(exc, (SystemExit, KeyboardInterrupt))
+
+ def _abort(self):
+ self._aborting = True
+
+ for t in self._tasks:
+ if not t.done():
+ t.cancel()
+
+ def _on_task_done(self, task):
+ self._tasks.discard(task)
+
+ if self._on_completed_fut is not None and not self._tasks:
+ if not self._on_completed_fut.done():
+ self._on_completed_fut.set_result(True)
+
+ if task.cancelled():
+ return
+
+ exc = task.exception()
+ if exc is None:
+ return
+
+ self._errors.append(exc)
+ if self._is_base_error(exc) and self._base_error is None:
+ self._base_error = exc
+
+ if self._parent_task.done():
+ # Not sure if this case is possible, but we want to handle
+ # it anyway.
+ self._loop.call_exception_handler({
+ 'message': f'Task {task!r} has errored out but its parent '
+ f'task {self._parent_task} is already completed',
+ 'exception': exc,
+ 'task': task,
+ })
+ return
+
+ if not self._aborting and not self._parent_cancel_requested:
+ # If parent task *is not* being cancelled, it means that we want
+ # to manually cancel it to abort whatever is being run right now
+ # in the TaskGroup. But we want to mark parent task as
+ # "not cancelled" later in __aexit__. Example situation that
+ # we need to handle:
+ #
+ # async def foo():
+ # try:
+ # async with TaskGroup() as g:
+ # g.create_task(crash_soon())
+ # await something # <- this needs to be canceled
+ # # by the TaskGroup, e.g.
+ # # foo() needs to be cancelled
+ # except Exception:
+ # # Ignore any exceptions raised in the TaskGroup
+ # pass
+ # await something_else # this line has to be called
+ # # after TaskGroup is finished.
+ self._abort()
+ self._parent_cancel_requested = True
+ self._parent_task.cancel()
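
A small sketch of the cancellation behaviour described above (illustrative only; requires Python 3.11+ for TaskGroup and except*): one failing child cancels its sibling, and the failure surfaces as an ExceptionGroup.

import asyncio

async def boom():
    raise ValueError('boom')

async def main():
    try:
        async with asyncio.TaskGroup() as tg:
            tg.create_task(boom())
            tg.create_task(asyncio.sleep(10))  # cancelled when boom() fails
    except* ValueError as eg:
        print('caught:', eg.exceptions)        # (ValueError('boom'),)

asyncio.run(main())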
diff --git a/contrib/tools/python3/Lib/asyncio/tasks.py b/contrib/tools/python3/Lib/asyncio/tasks.py
new file mode 100644
index 0000000000..65f2a6ef80
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/tasks.py
@@ -0,0 +1,1065 @@
+"""Support for tasks, coroutines and the scheduler."""
+
+__all__ = (
+ 'Task', 'create_task',
+ 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+ 'wait', 'wait_for', 'as_completed', 'sleep',
+ 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
+ 'current_task', 'all_tasks',
+ 'create_eager_task_factory', 'eager_task_factory',
+ '_register_task', '_unregister_task', '_enter_task', '_leave_task',
+)
+
+import concurrent.futures
+import contextvars
+import functools
+import inspect
+import itertools
+import types
+import warnings
+import weakref
+from types import GenericAlias
+
+from . import base_tasks
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from . import timeouts
+
+# Helper to generate new task names
+# This uses itertools.count() instead of a "+= 1" operation because the latter
+# is not thread safe. See bpo-11866 for a longer explanation.
+_task_name_counter = itertools.count(1).__next__
+
+
+def current_task(loop=None):
+ """Return a currently executed task."""
+ if loop is None:
+ loop = events.get_running_loop()
+ return _current_tasks.get(loop)
+
+
+def all_tasks(loop=None):
+ """Return a set of all tasks for the loop."""
+ if loop is None:
+ loop = events.get_running_loop()
+ # capturing the set of eager tasks first, so if an eager task "graduates"
+ # to a regular task in another thread, we don't risk missing it.
+ eager_tasks = list(_eager_tasks)
+ # Looping over the WeakSet isn't safe as it can be updated from another
+ # thread, therefore we cast it to list prior to filtering. The list cast
+ # itself requires iteration, so we repeat it several times ignoring
+ # RuntimeErrors (which are not very likely to occur).
+ # See issues 34970 and 36607 for details.
+ scheduled_tasks = None
+ i = 0
+ while True:
+ try:
+ scheduled_tasks = list(_scheduled_tasks)
+ except RuntimeError:
+ i += 1
+ if i >= 1000:
+ raise
+ else:
+ break
+ return {t for t in itertools.chain(scheduled_tasks, eager_tasks)
+ if futures._get_loop(t) is loop and not t.done()}
+
+
+def _set_task_name(task, name):
+ if name is not None:
+ try:
+ set_name = task.set_name
+ except AttributeError:
+ warnings.warn("Task.set_name() was added in Python 3.8, "
+ "the method support will be mandatory for third-party "
+ "task implementations since 3.13.",
+ DeprecationWarning, stacklevel=3)
+ else:
+ set_name(name)
+
+
+class Task(futures._PyFuture): # Inherit Python Task implementation
+ # from a Python Future implementation.
+
+ """A coroutine wrapped in a Future."""
+
+ # An important invariant maintained while a Task is not done:
+ # _fut_waiter is either None or a Future. The Future
+ # can be either done() or not done().
+ # The task can be in any of 3 states:
+ #
+ # - 1: _fut_waiter is not None and not _fut_waiter.done():
+ # __step() is *not* scheduled and the Task is waiting for _fut_waiter.
+ # - 2: (_fut_waiter is None or _fut_waiter.done()) and __step() is scheduled:
+ # the Task is waiting for __step() to be executed.
+ # - 3: _fut_waiter is None and __step() is *not* scheduled:
+ # the Task is currently executing (in __step()).
+ #
+ # * In state 1, one of the callbacks of _fut_waiter must be __wakeup().
+ # * The transition from 1 to 2 happens when _fut_waiter becomes done(),
+ # as it schedules __wakeup() to be called (which calls __step() so
+ # we say that __step() is scheduled).
+ # * It transitions from 2 to 3 when __step() is executed, and it clears
+ # _fut_waiter to None.
+
+ # If False, don't log a message if the task is destroyed while its
+ # status is still pending
+ _log_destroy_pending = True
+
+ def __init__(self, coro, *, loop=None, name=None, context=None,
+ eager_start=False):
+ super().__init__(loop=loop)
+ if self._source_traceback:
+ del self._source_traceback[-1]
+ if not coroutines.iscoroutine(coro):
+ # raise after Future.__init__(), attrs are required for __del__
+ # prevent logging for pending task in __del__
+ self._log_destroy_pending = False
+ raise TypeError(f"a coroutine was expected, got {coro!r}")
+
+ if name is None:
+ self._name = f'Task-{_task_name_counter()}'
+ else:
+ self._name = str(name)
+
+ self._num_cancels_requested = 0
+ self._must_cancel = False
+ self._fut_waiter = None
+ self._coro = coro
+ if context is None:
+ self._context = contextvars.copy_context()
+ else:
+ self._context = context
+
+ if eager_start and self._loop.is_running():
+ self.__eager_start()
+ else:
+ self._loop.call_soon(self.__step, context=self._context)
+ _register_task(self)
+
+ def __del__(self):
+ if self._state == futures._PENDING and self._log_destroy_pending:
+ context = {
+ 'task': self,
+ 'message': 'Task was destroyed but it is pending!',
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ super().__del__()
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+ def __repr__(self):
+ return base_tasks._task_repr(self)
+
+ def get_coro(self):
+ return self._coro
+
+ def get_context(self):
+ return self._context
+
+ def get_name(self):
+ return self._name
+
+ def set_name(self, value):
+ self._name = str(value)
+
+ def set_result(self, result):
+ raise RuntimeError('Task does not support set_result operation')
+
+ def set_exception(self, exception):
+ raise RuntimeError('Task does not support set_exception operation')
+
+ def get_stack(self, *, limit=None):
+ """Return the list of stack frames for this task's coroutine.
+
+ If the coroutine is not done, this returns the stack where it is
+ suspended. If the coroutine has completed successfully or was
+ cancelled, this returns an empty list. If the coroutine was
+ terminated by an exception, this returns the list of traceback
+ frames.
+
+ The frames are always ordered from oldest to newest.
+
+ The optional limit gives the maximum number of frames to
+ return; by default all available frames are returned. Its
+ meaning differs depending on whether a stack or a traceback is
+ returned: the newest frames of a stack are returned, but the
+ oldest frames of a traceback are returned. (This matches the
+ behavior of the traceback module.)
+
+ For reasons beyond our control, only one stack frame is
+ returned for a suspended coroutine.
+ """
+ return base_tasks._task_get_stack(self, limit)
+
+ def print_stack(self, *, limit=None, file=None):
+ """Print the stack or traceback for this task's coroutine.
+
+ This produces output similar to that of the traceback module,
+ for the frames retrieved by get_stack(). The limit argument
+ is passed to get_stack(). The file argument is an I/O stream
+ to which the output is written; by default output is written
+ to sys.stderr.
+ """
+ return base_tasks._task_print_stack(self, limit, file)
+
+ def cancel(self, msg=None):
+ """Request that this task cancel itself.
+
+ This arranges for a CancelledError to be thrown into the
+ wrapped coroutine on the next cycle through the event loop.
+ The coroutine then has a chance to clean up or even deny
+ the request using try/except/finally.
+
+ Unlike Future.cancel, this does not guarantee that the
+ task will be cancelled: the exception might be caught and
+ acted upon, delaying cancellation of the task or preventing
+ cancellation completely. The task may also return a value or
+ raise a different exception.
+
+ Immediately after this method is called, Task.cancelled() will
+ not return True (unless the task was already cancelled). A
+ task will be marked as cancelled when the wrapped coroutine
+ terminates with a CancelledError exception (even if cancel()
+ was not called).
+
+ This also increases the task's count of cancellation requests.
+ """
+ self._log_traceback = False
+ if self.done():
+ return False
+ self._num_cancels_requested += 1
+ # These two lines are controversial. See discussion starting at
+ # https://github.com/python/cpython/pull/31394#issuecomment-1053545331
+ # Also remember that this is duplicated in _asynciomodule.c.
+ # if self._num_cancels_requested > 1:
+ # return False
+ if self._fut_waiter is not None:
+ if self._fut_waiter.cancel(msg=msg):
+ # Leave self._fut_waiter; it may be a Task that
+ # catches and ignores the cancellation so we may have
+ # to cancel it again later.
+ return True
+ # It must be the case that self.__step is already scheduled.
+ self._must_cancel = True
+ self._cancel_message = msg
+ return True
+
+ def cancelling(self):
+ """Return the count of the task's cancellation requests.
+
+ This count is incremented when .cancel() is called
+ and may be decremented using .uncancel().
+ """
+ return self._num_cancels_requested
+
+ def uncancel(self):
+ """Decrement the task's count of cancellation requests.
+
+ This should be called by the party that called `cancel()` on the task
+ beforehand.
+
+ Returns the remaining number of cancellation requests.
+ """
+ if self._num_cancels_requested > 0:
+ self._num_cancels_requested -= 1
+ return self._num_cancels_requested
+
+ def __eager_start(self):
+ prev_task = _swap_current_task(self._loop, self)
+ try:
+ _register_eager_task(self)
+ try:
+ self._context.run(self.__step_run_and_handle_result, None)
+ finally:
+ _unregister_eager_task(self)
+ finally:
+ try:
+ curtask = _swap_current_task(self._loop, prev_task)
+ assert curtask is self
+ finally:
+ if self.done():
+ self._coro = None
+ self = None # Needed to break cycles when an exception occurs.
+ else:
+ _register_task(self)
+
+ def __step(self, exc=None):
+ if self.done():
+ raise exceptions.InvalidStateError(
+ f'_step(): already done: {self!r}, {exc!r}')
+ if self._must_cancel:
+ if not isinstance(exc, exceptions.CancelledError):
+ exc = self._make_cancelled_error()
+ self._must_cancel = False
+ self._fut_waiter = None
+
+ _enter_task(self._loop, self)
+ try:
+ self.__step_run_and_handle_result(exc)
+ finally:
+ _leave_task(self._loop, self)
+ self = None # Needed to break cycles when an exception occurs.
+
+ def __step_run_and_handle_result(self, exc):
+ coro = self._coro
+ try:
+ if exc is None:
+ # We use the `send` method directly, because coroutines
+ # don't have `__iter__` and `__next__` methods.
+ result = coro.send(None)
+ else:
+ result = coro.throw(exc)
+ except StopIteration as exc:
+ if self._must_cancel:
+ # Task is cancelled right before coro stops.
+ self._must_cancel = False
+ super().cancel(msg=self._cancel_message)
+ else:
+ super().set_result(exc.value)
+ except exceptions.CancelledError as exc:
+ # Save the original exception so we can chain it later.
+ self._cancelled_exc = exc
+ super().cancel() # I.e., Future.cancel(self).
+ except (KeyboardInterrupt, SystemExit) as exc:
+ super().set_exception(exc)
+ raise
+ except BaseException as exc:
+ super().set_exception(exc)
+ else:
+ blocking = getattr(result, '_asyncio_future_blocking', None)
+ if blocking is not None:
+ # Yielded Future must come from Future.__iter__().
+ if futures._get_loop(result) is not self._loop:
+ new_exc = RuntimeError(
+ f'Task {self!r} got Future '
+ f'{result!r} attached to a different loop')
+ self._loop.call_soon(
+ self.__step, new_exc, context=self._context)
+ elif blocking:
+ if result is self:
+ new_exc = RuntimeError(
+ f'Task cannot await on itself: {self!r}')
+ self._loop.call_soon(
+ self.__step, new_exc, context=self._context)
+ else:
+ result._asyncio_future_blocking = False
+ result.add_done_callback(
+ self.__wakeup, context=self._context)
+ self._fut_waiter = result
+ if self._must_cancel:
+ if self._fut_waiter.cancel(
+ msg=self._cancel_message):
+ self._must_cancel = False
+ else:
+ new_exc = RuntimeError(
+ f'yield was used instead of yield from '
+ f'in task {self!r} with {result!r}')
+ self._loop.call_soon(
+ self.__step, new_exc, context=self._context)
+
+ elif result is None:
+ # Bare yield relinquishes control for one event loop iteration.
+ self._loop.call_soon(self.__step, context=self._context)
+ elif inspect.isgenerator(result):
+ # Yielding a generator is just wrong.
+ new_exc = RuntimeError(
+ f'yield was used instead of yield from for '
+ f'generator in task {self!r} with {result!r}')
+ self._loop.call_soon(
+ self.__step, new_exc, context=self._context)
+ else:
+ # Yielding something else is an error.
+ new_exc = RuntimeError(f'Task got bad yield: {result!r}')
+ self._loop.call_soon(
+ self.__step, new_exc, context=self._context)
+ finally:
+ self = None # Needed to break cycles when an exception occurs.
+
+ def __wakeup(self, future):
+ try:
+ future.result()
+ except BaseException as exc:
+ # This may also be a cancellation.
+ self.__step(exc)
+ else:
+ # Don't pass the value of `future.result()` explicitly,
+ # as `Future.__iter__` and `Future.__await__` don't need it.
+ # If we call `_step(value, None)` instead of `_step()`,
+ # Python eval loop would use `.send(value)` method call,
+ # instead of `__next__()`, which is slower for futures
+ # that return non-generator iterators from their `__iter__`.
+ self.__step()
+ self = None # Needed to break cycles when an exception occurs.
+
+
+_PyTask = Task
+
+
+try:
+ import _asyncio
+except ImportError:
+ pass
+else:
+ # _CTask is needed for tests.
+ Task = _CTask = _asyncio.Task
+
+
+def create_task(coro, *, name=None, context=None):
+ """Schedule the execution of a coroutine object in a spawn task.
+
+ Return a Task object.
+ """
+ loop = events.get_running_loop()
+ if context is None:
+ # Use legacy API if context is not needed
+ task = loop.create_task(coro)
+ else:
+ task = loop.create_task(coro, context=context)
+
+ _set_task_name(task, name)
+ return task
+
+
+# wait() and as_completed() similar to those in PEP 3148.
+
+FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
+FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
+ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+
+
+async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED):
+ """Wait for the Futures or Tasks given by fs to complete.
+
+ The fs iterable must not be empty.
+
+ Coroutines will be wrapped in Tasks.
+
+ Returns two sets of Future: (done, pending).
+
+ Usage:
+
+ done, pending = await asyncio.wait(fs)
+
+ Note: This does not raise TimeoutError! Futures that aren't done
+ when the timeout occurs are returned in the second set.
+ """
+ if futures.isfuture(fs) or coroutines.iscoroutine(fs):
+ raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
+ if not fs:
+ raise ValueError('Set of Tasks/Futures is empty.')
+ if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
+ raise ValueError(f'Invalid return_when value: {return_when}')
+
+ fs = set(fs)
+
+ if any(coroutines.iscoroutine(f) for f in fs):
+ raise TypeError("Passing coroutines is forbidden, use tasks explicitly.")
+
+ loop = events.get_running_loop()
+ return await _wait(fs, timeout, return_when, loop)
+
+
+def _release_waiter(waiter, *args):
+ if not waiter.done():
+ waiter.set_result(None)
+
+
+async def wait_for(fut, timeout):
+ """Wait for the single Future or coroutine to complete, with timeout.
+
+ Coroutine will be wrapped in Task.
+
+ Returns result of the Future or coroutine. When a timeout occurs,
+ it cancels the task and raises TimeoutError. To avoid the task
+ cancellation, wrap it in shield().
+
+ If the wait is cancelled, the task is also cancelled.
+
+ If the task suppresses the cancellation and returns a value instead,
+ that value is returned.
+
+ This function is a coroutine.
+ """
+ # The special case for timeout <= 0 is for the following case:
+ #
+ # async def test_waitfor():
+ # func_started = False
+ #
+ # async def func():
+ # nonlocal func_started
+ # func_started = True
+ #
+ # try:
+ # await asyncio.wait_for(func(), 0)
+ # except asyncio.TimeoutError:
+ # assert not func_started
+ # else:
+ # assert False
+ #
+ # asyncio.run(test_waitfor())
+
+
+ if timeout is not None and timeout <= 0:
+ fut = ensure_future(fut)
+
+ if fut.done():
+ return fut.result()
+
+ await _cancel_and_wait(fut)
+ try:
+ return fut.result()
+ except exceptions.CancelledError as exc:
+ raise TimeoutError from exc
+
+ async with timeouts.timeout(timeout):
+ return await fut
+
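
A sketch of the timeout path (illustrative only): the inner task is cancelled and the CancelledError is converted into the built-in TimeoutError, per the `raise TimeoutError from exc` above.

import asyncio

async def main():
    try:
        await asyncio.wait_for(asyncio.sleep(10), timeout=0.1)
    except TimeoutError:
        print('timed out; the inner task was cancelled first')

asyncio.run(main())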
+async def _wait(fs, timeout, return_when, loop):
+ """Internal helper for wait().
+
+ The fs argument must be a collection of Futures.
+ """
+ assert fs, 'Set of Futures is empty.'
+ waiter = loop.create_future()
+ timeout_handle = None
+ if timeout is not None:
+ timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
+ counter = len(fs)
+
+ def _on_completion(f):
+ nonlocal counter
+ counter -= 1
+ if (counter <= 0 or
+ return_when == FIRST_COMPLETED or
+ return_when == FIRST_EXCEPTION and (not f.cancelled() and
+ f.exception() is not None)):
+ if timeout_handle is not None:
+ timeout_handle.cancel()
+ if not waiter.done():
+ waiter.set_result(None)
+
+ for f in fs:
+ f.add_done_callback(_on_completion)
+
+ try:
+ await waiter
+ finally:
+ if timeout_handle is not None:
+ timeout_handle.cancel()
+ for f in fs:
+ f.remove_done_callback(_on_completion)
+
+ done, pending = set(), set()
+ for f in fs:
+ if f.done():
+ done.add(f)
+ else:
+ pending.add(f)
+ return done, pending
+
+
+async def _cancel_and_wait(fut):
+ """Cancel the *fut* future or task and wait until it completes."""
+
+ loop = events.get_running_loop()
+ waiter = loop.create_future()
+ cb = functools.partial(_release_waiter, waiter)
+ fut.add_done_callback(cb)
+
+ try:
+ fut.cancel()
+ # We cannot wait on *fut* directly to make
+ # sure _cancel_and_wait itself is reliably cancellable.
+ await waiter
+ finally:
+ fut.remove_done_callback(cb)
+
+
+# This is *not* a @coroutine! It is just an iterator (yielding Futures).
+def as_completed(fs, *, timeout=None):
+ """Return an iterator whose values are coroutines.
+
+ When waiting for the yielded coroutines you'll get the results (or
+ exceptions!) of the original Futures (or coroutines), in the order
+ in which and as soon as they complete.
+
+ This differs from PEP 3148; the proper way to use this is:
+
+ for f in as_completed(fs):
+ result = await f # The 'await' may raise.
+ # Use result.
+
+ If a timeout is specified, the 'await' will raise
+ TimeoutError when the timeout occurs before all Futures are done.
+
+ Note: The futures 'f' are not necessarily members of fs.
+ """
+ if futures.isfuture(fs) or coroutines.iscoroutine(fs):
+ raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")
+
+ from .queues import Queue # Import here to avoid circular import problem.
+ done = Queue()
+
+ loop = events.get_event_loop()
+ todo = {ensure_future(f, loop=loop) for f in set(fs)}
+ timeout_handle = None
+
+ def _on_timeout():
+ for f in todo:
+ f.remove_done_callback(_on_completion)
+ done.put_nowait(None) # Queue a dummy value for _wait_for_one().
+ todo.clear() # Can't do todo.remove(f) in the loop.
+
+ def _on_completion(f):
+ if not todo:
+ return # _on_timeout() was here first.
+ todo.remove(f)
+ done.put_nowait(f)
+ if not todo and timeout_handle is not None:
+ timeout_handle.cancel()
+
+ async def _wait_for_one():
+ f = await done.get()
+ if f is None:
+ # Dummy value from _on_timeout().
+ raise exceptions.TimeoutError
+ return f.result() # May raise f.exception().
+
+ for f in todo:
+ f.add_done_callback(_on_completion)
+ if todo and timeout is not None:
+ timeout_handle = loop.call_later(timeout, _on_timeout)
+ for _ in range(len(todo)):
+ yield _wait_for_one()
+
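
A sketch of the iteration pattern from the docstring above (illustrative only); results arrive in completion order, not submission order.

import asyncio

async def job(n):
    await asyncio.sleep(n / 10)
    return n

async def main():
    for fut in asyncio.as_completed([job(3), job(1), job(2)]):
        print(await fut)          # 1, then 2, then 3

asyncio.run(main())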
+
+@types.coroutine
+def __sleep0():
+ """Skip one event loop run cycle.
+
+ This is a private helper for 'asyncio.sleep()', used
+ when the 'delay' is set to 0. It uses a bare 'yield'
+ expression (which Task.__step knows how to handle)
+ instead of creating a Future object.
+ """
+ yield
+
+
+async def sleep(delay, result=None):
+ """Coroutine that completes after a given time (in seconds)."""
+ if delay <= 0:
+ await __sleep0()
+ return result
+
+ loop = events.get_running_loop()
+ future = loop.create_future()
+ h = loop.call_later(delay,
+ futures._set_result_unless_cancelled,
+ future, result)
+ try:
+ return await future
+ finally:
+ h.cancel()
+
+
+def ensure_future(coro_or_future, *, loop=None):
+ """Wrap a coroutine or an awaitable in a future.
+
+ If the argument is a Future, it is returned directly.
+ """
+ if futures.isfuture(coro_or_future):
+ if loop is not None and loop is not futures._get_loop(coro_or_future):
+ raise ValueError('The future belongs to a different loop than '
+ 'the one specified as the loop argument')
+ return coro_or_future
+ should_close = True
+ if not coroutines.iscoroutine(coro_or_future):
+ if inspect.isawaitable(coro_or_future):
+ async def _wrap_awaitable(awaitable):
+ return await awaitable
+
+ coro_or_future = _wrap_awaitable(coro_or_future)
+ should_close = False
+ else:
+ raise TypeError('An asyncio.Future, a coroutine or an awaitable '
+ 'is required')
+
+ if loop is None:
+ loop = events.get_event_loop()
+ try:
+ return loop.create_task(coro_or_future)
+ except RuntimeError:
+ if should_close:
+ coro_or_future.close()
+ raise
+
+
+class _GatheringFuture(futures.Future):
+ """Helper for gather().
+
+ This overrides cancel() to cancel all the children and act more
+ like Task.cancel(), which doesn't immediately mark itself as
+ cancelled.
+ """
+
+ def __init__(self, children, *, loop):
+ assert loop is not None
+ super().__init__(loop=loop)
+ self._children = children
+ self._cancel_requested = False
+
+ def cancel(self, msg=None):
+ if self.done():
+ return False
+ ret = False
+ for child in self._children:
+ if child.cancel(msg=msg):
+ ret = True
+ if ret:
+ # If any child tasks were actually cancelled, we should
+ # propagate the cancellation request regardless of
+ # *return_exceptions* argument. See issue 32684.
+ self._cancel_requested = True
+ return ret
+
+
+def gather(*coros_or_futures, return_exceptions=False):
+ """Return a future aggregating results from the given coroutines/futures.
+
+ Coroutines will be wrapped in a future and scheduled in the event
+ loop. They will not necessarily be scheduled in the same order as
+ passed in.
+
+ All futures must share the same event loop. If all the tasks are
+ done successfully, the returned future's result is the list of
+ results (in the order of the original sequence, not necessarily
+ the order of results arrival). If *return_exceptions* is True,
+ exceptions in the tasks are treated the same as successful
+ results, and gathered in the result list; otherwise, the first
+ raised exception will be immediately propagated to the returned
+ future.
+
+ Cancellation: if the outer Future is cancelled, all children (that
+ have not completed yet) are also cancelled. If any child is
+ cancelled, this is treated as if it raised CancelledError --
+ the outer Future is *not* cancelled in this case. (This is to
+ prevent the cancellation of one child to cause other children to
+ be cancelled.)
+
+ If *return_exceptions* is False, cancelling gather() after it
+ has been marked done won't cancel any submitted awaitables.
+ For instance, gather can be marked done after propagating an
+ exception to the caller, therefore, calling ``gather.cancel()``
+ after catching an exception (raised by one of the awaitables) from
+ gather won't cancel any other awaitables.
+ """
+ if not coros_or_futures:
+ loop = events.get_event_loop()
+ outer = loop.create_future()
+ outer.set_result([])
+ return outer
+
+ def _done_callback(fut):
+ nonlocal nfinished
+ nfinished += 1
+
+ if outer is None or outer.done():
+ if not fut.cancelled():
+ # Mark exception retrieved.
+ fut.exception()
+ return
+
+ if not return_exceptions:
+ if fut.cancelled():
+ # Check if 'fut' is cancelled first, as
+ # 'fut.exception()' will *raise* a CancelledError
+ # instead of returning it.
+ exc = fut._make_cancelled_error()
+ outer.set_exception(exc)
+ return
+ else:
+ exc = fut.exception()
+ if exc is not None:
+ outer.set_exception(exc)
+ return
+
+ if nfinished == nfuts:
+ # All futures are done; create a list of results
+ # and set it to the 'outer' future.
+ results = []
+
+ for fut in children:
+ if fut.cancelled():
+ # Check if 'fut' is cancelled first, as 'fut.exception()'
+ # will *raise* a CancelledError instead of returning it.
+ # Also, since we're adding the exception return value
+ # to 'results' instead of raising it, don't bother
+ # setting __context__. This also lets us preserve
+ # calling '_make_cancelled_error()' at most once.
+ res = exceptions.CancelledError(
+ '' if fut._cancel_message is None else
+ fut._cancel_message)
+ else:
+ res = fut.exception()
+ if res is None:
+ res = fut.result()
+ results.append(res)
+
+ if outer._cancel_requested:
+ # If gather is being cancelled we must propagate the
+ # cancellation regardless of *return_exceptions* argument.
+ # See issue 32684.
+ exc = fut._make_cancelled_error()
+ outer.set_exception(exc)
+ else:
+ outer.set_result(results)
+
+ arg_to_fut = {}
+ children = []
+ nfuts = 0
+ nfinished = 0
+ done_futs = []
+ loop = None
+ outer = None # bpo-46672
+ for arg in coros_or_futures:
+ if arg not in arg_to_fut:
+ fut = ensure_future(arg, loop=loop)
+ if loop is None:
+ loop = futures._get_loop(fut)
+ if fut is not arg:
+ # 'arg' was not a Future, therefore, 'fut' is a new
+ # Future created specifically for 'arg'. Since the caller
+ # can't control it, disable the "destroy pending task"
+ # warning.
+ fut._log_destroy_pending = False
+
+ nfuts += 1
+ arg_to_fut[arg] = fut
+ if fut.done():
+ done_futs.append(fut)
+ else:
+ fut.add_done_callback(_done_callback)
+
+ else:
+ # There's a duplicate Future object in coros_or_futures.
+ fut = arg_to_fut[arg]
+
+ children.append(fut)
+
+ outer = _GatheringFuture(children, loop=loop)
+ # Run done callbacks after GatheringFuture created so any post-processing
+ # can be performed at this point
+ # optimization: in the special case that *all* futures finished eagerly,
+ # this will effectively complete the gather eagerly, with the last
+ # callback setting the result (or exception) on outer before returning it
+ for fut in done_futs:
+ _done_callback(fut)
+ return outer
+
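
A sketch contrasting the two *return_exceptions* modes described in the docstring (illustrative only):

import asyncio

async def ok():
    return 'ok'

async def bad():
    raise RuntimeError('bad')

async def main():
    # With return_exceptions=True the exception is collected into the
    # result list instead of being propagated to the caller.
    print(await asyncio.gather(ok(), bad(), return_exceptions=True))
    try:
        await asyncio.gather(ok(), bad())      # default: first error raises
    except RuntimeError as exc:
        print('propagated:', exc)

asyncio.run(main())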
+
+def shield(arg):
+ """Wait for a future, shielding it from cancellation.
+
+ The statement
+
+ task = asyncio.create_task(something())
+ res = await shield(task)
+
+ is exactly equivalent to the statement
+
+ res = await something()
+
+ *except* that if the coroutine containing it is cancelled, the
+ task running in something() is not cancelled. From the POV of
+ something(), the cancellation did not happen. But its caller is
+ still cancelled, so the yield-from expression still raises
+ CancelledError. Note: If something() is cancelled by other means
+ this will still cancel shield().
+
+ If you want to completely ignore cancellation (not recommended)
+ you can combine shield() with a try/except clause, as follows:
+
+ task = asyncio.create_task(something())
+ try:
+ res = await shield(task)
+ except CancelledError:
+ res = None
+
+ Save a reference to tasks passed to this function, to avoid
+ a task disappearing mid-execution. The event loop only keeps
+ weak references to tasks. A task that isn't referenced elsewhere
+ may get garbage collected at any time, even before it's done.
+ """
+ inner = ensure_future(arg)
+ if inner.done():
+ # Shortcut.
+ return inner
+ loop = futures._get_loop(inner)
+ outer = loop.create_future()
+
+ def _inner_done_callback(inner):
+ if outer.cancelled():
+ if not inner.cancelled():
+ # Mark inner's result as retrieved.
+ inner.exception()
+ return
+
+ if inner.cancelled():
+ outer.cancel()
+ else:
+ exc = inner.exception()
+ if exc is not None:
+ outer.set_exception(exc)
+ else:
+ outer.set_result(inner.result())
+
+
+ def _outer_done_callback(outer):
+ if not inner.done():
+ inner.remove_done_callback(_inner_done_callback)
+
+ inner.add_done_callback(_inner_done_callback)
+ outer.add_done_callback(_outer_done_callback)
+ return outer
+
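
A sketch of shielding (illustrative only): the outer await is cancelled by the timeout, while the shielded task keeps running to completion.

import asyncio

async def critical():
    await asyncio.sleep(0.2)
    return 'done'

async def main():
    task = asyncio.create_task(critical())     # keep a reference (see above)
    try:
        await asyncio.wait_for(asyncio.shield(task), timeout=0.05)
    except TimeoutError:
        print(await task)                      # inner task survived: 'done'

asyncio.run(main())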
+
+def run_coroutine_threadsafe(coro, loop):
+ """Submit a coroutine object to a given event loop.
+
+ Return a concurrent.futures.Future to access the result.
+ """
+ if not coroutines.iscoroutine(coro):
+ raise TypeError('A coroutine object is required')
+ future = concurrent.futures.Future()
+
+ def callback():
+ try:
+ futures._chain_future(ensure_future(coro, loop=loop), future)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ if future.set_running_or_notify_cancel():
+ future.set_exception(exc)
+ raise
+
+ loop.call_soon_threadsafe(callback)
+ return future
+
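
A sketch of cross-thread submission (illustrative only): a worker thread hands a coroutine to the running loop and blocks on the returned concurrent.futures.Future.

import asyncio
import threading

async def answer():
    return 42

def worker(loop):
    fut = asyncio.run_coroutine_threadsafe(answer(), loop)
    print(fut.result(timeout=1))               # 42

async def main():
    t = threading.Thread(target=worker, args=(asyncio.get_running_loop(),))
    t.start()
    await asyncio.sleep(0.1)                   # let the loop run answer()
    t.join()

asyncio.run(main())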
+
+def create_eager_task_factory(custom_task_constructor):
+ """Create a function suitable for use as a task factory on an event-loop.
+
+ Example usage:
+
+ loop.set_task_factory(
+ asyncio.create_eager_task_factory(my_task_constructor))
+
+ Now, tasks created will be started immediately (rather than being first
+ scheduled to an event loop). The constructor argument can be any callable
+ that returns a Task-compatible object and has a signature compatible
+ with `Task.__init__`; it must have the `eager_start` keyword argument.
+
+ Most applications will use `Task` for `custom_task_constructor` and in
+ this case there's no need to call `create_eager_task_factory()`
+ directly. Instead the global `eager_task_factory` instance can be
+ used. E.g. `loop.set_task_factory(asyncio.eager_task_factory)`.
+ """
+
+ def factory(loop, coro, *, name=None, context=None):
+ return custom_task_constructor(
+ coro, loop=loop, name=name, context=context, eager_start=True)
+
+ return factory
+
+
+eager_task_factory = create_eager_task_factory(Task)
+
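
A sketch of the eager factory in action (illustrative only; requires Python 3.12+): a coroutine that never blocks completes during create_task() itself.

import asyncio

async def immediate():
    return 'ran eagerly'          # no await, so the first step finishes it

async def main():
    loop = asyncio.get_running_loop()
    loop.set_task_factory(asyncio.eager_task_factory)
    task = loop.create_task(immediate())
    print(task.done())            # True: the first step ran synchronously
    print(await task)

asyncio.run(main())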
+
+# Collectively these two sets hold references to the complete set of active
+# tasks. Eagerly executed tasks use a faster regular set as an optimization
+# but may graduate to a WeakSet if the task blocks on IO.
+_scheduled_tasks = weakref.WeakSet()
+_eager_tasks = set()
+
+# Dictionary containing tasks that are currently active in
+# all running event loops. {EventLoop: Task}
+_current_tasks = {}
+
+
+def _register_task(task):
+ """Register an asyncio Task scheduled to run on an event loop."""
+ _scheduled_tasks.add(task)
+
+
+def _register_eager_task(task):
+ """Register an asyncio Task about to be eagerly executed."""
+ _eager_tasks.add(task)
+
+
+def _enter_task(loop, task):
+ current_task = _current_tasks.get(loop)
+ if current_task is not None:
+ raise RuntimeError(f"Cannot enter into task {task!r} while another "
+ f"task {current_task!r} is being executed.")
+ _current_tasks[loop] = task
+
+
+def _leave_task(loop, task):
+ current_task = _current_tasks.get(loop)
+ if current_task is not task:
+ raise RuntimeError(f"Leaving task {task!r} does not match "
+ f"the current task {current_task!r}.")
+ del _current_tasks[loop]
+
+
+def _swap_current_task(loop, task):
+ prev_task = _current_tasks.get(loop)
+ if task is None:
+ del _current_tasks[loop]
+ else:
+ _current_tasks[loop] = task
+ return prev_task
+
+
+def _unregister_task(task):
+ """Unregister a completed, scheduled Task."""
+ _scheduled_tasks.discard(task)
+
+
+def _unregister_eager_task(task):
+ """Unregister a task which finished its first eager step."""
+ _eager_tasks.discard(task)
+
+
+_py_current_task = current_task
+_py_register_task = _register_task
+_py_register_eager_task = _register_eager_task
+_py_unregister_task = _unregister_task
+_py_unregister_eager_task = _unregister_eager_task
+_py_enter_task = _enter_task
+_py_leave_task = _leave_task
+_py_swap_current_task = _swap_current_task
+
+
+try:
+ from _asyncio import (_register_task, _register_eager_task,
+ _unregister_task, _unregister_eager_task,
+ _enter_task, _leave_task, _swap_current_task,
+ _scheduled_tasks, _eager_tasks, _current_tasks,
+ current_task)
+except ImportError:
+ pass
+else:
+ _c_current_task = current_task
+ _c_register_task = _register_task
+ _c_register_eager_task = _register_eager_task
+ _c_unregister_task = _unregister_task
+ _c_unregister_eager_task = _unregister_eager_task
+ _c_enter_task = _enter_task
+ _c_leave_task = _leave_task
+ _c_swap_current_task = _swap_current_task
diff --git a/contrib/tools/python3/Lib/asyncio/threads.py b/contrib/tools/python3/Lib/asyncio/threads.py
new file mode 100644
index 0000000000..db048a8231
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/threads.py
@@ -0,0 +1,25 @@
+"""High-level support for working with threads in asyncio"""
+
+import functools
+import contextvars
+
+from . import events
+
+
+__all__ = "to_thread",
+
+
+async def to_thread(func, /, *args, **kwargs):
+ """Asynchronously run function *func* in a separate thread.
+
+ Any *args and **kwargs supplied for this function are directly passed
+ to *func*. Also, the current :class:`contextvars.Context` is propagated,
+ allowing context variables from the main thread to be accessed in the
+ separate thread.
+
+ Return a coroutine that can be awaited to get the eventual result of *func*.
+ """
+ loop = events.get_running_loop()
+ ctx = contextvars.copy_context()
+ func_call = functools.partial(ctx.run, func, *args, **kwargs)
+ return await loop.run_in_executor(None, func_call)
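
A sketch of offloading a blocking call with the helper above (illustrative only):

import asyncio
import time

def blocking_io(seconds):
    time.sleep(seconds)           # would stall the loop if called directly
    return seconds

async def main():
    # Runs in the default executor thread; the event loop stays responsive.
    print(await asyncio.to_thread(blocking_io, 0.1))

asyncio.run(main())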
diff --git a/contrib/tools/python3/Lib/asyncio/timeouts.py b/contrib/tools/python3/Lib/asyncio/timeouts.py
new file mode 100644
index 0000000000..30042abb3a
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/timeouts.py
@@ -0,0 +1,168 @@
+import enum
+
+from types import TracebackType
+from typing import final, Optional, Type
+
+from . import events
+from . import exceptions
+from . import tasks
+
+
+__all__ = (
+ "Timeout",
+ "timeout",
+ "timeout_at",
+)
+
+
+class _State(enum.Enum):
+ CREATED = "created"
+ ENTERED = "active"
+ EXPIRING = "expiring"
+ EXPIRED = "expired"
+ EXITED = "finished"
+
+
+@final
+class Timeout:
+ """Asynchronous context manager for cancelling overdue coroutines.
+
+ Use `timeout()` or `timeout_at()` rather than instantiating this class directly.
+ """
+
+ def __init__(self, when: Optional[float]) -> None:
+ """Schedule a timeout that will trigger at a given loop time.
+
+ - If `when` is `None`, the timeout will never trigger.
+ - If `when < loop.time()`, the timeout will trigger on the next
+ iteration of the event loop.
+ """
+ self._state = _State.CREATED
+
+ self._timeout_handler: Optional[events.TimerHandle] = None
+ self._task: Optional[tasks.Task] = None
+ self._when = when
+
+ def when(self) -> Optional[float]:
+ """Return the current deadline."""
+ return self._when
+
+ def reschedule(self, when: Optional[float]) -> None:
+ """Reschedule the timeout."""
+ if self._state is not _State.ENTERED:
+ if self._state is _State.CREATED:
+ raise RuntimeError("Timeout has not been entered")
+ raise RuntimeError(
+ f"Cannot change state of {self._state.value} Timeout",
+ )
+
+ self._when = when
+
+ if self._timeout_handler is not None:
+ self._timeout_handler.cancel()
+
+ if when is None:
+ self._timeout_handler = None
+ else:
+ loop = events.get_running_loop()
+ if when <= loop.time():
+ self._timeout_handler = loop.call_soon(self._on_timeout)
+ else:
+ self._timeout_handler = loop.call_at(when, self._on_timeout)
+
+ def expired(self) -> bool:
+ """Is timeout expired during execution?"""
+ return self._state in (_State.EXPIRING, _State.EXPIRED)
+
+ def __repr__(self) -> str:
+ info = ['']
+ if self._state is _State.ENTERED:
+ when = round(self._when, 3) if self._when is not None else None
+ info.append(f"when={when}")
+ info_str = ' '.join(info)
+ return f"<Timeout [{self._state.value}]{info_str}>"
+
+ async def __aenter__(self) -> "Timeout":
+ if self._state is not _State.CREATED:
+ raise RuntimeError("Timeout has already been entered")
+ task = tasks.current_task()
+ if task is None:
+ raise RuntimeError("Timeout should be used inside a task")
+ self._state = _State.ENTERED
+ self._task = task
+ self._cancelling = self._task.cancelling()
+ self.reschedule(self._when)
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> Optional[bool]:
+ assert self._state in (_State.ENTERED, _State.EXPIRING)
+
+ if self._timeout_handler is not None:
+ self._timeout_handler.cancel()
+ self._timeout_handler = None
+
+ if self._state is _State.EXPIRING:
+ self._state = _State.EXPIRED
+
+ if self._task.uncancel() <= self._cancelling and exc_type is exceptions.CancelledError:
+ # Since there are no new cancel requests, we're
+ # handling this.
+ raise TimeoutError from exc_val
+ elif self._state is _State.ENTERED:
+ self._state = _State.EXITED
+
+ return None
+
+ def _on_timeout(self) -> None:
+ assert self._state is _State.ENTERED
+ self._task.cancel()
+ self._state = _State.EXPIRING
+ # drop the reference early
+ self._timeout_handler = None
+
+
+def timeout(delay: Optional[float]) -> Timeout:
+ """Timeout async context manager.
+
+ Useful when you want to apply timeout logic around a block
+ of code or when asyncio.wait_for() is not suitable. For example:
+
+ >>> async with asyncio.timeout(10): # 10 seconds timeout
+ ... await long_running_task()
+
+
+ delay - value in seconds or None to disable timeout logic
+
+ long_running_task() is interrupted by raising asyncio.CancelledError;
+ the top-most affected timeout() context manager converts the
+ CancelledError into a TimeoutError.
+ """
+ loop = events.get_running_loop()
+ return Timeout(loop.time() + delay if delay is not None else None)
+
+
+def timeout_at(when: Optional[float]) -> Timeout:
+ """Schedule the timeout at absolute time.
+
+ Like timeout() but argument gives absolute time in the same clock system
+ as loop.time().
+
+ Please note: it is not POSIX time but a time with an
+ undefined starting base, e.g. the time since system power-on.
+
+ >>> async with asyncio.timeout_at(loop.time() + 10):
+ ... await long_running_task()
+
+
+ when - a deadline when timeout occurs or None to disable timeout logic
+
+ long_running_task() is interrupted by raising asyncio.CancelledError;
+ the top-most affected timeout() context manager converts the
+ CancelledError into a TimeoutError.
+ """
+ return Timeout(when)
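
A sketch of the context managers above (illustrative only; requires Python 3.11+), including a reschedule() while the timeout is active:

import asyncio

async def main():
    try:
        async with asyncio.timeout(0.1) as cm:
            # Push the deadline back while still inside the block.
            cm.reschedule(asyncio.get_running_loop().time() + 0.2)
            await asyncio.sleep(10)
    except TimeoutError:
        print('expired:', cm.expired())        # True

asyncio.run(main())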
diff --git a/contrib/tools/python3/Lib/asyncio/transports.py b/contrib/tools/python3/Lib/asyncio/transports.py
new file mode 100644
index 0000000000..30fd41d49a
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/transports.py
@@ -0,0 +1,335 @@
+"""Abstract Transport class."""
+
+__all__ = (
+ 'BaseTransport', 'ReadTransport', 'WriteTransport',
+ 'Transport', 'DatagramTransport', 'SubprocessTransport',
+)
+
+
+class BaseTransport:
+ """Base class for transports."""
+
+ __slots__ = ('_extra',)
+
+ def __init__(self, extra=None):
+ if extra is None:
+ extra = {}
+ self._extra = extra
+
+ def get_extra_info(self, name, default=None):
+ """Get optional transport information."""
+ return self._extra.get(name, default)
+
+ def is_closing(self):
+ """Return True if the transport is closing or closed."""
+ raise NotImplementedError
+
+ def close(self):
+ """Close the transport.
+
+ Buffered data will be flushed asynchronously. No more data
+ will be received. After all buffered data is flushed, the
+ protocol's connection_lost() method will (eventually) be
+ called with None as its argument.
+ """
+ raise NotImplementedError
+
+ def set_protocol(self, protocol):
+ """Set a new protocol."""
+ raise NotImplementedError
+
+ def get_protocol(self):
+ """Return the current protocol."""
+ raise NotImplementedError
+
+
+class ReadTransport(BaseTransport):
+ """Interface for read-only transports."""
+
+ __slots__ = ()
+
+ def is_reading(self):
+ """Return True if the transport is receiving."""
+ raise NotImplementedError
+
+ def pause_reading(self):
+ """Pause the receiving end.
+
+ No data will be passed to the protocol's data_received()
+ method until resume_reading() is called.
+ """
+ raise NotImplementedError
+
+ def resume_reading(self):
+ """Resume the receiving end.
+
+ Data received will once again be passed to the protocol's
+ data_received() method.
+ """
+ raise NotImplementedError
+
+
+class WriteTransport(BaseTransport):
+ """Interface for write-only transports."""
+
+ __slots__ = ()
+
+ def set_write_buffer_limits(self, high=None, low=None):
+ """Set the high- and low-water limits for write flow control.
+
+ These two values control when to call the protocol's
+ pause_writing() and resume_writing() methods. If specified,
+ the low-water limit must be less than or equal to the
+ high-water limit. Neither value can be negative.
+
+ The defaults are implementation-specific. If only the
+ high-water limit is given, the low-water limit defaults to an
+ implementation-specific value less than or equal to the
+ high-water limit. Setting high to zero forces low to zero as
+ well, and causes pause_writing() to be called whenever the
+ buffer becomes non-empty. Setting low to zero causes
+ resume_writing() to be called only once the buffer is empty.
+ Use of zero for either limit is generally sub-optimal as it
+ reduces opportunities for doing I/O and computation
+ concurrently.
+ """
+ raise NotImplementedError
+
+ def get_write_buffer_size(self):
+ """Return the current size of the write buffer."""
+ raise NotImplementedError
+
+    def get_write_buffer_limits(self):
+        """Get the high and low watermarks for write flow control.
+
+        Return a tuple (low, high) where low and high are positive
+        numbers of bytes.
+        """
+        raise NotImplementedError
+
+ def write(self, data):
+ """Write some data bytes to the transport.
+
+ This does not block; it buffers the data and arranges for it
+ to be sent out asynchronously.
+ """
+ raise NotImplementedError
+
+ def writelines(self, list_of_data):
+ """Write a list (or any iterable) of data bytes to the transport.
+
+ The default implementation concatenates the arguments and
+ calls write() on the result.
+ """
+ data = b''.join(list_of_data)
+ self.write(data)
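+
+    # Equivalence shown explicitly (an illustrative sketch): the two calls
+    # below buffer exactly the same bytes.
+    #
+    #     transport.writelines([b'HTTP/1.1 200 OK\r\n', b'\r\n'])
+    #     transport.write(b'HTTP/1.1 200 OK\r\n\r\n')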
+
+ def write_eof(self):
+ """Close the write end after flushing buffered data.
+
+ (This is like typing ^D into a UNIX program reading from stdin.)
+
+ Data may still be received.
+ """
+ raise NotImplementedError
+
+ def can_write_eof(self):
+ """Return True if this transport supports write_eof(), False if not."""
+ raise NotImplementedError
+
+ def abort(self):
+ """Close the transport immediately.
+
+ Buffered data will be lost. No more data will be received.
+ The protocol's connection_lost() method will (eventually) be
+ called with None as its argument.
+ """
+ raise NotImplementedError
+
+
+class Transport(ReadTransport, WriteTransport):
+ """Interface representing a bidirectional transport.
+
+ There may be several implementations, but typically, the user does
+ not implement new transports; rather, the platform provides some
+ useful transports that are implemented using the platform's best
+ practices.
+
+ The user never instantiates a transport directly; they call a
+ utility function, passing it a protocol factory and other
+ information necessary to create the transport and protocol. (E.g.
+ EventLoop.create_connection() or EventLoop.create_server().)
+
+ The utility function will asynchronously create a transport and a
+ protocol and hook them up by calling the protocol's
+ connection_made() method, passing it the transport.
+
+    The implementation here raises NotImplementedError for every method
+    except writelines(), which concatenates the data and calls write() once.
+ """
+
+ __slots__ = ()
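+
+    # A minimal sketch of how a Transport is typically obtained; the host,
+    # port and protocol class are hypothetical:
+    #
+    #     loop = asyncio.get_running_loop()
+    #     transport, protocol = await loop.create_connection(
+    #         lambda: MyProtocol(), 'example.com', 80)
+    #     transport.write(b'GET / HTTP/1.0\r\n\r\n')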
+
+
+class DatagramTransport(BaseTransport):
+ """Interface for datagram (UDP) transports."""
+
+ __slots__ = ()
+
+ def sendto(self, data, addr=None):
+ """Send data to the transport.
+
+ This does not block; it buffers the data and arranges for it
+ to be sent out asynchronously.
+        addr is the target socket address.
+        If addr is None, use the target address given at transport creation.
+ """
+ raise NotImplementedError
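+
+    # Illustrative sketch: for a transport created with a default remote
+    # address (e.g. loop.create_datagram_endpoint(..., remote_addr=...)),
+    # the two calls below send to the same peer (the address is
+    # hypothetical):
+    #
+    #     transport.sendto(b'ping')
+    #     transport.sendto(b'ping', ('127.0.0.1', 9999))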
+
+ def abort(self):
+ """Close the transport immediately.
+
+ Buffered data will be lost. No more data will be received.
+ The protocol's connection_lost() method will (eventually) be
+ called with None as its argument.
+ """
+ raise NotImplementedError
+
+
+class SubprocessTransport(BaseTransport):
+
+ __slots__ = ()
+
+ def get_pid(self):
+ """Get subprocess id."""
+ raise NotImplementedError
+
+ def get_returncode(self):
+ """Get subprocess returncode.
+
+ See also
+ http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
+ """
+ raise NotImplementedError
+
+ def get_pipe_transport(self, fd):
+ """Get transport for pipe with number fd."""
+ raise NotImplementedError
+
+ def send_signal(self, signal):
+ """Send signal to subprocess.
+
+ See also:
+ docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
+ """
+ raise NotImplementedError
+
+ def terminate(self):
+ """Stop the subprocess.
+
+ Alias for close() method.
+
+ On Posix OSs the method sends SIGTERM to the subprocess.
+ On Windows the Win32 API function TerminateProcess()
+ is called to stop the subprocess.
+
+ See also:
+ http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
+ """
+ raise NotImplementedError
+
+ def kill(self):
+ """Kill the subprocess.
+
+ On Posix OSs the function sends SIGKILL to the subprocess.
+ On Windows kill() is an alias for terminate().
+
+ See also:
+ http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
+ """
+ raise NotImplementedError
+
+
+class _FlowControlMixin(Transport):
+ """All the logic for (write) flow control in a mix-in base class.
+
+ The subclass must implement get_write_buffer_size(). It must call
+ _maybe_pause_protocol() whenever the write buffer size increases,
+ and _maybe_resume_protocol() whenever it decreases. It may also
+ override set_write_buffer_limits() (e.g. to specify different
+ defaults).
+
+ The subclass constructor must call super().__init__(extra). This
+ will call set_write_buffer_limits().
+
+ The user may call set_write_buffer_limits() and
+ get_write_buffer_size(), and their protocol's pause_writing() and
+ resume_writing() may be called.
+ """
+
+ __slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water')
+
+ def __init__(self, extra=None, loop=None):
+ super().__init__(extra)
+ assert loop is not None
+ self._loop = loop
+ self._protocol_paused = False
+ self._set_write_buffer_limits()
+
+ def _maybe_pause_protocol(self):
+ size = self.get_write_buffer_size()
+ if size <= self._high_water:
+ return
+ if not self._protocol_paused:
+ self._protocol_paused = True
+ try:
+ self._protocol.pause_writing()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._loop.call_exception_handler({
+ 'message': 'protocol.pause_writing() failed',
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+
+ def _maybe_resume_protocol(self):
+ if (self._protocol_paused and
+ self.get_write_buffer_size() <= self._low_water):
+ self._protocol_paused = False
+ try:
+ self._protocol.resume_writing()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._loop.call_exception_handler({
+ 'message': 'protocol.resume_writing() failed',
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+
+ def get_write_buffer_limits(self):
+ return (self._low_water, self._high_water)
+
+ def _set_write_buffer_limits(self, high=None, low=None):
+ if high is None:
+ if low is None:
+ high = 64 * 1024
+ else:
+ high = 4 * low
+ if low is None:
+ low = high // 4
+
+ if not high >= low >= 0:
+ raise ValueError(
+ f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
+
+ self._high_water = high
+ self._low_water = low
+
+ def set_write_buffer_limits(self, high=None, low=None):
+ self._set_write_buffer_limits(high=high, low=low)
+ self._maybe_pause_protocol()
+
+ def get_write_buffer_size(self):
+ raise NotImplementedError
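+
+
+# A minimal sketch of the subclass contract described in _FlowControlMixin's
+# docstring (the class and attribute names below are illustrative, not part
+# of this module):
+#
+#     class _SketchTransport(_FlowControlMixin):
+#         def get_write_buffer_size(self):
+#             return len(self._buffer)
+#
+#         def _on_data_buffered(self, data):
+#             self._buffer += data
+#             self._maybe_pause_protocol()   # buffer grew
+#
+#         def _on_data_flushed(self, n):
+#             del self._buffer[:n]
+#             self._maybe_resume_protocol()  # buffer shrank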
diff --git a/contrib/tools/python3/Lib/asyncio/trsock.py b/contrib/tools/python3/Lib/asyncio/trsock.py
new file mode 100644
index 0000000000..c1f20473b3
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/trsock.py
@@ -0,0 +1,98 @@
+import socket
+
+
+class TransportSocket:
+
+ """A socket-like wrapper for exposing real transport sockets.
+
+ These objects can be safely returned by APIs like
+ `transport.get_extra_info('socket')`. All potentially disruptive
+ operations (like "socket.close()") are banned.
+ """
+
+ __slots__ = ('_sock',)
+
+ def __init__(self, sock: socket.socket):
+ self._sock = sock
+
+ @property
+ def family(self):
+ return self._sock.family
+
+ @property
+ def type(self):
+ return self._sock.type
+
+ @property
+ def proto(self):
+ return self._sock.proto
+
+ def __repr__(self):
+ s = (
+ f"<asyncio.TransportSocket fd={self.fileno()}, "
+ f"family={self.family!s}, type={self.type!s}, "
+ f"proto={self.proto}"
+ )
+
+ if self.fileno() != -1:
+ try:
+ laddr = self.getsockname()
+ if laddr:
+ s = f"{s}, laddr={laddr}"
+ except socket.error:
+ pass
+ try:
+ raddr = self.getpeername()
+ if raddr:
+ s = f"{s}, raddr={raddr}"
+ except socket.error:
+ pass
+
+ return f"{s}>"
+
+ def __getstate__(self):
+ raise TypeError("Cannot serialize asyncio.TransportSocket object")
+
+ def fileno(self):
+ return self._sock.fileno()
+
+ def dup(self):
+ return self._sock.dup()
+
+ def get_inheritable(self):
+ return self._sock.get_inheritable()
+
+ def shutdown(self, how):
+        # asyncio doesn't currently provide a high-level transport API
+        # to shut down the connection.
+ self._sock.shutdown(how)
+
+ def getsockopt(self, *args, **kwargs):
+ return self._sock.getsockopt(*args, **kwargs)
+
+ def setsockopt(self, *args, **kwargs):
+ self._sock.setsockopt(*args, **kwargs)
+
+ def getpeername(self):
+ return self._sock.getpeername()
+
+ def getsockname(self):
+ return self._sock.getsockname()
+
+ def getsockbyname(self):
+ return self._sock.getsockbyname()
+
+ def settimeout(self, value):
+ if value == 0:
+ return
+ raise ValueError(
+ 'settimeout(): only 0 timeout is allowed on transport sockets')
+
+ def gettimeout(self):
+ return 0
+
+ def setblocking(self, flag):
+ if not flag:
+ return
+ raise ValueError(
+ 'setblocking(): transport sockets cannot be blocking')
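+
+
+# Illustrative sketch of the wrapper's restrictions: read-only queries are
+# delegated to the real socket, while state-changing calls are rejected:
+#
+#     sock = transport.get_extra_info('socket')  # a TransportSocket
+#     sock.getsockname()      # OK, delegated
+#     sock.settimeout(0)      # OK, no-op
+#     sock.settimeout(5)      # ValueError
+#     sock.setblocking(True)  # ValueError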
diff --git a/contrib/tools/python3/Lib/asyncio/unix_events.py b/contrib/tools/python3/Lib/asyncio/unix_events.py
new file mode 100644
index 0000000000..f2e920ada4
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/unix_events.py
@@ -0,0 +1,1500 @@
+"""Selector event loop for Unix with signal handling."""
+
+import errno
+import io
+import itertools
+import os
+import selectors
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import threading
+import warnings
+
+from . import base_events
+from . import base_subprocess
+from . import constants
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from . import selector_events
+from . import tasks
+from . import transports
+from .log import logger
+
+
+__all__ = (
+ 'SelectorEventLoop',
+ 'AbstractChildWatcher', 'SafeChildWatcher',
+ 'FastChildWatcher', 'PidfdChildWatcher',
+ 'MultiLoopChildWatcher', 'ThreadedChildWatcher',
+ 'DefaultEventLoopPolicy',
+)
+
+
+if sys.platform == 'win32': # pragma: no cover
+ raise ImportError('Signals are not really supported on Windows')
+
+
+def _sighandler_noop(signum, frame):
+ """Dummy signal handler."""
+ pass
+
+
+def waitstatus_to_exitcode(status):
+ try:
+ return os.waitstatus_to_exitcode(status)
+ except ValueError:
+ # The child exited, but we don't understand its status.
+ # This shouldn't happen, but if it does, let's just
+ # return that status; perhaps that helps debug it.
+ return status
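+
+# For reference, os.waitstatus_to_exitcode() semantics: a child that called
+# exit(3) yields 3, while a child killed by SIGKILL yields -9.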
+
+
+class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
+ """Unix event loop.
+
+ Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
+ """
+
+ def __init__(self, selector=None):
+ super().__init__(selector)
+ self._signal_handlers = {}
+
+ def close(self):
+ super().close()
+ if not sys.is_finalizing():
+ for sig in list(self._signal_handlers):
+ self.remove_signal_handler(sig)
+ else:
+ if self._signal_handlers:
+ warnings.warn(f"Closing the loop {self!r} "
+ f"on interpreter shutdown "
+ f"stage, skipping signal handlers removal",
+ ResourceWarning,
+ source=self)
+ self._signal_handlers.clear()
+
+ def _process_self_data(self, data):
+ for signum in data:
+ if not signum:
+ # ignore null bytes written by _write_to_self()
+ continue
+ self._handle_signal(signum)
+
+ def add_signal_handler(self, sig, callback, *args):
+ """Add a handler for a signal. UNIX only.
+
+ Raise ValueError if the signal number is invalid or uncatchable.
+ Raise RuntimeError if there is a problem setting up the handler.
+ """
+ if (coroutines.iscoroutine(callback) or
+ coroutines.iscoroutinefunction(callback)):
+ raise TypeError("coroutines cannot be used "
+ "with add_signal_handler()")
+ self._check_signal(sig)
+ self._check_closed()
+ try:
+ # set_wakeup_fd() raises ValueError if this is not the
+ # main thread. By calling it early we ensure that an
+ # event loop running in another thread cannot add a signal
+ # handler.
+ signal.set_wakeup_fd(self._csock.fileno())
+ except (ValueError, OSError) as exc:
+ raise RuntimeError(str(exc))
+
+ handle = events.Handle(callback, args, self, None)
+ self._signal_handlers[sig] = handle
+
+ try:
+ # Register a dummy signal handler to ask Python to write the signal
+ # number in the wakeup file descriptor. _process_self_data() will
+ # read signal numbers from this file descriptor to handle signals.
+ signal.signal(sig, _sighandler_noop)
+
+ # Set SA_RESTART to limit EINTR occurrences.
+ signal.siginterrupt(sig, False)
+ except OSError as exc:
+ del self._signal_handlers[sig]
+ if not self._signal_handlers:
+ try:
+ signal.set_wakeup_fd(-1)
+ except (ValueError, OSError) as nexc:
+ logger.info('set_wakeup_fd(-1) failed: %s', nexc)
+
+ if exc.errno == errno.EINVAL:
+ raise RuntimeError(f'sig {sig} cannot be caught')
+ else:
+ raise
+
+ def _handle_signal(self, sig):
+ """Internal helper that is the actual signal handler."""
+ handle = self._signal_handlers.get(sig)
+ if handle is None:
+ return # Assume it's some race condition.
+ if handle._cancelled:
+ self.remove_signal_handler(sig) # Remove it properly.
+ else:
+ self._add_callback_signalsafe(handle)
+
+ def remove_signal_handler(self, sig):
+ """Remove a handler for a signal. UNIX only.
+
+ Return True if a signal handler was removed, False if not.
+ """
+ self._check_signal(sig)
+ try:
+ del self._signal_handlers[sig]
+ except KeyError:
+ return False
+
+ if sig == signal.SIGINT:
+ handler = signal.default_int_handler
+ else:
+ handler = signal.SIG_DFL
+
+ try:
+ signal.signal(sig, handler)
+ except OSError as exc:
+ if exc.errno == errno.EINVAL:
+ raise RuntimeError(f'sig {sig} cannot be caught')
+ else:
+ raise
+
+ if not self._signal_handlers:
+ try:
+ signal.set_wakeup_fd(-1)
+ except (ValueError, OSError) as exc:
+ logger.info('set_wakeup_fd(-1) failed: %s', exc)
+
+ return True
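+
+    # Illustrative usage of the signal-handler API above (the callback name
+    # is hypothetical):
+    #
+    #     loop.add_signal_handler(signal.SIGTERM, request_shutdown)
+    #     ...
+    #     loop.remove_signal_handler(signal.SIGTERM)  # True if removed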
+
+ def _check_signal(self, sig):
+ """Internal helper to validate a signal.
+
+ Raise ValueError if the signal number is invalid or uncatchable.
+ Raise RuntimeError if there is a problem setting up the handler.
+ """
+ if not isinstance(sig, int):
+ raise TypeError(f'sig must be an int, not {sig!r}')
+
+ if sig not in signal.valid_signals():
+ raise ValueError(f'invalid signal number {sig}')
+
+ def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+ extra=None):
+ return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
+
+ def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+ extra=None):
+ return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
+
+ async def _make_subprocess_transport(self, protocol, args, shell,
+ stdin, stdout, stderr, bufsize,
+ extra=None, **kwargs):
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', DeprecationWarning)
+ watcher = events.get_child_watcher()
+
+ with watcher:
+ if not watcher.is_active():
+ # Check early.
+ # Raising exception before process creation
+ # prevents subprocess execution if the watcher
+ # is not ready to handle it.
+ raise RuntimeError("asyncio.get_child_watcher() is not activated, "
+ "subprocess support is not installed.")
+ waiter = self.create_future()
+ transp = _UnixSubprocessTransport(self, protocol, args, shell,
+ stdin, stdout, stderr, bufsize,
+ waiter=waiter, extra=extra,
+ **kwargs)
+ watcher.add_child_handler(transp.get_pid(),
+ self._child_watcher_callback, transp)
+ try:
+ await waiter
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException:
+ transp.close()
+ await transp._wait()
+ raise
+
+ return transp
+
+ def _child_watcher_callback(self, pid, returncode, transp):
+ self.call_soon_threadsafe(transp._process_exited, returncode)
+
+ async def create_unix_connection(
+ self, protocol_factory, path=None, *,
+ ssl=None, sock=None,
+ server_hostname=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None):
+ assert server_hostname is None or isinstance(server_hostname, str)
+ if ssl:
+ if server_hostname is None:
+ raise ValueError(
+ 'you have to pass server_hostname when using ssl')
+ else:
+ if server_hostname is not None:
+ raise ValueError('server_hostname is only meaningful with ssl')
+ if ssl_handshake_timeout is not None:
+ raise ValueError(
+ 'ssl_handshake_timeout is only meaningful with ssl')
+ if ssl_shutdown_timeout is not None:
+ raise ValueError(
+ 'ssl_shutdown_timeout is only meaningful with ssl')
+
+ if path is not None:
+ if sock is not None:
+ raise ValueError(
+ 'path and sock can not be specified at the same time')
+
+ path = os.fspath(path)
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ try:
+ sock.setblocking(False)
+ await self.sock_connect(sock, path)
+ except:
+ sock.close()
+ raise
+
+ else:
+ if sock is None:
+ raise ValueError('no path and sock were specified')
+ if (sock.family != socket.AF_UNIX or
+ sock.type != socket.SOCK_STREAM):
+ raise ValueError(
+ f'A UNIX Domain Stream Socket was expected, got {sock!r}')
+ sock.setblocking(False)
+
+ transport, protocol = await self._create_connection_transport(
+ sock, protocol_factory, ssl, server_hostname,
+ ssl_handshake_timeout=ssl_handshake_timeout,
+ ssl_shutdown_timeout=ssl_shutdown_timeout)
+ return transport, protocol
+
+ async def create_unix_server(
+ self, protocol_factory, path=None, *,
+ sock=None, backlog=100, ssl=None,
+ ssl_handshake_timeout=None,
+ ssl_shutdown_timeout=None,
+ start_serving=True):
+ if isinstance(ssl, bool):
+ raise TypeError('ssl argument must be an SSLContext or None')
+
+ if ssl_handshake_timeout is not None and not ssl:
+ raise ValueError(
+ 'ssl_handshake_timeout is only meaningful with ssl')
+
+ if ssl_shutdown_timeout is not None and not ssl:
+ raise ValueError(
+ 'ssl_shutdown_timeout is only meaningful with ssl')
+
+ if path is not None:
+ if sock is not None:
+ raise ValueError(
+ 'path and sock can not be specified at the same time')
+
+ path = os.fspath(path)
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+ # Check for abstract socket. `str` and `bytes` paths are supported.
+ if path[0] not in (0, '\x00'):
+ try:
+ if stat.S_ISSOCK(os.stat(path).st_mode):
+ os.remove(path)
+ except FileNotFoundError:
+ pass
+ except OSError as err:
+                # The directory may have permission only to create the socket.
+ logger.error('Unable to check or remove stale UNIX socket '
+ '%r: %r', path, err)
+
+ try:
+ sock.bind(path)
+ except OSError as exc:
+ sock.close()
+ if exc.errno == errno.EADDRINUSE:
+                    # Let's improve the error message by adding
+                    # the exact address where the error occurs.
+ msg = f'Address {path!r} is already in use'
+ raise OSError(errno.EADDRINUSE, msg) from None
+ else:
+ raise
+ except:
+ sock.close()
+ raise
+ else:
+ if sock is None:
+ raise ValueError(
+ 'path was not specified, and no sock specified')
+
+ if (sock.family != socket.AF_UNIX or
+ sock.type != socket.SOCK_STREAM):
+ raise ValueError(
+ f'A UNIX Domain Stream Socket was expected, got {sock!r}')
+
+ sock.setblocking(False)
+ server = base_events.Server(self, [sock], protocol_factory,
+ ssl, backlog, ssl_handshake_timeout,
+ ssl_shutdown_timeout)
+ if start_serving:
+ server._start_serving()
+ # Skip one loop iteration so that all 'loop.add_reader'
+ # go through.
+ await tasks.sleep(0)
+
+ return server
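+
+    # Illustrative usage (a sketch; the socket path and protocol factory
+    # are hypothetical):
+    #
+    #     server = await loop.create_unix_server(MyProtocol, '/tmp/app.sock')
+    #     transport, protocol = await loop.create_unix_connection(
+    #         MyProtocol, '/tmp/app.sock')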
+
+ async def _sock_sendfile_native(self, sock, file, offset, count):
+ try:
+ os.sendfile
+ except AttributeError:
+ raise exceptions.SendfileNotAvailableError(
+ "os.sendfile() is not available")
+ try:
+ fileno = file.fileno()
+ except (AttributeError, io.UnsupportedOperation) as err:
+ raise exceptions.SendfileNotAvailableError("not a regular file")
+ try:
+ fsize = os.fstat(fileno).st_size
+ except OSError:
+ raise exceptions.SendfileNotAvailableError("not a regular file")
+ blocksize = count if count else fsize
+ if not blocksize:
+ return 0 # empty file
+
+ fut = self.create_future()
+ self._sock_sendfile_native_impl(fut, None, sock, fileno,
+ offset, count, blocksize, 0)
+ return await fut
+
+ def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno,
+ offset, count, blocksize, total_sent):
+ fd = sock.fileno()
+ if registered_fd is not None:
+ # Remove the callback early. It should be rare that the
+ # selector says the fd is ready but the call still returns
+ # EAGAIN, and I am willing to take a hit in that case in
+ # order to simplify the common case.
+ self.remove_writer(registered_fd)
+ if fut.cancelled():
+ self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+ return
+ if count:
+ blocksize = count - total_sent
+ if blocksize <= 0:
+ self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+ fut.set_result(total_sent)
+ return
+
+ try:
+ sent = os.sendfile(fd, fileno, offset, blocksize)
+ except (BlockingIOError, InterruptedError):
+ if registered_fd is None:
+ self._sock_add_cancellation_callback(fut, sock)
+ self.add_writer(fd, self._sock_sendfile_native_impl, fut,
+ fd, sock, fileno,
+ offset, count, blocksize, total_sent)
+ except OSError as exc:
+ if (registered_fd is not None and
+ exc.errno == errno.ENOTCONN and
+ type(exc) is not ConnectionError):
+ # If we have an ENOTCONN and this isn't a first call to
+ # sendfile(), i.e. the connection was closed in the middle
+ # of the operation, normalize the error to ConnectionError
+ # to make it consistent across all Posix systems.
+ new_exc = ConnectionError(
+ "socket is not connected", errno.ENOTCONN)
+ new_exc.__cause__ = exc
+ exc = new_exc
+ if total_sent == 0:
+ # We can get here for different reasons, the main
+ # one being 'file' is not a regular mmap(2)-like
+ # file, in which case we'll fall back on using
+ # plain send().
+ err = exceptions.SendfileNotAvailableError(
+ "os.sendfile call failed")
+ self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+ fut.set_exception(err)
+ else:
+ self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+ fut.set_exception(exc)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+ fut.set_exception(exc)
+ else:
+ if sent == 0:
+ # EOF
+ self._sock_sendfile_update_filepos(fileno, offset, total_sent)
+ fut.set_result(total_sent)
+ else:
+ offset += sent
+ total_sent += sent
+ if registered_fd is None:
+ self._sock_add_cancellation_callback(fut, sock)
+ self.add_writer(fd, self._sock_sendfile_native_impl, fut,
+ fd, sock, fileno,
+ offset, count, blocksize, total_sent)
+
+ def _sock_sendfile_update_filepos(self, fileno, offset, total_sent):
+ if total_sent > 0:
+ os.lseek(fileno, offset, os.SEEK_SET)
+
+ def _sock_add_cancellation_callback(self, fut, sock):
+ def cb(fut):
+ if fut.cancelled():
+ fd = sock.fileno()
+ if fd != -1:
+ self.remove_writer(fd)
+ fut.add_done_callback(cb)
+
+
+class _UnixReadPipeTransport(transports.ReadTransport):
+
+ max_size = 256 * 1024 # max bytes we read in one event loop iteration
+
+ def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+ super().__init__(extra)
+ self._extra['pipe'] = pipe
+ self._loop = loop
+ self._pipe = pipe
+ self._fileno = pipe.fileno()
+ self._protocol = protocol
+ self._closing = False
+ self._paused = False
+
+ mode = os.fstat(self._fileno).st_mode
+ if not (stat.S_ISFIFO(mode) or
+ stat.S_ISSOCK(mode) or
+ stat.S_ISCHR(mode)):
+ self._pipe = None
+ self._fileno = None
+ self._protocol = None
+ raise ValueError("Pipe transport is for pipes/sockets only.")
+
+ os.set_blocking(self._fileno, False)
+
+ self._loop.call_soon(self._protocol.connection_made, self)
+ # only start reading when connection_made() has been called
+ self._loop.call_soon(self._add_reader,
+ self._fileno, self._read_ready)
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(futures._set_result_unless_cancelled,
+ waiter, None)
+
+ def _add_reader(self, fd, callback):
+ if not self.is_reading():
+ return
+ self._loop._add_reader(fd, callback)
+
+ def is_reading(self):
+ return not self._paused and not self._closing
+
+ def __repr__(self):
+ info = [self.__class__.__name__]
+ if self._pipe is None:
+ info.append('closed')
+ elif self._closing:
+ info.append('closing')
+ info.append(f'fd={self._fileno}')
+ selector = getattr(self._loop, '_selector', None)
+ if self._pipe is not None and selector is not None:
+ polling = selector_events._test_selector_event(
+ selector, self._fileno, selectors.EVENT_READ)
+ if polling:
+ info.append('polling')
+ else:
+ info.append('idle')
+ elif self._pipe is not None:
+ info.append('open')
+ else:
+ info.append('closed')
+ return '<{}>'.format(' '.join(info))
+
+ def _read_ready(self):
+ try:
+ data = os.read(self._fileno, self.max_size)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except OSError as exc:
+ self._fatal_error(exc, 'Fatal read error on pipe transport')
+ else:
+ if data:
+ self._protocol.data_received(data)
+ else:
+ if self._loop.get_debug():
+ logger.info("%r was closed by peer", self)
+ self._closing = True
+ self._loop._remove_reader(self._fileno)
+ self._loop.call_soon(self._protocol.eof_received)
+ self._loop.call_soon(self._call_connection_lost, None)
+
+ def pause_reading(self):
+ if not self.is_reading():
+ return
+ self._paused = True
+ self._loop._remove_reader(self._fileno)
+ if self._loop.get_debug():
+ logger.debug("%r pauses reading", self)
+
+ def resume_reading(self):
+ if self._closing or not self._paused:
+ return
+ self._paused = False
+ self._loop._add_reader(self._fileno, self._read_ready)
+ if self._loop.get_debug():
+ logger.debug("%r resumes reading", self)
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closing
+
+ def close(self):
+ if not self._closing:
+ self._close(None)
+
+ def __del__(self, _warn=warnings.warn):
+ if self._pipe is not None:
+ _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+ self._pipe.close()
+
+ def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+ # should be called by exception handler only
+ if (isinstance(exc, OSError) and exc.errno == errno.EIO):
+ if self._loop.get_debug():
+ logger.debug("%r: %s", self, message, exc_info=True)
+ else:
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+ self._close(exc)
+
+ def _close(self, exc):
+ self._closing = True
+ self._loop._remove_reader(self._fileno)
+ self._loop.call_soon(self._call_connection_lost, exc)
+
+ def _call_connection_lost(self, exc):
+ try:
+ self._protocol.connection_lost(exc)
+ finally:
+ self._pipe.close()
+ self._pipe = None
+ self._protocol = None
+ self._loop = None
+
+
+class _UnixWritePipeTransport(transports._FlowControlMixin,
+ transports.WriteTransport):
+
+ def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+ super().__init__(extra, loop)
+ self._extra['pipe'] = pipe
+ self._pipe = pipe
+ self._fileno = pipe.fileno()
+ self._protocol = protocol
+ self._buffer = bytearray()
+ self._conn_lost = 0
+ self._closing = False # Set when close() or write_eof() called.
+
+ mode = os.fstat(self._fileno).st_mode
+ is_char = stat.S_ISCHR(mode)
+ is_fifo = stat.S_ISFIFO(mode)
+ is_socket = stat.S_ISSOCK(mode)
+ if not (is_char or is_fifo or is_socket):
+ self._pipe = None
+ self._fileno = None
+ self._protocol = None
+ raise ValueError("Pipe transport is only for "
+ "pipes, sockets and character devices")
+
+ os.set_blocking(self._fileno, False)
+ self._loop.call_soon(self._protocol.connection_made, self)
+
+ # On AIX, the reader trick (to be notified when the read end of the
+ # socket is closed) only works for sockets. On other platforms it
+ # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
+ if is_socket or (is_fifo and not sys.platform.startswith("aix")):
+ # only start reading when connection_made() has been called
+ self._loop.call_soon(self._loop._add_reader,
+ self._fileno, self._read_ready)
+
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(futures._set_result_unless_cancelled,
+ waiter, None)
+
+ def __repr__(self):
+ info = [self.__class__.__name__]
+ if self._pipe is None:
+ info.append('closed')
+ elif self._closing:
+ info.append('closing')
+ info.append(f'fd={self._fileno}')
+ selector = getattr(self._loop, '_selector', None)
+ if self._pipe is not None and selector is not None:
+ polling = selector_events._test_selector_event(
+ selector, self._fileno, selectors.EVENT_WRITE)
+ if polling:
+ info.append('polling')
+ else:
+ info.append('idle')
+
+ bufsize = self.get_write_buffer_size()
+ info.append(f'bufsize={bufsize}')
+ elif self._pipe is not None:
+ info.append('open')
+ else:
+ info.append('closed')
+ return '<{}>'.format(' '.join(info))
+
+ def get_write_buffer_size(self):
+ return len(self._buffer)
+
+ def _read_ready(self):
+ # Pipe was closed by peer.
+ if self._loop.get_debug():
+ logger.info("%r was closed by peer", self)
+ if self._buffer:
+ self._close(BrokenPipeError())
+ else:
+ self._close()
+
+ def write(self, data):
+ assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
+ if isinstance(data, bytearray):
+ data = memoryview(data)
+ if not data:
+ return
+
+ if self._conn_lost or self._closing:
+ if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+ logger.warning('pipe closed by peer or '
+ 'os.write(pipe, data) raised exception.')
+ self._conn_lost += 1
+ return
+
+ if not self._buffer:
+ # Attempt to send it right away first.
+ try:
+ n = os.write(self._fileno, data)
+ except (BlockingIOError, InterruptedError):
+ n = 0
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._conn_lost += 1
+ self._fatal_error(exc, 'Fatal write error on pipe transport')
+ return
+ if n == len(data):
+ return
+ elif n > 0:
+ data = memoryview(data)[n:]
+ self._loop._add_writer(self._fileno, self._write_ready)
+
+ self._buffer += data
+ self._maybe_pause_protocol()
+
+ def _write_ready(self):
+ assert self._buffer, 'Data should not be empty'
+
+ try:
+ n = os.write(self._fileno, self._buffer)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ self._buffer.clear()
+ self._conn_lost += 1
+            # Remove the writer here; _fatal_error() won't do it
+            # because _buffer is empty.
+ self._loop._remove_writer(self._fileno)
+ self._fatal_error(exc, 'Fatal write error on pipe transport')
+ else:
+ if n == len(self._buffer):
+ self._buffer.clear()
+ self._loop._remove_writer(self._fileno)
+ self._maybe_resume_protocol() # May append to buffer.
+ if self._closing:
+ self._loop._remove_reader(self._fileno)
+ self._call_connection_lost(None)
+ return
+ elif n > 0:
+ del self._buffer[:n]
+
+ def can_write_eof(self):
+ return True
+
+ def write_eof(self):
+ if self._closing:
+ return
+ assert self._pipe
+ self._closing = True
+ if not self._buffer:
+ self._loop._remove_reader(self._fileno)
+ self._loop.call_soon(self._call_connection_lost, None)
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closing
+
+ def close(self):
+ if self._pipe is not None and not self._closing:
+            # write_eof is all we need to close the write pipe
+ self.write_eof()
+
+ def __del__(self, _warn=warnings.warn):
+ if self._pipe is not None:
+ _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
+ self._pipe.close()
+
+ def abort(self):
+ self._close(None)
+
+ def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+ # should be called by exception handler only
+ if isinstance(exc, OSError):
+ if self._loop.get_debug():
+ logger.debug("%r: %s", self, message, exc_info=True)
+ else:
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+ self._close(exc)
+
+ def _close(self, exc=None):
+ self._closing = True
+ if self._buffer:
+ self._loop._remove_writer(self._fileno)
+ self._buffer.clear()
+ self._loop._remove_reader(self._fileno)
+ self._loop.call_soon(self._call_connection_lost, exc)
+
+ def _call_connection_lost(self, exc):
+ try:
+ self._protocol.connection_lost(exc)
+ finally:
+ self._pipe.close()
+ self._pipe = None
+ self._protocol = None
+ self._loop = None
+
+
+class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+ stdin_w = None
+ if stdin == subprocess.PIPE and sys.platform.startswith('aix'):
+ # Use a socket pair for stdin on AIX, since it does not
+ # support selecting read events on the write end of a
+ # socket (which we use in order to detect closing of the
+ # other end).
+ stdin, stdin_w = socket.socketpair()
+ try:
+ self._proc = subprocess.Popen(
+ args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+ universal_newlines=False, bufsize=bufsize, **kwargs)
+ if stdin_w is not None:
+ stdin.close()
+ self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
+ stdin_w = None
+ finally:
+ if stdin_w is not None:
+ stdin.close()
+ stdin_w.close()
+
+
+class AbstractChildWatcher:
+ """Abstract base class for monitoring child processes.
+
+ Objects derived from this class monitor a collection of subprocesses and
+ report their termination or interruption by a signal.
+
+ New callbacks are registered with .add_child_handler(). Starting a new
+ process must be done within a 'with' block to allow the watcher to suspend
+    its activity until the new process is fully registered (this is needed to
+ prevent a race condition in some implementations).
+
+ Example:
+ with watcher:
+ proc = subprocess.Popen("sleep 1")
+ watcher.add_child_handler(proc.pid, callback)
+
+ Notes:
+ Implementations of this class must be thread-safe.
+
+ Since child watcher objects may catch the SIGCHLD signal and call
+ waitpid(-1), there should be only one active object per process.
+ """
+
+ def __init_subclass__(cls) -> None:
+ if cls.__module__ != __name__:
+ warnings._deprecated("AbstractChildWatcher",
+ "{name!r} is deprecated as of Python 3.12 and will be "
+ "removed in Python {remove}.",
+ remove=(3, 14))
+
+ def add_child_handler(self, pid, callback, *args):
+ """Register a new child handler.
+
+ Arrange for callback(pid, returncode, *args) to be called when
+ process 'pid' terminates. Specifying another callback for the same
+ process replaces the previous handler.
+
+ Note: callback() must be thread-safe.
+ """
+ raise NotImplementedError()
+
+ def remove_child_handler(self, pid):
+ """Removes the handler for process 'pid'.
+
+ The function returns True if the handler was successfully removed,
+ False if there was nothing to remove."""
+
+ raise NotImplementedError()
+
+ def attach_loop(self, loop):
+ """Attach the watcher to an event loop.
+
+ If the watcher was previously attached to an event loop, then it is
+ first detached before attaching to the new loop.
+
+ Note: loop may be None.
+ """
+ raise NotImplementedError()
+
+ def close(self):
+ """Close the watcher.
+
+ This must be called to make sure that any underlying resource is freed.
+ """
+ raise NotImplementedError()
+
+    def is_active(self):
+        """Return True if the watcher is active and used by the event loop.
+
+        That is, return True if the watcher is installed and ready to handle
+        process exit notifications.
+        """
+ raise NotImplementedError()
+
+ def __enter__(self):
+ """Enter the watcher's context and allow starting new processes
+
+ This function must return self"""
+ raise NotImplementedError()
+
+ def __exit__(self, a, b, c):
+ """Exit the watcher's context"""
+ raise NotImplementedError()
+
+
+class PidfdChildWatcher(AbstractChildWatcher):
+ """Child watcher implementation using Linux's pid file descriptors.
+
+ This child watcher polls process file descriptors (pidfds) to await child
+ process termination. In some respects, PidfdChildWatcher is a "Goldilocks"
+ child watcher implementation. It doesn't require signals or threads, doesn't
+ interfere with any processes launched outside the event loop, and scales
+ linearly with the number of subprocesses launched by the event loop. The
+ main disadvantage is that pidfds are specific to Linux, and only work on
+ recent (5.3+) kernels.
+ """
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ pass
+
+ def is_active(self):
+ return True
+
+ def close(self):
+ pass
+
+ def attach_loop(self, loop):
+ pass
+
+ def add_child_handler(self, pid, callback, *args):
+ loop = events.get_running_loop()
+ pidfd = os.pidfd_open(pid)
+ loop._add_reader(pidfd, self._do_wait, pid, pidfd, callback, args)
+
+ def _do_wait(self, pid, pidfd, callback, args):
+ loop = events.get_running_loop()
+ loop._remove_reader(pidfd)
+ try:
+ _, status = os.waitpid(pid, 0)
+ except ChildProcessError:
+ # The child process is already reaped
+ # (may happen if waitpid() is called elsewhere).
+ returncode = 255
+            logger.warning(
+                "child process pid %d exit status already read: "
+                "will report returncode 255",
+                pid)
+ else:
+ returncode = waitstatus_to_exitcode(status)
+
+ os.close(pidfd)
+ callback(pid, returncode, *args)
+
+ def remove_child_handler(self, pid):
+ # asyncio never calls remove_child_handler() !!!
+        # The method is a no-op but is implemented because
+ # abstract base classes require it.
+ return True
+
+
+class BaseChildWatcher(AbstractChildWatcher):
+
+ def __init__(self):
+ self._loop = None
+ self._callbacks = {}
+
+ def close(self):
+ self.attach_loop(None)
+
+ def is_active(self):
+ return self._loop is not None and self._loop.is_running()
+
+ def _do_waitpid(self, expected_pid):
+ raise NotImplementedError()
+
+ def _do_waitpid_all(self):
+ raise NotImplementedError()
+
+ def attach_loop(self, loop):
+ assert loop is None or isinstance(loop, events.AbstractEventLoop)
+
+ if self._loop is not None and loop is None and self._callbacks:
+ warnings.warn(
+ 'A loop is being detached '
+ 'from a child watcher with pending handlers',
+ RuntimeWarning)
+
+ if self._loop is not None:
+ self._loop.remove_signal_handler(signal.SIGCHLD)
+
+ self._loop = loop
+ if loop is not None:
+ loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
+
+ # Prevent a race condition in case a child terminated
+ # during the switch.
+ self._do_waitpid_all()
+
+ def _sig_chld(self):
+ try:
+ self._do_waitpid_all()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as exc:
+ # self._loop should always be available here
+ # as '_sig_chld' is added as a signal handler
+ # in 'attach_loop'
+ self._loop.call_exception_handler({
+ 'message': 'Unknown exception in SIGCHLD handler',
+ 'exception': exc,
+ })
+
+
+class SafeChildWatcher(BaseChildWatcher):
+ """'Safe' child watcher implementation.
+
+ This implementation avoids disrupting other code spawning processes by
+ polling explicitly each process in the SIGCHLD handler instead of calling
+ os.waitpid(-1).
+
+ This is a safe solution but it has a significant overhead when handling a
+ big number of children (O(n) each time SIGCHLD is raised)
+ """
+
+ def __init__(self):
+ super().__init__()
+ warnings._deprecated("SafeChildWatcher",
+ "{name!r} is deprecated as of Python 3.12 and will be "
+ "removed in Python {remove}.",
+ remove=(3, 14))
+
+ def close(self):
+ self._callbacks.clear()
+ super().close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, a, b, c):
+ pass
+
+ def add_child_handler(self, pid, callback, *args):
+ self._callbacks[pid] = (callback, args)
+
+ # Prevent a race condition in case the child is already terminated.
+ self._do_waitpid(pid)
+
+ def remove_child_handler(self, pid):
+ try:
+ del self._callbacks[pid]
+ return True
+ except KeyError:
+ return False
+
+ def _do_waitpid_all(self):
+
+ for pid in list(self._callbacks):
+ self._do_waitpid(pid)
+
+ def _do_waitpid(self, expected_pid):
+ assert expected_pid > 0
+
+ try:
+ pid, status = os.waitpid(expected_pid, os.WNOHANG)
+ except ChildProcessError:
+ # The child process is already reaped
+ # (may happen if waitpid() is called elsewhere).
+ pid = expected_pid
+ returncode = 255
+ logger.warning(
+ "Unknown child process pid %d, will report returncode 255",
+ pid)
+ else:
+ if pid == 0:
+ # The child process is still alive.
+ return
+
+ returncode = waitstatus_to_exitcode(status)
+ if self._loop.get_debug():
+ logger.debug('process %s exited with returncode %s',
+ expected_pid, returncode)
+
+ try:
+ callback, args = self._callbacks.pop(pid)
+ except KeyError: # pragma: no cover
+ # May happen if .remove_child_handler() is called
+ # after os.waitpid() returns.
+ if self._loop.get_debug():
+ logger.warning("Child watcher got an unexpected pid: %r",
+ pid, exc_info=True)
+ else:
+ callback(pid, returncode, *args)
+
+
+class FastChildWatcher(BaseChildWatcher):
+ """'Fast' child watcher implementation.
+
+    This implementation reaps every terminated process by calling
+ os.waitpid(-1) directly, possibly breaking other code spawning processes
+ and waiting for their termination.
+
+ There is no noticeable overhead when handling a big number of children
+ (O(1) each time a child terminates).
+ """
+ def __init__(self):
+ super().__init__()
+ self._lock = threading.Lock()
+ self._zombies = {}
+ self._forks = 0
+ warnings._deprecated("FastChildWatcher",
+ "{name!r} is deprecated as of Python 3.12 and will be "
+ "removed in Python {remove}.",
+ remove=(3, 14))
+
+ def close(self):
+ self._callbacks.clear()
+ self._zombies.clear()
+ super().close()
+
+ def __enter__(self):
+ with self._lock:
+ self._forks += 1
+
+ return self
+
+ def __exit__(self, a, b, c):
+ with self._lock:
+ self._forks -= 1
+
+ if self._forks or not self._zombies:
+ return
+
+ collateral_victims = str(self._zombies)
+ self._zombies.clear()
+
+ logger.warning(
+ "Caught subprocesses termination from unknown pids: %s",
+ collateral_victims)
+
+ def add_child_handler(self, pid, callback, *args):
+ assert self._forks, "Must use the context manager"
+
+ with self._lock:
+ try:
+ returncode = self._zombies.pop(pid)
+ except KeyError:
+ # The child is running.
+ self._callbacks[pid] = callback, args
+ return
+
+ # The child is dead already. We can fire the callback.
+ callback(pid, returncode, *args)
+
+ def remove_child_handler(self, pid):
+ try:
+ del self._callbacks[pid]
+ return True
+ except KeyError:
+ return False
+
+ def _do_waitpid_all(self):
+ # Because of signal coalescing, we must keep calling waitpid() as
+ # long as we're able to reap a child.
+ while True:
+ try:
+ pid, status = os.waitpid(-1, os.WNOHANG)
+ except ChildProcessError:
+ # No more child processes exist.
+ return
+ else:
+ if pid == 0:
+ # A child process is still alive.
+ return
+
+ returncode = waitstatus_to_exitcode(status)
+
+ with self._lock:
+ try:
+ callback, args = self._callbacks.pop(pid)
+ except KeyError:
+ # unknown child
+ if self._forks:
+ # It may not be registered yet.
+ self._zombies[pid] = returncode
+ if self._loop.get_debug():
+ logger.debug('unknown process %s exited '
+ 'with returncode %s',
+ pid, returncode)
+ continue
+ callback = None
+ else:
+ if self._loop.get_debug():
+ logger.debug('process %s exited with returncode %s',
+ pid, returncode)
+
+ if callback is None:
+ logger.warning(
+ "Caught subprocess termination from unknown pid: "
+ "%d -> %d", pid, returncode)
+ else:
+ callback(pid, returncode, *args)
+
+
+class MultiLoopChildWatcher(AbstractChildWatcher):
+ """A watcher that doesn't require running loop in the main thread.
+
+ This implementation registers a SIGCHLD signal handler on
+ instantiation (which may conflict with other code that
+ install own handler for this signal).
+
+ The solution is safe but it has a significant overhead when
+ handling a big number of processes (*O(n)* each time a
+ SIGCHLD is received).
+ """
+
+    # Implementation note:
+    # The class keeps compatibility with the AbstractChildWatcher ABC.
+    # To achieve this it has an empty attach_loop() method
+    # and doesn't accept an explicit loop argument
+    # for add_child_handler()/remove_child_handler(),
+    # but retrieves the current loop via get_running_loop().
+
+ def __init__(self):
+ self._callbacks = {}
+ self._saved_sighandler = None
+ warnings._deprecated("MultiLoopChildWatcher",
+ "{name!r} is deprecated as of Python 3.12 and will be "
+ "removed in Python {remove}.",
+ remove=(3, 14))
+
+ def is_active(self):
+ return self._saved_sighandler is not None
+
+ def close(self):
+ self._callbacks.clear()
+ if self._saved_sighandler is None:
+ return
+
+ handler = signal.getsignal(signal.SIGCHLD)
+ if handler != self._sig_chld:
+ logger.warning("SIGCHLD handler was changed by outside code")
+ else:
+ signal.signal(signal.SIGCHLD, self._saved_sighandler)
+ self._saved_sighandler = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+ def add_child_handler(self, pid, callback, *args):
+ loop = events.get_running_loop()
+ self._callbacks[pid] = (loop, callback, args)
+
+ # Prevent a race condition in case the child is already terminated.
+ self._do_waitpid(pid)
+
+ def remove_child_handler(self, pid):
+ try:
+ del self._callbacks[pid]
+ return True
+ except KeyError:
+ return False
+
+    def attach_loop(self, loop):
+        # Don't save the loop; just initialize the watcher the first time
+        # this method is called. The reason to do it here is that
+        # attach_loop() is called from the unix policy only for the main
+        # thread, and the main thread is required to subscribe to the
+        # SIGCHLD signal.
+ if self._saved_sighandler is not None:
+ return
+
+ self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
+ if self._saved_sighandler is None:
+ logger.warning("Previous SIGCHLD handler was set by non-Python code, "
+ "restore to default handler on watcher close.")
+ self._saved_sighandler = signal.SIG_DFL
+
+ # Set SA_RESTART to limit EINTR occurrences.
+ signal.siginterrupt(signal.SIGCHLD, False)
+
+ def _do_waitpid_all(self):
+ for pid in list(self._callbacks):
+ self._do_waitpid(pid)
+
+ def _do_waitpid(self, expected_pid):
+ assert expected_pid > 0
+
+ try:
+ pid, status = os.waitpid(expected_pid, os.WNOHANG)
+ except ChildProcessError:
+ # The child process is already reaped
+ # (may happen if waitpid() is called elsewhere).
+ pid = expected_pid
+ returncode = 255
+ logger.warning(
+ "Unknown child process pid %d, will report returncode 255",
+ pid)
+ debug_log = False
+ else:
+ if pid == 0:
+ # The child process is still alive.
+ return
+
+ returncode = waitstatus_to_exitcode(status)
+ debug_log = True
+ try:
+ loop, callback, args = self._callbacks.pop(pid)
+ except KeyError: # pragma: no cover
+ # May happen if .remove_child_handler() is called
+ # after os.waitpid() returns.
+ logger.warning("Child watcher got an unexpected pid: %r",
+ pid, exc_info=True)
+ else:
+ if loop.is_closed():
+ logger.warning("Loop %r that handles pid %r is closed", loop, pid)
+ else:
+ if debug_log and loop.get_debug():
+ logger.debug('process %s exited with returncode %s',
+ expected_pid, returncode)
+ loop.call_soon_threadsafe(callback, pid, returncode, *args)
+
+ def _sig_chld(self, signum, frame):
+ try:
+ self._do_waitpid_all()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException:
+ logger.warning('Unknown exception in SIGCHLD handler', exc_info=True)
+
+
+class ThreadedChildWatcher(AbstractChildWatcher):
+ """Threaded child watcher implementation.
+
+    The watcher uses a thread per process
+    to wait for the process to finish.
+
+    It doesn't require subscribing to a POSIX signal,
+    but thread creation is not free.
+
+    The watcher has O(1) complexity; its performance doesn't depend
+    on the number of spawned processes.
+ """
+
+ def __init__(self):
+ self._pid_counter = itertools.count(0)
+ self._threads = {}
+
+ def is_active(self):
+ return True
+
+ def close(self):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+ def __del__(self, _warn=warnings.warn):
+ threads = [thread for thread in list(self._threads.values())
+ if thread.is_alive()]
+ if threads:
+ _warn(f"{self.__class__} has registered but not finished child processes",
+ ResourceWarning,
+ source=self)
+
+ def add_child_handler(self, pid, callback, *args):
+ loop = events.get_running_loop()
+ thread = threading.Thread(target=self._do_waitpid,
+ name=f"asyncio-waitpid-{next(self._pid_counter)}",
+ args=(loop, pid, callback, args),
+ daemon=True)
+ self._threads[pid] = thread
+ thread.start()
+
+ def remove_child_handler(self, pid):
+ # asyncio never calls remove_child_handler() !!!
+        # The method is a no-op but is implemented because
+ # abstract base classes require it.
+ return True
+
+ def attach_loop(self, loop):
+ pass
+
+ def _do_waitpid(self, loop, expected_pid, callback, args):
+ assert expected_pid > 0
+
+ try:
+ pid, status = os.waitpid(expected_pid, 0)
+ except ChildProcessError:
+ # The child process is already reaped
+ # (may happen if waitpid() is called elsewhere).
+ pid = expected_pid
+ returncode = 255
+ logger.warning(
+ "Unknown child process pid %d, will report returncode 255",
+ pid)
+ else:
+ returncode = waitstatus_to_exitcode(status)
+ if loop.get_debug():
+ logger.debug('process %s exited with returncode %s',
+ expected_pid, returncode)
+
+ if loop.is_closed():
+ logger.warning("Loop %r that handles pid %r is closed", loop, pid)
+ else:
+ loop.call_soon_threadsafe(callback, pid, returncode, *args)
+
+ self._threads.pop(expected_pid)
+
+def can_use_pidfd():
+ if not hasattr(os, 'pidfd_open'):
+ return False
+ try:
+ pid = os.getpid()
+ os.close(os.pidfd_open(pid, 0))
+ except OSError:
+ # blocked by security policy like SECCOMP
+ return False
+ return True
+
+
+class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+ """UNIX event loop policy with a watcher for child processes."""
+ _loop_factory = _UnixSelectorEventLoop
+
+ def __init__(self):
+ super().__init__()
+ self._watcher = None
+
+ def _init_watcher(self):
+ with events._lock:
+ if self._watcher is None: # pragma: no branch
+ if can_use_pidfd():
+ self._watcher = PidfdChildWatcher()
+ else:
+ self._watcher = ThreadedChildWatcher()
+
+ def set_event_loop(self, loop):
+ """Set the event loop.
+
+ As a side effect, if a child watcher was set before, then calling
+ .set_event_loop() from the main thread will call .attach_loop(loop) on
+ the child watcher.
+ """
+
+ super().set_event_loop(loop)
+
+ if (self._watcher is not None and
+ threading.current_thread() is threading.main_thread()):
+ self._watcher.attach_loop(loop)
+
+ def get_child_watcher(self):
+ """Get the watcher for child processes.
+
+ If not yet set, a ThreadedChildWatcher object is automatically created.
+ """
+ if self._watcher is None:
+ self._init_watcher()
+
+ warnings._deprecated("get_child_watcher",
+ "{name!r} is deprecated as of Python 3.12 and will be "
+ "removed in Python {remove}.", remove=(3, 14))
+ return self._watcher
+
+ def set_child_watcher(self, watcher):
+ """Set the watcher for child processes."""
+
+ assert watcher is None or isinstance(watcher, AbstractChildWatcher)
+
+ if self._watcher is not None:
+ self._watcher.close()
+
+ self._watcher = watcher
+ warnings._deprecated("set_child_watcher",
+ "{name!r} is deprecated as of Python 3.12 and will be "
+ "removed in Python {remove}.", remove=(3, 14))
+
+
+SelectorEventLoop = _UnixSelectorEventLoop
+DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
diff --git a/contrib/tools/python3/Lib/asyncio/windows_events.py b/contrib/tools/python3/Lib/asyncio/windows_events.py
new file mode 100644
index 0000000000..c9a5fb841c
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/windows_events.py
@@ -0,0 +1,896 @@
+"""Selector and proactor event loops for Windows."""
+
+import sys
+
+if sys.platform != 'win32': # pragma: no cover
+ raise ImportError('win32 only')
+
+import _overlapped
+import _winapi
+import errno
+import math
+import msvcrt
+import socket
+import struct
+import time
+import weakref
+
+from . import events
+from . import base_subprocess
+from . import futures
+from . import exceptions
+from . import proactor_events
+from . import selector_events
+from . import tasks
+from . import windows_utils
+from .log import logger
+
+
+__all__ = (
+ 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
+ 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',
+ 'WindowsProactorEventLoopPolicy',
+)
+
+
+NULL = _winapi.NULL
+INFINITE = _winapi.INFINITE
+ERROR_CONNECTION_REFUSED = 1225
+ERROR_CONNECTION_ABORTED = 1236
+
+# Initial delay in seconds for connect_pipe() before retrying to connect
+CONNECT_PIPE_INIT_DELAY = 0.001
+
+# Maximum delay in seconds for connect_pipe() before retrying to connect
+CONNECT_PIPE_MAX_DELAY = 0.100
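+
+# A sketch of the retry loop these constants presumably feed (assuming the
+# delay doubles on each attempt, capped at the maximum; 'connected' is a
+# hypothetical condition):
+#
+#     delay = CONNECT_PIPE_INIT_DELAY
+#     while not connected:
+#         await tasks.sleep(delay)
+#         delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)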
+
+
+class _OverlappedFuture(futures.Future):
+ """Subclass of Future which represents an overlapped operation.
+
+ Cancelling it will immediately cancel the overlapped operation.
+ """
+
+ def __init__(self, ov, *, loop=None):
+ super().__init__(loop=loop)
+ if self._source_traceback:
+ del self._source_traceback[-1]
+ self._ov = ov
+
+ def _repr_info(self):
+ info = super()._repr_info()
+ if self._ov is not None:
+ state = 'pending' if self._ov.pending else 'completed'
+ info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>')
+ return info
+
+ def _cancel_overlapped(self):
+ if self._ov is None:
+ return
+ try:
+ self._ov.cancel()
+ except OSError as exc:
+ context = {
+ 'message': 'Cancelling an overlapped future failed',
+ 'exception': exc,
+ 'future': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ self._ov = None
+
+ def cancel(self, msg=None):
+ self._cancel_overlapped()
+ return super().cancel(msg=msg)
+
+ def set_exception(self, exception):
+ super().set_exception(exception)
+ self._cancel_overlapped()
+
+ def set_result(self, result):
+ super().set_result(result)
+ self._ov = None
+
+
+class _BaseWaitHandleFuture(futures.Future):
+ """Subclass of Future which represents a wait handle."""
+
+ def __init__(self, ov, handle, wait_handle, *, loop=None):
+ super().__init__(loop=loop)
+ if self._source_traceback:
+ del self._source_traceback[-1]
+ # Keep a reference to the Overlapped object to keep it alive until the
+ # wait is unregistered
+ self._ov = ov
+ self._handle = handle
+ self._wait_handle = wait_handle
+
+ # Should we call UnregisterWaitEx() if the wait completes
+ # or is cancelled?
+ self._registered = True
+
+ def _poll(self):
+ # non-blocking wait: use a timeout of 0 millisecond
+ return (_winapi.WaitForSingleObject(self._handle, 0) ==
+ _winapi.WAIT_OBJECT_0)
+
+ def _repr_info(self):
+ info = super()._repr_info()
+ info.append(f'handle={self._handle:#x}')
+ if self._handle is not None:
+ state = 'signaled' if self._poll() else 'waiting'
+ info.append(state)
+ if self._wait_handle is not None:
+ info.append(f'wait_handle={self._wait_handle:#x}')
+ return info
+
+ def _unregister_wait_cb(self, fut):
+ # The wait was unregistered: it's not safe to destroy the Overlapped
+ # object
+ self._ov = None
+
+ def _unregister_wait(self):
+ if not self._registered:
+ return
+ self._registered = False
+
+ wait_handle = self._wait_handle
+ self._wait_handle = None
+ try:
+ _overlapped.UnregisterWait(wait_handle)
+ except OSError as exc:
+ if exc.winerror != _overlapped.ERROR_IO_PENDING:
+ context = {
+ 'message': 'Failed to unregister the wait handle',
+ 'exception': exc,
+ 'future': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ return
+ # ERROR_IO_PENDING means that the unregister is pending
+
+ self._unregister_wait_cb(None)
+
+ def cancel(self, msg=None):
+ self._unregister_wait()
+ return super().cancel(msg=msg)
+
+ def set_exception(self, exception):
+ self._unregister_wait()
+ super().set_exception(exception)
+
+ def set_result(self, result):
+ self._unregister_wait()
+ super().set_result(result)
+
+
+class _WaitCancelFuture(_BaseWaitHandleFuture):
+ """Subclass of Future which represents a wait for the cancellation of a
+ _WaitHandleFuture using an event.
+ """
+
+ def __init__(self, ov, event, wait_handle, *, loop=None):
+ super().__init__(ov, event, wait_handle, loop=loop)
+
+ self._done_callback = None
+
+ def cancel(self):
+ raise RuntimeError("_WaitCancelFuture must not be cancelled")
+
+ def set_result(self, result):
+ super().set_result(result)
+ if self._done_callback is not None:
+ self._done_callback(self)
+
+ def set_exception(self, exception):
+ super().set_exception(exception)
+ if self._done_callback is not None:
+ self._done_callback(self)
+
+
+class _WaitHandleFuture(_BaseWaitHandleFuture):
+ def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
+ super().__init__(ov, handle, wait_handle, loop=loop)
+ self._proactor = proactor
+ self._unregister_proactor = True
+ self._event = _overlapped.CreateEvent(None, True, False, None)
+ self._event_fut = None
+
+ def _unregister_wait_cb(self, fut):
+ if self._event is not None:
+ _winapi.CloseHandle(self._event)
+ self._event = None
+ self._event_fut = None
+
+ # If the wait was cancelled, the wait may never be signalled, so
+ # it's required to unregister it. Otherwise, IocpProactor.close() will
+ # wait forever for an event which will never come.
+ #
+ # If the IocpProactor already received the event, it's safe to call
+ # _unregister() because we kept a reference to the Overlapped object
+ # which is used as a unique key.
+ self._proactor._unregister(self._ov)
+ self._proactor = None
+
+ super()._unregister_wait_cb(fut)
+
+ def _unregister_wait(self):
+ if not self._registered:
+ return
+ self._registered = False
+
+ wait_handle = self._wait_handle
+ self._wait_handle = None
+ try:
+ _overlapped.UnregisterWaitEx(wait_handle, self._event)
+ except OSError as exc:
+ if exc.winerror != _overlapped.ERROR_IO_PENDING:
+ context = {
+ 'message': 'Failed to unregister the wait handle',
+ 'exception': exc,
+ 'future': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ return
+ # ERROR_IO_PENDING is not an error: the wait was unregistered
+
+ self._event_fut = self._proactor._wait_cancel(self._event,
+ self._unregister_wait_cb)
+
+
+class PipeServer(object):
+ """Class representing a pipe server.
+
+ This is much like a bound, listening socket.
+ """
+ def __init__(self, address):
+ self._address = address
+ self._free_instances = weakref.WeakSet()
+ # initialize the pipe attribute before calling _server_pipe_handle()
+ # because this function can raise an exception and the destructor calls
+ # the close() method
+ self._pipe = None
+ self._accept_pipe_future = None
+ self._pipe = self._server_pipe_handle(True)
+
+ def _get_unconnected_pipe(self):
+ # Create a new instance and return the previous one. This ensures
+ # that (until the server is closed) there is always at least one
+ # pipe handle for the address, so a client attempting to connect
+ # will not fail with FileNotFoundError.
+ tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
+ return tmp
+
+ def _server_pipe_handle(self, first):
+ # Return a wrapper for a new pipe handle.
+ if self.closed():
+ return None
+ flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
+ if first:
+ flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+ h = _winapi.CreateNamedPipe(
+ self._address, flags,
+ _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
+ _winapi.PIPE_WAIT,
+ _winapi.PIPE_UNLIMITED_INSTANCES,
+ windows_utils.BUFSIZE, windows_utils.BUFSIZE,
+ _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+ pipe = windows_utils.PipeHandle(h)
+ self._free_instances.add(pipe)
+ return pipe
+
+ def closed(self):
+ return (self._address is None)
+
+ def close(self):
+ if self._accept_pipe_future is not None:
+ self._accept_pipe_future.cancel()
+ self._accept_pipe_future = None
+ # Close all instances which have not been connected to by a client.
+ if self._address is not None:
+ for pipe in self._free_instances:
+ pipe.close()
+ self._pipe = None
+ self._address = None
+ self._free_instances.clear()
+
+ __del__ = close
+
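+# A hedged note: applications are not expected to construct PipeServer
+# directly; ProactorEventLoop.start_serving_pipe() below creates one
+# per pipe address and keeps an unconnected instance queued (via
+# _get_unconnected_pipe()) so connecting clients never observe
+# FileNotFoundError.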
+
+class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
+ """Windows version of selector event loop."""
+
+
+class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
+ """Windows version of proactor event loop using IOCP."""
+
+ def __init__(self, proactor=None):
+ if proactor is None:
+ proactor = IocpProactor()
+ super().__init__(proactor)
+
+ def run_forever(self):
+ try:
+ assert self._self_reading_future is None
+ self.call_soon(self._loop_self_reading)
+ super().run_forever()
+ finally:
+ if self._self_reading_future is not None:
+ ov = self._self_reading_future._ov
+ self._self_reading_future.cancel()
+ # self_reading_future was just cancelled so if it hasn't been
+ # finished yet, it never will be (it's possible that it has
+ # already finished and its callback is waiting in the queue,
+ # in which case it could still run if the event loop is
+ # restarted). Unregister it, otherwise IocpProactor.close will
+ # wait for it forever.
+ if ov is not None:
+ self._proactor._unregister(ov)
+ self._self_reading_future = None
+
+ async def create_pipe_connection(self, protocol_factory, address):
+ f = self._proactor.connect_pipe(address)
+ pipe = await f
+ protocol = protocol_factory()
+ trans = self._make_duplex_pipe_transport(pipe, protocol,
+ extra={'addr': address})
+ return trans, protocol
+
+ async def start_serving_pipe(self, protocol_factory, address):
+ server = PipeServer(address)
+
+ def loop_accept_pipe(f=None):
+ pipe = None
+ try:
+ if f:
+ pipe = f.result()
+ server._free_instances.discard(pipe)
+
+ if server.closed():
+ # A client connected before the server was closed:
+ # drop the client (close the pipe) and exit
+ pipe.close()
+ return
+
+ protocol = protocol_factory()
+ self._make_duplex_pipe_transport(
+ pipe, protocol, extra={'addr': address})
+
+ pipe = server._get_unconnected_pipe()
+ if pipe is None:
+ return
+
+ f = self._proactor.accept_pipe(pipe)
+ except BrokenPipeError:
+ if pipe and pipe.fileno() != -1:
+ pipe.close()
+ self.call_soon(loop_accept_pipe)
+ except OSError as exc:
+ if pipe and pipe.fileno() != -1:
+ self.call_exception_handler({
+ 'message': 'Pipe accept failed',
+ 'exception': exc,
+ 'pipe': pipe,
+ })
+ pipe.close()
+ elif self._debug:
+ logger.warning("Accept pipe failed on pipe %r",
+ pipe, exc_info=True)
+ self.call_soon(loop_accept_pipe)
+ except exceptions.CancelledError:
+ if pipe:
+ pipe.close()
+ else:
+ server._accept_pipe_future = f
+ f.add_done_callback(loop_accept_pipe)
+
+ self.call_soon(loop_accept_pipe)
+ return [server]
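+
+ # A minimal, illustrative sketch of consuming this API (the protocol
+ # class and pipe path are assumptions, not part of this module):
+ #
+ #   [server] = await loop.start_serving_pipe(
+ #       MyProtocol, r'\\.\pipe\my-service')
+ #   ...
+ #   server.close()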
+
+ async def _make_subprocess_transport(self, protocol, args, shell,
+ stdin, stdout, stderr, bufsize,
+ extra=None, **kwargs):
+ waiter = self.create_future()
+ transp = _WindowsSubprocessTransport(self, protocol, args, shell,
+ stdin, stdout, stderr, bufsize,
+ waiter=waiter, extra=extra,
+ **kwargs)
+ try:
+ await waiter
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException:
+ transp.close()
+ await transp._wait()
+ raise
+
+ return transp
+
+
+class IocpProactor:
+ """Proactor implementation using IOCP."""
+
+ def __init__(self, concurrency=INFINITE):
+ self._loop = None
+ self._results = []
+ self._iocp = _overlapped.CreateIoCompletionPort(
+ _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
+ self._cache = {}
+ self._registered = weakref.WeakSet()
+ self._unregistered = []
+ self._stopped_serving = weakref.WeakSet()
+
+ def _check_closed(self):
+ if self._iocp is None:
+ raise RuntimeError('IocpProactor is closed')
+
+ def __repr__(self):
+ info = ['overlapped#=%s' % len(self._cache),
+ 'result#=%s' % len(self._results)]
+ if self._iocp is None:
+ info.append('closed')
+ return '<%s %s>' % (self.__class__.__name__, " ".join(info))
+
+ def set_loop(self, loop):
+ self._loop = loop
+
+ def select(self, timeout=None):
+ if not self._results:
+ self._poll(timeout)
+ tmp = self._results
+ self._results = []
+ try:
+ return tmp
+ finally:
+ # Needed to break cycles when an exception occurs.
+ tmp = None
+
+ def _result(self, value):
+ fut = self._loop.create_future()
+ fut.set_result(value)
+ return fut
+
+ @staticmethod
+ def finish_socket_func(trans, key, ov):
+ try:
+ return ov.getresult()
+ except OSError as exc:
+ if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
+ _overlapped.ERROR_OPERATION_ABORTED):
+ raise ConnectionResetError(*exc.args)
+ else:
+ raise
+
+ def recv(self, conn, nbytes, flags=0):
+ self._register_with_iocp(conn)
+ ov = _overlapped.Overlapped(NULL)
+ try:
+ if isinstance(conn, socket.socket):
+ ov.WSARecv(conn.fileno(), nbytes, flags)
+ else:
+ ov.ReadFile(conn.fileno(), nbytes)
+ except BrokenPipeError:
+ return self._result(b'')
+
+ return self._register(ov, conn, self.finish_socket_func)
+
+ def recv_into(self, conn, buf, flags=0):
+ self._register_with_iocp(conn)
+ ov = _overlapped.Overlapped(NULL)
+ try:
+ if isinstance(conn, socket.socket):
+ ov.WSARecvInto(conn.fileno(), buf, flags)
+ else:
+ ov.ReadFileInto(conn.fileno(), buf)
+ except BrokenPipeError:
+ return self._result(0)
+
+ return self._register(ov, conn, self.finish_socket_func)
+
+ def recvfrom(self, conn, nbytes, flags=0):
+ self._register_with_iocp(conn)
+ ov = _overlapped.Overlapped(NULL)
+ try:
+ ov.WSARecvFrom(conn.fileno(), nbytes, flags)
+ except BrokenPipeError:
+ return self._result((b'', None))
+
+ return self._register(ov, conn, self.finish_socket_func)
+
+ def recvfrom_into(self, conn, buf, flags=0):
+ self._register_with_iocp(conn)
+ ov = _overlapped.Overlapped(NULL)
+ try:
+ ov.WSARecvFromInto(conn.fileno(), buf, flags)
+ except BrokenPipeError:
+ return self._result((0, None))
+
+ return self._register(ov, conn, self.finish_socket_func)
+
+ def sendto(self, conn, buf, flags=0, addr=None):
+ self._register_with_iocp(conn)
+ ov = _overlapped.Overlapped(NULL)
+
+ ov.WSASendTo(conn.fileno(), buf, flags, addr)
+
+ return self._register(ov, conn, self.finish_socket_func)
+
+ def send(self, conn, buf, flags=0):
+ self._register_with_iocp(conn)
+ ov = _overlapped.Overlapped(NULL)
+ if isinstance(conn, socket.socket):
+ ov.WSASend(conn.fileno(), buf, flags)
+ else:
+ ov.WriteFile(conn.fileno(), buf)
+
+ return self._register(ov, conn, self.finish_socket_func)
+
+ def accept(self, listener):
+ self._register_with_iocp(listener)
+ conn = self._get_accept_socket(listener.family)
+ ov = _overlapped.Overlapped(NULL)
+ ov.AcceptEx(listener.fileno(), conn.fileno())
+
+ def finish_accept(trans, key, ov):
+ ov.getresult()
+ # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
+ buf = struct.pack('@P', listener.fileno())
+ conn.setsockopt(socket.SOL_SOCKET,
+ _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
+ conn.settimeout(listener.gettimeout())
+ return conn, conn.getpeername()
+
+ async def accept_coro(future, conn):
+ # Coroutine closing the accept socket if the future is cancelled
+ try:
+ await future
+ except exceptions.CancelledError:
+ conn.close()
+ raise
+
+ future = self._register(ov, listener, finish_accept)
+ coro = accept_coro(future, conn)
+ tasks.ensure_future(coro, loop=self._loop)
+ return future
+
+ def connect(self, conn, address):
+ if conn.type == socket.SOCK_DGRAM:
+ # WSAConnect will complete immediately for UDP sockets so we don't
+ # need to register any IOCP operation
+ _overlapped.WSAConnect(conn.fileno(), address)
+ fut = self._loop.create_future()
+ fut.set_result(None)
+ return fut
+
+ self._register_with_iocp(conn)
+ # The socket needs to be locally bound before we call ConnectEx().
+ try:
+ _overlapped.BindLocal(conn.fileno(), conn.family)
+ except OSError as e:
+ if e.winerror != errno.WSAEINVAL:
+ raise
+ # Probably already locally bound; check using getsockname().
+ if conn.getsockname()[1] == 0:
+ raise
+ ov = _overlapped.Overlapped(NULL)
+ ov.ConnectEx(conn.fileno(), address)
+
+ def finish_connect(trans, key, ov):
+ ov.getresult()
+ # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
+ conn.setsockopt(socket.SOL_SOCKET,
+ _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
+ return conn
+
+ return self._register(ov, conn, finish_connect)
+
+ def sendfile(self, sock, file, offset, count):
+ self._register_with_iocp(sock)
+ ov = _overlapped.Overlapped(NULL)
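+ # TransmitFile takes the 64-bit file offset as two 32-bit halves.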
+ offset_low = offset & 0xffff_ffff
+ offset_high = (offset >> 32) & 0xffff_ffff
+ ov.TransmitFile(sock.fileno(),
+ msvcrt.get_osfhandle(file.fileno()),
+ offset_low, offset_high,
+ count, 0, 0)
+
+ return self._register(ov, sock, self.finish_socket_func)
+
+ def accept_pipe(self, pipe):
+ self._register_with_iocp(pipe)
+ ov = _overlapped.Overlapped(NULL)
+ connected = ov.ConnectNamedPipe(pipe.fileno())
+
+ if connected:
+ # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED which means
+ # that the pipe is connected. There is no need to wait for the
+ # completion of the connection.
+ return self._result(pipe)
+
+ def finish_accept_pipe(trans, key, ov):
+ ov.getresult()
+ return pipe
+
+ return self._register(ov, pipe, finish_accept_pipe)
+
+ async def connect_pipe(self, address):
+ delay = CONNECT_PIPE_INIT_DELAY
+ while True:
+ # Unfortunately there is no way to do an overlapped connect to
+ # a pipe. Call CreateFile() in a loop until it doesn't fail with
+ # ERROR_PIPE_BUSY.
+ try:
+ handle = _overlapped.ConnectPipe(address)
+ break
+ except OSError as exc:
+ if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
+ raise
+
+ # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
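+ # The delay doubles per retry (exponential backoff), capped at
+ # CONNECT_PIPE_MAX_DELAY, so a busy server is polled gently.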
+ delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
+ await tasks.sleep(delay)
+
+ return windows_utils.PipeHandle(handle)
+
+ def wait_for_handle(self, handle, timeout=None):
+ """Wait for a handle.
+
+ Return a Future object. The result of the future is True if the wait
+ completed, or False if the wait did not complete (on timeout).
+ """
+ return self._wait_for_handle(handle, timeout, False)
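+
+ # A hedged sketch: this is how _WindowsSubprocessTransport (below)
+ # waits for child-process exit:
+ #
+ #   fut = proactor.wait_for_handle(int(proc._handle))
+ #   fut.add_done_callback(callback)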
+
+ def _wait_cancel(self, event, done_callback):
+ fut = self._wait_for_handle(event, None, True)
+ # add_done_callback() cannot be used because the wait may only complete
+ # in IocpProactor.close(), while the event loop is not running.
+ fut._done_callback = done_callback
+ return fut
+
+ def _wait_for_handle(self, handle, timeout, _is_cancel):
+ self._check_closed()
+
+ if timeout is None:
+ ms = _winapi.INFINITE
+ else:
+ # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ ms = math.ceil(timeout * 1e3)
+
+ # We only create ov so we can use ov.address as a key for the cache.
+ ov = _overlapped.Overlapped(NULL)
+ wait_handle = _overlapped.RegisterWaitWithQueue(
+ handle, self._iocp, ov.address, ms)
+ if _is_cancel:
+ f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
+ else:
+ f = _WaitHandleFuture(ov, handle, wait_handle, self,
+ loop=self._loop)
+ if f._source_traceback:
+ del f._source_traceback[-1]
+
+ def finish_wait_for_handle(trans, key, ov):
+ # Note that this second wait means that we should only use
+ # this with handle types where a successful wait has no
+ # effect. So events or processes are all right, but locks
+ # or semaphores are not. Also note that if the handle is
+ # signalled and then quickly reset, we may return False
+ # even though we have not timed out.
+ return f._poll()
+
+ self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
+ return f
+
+ def _register_with_iocp(self, obj):
+ # To get notifications of finished ops on this object sent to the
+ # completion port, we must register the handle.
+ if obj not in self._registered:
+ self._registered.add(obj)
+ _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
+ # XXX We could also use SetFileCompletionNotificationModes()
+ # to avoid sending notifications to completion port of ops
+ # that succeed immediately.
+
+ def _register(self, ov, obj, callback):
+ self._check_closed()
+
+ # Return a future which will be set with the result of the
+ # operation when it completes. The future's value is actually
+ # the value returned by callback().
+ f = _OverlappedFuture(ov, loop=self._loop)
+ if f._source_traceback:
+ del f._source_traceback[-1]
+ if not ov.pending:
+ # The operation has completed, so no need to postpone the
+ # work. We cannot take this short cut if we need the
+ # NumberOfBytes, CompletionKey values returned by
+ # PostQueuedCompletionStatus().
+ try:
+ value = callback(None, None, ov)
+ except OSError as e:
+ f.set_exception(e)
+ else:
+ f.set_result(value)
+ # Even if GetOverlappedResult() was called, we have to wait for the
+ # notification of the completion in GetQueuedCompletionStatus().
+ # Register the overlapped operation to keep a reference to the
+ # OVERLAPPED object, otherwise the memory is freed and Windows may
+ # read uninitialized memory.
+
+ # Register the overlapped operation for later. Note that
+ # we only store obj to prevent it from being garbage
+ # collected too early.
+ self._cache[ov.address] = (f, ov, obj, callback)
+ return f
+
+ def _unregister(self, ov):
+ """Unregister an overlapped object.
+
+ Call this method when its future has been cancelled. The event can
+ already be signalled (pending in the proactor event queue). It is also
+ safe if the event is never signalled (because it was cancelled).
+ """
+ self._check_closed()
+ self._unregistered.append(ov)
+
+ def _get_accept_socket(self, family):
+ s = socket.socket(family)
+ s.settimeout(0)
+ return s
+
+ def _poll(self, timeout=None):
+ if timeout is None:
+ ms = INFINITE
+ elif timeout < 0:
+ raise ValueError("negative timeout")
+ else:
+ # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ ms = math.ceil(timeout * 1e3)
+ if ms >= INFINITE:
+ raise ValueError("timeout too big")
+
+ while True:
+ status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
+ if status is None:
+ break
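+ # Only the first GetQueuedCompletionStatus() call may block: later
+ # iterations use a zero timeout and just drain queued events.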
+ ms = 0
+
+ err, transferred, key, address = status
+ try:
+ f, ov, obj, callback = self._cache.pop(address)
+ except KeyError:
+ if self._loop.get_debug():
+ self._loop.call_exception_handler({
+ 'message': ('GetQueuedCompletionStatus() returned an '
+ 'unexpected event'),
+ 'status': ('err=%s transferred=%s key=%#x address=%#x'
+ % (err, transferred, key, address)),
+ })
+
+ # key is either zero, or it is used to return a pipe
+ # handle which should be closed to avoid a leak.
+ if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
+ _winapi.CloseHandle(key)
+ continue
+
+ if obj in self._stopped_serving:
+ f.cancel()
+ # Don't call the callback if _register() already read the result or
+ # if the overlapped has been cancelled
+ elif not f.done():
+ try:
+ value = callback(transferred, key, ov)
+ except OSError as e:
+ f.set_exception(e)
+ self._results.append(f)
+ else:
+ f.set_result(value)
+ self._results.append(f)
+ finally:
+ f = None
+
+ # Remove unregistered futures
+ for ov in self._unregistered:
+ self._cache.pop(ov.address, None)
+ self._unregistered.clear()
+
+ def _stop_serving(self, obj):
+ # obj is a socket or pipe handle. It will be closed in
+ # BaseProactorEventLoop._stop_serving() which will make any
+ # pending operations fail quickly.
+ self._stopped_serving.add(obj)
+
+ def close(self):
+ if self._iocp is None:
+ # already closed
+ return
+
+ # Cancel remaining registered operations.
+ for fut, ov, obj, callback in list(self._cache.values()):
+ if fut.cancelled():
+ # Nothing to do with cancelled futures
+ pass
+ elif isinstance(fut, _WaitCancelFuture):
+ # _WaitCancelFuture must not be cancelled
+ pass
+ else:
+ try:
+ fut.cancel()
+ except OSError as exc:
+ if self._loop is not None:
+ context = {
+ 'message': 'Cancelling a future failed',
+ 'exception': exc,
+ 'future': fut,
+ }
+ if fut._source_traceback:
+ context['source_traceback'] = fut._source_traceback
+ self._loop.call_exception_handler(context)
+
+ # Wait until all cancelled overlapped complete: don't exit with running
+ # overlapped to prevent a crash. Display progress every second if the
+ # loop is still running.
+ msg_update = 1.0
+ start_time = time.monotonic()
+ next_msg = start_time + msg_update
+ while self._cache:
+ if next_msg <= time.monotonic():
+ logger.debug('%r is running after closing for %.1f seconds',
+ self, time.monotonic() - start_time)
+ next_msg = time.monotonic() + msg_update
+
+ # handle a few events, or timeout
+ self._poll(msg_update)
+
+ self._results = []
+
+ _winapi.CloseHandle(self._iocp)
+ self._iocp = None
+
+ def __del__(self):
+ self.close()
+
+
+class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+ self._proc = windows_utils.Popen(
+ args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+ bufsize=bufsize, **kwargs)
+
+ def callback(f):
+ returncode = self._proc.poll()
+ self._process_exited(returncode)
+
+ f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
+ f.add_done_callback(callback)
+
+
+SelectorEventLoop = _WindowsSelectorEventLoop
+
+
+class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+ _loop_factory = SelectorEventLoop
+
+
+class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+ _loop_factory = ProactorEventLoop
+
+
+DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy
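+
+# A hedged usage sketch: applications that need selector semantics can
+# opt out of the default proactor policy, e.g.
+#
+#   asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())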
diff --git a/contrib/tools/python3/Lib/asyncio/windows_utils.py b/contrib/tools/python3/Lib/asyncio/windows_utils.py
new file mode 100644
index 0000000000..ef277fac3e
--- /dev/null
+++ b/contrib/tools/python3/Lib/asyncio/windows_utils.py
@@ -0,0 +1,173 @@
+"""Various Windows specific bits and pieces."""
+
+import sys
+
+if sys.platform != 'win32': # pragma: no cover
+ raise ImportError('win32 only')
+
+import _winapi
+import itertools
+import msvcrt
+import os
+import subprocess
+import tempfile
+import warnings
+
+
+__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle'
+
+
+# Constants/globals
+
+
+BUFSIZE = 8192
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+_mmap_counter = itertools.count()
+
+
+# Replacement for os.pipe() using handles instead of fds
+
+
+def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+ """Like os.pipe() but with overlapped support and using handles not fds."""
+ address = tempfile.mktemp(
+ prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
+ os.getpid(), next(_mmap_counter)))
+
+ if duplex:
+ openmode = _winapi.PIPE_ACCESS_DUPLEX
+ access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+ obsize, ibsize = bufsize, bufsize
+ else:
+ openmode = _winapi.PIPE_ACCESS_INBOUND
+ access = _winapi.GENERIC_WRITE
+ obsize, ibsize = 0, bufsize
+
+ openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+
+ if overlapped[0]:
+ openmode |= _winapi.FILE_FLAG_OVERLAPPED
+
+ if overlapped[1]:
+ flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
+ else:
+ flags_and_attribs = 0
+
+ h1 = h2 = None
+ try:
+ h1 = _winapi.CreateNamedPipe(
+ address, openmode, _winapi.PIPE_WAIT,
+ 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+
+ h2 = _winapi.CreateFile(
+ address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+ flags_and_attribs, _winapi.NULL)
+
+ ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
+ ov.GetOverlappedResult(True)
+ return h1, h2
+ except:
+ if h1 is not None:
+ _winapi.CloseHandle(h1)
+ if h2 is not None:
+ _winapi.CloseHandle(h2)
+ raise
+
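+# A hedged usage sketch mirroring Popen below: create a one-way pipe
+# with an overlapped read end and expose the write end to a child as a
+# CRT file descriptor:
+#
+#   rh, wh = pipe(overlapped=(True, False))
+#   wfd = msvcrt.open_osfhandle(wh, 0)
+#   # ... pass wfd to the child, read from rh with overlapped I/O ...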
+
+# Wrapper for a pipe handle
+
+
+class PipeHandle:
+ """Wrapper for an overlapped pipe handle which is vaguely file-object like.
+
+ The IOCP event loop can use these instead of socket objects.
+ """
+ def __init__(self, handle):
+ self._handle = handle
+
+ def __repr__(self):
+ if self._handle is not None:
+ handle = f'handle={self._handle!r}'
+ else:
+ handle = 'closed'
+ return f'<{self.__class__.__name__} {handle}>'
+
+ @property
+ def handle(self):
+ return self._handle
+
+ def fileno(self):
+ if self._handle is None:
+ raise ValueError("I/O operation on closed pipe")
+ return self._handle
+
+ def close(self, *, CloseHandle=_winapi.CloseHandle):
+ if self._handle is not None:
+ CloseHandle(self._handle)
+ self._handle = None
+
+ def __del__(self, _warn=warnings.warn):
+ if self._handle is not None:
+ _warn(f"unclosed {self!r}", ResourceWarning, source=self)
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, t, v, tb):
+ self.close()
+
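+# PipeHandle is a context manager, so a handle can be closed
+# deterministically; a minimal sketch:
+#
+#   with PipeHandle(raw_handle) as p:
+#       _ = p.fileno()  # returns the raw handle itself, not a CRT fd
+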
+
+# Replacement for subprocess.Popen using overlapped pipe handles
+
+
+class Popen(subprocess.Popen):
+ """Replacement for subprocess.Popen using overlapped pipe handles.
+
+ The stdin, stdout, stderr are None or instances of PipeHandle.
+ """
+ def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
+ assert not kwds.get('universal_newlines')
+ assert kwds.get('bufsize', 0) == 0
+ stdin_rfd = stdout_wfd = stderr_wfd = None
+ stdin_wh = stdout_rh = stderr_rh = None
+ if stdin == PIPE:
+ stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
+ stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
+ else:
+ stdin_rfd = stdin
+ if stdout == PIPE:
+ stdout_rh, stdout_wh = pipe(overlapped=(True, False))
+ stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
+ else:
+ stdout_wfd = stdout
+ if stderr == PIPE:
+ stderr_rh, stderr_wh = pipe(overlapped=(True, False))
+ stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
+ elif stderr == STDOUT:
+ stderr_wfd = stdout_wfd
+ else:
+ stderr_wfd = stderr
+ try:
+ super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
+ stderr=stderr_wfd, **kwds)
+ except:
+ for h in (stdin_wh, stdout_rh, stderr_rh):
+ if h is not None:
+ _winapi.CloseHandle(h)
+ raise
+ else:
+ if stdin_wh is not None:
+ self.stdin = PipeHandle(stdin_wh)
+ if stdout_rh is not None:
+ self.stdout = PipeHandle(stdout_rh)
+ if stderr_rh is not None:
+ self.stderr = PipeHandle(stderr_rh)
+ finally:
+ if stdin == PIPE:
+ os.close(stdin_rfd)
+ if stdout == PIPE:
+ os.close(stdout_wfd)
+ if stderr == PIPE:
+ os.close(stderr_wfd)
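+
+# A hedged usage sketch (the command is illustrative): unlike
+# subprocess.Popen, the resulting std* attributes are PipeHandle
+# objects ready for overlapped I/O, not file objects:
+#
+#   proc = Popen(['findstr', 'spam'], stdin=PIPE, stdout=PIPE)
+#   out_pipe = proc.stdout  # PipeHandle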