aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/python/Twisted/py3/twisted/internet
diff options
context:
space:
mode:
authorshmel1k <shmel1k@ydb.tech>2023-11-26 18:16:14 +0300
committershmel1k <shmel1k@ydb.tech>2023-11-26 18:43:30 +0300
commitb8cf9e88f4c5c64d9406af533d8948deb050d695 (patch)
tree218eb61fb3c3b96ec08b4d8cdfef383104a87d63 /contrib/python/Twisted/py3/twisted/internet
parent523f645a83a0ec97a0332dbc3863bb354c92a328 (diff)
downloadydb-b8cf9e88f4c5c64d9406af533d8948deb050d695.tar.gz
add kikimr_configure
Diffstat (limited to 'contrib/python/Twisted/py3/twisted/internet')
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/__init__.py12
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_baseprocess.py68
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_deprecate.py25
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_dumbwin32proc.py397
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_glibbase.py369
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_idna.py51
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_newtls.py256
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_pollingfile.py291
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_posixserialport.py81
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_posixstdio.py178
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_producer_helpers.py124
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_resolver.py342
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_signals.py445
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_sslverify.py2017
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_threadedselect.py337
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_win32serialport.py156
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/_win32stdio.py127
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/abstract.py542
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/address.py182
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/asyncioreactor.py307
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/base.py1345
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/cfreactor.py593
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/default.py55
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/defer.py2697
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/endpoints.py2338
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/epollreactor.py259
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/error.py510
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/fdesc.py121
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/gireactor.py122
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/glib2reactor.py50
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/gtk2reactor.py119
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/gtk3reactor.py22
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/inotify.py426
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/interfaces.py2756
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/__init__.py10
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/abstract.py387
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/const.py25
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/interfaces.py42
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/iocpsupport.py27
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/notes.txt24
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/reactor.py285
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/tcp.py608
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/iocpreactor/udp.py428
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/kqreactor.py324
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/main.py37
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/pollreactor.py189
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/posixbase.py653
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/process.py1293
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/protocol.py900
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/pyuisupport.py39
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/reactor.py40
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/selectreactor.py197
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/serialport.py100
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/ssl.py278
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/stdio.py37
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/task.py976
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/tcp.py1523
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/testing.py969
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/threads.py144
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/tksupport.py78
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/udp.py533
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/unix.py645
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/utils.py256
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/win32eventreactor.py425
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/wxreactor.py188
-rw-r--r--contrib/python/Twisted/py3/twisted/internet/wxsupport.py57
66 files changed, 29437 insertions, 0 deletions
diff --git a/contrib/python/Twisted/py3/twisted/internet/__init__.py b/contrib/python/Twisted/py3/twisted/internet/__init__.py
new file mode 100644
index 0000000000..a3d851d198
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Twisted Internet: Asynchronous I/O and Events.
+
+Twisted Internet is a collection of compatible event-loops for Python. It contains
+the code to dispatch events to interested observers and a portable API so that
+observers need not care about which event loop is running. Thus, it is possible
+to use the same code for different loops, from Twisted's basic, yet portable,
+select-based loop to the loops of various GUI toolkits like GTK+ or Tk.
+"""
diff --git a/contrib/python/Twisted/py3/twisted/internet/_baseprocess.py b/contrib/python/Twisted/py3/twisted/internet/_baseprocess.py
new file mode 100644
index 0000000000..83bc08fdc0
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_baseprocess.py
@@ -0,0 +1,68 @@
+# -*- test-case-name: twisted.test.test_process -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Cross-platform process-related functionality used by different
+L{IReactorProcess} implementations.
+"""
+
+from typing import Optional
+
+from twisted.python.deprecate import getWarningMethod
+from twisted.python.failure import Failure
+from twisted.python.log import err
+from twisted.python.reflect import qual
+
+_missingProcessExited = (
+ "Since Twisted 8.2, IProcessProtocol.processExited "
+ "is required. %s must implement it."
+)
+
+
+class BaseProcess:
+ pid: Optional[int] = None
+ status: Optional[int] = None
+ lostProcess = 0
+ proto = None
+
+ def __init__(self, protocol):
+ self.proto = protocol
+
+ def _callProcessExited(self, reason):
+ default = object()
+ processExited = getattr(self.proto, "processExited", default)
+ if processExited is default:
+ getWarningMethod()(
+ _missingProcessExited % (qual(self.proto.__class__),),
+ DeprecationWarning,
+ stacklevel=0,
+ )
+ else:
+ try:
+ processExited(Failure(reason))
+ except BaseException:
+ err(None, "unexpected error in processExited")
+
+ def processEnded(self, status):
+ """
+ This is called when the child terminates.
+ """
+ self.status = status
+ self.lostProcess += 1
+ self.pid = None
+ self._callProcessExited(self._getReason(status))
+ self.maybeCallProcessEnded()
+
+ def maybeCallProcessEnded(self):
+ """
+ Call processEnded on protocol after final cleanup.
+ """
+ if self.proto is not None:
+ reason = self._getReason(self.status)
+ proto = self.proto
+ self.proto = None
+ try:
+ proto.processEnded(Failure(reason))
+ except BaseException:
+ err(None, "unexpected error in processEnded")
diff --git a/contrib/python/Twisted/py3/twisted/internet/_deprecate.py b/contrib/python/Twisted/py3/twisted/internet/_deprecate.py
new file mode 100644
index 0000000000..6eb1e3855f
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_deprecate.py
@@ -0,0 +1,25 @@
+"""
+Support similar deprecation of several reactors.
+"""
+
+import warnings
+
+from incremental import Version, getVersionString
+
+from twisted.python.deprecate import DEPRECATION_WARNING_FORMAT
+
+
+def deprecatedGnomeReactor(name: str, version: Version) -> None:
+ """
+ Emit a deprecation warning about a gnome-related reactor.
+
+ @param name: The name of the reactor. For example, C{"gtk2reactor"}.
+
+ @param version: The version in which the deprecation was introduced.
+ """
+ stem = DEPRECATION_WARNING_FORMAT % {
+ "fqpn": "twisted.internet." + name,
+ "version": getVersionString(version),
+ }
+ msg = stem + ". Please use twisted.internet.gireactor instead."
+ warnings.warn(msg, category=DeprecationWarning)
diff --git a/contrib/python/Twisted/py3/twisted/internet/_dumbwin32proc.py b/contrib/python/Twisted/py3/twisted/internet/_dumbwin32proc.py
new file mode 100644
index 0000000000..678f54e59b
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_dumbwin32proc.py
@@ -0,0 +1,397 @@
+# -*- test-case-name: twisted.test.test_process -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Windows Process Management, used with reactor.spawnProcess
+"""
+
+
+import os
+import sys
+
+from zope.interface import implementer
+
+import pywintypes # type: ignore[import]
+
+# Win32 imports
+import win32api # type: ignore[import]
+import win32con # type: ignore[import]
+import win32event # type: ignore[import]
+import win32file # type: ignore[import]
+import win32pipe # type: ignore[import]
+import win32process # type: ignore[import]
+import win32security # type: ignore[import]
+
+from twisted.internet import _pollingfile, error
+from twisted.internet._baseprocess import BaseProcess
+from twisted.internet.interfaces import IConsumer, IProcessTransport, IProducer
+from twisted.python.win32 import quoteArguments
+
+# Security attributes for pipes
+PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
+PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
+
+
+def debug(msg):
+ print(msg)
+ sys.stdout.flush()
+
+
+class _Reaper(_pollingfile._PollableResource):
+ def __init__(self, proc):
+ self.proc = proc
+
+ def checkWork(self):
+ if (
+ win32event.WaitForSingleObject(self.proc.hProcess, 0)
+ != win32event.WAIT_OBJECT_0
+ ):
+ return 0
+ exitCode = win32process.GetExitCodeProcess(self.proc.hProcess)
+ self.deactivate()
+ self.proc.processEnded(exitCode)
+ return 0
+
+
+def _findShebang(filename):
+ """
+ Look for a #! line, and return the value following the #! if one exists, or
+ None if this file is not a script.
+
+ I don't know if there are any conventions for quoting in Windows shebang
+ lines, so this doesn't support any; therefore, you may not pass any
+ arguments to scripts invoked as filters. That's probably wrong, so if
+ somebody knows more about the cultural expectations on Windows, please feel
+ free to fix.
+
+ This shebang line support was added in support of the CGI tests;
+ appropriately enough, I determined that shebang lines are culturally
+ accepted in the Windows world through this page::
+
+ http://www.cgi101.com/learn/connect/winxp.html
+
+ @param filename: str representing a filename
+
+ @return: a str representing another filename.
+ """
+ with open(filename) as f:
+ if f.read(2) == "#!":
+ exe = f.readline(1024).strip("\n")
+ return exe
+
+
+def _invalidWin32App(pywinerr):
+ """
+ Determine if a pywintypes.error is telling us that the given process is
+ 'not a valid win32 application', i.e. not a PE format executable.
+
+ @param pywinerr: a pywintypes.error instance raised by CreateProcess
+
+ @return: a boolean
+ """
+
+ # Let's do this better in the future, but I have no idea what this error
+ # is; MSDN doesn't mention it, and there is no symbolic constant in
+ # win32process module that represents 193.
+
+ return pywinerr.args[0] == 193
+
+
+@implementer(IProcessTransport, IConsumer, IProducer)
+class Process(_pollingfile._PollingTimer, BaseProcess):
+ """
+ A process that integrates with the Twisted event loop.
+
+ If your subprocess is a python program, you need to:
+
+ - Run python.exe with the '-u' command line option - this turns on
+ unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
+ http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903
+
+ - If you don't want Windows messing with data passed over
+ stdin/out/err, set the pipes to be in binary mode::
+
+ import os, sys, mscvrt
+ msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+
+ """
+
+ closedNotifies = 0
+
+ def __init__(self, reactor, protocol, command, args, environment, path):
+ """
+ Create a new child process.
+ """
+ _pollingfile._PollingTimer.__init__(self, reactor)
+ BaseProcess.__init__(self, protocol)
+
+ # security attributes for pipes
+ sAttrs = win32security.SECURITY_ATTRIBUTES()
+ sAttrs.bInheritHandle = 1
+
+ # create the pipes which will connect to the secondary process
+ self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
+ self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
+ hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)
+
+ win32pipe.SetNamedPipeHandleState(
+ self.hStdinW, win32pipe.PIPE_NOWAIT, None, None
+ )
+
+ # set the info structure for the new process.
+ StartupInfo = win32process.STARTUPINFO()
+ StartupInfo.hStdOutput = hStdoutW
+ StartupInfo.hStdError = hStderrW
+ StartupInfo.hStdInput = hStdinR
+ StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES
+
+ # Create new handles whose inheritance property is false
+ currentPid = win32api.GetCurrentProcess()
+
+ tmp = win32api.DuplicateHandle(
+ currentPid, self.hStdoutR, currentPid, 0, 0, win32con.DUPLICATE_SAME_ACCESS
+ )
+ win32file.CloseHandle(self.hStdoutR)
+ self.hStdoutR = tmp
+
+ tmp = win32api.DuplicateHandle(
+ currentPid, self.hStderrR, currentPid, 0, 0, win32con.DUPLICATE_SAME_ACCESS
+ )
+ win32file.CloseHandle(self.hStderrR)
+ self.hStderrR = tmp
+
+ tmp = win32api.DuplicateHandle(
+ currentPid, self.hStdinW, currentPid, 0, 0, win32con.DUPLICATE_SAME_ACCESS
+ )
+ win32file.CloseHandle(self.hStdinW)
+ self.hStdinW = tmp
+
+ # Add the specified environment to the current environment - this is
+ # necessary because certain operations are only supported on Windows
+ # if certain environment variables are present.
+
+ env = os.environ.copy()
+ env.update(environment or {})
+ env = {os.fsdecode(key): os.fsdecode(value) for key, value in env.items()}
+
+ # Make sure all the arguments are Unicode.
+ args = [os.fsdecode(x) for x in args]
+
+ cmdline = quoteArguments(args)
+
+ # The command, too, needs to be Unicode, if it is a value.
+ command = os.fsdecode(command) if command else command
+ path = os.fsdecode(path) if path else path
+
+ # TODO: error detection here. See #2787 and #4184.
+ def doCreate():
+ flags = win32con.CREATE_NO_WINDOW
+ self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
+ command, cmdline, None, None, 1, flags, env, path, StartupInfo
+ )
+
+ try:
+ doCreate()
+ except pywintypes.error as pwte:
+ if not _invalidWin32App(pwte):
+ # This behavior isn't _really_ documented, but let's make it
+ # consistent with the behavior that is documented.
+ raise OSError(pwte)
+ else:
+ # look for a shebang line. Insert the original 'command'
+ # (actually a script) into the new arguments list.
+ sheb = _findShebang(command)
+ if sheb is None:
+ raise OSError(
+ "%r is neither a Windows executable, "
+ "nor a script with a shebang line" % command
+ )
+ else:
+ args = list(args)
+ args.insert(0, command)
+ cmdline = quoteArguments(args)
+ origcmd = command
+ command = sheb
+ try:
+ # Let's try again.
+ doCreate()
+ except pywintypes.error as pwte2:
+ # d'oh, failed again!
+ if _invalidWin32App(pwte2):
+ raise OSError(
+ "%r has an invalid shebang line: "
+ "%r is not a valid executable" % (origcmd, sheb)
+ )
+ raise OSError(pwte2)
+
+ # close handles which only the child will use
+ win32file.CloseHandle(hStderrW)
+ win32file.CloseHandle(hStdoutW)
+ win32file.CloseHandle(hStdinR)
+
+ # set up everything
+ self.stdout = _pollingfile._PollableReadPipe(
+ self.hStdoutR,
+ lambda data: self.proto.childDataReceived(1, data),
+ self.outConnectionLost,
+ )
+
+ self.stderr = _pollingfile._PollableReadPipe(
+ self.hStderrR,
+ lambda data: self.proto.childDataReceived(2, data),
+ self.errConnectionLost,
+ )
+
+ self.stdin = _pollingfile._PollableWritePipe(
+ self.hStdinW, self.inConnectionLost
+ )
+
+ for pipewatcher in self.stdout, self.stderr, self.stdin:
+ self._addPollableResource(pipewatcher)
+
+ # notify protocol
+ self.proto.makeConnection(self)
+
+ self._addPollableResource(_Reaper(self))
+
+ def signalProcess(self, signalID):
+ if self.pid is None:
+ raise error.ProcessExitedAlready()
+ if signalID in ("INT", "TERM", "KILL"):
+ win32process.TerminateProcess(self.hProcess, 1)
+
+ def _getReason(self, status):
+ if status == 0:
+ return error.ProcessDone(status)
+ return error.ProcessTerminated(status)
+
+ def write(self, data):
+ """
+ Write data to the process' stdin.
+
+ @type data: C{bytes}
+ """
+ self.stdin.write(data)
+
+ def writeSequence(self, seq):
+ """
+ Write data to the process' stdin.
+
+ @type seq: C{list} of C{bytes}
+ """
+ self.stdin.writeSequence(seq)
+
+ def writeToChild(self, fd, data):
+ """
+ Similar to L{ITransport.write} but also allows the file descriptor in
+ the child process which will receive the bytes to be specified.
+
+ This implementation is limited to writing to the child's standard input.
+
+ @param fd: The file descriptor to which to write. Only stdin (C{0}) is
+ supported.
+ @type fd: C{int}
+
+ @param data: The bytes to write.
+ @type data: C{bytes}
+
+ @return: L{None}
+
+ @raise KeyError: If C{fd} is anything other than the stdin file
+ descriptor (C{0}).
+ """
+ if fd == 0:
+ self.stdin.write(data)
+ else:
+ raise KeyError(fd)
+
+ def closeChildFD(self, fd):
+ if fd == 0:
+ self.closeStdin()
+ elif fd == 1:
+ self.closeStdout()
+ elif fd == 2:
+ self.closeStderr()
+ else:
+ raise NotImplementedError(
+ "Only standard-IO file descriptors available on win32"
+ )
+
+ def closeStdin(self):
+ """Close the process' stdin."""
+ self.stdin.close()
+
+ def closeStderr(self):
+ self.stderr.close()
+
+ def closeStdout(self):
+ self.stdout.close()
+
+ def loseConnection(self):
+ """
+ Close the process' stdout, in and err.
+ """
+ self.closeStdin()
+ self.closeStdout()
+ self.closeStderr()
+
+ def outConnectionLost(self):
+ self.proto.childConnectionLost(1)
+ self.connectionLostNotify()
+
+ def errConnectionLost(self):
+ self.proto.childConnectionLost(2)
+ self.connectionLostNotify()
+
+ def inConnectionLost(self):
+ self.proto.childConnectionLost(0)
+ self.connectionLostNotify()
+
+ def connectionLostNotify(self):
+ """
+ Will be called 3 times, by stdout/err threads and process handle.
+ """
+ self.closedNotifies += 1
+ self.maybeCallProcessEnded()
+
+ def maybeCallProcessEnded(self):
+ if self.closedNotifies == 3 and self.lostProcess:
+ win32file.CloseHandle(self.hProcess)
+ win32file.CloseHandle(self.hThread)
+ self.hProcess = None
+ self.hThread = None
+ BaseProcess.maybeCallProcessEnded(self)
+
+ # IConsumer
+ def registerProducer(self, producer, streaming):
+ self.stdin.registerProducer(producer, streaming)
+
+ def unregisterProducer(self):
+ self.stdin.unregisterProducer()
+
+ # IProducer
+ def pauseProducing(self):
+ self._pause()
+
+ def resumeProducing(self):
+ self._unpause()
+
+ def stopProducing(self):
+ self.loseConnection()
+
+ def getHost(self):
+ # ITransport.getHost
+ raise NotImplementedError("Unimplemented: Process.getHost")
+
+ def getPeer(self):
+ # ITransport.getPeer
+ raise NotImplementedError("Unimplemented: Process.getPeer")
+
+ def __repr__(self) -> str:
+ """
+ Return a string representation of the process.
+ """
+ return f"<{self.__class__.__name__} pid={self.pid}>"
diff --git a/contrib/python/Twisted/py3/twisted/internet/_glibbase.py b/contrib/python/Twisted/py3/twisted/internet/_glibbase.py
new file mode 100644
index 0000000000..4a6d1323ab
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_glibbase.py
@@ -0,0 +1,369 @@
+# -*- test-case-name: twisted.internet.test -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module provides base support for Twisted to interact with the glib/gtk
+mainloops.
+
+The classes in this module should not be used directly, but rather you should
+import gireactor or gtk3reactor for GObject Introspection based applications,
+or glib2reactor or gtk2reactor for applications using legacy static bindings.
+"""
+
+
+import sys
+from typing import Any, Callable, Dict, Set
+
+from zope.interface import implementer
+
+from twisted.internet import posixbase
+from twisted.internet.abstract import FileDescriptor
+from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor, IWriteDescriptor
+from twisted.python import log
+from twisted.python.monkey import MonkeyPatcher
+from ._signals import _UnixWaker
+
+
+def ensureNotImported(moduleNames, errorMessage, preventImports=[]):
+ """
+ Check whether the given modules were imported, and if requested, ensure
+ they will not be importable in the future.
+
+ @param moduleNames: A list of module names we make sure aren't imported.
+ @type moduleNames: C{list} of C{str}
+
+ @param preventImports: A list of module name whose future imports should
+ be prevented.
+ @type preventImports: C{list} of C{str}
+
+ @param errorMessage: Message to use when raising an C{ImportError}.
+ @type errorMessage: C{str}
+
+ @raise ImportError: with given error message if a given module name
+ has already been imported.
+ """
+ for name in moduleNames:
+ if sys.modules.get(name) is not None:
+ raise ImportError(errorMessage)
+
+ # Disable module imports to avoid potential problems.
+ for name in preventImports:
+ sys.modules[name] = None
+
+
+class GlibWaker(_UnixWaker):
+ """
+ Run scheduled events after waking up.
+ """
+
+ def __init__(self, reactor):
+ super().__init__()
+ self.reactor = reactor
+
+ def doRead(self) -> None:
+ super().doRead()
+ self.reactor._simulate()
+
+
+def _signalGlue():
+ """
+ Integrate glib's wakeup file descriptor usage and our own.
+
+ Python supports only one wakeup file descriptor at a time and both Twisted
+ and glib want to use it.
+
+ This is a context manager that can be wrapped around the whole glib
+ reactor main loop which makes our signal handling work with glib's signal
+ handling.
+ """
+ from gi import _ossighelper as signalGlue # type: ignore[import]
+
+ patcher = MonkeyPatcher()
+ patcher.addPatch(signalGlue, "_wakeup_fd_is_active", True)
+ return patcher
+
+
+def _loopQuitter(
+ idleAdd: Callable[[Callable[[], None]], None], loopQuit: Callable[[], None]
+) -> Callable[[], None]:
+ """
+ Combine the C{glib.idle_add} and C{glib.MainLoop.quit} functions into a
+ function suitable for crashing the reactor.
+ """
+ return lambda: idleAdd(loopQuit)
+
+
+@implementer(IReactorFDSet)
+class GlibReactorBase(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
+ """
+ Base class for GObject event loop reactors.
+
+ Notification for I/O events (reads and writes on file descriptors) is done
+ by the gobject-based event loop. File descriptors are registered with
+ gobject with the appropriate flags for read/write/disconnect notification.
+
+ Time-based events, the results of C{callLater} and C{callFromThread}, are
+ handled differently. Rather than registering each event with gobject, a
+ single gobject timeout is registered for the earliest scheduled event, the
+ output of C{reactor.timeout()}. For example, if there are timeouts in 1, 2
+ and 3.4 seconds, a single timeout is registered for 1 second in the
+ future. When this timeout is hit, C{_simulate} is called, which calls the
+ appropriate Twisted-level handlers, and a new timeout is added to gobject
+ by the C{_reschedule} method.
+
+ To handle C{callFromThread} events, we use a custom waker that calls
+ C{_simulate} whenever it wakes up.
+
+ @ivar _sources: A dictionary mapping L{FileDescriptor} instances to
+ GSource handles.
+
+ @ivar _reads: A set of L{FileDescriptor} instances currently monitored for
+ reading.
+
+ @ivar _writes: A set of L{FileDescriptor} instances currently monitored for
+ writing.
+
+ @ivar _simtag: A GSource handle for the next L{simulate} call.
+ """
+
+ # Install a waker that knows it needs to call C{_simulate} in order to run
+ # callbacks queued from a thread:
+ def _wakerFactory(self) -> GlibWaker:
+ return GlibWaker(self)
+
+ def __init__(self, glib_module: Any, gtk_module: Any, useGtk: bool = False) -> None:
+ self._simtag = None
+ self._reads: Set[IReadDescriptor] = set()
+ self._writes: Set[IWriteDescriptor] = set()
+ self._sources: Dict[FileDescriptor, int] = {}
+ self._glib = glib_module
+
+ self._POLL_DISCONNECTED = (
+ glib_module.IOCondition.HUP
+ | glib_module.IOCondition.ERR
+ | glib_module.IOCondition.NVAL
+ )
+ self._POLL_IN = glib_module.IOCondition.IN
+ self._POLL_OUT = glib_module.IOCondition.OUT
+
+ # glib's iochannel sources won't tell us about any events that we haven't
+ # asked for, even if those events aren't sensible inputs to the poll()
+ # call.
+ self.INFLAGS = self._POLL_IN | self._POLL_DISCONNECTED
+ self.OUTFLAGS = self._POLL_OUT | self._POLL_DISCONNECTED
+
+ super().__init__()
+
+ self._source_remove = self._glib.source_remove
+ self._timeout_add = self._glib.timeout_add
+
+ self.context = self._glib.main_context_default()
+ self._pending = self.context.pending
+ self._iteration = self.context.iteration
+ self.loop = self._glib.MainLoop()
+ self._crash = _loopQuitter(self._glib.idle_add, self.loop.quit)
+ self._run = self.loop.run
+
+ def _reallyStartRunning(self):
+ """
+ Make sure the reactor's signal handlers are installed despite any
+ outside interference.
+ """
+ # First, install SIGINT and friends:
+ super()._reallyStartRunning()
+
+ # Next, since certain versions of gtk will clobber our signal handler,
+ # set all signal handlers again after the event loop has started to
+ # ensure they're *really* set.
+ #
+ # We don't actually know which versions of gtk do this so this might
+ # be obsolete. If so, that would be great and this whole method can
+ # go away. Someone needs to find out, though.
+ #
+ # https://github.com/twisted/twisted/issues/11762
+
+ def reinitSignals():
+ self._signals.uninstall()
+ self._signals.install()
+
+ self.callLater(0, reinitSignals)
+
+ # The input_add function in pygtk1 checks for objects with a
+ # 'fileno' method and, if present, uses the result of that method
+ # as the input source. The pygtk2 input_add does not do this. The
+ # function below replicates the pygtk1 functionality.
+
+ # In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
+ # g_io_add_watch() takes different condition bitfields than
+ # gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
+ # bug.
+ def input_add(self, source, condition, callback):
+ if hasattr(source, "fileno"):
+ # handle python objects
+ def wrapper(ignored, condition):
+ return callback(source, condition)
+
+ fileno = source.fileno()
+ else:
+ fileno = source
+ wrapper = callback
+ return self._glib.io_add_watch(
+ fileno,
+ self._glib.PRIORITY_DEFAULT_IDLE,
+ condition,
+ wrapper,
+ )
+
+ def _ioEventCallback(self, source, condition):
+ """
+ Called by event loop when an I/O event occurs.
+ """
+ log.callWithLogger(source, self._doReadOrWrite, source, source, condition)
+ return True # True = don't auto-remove the source
+
+ def _add(self, source, primary, other, primaryFlag, otherFlag):
+ """
+ Add the given L{FileDescriptor} for monitoring either for reading or
+ writing. If the file is already monitored for the other operation, we
+ delete the previous registration and re-register it for both reading
+ and writing.
+ """
+ if source in primary:
+ return
+ flags = primaryFlag
+ if source in other:
+ self._source_remove(self._sources[source])
+ flags |= otherFlag
+ self._sources[source] = self.input_add(source, flags, self._ioEventCallback)
+ primary.add(source)
+
+ def addReader(self, reader):
+ """
+ Add a L{FileDescriptor} for monitoring of data available to read.
+ """
+ self._add(reader, self._reads, self._writes, self.INFLAGS, self.OUTFLAGS)
+
+ def addWriter(self, writer):
+ """
+ Add a L{FileDescriptor} for monitoring ability to write data.
+ """
+ self._add(writer, self._writes, self._reads, self.OUTFLAGS, self.INFLAGS)
+
+ def getReaders(self):
+ """
+ Retrieve the list of current L{FileDescriptor} monitored for reading.
+ """
+ return list(self._reads)
+
+ def getWriters(self):
+ """
+ Retrieve the list of current L{FileDescriptor} monitored for writing.
+ """
+ return list(self._writes)
+
+ def removeAll(self):
+ """
+ Remove monitoring for all registered L{FileDescriptor}s.
+ """
+ return self._removeAll(self._reads, self._writes)
+
+ def _remove(self, source, primary, other, flags):
+ """
+ Remove monitoring the given L{FileDescriptor} for either reading or
+ writing. If it's still monitored for the other operation, we
+ re-register the L{FileDescriptor} for only that operation.
+ """
+ if source not in primary:
+ return
+ self._source_remove(self._sources[source])
+ primary.remove(source)
+ if source in other:
+ self._sources[source] = self.input_add(source, flags, self._ioEventCallback)
+ else:
+ self._sources.pop(source)
+
+ def removeReader(self, reader):
+ """
+ Stop monitoring the given L{FileDescriptor} for reading.
+ """
+ self._remove(reader, self._reads, self._writes, self.OUTFLAGS)
+
+ def removeWriter(self, writer):
+ """
+ Stop monitoring the given L{FileDescriptor} for writing.
+ """
+ self._remove(writer, self._writes, self._reads, self.INFLAGS)
+
+ def iterate(self, delay=0):
+ """
+ One iteration of the event loop, for trial's use.
+
+ This is not used for actual reactor runs.
+ """
+ self.runUntilCurrent()
+ while self._pending():
+ self._iteration(0)
+
+ def crash(self):
+ """
+ Crash the reactor.
+ """
+ posixbase.PosixReactorBase.crash(self)
+ self._crash()
+
+ def stop(self):
+ """
+ Stop the reactor.
+ """
+ posixbase.PosixReactorBase.stop(self)
+ # The base implementation only sets a flag, to ensure shutting down is
+ # not reentrant. Unfortunately, this flag is not meaningful to the
+ # gobject event loop. We therefore call wakeUp() to ensure the event
+ # loop will call back into Twisted once this iteration is done. This
+ # will result in self.runUntilCurrent() being called, where the stop
+ # flag will trigger the actual shutdown process, eventually calling
+ # crash() which will do the actual gobject event loop shutdown.
+ self.wakeUp()
+
+ def run(self, installSignalHandlers=True):
+ """
+ Run the reactor.
+ """
+ with _signalGlue():
+ self.callWhenRunning(self._reschedule)
+ self.startRunning(installSignalHandlers=installSignalHandlers)
+ if self._started:
+ self._run()
+
+ def callLater(self, *args, **kwargs):
+ """
+ Schedule a C{DelayedCall}.
+ """
+ result = posixbase.PosixReactorBase.callLater(self, *args, **kwargs)
+ # Make sure we'll get woken up at correct time to handle this new
+ # scheduled call:
+ self._reschedule()
+ return result
+
+ def _reschedule(self):
+ """
+ Schedule a glib timeout for C{_simulate}.
+ """
+ if self._simtag is not None:
+ self._source_remove(self._simtag)
+ self._simtag = None
+ timeout = self.timeout()
+ if timeout is not None:
+ self._simtag = self._timeout_add(
+ int(timeout * 1000),
+ self._simulate,
+ priority=self._glib.PRIORITY_DEFAULT_IDLE,
+ )
+
+ def _simulate(self):
+ """
+ Run timers, and then reschedule glib timeout for next scheduled event.
+ """
+ self.runUntilCurrent()
+ self._reschedule()
diff --git a/contrib/python/Twisted/py3/twisted/internet/_idna.py b/contrib/python/Twisted/py3/twisted/internet/_idna.py
new file mode 100644
index 0000000000..852d8a6be8
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_idna.py
@@ -0,0 +1,51 @@
+# -*- test-case-name: twisted.test.test_sslverify -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Shared interface to IDNA encoding and decoding, using the C{idna} PyPI package
+if available, otherwise the stdlib implementation.
+"""
+
+
+def _idnaBytes(text: str) -> bytes:
+ """
+ Convert some text typed by a human into some ASCII bytes.
+
+ This is provided to allow us to use the U{partially-broken IDNA
+ implementation in the standard library <http://bugs.python.org/issue17305>}
+ if the more-correct U{idna <https://pypi.python.org/pypi/idna>} package is
+ not available; C{service_identity} is somewhat stricter about this.
+
+ @param text: A domain name, hopefully.
+ @type text: L{unicode}
+
+ @return: The domain name's IDNA representation, encoded as bytes.
+ @rtype: L{bytes}
+ """
+ try:
+ import idna
+ except ImportError:
+ return text.encode("idna")
+ else:
+ return idna.encode(text)
+
+
+def _idnaText(octets: bytes) -> str:
+ """
+ Convert some IDNA-encoded octets into some human-readable text.
+
+ Currently only used by the tests.
+
+ @param octets: Some bytes representing a hostname.
+ @type octets: L{bytes}
+
+ @return: A human-readable domain name.
+ @rtype: L{unicode}
+ """
+ try:
+ import idna
+ except ImportError:
+ return octets.decode("idna")
+ else:
+ return idna.decode(octets)
diff --git a/contrib/python/Twisted/py3/twisted/internet/_newtls.py b/contrib/python/Twisted/py3/twisted/internet/_newtls.py
new file mode 100644
index 0000000000..5c27f31eb9
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_newtls.py
@@ -0,0 +1,256 @@
+# -*- test-case-name: twisted.test.test_ssl -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module implements memory BIO based TLS support. It is the preferred
+implementation and will be used whenever pyOpenSSL 0.10 or newer is installed
+(whenever L{twisted.protocols.tls} is importable).
+
+@since: 11.1
+"""
+
+
+from zope.interface import directlyProvides
+
+from twisted.internet.abstract import FileDescriptor
+from twisted.internet.interfaces import ISSLTransport
+from twisted.protocols.tls import TLSMemoryBIOFactory
+
+
+class _BypassTLS:
+ """
+ L{_BypassTLS} is used as the transport object for the TLS protocol object
+ used to implement C{startTLS}. Its methods skip any TLS logic which
+ C{startTLS} enables.
+
+ @ivar _base: A transport class L{_BypassTLS} has been mixed in with to which
+ methods will be forwarded. This class is only responsible for sending
+ bytes over the connection, not doing TLS.
+
+ @ivar _connection: A L{Connection} which TLS has been started on which will
+ be proxied to by this object. Any method which has its behavior
+ altered after C{startTLS} will be skipped in favor of the base class's
+ implementation. This allows the TLS protocol object to have direct
+ access to the transport, necessary to actually implement TLS.
+ """
+
+ def __init__(self, base, connection):
+ self._base = base
+ self._connection = connection
+
+ def __getattr__(self, name):
+ """
+ Forward any extra attribute access to the original transport object.
+ For example, this exposes C{getHost}, the behavior of which does not
+ change after TLS is enabled.
+ """
+ return getattr(self._connection, name)
+
+ def write(self, data):
+ """
+ Write some bytes directly to the connection.
+ """
+ return self._base.write(self._connection, data)
+
+ def writeSequence(self, iovec):
+ """
+ Write a some bytes directly to the connection.
+ """
+ return self._base.writeSequence(self._connection, iovec)
+
+ def loseConnection(self, *args, **kwargs):
+ """
+ Close the underlying connection.
+ """
+ return self._base.loseConnection(self._connection, *args, **kwargs)
+
+ def registerProducer(self, producer, streaming):
+ """
+ Register a producer with the underlying connection.
+ """
+ return self._base.registerProducer(self._connection, producer, streaming)
+
+ def unregisterProducer(self):
+ """
+ Unregister a producer with the underlying connection.
+ """
+ return self._base.unregisterProducer(self._connection)
+
+
def startTLS(transport, contextFactory, normal, bypass):
    """
    Add a layer of SSL to a transport.

    @param transport: The transport which will be modified. This can either by
        a L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} or a
        L{FileHandle<twisted.internet.iocpreactor.abstract.FileHandle>}. The
        actual requirements of this instance are that it have:

      - a C{_tlsClientDefault} attribute indicating whether the transport is
        a client (C{True}) or a server (C{False})
      - a settable C{TLS} attribute which can be used to mark the fact
        that SSL has been started
      - settable C{getHandle} and C{getPeerCertificate} attributes so
        these L{ISSLTransport} methods can be added to it
      - a C{protocol} attribute referring to the L{IProtocol} currently
        connected to the transport, which can also be set to a new
        L{IProtocol} for the transport to deliver data to

    @param contextFactory: An SSL context factory defining SSL parameters for
        the new SSL layer.
    @type contextFactory: L{twisted.internet.interfaces.IOpenSSLContextFactory}

    @param normal: A flag indicating whether SSL will go in the same direction
        as the underlying transport goes. That is, if the SSL client will be
        the underlying client and the SSL server will be the underlying server.
        C{True} means it is the same, C{False} means they are switched.
    @type normal: L{bool}

    @param bypass: A transport base class to call methods on to bypass the new
        SSL layer (so that the SSL layer itself can send its bytes).
    @type bypass: L{type}
    """
    # Figure out which direction the SSL goes in. If normal is True,
    # we'll go in the direction indicated by the subclass. Otherwise,
    # we'll go the other way (client = not normal ^ _tlsClientDefault,
    # in other words).
    if normal:
        client = transport._tlsClientDefault
    else:
        client = not transport._tlsClientDefault

    # If we have a producer, unregister it, and then re-register it below once
    # we've switched to TLS mode, so it gets hooked up correctly:
    producer, streaming = None, None
    if transport.producer is not None:
        producer, streaming = transport.producer, transport.streamingProducer
        transport.unregisterProducer()

    # NOTE(review): wrappedFactory is passed as None — only
    # tlsFactory.protocol is used below; confirm against the
    # TLSMemoryBIOFactory constructor contract.
    tlsFactory = TLSMemoryBIOFactory(contextFactory, client, None)
    tlsProtocol = tlsFactory.protocol(tlsFactory, transport.protocol, False)
    # Hook up the new TLS protocol to the transport:
    transport.protocol = tlsProtocol

    # Expose the TLS protocol's accessors on the transport so it can
    # satisfy the ISSLTransport methods added below.
    transport.getHandle = tlsProtocol.getHandle
    transport.getPeerCertificate = tlsProtocol.getPeerCertificate

    # Mark the transport as secure.
    directlyProvides(transport, ISSLTransport)

    # Remember we did this so that write and writeSequence can send the
    # data to the right place.
    transport.TLS = True

    # Hook it up: the TLS protocol writes its raw bytes through a transport
    # that bypasses the (now enabled) TLS layer.
    transport.protocol.makeConnection(_BypassTLS(bypass, transport))

    # Restore producer if necessary:
    if producer:
        transport.registerProducer(producer, streaming)
+
+
class ConnectionMixin:
    """
    A mixin for L{twisted.internet.abstract.FileDescriptor} which adds an
    L{ITLSTransport} implementation.

    @ivar TLS: Whether TLS is currently active on this transport. This is
        an implementation detail; applications should check for
        L{twisted.internet.interfaces.ISSLTransport} instead.
    """

    TLS = False

    def startTLS(self, ctx, normal=True):
        """
        @see: L{ITLSTransport.startTLS}
        """
        startTLS(self, ctx, normal, FileDescriptor)

    def write(self, bytes):
        """
        Write bytes, routing them through the TLS layer when active, and
        silently dropping them once the connection has been lost.
        """
        if not self.TLS:
            FileDescriptor.write(self, bytes)
            return
        if self.connected:
            self.protocol.write(bytes)

    def writeSequence(self, iovec):
        """
        Scatter/gather write, routing the chunks through the TLS layer
        when active, and silently dropping them once the connection has
        been lost.
        """
        if not self.TLS:
            FileDescriptor.writeSequence(self, iovec)
            return
        if self.connected:
            self.protocol.writeSequence(iovec)

    def loseConnection(self):
        """
        Close this connection after writing all pending data, performing a
        TLS shutdown first when TLS has been negotiated.
        """
        if not self.TLS:
            FileDescriptor.loseConnection(self)
            return
        if self.connected and not self.disconnecting:
            self.protocol.loseConnection()

    def registerProducer(self, producer, streaming):
        """
        Register a producer — with the TLS connection when TLS is active.
        """
        if not self.TLS:
            FileDescriptor.registerProducer(self, producer, streaming)
            return
        # Registering a producer before we're connected shouldn't be a
        # problem: any resulting write() is already guarded above, and
        # there are no other potential side-effects.
        self.protocol.registerProducer(producer, streaming)

    def unregisterProducer(self):
        """
        Unregister the producer — from the TLS connection when TLS is
        active.
        """
        if self.TLS:
            self.protocol.unregisterProducer()
        else:
            FileDescriptor.unregisterProducer(self)
+
+
class ClientMixin:
    """
    Marks a L{twisted.internet.tcp.Client} as the client side for the
    purposes of the default TLS handshake.

    @ivar _tlsClientDefault: Always C{True}: when TLS is negotiated this
        transport acts as the TLS client by default.
    """

    _tlsClientDefault = True
+
+
class ServerMixin:
    """
    Marks a L{twisted.internet.tcp.Server} as the server side for the
    purposes of the default TLS handshake.

    @ivar _tlsClientDefault: Always C{False}: when TLS is negotiated this
        transport acts as the TLS server by default.
    """

    _tlsClientDefault = False
diff --git a/contrib/python/Twisted/py3/twisted/internet/_pollingfile.py b/contrib/python/Twisted/py3/twisted/internet/_pollingfile.py
new file mode 100644
index 0000000000..758a4cecb7
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_pollingfile.py
@@ -0,0 +1,291 @@
+# -*- test-case-name: twisted.internet.test.test_pollingfile -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Implements a simple polling interface for file descriptors that don't work with
+select() - this is pretty much only useful on Windows.
+"""
+
+
+from zope.interface import implementer
+
+from twisted.internet.interfaces import IConsumer, IPushProducer
+
# Adaptive polling interval bounds (seconds): _PollingTimer shrinks the
# delay (down to MIN_TIMEOUT) while resources report work, and doubles it
# (up to MAX_TIMEOUT) while they are idle.
MIN_TIMEOUT = 0.000000001
MAX_TIMEOUT = 0.1
+
+
+class _PollableResource:
+ active = True
+
+ def activate(self):
+ self.active = True
+
+ def deactivate(self):
+ self.active = False
+
+
class _PollingTimer:
    # Everything is private here because it is really an implementation
    # detail: adaptively polls a set of _PollableResource objects, backing
    # off while they are idle and speeding up while they report work.

    def __init__(self, reactor):
        self.reactor = reactor
        self._resources = []
        self._pollTimer = None
        self._currentTimeout = MAX_TIMEOUT
        self._paused = False

    def _addPollableResource(self, res):
        """Track a new resource and start polling if anything is active."""
        self._resources.append(res)
        self._checkPollingState()

    def _checkPollingState(self):
        """Start polling if any resource is active, otherwise stop."""
        if any(res.active for res in self._resources):
            self._startPolling()
        else:
            self._stopPolling()

    def _startPolling(self):
        if self._pollTimer is None:
            self._pollTimer = self._reschedule()

    def _stopPolling(self):
        if self._pollTimer is not None:
            self._pollTimer.cancel()
            self._pollTimer = None

    def _pause(self):
        self._paused = True

    def _unpause(self):
        self._paused = False
        self._checkPollingState()

    def _reschedule(self):
        """Arrange the next poll (unless paused); return the delayed call."""
        if self._paused:
            return None
        return self.reactor.callLater(self._currentTimeout, self._pollEvent)

    def _pollEvent(self):
        """Poll each active resource and adapt the polling interval."""
        unitsOfWork = 0.0
        stillActive = []
        for res in self._resources:
            if res.active:
                unitsOfWork += res.checkWork()
                # Re-check AFTER the work: checkWork() may have
                # deactivated the resource.
                if res.active:
                    stillActive.append(res)

        if unitsOfWork:
            # Work was done: poll faster, proportionally to how much.
            nextTimeout = max(
                self._currentTimeout / (unitsOfWork + 1.0), MIN_TIMEOUT
            )
        else:
            # Idle: back off exponentially, up to the cap.
            nextTimeout = min(self._currentTimeout * 2.0, MAX_TIMEOUT)
        self._currentTimeout = nextTimeout

        if stillActive:
            self._pollTimer = self._reschedule()
+
+
+# If we ever (let's hope not) need the above functionality on UNIX, this could
+# be factored into a different module.
+
+import pywintypes # type: ignore[import]
+import win32api # type: ignore[import]
+import win32file # type: ignore[import]
+import win32pipe # type: ignore[import]
+
+
@implementer(IPushProducer)
class _PollableReadPipe(_PollableResource):
    """
    Polls a win32 pipe handle for readable data and delivers it to a
    callback; implements L{IPushProducer} so reading can be paused and
    resumed.
    """

    def __init__(self, pipe, receivedCallback, lostCallback):
        # security attributes for pipes
        self.pipe = pipe
        # Called with each chunk of bytes read from the pipe.
        self.receivedCallback = receivedCallback
        # Called once when the pipe is lost.
        self.lostCallback = lostCallback

    def checkWork(self):
        """
        Drain all currently-available data from the pipe and deliver it.

        @return: the number of bytes read — the "work units" used by
            C{_PollingTimer} to adapt its polling interval.
        """
        finished = 0
        fullDataRead = []

        while 1:
            try:
                # Peek first to learn how much can be read without blocking.
                buffer, bytesToRead, result = win32pipe.PeekNamedPipe(self.pipe, 1)
                # finished = (result == -1)
                if not bytesToRead:
                    break
                hr, data = win32file.ReadFile(self.pipe, bytesToRead, None)
                fullDataRead.append(data)
            except win32api.error:
                # Any win32 error is treated as end-of-pipe.
                finished = 1
                break

        dataBuf = b"".join(fullDataRead)
        if dataBuf:
            self.receivedCallback(dataBuf)
        if finished:
            self.cleanup()
        return len(dataBuf)

    def cleanup(self):
        """Stop polling this pipe and report the connection as lost."""
        self.deactivate()
        self.lostCallback()

    def close(self):
        """Close the pipe handle, tolerating handles that refuse to close."""
        try:
            win32api.CloseHandle(self.pipe)
        except pywintypes.error:
            # You can't close std handles...?
            pass

    # IPushProducer: stopping closes the pipe; pause/resume toggle polling.

    def stopProducing(self):
        self.close()

    def pauseProducing(self):
        self.deactivate()

    def resumeProducing(self):
        self.activate()
+
+
+FULL_BUFFER_SIZE = 64 * 1024
+
+
@implementer(IConsumer)
class _PollableWritePipe(_PollableResource):
    """
    Polls a win32 pipe handle, flushing an internal queue of bytes to it;
    implements L{IConsumer} so a producer can feed the queue and be paused
    and resumed as the buffer fills and drains.
    """

    def __init__(self, writePipe, lostCallback):
        self.disconnecting = False
        self.producer = None
        # True while the registered producer has been asked to pause
        # because the out-queue exceeded FULL_BUFFER_SIZE; cleared in
        # bufferEmpty() once the queue drains.
        self.producerPaused = False
        self.streamingProducer = 0
        self.outQueue = []
        self.writePipe = writePipe
        self.lostCallback = lostCallback
        try:
            # Non-blocking mode so WriteFile() below will not hang.
            win32pipe.SetNamedPipeHandleState(
                writePipe, win32pipe.PIPE_NOWAIT, None, None
            )
        except pywintypes.error:
            # Maybe it's an invalid handle. Who knows.
            pass

    def close(self):
        """
        Request disconnection; the pipe is actually closed by checkWork()
        once the out-queue has drained.
        """
        self.disconnecting = True

    def bufferFull(self):
        """
        The out-queue exceeded the limit: pause our producer, if any.
        """
        if self.producer is not None:
            self.producerPaused = True
            self.producer.pauseProducing()

    def bufferEmpty(self):
        """
        The out-queue drained: resume a paused streaming producer, or ask a
        pull (non-streaming) producer for more data.

        @return: C{True} if a producer was resumed, C{False} otherwise.
        """
        if self.producer is not None and (
            (not self.streamingProducer) or self.producerPaused
        ):
            # Bug fix: reset our own pause flag. The previous code set a
            # 'producerPaused' attribute on the producer object instead,
            # which left self.producerPaused stuck at True after the first
            # pause — so every later drain resumed the producer even when
            # it had not been paused.
            self.producerPaused = False
            self.producer.resumeProducing()
            return True
        return False

    # almost-but-not-quite-exact copy-paste from abstract.FileDescriptor... ugh

    def registerProducer(self, producer, streaming):
        """Register to receive data from a producer.

        This sets this selectable to be a consumer for a producer. When this
        selectable runs out of data on a write() call, it will ask the producer
        to resumeProducing(). A producer should implement the IProducer
        interface.

        FileDescriptor provides some infrastructure for producer methods.
        """
        if self.producer is not None:
            raise RuntimeError(
                "Cannot register producer %s, because producer %s was never "
                "unregistered." % (producer, self.producer)
            )
        if not self.active:
            # The pipe is already gone; tell the producer to give up.
            producer.stopProducing()
        else:
            self.producer = producer
            self.streamingProducer = streaming
            if not streaming:
                # Pull producers must be asked explicitly for data.
                producer.resumeProducing()

    def unregisterProducer(self):
        """Stop consuming data from a producer, without disconnecting."""
        self.producer = None

    def writeConnectionLost(self):
        """
        Stop polling, close the pipe handle, and notify the lost-callback.
        """
        self.deactivate()
        try:
            win32api.CloseHandle(self.writePipe)
        except pywintypes.error:
            # OMG what
            pass
        self.lostCallback()

    def writeSequence(self, seq):
        """
        Append a C{list} or C{tuple} of bytes to the output buffer.

        @param seq: C{list} or C{tuple} of C{str} instances to be appended to
            the output buffer.

        @raise TypeError: If C{seq} contains C{unicode}.
        """
        if str in map(type, seq):
            raise TypeError("Unicode not allowed in output buffer.")
        self.outQueue.extend(seq)

    def write(self, data):
        """
        Append some bytes to the output buffer.

        @param data: C{str} to be appended to the output buffer.
        @type data: C{str}.

        @raise TypeError: If C{data} is C{unicode} instead of C{str}.
        """
        if isinstance(data, str):
            raise TypeError("Unicode not allowed in output buffer.")
        if self.disconnecting:
            # Writes after close() are silently dropped.
            return
        self.outQueue.append(data)
        if sum(map(len, self.outQueue)) > FULL_BUFFER_SIZE:
            self.bufferFull()

    def checkWork(self):
        """
        Flush as much of the out-queue to the pipe as it will accept.

        @return: the number of bytes written — the "work units" used by
            C{_PollingTimer} to adapt its polling interval.
        """
        numBytesWritten = 0
        if not self.outQueue:
            if self.disconnecting:
                self.writeConnectionLost()
                return 0
            try:
                # Probe with a zero-byte write to detect a closed pipe.
                win32file.WriteFile(self.writePipe, b"", None)
            except pywintypes.error:
                self.writeConnectionLost()
                return numBytesWritten
        while self.outQueue:
            data = self.outQueue.pop(0)
            errCode = 0
            try:
                errCode, nBytesWritten = win32file.WriteFile(self.writePipe, data, None)
            except win32api.error:
                self.writeConnectionLost()
                break
            else:
                # assert not errCode, "wtf an error code???"
                numBytesWritten += nBytesWritten
                if len(data) > nBytesWritten:
                    # Partial write: requeue the remainder and stop for now.
                    self.outQueue.insert(0, data[nBytesWritten:])
                    break
        else:
            # Queue fully drained without an error or partial write.
            resumed = self.bufferEmpty()
            if not resumed and self.disconnecting:
                self.writeConnectionLost()
        return numBytesWritten
diff --git a/contrib/python/Twisted/py3/twisted/internet/_posixserialport.py b/contrib/python/Twisted/py3/twisted/internet/_posixserialport.py
new file mode 100644
index 0000000000..636aefa170
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_posixserialport.py
@@ -0,0 +1,81 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+Serial Port Protocol
+"""
+
+
+# dependent on pyserial ( http://pyserial.sf.net/ )
+# only tested w/ 1.18 (5 Dec 2002)
+from serial import PARITY_NONE # type: ignore[import]
+from serial import EIGHTBITS, STOPBITS_ONE
+
+from twisted.internet import abstract, fdesc
+from twisted.internet.serialport import BaseSerialPort
+
+
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """
    A select()able serial device, acting as a transport.
    """

    # The port is open (connected) from construction onward.
    connected = 1

    def __init__(
        self,
        protocol,
        deviceNameOrPortNumber,
        reactor,
        baudrate=9600,
        bytesize=EIGHTBITS,
        parity=PARITY_NONE,
        stopbits=STOPBITS_ONE,
        timeout=0,
        xonxoff=0,
        rtscts=0,
    ):
        """
        Open the serial device and connect C{protocol} to it.

        @param protocol: the protocol that will receive the serial data.
        @param deviceNameOrPortNumber: device path or port index, passed
            straight to the pyserial factory.
        @param reactor: the reactor to register read events with.

        The remaining parameters mirror pyserial's C{Serial} constructor.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        self._serial = self._serialFactory(
            deviceNameOrPortNumber,
            baudrate=baudrate,
            bytesize=bytesize,
            parity=parity,
            stopbits=stopbits,
            timeout=timeout,
            xonxoff=xonxoff,
            rtscts=rtscts,
        )
        self.reactor = reactor
        # Discard any stale data sitting in the device buffers.
        self.flushInput()
        self.flushOutput()
        self.protocol = protocol
        self.protocol.makeConnection(self)
        self.startReading()

    def fileno(self):
        # Expose the underlying file descriptor for select()/poll().
        return self._serial.fd

    def writeSomeData(self, data):
        """
        Write some data to the serial device.
        """
        return fdesc.writeToFD(self.fileno(), data)

    def doRead(self):
        """
        Some data's readable from serial device.
        """
        return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)
diff --git a/contrib/python/Twisted/py3/twisted/internet/_posixstdio.py b/contrib/python/Twisted/py3/twisted/internet/_posixstdio.py
new file mode 100644
index 0000000000..b7ef9cdac3
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_posixstdio.py
@@ -0,0 +1,178 @@
+# -*- test-case-name: twisted.test.test_stdio -*-
+
+"""Standard input/out/err support.
+
+Future Plans::
+
+ support for stderr, perhaps
+ Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
+ protocols to transport.
+
+Maintainer: James Y Knight
+"""
+
+from zope.interface import implementer
+
+from twisted.internet import error, interfaces, process
+from twisted.python import failure, log
+
+
@implementer(interfaces.IAddress)
class PipeAddress:
    """
    The address of a stdio pipe endpoint; carries no peer information.
    """

    pass
+
+
@implementer(
    interfaces.ITransport,
    interfaces.IProducer,
    interfaces.IConsumer,
    interfaces.IHalfCloseableDescriptor,
)
class StandardIO:
    """
    A transport connecting a protocol to this process's stdin/stdout file
    descriptors, built on ProcessReader/ProcessWriter.
    """

    # Reader/writer halves; None once the corresponding side has closed.
    _reader = None
    _writer = None
    # disconnected: the transport is fully gone.
    # disconnecting: loseConnection() has been requested.
    disconnected = False
    disconnecting = False

    def __init__(self, proto, stdin=0, stdout=1, reactor=None):
        """
        Connect C{proto} to the given file descriptors.

        @param proto: the protocol instance to drive.
        @param stdin: descriptor to read from (default 0).
        @param stdout: descriptor to write to (default 1).
        @param reactor: reactor to use; the global reactor when C{None}.
        """
        if reactor is None:
            from twisted.internet import reactor
        self.protocol = proto

        self._writer = process.ProcessWriter(reactor, self, "write", stdout)
        self._reader = process.ProcessReader(reactor, self, "read", stdin)
        self._reader.startReading()
        self.protocol.makeConnection(self)

    # ITransport

    # XXX Actually, see #3597.
    def loseWriteConnection(self):
        """Close only the writing side, after pending data is flushed."""
        if self._writer is not None:
            self._writer.loseConnection()

    def write(self, data):
        """Write C{data} to stdout; dropped if writing has closed."""
        if self._writer is not None:
            self._writer.write(data)

    def writeSequence(self, data):
        """Write a sequence of byte chunks to stdout."""
        if self._writer is not None:
            self._writer.writeSequence(data)

    def loseConnection(self):
        """Shut down both halves."""
        self.disconnecting = True

        if self._writer is not None:
            self._writer.loseConnection()
        if self._reader is not None:
            # Don't loseConnection, because we don't want to SIGPIPE it.
            self._reader.stopReading()

    def getPeer(self):
        """Return the (information-free) pipe peer address."""
        return PipeAddress()

    def getHost(self):
        """Return the (information-free) pipe host address."""
        return PipeAddress()

    # Callbacks from process.ProcessReader/ProcessWriter
    def childDataReceived(self, fd, data):
        """Data arrived on stdin: hand it to the protocol."""
        self.protocol.dataReceived(data)

    def childConnectionLost(self, fd, reason):
        """
        One half closed: a clean close becomes a half-close notification;
        any other reason tears down the whole transport.
        """
        if self.disconnected:
            return

        if reason.value.__class__ == error.ConnectionDone:
            # Normal close
            if fd == "read":
                self._readConnectionLost(reason)
            else:
                self._writeConnectionLost(reason)
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """
        Tear everything down exactly once: detach and close both halves,
        then notify the protocol (its errors are logged, not raised).
        """
        self.disconnected = True

        # Make sure to cleanup the other half
        _reader = self._reader
        _writer = self._writer
        protocol = self.protocol
        self._reader = self._writer = None
        self.protocol = None

        if _writer is not None and not _writer.disconnected:
            _writer.connectionLost(reason)

        if _reader is not None and not _reader.disconnected:
            _reader.connectionLost(reason)

        try:
            protocol.connectionLost(reason)
        except BaseException:
            log.err()

    def _writeConnectionLost(self, reason):
        """Writing side closed; half-close the protocol if it supports it."""
        self._writer = None
        if self.disconnecting:
            self.connectionLost(reason)
            return

        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except BaseException:
                log.err()
                self.connectionLost(failure.Failure())

    def _readConnectionLost(self, reason):
        """Reading side closed; half-close, or full close if unsupported."""
        self._reader = None
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except BaseException:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)

    # IConsumer
    def registerProducer(self, producer, streaming):
        """Forward producer registration to the writer half."""
        if self._writer is None:
            producer.stopProducing()
        else:
            self._writer.registerProducer(producer, streaming)

    def unregisterProducer(self):
        """Forward producer unregistration to the writer half."""
        if self._writer is not None:
            self._writer.unregisterProducer()

    # IProducer
    def stopProducing(self):
        self.loseConnection()

    def pauseProducing(self):
        if self._reader is not None:
            self._reader.pauseProducing()

    def resumeProducing(self):
        if self._reader is not None:
            self._reader.resumeProducing()

    def stopReading(self):
        """Compatibility only, don't use. Call pauseProducing."""
        self.pauseProducing()

    def startReading(self):
        """Compatibility only, don't use. Call resumeProducing."""
        self.resumeProducing()

    def readConnectionLost(self, reason):
        # L{IHalfCloseableDescriptor.readConnectionLost}
        raise NotImplementedError()

    def writeConnectionLost(self, reason):
        # L{IHalfCloseableDescriptor.writeConnectionLost}
        raise NotImplementedError()
diff --git a/contrib/python/Twisted/py3/twisted/internet/_producer_helpers.py b/contrib/python/Twisted/py3/twisted/internet/_producer_helpers.py
new file mode 100644
index 0000000000..c2136e0509
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_producer_helpers.py
@@ -0,0 +1,124 @@
+# -*- test-case-name: twisted.test.test_producer_helpers -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Helpers for working with producers.
+"""
+
+from typing import List
+
+from zope.interface import implementer
+
+from twisted.internet.interfaces import IPushProducer
+from twisted.internet.task import cooperate
+from twisted.python import log
+from twisted.python.reflect import safe_str
+
+# This module exports nothing public, it's for internal Twisted use only.
+__all__: List[str] = []
+
+
@implementer(IPushProducer)
class _PullToPush:
    """
    An adapter that converts a non-streaming to a streaming producer.

    Because of limitations of the producer API, this adapter requires the
    cooperation of the consumer. When the consumer's C{registerProducer} is
    called with a non-streaming producer, it must wrap it with L{_PullToPush}
    and then call C{startStreaming} on the resulting object. When the
    consumer's C{unregisterProducer} is called, it must call
    C{stopStreaming} on the L{_PullToPush} instance.

    If the underlying producer throws an exception from C{resumeProducing},
    the producer will be unregistered from the consumer.

    @ivar _producer: the underling non-streaming producer.

    @ivar _consumer: the consumer with which the underlying producer was
        registered.

    @ivar _finished: C{bool} indicating whether the producer has finished.

    @ivar _coopTask: the result of calling L{cooperate}, the task driving the
        streaming producer.
    """

    # Set once streaming has stopped, so stopStreaming() is idempotent.
    _finished = False

    def __init__(self, pullProducer, consumer):
        self._producer = pullProducer
        self._consumer = consumer

    def _pull(self):
        """
        A generator that calls C{resumeProducing} on the underlying producer
        forever.

        If C{resumeProducing} throws an exception, the producer is
        unregistered, which should result in streaming stopping.
        """
        while True:
            try:
                self._producer.resumeProducing()
            except BaseException:
                log.err(
                    None,
                    "%s failed, producing will be stopped:"
                    % (safe_str(self._producer),),
                )
                try:
                    self._consumer.unregisterProducer()
                    # The consumer should now call stopStreaming() on us,
                    # thus stopping the streaming.
                except BaseException:
                    # Since the consumer blew up, we may not have had
                    # stopStreaming() called, so we just stop on our own:
                    log.err(
                        None,
                        "%s failed to unregister producer:"
                        % (safe_str(self._consumer),),
                    )
                    self._finished = True
                    return
            # Yield control back to the cooperator between pulls.
            yield None

    def startStreaming(self):
        """
        This should be called by the consumer when the producer is registered.

        Start streaming data to the consumer.
        """
        self._coopTask = cooperate(self._pull())

    def stopStreaming(self):
        """
        This should be called by the consumer when the producer is
        unregistered.

        Stop streaming data to the consumer.
        """
        if self._finished:
            return
        self._finished = True
        self._coopTask.stop()

    def pauseProducing(self):
        """
        @see: C{IPushProducer.pauseProducing}
        """
        self._coopTask.pause()

    def resumeProducing(self):
        """
        @see: C{IPushProducer.resumeProducing}
        """
        self._coopTask.resume()

    def stopProducing(self):
        """
        @see: C{IPushProducer.stopProducing}
        """
        self.stopStreaming()
        self._producer.stopProducing()
diff --git a/contrib/python/Twisted/py3/twisted/internet/_resolver.py b/contrib/python/Twisted/py3/twisted/internet/_resolver.py
new file mode 100644
index 0000000000..f4a56b4808
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_resolver.py
@@ -0,0 +1,342 @@
+# -*- test-case-name: twisted.internet.test.test_resolver -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+IPv6-aware hostname resolution.
+
+@see: L{IHostnameResolver}
+"""
+
+
+from socket import (
+ AF_INET,
+ AF_INET6,
+ AF_UNSPEC,
+ SOCK_DGRAM,
+ SOCK_STREAM,
+ AddressFamily,
+ SocketKind,
+ gaierror,
+ getaddrinfo,
+)
+from typing import (
+ TYPE_CHECKING,
+ Callable,
+ List,
+ NoReturn,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+
+from zope.interface import implementer
+
+from twisted.internet._idna import _idnaBytes
+from twisted.internet.address import IPv4Address, IPv6Address
+from twisted.internet.defer import Deferred
+from twisted.internet.error import DNSLookupError
+from twisted.internet.interfaces import (
+ IAddress,
+ IHostnameResolver,
+ IHostResolution,
+ IReactorThreads,
+ IResolutionReceiver,
+ IResolverSimple,
+)
+from twisted.internet.threads import deferToThreadPool
+from twisted.logger import Logger
+from twisted.python.compat import nativeString
+
+if TYPE_CHECKING:
+ from twisted.python.threadpool import ThreadPool
+
+
@implementer(IHostResolution)
class HostResolution:
    """
    Represents the in-progress resolution of a given hostname.

    @ivar name: the hostname being resolved.
    """

    def __init__(self, name: str):
        """
        Record the hostname this resolution is for.
        """
        self.name = name

    def cancel(self) -> NoReturn:
        # IHostResolution.cancel — cancellation is not supported by this
        # implementation.
        raise NotImplementedError()
+
+
# Shorthand for "any address family": both IPv4 and IPv6 results wanted.
_any = frozenset([IPv4Address, IPv6Address])

# Maps the set of requested address types to a getaddrinfo() family.
_typesToAF = {
    frozenset([IPv4Address]): AF_INET,
    frozenset([IPv6Address]): AF_INET6,
    _any: AF_UNSPEC,
}

# Inverse mapping: getaddrinfo() family back to the address class.
_afToType = {
    AF_INET: IPv4Address,
    AF_INET6: IPv6Address,
}

# Maps transport-semantics names to socket kinds for getaddrinfo().
_transportToSocket = {
    "TCP": SOCK_STREAM,
    "UDP": SOCK_DGRAM,
}

# Inverse mapping: socket kind back to the transport name used when
# constructing IPv4Address/IPv6Address results.
_socktypeToType = {
    SOCK_STREAM: "TCP",
    SOCK_DGRAM: "UDP",
}


# Static type of getaddrinfo()'s return value: a list of
# (family, kind, proto, canonname, sockaddr) tuples, where sockaddr is a
# 2-tuple for IPv4 and a 4-tuple for IPv6.
_GETADDRINFO_RESULT = List[
    Tuple[
        AddressFamily,
        SocketKind,
        int,
        str,
        Union[Tuple[str, int], Tuple[str, int, int, int]],
    ]
]
+
+
@implementer(IHostnameResolver)
class GAIResolver:
    """
    L{IHostnameResolver} implementation that resolves hostnames by calling
    L{getaddrinfo} in a thread.
    """

    def __init__(
        self,
        reactor: IReactorThreads,
        getThreadPool: Optional[Callable[[], "ThreadPool"]] = None,
        getaddrinfo: Callable[[str, int, int, int], _GETADDRINFO_RESULT] = getaddrinfo,
    ):
        """
        Create a L{GAIResolver}.

        @param reactor: the reactor to schedule result-delivery on
        @type reactor: L{IReactorThreads}

        @param getThreadPool: a function to retrieve the thread pool to use for
            scheduling name resolutions. If not supplied, the use the given
            C{reactor}'s thread pool.
        @type getThreadPool: 0-argument callable returning a
            L{twisted.python.threadpool.ThreadPool}

        @param getaddrinfo: a reference to the L{getaddrinfo} to use - mainly
            parameterized for testing.
        @type getaddrinfo: callable with the same signature as L{getaddrinfo}
        """
        self._reactor = reactor
        self._getThreadPool = (
            reactor.getThreadPool if getThreadPool is None else getThreadPool
        )
        self._getaddrinfo = getaddrinfo

    def resolveHostName(
        self,
        resolutionReceiver: IResolutionReceiver,
        hostName: str,
        portNumber: int = 0,
        addressTypes: Optional[Sequence[Type[IAddress]]] = None,
        transportSemantics: str = "TCP",
    ) -> IHostResolution:
        """
        See L{IHostnameResolver.resolveHostName}

        @param resolutionReceiver: see interface

        @param hostName: see interface

        @param portNumber: see interface

        @param addressTypes: see interface

        @param transportSemantics: see interface

        @return: see interface
        """
        pool = self._getThreadPool()
        # Translate the requested address types / transport semantics into
        # getaddrinfo() family and socket-kind arguments.
        addressFamily = _typesToAF[
            _any if addressTypes is None else frozenset(addressTypes)
        ]
        socketType = _transportToSocket[transportSemantics]

        def get() -> _GETADDRINFO_RESULT:
            # Runs in the thread pool; a DNS failure is reported as "no
            # addresses" rather than as an error.
            try:
                return self._getaddrinfo(
                    hostName, portNumber, addressFamily, socketType
                )
            except gaierror:
                return []

        d = deferToThreadPool(self._reactor, pool, get)
        resolution = HostResolution(hostName)
        resolutionReceiver.resolutionBegan(resolution)

        @d.addCallback
        def deliverResults(result: _GETADDRINFO_RESULT) -> None:
            for family, socktype, proto, cannoname, sockaddr in result:
                addrType = _afToType[family]
                # NOTE(review): IPv6 sockaddrs are 4-tuples
                # (host, port, flowinfo, scopeid); this assumes IPv6Address
                # accepts the extra positional fields — confirm.
                resolutionReceiver.addressResolved(
                    addrType(_socktypeToType.get(socktype, "TCP"), *sockaddr)
                )
            resolutionReceiver.resolutionComplete()

        return resolution
+
+
@implementer(IHostnameResolver)
class SimpleResolverComplexifier:
    """
    A converter from L{IResolverSimple} to L{IHostnameResolver}.
    """

    _log = Logger()

    def __init__(self, simpleResolver: IResolverSimple):
        """
        Construct a L{SimpleResolverComplexifier} with an L{IResolverSimple}.
        """
        self._simpleResolver = simpleResolver

    def resolveHostName(
        self,
        resolutionReceiver: IResolutionReceiver,
        hostName: str,
        portNumber: int = 0,
        addressTypes: Optional[Sequence[Type[IAddress]]] = None,
        transportSemantics: str = "TCP",
    ) -> IHostResolution:
        """
        See L{IHostnameResolver.resolveHostName}.

        @param resolutionReceiver: see interface
        @param hostName: see interface
        @param portNumber: see interface
        @param addressTypes: see interface
        @param transportSemantics: see interface

        @return: see interface
        """
        # If it's str, we need to make sure that it's just ASCII.
        try:
            encoded = hostName.encode("ascii")
        except UnicodeEncodeError:
            # If it's not just ASCII, IDNA it.  We don't want to give a
            # Unicode string with non-ASCII in it to Python 3, as if anyone
            # passes that to a Python 3 stdlib function, it will probably use
            # the wrong IDNA version and break absolutely everything.
            encoded = _idnaBytes(hostName)

        # Make sure it's passed down as a native str, to maintain the
        # interface.
        hostName = nativeString(encoded)

        resolution = HostResolution(hostName)
        resolutionReceiver.resolutionBegan(resolution)

        def deliverAddress(address):
            resolutionReceiver.addressResolved(
                IPv4Address("TCP", address, portNumber)
            )

        def swallowExpectedErrors(error):
            # A lookup miss is normal; anything else is worth logging.
            if not error.check(DNSLookupError):
                self._log.failure(
                    "while looking up {name} with {resolver}",
                    error,
                    name=hostName,
                    resolver=self._simpleResolver,
                )

        def finish(ignored):
            resolutionReceiver.resolutionComplete()

        d = self._simpleResolver.getHostByName(hostName)
        d.addCallback(deliverAddress)
        d.addErrback(swallowExpectedErrors)
        d.addCallback(finish)
        return resolution
+
+
@implementer(IResolutionReceiver)
class FirstOneWins:
    """
    An L{IResolutionReceiver} which fires a L{Deferred} with its first result.
    """

    def __init__(self, deferred: "Deferred[str]"):
        """
        @param deferred: The L{Deferred} fired with the first address that
            arrives (or failed with L{DNSLookupError} if none arrive).
        """
        self._deferred = deferred
        self._resolved = False

    def resolutionBegan(self, resolution: IHostResolution) -> None:
        """
        See L{IResolutionReceiver.resolutionBegan}

        @param resolution: See L{IResolutionReceiver.resolutionBegan}
        """
        self._resolution = resolution

    def addressResolved(self, address: IAddress) -> None:
        """
        See L{IResolutionReceiver.addressResolved}

        @param address: See L{IResolutionReceiver.addressResolved}
        """
        if not self._resolved:
            self._resolved = True
            # This is used by ComplexResolverSimplifier which specifies only
            # results of IPv4Address.
            assert isinstance(address, IPv4Address)
            self._deferred.callback(address.host)

    def resolutionComplete(self) -> None:
        """
        See L{IResolutionReceiver.resolutionComplete}
        """
        if not self._resolved:
            self._deferred.errback(DNSLookupError(self._resolution.name))
+
+
@implementer(IResolverSimple)
class ComplexResolverSimplifier:
    """
    A converter from L{IHostnameResolver} to L{IResolverSimple}
    """

    def __init__(self, nameResolver: IHostnameResolver):
        """
        Create a L{ComplexResolverSimplifier} with an L{IHostnameResolver}.

        @param nameResolver: The L{IHostnameResolver} to use.
        """
        self._nameResolver = nameResolver

    def getHostByName(self, name: str, timeouts: Sequence[int] = ()) -> "Deferred[str]":
        """
        See L{IResolverSimple.getHostByName}

        @param name: see L{IResolverSimple.getHostByName}

        @param timeouts: see L{IResolverSimple.getHostByName}

        @return: see L{IResolverSimple.getHostByName}
        """
        outcome: "Deferred[str]" = Deferred()
        receiver = FirstOneWins(outcome)
        # Only IPv4 results are requested, matching FirstOneWins' assumption.
        self._nameResolver.resolveHostName(receiver, name, 0, [IPv4Address])
        return outcome
diff --git a/contrib/python/Twisted/py3/twisted/internet/_signals.py b/contrib/python/Twisted/py3/twisted/internet/_signals.py
new file mode 100644
index 0000000000..fa878f6cba
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_signals.py
@@ -0,0 +1,445 @@
+# -*- test-case-name: twisted.internet.test.test_sigchld -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module is used to integrate child process termination into a
+reactor event loop. This is a challenging feature to provide because
+most platforms indicate process termination via SIGCHLD and do not
+provide a way to wait for that signal and arbitrary I/O events at the
+same time. The naive implementation involves installing a Python
+SIGCHLD handler; unfortunately this leads to other syscalls being
+interrupted (whenever SIGCHLD is received) and failing with EINTR
+(which almost no one is prepared to handle). This interruption can be
+disabled via siginterrupt(2) (or one of the equivalent mechanisms);
+however, if the SIGCHLD is delivered by the platform to a non-main
+thread (not a common occurrence, but difficult to prove impossible),
+the main thread (waiting on select() or another event notification
+API) may not wake up leading to an arbitrary delay before the child
+termination is noticed.
+
+The basic solution to all these issues involves enabling SA_RESTART (ie,
+disabling system call interruption) and registering a C signal handler which
+writes a byte to a pipe. The other end of the pipe is registered with the
+event loop, allowing it to wake up shortly after SIGCHLD is received. See
+L{_SIGCHLDWaker} for the implementation of the event loop side of this
+solution. The use of a pipe this way is known as the U{self-pipe
+trick<http://cr.yp.to/docs/selfpipe.html>}.
+
+From Python version 2.6, C{signal.siginterrupt} and C{signal.set_wakeup_fd}
+provide the necessary C signal handler which writes to the pipe to be
+registered with C{SA_RESTART}.
+"""
+
+from __future__ import annotations
+
+import contextlib
+import errno
+import os
+import signal
+import socket
+from types import FrameType
+from typing import Callable, Optional, Sequence
+
+from zope.interface import Attribute, Interface, implementer
+
+from attrs import define, frozen
+from typing_extensions import Protocol, TypeAlias
+
+from twisted.internet.interfaces import IReadDescriptor
+from twisted.python import failure, log, util
+from twisted.python.runtime import platformType
+
+if platformType == "posix":
+ from . import fdesc, process
+
+SignalHandler: TypeAlias = Callable[[int, Optional[FrameType]], None]
+
+
def installHandler(fd: int) -> int:
    """
    Install a signal handler which will write a byte to C{fd} when
    I{SIGCHLD} is received.

    A no-op Python handler is installed for I{SIGCHLD} (so the signal is
    actually delivered), the handler is marked as non-interrupting via
    C{signal.siginterrupt}, and the byte-writing itself is delegated to
    L{signal.set_wakeup_fd}.

    @param fd: The file descriptor to which to write when I{SIGCHLD} is
        received, or C{-1} to restore the default disposition.

    @return: The file descriptor previously configured for this use.
    """
    if fd == -1:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    else:

        def doNothing(*ignored: object) -> None:
            pass

        signal.signal(signal.SIGCHLD, doNothing)
        signal.siginterrupt(signal.SIGCHLD, False)
    return signal.set_wakeup_fd(fd)
+
+
def isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.

    @return: C{True} if the current I{SIGCHLD} disposition is C{SIG_DFL},
        C{False} otherwise.
    """
    return signal.SIG_DFL == signal.getsignal(signal.SIGCHLD)
+
+
class SignalHandling(Protocol):
    """
    The L{SignalHandling} protocol enables customizable signal-handling
    behaviors for reactors.

    Conforming values expose install and uninstall hooks which a reactor
    invokes at the appropriate times to apply, and later revert, the
    (typically process-global) state needed to deal with signals.
    """

    def install(self) -> None:
        """
        Apply the signal handlers.
        """

    def uninstall(self) -> None:
        """
        Restore signal handlers to their original state.
        """
+
+
@frozen
class _WithoutSignalHandling:
    """
    A L{SignalHandling} implementation that does no signal handling.

    This is the implementation of C{installSignalHandlers=False}.
    """

    def install(self) -> None:
        """
        A no-op: install no handlers at all.
        """

    def uninstall(self) -> None:
        """
        A no-op: L{install} changed nothing, so there is nothing to undo.
        """
+
+
@frozen
class _WithSignalHandling:
    """
    A reactor core helper that can manage signals: it installs signal handlers
    at start time.
    """

    _sigInt: SignalHandler
    _sigBreak: SignalHandler
    _sigTerm: SignalHandler

    def install(self) -> None:
        """
        Install the signal handlers for the Twisted event loop.
        """
        # Respect a pre-existing SIGINT handler (for example one installed by
        # pdb): only take over when the stock Python handler is in place.
        if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
            signal.signal(signal.SIGINT, self._sigInt)
        signal.signal(signal.SIGTERM, self._sigTerm)

        # SIGBREAK only exists on Windows, where Ctrl-Break raises it.
        sigBreak = getattr(signal, "SIGBREAK", None)
        if sigBreak is not None:
            signal.signal(sigBreak, self._sigBreak)

    def uninstall(self) -> None:
        """
        At the moment, do nothing (for historical reasons).
        """
        # This should really do something.
        # https://github.com/twisted/twisted/issues/11761
+
+
@define
class _MultiSignalHandling:
    """
    An implementation of L{SignalHandling} which propagates protocol
    method calls to a number of other implementations.

    This lets multiple signal handling implementations be composed into a
    single object so the reactor need not care how they are factored.

    @ivar _signalHandlings: The other C{SignalHandling} implementations to
        which to propagate calls.

    @ivar _installed: If L{install} has been called but L{uninstall} has not.
        This is used to avoid double cleanup which otherwise results (at
        least during test suite runs) because twisted.internet.reactormixins
        doesn't keep track of whether a reactor has run or not but always
        invokes its cleanup logic.
    """

    _signalHandlings: Sequence[SignalHandling]
    _installed: bool = False

    def install(self) -> None:
        for handling in self._signalHandlings:
            handling.install()
        self._installed = True

    def uninstall(self) -> None:
        if not self._installed:
            return
        for handling in self._signalHandlings:
            handling.uninstall()
        self._installed = False
+
+
@define
class _ChildSignalHandling:
    """
    Signal handling behavior which supports I{SIGCHLD} for notification about
    changes to child process state.

    @ivar _childWaker: L{None} or a reference to the L{_SIGCHLDWaker} which is
        used to properly notice child process termination.  It is non-C{None}
        exactly while this behavior is installed; the attribute exists mostly
        because L{_SIGCHLDWaker} allocates file descriptors as a side effect
        of its initializer.
    """

    _addInternalReader: Callable[[IReadDescriptor], object]
    _removeInternalReader: Callable[[IReadDescriptor], object]
    _childWaker: Optional[_SIGCHLDWaker] = None

    def install(self) -> None:
        """
        Extend the basic signal handling logic to also support handling
        SIGCHLD to know when to try to reap child processes.
        """
        # This conditional should probably not be necessary.
        # https://github.com/twisted/twisted/issues/11763
        if self._childWaker is None:
            waker = _SIGCHLDWaker()
            self._childWaker = waker
            self._addInternalReader(waker)
            waker.install()

        # Also reap all processes right now, in case we missed any signals
        # before we installed the SIGCHLD waker/handler.  This should only
        # happen if someone used spawnProcess before calling reactor.run (and
        # the process also exited already).
        process.reapAllProcesses()

    def uninstall(self) -> None:
        """
        If a child waker was created and installed, uninstall it now.

        Since this disables reactor functionality and is only called when the
        reactor is stopping, it doesn't provide any directly useful
        functionality; the cleanup of reactor-related process-global state
        that it does helps in unit tests involving multiple reactors and is
        generally just a nice thing.
        """
        waker = self._childWaker
        assert waker is not None

        # XXX This would probably be an alright place to put all of the
        # cleanup code for all internal readers (here and in the base class,
        # anyway).  See #3063 for that cleanup task.
        self._removeInternalReader(waker)
        waker.uninstall()
        waker.connectionLost(failure.Failure(Exception("uninstalled")))

        # We just spoiled the current _childWaker so throw it away.  We can
        # make a new one later if need be.
        self._childWaker = None
+
+
class _IWaker(Interface):
    """
    Interface to wake up the event loop based on the self-pipe trick.

    The U{I{self-pipe trick}<http://cr.yp.to/docs/selfpipe.html>} is used by
    threads or signal handlers to nudge the main loop out of its wait; that
    is why C{wakeUp} exists alongside C{doRead}.
    """

    disconnected = Attribute("")

    def wakeUp():
        """
        Called when the event loop should be woken up.
        """

    def doRead():
        """
        Read some data from my connection and discard it.
        """

    def connectionLost(reason: failure.Failure) -> None:
        """
        Called when the connection was closed; closes the underlying pipes.
        """
+
+
@implementer(_IWaker)
class _SocketWaker(log.Logger):
    """
    The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>} implemented
    with a pair of TCP sockets rather than pipes (select() on Windows cannot
    monitor pipes); used to wake up the main loop from another thread.
    """

    disconnected = 0

    def __init__(self) -> None:
        """Initialize."""
        # Following select_trigger (from asyncore)'s example: connect a
        # client socket to a throwaway loopback listener and keep both ends.
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        with contextlib.closing(
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ) as server:
            server.bind(("127.0.0.1", 0))
            server.listen(1)
            client.connect(server.getsockname())
            reader, clientaddr = server.accept()
        client.setblocking(False)
        reader.setblocking(False)
        self.r = reader
        self.w = client
        self.fileno = self.r.fileno

    def wakeUp(self):
        """Send a byte to my connection."""
        try:
            util.untilConcludes(self.w.send, b"x")
        except OSError as e:
            # A full buffer means a wake-up is already pending; anything
            # else is unexpected.
            if e.args[0] != errno.WSAEWOULDBLOCK:
                raise

    def doRead(self):
        """
        Read some data from my connection and discard it.
        """
        try:
            self.r.recv(8192)
        except OSError:
            pass

    def connectionLost(self, reason):
        self.r.close()
        self.w.close()
+
+
@implementer(IReadDescriptor)
class _FDWaker(log.Logger):
    """
    The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, used to wake
    up the main loop from another thread or a signal handler.

    L{_FDWaker} is a base class for waker implementations based on
    writing to a pipe being monitored by the reactor.

    @ivar o: The file descriptor for the end of the pipe which can be
        written to wake up a reactor monitoring this waker.

    @ivar i: The file descriptor which should be monitored in order to
        be awoken by this waker.
    """

    disconnected = 0

    i: int
    o: int

    def __init__(self) -> None:
        """Create the pipe, non-blocking and close-on-exec on both ends."""
        self.i, self.o = os.pipe()
        for fd in (self.i, self.o):
            fdesc.setNonBlocking(fd)
            fdesc._setCloseOnExec(fd)
        self.fileno = lambda: self.i

    def doRead(self) -> None:
        """
        Read some bytes from the pipe and discard them.
        """
        fdesc.readFromFD(self.fileno(), lambda data: None)

    def connectionLost(self, reason):
        """Close both ends of my pipe."""
        # Already torn down (the descriptors were deleted by a prior call).
        if not hasattr(self, "o"):
            return
        for fd in (self.i, self.o):
            try:
                os.close(fd)
            except OSError:
                pass
        del self.i, self.o
+
+
@implementer(_IWaker)
class _UnixWaker(_FDWaker):
    """
    This class provides a simple interface to wake up the event loop.

    This is used by threads or signals to wake up the event loop.
    """

    def wakeUp(self):
        """Write one byte to the pipe, and flush it."""
        # os.write is used instead of fdesc.writeToFD because EINTR (retry)
        # and EAGAIN (a wake-up is already pending; do nothing) must be
        # distinguished here.
        if self.o is None:
            return
        try:
            util.untilConcludes(os.write, self.o, b"x")
        except OSError as e:
            # XXX There is no unit test for raising the exception
            # for other errnos. See #4285.
            if e.errno != errno.EAGAIN:
                raise
+
+
# Select the platform-appropriate waker implementation for this process.
if platformType == "posix":
    _Waker = _UnixWaker
else:
    # Primarily Windows and Jython.
    _Waker = _SocketWaker  # type: ignore[misc,assignment]
+
+
class _SIGCHLDWaker(_FDWaker):
    """
    L{_SIGCHLDWaker} can wake up a reactor whenever C{SIGCHLD} is received.
    """

    def install(self) -> None:
        """
        Arrange for a byte to be written to this waker's pipe whenever
        C{SIGCHLD} is delivered.
        """
        installHandler(self.o)

    def uninstall(self) -> None:
        """
        Restore the default C{SIGCHLD} disposition, deactivating this waker.
        """
        installHandler(-1)

    def doRead(self) -> None:
        """
        Having woken up the reactor in response to receipt of
        C{SIGCHLD}, reap the process which exited.

        This is called whenever the reactor notices the waker pipe is
        writeable, which happens soon after any call to the C{wakeUp}
        method.
        """
        super().doRead()
        process.reapAllProcesses()
diff --git a/contrib/python/Twisted/py3/twisted/internet/_sslverify.py b/contrib/python/Twisted/py3/twisted/internet/_sslverify.py
new file mode 100644
index 0000000000..552c30bbf0
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_sslverify.py
@@ -0,0 +1,2017 @@
+# -*- test-case-name: twisted.test.test_sslverify -*-
+# Copyright (c) 2005 Divmod, Inc.
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+from __future__ import annotations
+
+import warnings
+from binascii import hexlify
+from functools import lru_cache
+from hashlib import md5
+from typing import Dict
+
+from zope.interface import Interface, implementer
+
+from OpenSSL import SSL, crypto
+from OpenSSL._util import lib as pyOpenSSLlib # type: ignore[import]
+
+import attr
+from constantly import FlagConstant, Flags, NamedConstant, Names # type: ignore[import]
+from incremental import Version
+
+from twisted.internet.abstract import isIPAddress, isIPv6Address
+from twisted.internet.defer import Deferred
+from twisted.internet.error import CertificateError, VerifyError
+from twisted.internet.interfaces import (
+ IAcceptableCiphers,
+ ICipher,
+ IOpenSSLClientConnectionCreator,
+ IOpenSSLContextFactory,
+)
+from twisted.python import log, util
+from twisted.python.compat import nativeString
+from twisted.python.deprecate import _mutuallyExclusiveArguments, deprecated
+from twisted.python.failure import Failure
+from twisted.python.randbytes import secureRandom
+from ._idna import _idnaBytes
+
+
class TLSVersion(Names):
    """
    TLS versions that we can negotiate with the client/server.

    The constants are declared oldest-first; C{_getExcludedTLSProtocols}
    relies on C{iterconstants()} yielding them in this order.
    """

    SSLv3 = NamedConstant()
    TLSv1_0 = NamedConstant()
    TLSv1_1 = NamedConstant()
    TLSv1_2 = NamedConstant()
    TLSv1_3 = NamedConstant()
+
+
# OpenSSL expresses protocol-version selection via "disable" flags; map each
# TLSVersion constant to the corresponding OP_NO_* option.
_tlsDisableFlags = {
    TLSVersion.SSLv3: SSL.OP_NO_SSLv3,
    TLSVersion.TLSv1_0: SSL.OP_NO_TLSv1,
    TLSVersion.TLSv1_1: SSL.OP_NO_TLSv1_1,
    TLSVersion.TLSv1_2: SSL.OP_NO_TLSv1_2,
    # If we don't have TLS v1.3 yet, we can't disable it -- this is just so
    # when it makes it into OpenSSL, connections knowingly bracketed to v1.2
    # don't end up going to v1.3
    TLSVersion.TLSv1_3: getattr(SSL, "OP_NO_TLSv1_3", 0x00),
}
+
+
def _getExcludedTLSProtocols(oldest, newest):
    """
    Given a pair of L{TLSVersion} constants, figure out what versions we want
    to disable (as OpenSSL is an exclusion based API).

    @param oldest: The oldest L{TLSVersion} we want to allow.
    @type oldest: L{TLSVersion} constant

    @param newest: The newest L{TLSVersion} we want to allow, or L{None} for no
        upper limit.
    @type newest: L{TLSVersion} constant or L{None}

    @return: The versions we want to disable.
    @rtype: L{list} of L{TLSVersion} constants.
    """
    versions = list(TLSVersion.iterconstants())
    # Slicing already yields a fresh list, so no copying comprehension is
    # needed: everything older than `oldest` is excluded...
    excludedVersions = versions[: versions.index(oldest)]
    # ...and, when an upper bound is given, everything from `newest` onward.
    if newest:
        excludedVersions.extend(versions[versions.index(newest) :])
    return excludedVersions
+
+
class SimpleVerificationError(Exception):
    """
    Raised by the fallback verification helpers when a certificate does not
    match; not a very useful verification error.
    """
+
+
def simpleVerifyHostname(connection, hostname):
    """
    Check only the common name in the certificate presented by the peer and
    only for an exact match.

    This is to provide I{something} in the way of hostname verification to
    users who haven't installed C{service_identity}.  This check is overly
    strict, relies on a deprecated TLS feature (the commonName is supposed to
    be ignored when subjectAlternativeName extensions are present), and many
    valid certificates will fail it.

    @param connection: the OpenSSL connection to verify.
    @type connection: L{OpenSSL.SSL.Connection}

    @param hostname: The hostname expected by the user.
    @type hostname: L{unicode}

    @raise twisted.internet.ssl.VerificationError: if the common name and
        hostname don't match.
    """
    peerCommonName = connection.get_peer_certificate().get_subject().commonName
    if peerCommonName == hostname:
        return
    raise SimpleVerificationError(repr(peerCommonName) + "!=" + repr(hostname))
+
+
def simpleVerifyIPAddress(connection, hostname):
    """
    Always fails validation of IP addresses

    @param connection: the OpenSSL connection to verify.
    @type connection: L{OpenSSL.SSL.Connection}

    @param hostname: The hostname expected by the user.
    @type hostname: L{unicode}

    @raise twisted.internet.ssl.VerificationError: Always raised
    """
    # The fallback implementation has no way to check IP subjectAltNames,
    # so it refuses unconditionally.
    raise SimpleVerificationError("Cannot verify certificate IP addresses")
+
+
+def _usablePyOpenSSL(version):
+ """
+ Check pyOpenSSL version string whether we can use it for host verification.
+
+ @param version: A pyOpenSSL version string.
+ @type version: L{str}
+
+ @rtype: L{bool}
+ """
+ major, minor = (int(part) for part in version.split(".")[:2])
+ return (major, minor) >= (0, 12)
+
+
def _selectVerifyImplementation():
    """
    Determine if C{service_identity} is installed. If so, use it. If not, use
    simplistic and incorrect checking as implemented in
    L{simpleVerifyHostname}.

    @return: 2-tuple of (C{verify_hostname}, C{VerificationError})
    @rtype: L{tuple}
    """
    try:
        from service_identity import VerificationError
        from service_identity.pyopenssl import verify_hostname, verify_ip_address
    except ImportError as e:
        whatsWrong = (
            "Without the service_identity module, Twisted can perform only "
            "rudimentary TLS client hostname verification. Many valid "
            "certificate/hostname mappings may be rejected."
        )
        warnings.warn_explicit(
            "You do not have a working installation of the "
            "service_identity module: '" + str(e) + "'. "
            "Please install it from "
            "<https://pypi.python.org/pypi/service_identity> and make "
            "sure all of its dependencies are satisfied. " + whatsWrong,
            # Unfortunately the lineno is required.
            category=UserWarning,
            filename="",
            lineno=0,
        )
        return simpleVerifyHostname, simpleVerifyIPAddress, SimpleVerificationError
    else:
        return verify_hostname, verify_ip_address, VerificationError
+
+
# Resolved once at import time: service_identity-backed verification when
# available, otherwise the simplistic fallbacks defined above.
verifyHostname, verifyIPAddress, VerificationError = _selectVerifyImplementation()
+
+
class ProtocolNegotiationSupport(Flags):
    """
    L{ProtocolNegotiationSupport} defines flags which are used to indicate the
    level of NPN/ALPN support provided by the TLS backend.

    @cvar NOSUPPORT: There is no support for NPN or ALPN. This is exclusive
        with both L{NPN} and L{ALPN}.
    @cvar NPN: The implementation supports Next Protocol Negotiation.
    @cvar ALPN: The implementation supports Application Layer Protocol
        Negotiation.
    """

    NPN = FlagConstant(0x0001)
    ALPN = FlagConstant(0x0002)
    # NOSUPPORT is attached after class creation; see the FIXME that follows
    # this class definition.
+
+
# FIXME: https://twistedmatrix.com/trac/ticket/8074
# Currently flags with literal zero values behave incorrectly. However,
# creating a flag by NOTing a flag with itself appears to work totally fine, so
# do that instead.
# (NPN ^ NPN yields an empty FlagConstant, i.e. "no flags set".)
ProtocolNegotiationSupport.NOSUPPORT = (
    ProtocolNegotiationSupport.NPN ^ ProtocolNegotiationSupport.NPN
)
+
+
def protocolNegotiationMechanisms():
    """
    Checks whether your versions of PyOpenSSL and OpenSSL are recent enough to
    support protocol negotiation, and if they are, what kind of protocol
    negotiation is supported.

    @return: A combination of flags from L{ProtocolNegotiationSupport} that
        indicate which mechanisms for protocol negotiation are supported.
    @rtype: L{constantly.FlagConstant}
    """
    support = ProtocolNegotiationSupport.NOSUPPORT
    context = SSL.Context(SSL.SSLv23_METHOD)

    # Capability is probed by attempting to register a callback: a missing
    # method (AttributeError) or a stubbed-out one (NotImplementedError)
    # means that mechanism is unavailable.
    try:
        context.set_npn_advertise_callback(lambda conn: None)
    except (AttributeError, NotImplementedError):
        pass
    else:
        support |= ProtocolNegotiationSupport.NPN

    try:
        context.set_alpn_select_callback(lambda conn: None)
    except (AttributeError, NotImplementedError):
        pass
    else:
        support |= ProtocolNegotiationSupport.ALPN

    return support
+
+
# Map both the short (e.g. "CN") and long (e.g. "commonName") spellings of
# each supported X.509 name field to its canonical long attribute name as
# used on OpenSSL X509Name objects.
_x509names = {
    "CN": "commonName",
    "commonName": "commonName",
    "O": "organizationName",
    "organizationName": "organizationName",
    "OU": "organizationalUnitName",
    "organizationalUnitName": "organizationalUnitName",
    "L": "localityName",
    "localityName": "localityName",
    "ST": "stateOrProvinceName",
    "stateOrProvinceName": "stateOrProvinceName",
    "C": "countryName",
    "countryName": "countryName",
    "emailAddress": "emailAddress",
}
+
+
class DistinguishedName(Dict[str, bytes]):
    """
    Identify and describe an entity.

    Distinguished names are used to provide a minimal amount of identifying
    information about a certificate issuer or subject. They are commonly
    created with one or more of the following fields::

        commonName (CN)
        organizationName (O)
        organizationalUnitName (OU)
        localityName (L)
        stateOrProvinceName (ST)
        countryName (C)
        emailAddress

    A L{DistinguishedName} should be constructed using keyword arguments whose
    keys can be any of the field names above (as a native string), and the
    values are either Unicode text which is encodable to ASCII, or L{bytes}
    limited to the ASCII subset. Any fields passed to the constructor will be
    set as attributes, accessible using both their extended name and their
    shortened acronym. The attribute values will be the ASCII-encoded
    bytes. For example::

        >>> dn = DistinguishedName(commonName=b'www.example.com',
        ...                        C='US')
        >>> dn.C
        b'US'
        >>> dn.countryName
        b'US'
        >>> hasattr(dn, "organizationName")
        False

    L{DistinguishedName} instances can also be used as dictionaries; the keys
    are extended name of the fields::

        >>> dn.keys()
        ['countryName', 'commonName']
        >>> dn['countryName']
        b'US'

    """

    # No instance __dict__: every field lives in the dict itself, routed
    # through __setattr__/__getattr__ below.
    __slots__ = ()

    def __init__(self, **kw):
        for fieldName, fieldValue in kw.items():
            setattr(self, fieldName, fieldValue)

    def _copyFrom(self, x509name):
        # Pull every known field off an OpenSSL X509Name object.
        for name in _x509names:
            value = getattr(x509name, name, None)
            if value is not None:
                setattr(self, name, value)

    def _copyInto(self, x509name):
        # Push our fields onto an OpenSSL X509Name object as native strings.
        for fieldName, fieldValue in self.items():
            setattr(x509name, fieldName, nativeString(fieldValue))

    def __repr__(self) -> str:
        return "<DN %s>" % (dict.__repr__(self)[1:-1])

    def __getattr__(self, attr):
        try:
            return self[_x509names[attr]]
        except KeyError:
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        if attr not in _x509names:
            raise AttributeError(f"{attr} is not a valid OpenSSL X509 name field")
        realAttr = _x509names[attr]
        if not isinstance(value, bytes):
            value = value.encode("ascii")
        self[realAttr] = value

    def inspect(self):
        """
        Return a multi-line, human-readable representation of this DN.

        @rtype: L{str}
        """
        rows = []
        widest = 0
        # Width is computed over every known label, not just the populated
        # ones, so output alignment is stable regardless of which fields are
        # present.
        for fieldName in sorted(set(_x509names.values())):
            label = util.nameToLabel(fieldName)
            widest = max(len(label), widest)
            fieldValue = getattr(self, fieldName, None)
            if fieldValue is not None:
                rows.append((label, nativeString(fieldValue)))
        widest += 2
        return "\n".join(label.rjust(widest) + ": " + text for label, text in rows)
+
+
# Short alias for DistinguishedName.
DN = DistinguishedName
+
+
class CertBase:
    """
    Base class for public (certificate only) and private (certificate + key
    pair) certificates.

    @ivar original: The underlying OpenSSL certificate object.
    @type original: L{OpenSSL.crypto.X509}
    """

    def __init__(self, original):
        self.original = original

    def _copyName(self, suffix):
        # Build a DistinguishedName from the X509's get_subject/get_issuer.
        copied = DistinguishedName()
        copied._copyFrom(getattr(self.original, "get_" + suffix)())
        return copied

    def getSubject(self):
        """
        Retrieve the subject of this certificate.

        @return: A copy of the subject of this certificate.
        @rtype: L{DistinguishedName}
        """
        return self._copyName("subject")

    def __conform__(self, interface):
        """
        Convert this L{CertBase} into a provider of the given interface.

        @param interface: The interface to conform to.
        @type interface: L{zope.interface.interfaces.IInterface}

        @return: an L{IOpenSSLTrustRoot} provider or L{NotImplemented}
        @rtype: L{IOpenSSLTrustRoot} or L{NotImplemented}
        """
        if interface is not IOpenSSLTrustRoot:
            return NotImplemented
        return OpenSSLCertificateAuthorities([self.original])
+
+
+def _handleattrhelper(Class, transport, methodName):
+ """
+ (private) Helper for L{Certificate.peerFromTransport} and
+ L{Certificate.hostFromTransport} which checks for incompatible handle types
+ and null certificates and raises the appropriate exception or returns the
+ appropriate certificate object.
+ """
+ method = getattr(transport.getHandle(), f"get_{methodName}_certificate", None)
+ if method is None:
+ raise CertificateError(
+ "non-TLS transport {!r} did not have {} certificate".format(
+ transport, methodName
+ )
+ )
+ cert = method()
+ if cert is None:
+ raise CertificateError(
+ "TLS transport {!r} did not have {} certificate".format(
+ transport, methodName
+ )
+ )
+ return Class(cert)
+
+
class Certificate(CertBase):
    """
    An x509 certificate.
    """

    def __repr__(self) -> str:
        """
        Show the subject and issuer common names, for debugging.
        """
        return "<{} Subject={} Issuer={}>".format(
            self.__class__.__name__,
            self.getSubject().commonName,
            self.getIssuer().commonName,
        )

    def __eq__(self, other: object) -> bool:
        """
        Two certificates are equal when their ASN.1 serializations are equal.
        """
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; confirm that is intended.
        if isinstance(other, Certificate):
            return self.dump() == other.dump()
        return NotImplemented

    @classmethod
    def load(Class, requestData, format=crypto.FILETYPE_ASN1, args=()):
        """
        Load a certificate from an ASN.1- or PEM-format string.

        @rtype: C{Class}
        """
        return Class(crypto.load_certificate(format, requestData), *args)

    # We can't use super() because it is old style still, so we have to hack
    # around things wanting to call the parent function
    _load = load

    def dumpPEM(self):
        """
        Dump this certificate to a PEM-format data string.

        @rtype: L{str}
        """
        return self.dump(crypto.FILETYPE_PEM)

    @classmethod
    def loadPEM(Class, data):
        """
        Load a certificate from a PEM-format data string.

        @rtype: C{Class}
        """
        return Class.load(data, crypto.FILETYPE_PEM)

    @classmethod
    def peerFromTransport(Class, transport):
        """
        Get the certificate for the remote end of the given transport.

        @param transport: an L{ISystemHandle} provider

        @rtype: C{Class}

        @raise CertificateError: if the given transport does not have a peer
            certificate.
        """
        return _handleattrhelper(Class, transport, "peer")

    @classmethod
    def hostFromTransport(Class, transport):
        """
        Get the certificate for the local end of the given transport.

        @param transport: an L{ISystemHandle} provider; the transport we will

        @rtype: C{Class}

        @raise CertificateError: if the given transport does not have a host
            certificate.
        """
        return _handleattrhelper(Class, transport, "host")

    def getPublicKey(self):
        """
        Get the public key for this certificate.

        @rtype: L{PublicKey}
        """
        return PublicKey(self.original.get_pubkey())

    def dump(self, format: int = crypto.FILETYPE_ASN1) -> bytes:
        """
        Serialize this certificate.

        @param format: an OpenSSL C{FILETYPE_*} constant; ASN.1 (DER) by
            default.

        @return: the serialized certificate.
        """
        return crypto.dump_certificate(format, self.original)

    def serialNumber(self):
        """
        Retrieve the serial number of this certificate.

        @rtype: L{int}
        """
        return self.original.get_serial_number()

    def digest(self, method="md5"):
        """
        Return a digest hash of this certificate using the specified hash
        algorithm.

        @param method: One of C{'md5'} or C{'sha'}.

        @return: The digest of the object, formatted as b":"-delimited hex
            pairs
        @rtype: L{bytes}
        """
        return self.original.digest(method)

    def _inspect(self):
        # Subject/issuer/serial/digest summary; public-key details are
        # appended by inspect() below.
        return "\n".join(
            [
                "Certificate For Subject:",
                self.getSubject().inspect(),
                "\nIssuer:",
                self.getIssuer().inspect(),
                "\nSerial Number: %d" % self.serialNumber(),
                "Digest: %s" % nativeString(self.digest()),
            ]
        )

    def inspect(self):
        """
        Return a multi-line, human-readable representation of this
        Certificate, including information about the subject, issuer, and
        public key.
        """
        return "\n".join((self._inspect(), self.getPublicKey().inspect()))

    def getIssuer(self):
        """
        Retrieve the issuer of this certificate.

        @rtype: L{DistinguishedName}
        @return: A copy of the issuer of this certificate.
        """
        return self._copyName("issuer")

    def options(self, *authorities):
        # Deliberately unimplemented; see the exception message.
        raise NotImplementedError("Possible, but doubtful we need this yet")
+
+
class CertificateRequest(CertBase):
    """
    An x509 certificate request.

    Certificate requests are given to certificate authorities to be signed and
    returned resulting in an actual certificate.
    """

    @classmethod
    def load(Class, requestData, requestFormat=crypto.FILETYPE_ASN1):
        """
        Deserialize a certificate request, checking that it is self-signed.

        @raise VerifyError: if the request's signature does not verify
            against its own public key.
        """
        request = crypto.load_certificate_request(requestFormat, requestData)
        subject = DistinguishedName()
        subject._copyFrom(request.get_subject())
        if not request.verify(request.get_pubkey()):
            raise VerifyError(
                f"Can't verify that request for {subject!r} is self-signed."
            )
        return Class(request)

    def dump(self, format=crypto.FILETYPE_ASN1):
        """
        Serialize this request in the given format (ASN.1 by default).
        """
        return crypto.dump_certificate_request(format, self.original)
+
+
class PrivateCertificate(Certificate):
    """
    An x509 certificate and private key.
    """

    def __repr__(self) -> str:
        return Certificate.__repr__(self) + " with " + repr(self.privateKey)

    def _setPrivateKey(self, privateKey):
        """
        Attach C{privateKey} to this certificate, verifying that it matches
        the certificate's public key first.

        @raise VerifyError: if the key does not match.
        @return: C{self}, for chaining from the alternate constructors.
        """
        if not privateKey.matches(self.getPublicKey()):
            raise VerifyError("Certificate public and private keys do not match.")
        self.privateKey = privateKey
        return self

    def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
        """
        Create a new L{PrivateCertificate} from the given certificate data and
        this instance's private key.
        """
        return self.load(newCertData, self.privateKey, format)

    @classmethod
    def load(Class, data, privateKey, format=crypto.FILETYPE_ASN1):
        """
        Load a certificate from serialized data and pair it with
        C{privateKey} (which must match it).
        """
        return Class._load(data, format)._setPrivateKey(privateKey)

    def inspect(self):
        """
        Return a multi-line, human-readable representation of this
        certificate and its private key.
        """
        return "\n".join([Certificate._inspect(self), self.privateKey.inspect()])

    def dumpPEM(self):
        """
        Dump both public and private parts of a private certificate to
        PEM-format data.
        """
        return self.dump(crypto.FILETYPE_PEM) + self.privateKey.dump(
            crypto.FILETYPE_PEM
        )

    @classmethod
    def loadPEM(Class, data):
        """
        Load both private and public parts of a private certificate from a
        chunk of PEM-format data.
        """
        return Class.load(
            data, KeyPair.load(data, crypto.FILETYPE_PEM), crypto.FILETYPE_PEM
        )

    @classmethod
    def fromCertificateAndKeyPair(Class, certificateInstance, privateKey):
        """
        Build a L{PrivateCertificate} from an existing L{Certificate} and its
        matching L{KeyPair}.
        """
        privcert = Class(certificateInstance.original)
        return privcert._setPrivateKey(privateKey)

    def options(self, *authorities):
        """
        Create a context factory using this L{PrivateCertificate}'s certificate
        and private key.

        @param authorities: A list of L{Certificate} object

        @return: A context factory.
        @rtype: L{CertificateOptions <twisted.internet.ssl.CertificateOptions>}
        """
        options = dict(privateKey=self.privateKey.original, certificate=self.original)
        if authorities:
            options.update(
                dict(
                    trustRoot=OpenSSLCertificateAuthorities(
                        [auth.original for auth in authorities]
                    )
                )
            )
        return OpenSSLCertificateOptions(**options)

    def certificateRequest(self, format=crypto.FILETYPE_ASN1, digestAlgorithm="sha256"):
        """
        Create a certificate request for this certificate's subject, signed
        with its private key.
        """
        return self.privateKey.certificateRequest(
            self.getSubject(), format, digestAlgorithm
        )

    def signCertificateRequest(
        self,
        requestData,
        verifyDNCallback,
        serialNumber,
        requestFormat=crypto.FILETYPE_ASN1,
        certificateFormat=crypto.FILETYPE_ASN1,
    ):
        """
        Sign a serialized certificate request, acting as a certificate
        authority whose DN is this certificate's subject.  Delegates to the
        private key's implementation.
        """
        issuer = self.getSubject()
        return self.privateKey.signCertificateRequest(
            issuer,
            requestData,
            verifyDNCallback,
            serialNumber,
            requestFormat,
            certificateFormat,
        )

    def signRequestObject(
        self,
        certificateRequest,
        serialNumber,
        secondsToExpiry=60 * 60 * 24 * 365,  # One year
        digestAlgorithm="sha256",
    ):
        """
        Sign a L{CertificateRequest} instance, issuing with this
        certificate's subject as the issuer DN; returns a L{Certificate}.
        """
        return self.privateKey.signRequestObject(
            self.getSubject(),
            certificateRequest,
            serialNumber,
            secondsToExpiry,
            digestAlgorithm,
        )
+
+
class PublicKey:
    """
    A L{PublicKey} is a representation of the public part of a key pair.

    You can't do a whole lot with it aside from comparing it to other
    L{PublicKey} objects.

    @note: If constructing a L{PublicKey} manually, be sure to pass only a
        L{OpenSSL.crypto.PKey} that does not contain a private key!

    @ivar original: The wrapped L{OpenSSL.crypto.PKey} object.
    """

    def __init__(self, osslpkey):
        """
        @param osslpkey: The underlying pyOpenSSL key object.
        @type osslpkey: L{OpenSSL.crypto.PKey}
        """
        self.original = osslpkey

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.keyHash()}>"

    def matches(self, otherKey):
        """
        Does this L{PublicKey} contain the same value as another L{PublicKey}?

        @param otherKey: The key to compare C{self} to.
        @type otherKey: L{PublicKey}

        @return: L{True} if these keys match, L{False} if not.
        @rtype: L{bool}
        """
        return otherKey.keyHash() == self.keyHash()

    def keyHash(self):
        """
        Compute a hash of the underlying PKey object.

        The purpose of this method is to allow you to determine if two
        certificates share the same public key; it is not really useful for
        anything else.

        In versions of Twisted prior to 15.0, C{keyHash} used a technique
        involving certificate requests for computing the hash that was not
        stable in the face of changes to the underlying OpenSSL library.

        @return: Return a 32-character hexadecimal string uniquely identifying
            this public key, I{for this version of Twisted}.
        @rtype: native L{str}
        """
        serialized = crypto.dump_publickey(crypto.FILETYPE_ASN1, self.original)
        return md5(serialized).hexdigest()

    def inspect(self):
        """
        Return a one-line, human-readable description of this key.
        """
        return f"Public Key with Hash: {self.keyHash()}"
+
+
class KeyPair(PublicKey):
    """
    An x509 key pair: a L{PublicKey} together with its private half.
    """

    @classmethod
    def load(Class, data, format=crypto.FILETYPE_ASN1):
        """
        Load a private key from an ASN.1- or PEM-format string.

        @rtype: C{Class}
        """
        return Class(crypto.load_privatekey(format, data))

    def dump(self, format=crypto.FILETYPE_ASN1):
        """
        Serialize this key pair, I{private key material included}.

        @rtype: L{bytes}
        """
        return crypto.dump_privatekey(format, self.original)

    @deprecated(Version("Twisted", 15, 0, 0), "a real persistence system")
    def __getstate__(self):
        # Deprecated: pickling a key pair serializes the private key.
        return self.dump()

    @deprecated(Version("Twisted", 15, 0, 0), "a real persistence system")
    def __setstate__(self, state):
        # Deprecated counterpart of __getstate__; expects ASN.1 data.
        self.__init__(crypto.load_privatekey(crypto.FILETYPE_ASN1, state))

    def inspect(self):
        """
        Return a one-line, human-readable description of this key pair.
        """
        t = self.original.type()
        if t == crypto.TYPE_RSA:
            ts = "RSA"
        elif t == crypto.TYPE_DSA:
            ts = "DSA"
        else:
            ts = "(Unknown Type!)"
        L = (self.original.bits(), ts, self.keyHash())
        return "%s-bit %s Key Pair with Hash: %s" % L

    @classmethod
    def generate(Class, kind=crypto.TYPE_RSA, size=2048):
        """
        Generate a new key pair.

        @param kind: A pyOpenSSL key-type constant; RSA by default.
        @param size: The key size, in bits.

        @rtype: C{Class}
        """
        pkey = crypto.PKey()
        pkey.generate_key(kind, size)
        return Class(pkey)

    def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
        """
        Create a L{PrivateCertificate} from serialized certificate data and
        this key pair.
        """
        return PrivateCertificate.load(newCertData, self, format)

    def requestObject(self, distinguishedName, digestAlgorithm="sha256"):
        """
        Create a L{CertificateRequest} for the given subject DN, signed with
        this key.
        """
        req = crypto.X509Req()
        req.set_pubkey(self.original)
        distinguishedName._copyInto(req.get_subject())
        req.sign(self.original, digestAlgorithm)
        return CertificateRequest(req)

    def certificateRequest(
        self, distinguishedName, format=crypto.FILETYPE_ASN1, digestAlgorithm="sha256"
    ):
        """
        Create a certificate request signed with this key.

        @return: a string, formatted according to the 'format' argument.
        """
        return self.requestObject(distinguishedName, digestAlgorithm).dump(format)

    def signCertificateRequest(
        self,
        issuerDistinguishedName,
        requestData,
        verifyDNCallback,
        serialNumber,
        requestFormat=crypto.FILETYPE_ASN1,
        certificateFormat=crypto.FILETYPE_ASN1,
        secondsToExpiry=60 * 60 * 24 * 365,  # One year
        digestAlgorithm="sha256",
    ):
        """
        Given a blob of certificate request data and a certificate authority's
        DistinguishedName, return a blob of signed certificate data.

        If verifyDNCallback returns a Deferred, I will return a Deferred which
        fires the data when that Deferred has completed.
        """
        hlreq = CertificateRequest.load(requestData, requestFormat)

        dn = hlreq.getSubject()
        # Give the application a chance to accept or reject the request's DN
        # before any signing happens; the callback may be asynchronous.
        vval = verifyDNCallback(dn)

        def verified(value):
            # A falsy callback result means the DN was rejected.
            if not value:
                raise VerifyError(
                    "DN callback {!r} rejected request DN {!r}".format(
                        verifyDNCallback, dn
                    )
                )
            return self.signRequestObject(
                issuerDistinguishedName,
                hlreq,
                serialNumber,
                secondsToExpiry,
                digestAlgorithm,
            ).dump(certificateFormat)

        if isinstance(vval, Deferred):
            return vval.addCallback(verified)
        else:
            return verified(vval)

    def signRequestObject(
        self,
        issuerDistinguishedName,
        requestObject,
        serialNumber,
        secondsToExpiry=60 * 60 * 24 * 365,  # One year
        digestAlgorithm="sha256",
    ):
        """
        Sign a CertificateRequest instance, returning a Certificate instance.
        """
        req = requestObject.original
        cert = crypto.X509()
        issuerDistinguishedName._copyInto(cert.get_issuer())
        cert.set_subject(req.get_subject())
        cert.set_pubkey(req.get_pubkey())
        # Validity window starts now and runs for secondsToExpiry.
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(secondsToExpiry)
        cert.set_serial_number(serialNumber)
        cert.sign(self.original, digestAlgorithm)
        return Certificate(cert)

    def selfSignedCert(self, serialNumber, **kw):
        """
        Create a self-signed L{PrivateCertificate}; keyword arguments are the
        fields of the distinguished name (passed to L{DN}).
        """
        dn = DN(**kw)
        return PrivateCertificate.fromCertificateAndKeyPair(
            self.signRequestObject(dn, self.requestObject(dn), serialNumber), self
        )
+
+
class IOpenSSLTrustRoot(Interface):
    """
    Trust settings for an OpenSSL context.

    Note that this interface's methods are private, so things outside of
    Twisted shouldn't implement it.
    """

    # zope.interface methods are declared without a "self" parameter.
    def _addCACertsToContext(context):
        """
        Add certificate-authority certificates to an SSL context whose
        connections should trust those authorities.

        @param context: An SSL context for a connection which should be
            verified by some certificate authority.
        @type context: L{OpenSSL.SSL.Context}

        @return: L{None}
        """
+
+
@implementer(IOpenSSLTrustRoot)
class OpenSSLCertificateAuthorities:
    """
    Trust an explicitly specified set of certificates, represented by a list of
    L{OpenSSL.crypto.X509} objects.
    """

    def __init__(self, caCerts):
        """
        @param caCerts: The certificate authorities to trust when using this
            object as a C{trustRoot} for L{OpenSSLCertificateOptions}.
        @type caCerts: L{list} of L{OpenSSL.crypto.X509}
        """
        self._caCerts = caCerts

    def _addCACertsToContext(self, context):
        # Register every configured authority with the context's X509 store.
        certStore = context.get_cert_store()
        for authority in self._caCerts:
            certStore.add_cert(authority)
+
+
def trustRootFromCertificates(certificates):
    """
    Builds an object that trusts multiple root L{Certificate}s.

    When passed to L{optionsForClientTLS}, connections using those options will
    reject any server certificate not signed by at least one of the
    certificates in the `certificates` list.

    @since: 16.0

    @param certificates: All certificates which will be trusted.
    @type certificates: C{iterable} of L{CertBase}

    @rtype: L{IOpenSSLTrustRoot}
    @return: an object suitable for use as the trustRoot= keyword argument to
        L{optionsForClientTLS}

    @raise TypeError: if any item is not a L{CertBase}.
    """
    originals = []
    for candidate in certificates:
        # PrivateCertificate and Certificate are both acceptable here, since
        # both derive from CertBase.
        if not isinstance(candidate, CertBase):
            raise TypeError(
                "certificates items must be twisted.internet.ssl.CertBase"
                " instances"
            )
        originals.append(candidate.original)
    return OpenSSLCertificateAuthorities(originals)
+
+
@implementer(IOpenSSLTrustRoot)
class OpenSSLDefaultPaths:
    """
    Trust the set of default verify paths that OpenSSL was built with, as
    specified by U{SSL_CTX_set_default_verify_paths
    <https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html>}.
    """

    def _addCACertsToContext(self, context):
        # Delegate entirely to OpenSSL's built-in CA location discovery.
        context.set_default_verify_paths()
+
+
def platformTrust():
    """
    Attempt to discover a set of trusted certificate authority certificates
    (or, in other words: trust roots, or root certificates) whose trust is
    managed and updated by tools outside of Twisted.

    If you are writing any client-side TLS code with Twisted, you should use
    this as the C{trustRoot} argument to L{CertificateOptions
    <twisted.internet.ssl.CertificateOptions>}.

    The result of this function should be like the up-to-date list of
    certificates in a web browser.  When developing code that uses
    C{platformTrust}, you can think of it that way.  However, the choice of
    which certificate authorities to trust is never Twisted's responsibility.
    Unless you're writing a very unusual application or library, it's not your
    code's responsibility either.  The user may use platform-specific tools for
    defining which server certificates should be trusted by programs using TLS.
    The purpose of using this API is to respect that decision as much as
    possible.

    This should be a set of trust settings most appropriate for I{client} TLS
    connections; i.e. those which need to verify a server's authenticity.  You
    should probably use this by default for any client TLS connection that you
    create.  For servers, however, client certificates are typically not
    verified; or, if they are, their verification will depend on a custom,
    application-specific certificate authority.

    @since: 14.0

    @note: Currently, L{platformTrust} depends entirely upon your OpenSSL build
        supporting a set of "L{default verify paths <OpenSSLDefaultPaths>}"
        which correspond to certificate authority trust roots.  Unfortunately,
        whether this is true of your system is both outside of Twisted's
        control and difficult (if not impossible) for Twisted to detect
        automatically.

        Nevertheless, this ought to work as desired by default on:

            - Ubuntu Linux machines with the U{ca-certificates
              <https://launchpad.net/ubuntu/+source/ca-certificates>} package
              installed,

            - macOS when using the system-installed version of OpenSSL (i.e.
              I{not} one installed via MacPorts or Homebrew),

            - any build of OpenSSL which has had certificate authority
              certificates installed into its default verify paths (by default,
              C{/usr/local/ssl/certs} if you've built your own OpenSSL), or

            - any process where the C{SSL_CERT_FILE} environment variable is
              set to the path of a file containing your desired CA certificates
              bundle.

        Hopefully soon, this API will be updated to use more sophisticated
        trust-root discovery mechanisms.  Until then, you can follow tickets in
        the Twisted tracker for progress on this implementation on U{Microsoft
        Windows <https://twistedmatrix.com/trac/ticket/6371>}, U{macOS
        <https://twistedmatrix.com/trac/ticket/6372>}, and U{a fallback for
        other platforms which do not have native trust management tools
        <https://twistedmatrix.com/trac/ticket/6934>}.

    @return: an appropriate trust settings object for your platform.
    @rtype: L{IOpenSSLTrustRoot}

    @raise NotImplementedError: if this platform is not yet supported by
        Twisted.  At present, only OpenSSL is supported.
    """
    # Today this is always OpenSSL's compiled-in default verify paths; see
    # the @note above for the platforms where that actually works.
    return OpenSSLDefaultPaths()
+
+
+def _tolerateErrors(wrapped):
+ """
+ Wrap up an C{info_callback} for pyOpenSSL so that if something goes wrong
+ the error is immediately logged and the connection is dropped if possible.
+
+ This wrapper exists because some versions of pyOpenSSL don't handle errors
+ from callbacks at I{all}, and those which do write tracebacks directly to
+ stderr rather than to a supplied logging system. This reports unexpected
+ errors to the Twisted logging system.
+
+ Also, this terminates the connection immediately if possible because if
+ you've got bugs in your verification logic it's much safer to just give up.
+
+ @param wrapped: A valid C{info_callback} for pyOpenSSL.
+ @type wrapped: L{callable}
+
+ @return: A valid C{info_callback} for pyOpenSSL that handles any errors in
+ C{wrapped}.
+ @rtype: L{callable}
+ """
+
+ def infoCallback(connection, where, ret):
+ try:
+ return wrapped(connection, where, ret)
+ except BaseException:
+ f = Failure()
+ log.err(f, "Error during info_callback")
+ connection.get_app_data().failVerification(f)
+
+ return infoCallback
+
+
@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions:
    """
    Client creator for TLS.

    Private implementation type (not exposed to applications) for public
    L{optionsForClientTLS} API.

    @ivar _ctx: The context to use for new connections.
    @type _ctx: L{OpenSSL.SSL.Context}

    @ivar _hostname: The hostname to verify, as specified by the application,
        as some human-readable text.
    @type _hostname: L{unicode}

    @ivar _hostnameBytes: The hostname to verify, decoded into IDNA-encoded
        bytes.  This is passed to APIs which think that hostnames are bytes,
        such as OpenSSL's SNI implementation.
    @type _hostnameBytes: L{bytes}

    @ivar _hostnameASCII: The hostname, as transcoded into IDNA ASCII-range
        unicode code points.  This is pre-transcoded because the
        C{service_identity} package is rather strict about requiring the
        C{idna} package from PyPI for internationalized domain names, rather
        than working with Python's built-in (but sometimes broken) IDNA
        encoding.  ASCII values, however, will always work.
    @type _hostnameASCII: L{unicode}

    @ivar _hostnameIsDnsName: Whether or not the C{_hostname} is a DNSName.
        Will be L{False} if C{_hostname} is an IP address or L{True} if
        C{_hostname} is a DNSName
    @type _hostnameIsDnsName: L{bool}
    """

    def __init__(self, hostname, ctx):
        """
        Initialize L{ClientTLSOptions}.

        @param hostname: The hostname to verify as input by a human.
        @type hostname: L{unicode}

        @param ctx: an L{OpenSSL.SSL.Context} to use for new connections.
        @type ctx: L{OpenSSL.SSL.Context}.
        """
        self._ctx = ctx
        self._hostname = hostname

        # IP literals are plain ASCII; only DNS names go through IDNA
        # encoding.  The flag also controls SNI and which verification
        # function runs in the info callback below.
        if isIPAddress(hostname) or isIPv6Address(hostname):
            self._hostnameBytes = hostname.encode("ascii")
            self._hostnameIsDnsName = False
        else:
            self._hostnameBytes = _idnaBytes(hostname)
            self._hostnameIsDnsName = True

        self._hostnameASCII = self._hostnameBytes.decode("ascii")
        # Hook the handshake progress callback so identity verification runs
        # as part of every handshake on this context.
        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))

    def clientConnectionForTLS(self, tlsProtocol):
        """
        Create a TLS connection for a client.

        @note: This will call C{set_app_data} on its connection.  If you're
            delegating to this implementation of this method, don't ever call
            C{set_app_data} or C{set_info_callback} on the returned connection,
            or you'll break the implementation of various features of this
            class.

        @param tlsProtocol: the TLS protocol initiating the connection.
        @type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}

        @return: the configured client connection.
        @rtype: L{OpenSSL.SSL.Connection}
        """
        context = self._ctx
        connection = SSL.Connection(context, None)
        # The protocol is stashed as app data so the info callback can reach
        # the transport to fail verification.
        connection.set_app_data(tlsProtocol)
        return connection

    def _identityVerifyingInfoCallback(self, connection, where, ret):
        """
        U{info_callback
        <http://pythonhosted.org/pyOpenSSL/api/ssl.html#OpenSSL.SSL.Context.set_info_callback>
        } for pyOpenSSL that verifies the hostname in the presented certificate
        matches the one passed to this L{ClientTLSOptions}.

        @param connection: the connection which is handshaking.
        @type connection: L{OpenSSL.SSL.Connection}

        @param where: flags indicating progress through a TLS handshake.
        @type where: L{int}

        @param ret: ignored
        @type ret: ignored
        """
        # Literal IPv4 and IPv6 addresses are not permitted
        # as host names according to the RFCs
        if where & SSL.SSL_CB_HANDSHAKE_START and self._hostnameIsDnsName:
            # Send SNI at handshake start (DNS names only).
            connection.set_tlsext_host_name(self._hostnameBytes)
        elif where & SSL.SSL_CB_HANDSHAKE_DONE:
            # Handshake finished: check the peer's certificate against the
            # expected identity, choosing the check by hostname kind.
            try:
                if self._hostnameIsDnsName:
                    verifyHostname(connection, self._hostnameASCII)
                else:
                    verifyIPAddress(connection, self._hostnameASCII)
            except VerificationError:
                f = Failure()
                transport = connection.get_app_data()
                transport.failVerification(f)
+
+
def optionsForClientTLS(
    hostname,
    trustRoot=None,
    clientCertificate=None,
    acceptableProtocols=None,
    *,
    extraCertificateOptions=None,
):
    """
    Create a L{client connection creator <IOpenSSLClientConnectionCreator>} for
    use with APIs such as L{SSL4ClientEndpoint
    <twisted.internet.endpoints.SSL4ClientEndpoint>}, L{connectSSL
    <twisted.internet.interfaces.IReactorSSL.connectSSL>}, and L{startTLS
    <twisted.internet.interfaces.ITLSTransport.startTLS>}.

    @since: 14.0

    @param hostname: The expected name of the remote host.  This serves two
        purposes: first, and most importantly, it verifies that the certificate
        received from the server correctly identifies the specified hostname.
        The second purpose is to use the U{Server Name Indication extension
        <https://en.wikipedia.org/wiki/Server_Name_Indication>} to indicate to
        the server which certificate should be used.
    @type hostname: L{unicode}

    @param trustRoot: Specification of trust requirements of peers.  This may be
        a L{Certificate} or the result of L{platformTrust}.  By default it is
        L{platformTrust} and you probably shouldn't adjust it unless you really
        know what you're doing.  Be aware that clients using this interface
        I{must} verify the server; you cannot explicitly pass L{None} since
        that just means to use L{platformTrust}.
    @type trustRoot: L{IOpenSSLTrustRoot}

    @param clientCertificate: The certificate and private key that the client
        will use to authenticate to the server.  If unspecified, the client will
        not authenticate.
    @type clientCertificate: L{PrivateCertificate}

    @param acceptableProtocols: The protocols this peer is willing to speak
        after the TLS negotiation has completed, advertised over both ALPN and
        NPN.  If this argument is specified, and no overlap can be found with
        the other peer, the connection will fail to be established.  If the
        remote peer does not offer NPN or ALPN, the connection will be
        established, but no protocol wil be negotiated.  Protocols earlier in
        the list are preferred over those later in the list.
    @type acceptableProtocols: L{list} of L{bytes}

    @param extraCertificateOptions: A dictionary of additional keyword arguments
        to be presented to L{CertificateOptions}.  Please avoid using this unless
        you absolutely need to; any time you need to pass an option here that is
        a bug in this interface.  The dictionary is not modified by this
        function.
    @type extraCertificateOptions: L{dict}

    @return: A client connection creator.
    @rtype: L{IOpenSSLClientConnectionCreator}

    @raise TypeError: if C{hostname} is not a text string.
    """
    if extraCertificateOptions is None:
        extraCertificateOptions = {}
    else:
        # Copy before the update() below so the caller's dict is never
        # mutated as a side effect of calling this function.
        extraCertificateOptions = dict(extraCertificateOptions)
    if trustRoot is None:
        trustRoot = platformTrust()
    if not isinstance(hostname, str):
        raise TypeError(
            "optionsForClientTLS requires text for host names, not "
            + hostname.__class__.__name__
        )
    if clientCertificate:
        extraCertificateOptions.update(
            privateKey=clientCertificate.privateKey.original,
            certificate=clientCertificate.original,
        )
    certificateOptions = OpenSSLCertificateOptions(
        trustRoot=trustRoot,
        acceptableProtocols=acceptableProtocols,
        **extraCertificateOptions,
    )
    return ClientTLSOptions(hostname, certificateOptions.getContext())
+
+
+@implementer(IOpenSSLContextFactory)
+class OpenSSLCertificateOptions:
+ """
+ A L{CertificateOptions <twisted.internet.ssl.CertificateOptions>} specifies
+ the security properties for a client or server TLS connection used with
+ OpenSSL.
+
+ @ivar _options: Any option flags to set on the L{OpenSSL.SSL.Context}
+ object that will be created.
+ @type _options: L{int}
+
+ @ivar _cipherString: An OpenSSL-specific cipher string.
+ @type _cipherString: L{unicode}
+
+ @ivar _defaultMinimumTLSVersion: The default TLS version that will be
+ negotiated. This should be a "safe default", with wide client and
+ server support, vs an optimally secure one that excludes a large number
+ of users. As of May 2022, TLSv1.2 is that safe default.
+ @type _defaultMinimumTLSVersion: L{TLSVersion} constant
+ """
+
+ # Factory for creating contexts. Configurable for testability.
+ _contextFactory = SSL.Context
+ _context = None
+
+ _OP_NO_TLSv1_3 = _tlsDisableFlags[TLSVersion.TLSv1_3]
+
+ _defaultMinimumTLSVersion = TLSVersion.TLSv1_2
+
+ @_mutuallyExclusiveArguments(
+ [
+ ["trustRoot", "requireCertificate"],
+ ["trustRoot", "verify"],
+ ["trustRoot", "caCerts"],
+ ["method", "insecurelyLowerMinimumTo"],
+ ["method", "raiseMinimumTo"],
+ ["raiseMinimumTo", "insecurelyLowerMinimumTo"],
+ ["method", "lowerMaximumSecurityTo"],
+ ]
+ )
+ def __init__(
+ self,
+ privateKey=None,
+ certificate=None,
+ method=None,
+ verify=False,
+ caCerts=None,
+ verifyDepth=9,
+ requireCertificate=True,
+ verifyOnce=True,
+ enableSingleUseKeys=True,
+ enableSessions=False,
+ fixBrokenPeers=False,
+ enableSessionTickets=False,
+ extraCertChain=None,
+ acceptableCiphers=None,
+ dhParameters=None,
+ trustRoot=None,
+ acceptableProtocols=None,
+ raiseMinimumTo=None,
+ insecurelyLowerMinimumTo=None,
+ lowerMaximumSecurityTo=None,
+ ):
+ """
+ Create an OpenSSL context SSL connection context factory.
+
+ @param privateKey: A PKey object holding the private key.
+
+ @param certificate: An X509 object holding the certificate.
+
+ @param method: Deprecated, use a combination of
+ C{insecurelyLowerMinimumTo}, C{raiseMinimumTo}, or
+ C{lowerMaximumSecurityTo} instead. The SSL protocol to use, one of
+ C{TLS_METHOD}, C{TLSv1_2_METHOD}, or C{TLSv1_2_METHOD} (or any
+ future method constants provided by pyOpenSSL). By default, a
+ setting will be used which allows TLSv1.2 and TLSv1.3. Can not be
+ used with C{insecurelyLowerMinimumTo}, C{raiseMinimumTo}, or
+ C{lowerMaximumSecurityTo}.
+
+ @param verify: Please use a C{trustRoot} keyword argument instead,
+ since it provides the same functionality in a less error-prone way.
+ By default this is L{False}.
+
+ If L{True}, verify certificates received from the peer and fail the
+ handshake if verification fails. Otherwise, allow anonymous
+ sessions and sessions with certificates which fail validation.
+
+ @param caCerts: Please use a C{trustRoot} keyword argument instead,
+ since it provides the same functionality in a less error-prone way.
+
+ List of certificate authority certificate objects to use to verify
+ the peer's certificate. Only used if verify is L{True} and will be
+ ignored otherwise. Since verify is L{False} by default, this is
+ L{None} by default.
+
+ @type caCerts: L{list} of L{OpenSSL.crypto.X509}
+
+ @param verifyDepth: Depth in certificate chain down to which to verify.
+ If unspecified, use the underlying default (9).
+
+ @param requireCertificate: Please use a C{trustRoot} keyword argument
+ instead, since it provides the same functionality in a less
+ error-prone way.
+
+ If L{True}, do not allow anonymous sessions; defaults to L{True}.
+
+ @param verifyOnce: If True, do not re-verify the certificate on session
+ resumption.
+
+ @param enableSingleUseKeys: If L{True}, generate a new key whenever
+ ephemeral DH and ECDH parameters are used to prevent small subgroup
+ attacks and to ensure perfect forward secrecy.
+
+ @param enableSessions: This allows a shortened handshake to be used
+ when a known client reconnects to the same process. If True,
+ enable OpenSSL's session caching. Note that session caching only
+ works on a single Twisted node at once. Also, it is currently
+ somewhat risky due to U{a crashing bug when using OpenSSL 1.1.1
+ <https://twistedmatrix.com/trac/ticket/9764>}.
+
+ @param fixBrokenPeers: If True, enable various non-spec protocol fixes
+ for broken SSL implementations. This should be entirely safe,
+ according to the OpenSSL documentation, but YMMV. This option is
+ now off by default, because it causes problems with connections
+ between peers using OpenSSL 0.9.8a.
+
+ @param enableSessionTickets: If L{True}, enable session ticket
+ extension for session resumption per RFC 5077. Note there is no
+ support for controlling session tickets. This option is off by
+ default, as some server implementations don't correctly process
+ incoming empty session ticket extensions in the hello.
+
+ @param extraCertChain: List of certificates that I{complete} your
+ verification chain if the certificate authority that signed your
+ C{certificate} isn't widely supported. Do I{not} add
+ C{certificate} to it.
+ @type extraCertChain: C{list} of L{OpenSSL.crypto.X509}
+
+ @param acceptableCiphers: Ciphers that are acceptable for connections.
+ Uses a secure default if left L{None}.
+ @type acceptableCiphers: L{IAcceptableCiphers}
+
+ @param dhParameters: Key generation parameters that are required for
+ Diffie-Hellman key exchange. If this argument is left L{None},
+ C{EDH} ciphers are I{disabled} regardless of C{acceptableCiphers}.
+ @type dhParameters: L{DiffieHellmanParameters
+ <twisted.internet.ssl.DiffieHellmanParameters>}
+
+ @param trustRoot: Specification of trust requirements of peers. If
+ this argument is specified, the peer is verified. It requires a
+ certificate, and that certificate must be signed by one of the
+ certificate authorities specified by this object.
+
+ Note that since this option specifies the same information as
+ C{caCerts}, C{verify}, and C{requireCertificate}, specifying any of
+ those options in combination with this one will raise a
+ L{TypeError}.
+
+ @type trustRoot: L{IOpenSSLTrustRoot}
+
+ @param acceptableProtocols: The protocols this peer is willing to speak
+ after the TLS negotiation has completed, advertised over both ALPN
+ and NPN. If this argument is specified, and no overlap can be
+ found with the other peer, the connection will fail to be
+ established. If the remote peer does not offer NPN or ALPN, the
+ connection will be established, but no protocol wil be negotiated.
+ Protocols earlier in the list are preferred over those later in the
+ list.
+ @type acceptableProtocols: L{list} of L{bytes}
+
+ @param raiseMinimumTo: The minimum TLS version that you want to use, or
+ Twisted's default if it is higher. Use this if you want to make
+ your client/server more secure than Twisted's default, but will
+ accept Twisted's default instead if it moves higher than this
+ value. You probably want to use this over
+ C{insecurelyLowerMinimumTo}.
+ @type raiseMinimumTo: L{TLSVersion} constant
+
+ @param insecurelyLowerMinimumTo: The minimum TLS version to use,
+ possibly lower than Twisted's default. If not specified, it is a
+ generally considered safe default (TLSv1.0). If you want to raise
+ your minimum TLS version to above that of this default, use
+ C{raiseMinimumTo}. DO NOT use this argument unless you are
+ absolutely sure this is what you want.
+ @type insecurelyLowerMinimumTo: L{TLSVersion} constant
+
+ @param lowerMaximumSecurityTo: The maximum TLS version to use. If not
+ specified, it is the most recent your OpenSSL supports. You only
+ want to set this if the peer that you are communicating with has
+ problems with more recent TLS versions, it lowers your security
+ when communicating with newer peers. DO NOT use this argument
+ unless you are absolutely sure this is what you want.
+ @type lowerMaximumSecurityTo: L{TLSVersion} constant
+
+ @raise ValueError: when C{privateKey} or C{certificate} are set without
+ setting the respective other.
+ @raise ValueError: when C{verify} is L{True} but C{caCerts} doesn't
+ specify any CA certificates.
+ @raise ValueError: when C{extraCertChain} is passed without specifying
+ C{privateKey} or C{certificate}.
+ @raise ValueError: when C{acceptableCiphers} doesn't yield any usable
+ ciphers for the current platform.
+
+ @raise TypeError: if C{trustRoot} is passed in combination with
+ C{caCert}, C{verify}, or C{requireCertificate}. Please prefer
+ C{trustRoot} in new code, as its semantics are less tricky.
+ @raise TypeError: if C{method} is passed in combination with
+ C{tlsProtocols}. Please prefer the more explicit C{tlsProtocols}
+ in new code.
+
+ @raises NotImplementedError: If acceptableProtocols were provided but
+ no negotiation mechanism is available.
+ """
+
+ if (privateKey is None) != (certificate is None):
+ raise ValueError("Specify neither or both of privateKey and certificate")
+ self.privateKey = privateKey
+ self.certificate = certificate
+
+ # Set basic security options: disallow insecure SSLv2, disallow TLS
+ # compression to avoid CRIME attack, make the server choose the
+ # ciphers.
+ self._options = (
+ SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION | SSL.OP_CIPHER_SERVER_PREFERENCE
+ )
+
+ # Set the mode to Release Buffers, which demallocs send/recv buffers on
+ # idle TLS connections to save memory
+ self._mode = SSL.MODE_RELEASE_BUFFERS
+
+ if method is None:
+ self.method = SSL.TLS_METHOD
+
+ if raiseMinimumTo:
+ if lowerMaximumSecurityTo and raiseMinimumTo > lowerMaximumSecurityTo:
+ raise ValueError(
+ "raiseMinimumTo needs to be lower than "
+ "lowerMaximumSecurityTo"
+ )
+
+ if raiseMinimumTo > self._defaultMinimumTLSVersion:
+ insecurelyLowerMinimumTo = raiseMinimumTo
+
+ if insecurelyLowerMinimumTo is None:
+ insecurelyLowerMinimumTo = self._defaultMinimumTLSVersion
+
+ # If you set the max lower than the default, but don't set the
+ # minimum, pull it down to that
+ if (
+ lowerMaximumSecurityTo
+ and insecurelyLowerMinimumTo > lowerMaximumSecurityTo
+ ):
+ insecurelyLowerMinimumTo = lowerMaximumSecurityTo
+
+ if (
+ lowerMaximumSecurityTo
+ and insecurelyLowerMinimumTo > lowerMaximumSecurityTo
+ ):
+ raise ValueError(
+ "insecurelyLowerMinimumTo needs to be lower than "
+ "lowerMaximumSecurityTo"
+ )
+
+ excludedVersions = _getExcludedTLSProtocols(
+ insecurelyLowerMinimumTo, lowerMaximumSecurityTo
+ )
+
+ for version in excludedVersions:
+ self._options |= _tlsDisableFlags[version]
+ else:
+ warnings.warn(
+ (
+ "Passing method to twisted.internet.ssl.CertificateOptions "
+ "was deprecated in Twisted 17.1.0. Please use a combination "
+ "of insecurelyLowerMinimumTo, raiseMinimumTo, and "
+ "lowerMaximumSecurityTo instead, as Twisted will correctly "
+ "configure the method."
+ ),
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+ # Otherwise respect the application decision.
+ self.method = method
+
+ if verify and not caCerts:
+ raise ValueError(
+ "Specify client CA certificate information if and"
+ " only if enabling certificate verification"
+ )
+ self.verify = verify
+ if extraCertChain is not None and None in (privateKey, certificate):
+ raise ValueError(
+ "A private key and a certificate are required "
+ "when adding a supplemental certificate chain."
+ )
+ if extraCertChain is not None:
+ self.extraCertChain = extraCertChain
+ else:
+ self.extraCertChain = []
+
+ self.caCerts = caCerts
+ self.verifyDepth = verifyDepth
+ self.requireCertificate = requireCertificate
+ self.verifyOnce = verifyOnce
+ self.enableSingleUseKeys = enableSingleUseKeys
+ if enableSingleUseKeys:
+ self._options |= SSL.OP_SINGLE_DH_USE | SSL.OP_SINGLE_ECDH_USE
+ self.enableSessions = enableSessions
+ self.fixBrokenPeers = fixBrokenPeers
+ if fixBrokenPeers:
+ self._options |= SSL.OP_ALL
+ self.enableSessionTickets = enableSessionTickets
+
+ if not enableSessionTickets:
+ self._options |= SSL.OP_NO_TICKET
+ self.dhParameters = dhParameters
+
+ self._ecChooser = _ChooseDiffieHellmanEllipticCurve(
+ SSL.OPENSSL_VERSION_NUMBER,
+ openSSLlib=pyOpenSSLlib,
+ openSSLcrypto=crypto,
+ )
+
+ if acceptableCiphers is None:
+ acceptableCiphers = defaultCiphers
+ # This needs to run when method and _options are finalized.
+ self._cipherString = ":".join(
+ c.fullName
+ for c in acceptableCiphers.selectCiphers(
+ _expandCipherString("ALL", self.method, self._options)
+ )
+ )
+ if self._cipherString == "":
+ raise ValueError(
+ "Supplied IAcceptableCiphers yielded no usable ciphers "
+ "on this platform."
+ )
+
+ if trustRoot is None:
+ if self.verify:
+ trustRoot = OpenSSLCertificateAuthorities(caCerts)
+ else:
+ self.verify = True
+ self.requireCertificate = True
+ trustRoot = IOpenSSLTrustRoot(trustRoot)
+ self.trustRoot = trustRoot
+
+ if acceptableProtocols is not None and not protocolNegotiationMechanisms():
+ raise NotImplementedError(
+ "No support for protocol negotiation on this platform."
+ )
+
+ self._acceptableProtocols = acceptableProtocols
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ try:
+ del d["_context"]
+ except KeyError:
+ pass
+ return d
+
    def __setstate__(self, state):
        # Restore pickled state wholesale; the "_context" entry dropped by
        # __getstate__ is recreated on demand by getContext().
        self.__dict__ = state
+
+ def getContext(self):
+ """
+ Return an L{OpenSSL.SSL.Context} object.
+ """
+ if self._context is None:
+ self._context = self._makeContext()
+ return self._context
+
    def _makeContext(self):
        """
        Build a new L{OpenSSL.SSL.Context} configured from this object's
        attributes.  Called lazily via L{getContext}, which caches the
        result.
        """
        ctx = self._contextFactory(self.method)
        ctx.set_options(self._options)
        ctx.set_mode(self._mode)

        if self.certificate is not None and self.privateKey is not None:
            ctx.use_certificate(self.certificate)
            ctx.use_privatekey(self.privateKey)
            for extraCert in self.extraCertChain:
                ctx.add_extra_chain_cert(extraCert)
            # Sanity check
            ctx.check_privatekey()

        # Assemble the peer-verification flags from the individual
        # verify/requireCertificate/verifyOnce switches; CA certificates are
        # only installed when verification is actually enabled.
        verifyFlags = SSL.VERIFY_NONE
        if self.verify:
            verifyFlags = SSL.VERIFY_PEER
            if self.requireCertificate:
                verifyFlags |= SSL.VERIFY_FAIL_IF_NO_PEER_CERT
            if self.verifyOnce:
                verifyFlags |= SSL.VERIFY_CLIENT_ONCE
            self.trustRoot._addCACertsToContext(ctx)

        ctx.set_verify(verifyFlags)
        if self.verifyDepth is not None:
            ctx.set_verify_depth(self.verifyDepth)

        # Until we know what's going on with
        # https://twistedmatrix.com/trac/ticket/9764 let's be conservative
        # in naming this; ASCII-only, short, as the recommended value (a
        # hostname) might be:
        sessionIDContext = hexlify(secureRandom(7))
        # Note that this doesn't actually set the session ID (which had
        # better be per-connection anyway!):
        # https://github.com/pyca/pyopenssl/issues/845

        # This is set unconditionally because it's apparently required for
        # client certificates to work:
        # https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_set_session_id_context.html
        ctx.set_session_id(sessionIDContext)

        if self.enableSessions:
            ctx.set_session_cache_mode(SSL.SESS_CACHE_SERVER)
        else:
            ctx.set_session_cache_mode(SSL.SESS_CACHE_OFF)

        if self.dhParameters:
            ctx.load_tmp_dh(self.dhParameters._dhFile.path)
        ctx.set_cipher_list(self._cipherString.encode("ascii"))

        self._ecChooser.configureECDHCurve(ctx)

        if self._acceptableProtocols:
            # Try to set NPN and ALPN. _acceptableProtocols cannot be set by
            # the constructor unless at least one mechanism is supported.
            _setAcceptableProtocols(ctx, self._acceptableProtocols)

        return ctx
+
+
# Deprecate pickling of OpenSSLCertificateOptions: the wrapped OpenSSL state
# cannot be meaningfully persisted, so steer users toward a real persistence
# system instead.
OpenSSLCertificateOptions.__getstate__ = deprecated(
    Version("Twisted", 15, 0, 0), "a real persistence system"
)(OpenSSLCertificateOptions.__getstate__)
OpenSSLCertificateOptions.__setstate__ = deprecated(
    Version("Twisted", 15, 0, 0), "a real persistence system"
)(OpenSSLCertificateOptions.__setstate__)
+
+
@implementer(ICipher)
@attr.s(frozen=True, auto_attribs=True)
class OpenSSLCipher:
    """
    A representation of an OpenSSL cipher.

    Frozen (immutable and hashable), so instances can safely be cached by
    the C{lru_cache}-decorated helpers in this module.

    @ivar fullName: The full name of the cipher. For example
        C{u"ECDHE-RSA-AES256-GCM-SHA384"}.
    @type fullName: L{unicode}
    """

    fullName: str
+
+
@lru_cache(maxsize=32)
def _expandCipherString(cipherString, method, options):
    """
    Expand C{cipherString} according to C{method} and C{options} to a tuple of
    explicit ciphers that are supported by the current platform.

    @param cipherString: An OpenSSL cipher string to expand.
    @type cipherString: L{unicode}

    @param method: An OpenSSL method like C{SSL.TLS_METHOD} used for
        determining the effective ciphers.

    @param options: OpenSSL options like C{SSL.OP_NO_SSLv3} ORed together.
    @type options: L{int}

    @return: The effective list of explicit ciphers that results from the
        arguments on the current platform.
    @rtype: L{tuple} of L{ICipher}
    """
    # Build a throwaway context purely to let OpenSSL resolve the cipher
    # string against what this platform actually supports.
    ctx = SSL.Context(method)
    ctx.set_options(options)
    try:
        ctx.set_cipher_list(cipherString.encode("ascii"))
    except SSL.Error as e:
        # OpenSSL 1.1.1 turns an invalid cipher list into TLS 1.3
        # ciphers, so pyOpenSSL >= 19.0.0 raises an artificial Error
        # that lacks a corresponding OpenSSL error if the cipher list
        # consists only of these after a call to set_cipher_list.
        if not e.args[0]:
            return tuple()
        if e.args[0][0][2] == "no cipher match":
            return tuple()
        else:
            raise
    conn = SSL.Connection(ctx, None)
    ciphers = conn.get_cipher_list()
    # NOTE(review): assumes get_cipher_list() is non-empty once
    # set_cipher_list() succeeded — confirm against pyOpenSSL, otherwise
    # ciphers[0] would raise IndexError.
    # Older pyOpenSSL returned bytes; decode when needed.
    if isinstance(ciphers[0], str):
        return tuple(OpenSSLCipher(cipher) for cipher in ciphers)
    else:
        return tuple(OpenSSLCipher(cipher.decode("ascii")) for cipher in ciphers)
+
+
@lru_cache(maxsize=128)
def _selectCiphers(wantedCiphers, availableCiphers):
    """
    Calculate the acceptable list of ciphers: the subset of
    C{wantedCiphers} that is present in C{availableCiphers}, preserving
    the preference order of C{wantedCiphers}.

    @param wantedCiphers: The ciphers we want to use.
    @type wantedCiphers: L{tuple} of L{OpenSSLCipher}

    @param availableCiphers: The ciphers we have available to use.
    @type availableCiphers: L{tuple} of L{OpenSSLCipher}

    @rtype: L{tuple} of L{OpenSSLCipher}
    """
    # Both arguments are already hashable (lru_cache requires it), so a
    # frozenset gives O(1) membership tests with identical semantics.
    usable = frozenset(availableCiphers)
    return tuple(candidate for candidate in wantedCiphers if candidate in usable)
+
+
@implementer(IAcceptableCiphers)
class OpenSSLAcceptableCiphers:
    """
    A representation of ciphers that are acceptable for TLS connections.
    """

    def __init__(self, ciphers):
        # Keep an immutable copy so instances are safe to share and to use
        # as cache keys.
        self._ciphers = tuple(ciphers)

    def selectCiphers(self, availableCiphers):
        """
        Return the subset of our ciphers found in C{availableCiphers},
        in our preference order.
        """
        return _selectCiphers(self._ciphers, tuple(availableCiphers))

    @classmethod
    def fromOpenSSLCipherString(cls, cipherString):
        """
        Create a new instance using an OpenSSL cipher string.

        @param cipherString: An OpenSSL cipher string that describes what
            cipher suites are acceptable.
            See the documentation of U{OpenSSL
            <http://www.openssl.org/docs/apps/ciphers.html#CIPHER_STRINGS>} or
            U{Apache
            <http://httpd.apache.org/docs/2.4/mod/mod_ssl.html#sslciphersuite>}
            for details.
        @type cipherString: L{unicode}

        @return: Instance representing C{cipherString}.
        @rtype: L{twisted.internet.ssl.AcceptableCiphers}
        """
        expanded = _expandCipherString(
            nativeString(cipherString),
            SSL.TLS_METHOD,
            SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3,
        )
        return cls(expanded)
+
+
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
#   security,
# - prefer AES-GCM to ChaCha20 because AES hardware support is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
#
defaultCiphers = OpenSSLAcceptableCiphers.fromOpenSSLCipherString(
    "TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:"
    "TLS13-AES-128-GCM-SHA256:"
    "ECDH+AESGCM:ECDH+CHACHA20:DH+AESGCM:DH+CHACHA20:ECDH+AES256:DH+AES256:"
    "ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:"
    "!aNULL:!MD5:!DSS"
)
# ECDH curve used by the OpenSSL 1.0.1 fallback path below
# (_ChooseDiffieHellmanEllipticCurve); "prime256v1" is OpenSSL's name for
# the NIST P-256 curve.
_defaultCurveName = "prime256v1"
+
+
class _ChooseDiffieHellmanEllipticCurve:
    """
    Chooses the best elliptic curve for Elliptic Curve Diffie-Hellman
    key exchange, and provides a C{configureECDHCurve} method to set
    the curve, when appropriate, on a new L{OpenSSL.SSL.Context}.

    The C{configureECDHCurve} method will be set to one of the
    following based on the provided OpenSSL version and configuration:

    - L{_configureOpenSSL110}

    - L{_configureOpenSSL102}

    - L{_configureOpenSSL101}

    - L{_configureOpenSSL101NoCurves}.

    @param openSSLVersion: The OpenSSL version number.
    @type openSSLVersion: L{int}

    @see: L{OpenSSL.SSL.OPENSSL_VERSION_NUMBER}

    @param openSSLlib: The OpenSSL C{cffi} library module.
    @param openSSLcrypto: The OpenSSL L{crypto} module.

    @see: L{crypto}
    """

    def __init__(self, openSSLVersion, openSSLlib, openSSLcrypto):
        self._openSSLlib = openSSLlib
        self._openSSLcrypto = openSSLcrypto
        # Version numbers are OpenSSL's packed hex format:
        # 0x10100000 is 1.1.0, 0x10002000 is 1.0.2.
        if openSSLVersion >= 0x10100000:
            self.configureECDHCurve = self._configureOpenSSL110
        elif openSSLVersion >= 0x10002000:
            self.configureECDHCurve = self._configureOpenSSL102
        else:
            try:
                self._ecCurve = openSSLcrypto.get_elliptic_curve(_defaultCurveName)
            except ValueError:
                # The get_elliptic_curve method raises a ValueError
                # when the curve does not exist.
                self.configureECDHCurve = self._configureOpenSSL101NoCurves
            else:
                self.configureECDHCurve = self._configureOpenSSL101

    def _configureOpenSSL110(self, ctx):
        """
        OpenSSL 1.1.0 Contexts are preconfigured with an optimal set
        of ECDH curves. This method does nothing.

        @param ctx: L{OpenSSL.SSL.Context}
        """

    def _configureOpenSSL102(self, ctx):
        """
        Have the context automatically choose elliptic curves for
        ECDH. Run on OpenSSL 1.0.2 and OpenSSL 1.1.0+, but only has
        an effect on OpenSSL 1.0.2.

        @param ctx: The context to configure.
        @type ctx: L{OpenSSL.SSL.Context}
        """
        ctxPtr = ctx._context
        try:
            self._openSSLlib.SSL_CTX_set_ecdh_auto(ctxPtr, True)
        except BaseException:
            # Deliberate best-effort: ECDH configuration failure degrades
            # cipher choice but must not prevent the context from working.
            pass

    def _configureOpenSSL101(self, ctx):
        """
        Set the default elliptic curve for ECDH on the context. Only
        run on OpenSSL 1.0.1.

        @param ctx: The context on which to set the ECDH curve.
        @type ctx: L{OpenSSL.SSL.Context}
        """
        try:
            ctx.set_tmp_ecdh(self._ecCurve)
        except BaseException:
            # Deliberate best-effort, as above.
            pass

    def _configureOpenSSL101NoCurves(self, ctx):
        """
        No elliptic curves are available on OpenSSL 1.0.1. We can't
        set anything, so do nothing.

        @param ctx: The context on which to set the ECDH curve.
        @type ctx: L{OpenSSL.SSL.Context}
        """
+
+
class OpenSSLDiffieHellmanParameters:
    """
    A representation of key generation parameters that are required for
    Diffie-Hellman key exchange.
    """

    def __init__(self, parameters):
        # The path object is stored as-is; OpenSSL reads the file only
        # when the context is configured (see _makeContext).
        self._dhFile = parameters

    @classmethod
    def fromFile(cls, filePath):
        """
        Load parameters from a file.

        Such a file can be generated using the C{openssl} command line tool as
        following:

        C{openssl dhparam -out dh_param_2048.pem -2 2048}

        Please refer to U{OpenSSL's C{dhparam} documentation
        <http://www.openssl.org/docs/apps/dhparam.html>} for further details.

        @param filePath: A file containing parameters for Diffie-Hellman key
            exchange.
        @type filePath: L{FilePath <twisted.python.filepath.FilePath>}

        @return: An instance that loads its parameters from C{filePath}.
        @rtype: L{DiffieHellmanParameters
            <twisted.internet.ssl.DiffieHellmanParameters>}
        """
        return cls(filePath)
+
+
def _setAcceptableProtocols(context, acceptableProtocols):
    """
    Called to set up the L{OpenSSL.SSL.Context} for doing NPN and/or ALPN
    negotiation.

    @param context: The context which is set up.
    @type context: L{OpenSSL.SSL.Context}

    @param acceptableProtocols: The protocols this peer is willing to speak
        after the TLS negotiation has completed, advertised over both ALPN and
        NPN. If this argument is specified, and no overlap can be found with
        the other peer, the connection will fail to be established. If the
        remote peer does not offer NPN or ALPN, the connection will be
        established, but no protocol will be negotiated. Protocols earlier in
        the list are preferred over those later in the list.
    @type acceptableProtocols: L{list} of L{bytes}
    """

    def protoSelectCallback(conn, protocols):
        """
        NPN client-side and ALPN server-side callback used to select
        the next protocol. Prefers protocols found earlier in
        C{_acceptableProtocols}.

        @param conn: The connection being negotiated.
        @type conn: L{OpenSSL.SSL.Connection}

        @param protocols: Protocols advertised by the other side.
        @type protocols: L{list} of L{bytes}
        """
        overlap = set(protocols) & set(acceptableProtocols)

        # Return the first of *our* protocols that the peer also offers;
        # an empty byte string signals "no common protocol" to OpenSSL.
        for p in acceptableProtocols:
            if p in overlap:
                return p
        else:
            return b""

    # If we don't actually have protocols to negotiate, don't set anything up.
    # Depending on OpenSSL version, failing some of the selection callbacks can
    # cause the handshake to fail, which is presumably not what was intended
    # here.
    if not acceptableProtocols:
        return

    supported = protocolNegotiationMechanisms()

    if supported & ProtocolNegotiationSupport.NPN:

        def npnAdvertiseCallback(conn):
            return acceptableProtocols

        context.set_npn_advertise_callback(npnAdvertiseCallback)
        context.set_npn_select_callback(protoSelectCallback)

    if supported & ProtocolNegotiationSupport.ALPN:
        context.set_alpn_select_callback(protoSelectCallback)
        context.set_alpn_protos(acceptableProtocols)
diff --git a/contrib/python/Twisted/py3/twisted/internet/_threadedselect.py b/contrib/python/Twisted/py3/twisted/internet/_threadedselect.py
new file mode 100644
index 0000000000..8a53e4ca96
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_threadedselect.py
@@ -0,0 +1,337 @@
+# -*- test-case-name: twisted.test.test_internet -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Threaded select reactor
+
+The threadedselectreactor is a specialized reactor for integrating with
+an arbitrary foreign event loop, such as those you find in GUI toolkits.
+
+There are three things you'll need to do to use this reactor.
+
+Install the reactor at the beginning of your program, before importing
+the rest of Twisted::
+
+ | from twisted.internet import _threadedselect
+ | _threadedselect.install()
+
+Interleave this reactor with your foreign event loop, at some point after
+your event loop is initialized::
+
+ | from twisted.internet import reactor
+ | reactor.interleave(foreignEventLoopWakerFunction)
+ | self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop)
+
+Instead of shutting down the foreign event loop directly, shut down the
+reactor::
+
+ | from twisted.internet import reactor
+ | reactor.stop()
+
+In order for Twisted to do its work in the main thread (the thread that
+interleave is called from), a waker function is necessary. The waker function
+will be called from a "background" thread with one argument: func.
+The waker function's purpose is to call func() from the main thread.
+Many GUI toolkits ship with appropriate waker functions.
+Some examples of this are wxPython's wx.callAfter (may be wxCallAfter in
+older versions of wxPython) or PyObjC's PyObjCTools.AppHelper.callAfter.
+These would be used in place of "foreignEventLoopWakerFunction" in the above
+example.
+
+The other integration point at which the foreign event loop and this reactor
+must integrate is shutdown. In order to ensure clean shutdown of Twisted,
+you must allow for Twisted to come to a complete stop before quitting the
+application. Typically, you will do this by setting up an after shutdown
+trigger to stop your foreign event loop, and call reactor.stop() where you
+would normally have initiated the shutdown procedure for the foreign event
+loop. Shutdown functions that could be used in place of
+"foreignEventLoopStop" would be the ExitMainLoop method of the wxApp instance
+with wxPython, or the PyObjCTools.AppHelper.stopEventLoop function.
+"""
+
+import select
+import sys
+from errno import EBADF, EINTR
+from functools import partial
+from queue import Empty, Queue
+from threading import Thread
+
+from zope.interface import implementer
+
+from twisted.internet import posixbase
+from twisted.internet.interfaces import IReactorFDSet
+from twisted.internet.posixbase import _NO_FILEDESC, _NO_FILENO
+from twisted.internet.selectreactor import _select
+from twisted.python import failure, log, threadable
+
+
def dictRemove(dct, value):
    """
    Remove C{value} from the dict C{dct} if present; do nothing when it is
    absent.
    """
    dct.pop(value, None)
+
+
def raiseException(e):
    # Queued as a job for the worker thread so it raises (e.g. SystemExit
    # to terminate it); see _mainLoopShutdown.
    raise e
+
+
@implementer(IReactorFDSet)
class ThreadedSelectReactor(posixbase.PosixReactorBase):
    """A threaded select() based reactor - runs on all POSIX platforms and on
    Win32.

    The blocking select() call is delegated to a worker thread; results are
    handed back to the main thread through a pair of queues so this reactor
    can be interleaved with a foreign (e.g. GUI) event loop.
    """

    def __init__(self):
        threadable.init(1)
        # Registered selectables; the dicts are used as sets (values are
        # dummy markers).
        self.reads = {}
        self.writes = {}
        # Jobs for the worker thread / results for the main thread.
        self.toThreadQueue = Queue()
        self.toMainThread = Queue()
        self.workerThread = None
        self.mainWaker = None
        posixbase.PosixReactorBase.__init__(self)
        self.addSystemEventTrigger("after", "shutdown", self._mainLoopShutdown)

    def wakeUp(self):
        # we want to wake up from any thread
        self.waker.wakeUp()

    def callLater(self, *args, **kw):
        tple = posixbase.PosixReactorBase.callLater(self, *args, **kw)
        # Wake the select thread so the new timed call's deadline is
        # taken into account.
        self.wakeUp()
        return tple

    def _sendToMain(self, msg, *args):
        # Queue a message for the main thread and poke the foreign event
        # loop (when interleaved) so it gets processed promptly.
        self.toMainThread.put((msg, args))
        if self.mainWaker is not None:
            self.mainWaker()

    def _sendToThread(self, fn, *args):
        # Queue a job for the worker thread.
        self.toThreadQueue.put((fn, args))

    def _preenDescriptorsInThread(self):
        log.msg("Malformed file descriptor found. Preening lists.")
        # Snapshot the keys before clearing: in Python 3, dict.keys()
        # returns a *live* view, so clearing the dicts first would leave
        # nothing to iterate and the preen would silently do nothing.
        readers = list(self.reads.keys())
        writers = list(self.writes.keys())
        self.reads.clear()
        self.writes.clear()
        for selDict, selList in ((self.reads, readers), (self.writes, writers)):
            for selectable in selList:
                try:
                    select.select([selectable], [selectable], [selectable], 0)
                except BaseException:
                    log.msg("bad descriptor %s" % selectable)
                else:
                    selDict[selectable] = 1

    def _workerInThread(self):
        # Worker thread main loop: execute queued jobs until SystemExit is
        # raised (via raiseException), forwarding other failures to the
        # main thread.
        try:
            while 1:
                fn, args = self.toThreadQueue.get()
                fn(*args)
        except SystemExit:
            pass  # Exception indicates this thread should exit
        except BaseException:
            f = failure.Failure()
            self._sendToMain("Failure", f)

    def _doSelectInThread(self, timeout):
        """Run one iteration of the I/O monitor loop.

        This will run all selectables who had input or output readiness
        waiting for them.
        """
        reads = self.reads
        writes = self.writes
        while 1:
            try:
                r, w, ignored = _select(reads.keys(), writes.keys(), [], timeout)
                break
            except ValueError:
                # Possibly a file descriptor has gone negative?
                log.err()
                self._preenDescriptorsInThread()
            except TypeError:
                # Something *totally* invalid (object w/o fileno, non-integral
                # result) was passed
                log.err()
                self._preenDescriptorsInThread()
            except OSError as se:
                # select(2) encountered an error
                if se.args[0] in (0, 2):
                    # windows does this if it got an empty list
                    if (not reads) and (not writes):
                        return
                    else:
                        raise
                elif se.args[0] == EINTR:
                    return
                elif se.args[0] == EBADF:
                    self._preenDescriptorsInThread()
                else:
                    # OK, I really don't know what's going on.  Blow up.
                    raise
        self._sendToMain("Notify", r, w)

    def _process_Notify(self, r, w):
        # Main-thread handler for readiness results from the worker.
        reads = self.reads
        writes = self.writes

        _drdw = self._doReadOrWrite
        _logrun = log.callWithLogger
        for selectables, method, dct in ((r, "doRead", reads), (w, "doWrite", writes)):
            for selectable in selectables:
                # if this was disconnected in another thread, kill it.
                if selectable not in dct:
                    continue
                # This for pausing input when we're not ready for more.
                _logrun(selectable, _drdw, selectable, method, dct)

    def _process_Failure(self, f):
        f.raiseException()

    _doIterationInThread = _doSelectInThread

    def ensureWorkerThread(self):
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling on every Python 3 version.
        if self.workerThread is None or not self.workerThread.is_alive():
            self.workerThread = Thread(target=self._workerInThread)
            self.workerThread.start()

    def doThreadIteration(self, timeout):
        # Ship one select() iteration to the worker and block on its reply.
        self._sendToThread(self._doIterationInThread, timeout)
        self.ensureWorkerThread()
        msg, args = self.toMainThread.get()
        getattr(self, "_process_" + msg)(*args)

    doIteration = doThreadIteration

    def _interleave(self):
        # Generator driven by the foreign event loop's waker: each resume
        # processes one message from the worker thread.
        while self.running:
            self.runUntilCurrent()
            t2 = self.timeout()
            t = self.running and t2
            self._sendToThread(self._doIterationInThread, t)
            yield None
            msg, args = self.toMainThread.get_nowait()
            getattr(self, "_process_" + msg)(*args)

    def interleave(self, waker, *args, **kw):
        """
        interleave(waker) interleaves this reactor with the
        current application by moving the blocking parts of
        the reactor (select() in this case) to a separate
        thread.  This is typically useful for integration with
        GUI applications which have their own event loop
        already running.

        See the module docstring for more information.
        """
        self.startRunning(*args, **kw)
        loop = self._interleave()

        def mainWaker(waker=waker, loop=loop):
            waker(partial(next, loop))

        self.mainWaker = mainWaker
        next(loop)
        self.ensureWorkerThread()

    def _mainLoopShutdown(self):
        # Tear down the worker thread and drain both queues, executing any
        # leftover non-iteration jobs in this thread.
        self.mainWaker = None
        if self.workerThread is not None:
            self._sendToThread(raiseException, SystemExit)
            self.wakeUp()
            try:
                while 1:
                    msg, args = self.toMainThread.get_nowait()
            except Empty:
                pass
            self.workerThread.join()
            self.workerThread = None
        try:
            while 1:
                fn, args = self.toThreadQueue.get_nowait()
                if fn is self._doIterationInThread:
                    log.msg("Iteration is still in the thread queue!")
                elif fn is raiseException and args[0] is SystemExit:
                    pass
                else:
                    fn(*args)
        except Empty:
            pass

    def _doReadOrWrite(self, selectable, method, dict):
        # Invoke doRead/doWrite on the selectable, disconnecting it when
        # the call fails or its file descriptor has become invalid.
        try:
            why = getattr(selectable, method)()
            handfn = getattr(selectable, "fileno", None)
            if not handfn:
                why = _NO_FILENO
            elif handfn() == -1:
                why = _NO_FILEDESC
        except BaseException:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(selectable, why, method == "doRead")

    def addReader(self, reader):
        """Add a FileDescriptor for notification of data available to read."""
        self._sendToThread(self.reads.__setitem__, reader, 1)
        self.wakeUp()

    def addWriter(self, writer):
        """Add a FileDescriptor for notification of data available to write."""
        self._sendToThread(self.writes.__setitem__, writer, 1)
        self.wakeUp()

    def removeReader(self, reader):
        """Remove a Selectable for notification of data available to read."""
        self._sendToThread(dictRemove, self.reads, reader)

    def removeWriter(self, writer):
        """Remove a Selectable for notification of data available to write."""
        self._sendToThread(dictRemove, self.writes, writer)

    def removeAll(self):
        return self._removeAll(self.reads, self.writes)

    def getReaders(self):
        return list(self.reads.keys())

    def getWriters(self):
        return list(self.writes.keys())

    def stop(self):
        """
        Extend the base stop implementation to also wake up the select thread so
        that C{runUntilCurrent} notices the reactor should stop.
        """
        posixbase.PosixReactorBase.stop(self)
        self.wakeUp()

    def run(self, installSignalHandlers=True):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self.mainLoop()

    def mainLoop(self):
        # Self-hosted interleave: use a queue as the "foreign" event loop,
        # executing each queued wake callback until the reactor stops.
        q = Queue()
        self.interleave(q.put)
        while self.running:
            try:
                q.get()()
            except StopIteration:
                break
+
+
def install():
    """
    Install a L{ThreadedSelectReactor} as the global Twisted reactor and
    return it.
    """
    from twisted.internet.main import installReactor

    newReactor = ThreadedSelectReactor()
    installReactor(newReactor)
    return newReactor
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/_win32serialport.py b/contrib/python/Twisted/py3/twisted/internet/_win32serialport.py
new file mode 100644
index 0000000000..2dda4b9816
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_win32serialport.py
@@ -0,0 +1,156 @@
+# -*- test-case-name: twisted.internet.test.test_win32serialport -*-
+
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Serial port support for Windows.
+
+Requires PySerial and pywin32.
+"""
+
+
+import win32event # type: ignore[import]
+import win32file # type: ignore[import]
+
+# system imports
+from serial import PARITY_NONE # type: ignore[import]
+from serial import EIGHTBITS, STOPBITS_ONE
+from serial.serialutil import to_bytes # type: ignore[import]
+
+# twisted imports
+from twisted.internet import abstract
+
+# sibling imports
+from twisted.internet.serialport import BaseSerialPort
+
+
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """A serial device, acting as a transport, that uses a win32 event."""

    # The transport is considered connected as soon as it is constructed.
    connected = 1

    def __init__(
        self,
        protocol,
        deviceNameOrPortNumber,
        reactor,
        baudrate=9600,
        bytesize=EIGHTBITS,
        parity=PARITY_NONE,
        stopbits=STOPBITS_ONE,
        xonxoff=0,
        rtscts=0,
    ):
        """
        Open the serial device and wire its read/write events into the
        reactor.

        @param protocol: the protocol that will receive incoming serial data.
        @param deviceNameOrPortNumber: device name or port number understood
            by PySerial (e.g. C{"COM3"}).
        @param reactor: a reactor supporting C{addEvent}/C{removeEvent}
            (the win32 event reactor).
        """
        self._serial = self._serialFactory(
            deviceNameOrPortNumber,
            baudrate=baudrate,
            bytesize=bytesize,
            parity=parity,
            stopbits=stopbits,
            timeout=None,
            xonxoff=xonxoff,
            rtscts=rtscts,
        )
        self.flushInput()
        self.flushOutput()
        self.reactor = reactor
        self.protocol = protocol
        # Data queued while an overlapped write is outstanding.
        self.outQueue = []
        self.closed = 0
        self.closedNotifies = 0
        self.writeInProgress = 0

        # NOTE(review): duplicate assignment; protocol was already set above.
        self.protocol = protocol
        # Overlapped structures for async I/O: the read event is
        # manual-reset (second argument 1), the write event auto-reset (0).
        self._overlappedRead = win32file.OVERLAPPED()
        self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
        self._overlappedWrite = win32file.OVERLAPPED()
        self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)

        self.reactor.addEvent(self._overlappedRead.hEvent, self, "serialReadEvent")
        self.reactor.addEvent(self._overlappedWrite.hEvent, self, "serialWriteEvent")

        self.protocol.makeConnection(self)
        self._finishPortSetup()

    def _finishPortSetup(self):
        """
        Finish setting up the serial port.

        This is a separate method to facilitate testing.
        """
        # Clear any pending comm error, then start the first overlapped
        # one-byte read; its completion fires serialReadEvent.
        flags, comstat = self._clearCommError()
        rc, self.read_buf = win32file.ReadFile(
            self._serial._port_handle,
            win32file.AllocateReadBuffer(1),
            self._overlappedRead,
        )

    def _clearCommError(self):
        # Returns (error flags, COMSTAT) and resets the device error state.
        return win32file.ClearCommError(self._serial._port_handle)

    def serialReadEvent(self):
        """
        Handle completion of an overlapped read: deliver the received
        bytes to the protocol and start the next overlapped read.
        """
        # get that character we set up
        n = win32file.GetOverlappedResult(
            self._serial._port_handle, self._overlappedRead, 0
        )
        first = to_bytes(self.read_buf[:n])
        # now we should get everything that is already in the buffer
        flags, comstat = self._clearCommError()
        if comstat.cbInQue:
            win32event.ResetEvent(self._overlappedRead.hEvent)
            rc, buf = win32file.ReadFile(
                self._serial._port_handle,
                win32file.AllocateReadBuffer(comstat.cbInQue),
                self._overlappedRead,
            )
            # Wait (1) for this read to finish: the data is already
            # buffered, so this does not block on the device.
            n = win32file.GetOverlappedResult(
                self._serial._port_handle, self._overlappedRead, 1
            )
            # handle all the received data:
            self.protocol.dataReceived(first + to_bytes(buf[:n]))
        else:
            # handle all the received data:
            self.protocol.dataReceived(first)

        # set up next one
        win32event.ResetEvent(self._overlappedRead.hEvent)
        rc, self.read_buf = win32file.ReadFile(
            self._serial._port_handle,
            win32file.AllocateReadBuffer(1),
            self._overlappedRead,
        )

    def write(self, data):
        """
        Write C{data} to the port, queueing it if an overlapped write is
        already in progress.  Empty writes are ignored.
        """
        if data:
            if self.writeInProgress:
                self.outQueue.append(data)
            else:
                self.writeInProgress = 1
                win32file.WriteFile(
                    self._serial._port_handle, data, self._overlappedWrite
                )

    def serialWriteEvent(self):
        """
        Handle completion of an overlapped write: start writing the next
        queued chunk, or mark the writer idle when the queue is empty.
        """
        try:
            dataToWrite = self.outQueue.pop(0)
        except IndexError:
            self.writeInProgress = 0
            return
        else:
            win32file.WriteFile(
                self._serial._port_handle, dataToWrite, self._overlappedWrite
            )

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        self.reactor.removeEvent(self._overlappedRead.hEvent)
        self.reactor.removeEvent(self._overlappedWrite.hEvent)
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)
diff --git a/contrib/python/Twisted/py3/twisted/internet/_win32stdio.py b/contrib/python/Twisted/py3/twisted/internet/_win32stdio.py
new file mode 100644
index 0000000000..f1ac920f60
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/_win32stdio.py
@@ -0,0 +1,127 @@
+# -*- test-case-name: twisted.test.test_stdio -*-
+
+"""
+Windows-specific implementation of the L{twisted.internet.stdio} interface.
+"""
+
+
+import msvcrt
+import os
+
+from zope.interface import implementer
+
+import win32api # type: ignore[import]
+
+from twisted.internet import _pollingfile, main
+from twisted.internet.interfaces import (
+ IAddress,
+ IConsumer,
+ IHalfCloseableProtocol,
+ IPushProducer,
+ ITransport,
+)
+from twisted.python.failure import Failure
+
+
@implementer(IAddress)
class Win32PipeAddress:
    """
    An L{IAddress} for the end of a stdio pipe on Windows.

    Pipes carry no host/port information, so this address has no attributes;
    it exists only so C{getPeer}/C{getHost} can return an L{IAddress}.
    """

    pass
+
+
@implementer(ITransport, IConsumer, IPushProducer)
class StandardIO(_pollingfile._PollingTimer):
    """
    Standard input/output transport for Windows.

    The standard handles are wrapped in pollable pipe objects and serviced by
    the polling machinery in L{_pollingfile._PollingTimer}.
    """

    disconnecting = False
    disconnected = False

    def __init__(self, proto, reactor=None):
        """
        Start talking to standard IO with the given protocol.

        Also, put it stdin/stdout/stderr into binary mode.

        @param proto: The protocol to connect to this transport.
        @param reactor: The reactor driving the polling timers; defaults to
            the global reactor.
        """
        if reactor is None:
            from twisted.internet import reactor

        # Put stdin (0), stdout (1) and stderr (2) into binary mode.  The
        # previous ``range(0, 1, 2)`` yields only [0], so stdout and stderr
        # were left in text mode, contradicting the docstring above.
        for stdfd in (0, 1, 2):
            msvcrt.setmode(stdfd, os.O_BINARY)

        _pollingfile._PollingTimer.__init__(self, reactor)
        self.proto = proto

        hstdin = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
        hstdout = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)

        self.stdin = _pollingfile._PollableReadPipe(
            hstdin, self.dataReceived, self.readConnectionLost
        )

        self.stdout = _pollingfile._PollableWritePipe(hstdout, self.writeConnectionLost)

        self._addPollableResource(self.stdin)
        self._addPollableResource(self.stdout)

        self.proto.makeConnection(self)

    def dataReceived(self, data):
        """Forward bytes read from stdin to the protocol."""
        self.proto.dataReceived(data)

    def readConnectionLost(self):
        """The stdin pipe closed; notify half-closeable protocols."""
        if IHalfCloseableProtocol.providedBy(self.proto):
            self.proto.readConnectionLost()
        self.checkConnLost()

    def writeConnectionLost(self):
        """The stdout pipe closed; notify half-closeable protocols."""
        if IHalfCloseableProtocol.providedBy(self.proto):
            self.proto.writeConnectionLost()
        self.checkConnLost()

    # Number of half-connections (read side, write side) lost so far.
    connsLost = 0

    def checkConnLost(self):
        """
        Fire C{connectionLost} on the protocol once both the read and the
        write side have gone away.
        """
        self.connsLost += 1
        if self.connsLost >= 2:
            self.disconnecting = True
            self.disconnected = True
            self.proto.connectionLost(Failure(main.CONNECTION_DONE))

    # ITransport

    def write(self, data):
        self.stdout.write(data)

    def writeSequence(self, seq):
        self.stdout.write(b"".join(seq))

    def loseConnection(self):
        self.disconnecting = True
        self.stdin.close()
        self.stdout.close()

    def getPeer(self):
        return Win32PipeAddress()

    def getHost(self):
        return Win32PipeAddress()

    # IConsumer

    def registerProducer(self, producer, streaming):
        return self.stdout.registerProducer(producer, streaming)

    def unregisterProducer(self):
        return self.stdout.unregisterProducer()

    # def write() above

    # IProducer

    def stopProducing(self):
        self.stdin.stopProducing()

    # IPushProducer

    def pauseProducing(self):
        self.stdin.pauseProducing()

    def resumeProducing(self):
        self.stdin.resumeProducing()
diff --git a/contrib/python/Twisted/py3/twisted/internet/abstract.py b/contrib/python/Twisted/py3/twisted/internet/abstract.py
new file mode 100644
index 0000000000..45a80383c2
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/abstract.py
@@ -0,0 +1,542 @@
+# -*- test-case-name: twisted.test.test_abstract -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Support for generic select()able objects.
+"""
+
+
+from socket import AF_INET, AF_INET6, inet_pton
+from typing import Iterable, List, Optional
+
+from zope.interface import implementer
+
+from twisted.internet import interfaces, main
+from twisted.python import failure, reflect
+
+# Twisted Imports
+from twisted.python.compat import lazyByteSlice
+
+
+def _dataMustBeBytes(obj):
+ if not isinstance(obj, bytes): # no, really, I mean it
+ raise TypeError("Data must be bytes")
+
+
+# Python 3.4+ can join bytes and memoryviews; using a
+# memoryview prevents the slice from copying
+def _concatenate(bObj, offset, bArray):
+ return b"".join([memoryview(bObj)[offset:]] + bArray)
+
+
+class _ConsumerMixin:
+ """
+ L{IConsumer} implementations can mix this in to get C{registerProducer} and
+ C{unregisterProducer} methods which take care of keeping track of a
+ producer's state.
+
+ Subclasses must provide three attributes which L{_ConsumerMixin} will read
+ but not write:
+
+ - connected: A C{bool} which is C{True} as long as the consumer has
+ someplace to send bytes (for example, a TCP connection), and then
+ C{False} when it no longer does.
+
+ - disconnecting: A C{bool} which is C{False} until something like
+ L{ITransport.loseConnection} is called, indicating that the send buffer
+ should be flushed and the connection lost afterwards. Afterwards,
+ C{True}.
+
+ - disconnected: A C{bool} which is C{False} until the consumer no longer
+ has a place to send bytes, then C{True}.
+
+ Subclasses must also override the C{startWriting} method.
+
+ @ivar producer: L{None} if no producer is registered, otherwise the
+ registered producer.
+
+ @ivar producerPaused: A flag indicating whether the producer is currently
+ paused.
+ @type producerPaused: L{bool}
+
+ @ivar streamingProducer: A flag indicating whether the producer was
+ registered as a streaming (ie push) producer or not (ie a pull
+ producer). This will determine whether the consumer may ever need to
+ pause and resume it, or if it can merely call C{resumeProducing} on it
+ when buffer space is available.
+ @ivar streamingProducer: C{bool} or C{int}
+
+ """
+
+ producer = None
+ producerPaused = False
+ streamingProducer = False
+
+ def startWriting(self):
+ """
+ Override in a subclass to cause the reactor to monitor this selectable
+ for write events. This will be called once in C{unregisterProducer} if
+ C{loseConnection} has previously been called, so that the connection can
+ actually close.
+ """
+ raise NotImplementedError("%r did not implement startWriting")
+
+ def registerProducer(self, producer, streaming):
+ """
+ Register to receive data from a producer.
+
+ This sets this selectable to be a consumer for a producer. When this
+ selectable runs out of data on a write() call, it will ask the producer
+ to resumeProducing(). When the FileDescriptor's internal data buffer is
+ filled, it will ask the producer to pauseProducing(). If the connection
+ is lost, FileDescriptor calls producer's stopProducing() method.
+
+ If streaming is true, the producer should provide the IPushProducer
+ interface. Otherwise, it is assumed that producer provides the
+ IPullProducer interface. In this case, the producer won't be asked to
+ pauseProducing(), but it has to be careful to write() data only when its
+ resumeProducing() method is called.
+ """
+ if self.producer is not None:
+ raise RuntimeError(
+ "Cannot register producer %s, because producer %s was never "
+ "unregistered." % (producer, self.producer)
+ )
+ if self.disconnected:
+ producer.stopProducing()
+ else:
+ self.producer = producer
+ self.streamingProducer = streaming
+ if not streaming:
+ producer.resumeProducing()
+
+ def unregisterProducer(self):
+ """
+ Stop consuming data from a producer, without disconnecting.
+ """
+ self.producer = None
+ if self.connected and self.disconnecting:
+ self.startWriting()
+
+
@implementer(interfaces.ILoggingContext)
class _LogOwner:
    """
    Mixin to help implement L{interfaces.ILoggingContext} for transports which
    have a protocol, the log prefix of which should also appear in the
    transport's log prefix.
    """

    def _getLogPrefix(self, applicationObject: object) -> str:
        """
        Determine the log prefix to use for messages related to
        C{applicationObject}, which may or may not be an
        L{interfaces.ILoggingContext} provider.

        @return: A C{str} giving the log prefix to use.
        """
        if not interfaces.ILoggingContext.providedBy(applicationObject):
            # Fall back to the object's class name when it offers no
            # explicit logging context.
            return applicationObject.__class__.__name__
        return applicationObject.logPrefix()

    def logPrefix(self):
        """
        Override this method to insert custom logging behavior. Its
        return value will be inserted in front of every line. It may
        be called more times than the number of output lines.
        """
        return "-"
+
+
@implementer(
    interfaces.IPushProducer,
    interfaces.IReadWriteDescriptor,
    interfaces.IConsumer,
    interfaces.ITransport,
    interfaces.IHalfCloseableDescriptor,
)
class FileDescriptor(_ConsumerMixin, _LogOwner):
    """
    An object which can be operated on by select().

    This is an abstract superclass of all objects which may be notified when
    they are readable or writable; e.g. they have a file-descriptor that is
    valid to be passed to select(2).
    """

    connected = 0
    disconnected = 0
    disconnecting = 0
    _writeDisconnecting = False
    _writeDisconnected = False
    # dataBuffer holds bytes waiting to be sent; offset is how much of it has
    # already been written by previous doWrite() calls.
    dataBuffer = b""
    offset = 0

    # Once fewer than this many bytes remain pending in dataBuffer, doWrite()
    # folds the accumulated _tempDataBuffer chunks into it.
    SEND_LIMIT = 128 * 1024

    def __init__(self, reactor: Optional[interfaces.IReactorFDSet] = None):
        """
        @param reactor: An L{IReactorFDSet} provider which this descriptor will
            use to get readable and writeable event notifications. If no value
            is given, the global reactor will be used.
        """
        if not reactor:
            from twisted.internet import reactor as _reactor

            reactor = _reactor  # type: ignore[assignment]
        self.reactor = reactor
        # will be added to dataBuffer in doWrite
        self._tempDataBuffer: List[bytes] = []
        self._tempDataLen = 0

    def connectionLost(self, reason):
        """The connection was lost.

        This is called when the connection on a selectable object has been
        lost.  It will be called whether the connection was closed explicitly,
        an exception occurred in an event handler, or the other end of the
        connection closed it first.

        Clean up state here, but make sure to call back up to FileDescriptor.
        """
        self.disconnected = 1
        self.connected = 0
        if self.producer is not None:
            # A producer with nowhere to send data must be told to stop.
            self.producer.stopProducing()
            self.producer = None
        self.stopReading()
        self.stopWriting()

    def writeSomeData(self, data: bytes) -> None:
        """
        Write as much as possible of the given data, immediately.

        This is called to invoke the lower-level writing functionality, such
        as a socket's send() method, or a file's write(); this method
        returns an integer or an exception.  If an integer, it is the number
        of bytes written (possibly zero); if an exception, it indicates the
        connection was lost.

        NOTE(review): the C{-> None} annotation contradicts the documented
        contract above — subclass overrides return an int or an Exception,
        and doWrite() relies on that; confirm and correct the annotation
        upstream.
        """
        raise NotImplementedError(
            "%s does not implement writeSomeData" % reflect.qual(self.__class__)
        )

    def doRead(self):
        """
        Called when data is available for reading.

        Subclasses must override this method. The result will be interpreted
        in the same way as a result of doWrite().
        """
        raise NotImplementedError(
            "%s does not implement doRead" % reflect.qual(self.__class__)
        )

    def doWrite(self):
        """
        Called when data can be written.

        @return: L{None} on success, an exception or a negative integer on
            failure.

        @see: L{twisted.internet.interfaces.IWriteDescriptor.doWrite}.
        """
        if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
            # If there is currently less than SEND_LIMIT bytes left to send
            # in the string, extend it with the array data.
            self.dataBuffer = _concatenate(
                self.dataBuffer, self.offset, self._tempDataBuffer
            )
            self.offset = 0
            self._tempDataBuffer = []
            self._tempDataLen = 0

        # Send as much data as you can.
        if self.offset:
            # lazyByteSlice avoids copying the unsent tail of the buffer.
            l = self.writeSomeData(lazyByteSlice(self.dataBuffer, self.offset))
        else:
            l = self.writeSomeData(self.dataBuffer)

        # There is no writeSomeData implementation in Twisted which returns
        # < 0, but the documentation for writeSomeData used to claim negative
        # integers meant connection lost.  Keep supporting this here,
        # although it may be worth deprecating and removing at some point.
        if isinstance(l, Exception) or l < 0:
            return l
        self.offset += l
        # If there is nothing left to send,
        if self.offset == len(self.dataBuffer) and not self._tempDataLen:
            self.dataBuffer = b""
            self.offset = 0
            # stop writing.
            self.stopWriting()
            # If I've got a producer who is supposed to supply me with data,
            if self.producer is not None and (
                (not self.streamingProducer) or self.producerPaused
            ):
                # tell them to supply some more.
                self.producerPaused = False
                self.producer.resumeProducing()
            elif self.disconnecting:
                # But if I was previously asked to let the connection die, do
                # so.
                return self._postLoseConnection()
            elif self._writeDisconnecting:
                # I was previously asked to half-close the connection.  We
                # set _writeDisconnected before calling handler, in case the
                # handler calls loseConnection(), which will want to check for
                # this attribute.
                self._writeDisconnected = True
                result = self._closeWriteConnection()
                return result
        return None

    def _postLoseConnection(self):
        """Called after a loseConnection(), when all data has been written.

        Whatever this returns is then returned by doWrite.
        """
        # default implementation, telling reactor we're finished
        return main.CONNECTION_DONE

    def _closeWriteConnection(self):
        # override in subclasses
        pass

    def writeConnectionLost(self, reason):
        # in current code should never be called
        self.connectionLost(reason)

    def readConnectionLost(self, reason: failure.Failure) -> None:
        # override in subclasses
        self.connectionLost(reason)

    def getHost(self):
        # ITransport.getHost
        raise NotImplementedError()

    def getPeer(self):
        # ITransport.getPeer
        raise NotImplementedError()

    def _isSendBufferFull(self):
        """
        Determine whether the user-space send buffer for this transport is full
        or not.

        When the buffer contains more than C{self.bufferSize} bytes, it is
        considered full.  This might be improved by considering the size of the
        kernel send buffer and how much of it is free.

        @return: C{True} if it is full, C{False} otherwise.
        """
        return len(self.dataBuffer) + self._tempDataLen > self.bufferSize

    def _maybePauseProducer(self):
        """
        Possibly pause a producer, if there is one and the send buffer is full.
        """
        # If we are responsible for pausing our producer,
        if self.producer is not None and self.streamingProducer:
            # and our buffer is full,
            if self._isSendBufferFull():
                # pause it.
                self.producerPaused = True
                self.producer.pauseProducing()

    def write(self, data: bytes) -> None:
        """Reliably write some data.

        The data is buffered until the underlying file descriptor is ready
        for writing. If there is more than C{self.bufferSize} data in the
        buffer and this descriptor has a registered streaming producer, its
        C{pauseProducing()} method will be called.
        """
        _dataMustBeBytes(data)
        if not self.connected or self._writeDisconnected:
            # Silently drop writes after (half-)disconnection.
            return
        if data:
            self._tempDataBuffer.append(data)
            self._tempDataLen += len(data)
            self._maybePauseProducer()
            self.startWriting()

    def writeSequence(self, iovec: Iterable[bytes]) -> None:
        """
        Reliably write a sequence of data.

        Currently, this is a convenience method roughly equivalent to::

            for chunk in iovec:
                fd.write(chunk)

        It may have a more efficient implementation at a later time or in a
        different reactor.

        As with the C{write()} method, if a buffer size limit is reached and a
        streaming producer is registered, it will be paused until the buffered
        data is written to the underlying file descriptor.
        """
        # Validate every chunk before buffering any of them.
        for i in iovec:
            _dataMustBeBytes(i)
        if not self.connected or not iovec or self._writeDisconnected:
            return
        self._tempDataBuffer.extend(iovec)
        for i in iovec:
            self._tempDataLen += len(i)
        self._maybePauseProducer()
        self.startWriting()

    def loseConnection(self):
        """Close the connection at the next available opportunity.

        Call this to cause this FileDescriptor to lose its connection.  It will
        first write any data that it has buffered.

        If there is data buffered yet to be written, this method will cause the
        transport to lose its connection as soon as it's done flushing its
        write buffer.  If you have a producer registered, the connection won't
        be closed until the producer is finished. Therefore, make sure you
        unregister your producer when it's finished, or the connection will
        never close.
        """

        if self.connected and not self.disconnecting:
            if self._writeDisconnected:
                # doWrite won't trigger the connection close anymore
                self.stopReading()
                self.stopWriting()
                self.connectionLost(failure.Failure(main.CONNECTION_DONE))
            else:
                self.stopReading()
                # Keep (or resume) write monitoring so the buffer flushes,
                # then _postLoseConnection fires from doWrite.
                self.startWriting()
                self.disconnecting = 1

    def loseWriteConnection(self):
        self._writeDisconnecting = True
        self.startWriting()

    def stopReading(self):
        """Stop waiting for read availability.

        Call this to remove this selectable from being notified when it is
        ready for reading.
        """
        self.reactor.removeReader(self)

    def stopWriting(self):
        """Stop waiting for write availability.

        Call this to remove this selectable from being notified when it is ready
        for writing.
        """
        self.reactor.removeWriter(self)

    def startReading(self):
        """Start waiting for read availability."""
        self.reactor.addReader(self)

    def startWriting(self):
        """Start waiting for write availability.

        Call this to have this FileDescriptor be notified whenever it is ready for
        writing.
        """
        self.reactor.addWriter(self)

    # Producer/consumer implementation

    # first, the consumer stuff.  This requires no additional work, as
    # any object you can write to can be a consumer, really.

    producer = None
    # 2**2**2**2 == 2**16 == 65536 bytes before a streaming producer is paused.
    bufferSize = 2**2**2**2

    def stopConsuming(self):
        """Stop consuming data.

        This is called when a producer has lost its connection, to tell the
        consumer to go lose its connection (and break potential circular
        references).
        """
        self.unregisterProducer()
        self.loseConnection()

    # producer interface implementation

    def resumeProducing(self):
        if self.connected and not self.disconnecting:
            self.startReading()

    def pauseProducing(self):
        self.stopReading()

    def stopProducing(self):
        self.loseConnection()

    def fileno(self):
        """File Descriptor number for select().

        This method must be overridden or assigned in subclasses to
        indicate a valid file descriptor for the operating system.
        """
        return -1
+
+
def isIPAddress(addr: str, family: int = AF_INET) -> bool:
    """
    Determine whether the given string represents an IP address of the given
    family; by default, an IPv4 address.

    @param addr: A string which may or may not be the decimal dotted
        representation of an IPv4 address.
    @param family: The address family to test for; one of the C{AF_*} constants
        from the L{socket} module.  (This parameter has only been available
        since Twisted 17.1.0; previously L{isIPAddress} could only test for IPv4
        addresses.)

    @return: C{True} if C{addr} represents an IPv4 address, C{False} otherwise.
    """
    if isinstance(addr, bytes):  # type: ignore[unreachable]
        try:  # type: ignore[unreachable]
            addr = addr.decode("ascii")
        except UnicodeDecodeError:
            return False
    if family == AF_INET:
        # On Windows, where 3.5+ implement inet_pton, "0" is considered a
        # valid IPv4 address; require all four dotted-quad segments.
        if addr.count(".") != 3:
            return False
    elif family == AF_INET6:
        # On some platforms, inet_ntop fails unless the scope ID is valid;
        # this is a test for whether the given string *is* an IP address, so
        # strip any potential scope ID before checking.
        addr = addr.partition("%")[0]
    else:
        raise ValueError(f"unknown address family {family!r}")
    try:
        # This might be a native implementation or the one from
        # twisted.python.compat.
        inet_pton(family, addr)
    except (ValueError, OSError):
        return False
    else:
        return True
+
+
def isIPv6Address(addr: str) -> bool:
    """
    Determine whether the given string represents an IPv6 address.

    This is a thin convenience wrapper around L{isIPAddress} with the
    family fixed to C{AF_INET6}.

    @param addr: A string which may or may not be the hex
        representation of an IPv6 address.
    @type addr: C{str}

    @return: C{True} if C{addr} represents an IPv6 address, C{False}
        otherwise.
    @rtype: C{bool}
    """
    return isIPAddress(addr, family=AF_INET6)
+
+
+__all__ = ["FileDescriptor", "isIPAddress", "isIPv6Address"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/address.py b/contrib/python/Twisted/py3/twisted/internet/address.py
new file mode 100644
index 0000000000..10fa85241e
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/address.py
@@ -0,0 +1,182 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Address objects for network connections.
+"""
+
+
+import os
+from typing import Optional, Union
+from warnings import warn
+
+from zope.interface import implementer
+
+import attr
+from typing_extensions import Literal
+
+from twisted.internet.interfaces import IAddress
+from twisted.python.filepath import _asFilesystemBytes, _coerceToFilesystemEncoding
+from twisted.python.runtime import platform
+
+
@implementer(IAddress)
@attr.s(hash=True, auto_attribs=True)
class IPv4Address:
    """
    An L{IPv4Address} represents the address of an IPv4 socket endpoint.

    @ivar type: A string describing the type of transport, either 'TCP' or
        'UDP'.

    @ivar host: A string containing a dotted-quad IPv4 address; for example,
        "127.0.0.1".
    @type host: C{str}

    @ivar port: An integer representing the port number.
    @type port: C{int}
    """

    # attrs generates __init__/__eq__/__hash__; the validator rejects any
    # transport type other than 'TCP' or 'UDP' at construction time.
    type: Union[Literal["TCP"], Literal["UDP"]] = attr.ib(
        validator=attr.validators.in_(["TCP", "UDP"])
    )
    host: str
    port: int
+
+
@implementer(IAddress)
@attr.s(hash=True, auto_attribs=True)
class IPv6Address:
    """
    An L{IPv6Address} represents the address of an IPv6 socket endpoint.

    @ivar type: A string describing the type of transport, either 'TCP' or
        'UDP'.

    @ivar host: A string containing a colon-separated, hexadecimal formatted
        IPv6 address; for example, "::1".
    @type host: C{str}

    @ivar port: An integer representing the port number.
    @type port: C{int}

    @ivar flowInfo: the IPv6 flow label.  This can be used by QoS routers to
        identify flows of traffic; you may generally safely ignore it.
    @type flowInfo: L{int}

    @ivar scopeID: the IPv6 scope identifier - roughly analagous to what
        interface traffic destined for this address must be transmitted over.
    @type scopeID: L{int} or L{str}
    """

    # attrs generates __init__/__eq__/__hash__; the validator rejects any
    # transport type other than 'TCP' or 'UDP' at construction time.
    type: Union[Literal["TCP"], Literal["UDP"]] = attr.ib(
        validator=attr.validators.in_(["TCP", "UDP"])
    )
    host: str
    port: int
    # flowInfo and scopeID default to 0, matching the trailing members of the
    # 4-tuple form of an AF_INET6 socket address.
    flowInfo: int = 0
    scopeID: Union[str, int] = 0
+
+
@implementer(IAddress)
class _ProcessAddress:
    """
    An L{interfaces.IAddress} provider for process transports.

    A subprocess has no host/port, so this address intentionally carries no
    attributes; it is only a marker object.
    """
+
+
@attr.s(hash=True, auto_attribs=True)
@implementer(IAddress)
class HostnameAddress:
    """
    A L{HostnameAddress} represents the address of a L{HostnameEndpoint}.

    @ivar hostname: A hostname byte string; for example, b"example.com".
    @type hostname: L{bytes}

    @ivar port: An integer representing the port number.
    @type port: L{int}
    """

    # attrs generates __init__/__eq__/__hash__ from these two fields.
    hostname: bytes
    port: int
+
+
@attr.s(hash=False, repr=False, eq=False, auto_attribs=True)
@implementer(IAddress)
class UNIXAddress:
    """
    Object representing a UNIX socket endpoint.

    @ivar name: The filename associated with this socket.
    @type name: C{bytes}
    """

    # hash/repr/eq are disabled in attr.s above because they are hand-written
    # below to account for filesystem identity (samefile/inode).
    name: Optional[bytes] = attr.ib(
        converter=attr.converters.optional(_asFilesystemBytes)
    )

    if getattr(os.path, "samefile", None) is not None:

        def __eq__(self, other: object) -> bool:
            """
            Overriding C{attrs} to ensure the os level samefile
            check is done if the name attributes do not match.
            """
            if not isinstance(other, self.__class__):
                return NotImplemented
            res = self.name == other.name
            if not res and self.name and other.name:
                try:
                    # Two textually different paths may still refer to the
                    # same socket file on disk.
                    return os.path.samefile(self.name, other.name)
                except OSError:
                    pass
                except (TypeError, ValueError) as e:
                    # On Linux, abstract namespace UNIX sockets start with a
                    # \0, which os.path doesn't like.
                    if not platform.isLinux():
                        raise e
            return res

    else:

        def __eq__(self, other: object) -> bool:
            # No samefile on this platform; fall back to plain name equality.
            if isinstance(other, self.__class__):
                return self.name == other.name
            return NotImplemented

    def __repr__(self) -> str:
        name = self.name
        show = _coerceToFilesystemEncoding("", name) if name is not None else None
        return f"UNIXAddress({show!r})"

    def __hash__(self):
        if self.name is None:
            return hash((self.__class__, None))
        try:
            # Hash by (inode, device) so two names for the same socket file
            # hash equal, matching the samefile-based __eq__ above; fall back
            # to the raw name when the path cannot be stat()ed.
            s1 = os.stat(self.name)
            return hash((s1.st_ino, s1.st_dev))
        except OSError:
            return hash(self.name)
+
+
+# These are for buildFactory backwards compatibility due to
+# stupidity-induced inconsistency.
+
+
class _ServerFactoryIPv4Address(IPv4Address):
    """Backwards compatibility hack. Just like IPv4Address in practice."""

    def __eq__(self, other: object) -> bool:
        # Legacy callers compared against a bare (host, port) tuple; keep
        # supporting that, but warn about the deprecated usage.
        if isinstance(other, tuple):
            warn(
                "IPv4Address.__getitem__ is deprecated. Use attributes instead.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            return (self.host, self.port) == other
        if isinstance(other, IPv4Address):
            return (self.type, self.host, self.port) == (
                other.type,
                other.host,
                other.port,
            )
        return NotImplemented
diff --git a/contrib/python/Twisted/py3/twisted/internet/asyncioreactor.py b/contrib/python/Twisted/py3/twisted/internet/asyncioreactor.py
new file mode 100644
index 0000000000..cd1cf65f05
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/asyncioreactor.py
@@ -0,0 +1,307 @@
+# -*- test-case-name: twisted.test.test_internet -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+asyncio-based reactor implementation.
+"""
+
+
+import errno
+import sys
+from asyncio import AbstractEventLoop, get_event_loop
+from typing import Dict, Optional, Type
+
+from zope.interface import implementer
+
+from twisted.internet.abstract import FileDescriptor
+from twisted.internet.interfaces import IReactorFDSet
+from twisted.internet.posixbase import (
+ _NO_FILEDESC,
+ PosixReactorBase,
+ _ContinuousPolling,
+)
+from twisted.logger import Logger
+from twisted.python.log import callWithLogger
+
+
@implementer(IReactorFDSet)
class AsyncioSelectorReactor(PosixReactorBase):
    """
    Reactor running on top of L{asyncio.SelectorEventLoop}.

    On POSIX platforms, the default event loop is
    L{asyncio.SelectorEventLoop}.
    On Windows, the default event loop on Python 3.7 and older
    is C{asyncio.WindowsSelectorEventLoop}, but on Python 3.8 and newer
    the default event loop is C{asyncio.WindowsProactorEventLoop} which
    is incompatible with L{AsyncioSelectorReactor}.
    Applications that use L{AsyncioSelectorReactor} on Windows
    with Python 3.8+ must call
    C{asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())}
    before instantiating and running L{AsyncioSelectorReactor}.
    """

    _asyncClosed = False
    _log = Logger()

    def __init__(self, eventloop: Optional[AbstractEventLoop] = None):
        """
        @param eventloop: The asyncio event loop to wrap; defaults to the
            current global loop.  On Windows a C{ProactorEventLoop} is
            rejected because it does not support add_reader/add_writer.
        """
        if eventloop is None:
            _eventloop: AbstractEventLoop = get_event_loop()
        else:
            _eventloop = eventloop

        # On Python 3.8+, asyncio.get_event_loop() on
        # Windows was changed to return a ProactorEventLoop
        # unless the loop policy has been changed.
        if sys.platform == "win32":
            from asyncio import ProactorEventLoop

            if isinstance(_eventloop, ProactorEventLoop):
                raise TypeError(
                    f"ProactorEventLoop is not supported, got: {_eventloop}"
                )

        self._asyncioEventloop: AbstractEventLoop = _eventloop
        # Each maps a registered selectable to the fd it was registered with,
        # so it can still be unregistered after its fileno() becomes -1.
        self._writers: Dict[FileDescriptor, int] = {}
        self._readers: Dict[FileDescriptor, int] = {}
        self._continuousPolling = _ContinuousPolling(self)

        # Absolute asyncio-loop time at which _onTimer is scheduled, and the
        # corresponding TimerHandle; None when no timer is pending.
        self._scheduledAt = None
        self._timerHandle = None

        super().__init__()

    def _unregisterFDInAsyncio(self, fd):
        """
        Compensate for a bug in asyncio where it will not unregister a FD that
        it cannot handle in the epoll loop. It touches internal asyncio code.

        A description of the bug by markrwilliams:

        The C{add_writer} method of asyncio event loops isn't atomic because
        all the Selector classes in the selector module internally record a
        file object before passing it to the platform's selector
        implementation. If the platform's selector decides the file object
        isn't acceptable, the resulting exception doesn't cause the Selector to
        un-track the file object.

        The failing/hanging stdio test goes through the following sequence of
        events (roughly):

        * The first C{connection.write(intToByte(value))} call hits the asyncio
        reactor's C{addWriter} method.

        * C{addWriter} calls the asyncio loop's C{add_writer} method, which
        happens to live on C{_BaseSelectorEventLoop}.

        * The asyncio loop's C{add_writer} method checks if the file object has
        been registered before via the selector's C{get_key} method.

        * It hasn't, so the KeyError block runs and calls the selector's
        register method

        * Code examples that follow use EpollSelector, but the code flow holds
        true for any other selector implementation. The selector's register
        method first calls through to the next register method in the MRO

        * That next method is always C{_BaseSelectorImpl.register} which
        creates a C{SelectorKey} instance for the file object, stores it under
        the file object's file descriptor, and then returns it.

        * Control returns to the concrete selector implementation, which asks
        the operating system to track the file descriptor using the right API.

        * The operating system refuses! An exception is raised that, in this
        case, the asyncio reactor handles by creating a C{_ContinuousPolling}
        object to watch the file descriptor.

        * The second C{connection.write(intToByte(value))} call hits the
        asyncio reactor's C{addWriter} method, which hits the C{add_writer}
        method. But the loop's selector's get_key method now returns a
        C{SelectorKey}! Now the asyncio reactor's C{addWriter} method thinks
        the asyncio loop will watch the file descriptor, even though it won't.
        """
        try:
            # Reaches into the loop's private selector; best-effort only.
            self._asyncioEventloop._selector.unregister(fd)
        except BaseException:
            pass

    def _readOrWrite(self, selectable, read):
        """
        Dispatch a readiness event to C{selectable.doRead} or C{doWrite} and
        disconnect it when the handler returns or raises a reason.
        """
        method = selectable.doRead if read else selectable.doWrite

        if selectable.fileno() == -1:
            self._disconnectSelectable(selectable, _NO_FILEDESC, read)
            return

        try:
            why = method()
        except Exception as e:
            why = e
            self._log.failure(None)
        if why:
            self._disconnectSelectable(selectable, why, read)

    def addReader(self, reader):
        if reader in self._readers.keys() or reader in self._continuousPolling._readers:
            return

        fd = reader.fileno()
        try:
            self._asyncioEventloop.add_reader(
                fd, callWithLogger, reader, self._readOrWrite, reader, True
            )
            self._readers[reader] = fd
        except OSError as e:
            self._unregisterFDInAsyncio(fd)
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addReader(reader)
            else:
                raise

    def addWriter(self, writer):
        if writer in self._writers.keys() or writer in self._continuousPolling._writers:
            return

        fd = writer.fileno()
        try:
            self._asyncioEventloop.add_writer(
                fd, callWithLogger, writer, self._readOrWrite, writer, False
            )
            self._writers[writer] = fd
        except PermissionError:
            self._unregisterFDInAsyncio(fd)
            # epoll(7) doesn't support certain file descriptors,
            # e.g. filesystem files, so for those we just poll
            # continuously:
            self._continuousPolling.addWriter(writer)
        except BrokenPipeError:
            # The kqueuereactor will raise this if there is a broken pipe
            self._unregisterFDInAsyncio(fd)
        except BaseException:
            self._unregisterFDInAsyncio(fd)
            raise

    def removeReader(self, reader):
        # First, see if they're trying to remove a reader that we don't have.
        if not (
            reader in self._readers.keys() or self._continuousPolling.isReading(reader)
        ):
            # We don't have it, so just return OK.
            return

        # If it was a cont. polling reader, check there first.
        if self._continuousPolling.isReading(reader):
            self._continuousPolling.removeReader(reader)
            return

        fd = reader.fileno()
        if fd == -1:
            # If the FD is -1, we want to know what its original FD was, to
            # remove it.
            fd = self._readers.pop(reader)
        else:
            self._readers.pop(reader)

        self._asyncioEventloop.remove_reader(fd)

    def removeWriter(self, writer):
        # First, see if they're trying to remove a writer that we don't have.
        if not (
            writer in self._writers.keys() or self._continuousPolling.isWriting(writer)
        ):
            # We don't have it, so just return OK.
            return

        # If it was a cont. polling writer, check there first.
        if self._continuousPolling.isWriting(writer):
            self._continuousPolling.removeWriter(writer)
            return

        fd = writer.fileno()

        if fd == -1:
            # If the FD is -1, we want to know what its original FD was, to
            # remove it.
            fd = self._writers.pop(writer)
        else:
            self._writers.pop(writer)

        self._asyncioEventloop.remove_writer(fd)

    def removeAll(self):
        # Both the asyncio-registered and the continuously-polled selectables.
        return (
            self._removeAll(self._readers.keys(), self._writers.keys())
            + self._continuousPolling.removeAll()
        )

    def getReaders(self):
        return list(self._readers.keys()) + self._continuousPolling.getReaders()

    def getWriters(self):
        return list(self._writers.keys()) + self._continuousPolling.getWriters()

    def iterate(self, timeout):
        # Run the loop briefly by scheduling its own stop() slightly after
        # the requested timeout.
        self._asyncioEventloop.call_later(timeout + 0.01, self._asyncioEventloop.stop)
        self._asyncioEventloop.run_forever()

    def run(self, installSignalHandlers=True):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self._asyncioEventloop.run_forever()
        if self._justStopped:
            self._justStopped = False

    def stop(self):
        super().stop()
        # This will cause runUntilCurrent which in its turn
        # will call fireSystemEvent("shutdown")
        self.callLater(0, lambda: None)

    def crash(self):
        super().crash()
        self._asyncioEventloop.stop()

    def _onTimer(self):
        # Timer fired: run due Twisted delayed calls, then schedule the next
        # wakeup based on the new head of the call queue.
        self._scheduledAt = None
        self.runUntilCurrent()
        self._reschedule()

    def _reschedule(self):
        """
        (Re)arm the asyncio timer for the next pending Twisted delayed call,
        cancelling any previously armed timer.
        """
        timeout = self.timeout()
        if timeout is not None:
            abs_time = self._asyncioEventloop.time() + timeout
            self._scheduledAt = abs_time
            if self._timerHandle is not None:
                self._timerHandle.cancel()
            self._timerHandle = self._asyncioEventloop.call_at(abs_time, self._onTimer)

    def _moveCallLaterSooner(self, tple):
        PosixReactorBase._moveCallLaterSooner(self, tple)
        self._reschedule()

    def callLater(self, seconds, f, *args, **kwargs):
        dc = PosixReactorBase.callLater(self, seconds, f, *args, **kwargs)
        # Only rearm the asyncio timer if this call is due before the
        # currently scheduled wakeup (or none is scheduled).
        abs_time = self._asyncioEventloop.time() + self.timeout()
        if self._scheduledAt is None or abs_time < self._scheduledAt:
            self._reschedule()
        return dc

    def callFromThread(self, f, *args, **kwargs):
        # call_soon_threadsafe is the only thread-safe entry point into the
        # asyncio loop; the actual call is funnelled through callLater(0, ...).
        g = lambda: self.callLater(0, f, *args, **kwargs)
        self._asyncioEventloop.call_soon_threadsafe(g)
+
+
def install(eventloop=None):
    """
    Install an asyncio-based reactor.

    @param eventloop: The asyncio eventloop to wrap. If default, the global one
        is selected.
    """
    newReactor = AsyncioSelectorReactor(eventloop)
    from twisted.internet.main import installReactor

    installReactor(newReactor)
diff --git a/contrib/python/Twisted/py3/twisted/internet/base.py b/contrib/python/Twisted/py3/twisted/internet/base.py
new file mode 100644
index 0000000000..f039dfe5c4
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/base.py
@@ -0,0 +1,1345 @@
+# -*- test-case-name: twisted.test.test_internet,twisted.internet.test.test_core -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Very basic functionality for a Reactor implementation.
+"""
+
+
+import builtins
+import socket # needed only for sync-dns
+import warnings
+from abc import ABC, abstractmethod
+from heapq import heapify, heappop, heappush
+from traceback import format_stack
+from types import FrameType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ List,
+ NewType,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+
+from zope.interface import classImplements, implementer
+
+from twisted.internet import abstract, defer, error, fdesc, main, threads
+from twisted.internet._resolver import (
+ ComplexResolverSimplifier as _ComplexResolverSimplifier,
+ GAIResolver as _GAIResolver,
+ SimpleResolverComplexifier as _SimpleResolverComplexifier,
+)
+from twisted.internet.defer import Deferred, DeferredList
+from twisted.internet.interfaces import (
+ IAddress,
+ IConnector,
+ IDelayedCall,
+ IHostnameResolver,
+ IProtocol,
+ IReactorCore,
+ IReactorFromThreads,
+ IReactorPluggableNameResolver,
+ IReactorPluggableResolver,
+ IReactorThreads,
+ IReactorTime,
+ IReadDescriptor,
+ IResolverSimple,
+ IWriteDescriptor,
+ _ISupportsExitSignalCapturing,
+)
+from twisted.internet.protocol import ClientFactory
+from twisted.python import log, reflect
+from twisted.python.failure import Failure
+from twisted.python.runtime import platform, seconds as runtimeSeconds
+from ._signals import SignalHandling, _WithoutSignalHandling, _WithSignalHandling
+
+if TYPE_CHECKING:
+ from twisted.internet.tcp import Client
+
+# This import is for side-effects! Even if you don't see any code using it
+# in this module, don't delete it.
+from twisted.python import threadable
+
+if platform.supportsThreads():
+ from twisted.python.threadpool import ThreadPool
+else:
+ ThreadPool = None # type: ignore[misc, assignment]
+
+
@implementer(IDelayedCall)
class DelayedCall:
    """
    A call scheduled to happen at some point in the future, as returned by
    L{IReactorTime.callLater}; it may be cancelled, delayed, or reset until
    it has fired.
    """

    # enable .debug to record creator call stack, and it will be logged if
    # an exception occurs while the function is being run
    debug = False
    _repr: Optional[str] = None

    # In debug mode, the call stack at the time of instantiation.
    creator: Optional[Sequence[str]] = None

    def __init__(
        self,
        time: float,
        func: Callable[..., Any],
        args: Sequence[object],
        kw: Dict[str, object],
        cancel: Callable[["DelayedCall"], None],
        reset: Callable[["DelayedCall"], None],
        seconds: Callable[[], float] = runtimeSeconds,
    ) -> None:
        """
        @param time: Seconds from the epoch at which to call C{func}.
        @param func: The callable to call.
        @param args: The positional arguments to pass to the callable.
        @param kw: The keyword arguments to pass to the callable.
        @param cancel: A callable which will be called with this
            DelayedCall before cancellation.
        @param reset: A callable which will be called with this
            DelayedCall after changing this DelayedCall's scheduled
            execution time. The callable should adjust any necessary
            scheduling details to ensure this DelayedCall is invoked
            at the new appropriate time.
        @param seconds: If provided, a no-argument callable which will be
            used to determine the current time any time that information is
            needed.
        """
        self.time, self.func, self.args, self.kw = time, func, args, kw
        self.resetter = reset
        self.canceller = cancel
        self.seconds = seconds
        # Integer flags used as booleans throughout this class.
        self.cancelled = self.called = 0
        # Extra delay accumulated via delay()/late reset(), relative to .time.
        self.delayed_time = 0.0
        if self.debug:
            self.creator = format_stack()[:-2]

    def getTime(self) -> float:
        """
        Return the time at which this call will fire

        @return: The number of seconds after the epoch at which this call is
            scheduled to be made.
        """
        return self.time + self.delayed_time

    def cancel(self) -> None:
        """
        Unschedule this call

        @raise AlreadyCancelled: Raised if this call has already been
            unscheduled.

        @raise AlreadyCalled: Raised if this call has already been made.
        """
        if self.cancelled:
            raise error.AlreadyCancelled
        elif self.called:
            raise error.AlreadyCalled
        else:
            self.canceller(self)
            self.cancelled = 1
            if self.debug:
                # Capture a repr before dropping the references below.
                self._repr = repr(self)
            # Release the target and its arguments eagerly.
            del self.func, self.args, self.kw

    def reset(self, secondsFromNow: float) -> None:
        """
        Reschedule this call for a different time

        @param secondsFromNow: The number of seconds from the time of the
            C{reset} call at which this call will be scheduled.

        @raise AlreadyCancelled: Raised if this call has been cancelled.
        @raise AlreadyCalled: Raised if this call has already been made.
        """
        if self.cancelled:
            raise error.AlreadyCancelled
        elif self.called:
            raise error.AlreadyCalled
        else:
            newTime = self.seconds() + secondsFromNow
            if newTime < self.time:
                # Moving earlier: the scheduler must re-sort this call.
                self.delayed_time = 0.0
                self.time = newTime
                self.resetter(self)
            else:
                # Moving later: record the extra delay; the scheduler will
                # re-queue the call when it pops early (see runUntilCurrent).
                self.delayed_time = newTime - self.time

    def delay(self, secondsLater: float) -> None:
        """
        Reschedule this call for a later time

        @param secondsLater: The number of seconds after the originally
            scheduled time for which to reschedule this call.

        @raise AlreadyCancelled: Raised if this call has been cancelled.
        @raise AlreadyCalled: Raised if this call has already been made.
        """
        if self.cancelled:
            raise error.AlreadyCancelled
        elif self.called:
            raise error.AlreadyCalled
        else:
            self.delayed_time += secondsLater
            if self.delayed_time < 0.0:
                # Net effect moved the call earlier: fold the delay into
                # .time and notify the scheduler.
                self.activate_delay()
                self.resetter(self)

    def activate_delay(self) -> None:
        # Fold the accumulated delay into the base fire time.
        self.time += self.delayed_time
        self.delayed_time = 0.0

    def active(self) -> bool:
        """Determine whether this call is still pending

        @return: True if this call has not yet been made or cancelled,
            False otherwise.
        """
        return not (self.cancelled or self.called)

    def __le__(self, other: object) -> bool:
        """
        Implement C{<=} operator between two L{DelayedCall} instances.

        Comparison is based on the C{time} attribute (unadjusted by the
        delayed time).
        """
        if isinstance(other, DelayedCall):
            return self.time <= other.time
        else:
            return NotImplemented

    def __lt__(self, other: object) -> bool:
        """
        Implement C{<} operator between two L{DelayedCall} instances.

        Comparison is based on the C{time} attribute (unadjusted by the
        delayed time).
        """
        if isinstance(other, DelayedCall):
            return self.time < other.time
        else:
            return NotImplemented

    def __repr__(self) -> str:
        """
        Implement C{repr()} for L{DelayedCall} instances.

        @returns: String containing details of the L{DelayedCall}.
        """
        if self._repr is not None:
            return self._repr
        # func/args/kw are deleted by cancel(), hence the hasattr check.
        if hasattr(self, "func"):
            # This code should be replaced by a utility function in reflect;
            # see ticket #6066:
            func = getattr(self.func, "__qualname__", None)
            if func is None:
                func = getattr(self.func, "__name__", None)
                if func is not None:
                    imClass = getattr(self.func, "im_class", None)
                    if imClass is not None:
                        func = f"{imClass}.{func}"
            if func is None:
                func = reflect.safe_repr(self.func)
        else:
            func = None

        now = self.seconds()
        L = [
            "<DelayedCall 0x%x [%ss] called=%s cancelled=%s"
            % (id(self), self.time - now, self.called, self.cancelled)
        ]
        if func is not None:
            L.extend((" ", func, "("))
            if self.args:
                L.append(", ".join([reflect.safe_repr(e) for e in self.args]))
                if self.kw:
                    L.append(", ")
            if self.kw:
                L.append(
                    ", ".join(
                        [f"{k}={reflect.safe_repr(v)}" for (k, v) in self.kw.items()]
                    )
                )
            L.append(")")

        if self.creator is not None:
            L.append("\n\ntraceback at creation: \n\n%s" % ("    ".join(self.creator)))
        L.append(">")

        return "".join(L)
+
+
@implementer(IResolverSimple)
class ThreadedResolver:
    """
    L{ThreadedResolver} uses a reactor, a threadpool, and
    L{socket.gethostbyname} to perform name lookups without blocking the
    reactor thread.  It also supports timeouts independently from whatever
    timeout logic L{socket.gethostbyname} might have.

    @ivar reactor: The reactor the threadpool of which will be used to call
        L{socket.gethostbyname} and the I/O thread of which the result will be
        delivered.
    """

    def __init__(self, reactor: "ReactorBase") -> None:
        self.reactor = reactor
        # Maps each in-flight thread-lookup Deferred to the Deferred handed
        # to the caller and the timeout DelayedCall guarding it.
        self._runningQueries: Dict[
            Deferred[str], Tuple[Deferred[str], IDelayedCall]
        ] = {}

    def _fail(self, name: str, err: str) -> Failure:
        # Build a Failure wrapping a DNSLookupError for the given name.
        lookupError = error.DNSLookupError(f"address {name!r} not found: {err}")
        return Failure(lookupError)

    def _cleanup(self, name: str, lookupDeferred: Deferred[str]) -> None:
        # Timeout path: drop the bookkeeping entry and errback the caller.
        # The thread-side lookup itself cannot be interrupted; its eventual
        # result is discarded by _checkTimeout's KeyError branch.
        userDeferred, cancelCall = self._runningQueries[lookupDeferred]
        del self._runningQueries[lookupDeferred]
        userDeferred.errback(self._fail(name, "timeout error"))

    def _checkTimeout(
        self, result: Union[str, Failure], name: str, lookupDeferred: Deferred[str]
    ) -> None:
        # Result path: if the query already timed out it is gone from
        # _runningQueries and the result is silently dropped.
        try:
            userDeferred, cancelCall = self._runningQueries[lookupDeferred]
        except KeyError:
            pass
        else:
            del self._runningQueries[lookupDeferred]
            cancelCall.cancel()

            if isinstance(result, Failure):
                userDeferred.errback(self._fail(name, result.getErrorMessage()))
            else:
                userDeferred.callback(result)

    def getHostByName(
        self, name: str, timeout: Sequence[int] = (1, 3, 11, 45)
    ) -> Deferred[str]:
        """
        See L{twisted.internet.interfaces.IResolverSimple.getHostByName}.

        Note that the elements of C{timeout} are summed and the result is used
        as a timeout for the lookup.  Any intermediate timeout or retry logic
        is left up to the platform via L{socket.gethostbyname}.
        """
        if timeout:
            timeoutDelay = sum(timeout)
        else:
            timeoutDelay = 60
        userDeferred: Deferred[str] = Deferred()
        lookupDeferred = threads.deferToThreadPool(
            cast(IReactorFromThreads, self.reactor),
            cast(IReactorThreads, self.reactor).getThreadPool(),
            socket.gethostbyname,
            name,
        )
        cancelCall = cast(IReactorTime, self.reactor).callLater(
            timeoutDelay, self._cleanup, name, lookupDeferred
        )
        self._runningQueries[lookupDeferred] = (userDeferred, cancelCall)
        _: Deferred[None] = lookupDeferred.addBoth(
            self._checkTimeout, name, lookupDeferred
        )
        return userDeferred
+
+
@implementer(IResolverSimple)
class BlockingResolver:
    """
    An L{IResolverSimple} which resolves names synchronously with
    L{socket.gethostbyname} (blocking the calling thread) and wraps the
    outcome in an already-fired L{Deferred}.
    """

    def getHostByName(
        self, name: str, timeout: Sequence[int] = (1, 3, 11, 45)
    ) -> Deferred[str]:
        """
        Resolve C{name} immediately; C{timeout} is accepted for interface
        compatibility but ignored.
        """
        try:
            resolved = socket.gethostbyname(name)
        except OSError:
            return defer.fail(error.DNSLookupError(f"address {name!r} not found"))
        return defer.succeed(resolved)
+
+
# Signature shared by all three-phase-event trigger callables.
_ThreePhaseEventTriggerCallable = Callable[..., Any]
# A registered trigger: (callable, positional args, keyword args).
_ThreePhaseEventTrigger = Tuple[
    _ThreePhaseEventTriggerCallable, Tuple[object, ...], Dict[str, object]
]
# Opaque handle returned by _ThreePhaseEvent.addTrigger: the trigger plus
# the phase it was registered in.
_ThreePhaseEventTriggerHandle = NewType(
    "_ThreePhaseEventTriggerHandle",
    Tuple[str, _ThreePhaseEventTriggerCallable, Tuple[object, ...], Dict[str, object]],
)
+
+
class _ThreePhaseEvent:
    """
    Collection of callables (with arguments) which can be invoked as a group in
    a particular order.

    This provides the underlying implementation for the reactor's system event
    triggers.  An instance of this class tracks triggers for all phases of a
    single type of event.

    @ivar before: A list of the before-phase triggers containing three-tuples
        of a callable, a tuple of positional arguments, and a dict of keyword
        arguments

    @ivar finishedBefore: A list of the before-phase triggers which have
        already been executed.  This is only populated in the C{'BEFORE'} state.

    @ivar during: A list of the during-phase triggers containing three-tuples
        of a callable, a tuple of positional arguments, and a dict of keyword
        arguments

    @ivar after: A list of the after-phase triggers containing three-tuples
        of a callable, a tuple of positional arguments, and a dict of keyword
        arguments

    @ivar state: A string indicating what is currently going on with this
        object.  One of C{'BASE'} (for when nothing in particular is happening;
        this is the initial value), C{'BEFORE'} (when the before-phase triggers
        are in the process of being executed).
    """

    def __init__(self) -> None:
        # NOTE: finishedBefore is first assigned in fireEvent(), not here.
        self.before: List[_ThreePhaseEventTrigger] = []
        self.during: List[_ThreePhaseEventTrigger] = []
        self.after: List[_ThreePhaseEventTrigger] = []
        self.state = "BASE"

    def addTrigger(
        self,
        phase: str,
        callable: _ThreePhaseEventTriggerCallable,
        *args: object,
        **kwargs: object,
    ) -> _ThreePhaseEventTriggerHandle:
        """
        Add a trigger to the indicated phase.

        @param phase: One of C{'before'}, C{'during'}, or C{'after'}.

        @param callable: An object to be called when this event is triggered.
        @param args: Positional arguments to pass to C{callable}.
        @param kwargs: Keyword arguments to pass to C{callable}.

        @return: An opaque handle which may be passed to L{removeTrigger} to
            reverse the effects of calling this method.
        """
        if phase not in ("before", "during", "after"):
            raise KeyError("invalid phase")
        getattr(self, phase).append((callable, args, kwargs))
        return _ThreePhaseEventTriggerHandle((phase, callable, args, kwargs))

    def removeTrigger(self, handle: _ThreePhaseEventTriggerHandle) -> None:
        """
        Remove a previously added trigger callable.

        @param handle: An object previously returned by L{addTrigger}.  The
            trigger added by that call will be removed.

        @raise ValueError: If the trigger associated with C{handle} has already
            been removed or if C{handle} is not a valid handle.
        """
        # Dispatch on the current state: removeTrigger_BASE or
        # removeTrigger_BEFORE.
        getattr(self, "removeTrigger_" + self.state)(handle)

    def removeTrigger_BASE(self, handle: _ThreePhaseEventTriggerHandle) -> None:
        """
        Just try to remove the trigger.

        @see: removeTrigger
        """
        try:
            phase, callable, args, kwargs = handle
        except (TypeError, ValueError):
            raise ValueError("invalid trigger handle")
        else:
            if phase not in ("before", "during", "after"):
                raise KeyError("invalid phase")
            getattr(self, phase).remove((callable, args, kwargs))

    def removeTrigger_BEFORE(self, handle: _ThreePhaseEventTriggerHandle) -> None:
        """
        Remove the trigger if it has yet to be executed, otherwise emit a
        warning that in the future an exception will be raised when removing an
        already-executed trigger.

        @see: removeTrigger
        """
        phase, callable, args, kwargs = handle
        if phase != "before":
            return self.removeTrigger_BASE(handle)
        if (callable, args, kwargs) in self.finishedBefore:
            warnings.warn(
                "Removing already-fired system event triggers will raise an "
                "exception in a future version of Twisted.",
                category=DeprecationWarning,
                stacklevel=3,
            )
        else:
            self.removeTrigger_BASE(handle)

    def fireEvent(self) -> None:
        """
        Call the triggers added to this event.
        """
        self.state = "BEFORE"
        self.finishedBefore = []
        beforeResults: List[Deferred[object]] = []
        while self.before:
            callable, args, kwargs = self.before.pop(0)
            # Record the trigger as executed before calling it, so
            # removeTrigger_BEFORE can warn about already-fired triggers.
            self.finishedBefore.append((callable, args, kwargs))
            try:
                result = callable(*args, **kwargs)
            except BaseException:
                log.err()
            else:
                if isinstance(result, Deferred):
                    # Before-phase triggers may defer; later phases wait for
                    # all of them via the DeferredList below.
                    beforeResults.append(result)
        DeferredList(beforeResults).addCallback(self._continueFiring)

    def _continueFiring(self, ignored: object) -> None:
        """
        Call the during and after phase triggers for this event.
        """
        self.state = "BASE"
        self.finishedBefore = []
        for phase in self.during, self.after:
            while phase:
                callable, args, kwargs = phase.pop(0)
                try:
                    callable(*args, **kwargs)
                except BaseException:
                    log.err()
+
+
@implementer(IReactorPluggableNameResolver, IReactorPluggableResolver)
class PluggableResolverMixin:
    """
    A mixin which implements the pluggable resolver reactor interfaces.

    @ivar resolver: The installed L{IResolverSimple}.
    @ivar _nameResolver: The installed L{IHostnameResolver}.
    """

    # NOTE(review): these defaults are class attributes, shared by every
    # instance until installResolver/installNameResolver rebinds them as
    # instance attributes.
    resolver: IResolverSimple = BlockingResolver()
    _nameResolver: IHostnameResolver = _SimpleResolverComplexifier(resolver)

    # IReactorPluggableResolver
    def installResolver(self, resolver: IResolverSimple) -> IResolverSimple:
        """
        See L{IReactorPluggableResolver}.

        @param resolver: see L{IReactorPluggableResolver}.

        @return: see L{IReactorPluggableResolver}.
        """
        assert IResolverSimple.providedBy(resolver)
        oldResolver = self.resolver
        self.resolver = resolver
        # Keep the two views consistent: wrap the simple resolver so the
        # hostname-resolver interface reflects the new installation too.
        self._nameResolver = _SimpleResolverComplexifier(resolver)
        return oldResolver

    # IReactorPluggableNameResolver
    def installNameResolver(self, resolver: IHostnameResolver) -> IHostnameResolver:
        """
        See L{IReactorPluggableNameResolver}.

        @param resolver: See L{IReactorPluggableNameResolver}.

        @return: see L{IReactorPluggableNameResolver}.
        """
        previousNameResolver = self._nameResolver
        self._nameResolver = resolver
        # Mirror installation in the other direction as well.
        self.resolver = _ComplexResolverSimplifier(resolver)
        return previousNameResolver

    @property
    def nameResolver(self) -> IHostnameResolver:
        """
        Implementation of read-only
        L{IReactorPluggableNameResolver.nameResolver}.
        """
        return self._nameResolver
+
+
# Identifies a registered system event trigger: (eventType, trigger handle).
_SystemEventID = NewType("_SystemEventID", Tuple[str, _ThreePhaseEventTriggerHandle])
# A call queued by callFromThread: (callable, positional args, keyword args).
_ThreadCall = Tuple[Callable[..., Any], Tuple[object, ...], Dict[str, object]]
+
+
+@implementer(IReactorCore, IReactorTime, _ISupportsExitSignalCapturing)
+class ReactorBase(PluggableResolverMixin):
+ """
+ Default base class for Reactors.
+
+ @ivar _stopped: A flag which is true between paired calls to C{reactor.run}
+ and C{reactor.stop}. This should be replaced with an explicit state
+ machine.
+ @ivar _justStopped: A flag which is true between the time C{reactor.stop}
+ is called and the time the shutdown system event is fired. This is
+ used to determine whether that event should be fired after each
+ iteration through the mainloop. This should be replaced with an
+ explicit state machine.
+ @ivar _started: A flag which is true from the time C{reactor.run} is called
+ until the time C{reactor.run} returns. This is used to prevent calls
+ to C{reactor.run} on a running reactor. This should be replaced with
+ an explicit state machine.
+ @ivar running: See L{IReactorCore.running}
+ @ivar _registerAsIOThread: A flag controlling whether the reactor will
+ register the thread it is running in as the I/O thread when it starts.
+ If C{True}, registration will be done, otherwise it will not be.
+ @ivar _exitSignal: See L{_ISupportsExitSignalCapturing._exitSignal}
+
+ @ivar _installSignalHandlers: A flag which indicates whether any signal
+ handlers will be installed during startup. This includes handlers for
+ SIGCHLD to monitor child processes, and SIGINT, SIGTERM, and SIGBREAK
+
+ @ivar _signals: An object which knows how to install and uninstall the
+ reactor's signal-handling behavior.
+ """
+
+ _registerAsIOThread = True
+
+ _stopped = True
+ installed = False
+ usingThreads = False
+ _exitSignal = None
+
+ # Set to something meaningful between startRunning and shortly before run
+ # returns. We don't know the value to be used by `run` until that method
+ # itself is called and we learn the value of installSignalHandlers.
+ # However, we can use a no-op implementation until then.
+ _signals: SignalHandling = _WithoutSignalHandling()
+
+ __name__ = "twisted.internet.reactor"
+
    def __init__(self) -> None:
        super().__init__()
        # Calls queued from other threads, drained by runUntilCurrent.
        self.threadCallQueue: List[_ThreadCall] = []
        self._eventTriggers: Dict[str, _ThreePhaseEvent] = {}
        # Min-heap (heapq) of scheduled calls, ordered by fire time; new
        # calls land in _newTimedCalls first (see _insertNewDelayedCalls).
        self._pendingTimedCalls: List[DelayedCall] = []
        self._newTimedCalls: List[DelayedCall] = []
        self._cancellations = 0
        self.running = False
        self._started = False
        self._justStopped = False
        self._startedBefore = False
        # reactor internal readers, e.g. the waker.
        # Using Any as the type here… unable to find a suitable defined interface
        self._internalReaders: Set[Any] = set()
        self.waker: Any = None

        # Arrange for the running attribute to change to True at the right time
        # and let a subclass possibly do other things at that time (eg install
        # signal handlers).
        self.addSystemEventTrigger("during", "startup", self._reallyStartRunning)
        self.addSystemEventTrigger("during", "shutdown", self.crash)
        self.addSystemEventTrigger("during", "shutdown", self.disconnectAll)

        if platform.supportsThreads():
            self._initThreads()
        self.installWaker()
+
    # Signal handling pieces
    # Records whether run()/startRunning was asked to install signal handlers.
    _installSignalHandlers: bool = False

    def _makeSignalHandling(self, installSignalHandlers: bool) -> SignalHandling:
        """
        Get an appropriate signal handling object.

        @param installSignalHandlers: Indicate whether to even try to do any
            signal handling.  If C{False} then the result will be a no-op
            implementation.
        """
        if installSignalHandlers:
            return self._signalsFactory()
        return _WithoutSignalHandling()
+
    def _signalsFactory(self) -> SignalHandling:
        """
        Get a signal handling object that implements the basic behavior of
        stopping the reactor on SIGINT, SIGBREAK, and SIGTERM.
        """
        # Subclasses can override this to extend or replace the handlers.
        return _WithSignalHandling(
            self.sigInt,
            self.sigBreak,
            self.sigTerm,
        )
+
    def _addInternalReader(self, reader: IReadDescriptor) -> None:
        """
        Add a read descriptor which is part of the implementation of the
        reactor itself.

        The read descriptor will not be removed by L{IReactorFDSet.removeAll}.
        """
        self._internalReaders.add(reader)
        self.addReader(reader)
+
    def _removeInternalReader(self, reader: IReadDescriptor) -> None:
        """
        Remove a read descriptor which is part of the implementation of the
        reactor itself.
        """
        self._internalReaders.remove(reader)
        self.removeReader(reader)
+
    def run(self, installSignalHandlers: bool = True) -> None:
        """
        See L{twisted.internet.interfaces.IReactorCore.run}.
        """
        self.startRunning(installSignalHandlers=installSignalHandlers)
        try:
            self.mainLoop()
        finally:
            # Restore prior signal dispositions even if mainLoop raised.
            self._signals.uninstall()
+
    def mainLoop(self) -> None:
        """
        Repeatedly run due timed calls and one I/O iteration until the
        reactor is no longer started.
        """
        while self._started:
            try:
                while self._started:
                    # Advance simulation time in delayed event
                    # processors.
                    self.runUntilCurrent()
                    t2 = self.timeout()
                    # Once stop has begun (running is False) this passes a
                    # falsy timeout so doIteration does not block.
                    t = self.running and t2
                    self.doIteration(t)
            except BaseException:
                log.msg("Unexpected error in main loop.")
                log.err()
            else:
                log.msg("Main loop terminated.")  # type:ignore[unreachable]
+
    # override in subclasses

    _lock = None

    def installWaker(self) -> None:
        """
        Install a waker object; concrete reactors must override this.
        """
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement installWaker"
        )
+
    def wakeUp(self) -> None:
        """
        Wake up the event loop.
        """
        if self.waker:
            self.waker.wakeUp()
        # if the waker isn't installed, the reactor isn't running, and
        # therefore doesn't need to be woken up
+
    def doIteration(self, delay: Optional[float]) -> None:
        """
        Do one iteration over the readers and writers which have been added.
        """
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement doIteration"
        )
+
    # IReactorFDSet-style primitives: abstract here, provided by concrete
    # reactor implementations.
    def addReader(self, reader: IReadDescriptor) -> None:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement addReader"
        )

    def addWriter(self, writer: IWriteDescriptor) -> None:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement addWriter"
        )

    def removeReader(self, reader: IReadDescriptor) -> None:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement removeReader"
        )

    def removeWriter(self, writer: IWriteDescriptor) -> None:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement removeWriter"
        )

    def removeAll(self) -> List[Union[IReadDescriptor, IWriteDescriptor]]:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement removeAll"
        )

    def getReaders(self) -> List[IReadDescriptor]:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement getReaders"
        )

    def getWriters(self) -> List[IWriteDescriptor]:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement getWriters"
        )
+
    # IReactorCore
    def resolve(
        self, name: str, timeout: Sequence[int] = (1, 3, 11, 45)
    ) -> Deferred[str]:
        """
        Return a Deferred that will resolve a hostname."""
        if not name:
            # XXX - This is *less than* '::', and will screw up IPv6 servers
            return defer.succeed("0.0.0.0")
        if abstract.isIPAddress(name):
            # Already a numeric address; no lookup needed.
            return defer.succeed(name)
        return self.resolver.getHostByName(name, timeout)
+
    def stop(self) -> None:
        """
        See twisted.internet.interfaces.IReactorCore.stop.
        """
        if self._stopped:
            raise error.ReactorNotRunning("Can't stop reactor that isn't running.")
        # _justStopped makes the next runUntilCurrent fire "shutdown";
        # _startedBefore makes a later startRunning raise NotRestartable.
        self._stopped = True
        self._justStopped = True
        self._startedBefore = True
+
    def crash(self) -> None:
        """
        See twisted.internet.interfaces.IReactorCore.crash.

        Reset reactor state tracking attributes and re-initialize certain
        state-transition helpers which were set up in C{__init__} but later
        destroyed (through use).
        """
        self._started = False
        self.running = False
        # Re-register the startup trigger consumed by the previous start.
        self.addSystemEventTrigger("during", "startup", self._reallyStartRunning)
+
    def sigInt(self, number: int, frame: Optional[FrameType] = None) -> None:
        """
        Handle a SIGINT interrupt.

        @param number: See handler specification in L{signal.signal}
        @param frame: See handler specification in L{signal.signal}
        """
        log.msg("Received SIGINT, shutting down.")
        # callFromThread because signal handlers may run on any thread/stack.
        self.callFromThread(self.stop)
        self._exitSignal = number

    def sigBreak(self, number: int, frame: Optional[FrameType] = None) -> None:
        """
        Handle a SIGBREAK interrupt.

        @param number: See handler specification in L{signal.signal}
        @param frame: See handler specification in L{signal.signal}
        """
        log.msg("Received SIGBREAK, shutting down.")
        self.callFromThread(self.stop)
        self._exitSignal = number

    def sigTerm(self, number: int, frame: Optional[FrameType] = None) -> None:
        """
        Handle a SIGTERM interrupt.

        @param number: See handler specification in L{signal.signal}
        @param frame: See handler specification in L{signal.signal}
        """
        log.msg("Received SIGTERM, shutting down.")
        self.callFromThread(self.stop)
        self._exitSignal = number
+
    def disconnectAll(self) -> None:
        """Disconnect every reader, and writer in the system."""
        selectables = self.removeAll()
        for reader in selectables:
            # Log in each selectable's own logging context.
            log.callWithLogger(
                reader, reader.connectionLost, Failure(main.CONNECTION_LOST)
            )
+
    def iterate(self, delay: float = 0.0) -> None:
        """
        See twisted.internet.interfaces.IReactorCore.iterate.
        """
        self.runUntilCurrent()
        self.doIteration(delay)
+
    def fireSystemEvent(self, eventType: str) -> None:
        """
        See twisted.internet.interfaces.IReactorCore.fireSystemEvent.
        """
        # Unknown event types are a silent no-op.
        event = self._eventTriggers.get(eventType)
        if event is not None:
            event.fireEvent()
+
    def addSystemEventTrigger(
        self,
        phase: str,
        eventType: str,
        callable: Callable[..., Any],
        *args: object,
        **kwargs: object,
    ) -> _SystemEventID:
        """
        See twisted.internet.interfaces.IReactorCore.addSystemEventTrigger.
        """
        assert builtins.callable(callable), f"{callable} is not callable"
        # Lazily create the per-event-type trigger collection.
        if eventType not in self._eventTriggers:
            self._eventTriggers[eventType] = _ThreePhaseEvent()
        return _SystemEventID(
            (
                eventType,
                self._eventTriggers[eventType].addTrigger(
                    phase, callable, *args, **kwargs
                ),
            )
        )
+
    def removeSystemEventTrigger(self, triggerID: _SystemEventID) -> None:
        """
        See twisted.internet.interfaces.IReactorCore.removeSystemEventTrigger.

        Raises C{KeyError} if C{triggerID} names an unknown event type.
        """
        eventType, handle = triggerID
        self._eventTriggers[eventType].removeTrigger(handle)
+
    def callWhenRunning(
        self, callable: Callable[..., Any], *args: object, **kwargs: object
    ) -> Optional[_SystemEventID]:
        """
        See twisted.internet.interfaces.IReactorCore.callWhenRunning.
        """
        if self.running:
            # Already running: call synchronously, nothing to remove later.
            callable(*args, **kwargs)
            return None
        else:
            return self.addSystemEventTrigger(
                "after", "startup", callable, *args, **kwargs
            )
+
    def startRunning(self, installSignalHandlers: bool = True) -> None:
        """
        Method called when reactor starts: do some initialization and fire
        startup events.

        Don't call this directly, call reactor.run() instead: it should take
        care of calling this.

        This method is somewhat misnamed.  The reactor will not necessarily be
        in the running state by the time this method returns.  The only
        guarantee is that it will be on its way to the running state.

        @param installSignalHandlers: A flag which, if set, indicates that
            handlers for a number of (implementation-defined) signals should be
            installed during startup.
        """
        if self._started:
            raise error.ReactorAlreadyRunning()
        if self._startedBefore:
            raise error.ReactorNotRestartable()

        # Replace any previous signal-handling arrangement; installation of
        # the new one is deferred to _reallyStartRunning.
        self._signals.uninstall()
        self._installSignalHandlers = installSignalHandlers
        self._signals = self._makeSignalHandling(installSignalHandlers)

        self._started = True
        self._stopped = False
        if self._registerAsIOThread:
            threadable.registerAsIOThread()
        self.fireSystemEvent("startup")
+
    def _reallyStartRunning(self) -> None:
        """
        Method called to transition to the running state.  This should happen
        in the I{during startup} event trigger phase.
        """
        self.running = True
        if self._installSignalHandlers:
            # Make sure this happens before after-startup events, since the
            # expectation of after-startup is that the reactor is fully
            # initialized.  Don't do it right away for historical reasons
            # (perhaps some before-startup triggers don't want there to be a
            # custom SIGCHLD handler so that they can run child processes with
            # some blocking api).
            self._signals.install()
+
    # IReactorTime

    seconds = staticmethod(runtimeSeconds)

    def callLater(
        self, delay: float, callable: Callable[..., Any], *args: object, **kw: object
    ) -> DelayedCall:
        """
        See twisted.internet.interfaces.IReactorTime.callLater.
        """
        assert builtins.callable(callable), f"{callable} is not callable"
        assert delay >= 0, f"{delay} is not greater than or equal to 0 seconds"
        delayedCall = DelayedCall(
            self.seconds() + delay,
            callable,
            args,
            kw,
            self._cancelCallLater,
            self._moveCallLaterSooner,
            seconds=self.seconds,
        )
        # Queued separately; merged into the heap by _insertNewDelayedCalls.
        self._newTimedCalls.append(delayedCall)
        return delayedCall
+
    def _moveCallLaterSooner(self, delayedCall: DelayedCall) -> None:
        # Restore the heap invariant after delayedCall's time moved earlier.
        # Linear time find: slow.
        heap = self._pendingTimedCalls
        try:
            pos = heap.index(delayedCall)

            # Move elt up the heap until it rests at the right place.
            elt = heap[pos]
            while pos != 0:
                parent = (pos - 1) // 2
                if heap[parent] <= elt:
                    break
                # move parent down
                heap[pos] = heap[parent]
                pos = parent
            heap[pos] = elt
        except ValueError:
            # element was not found in heap - oh well...
            pass
+
    def _cancelCallLater(self, delayedCall: DelayedCall) -> None:
        # The cancelled call stays in the heap; runUntilCurrent skips it and
        # the counter below triggers periodic heap compaction there.
        self._cancellations += 1
+
    def getDelayedCalls(self) -> Sequence[IDelayedCall]:
        """
        See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
        """
        # Include not-yet-merged new calls; exclude cancelled ones.
        return [
            x
            for x in (self._pendingTimedCalls + self._newTimedCalls)
            if not x.cancelled
        ]
+
    def _insertNewDelayedCalls(self) -> None:
        # Merge calls queued by callLater into the heap, dropping any that
        # were cancelled before ever being scheduled.
        for call in self._newTimedCalls:
            if call.cancelled:
                self._cancellations -= 1
            else:
                call.activate_delay()
                heappush(self._pendingTimedCalls, call)
        self._newTimedCalls = []
+
    def timeout(self) -> Optional[float]:
        """
        Determine the longest time the reactor may sleep (waiting on I/O
        notification, perhaps) before it must wake up to service a time-related
        event.

        @return: The maximum number of seconds the reactor may sleep.
        """
        # insert new delayed calls to make sure to include them in timeout value
        self._insertNewDelayedCalls()

        if not self._pendingTimedCalls:
            return None

        delay = self._pendingTimedCalls[0].time - self.seconds()

        # Pick a somewhat arbitrary maximum possible value for the timeout.
        # This value is 2 ** 31 / 1000, which is the number of seconds which can
        # be represented as an integer number of milliseconds in a signed 32 bit
        # integer.  This particular limit is imposed by the epoll_wait(3)
        # interface which accepts a timeout as a C "int" type and treats it as
        # representing a number of milliseconds.
        longest = 2147483

        # Don't let the delay be in the past (negative) or exceed a plausible
        # maximum (platform-imposed) interval.
        return max(0, min(longest, delay))
+
    def runUntilCurrent(self) -> None:
        """
        Run all pending timed calls.

        This drains a bounded snapshot of the thread call queue, fires
        every delayed call whose scheduled time has arrived, compacts the
        heap when cancelled entries dominate it, and finally emits the
        "shutdown" event if the reactor was just stopped.
        """
        if self.threadCallQueue:
            # Keep track of how many calls we actually make, as we're
            # making them, in case another call is added to the queue
            # while we're in this loop.
            count = 0
            total = len(self.threadCallQueue)
            for f, a, kw in self.threadCallQueue:
                try:
                    f(*a, **kw)
                except BaseException:
                    log.err()
                count += 1
                if count == total:
                    # Only the snapshot taken above is drained this pass.
                    break
            del self.threadCallQueue[:count]
            if self.threadCallQueue:
                # More work arrived while draining; make sure another
                # iteration happens promptly.
                self.wakeUp()

        # Insert new delayed calls now so they are eligible to fire below.
        self._insertNewDelayedCalls()

        now = self.seconds()
        while self._pendingTimedCalls and (self._pendingTimedCalls[0].time <= now):
            call = heappop(self._pendingTimedCalls)
            if call.cancelled:
                # Lazily discard cancelled entries as they surface.
                self._cancellations -= 1
                continue

            if call.delayed_time > 0.0:
                # The call still has extra delay pending; apply it and
                # re-queue the call instead of firing it now.
                call.activate_delay()
                heappush(self._pendingTimedCalls, call)
                continue

            try:
                call.called = 1
                call.func(*call.args, **call.kw)
            except BaseException:
                log.err()
                if call.creator is not None:
                    # If the call recorded its creation stack, log it to
                    # help locate the origin of the failing callback.
                    e = "\n"
                    e += (
                        " C: previous exception occurred in "
                        + "a DelayedCall created here:\n"
                    )
                    e += " C:"
                    e += "".join(call.creator).rstrip().replace("\n", "\n C:")
                    e += "\n"
                    log.msg(e)

        # Compact the heap once cancelled entries clearly dominate it
        # (more than 50 of them, and more than half the heap).
        if (
            self._cancellations > 50
            and self._cancellations > len(self._pendingTimedCalls) >> 1
        ):
            self._cancellations = 0
            self._pendingTimedCalls = [
                x for x in self._pendingTimedCalls if not x.cancelled
            ]
            heapify(self._pendingTimedCalls)

        if self._justStopped:
            # stop() was called since the last pass; fire shutdown once.
            self._justStopped = False
            self.fireSystemEvent("shutdown")
+
    # IReactorThreads
    if platform.supportsThreads():
        # Under this branch the platform guarantees ThreadPool imported.
        assert ThreadPool is not None

        # The lazily created reactor threadpool (see getThreadPool).
        threadpool = None
        # ID of the trigger starting the threadpool
        _threadpoolStartupID = None
        # ID of the trigger stopping the threadpool
        threadpoolShutdownID = None

        def _initThreads(self) -> None:
            # Install the getaddrinfo()-in-a-threadpool name resolver and
            # mark the reactor as using threads.
            self.installNameResolver(
                _GAIResolver(cast(IReactorThreads, self), self.getThreadPool)
            )
            self.usingThreads = True

        # `IReactorFromThreads` defines the first named argument as
        # `callable: Callable[..., Any]` but this defines it as `f`
        # really both should be defined using py3.8 positional only
        def callFromThread(  # type: ignore[override]
            self, f: Callable[..., Any], *args: object, **kwargs: object
        ) -> None:
            """
            See
            L{twisted.internet.interfaces.IReactorFromThreads.callFromThread}.
            """
            assert callable(f), f"{f} is not callable"
            # lists are thread-safe in CPython, but not in Jython
            # this is probably a bug in Jython, but until fixed this code
            # won't work in Jython.
            self.threadCallQueue.append((f, args, kwargs))
            self.wakeUp()

        def _initThreadPool(self) -> None:
            """
            Create the threadpool accessible with callFromThread.
            """
            self.threadpool = ThreadPool(0, 10, "twisted.internet.reactor")
            self._threadpoolStartupID = self.callWhenRunning(self.threadpool.start)
            self.threadpoolShutdownID = self.addSystemEventTrigger(
                "during", "shutdown", self._stopThreadPool
            )

        def _uninstallHandler(self) -> None:
            # Undo whatever signal handling self._signals installed.
            self._signals.uninstall()

        def _stopThreadPool(self) -> None:
            """
            Stop the reactor threadpool.  This method is only valid if there
            is currently a threadpool (created by L{_initThreadPool}).  It
            is not intended to be called directly; instead, it will be
            called by a shutdown trigger created in L{_initThreadPool}.
            """
            triggers = [self._threadpoolStartupID, self.threadpoolShutdownID]
            for trigger in filter(None, triggers):
                try:
                    self.removeSystemEventTrigger(trigger)
                except ValueError:
                    # The trigger may already be gone; that is fine, all we
                    # need is for it not to be registered any more.
                    pass
            self._threadpoolStartupID = None
            self.threadpoolShutdownID = None
            assert self.threadpool is not None
            self.threadpool.stop()
            self.threadpool = None

        def getThreadPool(self) -> ThreadPool:
            """
            See L{twisted.internet.interfaces.IReactorThreads.getThreadPool}.
            """
            # The pool is created lazily on first use.
            if self.threadpool is None:
                self._initThreadPool()
            assert self.threadpool is not None
            return self.threadpool

        # `IReactorInThreads` defines the first named argument as
        # `callable: Callable[..., Any]` but this defines it as `_callable`
        # really both should be defined using py3.8 positional only
        def callInThread(  # type: ignore[override]
            self, _callable: Callable[..., Any], *args: object, **kwargs: object
        ) -> None:
            """
            See L{twisted.internet.interfaces.IReactorInThreads.callInThread}.
            """
            self.getThreadPool().callInThread(_callable, *args, **kwargs)

        def suggestThreadPoolSize(self, size: int) -> None:
            """
            See L{twisted.internet.interfaces.IReactorThreads.suggestThreadPoolSize}.
            """
            self.getThreadPool().adjustPoolsize(maxthreads=size)

    else:
        # This is for signal handlers.
        def callFromThread(
            self, f: Callable[..., Any], *args: object, **kwargs: object
        ) -> None:
            assert callable(f), f"{f} is not callable"
            # See comment in the other callFromThread implementation.
            self.threadCallQueue.append((f, args, kwargs))
+
+
# Declare IReactorThreads support on ReactorBase only when the platform
# actually provides threads, matching the conditional method definitions
# inside the class body.
if platform.supportsThreads():
    classImplements(ReactorBase, IReactorThreads)
+
+
@implementer(IConnector)
class BaseConnector(ABC):
    """
    Basic implementation of L{IConnector}.

    State can be: "connecting", "connected", "disconnected"
    """

    # Pending IDelayedCall that fails the attempt on timeout, if any.
    timeoutID = None
    # Truthy once factory.doStart() has been called and not yet doStop()'d.
    factoryStarted = 0

    def __init__(
        self, factory: ClientFactory, timeout: float, reactor: ReactorBase
    ) -> None:
        self.state = "disconnected"
        self.reactor = reactor
        self.factory = factory
        self.timeout = timeout

    def disconnect(self) -> None:
        """Disconnect whatever our state is."""
        if self.state == "connecting":
            self.stopConnecting()
        elif self.state == "connected":
            assert self.transport is not None
            self.transport.loseConnection()

    @abstractmethod
    def _makeTransport(self) -> "Client":
        # Subclasses build the concrete transport here.
        pass

    def connect(self) -> None:
        """Start connection to remote server."""
        if self.state != "disconnected":
            raise RuntimeError("can't connect in this state")

        self.state = "connecting"
        if not self.factoryStarted:
            self.factory.doStart()
            self.factoryStarted = 1
        self.transport: Optional[Client] = self._makeTransport()
        if self.timeout is not None:
            # Fail the attempt if it has not completed within the timeout.
            self.timeoutID = self.reactor.callLater(
                self.timeout, self.transport.failIfNotConnected, error.TimeoutError()
            )
        self.factory.startedConnecting(self)

    def stopConnecting(self) -> None:
        """Stop attempting to connect."""
        if self.state != "connecting":
            raise error.NotConnectingError("we're not trying to connect")

        assert self.transport is not None
        self.state = "disconnected"
        self.transport.failIfNotConnected(error.UserError())
        del self.transport

    def cancelTimeout(self) -> None:
        # Cancel the pending timeout call, if one was scheduled.
        if self.timeoutID is not None:
            try:
                self.timeoutID.cancel()
            except ValueError:
                # It can no longer be cancelled; all we need is for it
                # not to be pending any more.
                pass
            del self.timeoutID

    def buildProtocol(self, addr: IAddress) -> Optional[IProtocol]:
        # The transport connected: record it and delegate to the factory.
        self.state = "connected"
        self.cancelTimeout()
        return self.factory.buildProtocol(addr)

    def connectionFailed(self, reason: Failure) -> None:
        self.cancelTimeout()
        self.transport = None
        self.state = "disconnected"
        self.factory.clientConnectionFailed(self, reason)
        if self.state == "disconnected":
            # factory hasn't called our connect() method
            self.factory.doStop()
            self.factoryStarted = 0

    def connectionLost(self, reason: Failure) -> None:
        self.state = "disconnected"
        self.factory.clientConnectionLost(self, reason)
        if self.state == "disconnected":
            # factory hasn't called our connect() method
            self.factory.doStop()
            self.factoryStarted = 0

    def getDestination(self) -> IAddress:
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement " "getDestination"
        )

    def __repr__(self) -> str:
        return "<{} instance at 0x{:x} {} {}>".format(
            reflect.qual(self.__class__),
            id(self),
            self.state,
            self.getDestination(),
        )
+
+
class BasePort(abstract.FileDescriptor):
    """
    Shared behaviour for listening-port implementations.

    Note: This does not actually implement IListeningPort.
    """

    # Concrete subclasses must supply a real address family and socket
    # type; the None placeholders only satisfy the attribute contract.
    addressFamily: socket.AddressFamily = None  # type: ignore[assignment]
    socketType: socket.SocketKind = None  # type: ignore[assignment]

    def createInternetSocket(self) -> socket.socket:
        """
        Build a non-blocking, close-on-exec socket using this port's
        configured address family and socket type.
        """
        sock = socket.socket(self.addressFamily, self.socketType)
        sock.setblocking(False)
        fdesc._setCloseOnExec(sock.fileno())
        return sock

    def doWrite(self) -> Optional[Failure]:
        """Raises a RuntimeError"""
        raise RuntimeError("doWrite called on a %s" % reflect.qual(self.__class__))
+
+
# Nothing in this module is exported for wildcard import; everything is
# reached through its qualified name.
__all__: List[str] = []
diff --git a/contrib/python/Twisted/py3/twisted/internet/cfreactor.py b/contrib/python/Twisted/py3/twisted/internet/cfreactor.py
new file mode 100644
index 0000000000..142c0472ef
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/cfreactor.py
@@ -0,0 +1,593 @@
+# -*- test-case-name: twisted.internet.test.test_core -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+A reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the
+CoreFoundation main loop used by macOS.
+
+This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}
+applications.
+"""
+from __future__ import annotations
+
+__all__ = ["install", "CFReactor"]
+
+import sys
+
+from zope.interface import implementer
+
+from CFNetwork import ( # type: ignore[import]
+ CFSocketCreateRunLoopSource,
+ CFSocketCreateWithNative,
+ CFSocketDisableCallBacks,
+ CFSocketEnableCallBacks,
+ CFSocketInvalidate,
+ CFSocketSetSocketFlags,
+ kCFSocketAutomaticallyReenableReadCallBack,
+ kCFSocketAutomaticallyReenableWriteCallBack,
+ kCFSocketConnectCallBack,
+ kCFSocketReadCallBack,
+ kCFSocketWriteCallBack,
+)
+from CoreFoundation import ( # type: ignore[import]
+ CFAbsoluteTimeGetCurrent,
+ CFRunLoopAddSource,
+ CFRunLoopAddTimer,
+ CFRunLoopGetCurrent,
+ CFRunLoopRemoveSource,
+ CFRunLoopRun,
+ CFRunLoopStop,
+ CFRunLoopTimerCreate,
+ CFRunLoopTimerInvalidate,
+ kCFAllocatorDefault,
+ kCFRunLoopCommonModes,
+)
+
+from twisted.internet.interfaces import IReactorFDSet
+from twisted.internet.posixbase import _NO_FILEDESC, PosixReactorBase
+from twisted.python import log
+
+# We know that we're going to run on macOS so we can just pick the
+# POSIX-appropriate waker. This also avoids having a dynamic base class and
+# so lets more things get type checked.
+from ._signals import _UnixWaker
+
# Indexes into the per-descriptor read/write state list kept in _fdmap.
_READ = 0
_WRITE = 1
# Extra CFSocket flag: keeps CF from (destructively) retrieving SO_ERROR;
# see the comment in CFReactor._watchFD where it is applied.
_preserveSOError = 1 << 6
+
+
class _WakerPlus(_UnixWaker):
    """
    The normal Twisted waker will simply wake up the main loop, which causes an
    iteration to run, which in turn causes L{ReactorBase.runUntilCurrent}
    to get invoked.

    L{CFReactor} has a slightly different model of iteration, though: rather
    than have each iteration process the thread queue, then timed calls, then
    file descriptors, each callback is run as it is dispatched by the CFRunLoop
    observer which triggered it.

    So this waker needs to not only unblock the loop, but also make sure the
    work gets done; so, it reschedules the invocation of C{runUntilCurrent} to
    be immediate (0 seconds from now) even if there is no timed call work to
    do.
    """

    def __init__(self, reactor):
        """
        @param reactor: the L{CFReactor} whose C{runUntilCurrent} must be
            rescheduled whenever this waker fires.
        """
        super().__init__()
        self.reactor = reactor

    def doRead(self):
        """
        Wake up the loop and force C{runUntilCurrent} to run immediately in the
        next timed iteration.
        """
        result = super().doRead()
        # Force a 0-second simulator timer so the queued work is actually
        # processed, not merely woken for.
        self.reactor._scheduleSimulate(True)
        return result
+
+
@implementer(IReactorFDSet)
class CFReactor(PosixReactorBase):
    """
    The CoreFoundation reactor.

    You probably want to use this via the L{install} API.

    @ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a
        4-tuple of:

            - source: a C{CFRunLoopSource}; the source associated with this
              socket.
            - socket: a C{CFSocket} wrapping the file descriptor.
            - descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}
              provider.
            - read-write: a 2-C{list} of booleans: respectively, whether this
              descriptor is currently registered for reading or registered for
              writing.

    @ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or
        L{IWriteDescriptor} to a C{fd} in L{_fdmap}.  Implemented in this
        manner so that we don't have to rely (even more) on the hashability of
        L{IReadDescriptor} providers, and we know that they won't be collected
        since these are kept in sync with C{_fdmap}.  Necessary because the
        .fileno() of a file descriptor may change at will, so we need to be
        able to look up what its file descriptor I{used} to be, so that we can
        look it up in C{_fdmap}

    @ivar _cfrunloop: the C{CFRunLoop} pyobjc object wrapped
        by this reactor.

    @ivar _inCFLoop: Is C{CFRunLoopRun} currently running?

    @type _inCFLoop: L{bool}

    @ivar _currentSimulator: if a CFTimer is currently scheduled with the CF
        run loop to run Twisted callLater calls, this is a reference to it.
        Otherwise, it is L{None}
    """

    def __init__(self, runLoop=None, runner=None):
        """
        @param runLoop: the C{CFRunLoop} to attach to; defaults to the
            calling thread's current run loop.

        @param runner: the callable used to actually enter the loop;
            defaults to C{CFRunLoopRun}.
        """
        self._fdmap = {}
        self._idmap = {}
        if runner is None:
            runner = CFRunLoopRun
        self._runner = runner

        if runLoop is None:
            runLoop = CFRunLoopGetCurrent()
        self._cfrunloop = runLoop
        PosixReactorBase.__init__(self)

    def _wakerFactory(self) -> _WakerPlus:
        # Use the CFReactor-aware waker, which also reschedules the
        # simulator timer when it fires.
        return _WakerPlus(self)

    def _socketCallback(
        self, cfSocket, callbackType, ignoredAddress, ignoredData, context
    ):
        """
        The socket callback issued by CFRunLoop.  This will issue C{doRead} or
        C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}
        registered with the file descriptor that we are being notified of.

        @param cfSocket: The C{CFSocket} which has got some activity.

        @param callbackType: The type of activity that we are being notified
            of.  Either C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}.

        @param ignoredAddress: Unused, because this is not used for either of
            the callback types we register for.

        @param ignoredData: Unused, because this is not used for either of the
            callback types we register for.

        @param context: The data associated with this callback by
            C{CFSocketCreateWithNative} (in C{CFReactor._watchFD}).  A 2-tuple
            of C{(int, CFRunLoopSource)}.
        """
        (fd, smugglesrc) = context
        if fd not in self._fdmap:
            # Spurious notifications seem to be generated sometimes if you
            # CFSocketDisableCallBacks in the middle of an event.  I don't know
            # about this FD, any more, so let's get rid of it.
            CFRunLoopRemoveSource(self._cfrunloop, smugglesrc, kCFRunLoopCommonModes)
            return

        src, skt, readWriteDescriptor, rw = self._fdmap[fd]

        def _drdw():
            why = None
            isRead = False

            try:
                if readWriteDescriptor.fileno() == -1:
                    why = _NO_FILEDESC
                else:
                    isRead = callbackType == kCFSocketReadCallBack
                    # CFSocket seems to deliver duplicate read/write
                    # notifications sometimes, especially a duplicate
                    # writability notification when first registering the
                    # socket.  This bears further investigation, since I may
                    # have been mis-interpreting the behavior I was seeing.
                    # (Running the full Twisted test suite, while thorough, is
                    # not always entirely clear.)  Until this has been more
                    # thoroughly investigated , we consult our own
                    # reading/writing state flags to determine whether we
                    # should actually attempt a doRead/doWrite first. -glyph
                    if isRead:
                        if rw[_READ]:
                            why = readWriteDescriptor.doRead()
                    else:
                        if rw[_WRITE]:
                            why = readWriteDescriptor.doWrite()
            except BaseException:
                why = sys.exc_info()[1]
                log.err()
            if why:
                self._disconnectSelectable(readWriteDescriptor, why, isRead)

        log.callWithLogger(readWriteDescriptor, _drdw)

    def _watchFD(self, fd, descr, flag):
        """
        Register a file descriptor with the C{CFRunLoop}, or modify its state
        so that it's listening for both notifications (read and write) rather
        than just one; used to implement C{addReader} and C{addWriter}.

        @param fd: The file descriptor.

        @type fd: L{int}

        @param descr: the L{IReadDescriptor} or L{IWriteDescriptor}

        @param flag: the flag to register for callbacks on, either
            C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}
        """
        if fd == -1:
            raise RuntimeError("Invalid file descriptor.")
        if fd in self._fdmap:
            src, cfs, gotdescr, rw = self._fdmap[fd]
            # do I need to verify that it's the same descr?
        else:
            # The context list is mutated below (ctx.append(src)) so that
            # the run loop source is smuggled into _socketCallback.
            ctx = []
            ctx.append(fd)
            cfs = CFSocketCreateWithNative(
                kCFAllocatorDefault,
                fd,
                kCFSocketReadCallBack
                | kCFSocketWriteCallBack
                | kCFSocketConnectCallBack,
                self._socketCallback,
                ctx,
            )
            CFSocketSetSocketFlags(
                cfs,
                kCFSocketAutomaticallyReenableReadCallBack
                | kCFSocketAutomaticallyReenableWriteCallBack
                |
                # This extra flag is to ensure that CF doesn't (destructively,
                # because destructively is the only way to do it) retrieve
                # SO_ERROR and thereby break twisted.internet.tcp.BaseClient,
                # which needs SO_ERROR to tell it whether or not it needs to
                # call connect_ex a second time.
                _preserveSOError,
            )
            src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)
            ctx.append(src)
            CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)
            # Start with all callbacks disabled; only the requested flag is
            # enabled below.
            CFSocketDisableCallBacks(
                cfs,
                kCFSocketReadCallBack
                | kCFSocketWriteCallBack
                | kCFSocketConnectCallBack,
            )
            rw = [False, False]
            self._idmap[id(descr)] = fd
            self._fdmap[fd] = src, cfs, descr, rw
        rw[self._flag2idx(flag)] = True
        CFSocketEnableCallBacks(cfs, flag)

    def _flag2idx(self, flag):
        """
        Convert a C{kCFSocket...} constant to an index into the read/write
        state list (C{_READ} or C{_WRITE}) (the 4th element of the value of
        C{self._fdmap}).

        @param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}

        @return: C{_READ} or C{_WRITE}
        """
        return {kCFSocketReadCallBack: _READ, kCFSocketWriteCallBack: _WRITE}[flag]

    def _unwatchFD(self, fd, descr, flag):
        """
        Unregister a file descriptor with the C{CFRunLoop}, or modify its state
        so that it's listening for only one notification (read or write) as
        opposed to both; used to implement C{removeReader} and C{removeWriter}.

        @param fd: a file descriptor

        @type fd: C{int}

        @param descr: an L{IReadDescriptor} or L{IWriteDescriptor}

        @param flag: C{kCFSocketWriteCallBack} C{kCFSocketReadCallBack}
        """
        if id(descr) not in self._idmap:
            return
        if fd == -1:
            # need to deal with it in this case, I think.
            # The descriptor's fileno() has changed; recover the fd we
            # registered it under from _idmap.
            realfd = self._idmap[id(descr)]
        else:
            realfd = fd
        # NOTE: this deliberately rebinds the descr parameter to the stored
        # descriptor; realfd was already computed from the original above.
        src, cfs, descr, rw = self._fdmap[realfd]
        CFSocketDisableCallBacks(cfs, flag)
        rw[self._flag2idx(flag)] = False
        if not rw[_READ] and not rw[_WRITE]:
            # Neither direction is watched any more; tear down the CFSocket
            # and its run loop source entirely.
            del self._idmap[id(descr)]
            del self._fdmap[realfd]
            CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)
            CFSocketInvalidate(cfs)

    def addReader(self, reader):
        """
        Implement L{IReactorFDSet.addReader}.
        """
        self._watchFD(reader.fileno(), reader, kCFSocketReadCallBack)

    def addWriter(self, writer):
        """
        Implement L{IReactorFDSet.addWriter}.
        """
        self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)

    def removeReader(self, reader):
        """
        Implement L{IReactorFDSet.removeReader}.
        """
        self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)

    def removeWriter(self, writer):
        """
        Implement L{IReactorFDSet.removeWriter}.
        """
        self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)

    def removeAll(self):
        """
        Implement L{IReactorFDSet.removeAll}.
        """
        # The reactor's own internal readers (e.g. the waker) stay put.
        allDesc = {descr for src, cfs, descr, rw in self._fdmap.values()}
        allDesc -= set(self._internalReaders)
        for desc in allDesc:
            self.removeReader(desc)
            self.removeWriter(desc)
        return list(allDesc)

    def getReaders(self):
        """
        Implement L{IReactorFDSet.getReaders}.
        """
        return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_READ]]

    def getWriters(self):
        """
        Implement L{IReactorFDSet.getWriters}.
        """
        return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_WRITE]]

    def _moveCallLaterSooner(self, tple):
        """
        Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}
        so that it will immediately reschedule.  Normally
        C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is
        always run before the mainloop goes back to sleep, so this forces it to
        immediately recompute how long the loop needs to stay asleep.
        """
        result = PosixReactorBase._moveCallLaterSooner(self, tple)
        self._scheduleSimulate()
        return result

    def startRunning(self, installSignalHandlers: bool = True) -> None:
        """
        Start running the reactor, then kick off the timer that advances
        Twisted's clock to keep pace with CFRunLoop's.
        """
        super().startRunning(installSignalHandlers)

        # Before 'startRunning' is called, the reactor is not attached to the
        # CFRunLoop[1]; specifically, the CFTimer that runs all of Twisted's
        # timers is not active and will not have been added to the loop by any
        # application code.  Now that _running is probably[2] True, we need to
        # ensure that timed calls will actually run on the main loop.  This
        # call needs to be here, rather than at the top of mainLoop, because
        # it's possible to use startRunning to *attach* a reactor to an
        # already-running CFRunLoop, i.e. within a plugin for an application
        # that doesn't otherwise use Twisted, rather than calling it via run().
        self._scheduleSimulate(force=True)

        # [1]: readers & writers are still active in the loop, but arguably
        #      they should not be.

        # [2]: application code within a 'startup' system event trigger *may*
        #      have already crashed the reactor and thus set _started to False,
        #      but that specific case is handled by mainLoop, since that case
        #      is inherently irrelevant in an attach-to-application case and is
        #      only necessary to handle mainLoop spuriously blocking.

    # Whether the CFRunLoop runner is currently executing (see mainLoop).
    _inCFLoop = False

    def mainLoop(self) -> None:
        """
        Run the runner (C{CFRunLoopRun} or something that calls it), which runs
        the run loop until C{crash()} is called.
        """
        if not self._started:
            # If we arrive here, we were crashed by application code in a
            # 'startup' system event trigger, (or crashed manually before the
            # application calls 'mainLoop' directly for whatever reason; sigh,
            # this method should not be public).  However, application code
            # doing obscure things will expect an invocation of this loop to
            # have at least *one* pass over ready readers, writers, and delayed
            # calls.  iterate(), in particular, is emulated in exactly this way
            # in this reactor implementation.  In order to ensure that we enter
            # the real implementation of the mainloop and do all of those
            # things, we need to set _started back to True so that callLater
            # actually schedules itself against the CFRunLoop, but immediately
            # crash once we are in the context of the loop where we've run
            # ready I/O and timers.

            def docrash() -> None:
                self.crash()

            self._started = True
            self.callLater(0, docrash)
        already = False
        try:
            while self._started:
                if already:
                    # Sometimes CFRunLoopRun (or its equivalents) may exit
                    # without CFRunLoopStop being called.

                    # This is really only *supposed* to happen when it runs out
                    # of sources & timers to process.  However, in full Twisted
                    # test-suite runs we have observed, extremely rarely (once
                    # in every 3000 tests or so) CFRunLoopRun exiting in cases
                    # where it seems as though there *is* still some work to
                    # do.  However, given the difficulty of reproducing the
                    # race conditions necessary to make this happen, it's
                    # possible that we have missed some nuance of when
                    # CFRunLoop considers the list of work "empty" and various
                    # callbacks and timers to be "invalidated".  Therefore we
                    # are not fully confident that this is a platform bug, but
                    # it is nevertheless unexpected behavior from our reading
                    # of the documentation.

                    # To accommodate this rare and slightly ambiguous stress
                    # case, we make extra sure that our scheduled timer is
                    # re-created on the loop as a CFRunLoopTimer, which
                    # reliably gives the loop some work to do and 'fixes' it if
                    # it exited due to having no active sources or timers.
                    self._scheduleSimulate()

                    # At this point, there may be a little more code that we
                    # would need to put here for full correctness for a very
                    # peculiar type of application: if you're writing a
                    # command-line tool using CFReactor, adding *nothing* to
                    # the reactor itself, disabling even the internal Waker
                    # file descriptors, then there's a possibility that
                    # CFRunLoopRun will exit early, and if we have no timers,
                    # we might busy-loop here.  Because we cannot seem to force
                    # this to happen under normal circumstances, we're leaving
                    # that code out.

                already = True
                self._inCFLoop = True
                try:
                    self._runner()
                finally:
                    self._inCFLoop = False
        finally:
            self._stopSimulating()

    # The currently scheduled CFRunLoopTimer for runUntilCurrent, if any.
    _currentSimulator: object | None = None

    def _stopSimulating(self) -> None:
        """
        If we have a CFRunLoopTimer registered with the CFRunLoop, invalidate
        it and set it to None.
        """
        if self._currentSimulator is None:
            return
        CFRunLoopTimerInvalidate(self._currentSimulator)
        self._currentSimulator = None

    def _scheduleSimulate(self, force: bool = False) -> None:
        """
        Schedule a call to C{self.runUntilCurrent}.  This will cancel the
        currently scheduled call if it is already scheduled.

        @param force: Even if there are no timed calls, make sure that
            C{runUntilCurrent} runs immediately (in a 0-seconds-from-now
            C{CFRunLoopTimer}).  This is necessary for calls which need to
            trigger behavior of C{runUntilCurrent} other than running timed
            calls, such as draining the thread call queue or calling C{crash()}
            when the appropriate flags are set.

        @type force: C{bool}
        """
        self._stopSimulating()
        if not self._started:
            # If the reactor is not running (e.g. we are scheduling callLater
            # calls before starting the reactor) we should not be scheduling
            # CFRunLoopTimers against the global CFRunLoop.
            return

        timeout = 0.0 if force else self.timeout()
        if timeout is None:
            # Nothing timed is pending and we weren't forced; no timer needed.
            return

        fireDate = CFAbsoluteTimeGetCurrent() + timeout

        def simulate(cftimer, extra):
            # The timer is one-shot: clear the reference, run the timed
            # calls, then schedule the next wakeup.
            self._currentSimulator = None
            self.runUntilCurrent()
            self._scheduleSimulate()

        c = self._currentSimulator = CFRunLoopTimerCreate(
            kCFAllocatorDefault, fireDate, 0, 0, 0, simulate, None
        )
        CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)

    def callLater(self, _seconds, _f, *args, **kw):
        """
        Implement L{IReactorTime.callLater}.
        """
        delayedCall = PosixReactorBase.callLater(self, _seconds, _f, *args, **kw)
        # The new call may now be the soonest; recompute the wakeup timer.
        self._scheduleSimulate()
        return delayedCall

    def stop(self):
        """
        Implement L{IReactorCore.stop}.
        """
        PosixReactorBase.stop(self)
        # Force a runUntilCurrent pass so the stop is actually processed.
        self._scheduleSimulate(True)

    def crash(self):
        """
        Implement L{IReactorCore.crash}
        """
        PosixReactorBase.crash(self)
        if not self._inCFLoop:
            return
        CFRunLoopStop(self._cfrunloop)

    def iterate(self, delay=0):
        """
        Emulate the behavior of C{iterate()} for things that want to call it,
        by letting the loop run for a little while and then scheduling a timed
        call to exit it.
        """
        self._started = True
        # Since the CoreFoundation loop doesn't have the concept of "iterate"
        # we can't ask it to do this.  Instead we will make arrangements to
        # crash it *very* soon and then make it run.  This is a rough
        # approximation of "an iteration".  Using crash and mainLoop here
        # means that it's safe (as safe as anything using "iterate" can be) to
        # do this repeatedly.
        self.callLater(0, self.crash)
        self.mainLoop()
+
+
def install(runLoop=None, runner=None):
    """
    Configure the twisted mainloop to be run inside CFRunLoop.

    @param runLoop: the run loop to use.

    @param runner: the function to call in order to actually invoke the main
        loop.  This will default to C{CFRunLoopRun} if not specified.  However,
        this is not an appropriate choice for GUI applications, as you need to
        run NSApplicationMain (or something like it).  For example, to run the
        Twisted mainloop in a PyObjC application, your C{main.py} should look
        something like this::

            from PyObjCTools import AppHelper
            from twisted.internet.cfreactor import install
            install(runner=AppHelper.runEventLoop)
            # initialize your application
            reactor.run()

    @return: The installed reactor.

    @rtype: C{CFReactor}
    """
    # Build the reactor first, then hand it to Twisted's installer so that
    # ``from twisted.internet import reactor`` resolves to it.
    newReactor = CFReactor(runLoop=runLoop, runner=runner)
    from twisted.internet.main import installReactor

    installReactor(newReactor)
    return newReactor
diff --git a/contrib/python/Twisted/py3/twisted/internet/default.py b/contrib/python/Twisted/py3/twisted/internet/default.py
new file mode 100644
index 0000000000..5dfc7ee819
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/default.py
@@ -0,0 +1,55 @@
+# -*- test-case-name: twisted.internet.test.test_default -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+The most suitable default reactor for the current platform.
+
+Depending on a specific application's needs, some other reactor may in
+fact be better.
+"""
+
+
+__all__ = ["install"]
+
+from twisted.python.runtime import platform
+
+
def _getInstallFunction(platform):
    """
    Return a function to install the reactor most suited for the given platform.

    @param platform: The platform for which to select a reactor.
    @type platform: L{twisted.python.runtime.Platform}

    @return: A zero-argument callable which will install the selected
        reactor.
    """
    # Linux: epoll(7) is the default, since it scales well.
    #
    # macOS: poll(2) is not exposed by Python because it doesn't support all
    # file descriptors (in particular, lack of PTY support is a problem) --
    # see <http://bugs.python.org/issue5154>. kqueue has the same restrictions
    # as poll(2) as far PTY support goes.
    #
    # Windows: IOCP should eventually be default, but still has some serious
    # bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
    #
    # We therefore choose epoll(7) on Linux, poll(2) on other non-macOS POSIX
    # platforms, and select(2) everywhere else.
    try:
        if platform.isLinux():
            try:
                from twisted.internet.epollreactor import install
            except ImportError:
                from twisted.internet.pollreactor import install
        elif platform.getType() == "posix" and not platform.isMacOSX():
            from twisted.internet.pollreactor import install
        else:
            from twisted.internet.selectreactor import install
    except ImportError:
        # The preferred reactor may itself be unimportable on this build;
        # select(2) is the universal fallback.
        from twisted.internet.selectreactor import install
    return install
+
+
# Resolve the best installer for the platform we are running on right now,
# so that ``from twisted.internet.default import install`` just works.
install = _getInstallFunction(platform)
diff --git a/contrib/python/Twisted/py3/twisted/internet/defer.py b/contrib/python/Twisted/py3/twisted/internet/defer.py
new file mode 100644
index 0000000000..17e717cad2
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/defer.py
@@ -0,0 +1,2697 @@
+# -*- test-case-name: twisted.test.test_defer -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Support for results that aren't immediately available.
+
+Maintainer: Glyph Lefkowitz
+"""
+from __future__ import annotations
+
+import inspect
+import traceback
+import warnings
+from abc import ABC, abstractmethod
+from asyncio import AbstractEventLoop, Future, iscoroutine
+from contextvars import Context as _Context, copy_context as _copy_context
+from enum import Enum
+from functools import wraps
+from sys import exc_info
+from types import CoroutineType, GeneratorType, MappingProxyType, TracebackType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ Coroutine,
+ Generator,
+ Generic,
+ Iterable,
+ List,
+ Mapping,
+ NoReturn,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+ overload,
+)
+
+import attr
+from incremental import Version
+from typing_extensions import Concatenate, Literal, ParamSpec, Self
+
+from twisted.internet.interfaces import IDelayedCall, IReactorTime
+from twisted.logger import Logger
+from twisted.python import lockfile
+from twisted.python.compat import _PYPY, cmp, comparable
+from twisted.python.deprecate import deprecated, warnAboutFunction
+from twisted.python.failure import Failure, _extraneous
+
+log = Logger()
+
+
+_T = TypeVar("_T")
+_P = ParamSpec("_P")
+
+
class AlreadyCalledError(Exception):
    """
    This error is raised when one of L{Deferred.callback} or L{Deferred.errback}
    is called after one of the two had already been called.

    When L{Deferred} debugging is enabled (see L{setDebugging}), the error
    message includes the creation and first-invocation tracebacks.
    """
+
+
class CancelledError(Exception):
    """
    This error is raised by default when a L{Deferred} is cancelled via
    L{Deferred.cancel} and no canceller fires the Deferred itself.
    """
+
+
# NOTE: this deliberately shadows the builtin TimeoutError within this module;
# importers should reference it as defer.TimeoutError to avoid confusion.
class TimeoutError(Exception):
    """
    This error is raised by default when a L{Deferred} times out
    (see L{Deferred.addTimeout}).
    """
+
+
class NotACoroutineError(TypeError):
    """
    This error is raised when a coroutine is expected and something else is
    encountered.
    """
+
+
def logError(err: Failure) -> Failure:
    """
    Log and return failure.

    This method can be used as an errback that passes the failure on to the
    next errback unmodified. Note that if this is the last errback, and the
    deferred gets garbage collected after this errback has been called, the
    clean up code logs it again.
    """
    log.failure("", err)
    return err
+
+
def succeed(result: _T) -> "Deferred[_T]":
    """
    Create a L{Deferred} that has already fired successfully with C{result}.

    Handy when synchronous code must satisfy an interface that expects a
    L{Deferred}: just C{return defer.succeed(theResult)}.  The failing
    counterpart is L{fail}.

    @param result: the value handed to the new Deferred's C{callback}.
    """
    deferred: Deferred[_T] = Deferred()
    deferred.callback(result)
    return deferred
+
+
def fail(result: Optional[Union[Failure, BaseException]] = None) -> "Deferred[Any]":
    """
    Create a L{Deferred} that has already fired with a failure.

    See L{succeed}'s docstring for why pre-fired Deferreds are useful.

    @param result: passed straight through to L{Deferred.errback}: a
        L{Failure}, an exception, or L{None} to capture the exception
        currently being handled.

    @raise NoCurrentExceptionError: if C{result} is L{None} and there is no
        current exception state.
    """
    deferred: Deferred[Any] = Deferred()
    deferred.errback(result)
    return deferred
+
+
def execute(
    callable: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs
) -> "Deferred[_T]":
    """
    Call C{callable(*args, **kwargs)} and wrap the outcome in a L{Deferred}.

    A normal return produces a fired, successful L{Deferred}; a raised
    exception produces a fired, failed one carrying the corresponding
    L{Failure}.
    """
    try:
        value = callable(*args, **kwargs)
    except BaseException:
        return fail()
    return succeed(value)
+
+
# The overloads below keep the returned Deferred's result type in sync with
# what ``f`` produces: an existing Deferred, a coroutine, or a plain value.
@overload
def maybeDeferred(
    f: Callable[_P, Deferred[_T]], *args: _P.args, **kwargs: _P.kwargs
) -> "Deferred[_T]":
    ...


@overload
def maybeDeferred(
    f: Callable[_P, Coroutine[Deferred[Any], Any, _T]],
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> "Deferred[_T]":
    ...


@overload
def maybeDeferred(
    f: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs
) -> "Deferred[_T]":
    ...


def maybeDeferred(
    f: Callable[_P, Union[Deferred[_T], Coroutine[Deferred[Any], Any, _T], _T]],
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> "Deferred[_T]":
    """
    Invoke a function that may or may not return a L{Deferred} or coroutine.

    Call the given function with the given arguments. Then:

      - If the returned object is a L{Deferred}, return it.

      - If the returned object is a L{Failure}, wrap it with L{fail} and
        return it.

      - If the returned object is a L{types.CoroutineType}, wrap it with
        L{Deferred.fromCoroutine} and return it.

      - Otherwise, wrap it in L{succeed} and return it.

      - If an exception is raised, convert it to a L{Failure}, wrap it in
        L{fail}, and then return it.

    @param f: The callable to invoke
    @param args: The arguments to pass to C{f}
    @param kwargs: The keyword arguments to pass to C{f}

    @return: The result of the function call, wrapped in a L{Deferred} if
        necessary.
    """
    try:
        result = f(*args, **kwargs)
    except BaseException:
        # Capture local variables only when Deferred debugging is enabled;
        # it is expensive.
        return fail(Failure(captureVars=Deferred.debug))

    if isinstance(result, Deferred):
        return result
    elif isinstance(result, Failure):
        return fail(result)
    elif type(result) is CoroutineType:
        # A note on how we identify this case ...
        #
        # inspect.iscoroutinefunction(f) should be the simplest and easiest
        # way to determine if we want to apply coroutine handling. However,
        # the value may be returned by a regular function that calls a
        # coroutine function and returns its result. It would be confusing if
        # cases like this led to different handling of the coroutine (even
        # though it is a mistake to have a regular function call a coroutine
        # function to return its result - doing so immediately destroys a
        # large part of the value of coroutine functions: that they can only
        # have a coroutine result).
        #
        # There are many ways we could inspect ``result`` to determine if it
        # is a "coroutine" but most of these are mistakes. The goal is only
        # to determine whether the value came from ``async def`` or not
        # because these are the only values we're trying to handle with this
        # case. Such values always have exactly one type: CoroutineType.
        return Deferred.fromCoroutine(result)
    else:
        returned: _T = result  # type: ignore
        return succeed(returned)
+
+
@deprecated(
    Version("Twisted", 17, 1, 0),
    replacement="twisted.internet.defer.Deferred.addTimeout",
)
def timeout(deferred: "Deferred[object]") -> None:
    """
    Errback C{deferred} with a L{TimeoutError}.

    Deprecated since Twisted 17.1.0; use L{Deferred.addTimeout} instead.
    """
    deferred.errback(Failure(TimeoutError("Callback timed out")))
+
+
def passthru(arg: _T) -> _T:
    """
    Return C{arg} unchanged; the identity callback.
    """
    return arg
+
+
def _failthru(arg: Failure) -> Failure:
    """
    Return the L{Failure} unchanged; used as the default errback so that
    failures propagate to the next errback in the chain.
    """
    return arg
+
+
def setDebugging(on: bool) -> None:
    """
    Enable or disable L{Deferred} debugging.

    When debugging is on, the call stacks from creation and invocation are
    recorded, and added to any L{AlreadyCalledError}s we raise.
    """
    # Stored on the class, so it affects every Deferred created afterwards.
    Deferred.debug = bool(on)
+
+
def getDebugging() -> bool:
    """
    Determine whether L{Deferred} debugging is enabled.

    See L{setDebugging}.
    """
    return Deferred.debug
+
+
def _cancelledToTimedOutError(value: _T, timeout: float) -> _T:
    """
    Default C{onTimeoutCancel} translator: turn a L{CancelledError} failure
    into a L{TimeoutError} and pass any non-failure result through untouched.

    @param value: the L{Deferred}'s current result.
    @param timeout: the timeout duration, included in the raised error.

    @raise TimeoutError: if C{value} is a L{Failure} wrapping
        L{CancelledError}.
    @raise Exception: any other L{Failure} is re-raised unchanged.

    @since: 16.5
    """
    if not isinstance(value, Failure):
        return value
    # trap() re-raises immediately unless the failure wraps CancelledError.
    value.trap(CancelledError)
    raise TimeoutError(timeout, "Deferred")
+
+
class _Sentinel(Enum):
    """
    @cvar _NO_RESULT:
        The result used to represent the fact that there is no result.
        B{Never ever ever use this as an actual result for a Deferred}.
        You have been warned.
    @cvar _CONTINUE:
        A marker left in L{Deferred.callback}s to indicate a Deferred chain.
        Always accompanied by a Deferred instance in the args tuple pointing at
        the Deferred which is chained to the Deferred which has this marker.
    """

    # Fresh object() instances: each sentinel is equal only to itself.
    _NO_RESULT = object()
    _CONTINUE = object()
+
+
# Cache these values for use without the extra lookup in deferred hot code paths
_NO_RESULT = _Sentinel._NO_RESULT
_CONTINUE = _Sentinel._CONTINUE


# type note: this should be Callable[[object, ...], object] but mypy doesn't allow.
# Callable[[object], object] is next best, but disallows valid callback signatures
DeferredCallback = Callable[..., object]
# type note: this should be Callable[[Failure, ...], object] but mypy doesn't allow.
# Callable[[Failure], object] is next best, but disallows valid callback signatures
DeferredErrback = Callable[..., object]

# Extra positional and keyword arguments stored alongside each callback.
_CallbackOrderedArguments = Tuple[object, ...]
_CallbackKeywordArguments = Mapping[str, object]
# One entry of Deferred.callbacks: a (callable, args, kwargs) triple for the
# success path paired with one for the failure path.  The callable slot may
# instead hold the _CONTINUE sentinel when two Deferreds are chained.
_CallbackChain = Tuple[
    Tuple[
        Union[DeferredCallback, Literal[_Sentinel._CONTINUE]],
        _CallbackOrderedArguments,
        _CallbackKeywordArguments,
    ],
    Tuple[
        Union[DeferredErrback, DeferredCallback, Literal[_Sentinel._CONTINUE]],
        _CallbackOrderedArguments,
        _CallbackKeywordArguments,
    ],
]

# Shared immutable empty-kwargs mapping; avoids allocating a fresh dict for
# every addCallbacks call.
_NONE_KWARGS: _CallbackKeywordArguments = MappingProxyType({})


# Result type of the Deferred itself, and of the next link in its chain.
_SelfResultT = TypeVar("_SelfResultT")
_NextResultT = TypeVar("_NextResultT")
+
+
class DebugInfo:
    """
    Debugging helper attached to a L{Deferred}.

    Holds the creation and first-invocation tracebacks (when debugging is
    enabled) and the last unhandled L{Failure}, which is reported when this
    object is garbage collected.
    """

    failResult: Optional[Failure] = None
    creator: Optional[List[str]] = None
    invoker: Optional[List[str]] = None

    def _getDebugTracebacks(self) -> str:
        """
        Render the recorded creation/invocation stacks, each line tagged
        with a ``C:`` or ``I:`` prefix, or return C{""} if none were kept.
        """
        sections = []
        if self.creator is not None:
            stack = "".join(self.creator).rstrip().replace("\n", "\n C:")
            sections.append(" C: Deferred was created:\n C:" + stack + "\n")
        if self.invoker is not None:
            stack = "".join(self.invoker).rstrip().replace("\n", "\n I:")
            sections.append(" I: First Invoker was:\n I:" + stack + "\n")
        return "".join(sections)

    def __del__(self) -> None:
        """
        Report an unhandled failure, if one is still pending, when this
        helper is collected.
        """
        if self.failResult is None:
            return
        # Note: this is two separate messages for compatibility with
        # earlier tests; arguably it should be a single error message.
        log.critical("Unhandled error in Deferred:", isError=True)

        debugInfo = self._getDebugTracebacks()
        format = "(debug: {debugInfo})" if debugInfo else ""
        log.failure(format, self.failResult, debugInfo=debugInfo)
+
+
+class Deferred(Awaitable[_SelfResultT]):
+ """
+ This is a callback which will be put off until later.
+
+ Why do we want this? Well, in cases where a function in a threaded
+ program would block until it gets a result, for Twisted it should
+ not block. Instead, it should return a L{Deferred}.
+
+ This can be implemented for protocols that run over the network by
+ writing an asynchronous protocol for L{twisted.internet}. For methods
+ that come from outside packages that are not under our control, we use
+ threads (see for example L{twisted.enterprise.adbapi}).
+
+ For more information about Deferreds, see doc/core/howto/defer.html or
+ U{http://twistedmatrix.com/documents/current/core/howto/defer.html}
+
+ When creating a Deferred, you may provide a canceller function, which
+ will be called by d.cancel() to let you do any clean-up necessary if the
+ user decides not to wait for the deferred to complete.
+
+ @ivar called: A flag which is C{False} until either C{callback} or
+ C{errback} is called and afterwards always C{True}.
+ @ivar paused: A counter of how many unmatched C{pause} calls have been made
+ on this instance.
+ @ivar _suppressAlreadyCalled: A flag used by the cancellation mechanism
+ which is C{True} if the Deferred has no canceller and has been
+ cancelled, C{False} otherwise. If C{True}, it can be expected that
+ C{callback} or C{errback} will eventually be called and the result
+ should be silently discarded.
+ @ivar _runningCallbacks: A flag which is C{True} while this instance is
+ executing its callback chain, used to stop recursive execution of
+ L{_runCallbacks}
+ @ivar _chainedTo: If this L{Deferred} is waiting for the result of another
+ L{Deferred}, this is a reference to the other Deferred. Otherwise,
+ L{None}.
+ """
+
    # True once callback() or errback() has fired this Deferred.
    called = False
    # Count of unmatched pause() calls; the callback chain does not advance
    # while this is nonzero.
    paused = 0
    _debugInfo: Optional[DebugInfo] = None
    _suppressAlreadyCalled = False

    # Are we currently running a user-installed callback? Meant to prevent
    # recursive running of callbacks when a reentrant call to add a callback is
    # used.
    _runningCallbacks = False

    # Keep this class attribute for now, for compatibility with code that
    # sets it directly.
    debug = False

    _chainedTo: "Optional[Deferred[Any]]" = None
+
+ def __init__(
+ self, canceller: Optional[Callable[["Deferred[Any]"], None]] = None
+ ) -> None:
+ """
+ Initialize a L{Deferred}.
+
+ @param canceller: a callable used to stop the pending operation
+ scheduled by this L{Deferred} when L{Deferred.cancel} is invoked.
+ The canceller will be passed the deferred whose cancellation is
+ requested (i.e., C{self}).
+
+ If a canceller is not given, or does not invoke its argument's
+ C{callback} or C{errback} method, L{Deferred.cancel} will
+ invoke L{Deferred.errback} with a L{CancelledError}.
+
+ Note that if a canceller is not given, C{callback} or
+ C{errback} may still be invoked exactly once, even though
+ defer.py will have already invoked C{errback}, as described
+ above. This allows clients of code which returns a L{Deferred}
+ to cancel it without requiring the L{Deferred} instantiator to
+ provide any specific implementation support for cancellation.
+ New in 10.1.
+
+ @type canceller: a 1-argument callable which takes a L{Deferred}. The
+ return result is ignored.
+ """
+ self.callbacks: List[_CallbackChain] = []
+ self._canceller = canceller
+ if self.debug:
+ self._debugInfo = DebugInfo()
+ self._debugInfo.creator = traceback.format_stack()[:-1]
+
    def addCallbacks(
        self,
        callback: Union[
            Callable[..., _NextResultT],
            Callable[..., Deferred[_NextResultT]],
            Callable[..., Failure],
            Callable[
                ...,
                Union[_NextResultT, Deferred[_NextResultT], Failure],
            ],
        ],
        errback: Union[
            Callable[..., _NextResultT],
            Callable[..., Deferred[_NextResultT]],
            Callable[..., Failure],
            Callable[
                ...,
                Union[_NextResultT, Deferred[_NextResultT], Failure],
            ],
            None,
        ] = None,
        callbackArgs: Tuple[Any, ...] = (),
        callbackKeywords: Mapping[str, Any] = _NONE_KWARGS,
        errbackArgs: _CallbackOrderedArguments = (),
        errbackKeywords: _CallbackKeywordArguments = _NONE_KWARGS,
    ) -> "Deferred[_NextResultT]":
        """
        Add a pair of callbacks (success and error) to this L{Deferred}.

        These will be executed when the 'master' callback is run.

        @param callback: invoked with the current result when it is not a
            L{Failure}.
        @param errback: invoked with the current result when it is a
            L{Failure}; defaults to passing the failure through unchanged.
        @param callbackArgs: extra positional arguments for C{callback}.
        @param callbackKeywords: extra keyword arguments for C{callback}.
        @param errbackArgs: extra positional arguments for C{errback}.
        @param errbackKeywords: extra keyword arguments for C{errback}.

        @note: The signature of this function was designed many years before
            PEP 612; ParamSpec provides no mechanism to annotate parameters
            like C{callbackArgs}; this is therefore inherently less type-safe
            than calling C{addCallback} and C{addErrback} separately.

        @return: C{self}.
        """
        if errback is None:
            # By default failures fall through to the next errback.
            errback = _failthru

        # Default value used to be None and callers may be using None
        if callbackArgs is None:
            callbackArgs = ()  # type: ignore[unreachable]
        if callbackKeywords is None:
            callbackKeywords = {}  # type: ignore[unreachable]
        if errbackArgs is None:
            errbackArgs = ()  # type: ignore[unreachable]
        if errbackKeywords is None:
            errbackKeywords = {}  # type: ignore[unreachable]

        assert callable(callback)
        assert callable(errback)

        self.callbacks.append(
            (
                (callback, callbackArgs, callbackKeywords),
                (errback, errbackArgs, errbackKeywords),
            )
        )

        # If a result has already arrived, run the freshly added pair now.
        if self.called:
            self._runCallbacks()

        # type note: The Deferred's type has changed here, but *idiomatically*
        # the caller should treat the result as the new type, consistently.
        return self  # type:ignore[return-value]
+
    # BEGIN way too many @overload-s for addCallback, addErrback, and addBoth:
    # these must be accomplished with @overloads, rather than a big Union on
    # the result type as you might expect, because the fact that
    # _NextResultT has no bound makes mypy get confused and require the
    # return types of functions to be combinations of Deferred and Failure
    # rather than the actual return type. I'm not entirely sure what about the
    # semantics of <nothing> create this overzealousness on the part of trying
    # to assign a type; there *might* be a mypy bug in there somewhere.
    # Possibly https://github.com/python/typing/issues/548 is implicated here
    # because TypeVar for the *callable* with a variadic bound might express to
    # Mypy the actual constraint that we want on its type.

    # Each overload narrows the next result type by what the callback can
    # return: a Failure, another Deferred, a plain value, or unions thereof.
    @overload
    def addCallback(
        self,
        callback: Callable[Concatenate[_SelfResultT, _P], Failure],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addCallback(
        self,
        callback: Callable[
            Concatenate[_SelfResultT, _P],
            Union[Failure, Deferred[_NextResultT]],
        ],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addCallback(
        self,
        callback: Callable[Concatenate[_SelfResultT, _P], Union[Failure, _NextResultT]],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addCallback(
        self,
        callback: Callable[Concatenate[_SelfResultT, _P], Deferred[_NextResultT]],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addCallback(
        self,
        callback: Callable[
            Concatenate[_SelfResultT, _P],
            Union[Deferred[_NextResultT], _NextResultT],
        ],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addCallback(
        self,
        callback: Callable[Concatenate[_SelfResultT, _P], _NextResultT],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    def addCallback(self, callback: Any, *args: Any, **kwargs: Any) -> "Deferred[Any]":
        """
        Convenience method for adding just a callback.

        See L{addCallbacks}.
        """
        # Implementation Note: Any annotations for brevity; the overloads above
        # handle specifying the actual signature, and there's nothing worth
        # type-checking in this implementation.
        return self.addCallbacks(callback, callbackArgs=args, callbackKeywords=kwargs)
+
    # The errback overloads union the errback's possible return type with the
    # Deferred's existing result type, since a successful result bypasses the
    # errback entirely.
    @overload
    def addErrback(
        self,
        errback: Callable[Concatenate[Failure, _P], Deferred[_NextResultT]],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> "Deferred[Union[_SelfResultT, _NextResultT]]":
        ...

    @overload
    def addErrback(
        self,
        errback: Callable[Concatenate[Failure, _P], Failure],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> "Deferred[Union[_SelfResultT]]":
        ...

    @overload
    def addErrback(
        self,
        errback: Callable[Concatenate[Failure, _P], _NextResultT],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> "Deferred[Union[_SelfResultT, _NextResultT]]":
        ...

    def addErrback(self, errback: Any, *args: Any, **kwargs: Any) -> "Deferred[Any]":
        """
        Convenience method for adding just an errback.

        See L{addCallbacks}.
        """
        # See implementation note in addCallbacks about Any arguments
        return self.addCallbacks(
            passthru, errback, errbackArgs=args, errbackKeywords=kwargs
        )
+
    # addBoth's callback receives either the success value or a Failure, so
    # every overload takes Union[_SelfResultT, Failure] as the first argument.
    @overload
    def addBoth(
        self,
        callback: Callable[Concatenate[Union[_SelfResultT, Failure], _P], Failure],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addBoth(
        self,
        callback: Callable[
            Concatenate[Union[_SelfResultT, Failure], _P],
            Union[Failure, Deferred[_NextResultT]],
        ],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addBoth(
        self,
        callback: Callable[
            Concatenate[Union[_SelfResultT, Failure], _P], Union[Failure, _NextResultT]
        ],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addBoth(
        self,
        callback: Callable[
            Concatenate[Union[_SelfResultT, Failure], _P], Deferred[_NextResultT]
        ],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addBoth(
        self,
        callback: Callable[
            Concatenate[Union[_SelfResultT, Failure], _P],
            Union[Deferred[_NextResultT], _NextResultT],
        ],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addBoth(
        self,
        callback: Callable[Concatenate[Union[_SelfResultT, Failure], _P], _NextResultT],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_NextResultT]:
        ...

    @overload
    def addBoth(
        self,
        callback: Callable[Concatenate[_T, _P], _T],
        *args: _P.args,
        **kwargs: _P.kwargs,
    ) -> Deferred[_SelfResultT]:
        ...

    def addBoth(self, callback: Any, *args: Any, **kwargs: Any) -> "Deferred[Any]":
        """
        Convenience method for adding a single callable as both a callback
        and an errback.

        See L{addCallbacks}.
        """
        # See implementation note in addCallbacks about Any arguments
        return self.addCallbacks(
            callback,
            callback,
            callbackArgs=args,
            errbackArgs=args,
            callbackKeywords=kwargs,
            errbackKeywords=kwargs,
        )

    # END way too many overloads
+
    def addTimeout(
        self,
        timeout: float,
        clock: IReactorTime,
        onTimeoutCancel: Optional[
            Callable[
                [Union[_SelfResultT, Failure], float],
                Union[_NextResultT, Failure],
            ]
        ] = None,
    ) -> "Deferred[Union[_SelfResultT, _NextResultT]]":
        """
        Time out this L{Deferred} by scheduling it to be cancelled after
        C{timeout} seconds.

        The timeout encompasses all the callbacks and errbacks added to this
        L{defer.Deferred} before the call to L{addTimeout}, and none added
        after the call.

        If this L{Deferred} gets timed out, it errbacks with a L{TimeoutError},
        unless a cancelable function was passed to its initialization or unless
        a different C{onTimeoutCancel} callable is provided.

        @param timeout: number of seconds to wait before timing out this
            L{Deferred}
        @param clock: The object which will be used to schedule the timeout.
        @param onTimeoutCancel: A callable which is called immediately after
            this L{Deferred} times out, and not if this L{Deferred} is
            otherwise cancelled before the timeout. It takes an arbitrary
            value, which is the value of this L{Deferred} at that exact point
            in time (probably a L{CancelledError} L{Failure}), and the
            C{timeout}. The default callable (if C{None} is provided) will
            translate a L{CancelledError} L{Failure} into a L{TimeoutError}.

        @return: C{self}.

        @since: 16.5
        """

        # Mutable one-element cell so the closures below can record whether
        # the cancellation was triggered by our timer rather than a caller.
        timedOut = [False]

        def timeItOut() -> None:
            timedOut[0] = True
            self.cancel()

        delayedCall = clock.callLater(timeout, timeItOut)

        def convertCancelled(
            result: Union[_SelfResultT, Failure],
        ) -> Union[_SelfResultT, _NextResultT, Failure]:
            # if C{deferred} was timed out, call the translation function,
            # if provided, otherwise just use L{cancelledToTimedOutError}
            if timedOut[0]:
                toCall = onTimeoutCancel or _cancelledToTimedOutError
                return toCall(result, timeout)
            return result

        def cancelTimeout(result: _T) -> _T:
            # stop the pending call to cancel the deferred if it's been fired
            if delayedCall.active():
                delayedCall.cancel()
            return result

        # Note: Mypy cannot infer this type, apparently thanks to the ambiguity
        # of _SelfResultT / _NextResultT both being unbound. Explicitly
        # annotating it seems to do the trick though.
        converted: Deferred[Union[_SelfResultT, _NextResultT]] = self.addBoth(
            convertCancelled
        )
        return converted.addBoth(cancelTimeout)
+
+ def chainDeferred(self, d: "Deferred[_SelfResultT]") -> "Deferred[None]":
+ """
+ Chain another L{Deferred} to this L{Deferred}.
+
+ This method adds callbacks to this L{Deferred} to call C{d}'s callback
+ or errback, as appropriate. It is merely a shorthand way of performing
+ the following::
+
+ d1.addCallbacks(d2.callback, d2.errback)
+
+ When you chain a deferred C{d2} to another deferred C{d1} with
+ C{d1.chainDeferred(d2)}, you are making C{d2} participate in the
+ callback chain of C{d1}.
+ Thus any event that fires C{d1} will also fire C{d2}.
+ However, the converse is B{not} true; if C{d2} is fired, C{d1} will not
+ be affected.
+
+ Note that unlike the case where chaining is caused by a L{Deferred}
+ being returned from a callback, it is possible to cause the call
+ stack size limit to be exceeded by chaining many L{Deferred}s
+ together with C{chainDeferred}.
+
+ @return: C{self}.
+ """
+ d._chainedTo = self
+ return self.addCallbacks(d.callback, d.errback)
+
+ def callback(self, result: Union[_SelfResultT, Failure]) -> None:
+ """
+ Run all success callbacks that have been added to this L{Deferred}.
+
+ Each callback will have its result passed as the first argument to
+ the next; this way, the callbacks act as a 'processing chain'. If
+ the success-callback returns a L{Failure} or raises an L{Exception},
+ processing will continue on the *error* callback chain. If a
+ callback (or errback) returns another L{Deferred}, this L{Deferred}
+ will be chained to it (and further callbacks will not run until that
+ L{Deferred} has a result).
+
+ An instance of L{Deferred} may only have either L{callback} or
+ L{errback} called on it, and only once.
+
+ @param result: The object which will be passed to the first callback
+ added to this L{Deferred} (via L{addCallback}), unless C{result} is
+ a L{Failure}, in which case the behavior is the same as calling
+ C{errback(result)}.
+
+ @raise AlreadyCalledError: If L{callback} or L{errback} has already been
+ called on this L{Deferred}.
+ """
+ assert not isinstance(result, Deferred)
+ self._startRunCallbacks(result)
+
+ def errback(self, fail: Optional[Union[Failure, BaseException]] = None) -> None:
+ """
+ Run all error callbacks that have been added to this L{Deferred}.
+
+ Each callback will have its result passed as the first
+ argument to the next; this way, the callbacks act as a
+ 'processing chain'. Also, if the error-callback returns a non-Failure
+ or doesn't raise an L{Exception}, processing will continue on the
+ *success*-callback chain.
+
+ If the argument that's passed to me is not a L{Failure} instance,
+ it will be embedded in one. If no argument is passed, a
+ L{Failure} instance will be created based on the current
+ traceback stack.
+
+ Passing a string as `fail' is deprecated, and will be punished with
+ a warning message.
+
+ An instance of L{Deferred} may only have either L{callback} or
+ L{errback} called on it, and only once.
+
+ @param fail: The L{Failure} object which will be passed to the first
+ errback added to this L{Deferred} (via L{addErrback}).
+ Alternatively, a L{Exception} instance from which a L{Failure} will
+ be constructed (with no traceback) or L{None} to create a L{Failure}
+ instance from the current exception state (with a traceback).
+
+ @raise AlreadyCalledError: If L{callback} or L{errback} has already been
+ called on this L{Deferred}.
+ @raise NoCurrentExceptionError: If C{fail} is L{None} but there is
+ no current exception state.
+ """
+ if fail is None:
+ fail = Failure(captureVars=self.debug)
+ elif not isinstance(fail, Failure):
+ fail = Failure(fail)
+
+ self._startRunCallbacks(fail)
+
+ def pause(self) -> None:
+ """
+ Stop processing on a L{Deferred} until L{unpause}() is called.
+ """
+ self.paused = self.paused + 1
+
+ def unpause(self) -> None:
+ """
+ Process all callbacks made since L{pause}() was called.
+ """
+ self.paused = self.paused - 1
+ if self.paused:
+ return
+ if self.called:
+ self._runCallbacks()
+
+ def cancel(self) -> None:
+ """
+ Cancel this L{Deferred}.
+
+ If the L{Deferred} has not yet had its C{errback} or C{callback} method
+ invoked, call the canceller function provided to the constructor. If
+ that function does not invoke C{callback} or C{errback}, or if no
+ canceller function was provided, errback with L{CancelledError}.
+
+ If this L{Deferred} is waiting on another L{Deferred}, forward the
+ cancellation to the other L{Deferred}.
+ """
+ if not self.called:
+ canceller = self._canceller
+ if canceller:
+ canceller(self)
+ else:
+ # Arrange to eat the callback that will eventually be fired
+ # since there was no real canceller.
+ self._suppressAlreadyCalled = True
+ if not self.called:
+ # There was no canceller, or the canceller didn't call
+ # callback or errback.
+ self.errback(Failure(CancelledError()))
+ elif isinstance(self.result, Deferred):
+ # Waiting for another deferred -- cancel it instead.
+ self.result.cancel()
+
+ def _startRunCallbacks(self, result: object) -> None:
+ if self.called:
+ if self._suppressAlreadyCalled:
+ self._suppressAlreadyCalled = False
+ return
+ if self.debug:
+ if self._debugInfo is None:
+ self._debugInfo = DebugInfo()
+ extra = "\n" + self._debugInfo._getDebugTracebacks()
+ raise AlreadyCalledError(extra)
+ raise AlreadyCalledError
+ if self.debug:
+ if self._debugInfo is None:
+ self._debugInfo = DebugInfo()
+ self._debugInfo.invoker = traceback.format_stack()[:-2]
+ self.called = True
+
+ # Clear the canceller to avoid any circular references. This is safe to
+ # do as the canceller does not get called after the deferred has fired
+ self._canceller = None
+
+ self.result = result
+ self._runCallbacks()
+
+ def _continuation(self) -> _CallbackChain:
+ """
+ Build a tuple of callback and errback with L{_Sentinel._CONTINUE}.
+ """
+ return (
+ (_Sentinel._CONTINUE, (self,), _NONE_KWARGS),
+ (_Sentinel._CONTINUE, (self,), _NONE_KWARGS),
+ )
+
+ def _runCallbacks(self) -> None:
+ """
+ Run the chain of callbacks once a result is available.
+
+ This consists of a simple loop over all of the callbacks, calling each
+ with the current result and making the current result equal to the
+ return value (or raised exception) of that call.
+
+ If L{_runningCallbacks} is true, this loop won't run at all, since
+ it is already running above us on the call stack. If C{self.paused} is
+ true, the loop also won't run, because that's what it means to be
+ paused.
+
+ The loop will terminate before processing all of the callbacks if a
+ L{Deferred} without a result is encountered.
+
+ If a L{Deferred} I{with} a result is encountered, that result is taken
+ and the loop proceeds.
+
+ @note: The implementation is complicated slightly by the fact that
+ chaining (associating two L{Deferred}s with each other such that one
+ will wait for the result of the other, as happens when a Deferred is
+ returned from a callback on another L{Deferred}) is supported
+ iteratively rather than recursively, to avoid running out of stack
+ frames when processing long chains.
+ """
+ if self._runningCallbacks:
+ # Don't recursively run callbacks
+ return
+
+ # Keep track of all the Deferreds encountered while propagating results
+ # up a chain. The way a Deferred gets onto this stack is by having
+ # added its _continuation() to the callbacks list of a second Deferred
+ # and then that second Deferred being fired. ie, if ever had _chainedTo
+ # set to something other than None, you might end up on this stack.
+ chain: List[Deferred[Any]] = [self]
+
+ while chain:
+ current = chain[-1]
+
+ if current.paused:
+ # This Deferred isn't going to produce a result at all. All the
+ # Deferreds up the chain waiting on it will just have to...
+ # wait.
+ return
+
+ finished = True
+ current._chainedTo = None
+ while current.callbacks:
+ item = current.callbacks.pop(0)
+ if not isinstance(current.result, Failure):
+ callback, args, kwargs = item[0]
+ else:
+ # type note: Callback signature also works for Errbacks in
+ # this context.
+ callback, args, kwargs = item[1]
+
+ # Avoid recursion if we can.
+ if callback is _CONTINUE:
+ # Give the waiting Deferred our current result and then
+ # forget about that result ourselves.
+ chainee = cast(Deferred[object], args[0])
+ chainee.result = current.result
+ current.result = None
+ # Making sure to update _debugInfo
+ if current._debugInfo is not None:
+ current._debugInfo.failResult = None
+ chainee.paused -= 1
+ chain.append(chainee)
+ # Delay cleaning this Deferred and popping it from the chain
+ # until after we've dealt with chainee.
+ finished = False
+ break
+
+ try:
+ current._runningCallbacks = True
+ try:
+ # type note: mypy sees `callback is _CONTINUE` above and
+ # then decides that `callback` is not callable.
+ # This goes away when we use `_Sentinel._CONTINUE`
+ # instead, but we don't want to do that attribute
+ # lookup in this hot code path, so we ignore the mypy
+ # complaint here.
+ current.result = callback( # type: ignore[misc]
+ current.result, *args, **kwargs
+ )
+
+ if current.result is current:
+ warnAboutFunction(
+ callback,
+ "Callback returned the Deferred "
+ "it was attached to; this breaks the "
+ "callback chain and will raise an "
+ "exception in the future.",
+ )
+ finally:
+ current._runningCallbacks = False
+ except BaseException:
+ # Including full frame information in the Failure is quite
+ # expensive, so we avoid it unless self.debug is set.
+ current.result = Failure(captureVars=self.debug)
+ else:
+ if isinstance(current.result, Deferred):
+ # The result is another Deferred. If it has a result,
+ # we can take it and keep going.
+ resultResult = getattr(current.result, "result", _NO_RESULT)
+ if (
+ resultResult is _NO_RESULT
+ or isinstance(resultResult, Deferred)
+ or current.result.paused
+ ):
+ # Nope, it didn't. Pause and chain.
+ current.pause()
+ current._chainedTo = current.result
+ # Note: current.result has no result, so it's not
+ # running its callbacks right now. Therefore we can
+ # append to the callbacks list directly instead of
+ # using addCallbacks.
+ current.result.callbacks.append(current._continuation())
+ break
+ else:
+ # Yep, it did. Steal it.
+ current.result.result = None
+ # Make sure _debugInfo's failure state is updated.
+ if current.result._debugInfo is not None:
+ current.result._debugInfo.failResult = None
+ current.result = resultResult
+
+ if finished:
+ # As much of the callback chain - perhaps all of it - as can be
+ # processed right now has been. The current Deferred is waiting on
+ # another Deferred or for more callbacks. Before finishing with it,
+ # make sure its _debugInfo is in the proper state.
+ if isinstance(current.result, Failure):
+ # Stash the Failure in the _debugInfo for unhandled error
+ # reporting.
+ current.result.cleanFailure()
+ if current._debugInfo is None:
+ current._debugInfo = DebugInfo()
+ current._debugInfo.failResult = current.result
+ else:
+ # Clear out any Failure in the _debugInfo, since the result
+ # is no longer a Failure.
+ if current._debugInfo is not None:
+ current._debugInfo.failResult = None
+
+ # This Deferred is done, pop it from the chain and move back up
+ # to the Deferred which supplied us with our result.
+ chain.pop()
+
+ def __str__(self) -> str:
+ """
+ Return a string representation of this L{Deferred}.
+ """
+ cname = self.__class__.__name__
+ result = getattr(self, "result", _NO_RESULT)
+ myID = id(self)
+ if self._chainedTo is not None:
+ result = f" waiting on Deferred at 0x{id(self._chainedTo):x}"
+ elif result is _NO_RESULT:
+ result = ""
+ else:
+ result = f" current result: {result!r}"
+ return f"<{cname} at 0x{myID:x}{result}>"
+
+ __repr__ = __str__
+
+    def __iter__(self) -> "Deferred[_SelfResultT]":
+        # A Deferred is its own iterator: generator-based code (and
+        # __await__ below) drives it through send() / __next__.
+        return self
+
+    @_extraneous
+    def send(self, value: object = None) -> "Deferred[_SelfResultT]":
+        """
+        Generator-protocol step for iterating/awaiting this L{Deferred}.
+
+        Returns C{self} while no result is available (or while paused);
+        once a result exists, a L{Failure}'s underlying exception is
+        raised, or L{StopIteration} carrying the successful result.
+        """
+        if self.paused:
+            # If we're paused, we have no result to give
+            return self
+
+        result = getattr(self, "result", _NO_RESULT)
+        if result is _NO_RESULT:
+            return self
+        if isinstance(result, Failure):
+            # Clear the failure on debugInfo so it doesn't raise "unhandled
+            # exception"
+            assert self._debugInfo is not None
+            self._debugInfo.failResult = None
+            # Attach the Failure so except-handlers can recover it from the
+            # raised exception object.
+            result.value.__failure__ = result
+            raise result.value
+        else:
+            raise StopIteration(result)
+
+    # For PEP-492 support (async/await)
+    # type note: base class "Awaitable" defined the type as:
+    #     Callable[[], Generator[Any, None, _SelfResultT]]
+    # See: https://github.com/python/typeshed/issues/5125
+    # When the typeshed patch is included in a mypy release,
+    # this method can be replaced by `__await__ = __iter__`.
+    def __await__(self) -> Generator[Any, None, _SelfResultT]:
+        return self.__iter__()  # type: ignore[return-value]
+
+    # Iterator protocol: advancing the iterator is just send() with no
+    # value, matching the generator-style driving implemented above.
+    __next__ = send
+
+ def asFuture(self, loop: AbstractEventLoop) -> "Future[_SelfResultT]":
+ """
+ Adapt this L{Deferred} into a L{Future} which is bound to C{loop}.
+
+ @note: converting a L{Deferred} to an L{Future} consumes both
+ its result and its errors, so this method implicitly converts
+ C{self} into a L{Deferred} firing with L{None}, regardless of what
+ its result previously would have been.
+
+ @since: Twisted 17.5.0
+
+ @param loop: The L{asyncio} event loop to bind the L{Future} to.
+
+ @return: A L{Future} which will fire when the L{Deferred} fires.
+ """
+ future = loop.create_future()
+
+ def checkCancel(futureAgain: "Future[_SelfResultT]") -> None:
+ if futureAgain.cancelled():
+ self.cancel()
+
+ def maybeFail(failure: Failure) -> None:
+ if not future.cancelled():
+ future.set_exception(failure.value)
+
+ def maybeSucceed(result: object) -> None:
+ if not future.cancelled():
+ future.set_result(result)
+
+ self.addCallbacks(maybeSucceed, maybeFail)
+ future.add_done_callback(checkCancel)
+
+ return future
+
+    @classmethod
+    def fromFuture(cls, future: "Future[_SelfResultT]") -> "Deferred[_SelfResultT]":
+        """
+        Adapt a L{Future} to a L{Deferred}.
+
+        @note: This creates a L{Deferred} from a L{Future}, I{not} from
+            a C{coroutine}; in other words, you will need to call
+            L{asyncio.ensure_future}, L{asyncio.loop.create_task} or create an
+            L{asyncio.Task} yourself to get from a C{coroutine} to a
+            L{Future} if what you have is an awaitable coroutine and
+            not a L{Future}. (The length of this list of techniques is
+            exactly why we have left it to the caller!)
+
+        @since: Twisted 17.5.0
+
+        @param future: The L{Future} to adapt.
+
+        @return: A L{Deferred} which will fire when the L{Future} fires.
+        """
+
+        def adapt(result: Future[_SelfResultT]) -> None:
+            # Relay the Future's outcome (value or exception-as-Failure)
+            # into whichever Deferred is currently the visible one.
+            try:
+                extracted: _SelfResultT | Failure = result.result()
+            except BaseException:
+                extracted = Failure()
+            actual.callback(extracted)
+
+        # Unique sentinel marking a firing that came from cancellation of
+        # the Future rather than from a real result.
+        futureCancel = object()
+
+        def cancel(reself: Deferred[object]) -> None:
+            future.cancel()
+            reself.callback(futureCancel)
+
+        self = cls(cancel)
+        actual = self
+
+        def uncancel(
+            result: _SelfResultT,
+        ) -> Union[_SelfResultT, Deferred[_SelfResultT]]:
+            # On cancellation, swap in a fresh never-firing Deferred as the
+            # target for adapt(), so downstream callbacks of `self` are not
+            # handed the sentinel when the Future's done-callback runs.
+            if result is futureCancel:
+                nonlocal actual
+                actual = Deferred()
+                return actual
+            return result
+
+        self.addCallback(uncancel)
+        future.add_done_callback(adapt)
+
+        return self
+
+ @classmethod
+ def fromCoroutine(
+ cls,
+ coro: Union[
+ Coroutine[Deferred[Any], Any, _T],
+ Generator[Deferred[Any], Any, _T],
+ ],
+ ) -> "Deferred[_T]":
+ """
+ Schedule the execution of a coroutine that awaits on L{Deferred}s,
+ wrapping it in a L{Deferred} that will fire on success/failure of the
+ coroutine.
+
+ Coroutine functions return a coroutine object, similar to how
+ generators work. This function turns that coroutine into a Deferred,
+ meaning that it can be used in regular Twisted code. For example::
+
+ import treq
+ from twisted.internet.defer import Deferred
+ from twisted.internet.task import react
+
+ async def crawl(pages):
+ results = {}
+ for page in pages:
+ results[page] = await treq.content(await treq.get(page))
+ return results
+
+ def main(reactor):
+ pages = [
+ "http://localhost:8080"
+ ]
+ d = Deferred.fromCoroutine(crawl(pages))
+ d.addCallback(print)
+ return d
+
+ react(main)
+
+ @since: Twisted 21.2.0
+
+ @param coro: The coroutine object to schedule.
+
+ @raise ValueError: If C{coro} is not a coroutine or generator.
+ """
+ # asyncio.iscoroutine <3.12 identifies generators as coroutines, too.
+ # for >=3.12 we need to check isgenerator also
+ # see https://github.com/python/cpython/issues/102748
+ if iscoroutine(coro) or inspect.isgenerator(coro):
+ return _cancellableInlineCallbacks(coro)
+ raise NotACoroutineError(f"{coro!r} is not a coroutine")
+
+
+def ensureDeferred(
+ coro: Union[
+ Coroutine[Deferred[Any], Any, _T],
+ Generator[Deferred[Any], Any, _T],
+ Deferred[_T],
+ ]
+) -> Deferred[_T]:
+ """
+ Schedule the execution of a coroutine that awaits/yields from L{Deferred}s,
+ wrapping it in a L{Deferred} that will fire on success/failure of the
+ coroutine. If a Deferred is passed to this function, it will be returned
+ directly (mimicking the L{asyncio.ensure_future} function).
+
+ See L{Deferred.fromCoroutine} for examples of coroutines.
+
+ @param coro: The coroutine object to schedule, or a L{Deferred}.
+ """
+ if isinstance(coro, Deferred):
+ return coro
+ else:
+ try:
+ return Deferred.fromCoroutine(coro)
+ except NotACoroutineError:
+ # It's not a coroutine. Raise an exception, but say that it's also
+ # not a Deferred so the error makes sense.
+ raise NotACoroutineError(f"{coro!r} is not a coroutine or a Deferred")
+
+
+@comparable
+class FirstError(Exception):
+ """
+ First error to occur in a L{DeferredList} if C{fireOnOneErrback} is set.
+
+ @ivar subFailure: The L{Failure} that occurred.
+ @ivar index: The index of the L{Deferred} in the L{DeferredList} where
+ it happened.
+ """
+
+ def __init__(self, failure: Failure, index: int) -> None:
+ Exception.__init__(self, failure, index)
+ self.subFailure = failure
+ self.index = index
+
+ def __repr__(self) -> str:
+ """
+ The I{repr} of L{FirstError} instances includes the repr of the
+ wrapped failure's exception and the index of the L{FirstError}.
+ """
+ return "FirstError[#%d, %r]" % (self.index, self.subFailure.value)
+
+ def __str__(self) -> str:
+ """
+ The I{str} of L{FirstError} instances includes the I{str} of the
+ entire wrapped failure (including its traceback and exception) and
+ the index of the L{FirstError}.
+ """
+ return "FirstError[#%d, %s]" % (self.index, self.subFailure)
+
+ def __cmp__(self, other: object) -> int:
+ """
+ Comparison between L{FirstError} and other L{FirstError} instances
+ is defined as the comparison of the index and sub-failure of each
+ instance. L{FirstError} instances don't compare equal to anything
+ that isn't a L{FirstError} instance.
+
+ @since: 8.2
+ """
+ if isinstance(other, FirstError):
+ return cmp((self.index, self.subFailure), (other.index, other.subFailure))
+ return -1
+
+
+# (result, index) pair fired when fireOnOneCallback triggers early.
+_DeferredListSingleResultT = Tuple[_SelfResultT, int]
+# One entry of the aggregate result: a (success flag, result) pair.
+_DeferredListResultItemT = Tuple[bool, _SelfResultT]
+# The full aggregate result type DeferredList fires with by default.
+_DeferredListResultListT = List[_DeferredListResultItemT[_SelfResultT]]
+
+if TYPE_CHECKING:
+    # The result type is different depending on whether fireOnOneCallback
+    # is True or False. The type system is not flexible enough to handle
+    # that in a class definition, so instead we pretend that DeferredList
+    # is a function that returns a Deferred.
+
+    # fireOnOneCallback=True: fires with a single (result, index) pair.
+    @overload
+    def _DeferredList(
+        deferredList: Iterable[Deferred[_SelfResultT]],
+        fireOnOneCallback: Literal[True],
+        fireOnOneErrback: bool = False,
+        consumeErrors: bool = False,
+    ) -> Deferred[_DeferredListSingleResultT[_SelfResultT]]:
+        ...
+
+    # fireOnOneCallback=False (the default): fires with the full list of
+    # (success, result) pairs.
+    @overload
+    def _DeferredList(
+        deferredList: Iterable[Deferred[_SelfResultT]],
+        fireOnOneCallback: Literal[False] = False,
+        fireOnOneErrback: bool = False,
+        consumeErrors: bool = False,
+    ) -> Deferred[_DeferredListResultListT[_SelfResultT]]:
+        ...
+
+    # Implementation signature unifying the two overloads above.
+    def _DeferredList(
+        deferredList: Iterable[Deferred[_SelfResultT]],
+        fireOnOneCallback: bool = False,
+        fireOnOneErrback: bool = False,
+        consumeErrors: bool = False,
+    ) -> Union[
+        Deferred[_DeferredListSingleResultT[_SelfResultT]],
+        Deferred[_DeferredListResultListT[_SelfResultT]],
+    ]:
+        ...
+
+    # Expose the overloaded callable under the public name, for type
+    # checking only; at runtime the class below is used instead.
+    DeferredList = _DeferredList
+
+
+class DeferredList(  # type: ignore[no-redef] # noqa:F811
+    Deferred[_DeferredListResultListT[Any]]
+):
+    """
+    L{DeferredList} is a tool for collecting the results of several Deferreds.
+
+    This tracks a list of L{Deferred}s for their results, and makes a single
+    callback when they have all completed. By default, the ultimate result is a
+    list of (success, result) tuples, 'success' being a boolean.
+    L{DeferredList} exposes the same API that L{Deferred} does, so callbacks and
+    errbacks can be added to it in the same way.
+
+    L{DeferredList} is implemented by adding callbacks and errbacks to each
+    L{Deferred} in the list passed to it. This means callbacks and errbacks
+    added to the Deferreds before they are passed to L{DeferredList} will change
+    the result that L{DeferredList} sees (i.e., L{DeferredList} is not special).
+    Callbacks and errbacks can also be added to the Deferreds after they are
+    passed to L{DeferredList} and L{DeferredList} may change the result that
+    they see.
+
+    See the documentation for the C{__init__} arguments for more information.
+
+    @ivar _deferredList: The L{list} of L{Deferred}s to track.
+    """
+
+    # Class-level defaults so these flags exist even while __init__ is
+    # still running (the callbacks attached below can fire synchronously).
+    fireOnOneCallback = False
+    fireOnOneErrback = False
+
+    def __init__(
+        self,
+        deferredList: Iterable[Deferred[_SelfResultT]],
+        fireOnOneCallback: bool = False,
+        fireOnOneErrback: bool = False,
+        consumeErrors: bool = False,
+    ):
+        """
+        Initialize a DeferredList.
+
+        @param deferredList: The deferreds to track.
+        @param fireOnOneCallback: (keyword param) a flag indicating that this
+            L{DeferredList} will fire when the first L{Deferred} in
+            C{deferredList} fires with a non-failure result without waiting for
+            any of the other Deferreds. When this flag is set, the DeferredList
+            will fire with a two-tuple: the first element is the result of the
+            Deferred which fired; the second element is the index in
+            C{deferredList} of that Deferred.
+        @param fireOnOneErrback: (keyword param) a flag indicating that this
+            L{DeferredList} will fire when the first L{Deferred} in
+            C{deferredList} fires with a failure result without waiting for any
+            of the other Deferreds. When this flag is set, if a Deferred in the
+            list errbacks, the DeferredList will errback with a L{FirstError}
+            failure wrapping the failure of that Deferred.
+        @param consumeErrors: (keyword param) a flag indicating that failures in
+            any of the included L{Deferred}s should not be propagated to
+            errbacks added to the individual L{Deferred}s after this
+            L{DeferredList} is constructed. After constructing the
+            L{DeferredList}, any errors in the individual L{Deferred}s will be
+            converted to a callback result of L{None}. This is useful to
+            prevent spurious 'Unhandled error in Deferred' messages from being
+            logged. This does not prevent C{fireOnOneErrback} from working.
+        """
+        self._deferredList = list(deferredList)
+
+        # Note this contains optional result values as the DeferredList is
+        # processing its results, even though the callback result will not,
+        # which is why we aren't using _DeferredListResultListT here.
+        self.resultList: List[Optional[_DeferredListResultItemT[Any]]] = [None] * len(
+            self._deferredList
+        )
+        """
+        The final result, in progress.
+        Each item in the list corresponds to the L{Deferred} at the same
+        position in L{_deferredList}. It will be L{None} if the L{Deferred}
+        did not complete yet, or a C{(success, result)} pair if it did.
+        """
+
+        Deferred.__init__(self)
+        # An empty list can never fire item-by-item; fire immediately with
+        # an empty result (unless fireOnOneCallback semantics were asked
+        # for, in which case the DeferredList never fires on its own).
+        if len(self._deferredList) == 0 and not fireOnOneCallback:
+            self.callback([])
+
+        # These flags need to be set *before* attaching callbacks to the
+        # deferreds, because the callbacks use these flags, and will run
+        # synchronously if any of the deferreds are already fired.
+        self.fireOnOneCallback = fireOnOneCallback
+        self.fireOnOneErrback = fireOnOneErrback
+        self.consumeErrors = consumeErrors
+        self.finishedCount = 0
+
+        index = 0
+        for deferred in self._deferredList:
+            # _cbDeferred serves as both callback and errback; the
+            # `succeeded` flag tells it which role it was invoked in.
+            deferred.addCallbacks(
+                self._cbDeferred,
+                self._cbDeferred,
+                callbackArgs=(index, SUCCESS),
+                errbackArgs=(index, FAILURE),
+            )
+            index = index + 1
+
+    def _cbDeferred(
+        self, result: _SelfResultT, index: int, succeeded: bool
+    ) -> Optional[_SelfResultT]:
+        """
+        (internal) Callback for when one of my deferreds fires.
+
+        @param result: the value (or L{Failure}) the child fired with.
+        @param index: the position of that child in C{_deferredList}.
+        @param succeeded: C{SUCCESS} when invoked as a callback,
+            C{FAILURE} when invoked as an errback.
+        """
+        self.resultList[index] = (succeeded, result)
+
+        self.finishedCount += 1
+        if not self.called:
+            if succeeded == SUCCESS and self.fireOnOneCallback:
+                self.callback((result, index))  # type: ignore[arg-type]
+            elif succeeded == FAILURE and self.fireOnOneErrback:
+                assert isinstance(result, Failure)
+                self.errback(Failure(FirstError(result, index)))
+            elif self.finishedCount == len(self.resultList):
+                # At this point, None values in self.resultList have been
+                # replaced by result values, so we cast it to
+                # _DeferredListResultListT to match the callback result type.
+                self.callback(cast(_DeferredListResultListT[Any], self.resultList))
+
+        # Swallow the failure on the child's own chain so it is not
+        # reported as unhandled; the caller opted in via consumeErrors.
+        if succeeded == FAILURE and self.consumeErrors:
+            return None
+
+        return result
+
+    def cancel(self) -> None:
+        """
+        Cancel this L{DeferredList}.
+
+        If the L{DeferredList} hasn't fired yet, cancel every L{Deferred} in
+        the list.
+
+        If the L{DeferredList} has fired, including the case where the
+        C{fireOnOneCallback}/C{fireOnOneErrback} flag is set and the
+        L{DeferredList} fires because one L{Deferred} in the list fires with a
+        non-failure/failure result, do nothing in the C{cancel} method.
+        """
+        if not self.called:
+            for deferred in self._deferredList:
+                try:
+                    deferred.cancel()
+                except BaseException:
+                    # A broken user-supplied canceller must not prevent the
+                    # remaining Deferreds from being cancelled.
+                    log.failure("Exception raised from user supplied canceller")
+
+
+def _parseDeferredListResult(
+ resultList: List[_DeferredListResultItemT[_T]], fireOnOneErrback: bool = False
+) -> List[_T]:
+ if __debug__:
+ for result in resultList:
+ assert result is not None
+ success, value = result
+ assert success
+ return [x[1] for x in resultList]
+
+
+def gatherResults(
+    deferredList: Iterable[Deferred[_T]], consumeErrors: bool = False
+) -> Deferred[List[_T]]:
+    """
+    Returns, via a L{Deferred}, a list with the results of the given
+    L{Deferred}s - in effect, a "join" of multiple deferred operations.
+
+    The returned L{Deferred} will fire when I{all} of the provided L{Deferred}s
+    have fired, or when any one of them has failed.
+
+    The returned L{Deferred} can be cancelled by calling its C{cancel}
+    method; doing so cancels all of the L{Deferred}s in the list.
+
+    This differs from L{DeferredList} in that you don't need to parse
+    the result for success/failure.
+
+    @param consumeErrors: (keyword param) a flag, defaulting to False,
+        indicating that failures in any of the given L{Deferred}s should not be
+        propagated to errbacks added to the individual L{Deferred}s after this
+        L{gatherResults} invocation. Any such errors in the individual
+        L{Deferred}s will be converted to a callback result of L{None}. This
+        is useful to prevent spurious 'Unhandled error in Deferred' messages
+        from being logged. This parameter is available since 11.1.0.
+    """
+    # fireOnOneErrback makes a single failure errback with FirstError;
+    # the success path strips the (success, result) wrappers.
+    d = DeferredList(deferredList, fireOnOneErrback=True, consumeErrors=consumeErrors)
+    d.addCallback(_parseDeferredListResult)
+    return cast(Deferred[List[_T]], d)
+
+
+class FailureGroup(Exception):
+ """
+ More than one failure occurred.
+ """
+
+ def __init__(self, failures: Sequence[Failure]) -> None:
+ super(FailureGroup, self).__init__()
+ self.failures = failures
+
+
+def race(ds: Sequence[Deferred[_T]]) -> Deferred[tuple[int, _T]]:
+    """
+    Select the first available result from the sequence of Deferreds and
+    cancel the rest.
+
+    @param ds: The L{Deferred}s to race against each other.
+
+    @return: A cancellable L{Deferred} that fires with the index and output of
+        the element of C{ds} to have a success result first, or that fires
+        with L{FailureGroup} holding a list of their failures if they all
+        fail.
+    """
+    # Keep track of the Deferred for the action which completed first. When
+    # it completes, all of the other Deferreds will get cancelled but this one
+    # shouldn't be. Even though it "completed" it isn't really done - the
+    # caller will still be using it for something. If we cancelled it,
+    # cancellation could propagate down to them.
+    winner: Optional[Deferred[_T]] = None
+
+    # The cancellation function for the Deferred this function returns.
+    def cancel(result: Deferred[_T]) -> None:
+        # If it is cancelled then we cancel all of the Deferreds for the
+        # individual actions because there is no longer the possibility of
+        # delivering any of their results anywhere. We don't have to fire
+        # `result` because the Deferred will do that for us.
+        for d in to_cancel:
+            d.cancel()
+
+    # The Deferred that this function will return. It will fire with the
+    # index and output of the action that completes first, or errback if all
+    # of the actions fail. If it is cancelled, all of the actions will be
+    # cancelled.
+    final_result: Deferred[tuple[int, _T]] = Deferred(canceller=cancel)
+
+    # A callback for an individual action.
+    def succeeded(this_output: _T, this_index: int) -> None:
+        # If it is the first action to succeed then it becomes the "winner",
+        # its index/output become the externally visible result, and the rest
+        # of the action Deferreds get cancelled. If it is not the first
+        # action to succeed (because some action did not support
+        # cancellation), just ignore the result. It is uncommon for this
+        # callback to be entered twice. The only way it can happen is if one
+        # of the input Deferreds has a cancellation function that fires the
+        # Deferred with a success result.
+        nonlocal winner
+        if winner is None:
+            # This is the first success. Act on it.
+            winner = to_cancel[this_index]
+
+            # Cancel the rest.
+            for d in to_cancel:
+                if d is not winner:
+                    d.cancel()
+
+            # Fire our Deferred
+            final_result.callback((this_index, this_output))
+
+    # Keep track of how many actions have failed. If they all fail we need to
+    # deliver failure notification on our externally visible result.
+    failure_state = []
+
+    def failed(failure: Failure, this_index: int) -> None:
+        failure_state.append((this_index, failure))
+        if len(failure_state) == len(to_cancel):
+            # Every operation failed.  Sort by index so the FailureGroup
+            # lists failures in input order regardless of firing order.
+            failure_state.sort()
+            failures = [f for (ignored, f) in failure_state]
+            final_result.errback(FailureGroup(failures))
+
+    # Copy the sequence of Deferreds so we know it doesn't get mutated out
+    # from under us.
+    to_cancel = list(ds)
+    for index, d in enumerate(ds):
+        # Propagate the position of this action as well as the argument to f
+        # to the success callback so we can cancel the right Deferreds and
+        # propagate the result outwards.
+        d.addCallbacks(succeeded, failed, callbackArgs=(index,), errbackArgs=(index,))
+
+    return final_result
+
+
+# Constants for use with DeferredList: the boolean stored as the first
+# element of each (success, result) pair in a DeferredList's result.
+SUCCESS = True
+FAILURE = False
+
+
+## deferredGenerator
+class waitForDeferred:
+ """
+ See L{deferredGenerator}.
+ """
+
+ result: Any = _NO_RESULT
+
+ def __init__(self, d: Deferred[object]) -> None:
+ warnings.warn(
+ "twisted.internet.defer.waitForDeferred was deprecated in "
+ "Twisted 15.0.0; please use twisted.internet.defer.inlineCallbacks "
+ "instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if not isinstance(d, Deferred):
+ raise TypeError(
+ f"You must give waitForDeferred a Deferred. You gave it {d!r}."
+ )
+ self.d = d
+
+ def getResult(self) -> Any:
+ if isinstance(self.result, Failure):
+ self.result.raiseException()
+ self.result is not _NO_RESULT
+ return self.result
+
+
+_DeferableGenerator = Generator[object, None, None]
+
+
+def _deferGenerator(
+    g: _DeferableGenerator, deferred: Deferred[object]
+) -> Deferred[Any]:
+    """
+    See L{deferredGenerator}.
+
+    @param g: the generator to drive; it yields plain values or
+        L{waitForDeferred} wrappers.
+    @param deferred: the L{Deferred} to fire with the generator's last
+        yielded value, or with a L{Failure} if it raises.
+    """
+
+    result = None
+
+    # This function is complicated by the need to prevent unbounded recursion
+    # arising from repeatedly yielding immediately ready deferreds. This while
+    # loop and the waiting variable solve that by manually unfolding the
+    # recursion.
+
+    # defgen is waiting for result?  # result
+    # type note: List[Any] because you can't annotate List items by index.
+    #   ...better fix would be to create a class, but we need to jettison
+    #   deferredGenerator anyway.
+    waiting: List[Any] = [True, None]
+
+    while 1:
+        try:
+            result = next(g)
+        except StopIteration:
+            # Generator finished; fire with the last value it yielded.
+            deferred.callback(result)
+            return deferred
+        except BaseException:
+            # Generator raised; convert the active exception to a Failure.
+            deferred.errback()
+            return deferred
+
+        # Deferred.callback(Deferred) raises an error; we catch this case
+        # early here and give a nicer error message to the user in case
+        # they yield a Deferred.
+        if isinstance(result, Deferred):
+            return fail(TypeError("Yield waitForDeferred(d), not d!"))
+
+        if isinstance(result, waitForDeferred):
+            # a waitForDeferred was yielded, get the result.
+            # Pass result in so it don't get changed going around the loop
+            # This isn't a problem for waiting, as it's only reused if
+            # gotResult has already been executed.
+            def gotResult(
+                r: object, result: waitForDeferred = cast(waitForDeferred, result)
+            ) -> None:
+                result.result = r
+                if waiting[0]:
+                    # Still inside the while loop below: hand the result
+                    # over synchronously instead of recursing.
+                    waiting[0] = False
+                    waiting[1] = r
+                else:
+                    # Fired later, asynchronously: restart the driver.
+                    _deferGenerator(g, deferred)
+
+            result.d.addBoth(gotResult)
+            if waiting[0]:
+                # Haven't called back yet, set flag so that we get reinvoked
+                # and return from the loop
+                waiting[0] = False
+                return deferred
+            # Reset waiting to initial values for next loop
+            waiting[0] = True
+            waiting[1] = None
+
+            result = None
+
+
+@deprecated(Version("Twisted", 15, 0, 0), "twisted.internet.defer.inlineCallbacks")
+def deferredGenerator(
+    f: Callable[..., _DeferableGenerator]
+) -> Callable[..., Deferred[object]]:
+    """
+    L{deferredGenerator} and L{waitForDeferred} help you write
+    L{Deferred}-using code that looks like a regular sequential function.
+    Consider the use of L{inlineCallbacks} instead, which can accomplish
+    the same thing in a more concise manner.
+
+    There are two important functions involved: L{waitForDeferred}, and
+    L{deferredGenerator}. They are used together, like this::
+
+        @deferredGenerator
+        def thingummy():
+            thing = waitForDeferred(makeSomeRequestResultingInDeferred())
+            yield thing
+            thing = thing.getResult()
+            print(thing) #the result! hoorj!
+
+    L{waitForDeferred} returns something that you should immediately yield; when
+    your generator is resumed, calling C{thing.getResult()} will either give you
+    the result of the L{Deferred} if it was a success, or raise an exception if it
+    was a failure. Calling C{getResult} is B{absolutely mandatory}. If you do
+    not call it, I{your program will not work}.
+
+    L{deferredGenerator} takes one of these waitForDeferred-using generator
+    functions and converts it into a function that returns a L{Deferred}. The
+    result of the L{Deferred} will be the last value that your generator yielded
+    unless the last value is a L{waitForDeferred} instance, in which case the
+    result will be L{None}. If the function raises an unhandled exception, the
+    L{Deferred} will errback instead. Remember that C{return result} won't work;
+    use C{yield result; return} in place of that.
+
+    Note that not yielding anything from your generator will make the L{Deferred}
+    result in L{None}. Yielding a L{Deferred} from your generator is also an error
+    condition; always yield C{waitForDeferred(d)} instead.
+
+    The L{Deferred} returned from your deferred generator may also errback if your
+    generator raised an exception. For example::
+
+        @deferredGenerator
+        def thingummy():
+            thing = waitForDeferred(makeSomeRequestResultingInDeferred())
+            yield thing
+            thing = thing.getResult()
+            if thing == 'I love Twisted':
+                # will become the result of the Deferred
+                yield 'TWISTED IS GREAT!'
+                return
+            else:
+                # will trigger an errback
+                raise Exception('DESTROY ALL LIFE')
+
+    Put succinctly, these functions connect deferred-using code with this 'fake
+    blocking' style in both directions: L{waitForDeferred} converts from a
+    L{Deferred} to the 'blocking' style, and L{deferredGenerator} converts from the
+    'blocking' style to a L{Deferred}.
+    """
+
+    @wraps(f)
+    def unwindGenerator(*args: object, **kwargs: object) -> Deferred[object]:
+        # Create the generator eagerly and hand it, with a fresh Deferred,
+        # to the driver loop in _deferGenerator.
+        return _deferGenerator(f(*args, **kwargs), Deferred())
+
+    return unwindGenerator
+
+
+## inlineCallbacks
+
+
+class _DefGen_Return(BaseException):
+    """
+    Carries the value passed to L{returnValue} out of an
+    C{@inlineCallbacks} generator.  Derived from L{BaseException} so that
+    user code's C{except Exception:} clauses do not accidentally catch it
+    (see the note in L{returnValue}).
+    """
+
+    def __init__(self, value: object) -> None:
+        # The value to deliver as the surrounding Deferred's result.
+        self.value = value
+
+
+def returnValue(val: object) -> NoReturn:
+    """
+    Return val from a L{inlineCallbacks} generator.
+
+    Note: this is currently implemented by raising an exception
+    derived from L{BaseException}. You might want to change any
+    'except:' clauses to an 'except Exception:' clause so as not to
+    catch this exception.
+
+    Also: while this function currently will work when called from
+    within arbitrary functions called from within the generator, do
+    not rely upon this behavior.
+
+    @param val: the value to make the result of the surrounding
+        C{@inlineCallbacks}-decorated function's L{Deferred}.
+    @raise _DefGen_Return: always; this exception is the transport
+        mechanism for the value.
+    """
+    raise _DefGen_Return(val)
+
+
+@attr.s(auto_attribs=True)
+class _CancellationStatus(Generic[_SelfResultT]):
+    """
+    Cancellation status of an L{inlineCallbacks} invocation.
+
+    @ivar deferred: the L{Deferred} to callback or errback when the generator
+        invocation has finished.
+    @ivar waitingOn: the L{Deferred} being waited upon (which
+        L{_inlineCallbacks} must fill out before returning)
+    """
+
+    # auto_attribs turns these annotated names into attrs-managed fields.
+    deferred: Deferred[_SelfResultT]
+    waitingOn: Optional[Deferred[_SelfResultT]] = None
+
+
+def _gotResultInlineCallbacks(
+ r: object,
+ waiting: List[Any],
+ gen: Union[
+ Generator[Deferred[Any], Any, _T],
+ Coroutine[Deferred[Any], Any, _T],
+ ],
+ status: _CancellationStatus[_T],
+ context: _Context,
+) -> None:
+ """
+ Helper for L{_inlineCallbacks} to handle a nested L{Deferred} firing.
+
+ @param r: The result of the L{Deferred}
+ @param waiting: Whether the L{_inlineCallbacks} was waiting, and the result.
+ @param gen: a generator object returned by calling a function or method
+ decorated with C{@}L{inlineCallbacks}
+ @param status: a L{_CancellationStatus} tracking the current status of C{gen}
+ @param context: the contextvars context to run `gen` in
+ """
+ if waiting[0]:
+ waiting[0] = False
+ waiting[1] = r
+ else:
+ _inlineCallbacks(r, gen, status, context)
+
+
+@_extraneous
+def _inlineCallbacks(
+ result: object,
+ gen: Union[
+ Generator[Deferred[Any], Any, _T],
+ Coroutine[Deferred[Any], Any, _T],
+ ],
+ status: _CancellationStatus[_T],
+ context: _Context,
+) -> None:
+ """
+ Carry out the work of L{inlineCallbacks}.
+
+ Iterate the generator produced by an C{@}L{inlineCallbacks}-decorated
+ function, C{gen}, C{send()}ing it the results of each value C{yield}ed by
+ that generator, until a L{Deferred} is yielded, at which point a callback
+ is added to that L{Deferred} to call this function again.
+
+ @param result: The last result seen by this generator. Note that this is
+ never a L{Deferred} - by the time this function is invoked, the
+ L{Deferred} has been called back and this will be a particular result
+ at a point in its callback chain.
+
+ @param gen: a generator object returned by calling a function or method
+ decorated with C{@}L{inlineCallbacks}
+
+ @param status: a L{_CancellationStatus} tracking the current status of C{gen}
+
+ @param context: the contextvars context to run `gen` in
+ """
+ # This function is complicated by the need to prevent unbounded recursion
+ # arising from repeatedly yielding immediately ready deferreds. This while
+ # loop and the waiting variable solve that by manually unfolding the
+ # recursion.
+
+ # waiting for result? # result
+ waiting: List[Any] = [True, None]
+
+ stopIteration: bool = False
+ callbackValue: Any = None
+
+ while 1:
+ try:
+ # Send the last result back as the result of the yield expression.
+ isFailure = isinstance(result, Failure)
+
+ if isFailure:
+ result = context.run(
+ cast(Failure, result).throwExceptionIntoGenerator, gen
+ )
+ else:
+ result = context.run(gen.send, result)
+ except StopIteration as e:
+ # fell off the end, or "return" statement
+ stopIteration = True
+ callbackValue = getattr(e, "value", None)
+
+ except _DefGen_Return as e:
+ # returnValue() was called; time to give a result to the original
+ # Deferred. First though, let's try to identify the potentially
+ # confusing situation which results when returnValue() is
+ # accidentally invoked from a different function, one that wasn't
+ # decorated with @inlineCallbacks.
+
+ # The traceback starts in this frame (the one for
+ # _inlineCallbacks); the next one down should be the application
+ # code.
+ excInfo = exc_info()
+ assert excInfo is not None
+
+ traceback = excInfo[2]
+ assert traceback is not None
+
+ appCodeTrace = traceback.tb_next
+ assert appCodeTrace is not None
+
+ if _PYPY:
+ # PyPy as of 3.7 adds an extra frame.
+ appCodeTrace = appCodeTrace.tb_next
+ assert appCodeTrace is not None
+
+ if isFailure:
+ # If we invoked this generator frame by throwing an exception
+ # into it, then throwExceptionIntoGenerator will consume an
+ # additional stack frame itself, so we need to skip that too.
+ appCodeTrace = appCodeTrace.tb_next
+ assert appCodeTrace is not None
+
+ # Now that we've identified the frame being exited by the
+ # exception, let's figure out if returnValue was called from it
+ # directly. returnValue itself consumes a stack frame, so the
+ # application code will have a tb_next, but it will *not* have a
+ # second tb_next.
+ assert appCodeTrace.tb_next is not None
+ if appCodeTrace.tb_next.tb_next:
+ # If returnValue was invoked non-local to the frame which it is
+ # exiting, identify the frame that ultimately invoked
+ # returnValue so that we can warn the user, as this behavior is
+ # confusing.
+ ultimateTrace = appCodeTrace
+
+ assert ultimateTrace is not None
+ assert ultimateTrace.tb_next is not None
+ while ultimateTrace.tb_next.tb_next:
+ ultimateTrace = ultimateTrace.tb_next
+ assert ultimateTrace is not None
+
+ filename = ultimateTrace.tb_frame.f_code.co_filename
+ lineno = ultimateTrace.tb_lineno
+
+ assert ultimateTrace.tb_frame is not None
+ assert appCodeTrace.tb_frame is not None
+ warnings.warn_explicit(
+ "returnValue() in %r causing %r to exit: "
+ "returnValue should only be invoked by functions decorated "
+ "with inlineCallbacks"
+ % (
+ ultimateTrace.tb_frame.f_code.co_name,
+ appCodeTrace.tb_frame.f_code.co_name,
+ ),
+ DeprecationWarning,
+ filename,
+ lineno,
+ )
+
+ stopIteration = True
+ callbackValue = e.value
+
+ except BaseException:
+ status.deferred.errback()
+ return
+
+ if stopIteration:
+ # Call the callback outside of the exception handler to avoid inappropriate/confusing
+ # "During handling of the above exception, another exception occurred:" if the callback
+ # itself throws an exception.
+ status.deferred.callback(callbackValue)
+ return
+
+ if isinstance(result, Deferred):
+ # a deferred was yielded, get the result.
+ result.addBoth(_gotResultInlineCallbacks, waiting, gen, status, context)
+ if waiting[0]:
+ # Haven't called back yet, set flag so that we get reinvoked
+ # and return from the loop
+ waiting[0] = False
+ status.waitingOn = result
+ return
+
+ result = waiting[1]
+ # Reset waiting to initial values for next loop. gotResult uses
+ # waiting, but this isn't a problem because gotResult is only
+ # executed once, and if it hasn't been executed yet, the return
+ # branch above would have been taken.
+
+ waiting[0] = True
+ waiting[1] = None
+
+
+def _addCancelCallbackToDeferred(
+    it: Deferred[_T], status: _CancellationStatus[_T]
+) -> None:
+    """
+    Helper for L{_cancellableInlineCallbacks} to add
+    L{_handleCancelInlineCallbacks} as the first errback.
+
+    @param it: The L{Deferred} to add the errback to.
+    @param status: a L{_CancellationStatus} tracking the current status of C{gen}
+    """
+    # Detach the existing callback chain so the errback added below becomes
+    # the *first* entry, then re-attach the original chain after it.
+    it.callbacks, tmp = [], it.callbacks
+    it.addErrback(_handleCancelInlineCallbacks, status)
+    it.callbacks.extend(tmp)
+    # Fire the internal marker through the re-ordered chain; only the
+    # errback installed above is expected to trap it.
+    it.errback(_InternalInlineCallbacksCancelledError())
+
+
+def _handleCancelInlineCallbacks(
+    result: Failure,
+    status: _CancellationStatus[_T],
+) -> Deferred[_T]:
+    """
+    Propagate the cancellation of an C{@}L{inlineCallbacks} to the
+    L{Deferred} it is waiting on.
+
+    @param result: An L{_InternalInlineCallbacksCancelledError} from
+        C{cancel()}.
+    @param status: a L{_CancellationStatus} tracking the current status of C{gen}
+    @return: A new L{Deferred} that the C{@}L{inlineCallbacks} generator
+        can callback or errback through.
+    """
+    # Anything other than the internal marker is a real failure and must
+    # propagate past this errback untouched; trap() re-raises it.
+    result.trap(_InternalInlineCallbacksCancelledError)
+    # Replace the cancelled Deferred with a fresh one that is itself
+    # cancellable in the same fashion.
+    status.deferred = Deferred(lambda d: _addCancelCallbackToDeferred(d, status))
+
+    # We would only end up here if the inlineCallback is waiting on
+    # another Deferred. It needs to be cancelled.
+    awaited = status.waitingOn
+    assert awaited is not None
+    awaited.cancel()
+
+    return status.deferred
+
+
+def _cancellableInlineCallbacks(
+    gen: Union[
+        Generator[Deferred[Any], object, _T],
+        Coroutine[Deferred[Any], object, _T],
+    ]
+) -> Deferred[_T]:
+    """
+    Make an C{@}L{inlineCallbacks} cancellable.
+
+    @param gen: a generator object returned by calling a function or method
+        decorated with C{@}L{inlineCallbacks}
+
+    @return: L{Deferred} for the C{@}L{inlineCallbacks} that is cancellable.
+    """
+
+    # The canceller lambda closes over ``status``, which is bound on the
+    # following line; it cannot run before that binding exists.
+    deferred: Deferred[_T] = Deferred(lambda d: _addCancelCallbackToDeferred(d, status))
+    status = _CancellationStatus(deferred)
+
+    # Start driving the generator; the first value sent in is None.
+    _inlineCallbacks(None, gen, status, _copy_context())
+
+    return deferred
+
+
+class _InternalInlineCallbacksCancelledError(Exception):
+    """
+    A unique exception used only in L{_cancellableInlineCallbacks} to verify
+    that an L{inlineCallbacks} is being cancelled as expected.
+
+    Private to this module; it is trapped by L{_handleCancelInlineCallbacks}
+    and is never delivered to application code.
+    """
+
+
+
+def inlineCallbacks(
+ f: Callable[_P, Generator[Deferred[Any], Any, _T]]
+) -> Callable[_P, Deferred[_T]]:
+ """
+ L{inlineCallbacks} helps you write L{Deferred}-using code that looks like a
+ regular sequential function. For example::
+
+ @inlineCallbacks
+ def thingummy():
+ thing = yield makeSomeRequestResultingInDeferred()
+ print(thing) # the result! hoorj!
+
+ When you call anything that results in a L{Deferred}, you can simply yield it;
+ your generator will automatically be resumed when the Deferred's result is
+ available. The generator will be sent the result of the L{Deferred} with the
+ 'send' method on generators, or if the result was a failure, 'throw'.
+
+ Things that are not L{Deferred}s may also be yielded, and your generator
+ will be resumed with the same object sent back. This means C{yield}
+ performs an operation roughly equivalent to L{maybeDeferred}.
+
+ Your inlineCallbacks-enabled generator will return a L{Deferred} object, which
+ will result in the return value of the generator (or will fail with a
+ failure object if your generator raises an unhandled exception). Note that
+ you can't use C{return result} to return a value; use C{returnValue(result)}
+ instead. Falling off the end of the generator, or simply using C{return}
+ will cause the L{Deferred} to have a result of L{None}.
+
+ Be aware that L{returnValue} will not accept a L{Deferred} as a parameter.
+ If you believe the thing you'd like to return could be a L{Deferred}, do
+ this::
+
+ result = yield result
+ returnValue(result)
+
+ The L{Deferred} returned from your deferred generator may errback if your
+ generator raised an exception::
+
+ @inlineCallbacks
+ def thingummy():
+ thing = yield makeSomeRequestResultingInDeferred()
+ if thing == 'I love Twisted':
+ # will become the result of the Deferred
+ returnValue('TWISTED IS GREAT!')
+ else:
+ # will trigger an errback
+ raise Exception('DESTROY ALL LIFE')
+
+ It is possible to use the C{return} statement instead of L{returnValue}::
+
+ @inlineCallbacks
+ def loadData(url):
+ response = yield makeRequest(url)
+ return json.loads(response)
+
+ You can cancel the L{Deferred} returned from your L{inlineCallbacks}
+ generator before it is fired by your generator completing (either by
+ reaching its end, a C{return} statement, or by calling L{returnValue}).
+ A C{CancelledError} will be raised from the C{yield}ed L{Deferred} that
+ has been cancelled if that C{Deferred} does not otherwise suppress it.
+ """
+
+ @wraps(f)
+ def unwindGenerator(*args: _P.args, **kwargs: _P.kwargs) -> Deferred[_T]:
+ try:
+ gen = f(*args, **kwargs)
+ except _DefGen_Return:
+ raise TypeError(
+ "inlineCallbacks requires %r to produce a generator; instead"
+ "caught returnValue being used in a non-generator" % (f,)
+ )
+ if not isinstance(gen, GeneratorType):
+ raise TypeError(
+ "inlineCallbacks requires %r to produce a generator; "
+ "instead got %r" % (f, gen)
+ )
+ return _cancellableInlineCallbacks(gen)
+
+ return unwindGenerator
+
+
+## DeferredLock/DeferredQueue
+
+
+class _ConcurrencyPrimitive(ABC):
+    """
+    Shared machinery for L{DeferredLock} and L{DeferredSemaphore}: a FIFO
+    list of waiting L{Deferred}s plus the C{run()} and async-context-manager
+    helpers, built on the subclass-provided C{acquire}/C{release} pair.
+    """
+
+    def __init__(self: Self) -> None:
+        # Deferreds handed out by acquire() that have not fired yet.
+        self.waiting: List[Deferred[Self]] = []
+
+    def _releaseAndReturn(self, r: _T) -> _T:
+        # addBoth callback: release the primitive and pass the result
+        # (or Failure) through unchanged.
+        self.release()
+        return r
+
+    @overload
+    def run(
+        self: Self,
+        /,
+        f: Callable[_P, Deferred[_T]],
+        *args: _P.args,
+        **kwargs: _P.kwargs,
+    ) -> Deferred[_T]:
+        ...
+
+    @overload
+    def run(
+        self: Self,
+        /,
+        f: Callable[_P, Coroutine[Deferred[Any], Any, _T]],
+        *args: _P.args,
+        **kwargs: _P.kwargs,
+    ) -> Deferred[_T]:
+        ...
+
+    @overload
+    def run(
+        self: Self, /, f: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs
+    ) -> Deferred[_T]:
+        ...
+
+    def run(
+        self: Self,
+        /,
+        f: Callable[_P, Union[Deferred[_T], Coroutine[Deferred[Any], Any, _T], _T]],
+        *args: _P.args,
+        **kwargs: _P.kwargs,
+    ) -> Deferred[_T]:
+        """
+        Acquire, run, release.
+
+        This method takes a callable as its first argument and any
+        number of other positional and keyword arguments. When the
+        lock or semaphore is acquired, the callable will be invoked
+        with those arguments.
+
+        The callable may return a L{Deferred}; if it does, the lock or
+        semaphore won't be released until that L{Deferred} fires.
+
+        @return: L{Deferred} of function result.
+        """
+
+        def execute(ignoredResult: object) -> Deferred[_T]:
+            # maybeDeferred arg type requires one of the possible union members
+            # and won't accept all possible union members
+            return maybeDeferred(f, *args, **kwargs).addBoth(
+                self._releaseAndReturn
+            )  # type: ignore[return-value]
+
+        return self.acquire().addCallback(execute)
+
+    def __aenter__(self: Self) -> Deferred[Self]:
+        """
+        We can be used as an asynchronous context manager.
+        """
+        return self.acquire()
+
+    def __aexit__(
+        self,
+        __exc_type: Optional[Type[BaseException]],
+        __exc_value: Optional[BaseException],
+        __traceback: Optional[TracebackType],
+    ) -> Deferred[Literal[False]]:
+        self.release()
+        # We return False to indicate that we have not consumed the
+        # exception, if any.
+        return succeed(False)
+
+    @abstractmethod
+    def acquire(self: Self) -> Deferred[Self]:
+        """
+        Acquire the primitive; the returned L{Deferred} fires with C{self}.
+        """
+        pass
+
+    @abstractmethod
+    def release(self) -> None:
+        """
+        Release the primitive, possibly firing the next waiter.
+        """
+        pass
+
+
+class DeferredLock(_ConcurrencyPrimitive):
+ """
+ A lock for event driven systems.
+
+ @ivar locked: C{True} when this Lock has been acquired, false at all other
+ times. Do not change this value, but it is useful to examine for the
+ equivalent of a "non-blocking" acquisition.
+ """
+
+ locked = False
+
+ def _cancelAcquire(self: Self, d: Deferred[Self]) -> None:
+ """
+ Remove a deferred d from our waiting list, as the deferred has been
+ canceled.
+
+ Note: We do not need to wrap this in a try/except to catch d not
+ being in self.waiting because this canceller will not be called if
+ d has fired. release() pops a deferred out of self.waiting and
+ calls it, so the canceller will no longer be called.
+
+ @param d: The deferred that has been canceled.
+ """
+ self.waiting.remove(d)
+
+ def acquire(self: Self) -> Deferred[Self]:
+ """
+ Attempt to acquire the lock. Returns a L{Deferred} that fires on
+ lock acquisition with the L{DeferredLock} as the value. If the lock
+ is locked, then the Deferred is placed at the end of a waiting list.
+
+ @return: a L{Deferred} which fires on lock acquisition.
+ @rtype: a L{Deferred}
+ """
+ d: Deferred[Self] = Deferred(canceller=self._cancelAcquire)
+ if self.locked:
+ self.waiting.append(d)
+ else:
+ self.locked = True
+ d.callback(self)
+ return d
+
+ def release(self: Self) -> None:
+ """
+ Release the lock. If there is a waiting list, then the first
+ L{Deferred} in that waiting list will be called back.
+
+ Should be called by whomever did the L{acquire}() when the shared
+ resource is free.
+ """
+ assert self.locked, "Tried to release an unlocked lock"
+ self.locked = False
+ if self.waiting:
+ # someone is waiting to acquire lock
+ self.locked = True
+ d = self.waiting.pop(0)
+ d.callback(self)
+
+
+class DeferredSemaphore(_ConcurrencyPrimitive):
+ """
+ A semaphore for event driven systems.
+
+ If you are looking into this as a means of limiting parallelism, you might
+ find L{twisted.internet.task.Cooperator} more useful.
+
+ @ivar limit: At most this many users may acquire this semaphore at
+ once.
+ @ivar tokens: The difference between C{limit} and the number of users
+ which have currently acquired this semaphore.
+ """
+
+ def __init__(self, tokens: int) -> None:
+ """
+ @param tokens: initial value of L{tokens} and L{limit}
+ @type tokens: L{int}
+ """
+ _ConcurrencyPrimitive.__init__(self)
+ if tokens < 1:
+ raise ValueError("DeferredSemaphore requires tokens >= 1")
+ self.tokens = tokens
+ self.limit = tokens
+
+ def _cancelAcquire(self: Self, d: Deferred[Self]) -> None:
+ """
+ Remove a deferred d from our waiting list, as the deferred has been
+ canceled.
+
+ Note: We do not need to wrap this in a try/except to catch d not
+ being in self.waiting because this canceller will not be called if
+ d has fired. release() pops a deferred out of self.waiting and
+ calls it, so the canceller will no longer be called.
+
+ @param d: The deferred that has been canceled.
+ """
+ self.waiting.remove(d)
+
+ def acquire(self: Self) -> Deferred[Self]:
+ """
+ Attempt to acquire the token.
+
+ @return: a L{Deferred} which fires on token acquisition.
+ """
+ assert (
+ self.tokens >= 0
+ ), "Internal inconsistency?? tokens should never be negative"
+ d: Deferred[Self] = Deferred(canceller=self._cancelAcquire)
+ if not self.tokens:
+ self.waiting.append(d)
+ else:
+ self.tokens = self.tokens - 1
+ d.callback(self)
+ return d
+
+ def release(self: Self) -> None:
+ """
+ Release the token.
+
+ Should be called by whoever did the L{acquire}() when the shared
+ resource is free.
+ """
+ assert (
+ self.tokens < self.limit
+ ), "Someone released me too many times: too many tokens!"
+ self.tokens = self.tokens + 1
+ if self.waiting:
+ # someone is waiting to acquire token
+ self.tokens = self.tokens - 1
+ d = self.waiting.pop(0)
+ d.callback(self)
+
+
+class QueueOverflow(Exception):
+    """
+    Raised synchronously by L{DeferredQueue.put} when the queue already
+    holds C{size} pending objects.
+    """
+
+    pass
+
+
+class QueueUnderflow(Exception):
+    """
+    Raised synchronously by L{DeferredQueue.get} when C{backlog} L{Deferred}s
+    are already waiting for an object.
+    """
+
+    pass
+
+
+class DeferredQueue(Generic[_T]):
+ """
+ An event driven queue.
+
+ Objects may be added as usual to this queue. When an attempt is
+ made to retrieve an object when the queue is empty, a L{Deferred} is
+ returned which will fire when an object becomes available.
+
+ @ivar size: The maximum number of objects to allow into the queue
+ at a time. When an attempt to add a new object would exceed this
+ limit, L{QueueOverflow} is raised synchronously. L{None} for no limit.
+ @ivar backlog: The maximum number of L{Deferred} gets to allow at
+ one time. When an attempt is made to get an object which would
+ exceed this limit, L{QueueUnderflow} is raised synchronously. L{None}
+ for no limit.
+ """
+
+ def __init__(
+ self, size: Optional[int] = None, backlog: Optional[int] = None
+ ) -> None:
+ self.waiting: List[Deferred[_T]] = []
+ self.pending: List[_T] = []
+ self.size = size
+ self.backlog = backlog
+
+ def _cancelGet(self, d: Deferred[_T]) -> None:
+ """
+ Remove a deferred d from our waiting list, as the deferred has been
+ canceled.
+
+ Note: We do not need to wrap this in a try/except to catch d not
+ being in self.waiting because this canceller will not be called if
+ d has fired. put() pops a deferred out of self.waiting and calls
+ it, so the canceller will no longer be called.
+
+ @param d: The deferred that has been canceled.
+ """
+ self.waiting.remove(d)
+
+ def put(self, obj: _T) -> None:
+ """
+ Add an object to this queue.
+
+ @raise QueueOverflow: Too many objects are in this queue.
+ """
+ if self.waiting:
+ self.waiting.pop(0).callback(obj)
+ elif self.size is None or len(self.pending) < self.size:
+ self.pending.append(obj)
+ else:
+ raise QueueOverflow()
+
+ def get(self) -> Deferred[_T]:
+ """
+ Attempt to retrieve and remove an object from the queue.
+
+ @return: a L{Deferred} which fires with the next object available in
+ the queue.
+
+ @raise QueueUnderflow: Too many (more than C{backlog})
+ L{Deferred}s are already waiting for an object from this queue.
+ """
+ if self.pending:
+ return succeed(self.pending.pop(0))
+ elif self.backlog is None or len(self.waiting) < self.backlog:
+ d: Deferred[_T] = Deferred(canceller=self._cancelGet)
+ self.waiting.append(d)
+ return d
+ else:
+ raise QueueUnderflow()
+
+
+class AlreadyTryingToLockError(Exception):
+    """
+    Raised when L{DeferredFilesystemLock.deferUntilLocked} is called twice on a
+    single L{DeferredFilesystemLock}, i.e. while a previous attempt is still
+    in progress.
+    """
+
+
+class DeferredFilesystemLock(lockfile.FilesystemLock):
+    """
+    A L{FilesystemLock} that allows for a L{Deferred} to be fired when the lock is
+    acquired.
+
+    @ivar _scheduler: The object in charge of scheduling retries. In this
+        implementation this is parameterized for testing.
+    @ivar _interval: The retry interval for an L{IReactorTime} based scheduler.
+    @ivar _tryLockCall: An L{IDelayedCall} based on C{_interval} that will manage
+        the next retry for acquiring the lock.
+    @ivar _timeoutCall: An L{IDelayedCall} based on C{deferUntilLocked}'s timeout
+        argument. This is in charge of timing out our attempt to acquire the
+        lock.
+    """
+
+    # Seconds between successive self.lock() attempts.
+    _interval = 1
+    _tryLockCall: Optional[IDelayedCall] = None
+    _timeoutCall: Optional[IDelayedCall] = None
+
+    def __init__(self, name: str, scheduler: Optional[IReactorTime] = None) -> None:
+        """
+        @param name: The name of the lock to acquire
+        @param scheduler: An object which provides L{IReactorTime}
+        """
+        lockfile.FilesystemLock.__init__(self, name)
+
+        if scheduler is None:
+            # Imported lazily to avoid installing a reactor at module load.
+            from twisted.internet import reactor
+
+            scheduler = cast(IReactorTime, reactor)
+
+        self._scheduler = scheduler
+
+    def deferUntilLocked(self, timeout: Optional[float] = None) -> Deferred[None]:
+        """
+        Wait until we acquire this lock.  This method is not safe for
+        concurrent use.
+
+        @param timeout: the number of seconds after which to time out if the
+            lock has not been acquired.
+
+        @return: a L{Deferred} which will callback when the lock is acquired, or
+            errback with a L{TimeoutError} after timing out or an
+            L{AlreadyTryingToLockError} if the L{deferUntilLocked} has already
+            been called and not successfully locked the file.
+        """
+        if self._tryLockCall is not None:
+            # A previous deferUntilLocked() is still retrying.
+            return fail(
+                AlreadyTryingToLockError(
+                    "deferUntilLocked isn't safe for concurrent use."
+                )
+            )
+
+        def _cancelLock(reason: Union[Failure, Exception]) -> None:
+            """
+            Cancel a L{DeferredFilesystemLock.deferUntilLocked} call.
+
+            @type reason: L{Failure}
+            @param reason: The reason why the call is cancelled.
+            """
+            # Stop retrying and stop the timeout clock before resolving d.
+            assert self._tryLockCall is not None
+            self._tryLockCall.cancel()
+            self._tryLockCall = None
+            if self._timeoutCall is not None and self._timeoutCall.active():
+                self._timeoutCall.cancel()
+                self._timeoutCall = None
+
+            # One final attempt: if the lock is free now, still succeed.
+            if self.lock():
+                d.callback(None)
+            else:
+                d.errback(reason)
+
+        d: Deferred[None] = Deferred(lambda deferred: _cancelLock(CancelledError()))
+
+        def _tryLock() -> None:
+            if self.lock():
+                # Acquired: dismantle any pending timeout and retry call.
+                if self._timeoutCall is not None:
+                    self._timeoutCall.cancel()
+                    self._timeoutCall = None
+
+                self._tryLockCall = None
+
+                d.callback(None)
+            else:
+                if timeout is not None and self._timeoutCall is None:
+                    # Arm the timeout only once, on the first failed attempt.
+                    reason = Failure(
+                        TimeoutError(
+                            "Timed out acquiring lock: %s after %fs"
+                            % (self.name, timeout)
+                        )
+                    )
+                    self._timeoutCall = self._scheduler.callLater(
+                        timeout, _cancelLock, reason
+                    )
+
+                # Schedule the next attempt.
+                self._tryLockCall = self._scheduler.callLater(self._interval, _tryLock)
+
+        _tryLock()
+
+        return d
+
+
+# Public API of twisted.internet.defer; "import *" exposes exactly these.
+__all__ = [
+    "Deferred",
+    "DeferredList",
+    "succeed",
+    "fail",
+    "FAILURE",
+    "SUCCESS",
+    "AlreadyCalledError",
+    "TimeoutError",
+    "gatherResults",
+    "maybeDeferred",
+    "ensureDeferred",
+    "waitForDeferred",
+    "deferredGenerator",
+    "inlineCallbacks",
+    "returnValue",
+    "DeferredLock",
+    "DeferredSemaphore",
+    "DeferredQueue",
+    "DeferredFilesystemLock",
+    "AlreadyTryingToLockError",
+    "CancelledError",
+]
diff --git a/contrib/python/Twisted/py3/twisted/internet/endpoints.py b/contrib/python/Twisted/py3/twisted/internet/endpoints.py
new file mode 100644
index 0000000000..4a4cf55e8e
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/endpoints.py
@@ -0,0 +1,2338 @@
+# -*- test-case-name: twisted.internet.test.test_endpoints -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
+wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
+
+This also implements an extensible mini-language for describing endpoints,
+parsed by the L{clientFromString} and L{serverFromString} functions.
+
+@since: 10.1
+"""
+
+
+import os
+import re
+import socket
+import warnings
+from typing import Optional, Sequence, Type
+from unicodedata import normalize
+
+from zope.interface import directlyProvides, implementer, provider
+
+from constantly import NamedConstant, Names # type: ignore[import]
+from incremental import Version
+
+from twisted.internet import defer, error, fdesc, interfaces, threads
+from twisted.internet.abstract import isIPAddress, isIPv6Address
+from twisted.internet.address import (
+ HostnameAddress,
+ IPv4Address,
+ IPv6Address,
+ _ProcessAddress,
+)
+from twisted.internet.interfaces import (
+ IAddress,
+ IHostnameResolver,
+ IHostResolution,
+ IReactorPluggableNameResolver,
+ IReactorSocket,
+ IResolutionReceiver,
+ IStreamClientEndpointStringParserWithReactor,
+ IStreamServerEndpointStringParser,
+)
+from twisted.internet.protocol import ClientFactory, Factory, ProcessProtocol, Protocol
+
+try:
+ from twisted.internet.stdio import PipeAddress, StandardIO
+except ImportError:
+ # fallback if pywin32 is not installed
+ StandardIO = None # type: ignore[assignment,misc]
+ PipeAddress = None # type: ignore[assignment,misc]
+
+from twisted.internet._resolver import HostResolution
+from twisted.internet.defer import Deferred
+from twisted.internet.task import LoopingCall
+from twisted.logger import Logger
+from twisted.plugin import IPlugin, getPlugins
+from twisted.python import deprecate, log
+from twisted.python.compat import _matchingString, iterbytes, nativeString
+from twisted.python.components import proxyForInterface
+from twisted.python.failure import Failure
+from twisted.python.filepath import FilePath
+from twisted.python.systemd import ListenFDs
+from ._idna import _idnaBytes, _idnaText
+
+try:
+ from OpenSSL.SSL import Error as SSLError
+
+ from twisted.internet.ssl import (
+ Certificate,
+ CertificateOptions,
+ KeyPair,
+ PrivateCertificate,
+ optionsForClientTLS,
+ trustRootFromCertificates,
+ )
+ from twisted.protocols.tls import TLSMemoryBIOFactory as _TLSMemoryBIOFactory
+except ImportError:
+ TLSMemoryBIOFactory = None
+else:
+ TLSMemoryBIOFactory = _TLSMemoryBIOFactory
+
+# Public API of twisted.internet.endpoints; "import *" exposes exactly these.
+__all__ = [
+    "clientFromString",
+    "serverFromString",
+    "TCP4ServerEndpoint",
+    "TCP6ServerEndpoint",
+    "TCP4ClientEndpoint",
+    "TCP6ClientEndpoint",
+    "UNIXServerEndpoint",
+    "UNIXClientEndpoint",
+    "SSL4ServerEndpoint",
+    "SSL4ClientEndpoint",
+    "AdoptedStreamServerEndpoint",
+    "StandardIOEndpoint",
+    "ProcessEndpoint",
+    "HostnameEndpoint",
+    "StandardErrorBehavior",
+    "connectProtocol",
+    "wrapClientTLS",
+]
+
+
+class _WrappingProtocol(Protocol):
+    """
+    Wrap another protocol in order to notify my user when a connection has
+    been made.
+    """
+
+    def __init__(self, connectedDeferred, wrappedProtocol):
+        """
+        @param connectedDeferred: The L{Deferred} that will callback
+            with the C{wrappedProtocol} when it is connected.
+
+        @param wrappedProtocol: An L{IProtocol} provider that will be
+            connected.
+        """
+        self._connectedDeferred = connectedDeferred
+        self._wrappedProtocol = wrappedProtocol
+
+        # Mirror the wrapped protocol's optional interfaces onto this
+        # wrapper so the transport treats it the same way it would treat
+        # the wrapped protocol directly.
+        for iface in [
+            interfaces.IHalfCloseableProtocol,
+            interfaces.IFileDescriptorReceiver,
+            interfaces.IHandshakeListener,
+        ]:
+            if iface.providedBy(self._wrappedProtocol):
+                directlyProvides(self, iface)
+
+    def logPrefix(self):
+        """
+        Transparently pass through the wrapped protocol's log prefix.
+        """
+        if interfaces.ILoggingContext.providedBy(self._wrappedProtocol):
+            return self._wrappedProtocol.logPrefix()
+        # Fall back to the class name when the wrapped protocol does not
+        # provide ILoggingContext.
+        return self._wrappedProtocol.__class__.__name__
+
+    def connectionMade(self):
+        """
+        Connect the C{self._wrappedProtocol} to our C{self.transport} and
+        callback C{self._connectedDeferred} with the C{self._wrappedProtocol}
+        """
+        self._wrappedProtocol.makeConnection(self.transport)
+        self._connectedDeferred.callback(self._wrappedProtocol)
+
+    def dataReceived(self, data):
+        """
+        Proxy C{dataReceived} calls to our C{self._wrappedProtocol}
+        """
+        return self._wrappedProtocol.dataReceived(data)
+
+    def fileDescriptorReceived(self, descriptor):
+        """
+        Proxy C{fileDescriptorReceived} calls to our C{self._wrappedProtocol}
+        """
+        return self._wrappedProtocol.fileDescriptorReceived(descriptor)
+
+    def connectionLost(self, reason):
+        """
+        Proxy C{connectionLost} calls to our C{self._wrappedProtocol}
+        """
+        return self._wrappedProtocol.connectionLost(reason)
+
+    def readConnectionLost(self):
+        """
+        Proxy L{IHalfCloseableProtocol.readConnectionLost} to our
+        C{self._wrappedProtocol}
+        """
+        self._wrappedProtocol.readConnectionLost()
+
+    def writeConnectionLost(self):
+        """
+        Proxy L{IHalfCloseableProtocol.writeConnectionLost} to our
+        C{self._wrappedProtocol}
+        """
+        self._wrappedProtocol.writeConnectionLost()
+
+    def handshakeCompleted(self):
+        """
+        Proxy L{interfaces.IHandshakeListener} to our
+        C{self._wrappedProtocol}.
+        """
+        self._wrappedProtocol.handshakeCompleted()
+
+
+class _WrappingFactory(ClientFactory):
+    """
+    Wrap a factory in order to wrap the protocols it builds.
+
+    @ivar _wrappedFactory: A provider of I{IProtocolFactory} whose buildProtocol
+        method will be called and whose resulting protocol will be wrapped.
+
+    @ivar _onConnection: A L{Deferred} that fires when the protocol is
+        connected
+
+    @ivar _connector: A L{connector <twisted.internet.interfaces.IConnector>}
+        that is managing the current or previous connection attempt.
+    """
+
+    # Type is wrong. See https://twistedmatrix.com/trac/ticket/10005#ticket
+    protocol = _WrappingProtocol  # type: ignore[assignment]
+
+    def __init__(self, wrappedFactory):
+        """
+        @param wrappedFactory: A provider of I{IProtocolFactory} whose
+            buildProtocol method will be called and whose resulting protocol
+            will be wrapped.
+        """
+        self._wrappedFactory = wrappedFactory
+        self._onConnection = defer.Deferred(canceller=self._canceller)
+
+    def startedConnecting(self, connector):
+        """
+        A connection attempt was started.  Remember the connector which started
+        said attempt, for use later.
+        """
+        self._connector = connector
+
+    def _canceller(self, deferred):
+        """
+        The outgoing connection attempt was cancelled.  Fail that L{Deferred}
+        with an L{error.ConnectingCancelledError}.
+
+        @param deferred: The L{Deferred <defer.Deferred>} that was cancelled;
+            should be the same as C{self._onConnection}.
+        @type deferred: L{Deferred <defer.Deferred>}
+
+        @note: This relies on startedConnecting having been called, so it may
+            seem as though there's a race condition where C{_connector} may not
+            have been set.  However, using public APIs, this condition is
+            impossible to catch, because a connection API
+            (C{connectTCP}/C{SSL}/C{UNIX}) is always invoked before a
+            L{_WrappingFactory}'s L{Deferred <defer.Deferred>} is returned to
+            C{connect()}'s caller.
+
+        @return: L{None}
+        """
+        deferred.errback(
+            error.ConnectingCancelledError(self._connector.getDestination())
+        )
+        self._connector.stopConnecting()
+
+    def doStart(self):
+        """
+        Start notifications are passed straight through to the wrapped factory.
+        """
+        self._wrappedFactory.doStart()
+
+    def doStop(self):
+        """
+        Stop notifications are passed straight through to the wrapped factory.
+        """
+        self._wrappedFactory.doStop()
+
+    def buildProtocol(self, addr):
+        """
+        Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback the
+        C{self._onConnection} L{Deferred} if the wrapped factory raises an
+        exception or returns L{None}.
+
+        @return: An instance of L{_WrappingProtocol} or L{None}
+        """
+        try:
+            proto = self._wrappedFactory.buildProtocol(addr)
+            if proto is None:
+                raise error.NoProtocol()
+        except BaseException:
+            # Deliver the failure (including the NoProtocol case above) to
+            # whoever is waiting on the connection Deferred; returns None.
+            self._onConnection.errback()
+        else:
+            return self.protocol(self._onConnection, proto)
+
+    def clientConnectionFailed(self, connector, reason):
+        """
+        Errback the C{self._onConnection} L{Deferred} when the
+        client connection fails.
+        """
+        # Guard against double-firing: buildProtocol may already have
+        # errbacked _onConnection.
+        if not self._onConnection.called:
+            self._onConnection.errback(reason)
+
+
+@implementer(interfaces.IStreamServerEndpoint)
+class StandardIOEndpoint:
+    """
+    A Standard Input/Output endpoint
+
+    @ivar _stdio: a callable, like L{stdio.StandardIO}, which takes an
+        L{IProtocol} provider and a C{reactor} keyword argument (interface
+        dependent upon your platform).
+    """
+
+    # May be None on Windows without pywin32 (see the import fallback above).
+    _stdio = StandardIO
+
+    def __init__(self, reactor):
+        """
+        @param reactor: The reactor for the endpoint.
+        """
+        self._reactor = reactor
+
+    def listen(self, stdioProtocolFactory):
+        """
+        Implement L{IStreamServerEndpoint.listen} to listen on stdin/stdout
+        """
+        # defer.execute invokes self._stdio synchronously but reports the
+        # result (or any raised exception) through the returned Deferred,
+        # matching the listen() contract.
+        return defer.execute(
+            self._stdio,
+            stdioProtocolFactory.buildProtocol(PipeAddress()),
+            reactor=self._reactor,
+        )
+
+
+class _IProcessTransportWithConsumerAndProducer(
+    interfaces.IProcessTransport, interfaces.IConsumer, interfaces.IPushProducer
+):
+    """
+    An L{_IProcessTransportWithConsumerAndProducer} combines various interfaces
+    to work around the issue that L{interfaces.IProcessTransport} is
+    incompletely defined and doesn't specify flow-control interfaces, and that
+    L{proxyForInterface} doesn't allow for multiple interfaces.
+
+    Used only as the proxied interface of L{_ProcessEndpointTransport}.
+    """
+
+
+class _ProcessEndpointTransport(
+    proxyForInterface(  # type: ignore[misc]
+        _IProcessTransportWithConsumerAndProducer,
+        "_process",
+    )
+):
+    """
+    An L{ITransport}, L{IProcessTransport}, L{IConsumer}, and L{IPushProducer}
+    provider for the L{IProtocol} instance passed to the process endpoint.
+
+    All methods are generated by L{proxyForInterface} and forward to the
+    C{_process} attribute; this class adds no behaviour of its own.
+
+    @ivar _process: An active process transport which will be used by write
+        methods on this object to write data to a child process.
+    @type _process: L{interfaces.IProcessTransport} provider
+    """
+
+
+class _WrapIProtocol(ProcessProtocol):
+    """
+    An L{IProcessProtocol} provider that wraps an L{IProtocol}.
+
+    @ivar transport: A L{_ProcessEndpointTransport} provider that is hooked to
+        the wrapped L{IProtocol} provider.
+
+    @see: L{protocol.ProcessProtocol}
+    """
+
+    def __init__(self, proto, executable, errFlag):
+        """
+        @param proto: An L{IProtocol} provider.
+        @param errFlag: A constant belonging to L{StandardErrorBehavior}
+            that determines if stderr is logged or dropped.
+        @param executable: The file name (full path) to spawn.
+        """
+        self.protocol = proto
+        self.errFlag = errFlag
+        self.executable = executable
+
+    def makeConnection(self, process):
+        """
+        Call L{IProtocol} provider's makeConnection method with an
+        L{ITransport} provider.
+
+        @param process: An L{IProcessTransport} provider.
+        """
+        self.transport = _ProcessEndpointTransport(process)
+        return self.protocol.makeConnection(self.transport)
+
+    def childDataReceived(self, childFD, data):
+        """
+        This is called with data from the process's stdout or stderr pipes. It
+        checks the status of the errFlag to determine if stderr should be
+        logged (default) or dropped.
+        """
+        # FD 1 is the child's stdout; FD 2 is its stderr.
+        if childFD == 1:
+            return self.protocol.dataReceived(data)
+        elif childFD == 2 and self.errFlag == StandardErrorBehavior.LOG:
+            log.msg(
+                format="Process %(executable)r wrote stderr unhandled by "
+                "%(protocol)s: %(data)s",
+                executable=self.executable,
+                protocol=self.protocol,
+                data=data,
+            )
+
+    def processEnded(self, reason):
+        """
+        If the process ends with L{error.ProcessDone}, this method calls the
+        L{IProtocol} provider's L{connectionLost} with a
+        L{error.ConnectionDone}
+
+        @see: L{ProcessProtocol.processEnded}
+        """
+        # NOTE(review): reason.check() returns the matching exception class
+        # (or None), so the == comparison tests for exactly ProcessDone.
+        if (reason.check(error.ProcessDone) == error.ProcessDone) and (
+            reason.value.status == 0
+        ):
+            return self.protocol.connectionLost(Failure(error.ConnectionDone()))
+        else:
+            return self.protocol.connectionLost(reason)
+
+
class StandardErrorBehavior(Names):
    """
    Constants used in ProcessEndpoint to decide what to do with stderr.

    @cvar LOG: Indicates that stderr is to be logged.
    @cvar DROP: Indicates that stderr is to be dropped (and not logged).

    @since: 13.1
    """

    # Default behavior: stderr from the child process is written to the log
    # (see _WrapIProtocol.childDataReceived).
    LOG = NamedConstant()
    # Alternative behavior: stderr output is silently discarded.
    DROP = NamedConstant()
+
+
@implementer(interfaces.IStreamClientEndpoint)
class ProcessEndpoint:
    """
    An endpoint that launches a child process for each connection.

    @ivar _spawnProcess: A hook used for testing the spawning of child process.

    @since: 13.1
    """

    def __init__(
        self,
        reactor,
        executable,
        args=(),
        env={},
        path=None,
        uid=None,
        gid=None,
        usePTY=0,
        childFDs=None,
        errFlag=StandardErrorBehavior.LOG,
    ):
        """
        See L{IReactorProcess.spawnProcess}.

        @param errFlag: Determines if stderr should be logged.
        @type errFlag: L{endpoints.StandardErrorBehavior}
        """
        self._reactor = reactor
        self._executable = executable
        self._args = args
        self._env = env
        self._path = path
        self._uid = uid
        self._gid = gid
        self._usePTY = usePTY
        self._childFDs = childFDs
        self._errFlag = errFlag
        # Stored as an instance attribute so tests may substitute a fake.
        self._spawnProcess = self._reactor.spawnProcess

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to launch a child process
        and connect it to a protocol created by C{protocolFactory}.

        @param protocolFactory: A factory for an L{IProtocol} provider which
            will be notified of all events related to the created process.
        """
        builtProtocol = protocolFactory.buildProtocol(_ProcessAddress())
        try:
            wrapper = _WrapIProtocol(
                builtProtocol, self._executable, self._errFlag
            )
            self._spawnProcess(
                wrapper,
                self._executable,
                self._args,
                self._env,
                self._path,
                self._uid,
                self._gid,
                self._usePTY,
                self._childFDs,
            )
        except BaseException:
            # Wrap whatever went wrong in a failed Deferred.
            return defer.fail()
        return defer.succeed(builtProtocol)
+
+
@implementer(interfaces.IStreamServerEndpoint)
class _TCPServerEndpoint:
    """
    Common implementation shared by the IPv4 and IPv6 TCP server endpoints.
    """

    def __init__(self, reactor, port, backlog, interface):
        """
        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to
        @type interface: str
        """
        self._interface = interface
        self._backlog = backlog
        self._port = port
        self._reactor = reactor

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a TCP
        socket
        """
        # defer.execute invokes listenTCP synchronously and converts its
        # result (or any raised exception) into a Deferred.
        return defer.execute(
            self._reactor.listenTCP,
            self._port,
            protocolFactory,
            interface=self._interface,
            backlog=self._backlog,
        )
+
+
class TCP4ServerEndpoint(_TCPServerEndpoint):
    """
    A TCP server endpoint configured for IPv4.
    """

    def __init__(self, reactor, port, backlog=50, interface=""):
        """
        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to, defaults to '' (all)
        @type interface: str
        """
        super().__init__(reactor, port, backlog, interface)
+
+
class TCP6ServerEndpoint(_TCPServerEndpoint):
    """
    A TCP server endpoint configured for IPv6.
    """

    def __init__(self, reactor, port, backlog=50, interface="::"):
        """
        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to, defaults to C{::} (all)
        @type interface: str
        """
        super().__init__(reactor, port, backlog, interface)
+
+
@implementer(interfaces.IStreamClientEndpoint)
class TCP4ClientEndpoint:
    """
    A TCP client endpoint configured for IPv4.
    """

    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param reactor: An L{IReactorTCP} provider

        @param host: A hostname, used when connecting
        @type host: str

        @param port: The port number, used when connecting
        @type port: int

        @param timeout: The number of seconds to wait before assuming the
            connection has failed.
        @type timeout: L{float} or L{int}

        @param bindAddress: A (host, port) tuple of local address to bind to,
            or None.
        @type bindAddress: tuple
        """
        self._bindAddress = bindAddress
        self._timeout = timeout
        self._port = port
        self._host = host
        self._reactor = reactor

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via TCP.
        """
        try:
            wrappingFactory = _WrappingFactory(protocolFactory)
            self._reactor.connectTCP(
                self._host,
                self._port,
                wrappingFactory,
                timeout=self._timeout,
                bindAddress=self._bindAddress,
            )
            # The wrapping factory fires this Deferred once the connection
            # is established (or fails).
            return wrappingFactory._onConnection
        except BaseException:
            return defer.fail()
+
+
@implementer(interfaces.IStreamClientEndpoint)
class TCP6ClientEndpoint:
    """
    A TCP client endpoint configured for IPv6.

    @ivar _getaddrinfo: A hook used for testing name resolution.

    @ivar _deferToThread: A hook used for testing deferToThread.

    @ivar _GAI_ADDRESS: Index of the address portion in result of
        getaddrinfo to be used.

    @ivar _GAI_ADDRESS_HOST: Index of the actual host-address in the
        5-tuple L{_GAI_ADDRESS}.
    """

    _getaddrinfo = staticmethod(socket.getaddrinfo)
    _deferToThread = staticmethod(threads.deferToThread)
    _GAI_ADDRESS = 4
    _GAI_ADDRESS_HOST = 0

    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param host: An IPv6 address literal or a hostname with an
            IPv6 address

        @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP}
        """
        self._bindAddress = bindAddress
        self._timeout = timeout
        self._port = port
        self._host = host
        self._reactor = reactor

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect}: resolve the hostname if
        necessary, then connect via TCP.
        """
        # An IPv6 address literal needs no resolution step.
        if isIPv6Address(self._host):
            return self._resolvedHostConnect(self._host, protocolFactory)
        resolved = self._nameResolution(self._host)
        resolved.addCallback(
            lambda gai: gai[0][self._GAI_ADDRESS][self._GAI_ADDRESS_HOST]
        )
        resolved.addCallback(self._resolvedHostConnect, protocolFactory)
        return resolved

    def _nameResolution(self, host):
        """
        Resolve the hostname string into a tuple containing the host
        IPv6 address, via getaddrinfo in a worker thread.
        """
        return self._deferToThread(self._getaddrinfo, host, 0, socket.AF_INET6)

    def _resolvedHostConnect(self, resolvedHost, protocolFactory):
        """
        Connect to the server using the resolved hostname.
        """
        try:
            wrappingFactory = _WrappingFactory(protocolFactory)
            self._reactor.connectTCP(
                resolvedHost,
                self._port,
                wrappingFactory,
                timeout=self._timeout,
                bindAddress=self._bindAddress,
            )
            return wrappingFactory._onConnection
        except BaseException:
            return defer.fail()
+
+
@implementer(IHostnameResolver)
class _SimpleHostnameResolver:
    """
    An L{IHostnameResolver} provider that resolves hostnames by invoking a
    caller-supplied callable.

    @ivar _nameResolution: the callable L{resolveHostName} invokes to
        resolve hostnames.
    @type _nameResolution: A L{callable} that accepts two arguments:
        the host to resolve and the port number to include in the
        result.
    """

    _log = Logger()

    def __init__(self, nameResolution):
        """
        Create a L{_SimpleHostnameResolver} instance.
        """
        self._nameResolution = nameResolution

    def resolveHostName(
        self,
        resolutionReceiver: IResolutionReceiver,
        hostName: str,
        portNumber: int = 0,
        addressTypes: Optional[Sequence[Type[IAddress]]] = None,
        transportSemantics: str = "TCP",
    ) -> IHostResolution:
        """
        Initiate a hostname resolution.

        @param resolutionReceiver: an object that will receive each resolved
            address as it arrives.
        @type resolutionReceiver: L{IResolutionReceiver}

        @param hostName: see interface

        @param portNumber: see interface

        @param addressTypes: Ignored in this implementation.

        @param transportSemantics: Ignored in this implementation.

        @return: The resolution in progress.
        @rtype: L{IResolutionReceiver}
        """
        resolution = HostResolution(hostName)
        resolutionReceiver.resolutionBegan(resolution)
        lookup = self._nameResolution(hostName, portNumber)

        def deliverResults(gairesult):
            # Hand each resolved IPv4/IPv6 address to the receiver; entries
            # for any other address family are skipped.
            for family, _socktype, _proto, _canonname, sockaddr in gairesult:
                if family == socket.AF_INET:
                    resolutionReceiver.addressResolved(IPv4Address("TCP", *sockaddr))
                elif family == socket.AF_INET6:
                    resolutionReceiver.addressResolved(IPv6Address("TCP", *sockaddr))

        def logFailure(error):
            self._log.failure(
                "while looking up {name} with {callable}",
                error,
                name=hostName,
                callable=self._nameResolution,
            )

        lookup.addCallback(deliverResults)
        lookup.addErrback(logFailure)
        # Whether the lookup succeeded or failed, signal completion.
        lookup.addBoth(lambda ignored: resolutionReceiver.resolutionComplete())
        return resolution
+
+
@implementer(interfaces.IStreamClientEndpoint)
class HostnameEndpoint:
    """
    A name-based endpoint that connects to the fastest amongst the resolved
    host addresses.

    @cvar _DEFAULT_ATTEMPT_DELAY: The default time to use between attempts, in
        seconds, when no C{attemptDelay} is given to
        L{HostnameEndpoint.__init__}.

    @ivar _hostText: the textual representation of the hostname passed to the
        constructor. Used to pass to the reactor's hostname resolver.
    @type _hostText: L{unicode}

    @ivar _hostBytes: the encoded bytes-representation of the hostname passed
        to the constructor. Used to construct the L{HostnameAddress}
        associated with this endpoint.
    @type _hostBytes: L{bytes}

    @ivar _hostStr: the native-string representation of the hostname passed to
        the constructor, used for exception construction
    @type _hostStr: native L{str}

    @ivar _badHostname: a flag - hopefully false! - indicating that an invalid
        hostname was passed to the constructor. This might be a textual
        hostname that isn't valid IDNA, or non-ASCII bytes.
    @type _badHostname: L{bool}
    """

    # Hooks used by _fallbackNameResolution; kept as class attributes so
    # tests can substitute fakes without touching the real network/threads.
    _getaddrinfo = staticmethod(socket.getaddrinfo)
    _deferToThread = staticmethod(threads.deferToThread)
    _DEFAULT_ATTEMPT_DELAY = 0.3

    def __init__(
        self, reactor, host, port, timeout=30, bindAddress=None, attemptDelay=None
    ):
        """
        Create a L{HostnameEndpoint}.

        @param reactor: The reactor to use for connections and delayed calls.
        @type reactor: provider of L{IReactorTCP}, L{IReactorTime} and either
            L{IReactorPluggableNameResolver} or L{IReactorPluggableResolver}.

        @param host: A hostname to connect to.
        @type host: L{bytes} or L{unicode}

        @param port: The port number to connect to.
        @type port: L{int}

        @param timeout: For each individual connection attempt, the number of
            seconds to wait before assuming the connection has failed.
        @type timeout: L{float} or L{int}

        @param bindAddress: the local address of the network interface to make
            the connections from.
        @type bindAddress: L{bytes}

        @param attemptDelay: The number of seconds to delay between connection
            attempts.
        @type attemptDelay: L{float}

        @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP}
        """

        self._reactor = reactor
        self._nameResolver = self._getNameResolverAndMaybeWarn(reactor)
        [self._badHostname, self._hostBytes, self._hostText] = self._hostAsBytesAndText(
            host
        )
        # On Python 3 ``bytes is str`` is always False, so _hostStr is the
        # text form; the bytes branch is a Python 2 leftover.
        self._hostStr = self._hostBytes if bytes is str else self._hostText
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress
        if attemptDelay is None:
            attemptDelay = self._DEFAULT_ATTEMPT_DELAY
        self._attemptDelay = attemptDelay

    def __repr__(self) -> str:
        """
        Produce a string representation of the L{HostnameEndpoint}.

        @return: A L{str}
        """
        if self._badHostname:
            # Use the backslash-encoded version of the string passed to the
            # constructor, which is already a native string.
            host = self._hostStr
        elif isIPv6Address(self._hostStr):
            # Bracket IPv6 literals so the ":port" suffix is unambiguous.
            host = f"[{self._hostStr}]"
        else:
            # Convert the bytes representation to a native string to ensure
            # that we display the punycoded version of the hostname, which is
            # more useful than any IDN version as it can be easily copy-pasted
            # into debugging tools.
            host = nativeString(self._hostBytes)
        return "".join(["<HostnameEndpoint ", host, ":", str(self._port), ">"])

    def _getNameResolverAndMaybeWarn(self, reactor):
        """
        Retrieve a C{nameResolver} callable and warn the caller's
        caller that using a reactor which doesn't provide
        L{IReactorPluggableNameResolver} is deprecated.

        @param reactor: The reactor to check.

        @return: A L{IHostnameResolver} provider.
        """
        if not IReactorPluggableNameResolver.providedBy(reactor):
            warningString = deprecate.getDeprecationWarningString(
                reactor.__class__,
                Version("Twisted", 17, 5, 0),
                format=(
                    "Passing HostnameEndpoint a reactor that does not"
                    " provide IReactorPluggableNameResolver (%(fqpn)s)"
                    " was deprecated in %(version)s"
                ),
                replacement=(
                    "a reactor that provides" " IReactorPluggableNameResolver"
                ),
            )
            # stacklevel=3 points the warning at the code that constructed
            # this endpoint, not at this helper.
            warnings.warn(warningString, DeprecationWarning, stacklevel=3)
            return _SimpleHostnameResolver(self._fallbackNameResolution)
        return reactor.nameResolver

    @staticmethod
    def _hostAsBytesAndText(host):
        """
        For various reasons (documented in the C{@ivar}'s in the class
        docstring) we need both a textual and a binary representation of the
        hostname given to the constructor. For compatibility and convenience,
        we accept both textual and binary representations of the hostname, save
        the form that was passed, and convert into the other form. This is
        mostly just because L{HostnameAddress} chose somewhat poorly to define
        its attribute as bytes; hopefully we can find a compatible way to clean
        this up in the future and just operate in terms of text internally.

        @param host: A hostname to convert.
        @type host: L{bytes} or C{str}

        @return: a 3-tuple of C{(invalid, bytes, text)} where C{invalid} is a
            boolean indicating the validity of the hostname, C{bytes} is a
            binary representation of C{host}, and C{text} is a textual
            representation of C{host}.
        """
        if isinstance(host, bytes):
            if isIPAddress(host) or isIPv6Address(host):
                return False, host, host.decode("ascii")
            else:
                try:
                    return False, host, _idnaText(host)
                except UnicodeError:
                    # Convert the host to _some_ kind of text, to handle below.
                    host = host.decode("charmap")
        else:
            # NFC-normalize so equivalent Unicode spellings compare equal
            # before IDNA encoding.
            host = normalize("NFC", host)
            if isIPAddress(host) or isIPv6Address(host):
                return False, host.encode("ascii"), host
            else:
                try:
                    return False, _idnaBytes(host), host
                except UnicodeError:
                    pass
        # `host` has been converted to text by this point either way; it's
        # invalid as a hostname, and so may contain unprintable characters and
        # such. escape it with backslashes so the user can get _some_ guess as
        # to what went wrong.
        asciibytes = host.encode("ascii", "backslashreplace")
        return True, asciibytes, asciibytes.decode("ascii")

    def connect(self, protocolFactory):
        """
        Attempts a connection to each resolved address, and returns a
        connection which is established first.

        @param protocolFactory: The protocol factory whose protocol
            will be connected.
        @type protocolFactory:
            L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>}

        @return: A L{Deferred} that fires with the connected protocol
            or fails a connection-related error.
        """
        if self._badHostname:
            return defer.fail(ValueError(f"invalid hostname: {self._hostStr}"))

        # `d` fires with the full list of resolved addresses once the
        # receiver below reports resolution complete.
        d = Deferred()
        addresses = []

        @provider(IResolutionReceiver)
        class EndpointReceiver:
            @staticmethod
            def resolutionBegan(resolutionInProgress):
                pass

            @staticmethod
            def addressResolved(address):
                addresses.append(address)

            @staticmethod
            def resolutionComplete():
                d.callback(addresses)

        self._nameResolver.resolveHostName(
            EndpointReceiver, self._hostText, portNumber=self._port
        )

        # Translate any resolver failure into a DNSLookupError naming the
        # host, which is what callers of this endpoint expect to catch.
        d.addErrback(
            lambda ignored: defer.fail(
                error.DNSLookupError(f"Couldn't find the hostname '{self._hostStr}'")
            )
        )

        @d.addCallback
        def resolvedAddressesToEndpoints(addresses):
            # Yield an endpoint for every address resolved from the name.
            for eachAddress in addresses:
                if isinstance(eachAddress, IPv6Address):
                    yield TCP6ClientEndpoint(
                        self._reactor,
                        eachAddress.host,
                        eachAddress.port,
                        self._timeout,
                        self._bindAddress,
                    )
                if isinstance(eachAddress, IPv4Address):
                    yield TCP4ClientEndpoint(
                        self._reactor,
                        eachAddress.host,
                        eachAddress.port,
                        self._timeout,
                        self._bindAddress,
                    )

        d.addCallback(list)

        def _canceller(d):
            # This canceller must remain defined outside of
            # `startConnectionAttempts`, because Deferred should not
            # participate in cycles with their cancellers; that would create a
            # potentially problematic circular reference and possibly
            # gc.garbage.
            d.errback(
                error.ConnectingCancelledError(
                    HostnameAddress(self._hostBytes, self._port)
                )
            )

        @d.addCallback
        def startConnectionAttempts(endpoints):
            """
            Given a sequence of endpoints obtained via name resolution, start
            connecting to a new one every C{self._attemptDelay} seconds until
            one of the connections succeeds, all of them fail, or the attempt
            is cancelled.

            @param endpoints: a list of all the endpoints we might try to
                connect to, as determined by name resolution.
            @type endpoints: L{list} of L{IStreamServerEndpoint}

            @return: a Deferred that fires with the result of the
                C{endpoint.connect} method that completes the fastest, or fails
                with the first connection error it encountered if none of them
                succeed.
            @rtype: L{Deferred} failing with L{error.ConnectingCancelledError}
                or firing with L{IProtocol}
            """
            if not endpoints:
                raise error.DNSLookupError(
                    f"no results for hostname lookup: {self._hostStr}"
                )
            iterEndpoints = iter(endpoints)
            pending = []
            failures = []
            winner = defer.Deferred(canceller=_canceller)

            def checkDone():
                # Fail the race only when no attempt is still pending, no
                # attempt has won, and no endpoints remain to be tried.
                if pending or checkDone.completed or checkDone.endpointsLeft:
                    return
                winner.errback(failures.pop())

            # Function attributes serve as mutable state shared between the
            # closures below.
            checkDone.completed = False
            checkDone.endpointsLeft = True

            @LoopingCall
            def iterateEndpoint():
                endpoint = next(iterEndpoints, None)
                if endpoint is None:
                    # The list of endpoints ends.
                    checkDone.endpointsLeft = False
                    checkDone()
                    return

                eachAttempt = endpoint.connect(protocolFactory)
                pending.append(eachAttempt)

                @eachAttempt.addBoth
                def noLongerPending(result):
                    pending.remove(eachAttempt)
                    return result

                @eachAttempt.addCallback
                def succeeded(result):
                    winner.callback(result)

                @eachAttempt.addErrback
                def failed(reason):
                    failures.append(reason)
                    checkDone()

            iterateEndpoint.clock = self._reactor
            iterateEndpoint.start(self._attemptDelay)

            @winner.addBoth
            def cancelRemainingPending(result):
                # One attempt finished (or the race was cancelled): abandon
                # all other in-flight attempts and stop launching new ones.
                checkDone.completed = True
                for remaining in pending[:]:
                    remaining.cancel()
                if iterateEndpoint.running:
                    iterateEndpoint.stop()
                return result

            return winner

        return d

    def _fallbackNameResolution(self, host, port):
        """
        Resolve the hostname string into a tuple containing the host
        address. This method is only used when the reactor does
        not provide L{IReactorPluggableNameResolver}.

        @param host: A unicode hostname to resolve.

        @param port: The port to include in the resolution.

        @return: A L{Deferred} that fires with L{_getaddrinfo}'s
            return value.
        """
        return self._deferToThread(self._getaddrinfo, host, port, 0, socket.SOCK_STREAM)
+
+
@implementer(interfaces.IStreamServerEndpoint)
class SSL4ServerEndpoint:
    """
    An SSL-secured TCP server endpoint configured for IPv4.
    """

    def __init__(self, reactor, port, sslContextFactory, backlog=50, interface=""):
        """
        @param reactor: An L{IReactorSSL} provider.

        @param port: The port number used for listening
        @type port: int

        @param sslContextFactory: An instance of
            L{interfaces.IOpenSSLContextFactory}.

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to, defaults to '' (all)
        @type interface: str
        """
        self._interface = interface
        self._backlog = backlog
        self._sslContextFactory = sslContextFactory
        self._port = port
        self._reactor = reactor

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
        TCP socket.
        """
        # defer.execute wraps the synchronous listenSSL call (and any
        # exception it raises) in a Deferred.
        return defer.execute(
            self._reactor.listenSSL,
            self._port,
            protocolFactory,
            contextFactory=self._sslContextFactory,
            interface=self._interface,
            backlog=self._backlog,
        )
+
+
@implementer(interfaces.IStreamClientEndpoint)
class SSL4ClientEndpoint:
    """
    An SSL-secured TCP client endpoint configured for IPv4.
    """

    def __init__(
        self, reactor, host, port, sslContextFactory, timeout=30, bindAddress=None
    ):
        """
        @param reactor: An L{IReactorSSL} provider.

        @param host: A hostname, used when connecting
        @type host: str

        @param port: The port number, used when connecting
        @type port: int

        @param sslContextFactory: SSL Configuration information as an instance
            of L{interfaces.IOpenSSLContextFactory}.

        @param timeout: Number of seconds to wait before assuming the
            connection has failed.
        @type timeout: int

        @param bindAddress: A (host, port) tuple of local address to bind to,
            or None.
        @type bindAddress: tuple
        """
        self._bindAddress = bindAddress
        self._timeout = timeout
        self._sslContextFactory = sslContextFactory
        self._port = port
        self._host = host
        self._reactor = reactor

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect with SSL over
        TCP.
        """
        try:
            wrappingFactory = _WrappingFactory(protocolFactory)
            self._reactor.connectSSL(
                self._host,
                self._port,
                wrappingFactory,
                self._sslContextFactory,
                timeout=self._timeout,
                bindAddress=self._bindAddress,
            )
            return wrappingFactory._onConnection
        except BaseException:
            return defer.fail()
+
+
@implementer(interfaces.IStreamServerEndpoint)
class UNIXServerEndpoint:
    """
    A server endpoint that listens on a UNIX socket.
    """

    def __init__(self, reactor, address, backlog=50, mode=0o666, wantPID=0):
        """
        @param reactor: An L{IReactorUNIX} provider.
        @param address: The path to the Unix socket file, used when listening
        @param backlog: number of connections to allow in backlog.
        @param mode: mode to set on the unix socket. This parameter is
            deprecated. Permissions should be set on the directory which
            contains the UNIX socket.
        @param wantPID: If True, create a pidfile for the socket.
        """
        self._wantPID = wantPID
        self._mode = mode
        self._backlog = backlog
        self._address = address
        self._reactor = reactor

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
        """
        # defer.execute converts the synchronous listenUNIX call (and any
        # exception it raises) into a Deferred.
        return defer.execute(
            self._reactor.listenUNIX,
            self._address,
            protocolFactory,
            wantPID=self._wantPID,
            mode=self._mode,
            backlog=self._backlog,
        )
+
+
@implementer(interfaces.IStreamClientEndpoint)
class UNIXClientEndpoint:
    """
    A client endpoint that connects over a UNIX socket.
    """

    def __init__(self, reactor, path, timeout=30, checkPID=0):
        """
        @param reactor: An L{IReactorUNIX} provider.

        @param path: The path to the Unix socket file, used when connecting
        @type path: str

        @param timeout: Number of seconds to wait before assuming the
            connection has failed.
        @type timeout: int

        @param checkPID: If True, check for a pid file to verify that a server
            is listening.
        @type checkPID: bool
        """
        self._checkPID = checkPID
        self._timeout = timeout
        self._path = path
        self._reactor = reactor

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via a
        UNIX Socket
        """
        try:
            wrappingFactory = _WrappingFactory(protocolFactory)
            self._reactor.connectUNIX(
                self._path,
                wrappingFactory,
                timeout=self._timeout,
                checkPID=self._checkPID,
            )
            return wrappingFactory._onConnection
        except BaseException:
            return defer.fail()
+
+
@implementer(interfaces.IStreamServerEndpoint)
class AdoptedStreamServerEndpoint:
    """
    An endpoint for listening on a file descriptor initialized outside of
    Twisted.

    @ivar _used: A C{bool} indicating whether this endpoint has been used to
        listen with a factory yet. C{True} if so.
    """

    # Hooks so tests can observe/replace descriptor manipulation.
    _close = os.close
    _setNonBlocking = staticmethod(fdesc.setNonBlocking)

    def __init__(self, reactor, fileno, addressFamily):
        """
        @param reactor: An L{IReactorSocket} provider.

        @param fileno: An integer file descriptor corresponding to a listening
            I{SOCK_STREAM} socket.

        @param addressFamily: The address family of the socket given by
            C{fileno}.
        """
        self._used = False
        self.addressFamily = addressFamily
        self.fileno = fileno
        self.reactor = reactor

    def listen(self, factory):
        """
        Implement L{IStreamServerEndpoint.listen} to start listening on, and
        then close, C{self.fileno}.
        """
        if self._used:
            # Each adopted descriptor may only be listened on once.
            return defer.fail(error.AlreadyListened())
        self._used = True

        try:
            self._setNonBlocking(self.fileno)
            listeningPort = self.reactor.adoptStreamPort(
                self.fileno, self.addressFamily, factory
            )
            # Close our copy of the descriptor now that the reactor has
            # adopted it.
            self._close(self.fileno)
        except BaseException:
            return defer.fail()
        return defer.succeed(listeningPort)
+
+
+def _parseTCP(factory, port, interface="", backlog=50):
+ """
+ Internal parser function for L{_parseServer} to convert the string
+ arguments for a TCP(IPv4) stream endpoint into the structured arguments.
+
+ @param factory: the protocol factory being parsed, or L{None}. (This was a
+ leftover argument from when this code was in C{strports}, and is now
+ mostly None and unused.)
+
+ @type factory: L{IProtocolFactory} or L{None}
+
+ @param port: the integer port number to bind
+ @type port: C{str}
+
+ @param interface: the interface IP to listen on
+ @param backlog: the length of the listen queue
+ @type backlog: C{str}
+
+ @return: a 2-tuple of (args, kwargs), describing the parameters to
+ L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments
+ to L{TCP4ServerEndpoint}.
+ """
+ return (int(port), factory), {"interface": interface, "backlog": int(backlog)}
+
+
+def _parseUNIX(factory, address, mode="666", backlog=50, lockfile=True):
+ """
+ Internal parser function for L{_parseServer} to convert the string
+ arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
+ structured arguments.
+
+ @param factory: the protocol factory being parsed, or L{None}. (This was a
+ leftover argument from when this code was in C{strports}, and is now
+ mostly None and unused.)
+
+ @type factory: L{IProtocolFactory} or L{None}
+
+ @param address: the pathname of the unix socket
+ @type address: C{str}
+
+ @param backlog: the length of the listen queue
+ @type backlog: C{str}
+
+ @param lockfile: A string '0' or '1', mapping to True and False
+ respectively. See the C{wantPID} argument to C{listenUNIX}
+
+ @return: a 2-tuple of (args, kwargs), describing the parameters to
+ L{twisted.internet.interfaces.IReactorUNIX.listenUNIX} (or,
+ modulo argument 2, the factory, arguments to L{UNIXServerEndpoint}.
+ """
+ return (
+ (address, factory),
+ {"mode": int(mode, 8), "backlog": int(backlog), "wantPID": bool(int(lockfile))},
+ )
+
+
def _parseSSL(
    factory,
    port,
    privateKey="server.pem",
    certKey=None,
    sslmethod=None,
    interface="",
    backlog=50,
    extraCertChain=None,
    dhParameters=None,
):
    """
    Internal parser function for L{_parseServer} to convert the string
    arguments for an SSL (over TCP/IPv4) stream endpoint into the structured
    arguments.

    @param factory: the protocol factory being parsed, or L{None}. (This was a
        leftover argument from when this code was in C{strports}, and is now
        mostly None and unused.)
    @type factory: L{IProtocolFactory} or L{None}

    @param port: the integer port number to bind
    @type port: C{str}

    @param interface: the interface IP to listen on
    @param backlog: the length of the listen queue
    @type backlog: C{str}

    @param privateKey: The file name of a PEM format private key file.
    @type privateKey: C{str}

    @param certKey: The file name of a PEM format certificate file.
    @type certKey: C{str}

    @param sslmethod: The string name of an SSL method, based on the name of a
        constant in C{OpenSSL.SSL}.
    @type sslmethod: C{str}

    @param extraCertChain: The path of a file containing one or more
        certificates in PEM format that establish the chain from a root CA to
        the CA that signed your C{certKey}.
    @type extraCertChain: L{str}

    @param dhParameters: The file name of a file containing parameters that are
        required for Diffie-Hellman key exchange. If this is not specified,
        the forward secret C{DHE} ciphers aren't available for servers.
    @type dhParameters: L{str}

    @return: a 2-tuple of (args, kwargs), describing the parameters to
        L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory, arguments
        to L{SSL4ServerEndpoint}.
    """
    # Imported locally, presumably to avoid a hard pyOpenSSL dependency for
    # users who never parse ssl: endpoint strings -- TODO confirm.
    from twisted.internet import ssl

    # With no separate certificate file, the private key file is expected to
    # contain the certificate as well.
    if certKey is None:
        certKey = privateKey
    kw = {}
    if sslmethod is not None:
        kw["method"] = getattr(ssl.SSL, sslmethod)
    certPEM = FilePath(certKey).getContent()
    keyPEM = FilePath(privateKey).getContent()
    # loadPEM parses both the certificate and the key from a single blob.
    privateCertificate = ssl.PrivateCertificate.loadPEM(certPEM + b"\n" + keyPEM)
    if extraCertChain is not None:
        # Extract each PEM certificate block from the chain file.
        matches = re.findall(
            r"(-----BEGIN CERTIFICATE-----\n.+?\n-----END CERTIFICATE-----)",
            nativeString(FilePath(extraCertChain).getContent()),
            flags=re.DOTALL,
        )
        chainCertificates = [
            ssl.Certificate.loadPEM(chainCertPEM).original for chainCertPEM in matches
        ]
        if not chainCertificates:
            raise ValueError(
                "Specified chain file '%s' doesn't contain any valid "
                "certificates in PEM format." % (extraCertChain,)
            )
    else:
        chainCertificates = None
    if dhParameters is not None:
        dhParameters = ssl.DiffieHellmanParameters.fromFile(
            FilePath(dhParameters),
        )

    cf = ssl.CertificateOptions(
        privateKey=privateCertificate.privateKey.original,
        certificate=privateCertificate.original,
        extraCertChain=chainCertificates,
        dhParameters=dhParameters,
        **kw,
    )
    return ((int(port), factory, cf), {"interface": interface, "backlog": int(backlog)})
+
+
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _StandardIOParser:
    """
    Stream server endpoint string parser for the Standard I/O type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.
    """

    prefix = "stdio"

    def _parseServer(self, reactor):
        """
        Construct the L{StandardIOEndpoint}; no string arguments apply to
        this endpoint type.

        @param reactor: Reactor for the endpoint
        """
        return StandardIOEndpoint(reactor)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # The interface mandates this loose *args/**kwargs signature to
        # satisfy zope.interface; delegate to _parseServer, which takes
        # only what this parser actually uses.
        return self._parseServer(reactor)
+
+
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _SystemdParser:
    """
    Stream server endpoint string parser for the I{systemd} endpoint type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.

    @ivar _sddaemon: A L{ListenFDs} instance used to translate an index into an
        actual file descriptor.
    """

    _sddaemon = ListenFDs.fromEnvironment()

    prefix = "systemd"

    def _parseServer(
        self,
        reactor: IReactorSocket,
        domain: str,
        index: Optional[str] = None,
        name: Optional[str] = None,
    ) -> AdoptedStreamServerEndpoint:
        """
        Internal parser function for L{_parseServer} to convert the string
        arguments for a systemd server endpoint into structured arguments for
        L{AdoptedStreamServerEndpoint}.

        @param reactor: An L{IReactorSocket} provider.

        @param domain: The domain (or address family) of the socket inherited
            from systemd, e.g. C{"INET"} or C{"UNIX"}: the name of an address
            family from the L{socket} module without the C{"AF_"} prefix.

        @param index: If given, the decimal representation of an integer
            giving the offset into the list of file descriptors inherited from
            systemd. Since the order of descriptors received from systemd is
            hard to predict, prefer C{name} where possible. Exactly one of
            C{index} and C{name} must be given.

        @param name: If given, the name (as defined by C{FileDescriptorName}
            in the C{[Socket]} section of a systemd service definition) of an
            inherited file descriptor. Exactly one of C{index} and C{name}
            must be given.

        @return: An L{AdoptedStreamServerEndpoint} which will adopt the
            inherited listening port when it is used to listen.
        """
        # Exactly one of the two selectors must be supplied.
        if (index is None) == (name is None):
            raise ValueError("Specify exactly one of descriptor index or name")

        if index is None:
            assert name is not None
            fileno = self._sddaemon.inheritedNamedDescriptors()[name]
        else:
            fileno = self._sddaemon.inheritedDescriptors()[int(index)]

        addressFamily = getattr(socket, "AF_" + domain)
        return AdoptedStreamServerEndpoint(reactor, fileno, addressFamily)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # The plugin interface mandates this loose signature to satisfy
        # zope.interface; the real argument handling lives in _parseServer.
        return self._parseServer(reactor, *args, **kwargs)
+
+
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _TCP6ServerParser:
    """
    Stream server endpoint string parser for the TCP6ServerEndpoint type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.
    """

    # Matched by _parseServer against the description's endpoint type.
    prefix = "tcp6"

    def _parseServer(self, reactor, port, backlog=50, interface="::"):
        """
        Convert string arguments into a L{TCP6ServerEndpoint}.

        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to
        @type interface: str
        """
        return TCP6ServerEndpoint(reactor, int(port), int(backlog), interface)

    def parseStreamServer(self, reactor, *args, **kwargs):
        """
        Delegate to L{_parseServer}; this permissive signature exists to
        satisfy zope.interface.
        """
        return self._parseServer(reactor, *args, **kwargs)
+
+
# Parsers for the built-in server endpoint types.  A prefix not present here
# is resolved as an IStreamServerEndpointStringParser plugin by _parseServer.
_serverParsers = {
    "tcp": _parseTCP,
    "unix": _parseUNIX,
    "ssl": _parseSSL,
}

# Token kinds produced by _tokenize: _OP for ':' / '=', _STRING for text.
_OP, _STRING = range(2)
+
+
def _tokenize(description):
    """
    Tokenize a strports string and yield each token.

    @param description: a string as described by L{serverFromString} or
        L{clientFromString}.
    @type description: L{str} or L{bytes}

    @return: an iterable of 2-tuples of (C{_OP} or C{_STRING}, string). Tuples
        starting with C{_OP} will contain a second element of either ':' (i.e.
        'next parameter') or '=' (i.e. 'assign parameter value'). For example,
        the string 'hello:greeting=world' would result in a generator yielding
        these values::

            _STRING, 'hello'
            _OP, ':'
            _STRING, 'greeting'
            _OP, '='
            _STRING, 'world'
    """
    # Build one-character literals of the same type (str or bytes) as the
    # input so the comparisons below work for either.
    empty = _matchingString("", description)
    colon = _matchingString(":", description)
    equals = _matchingString("=", description)
    backslash = _matchingString("\x5c", description)
    current = empty

    # 'ops' is the set of characters currently treated as operators.  After
    # an '=' only ':' remains an operator, so a literal '=' may appear
    # inside a value; after a ':' both operators are live again.
    ops = colon + equals
    nextOps = {colon: colon + equals, equals: colon}
    iterdesc = iter(iterbytes(description))
    for n in iterdesc:
        if n in iterbytes(ops):
            yield _STRING, current
            yield _OP, n
            current = empty
            ops = nextOps[n]
        elif n == backslash:
            # Backslash escapes the next character (e.g. a literal colon).
            current += next(iterdesc)
        else:
            current += n
    # Emit whatever accumulated after the final operator (possibly empty).
    yield _STRING, current
+
+
def _parse(description):
    """
    Convert a description string into a list of positional and keyword
    parameters, using logic vaguely like what Python does.

    @param description: a string as described by L{serverFromString} or
        L{clientFromString}.

    @return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all
        ':'-separated C{str}s not containing an '=' and 'kwargs' is a map of
        all C{str}s which do contain an '='. For example, the result of
        C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}.
    """
    positional, keyword = [], {}
    colon = _matchingString(":", description)

    def flush(accumulated):
        # A single accumulated string is a positional argument; a pair is a
        # key=value keyword argument.
        if len(accumulated) == 1:
            positional.append(accumulated[0])
        else:
            keyword[nativeString(accumulated[0])] = accumulated[1]

    accumulated = ()
    for kind, text in _tokenize(description):
        if kind is _STRING:
            accumulated += (text,)
        elif text == colon:
            # ':' terminates the current argument; '=' just switches the
            # accumulator from key to value, so it needs no action here.
            flush(accumulated)
            accumulated = ()
    flush(accumulated)
    return positional, keyword
+
+
# Mappings from description "names" to endpoint constructors.
# Server side: consulted by serverFromString after _parseServer has
# normalized the description into (NAME, args, kwargs).
_endpointServerFactories = {
    "TCP": TCP4ServerEndpoint,
    "SSL": SSL4ServerEndpoint,
    "UNIX": UNIXServerEndpoint,
}

# Client side: consulted by clientFromString after the matching entry in
# _clientParsers has coerced the string arguments.
_endpointClientFactories = {
    "TCP": TCP4ClientEndpoint,
    "SSL": SSL4ClientEndpoint,
    "UNIX": UNIXClientEndpoint,
}
+
+
def _parseServer(description, factory):
    """
    Parse a strports description into a 2-tuple of arguments and keyword
    values.

    @param description: A description in the format explained by
        L{serverFromString}.
    @type description: C{str}

    @param factory: A 'factory' argument; this is left-over from
        twisted.application.strports, it's not really used.
    @type factory: L{IProtocolFactory} or L{None}

    @return: a 3-tuple of (plugin or name, arguments, keyword arguments)
    """
    args, kw = _parse(description)
    endpointType = args[0]
    parser = _serverParsers.get(endpointType)
    if parser is not None:
        # Built-in type: let its parser coerce the remaining arguments.
        return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
    # No built-in parser; fall back to a plugin whose prefix matches.
    plugin = _matchPluginToPrefix(
        getPlugins(IStreamServerEndpointStringParser), endpointType
    )
    return (plugin, args[1:], kw)
+
+
def _matchPluginToPrefix(plugins, endpointType):
    """
    Find the plugin in C{plugins} whose C{prefix} matches C{endpointType},
    comparing case-insensitively.

    @raise ValueError: if no plugin's prefix matches.
    """
    wanted = endpointType.lower()
    for candidate in plugins:
        # _matchingString coerces the prefix to the same type (str/bytes)
        # as the description-derived endpoint type before comparing.
        if _matchingString(candidate.prefix.lower(), wanted) == wanted:
            return candidate
    raise ValueError(f"Unknown endpoint type: '{wanted}'")
+
+
def serverFromString(reactor, description):
    """
    Construct a stream server endpoint from an endpoint description string.

    A description is a prefix naming the endpoint type, a colon, and then
    colon-separated arguments for that type.  Extra arguments may be given
    as keywords::

        serverFromString(reactor, "tcp:80")
        serverFromString(reactor, "tcp:80:interface=127.0.0.1")

    SSL server endpoints use the 'ssl' prefix; the private key and
    certificate files may be given with C{privateKey} and C{certKey}::

        serverFromString(
            reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem")

    If C{privateKey} is omitted, a "server.pem" file is assumed to exist and
    contain the private key; if C{certKey} is omitted, the private key file
    is assumed to contain the certificate as well.

    Colons inside an argument may be escaped with a backslash, which you
    will need for full path names on Windows::

        serverFromString(reactor,
            "ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem")

    Finally, the 'unix' prefix specifies a filesystem UNIX socket, with an
    optional 'mode' argument for the socket file created by C{listen}::

        serverFromString(reactor, "unix:/var/run/finger")
        serverFromString(reactor, "unix:/var/run/finger:mode=660")

    This function is also extensible; new endpoint types may be registered
    as L{IStreamServerEndpointStringParser} plugins.  See that interface for
    more information.

    @param reactor: The server endpoint will be constructed with this reactor.

    @param description: The strports description to parse.
    @type description: L{str}

    @return: A new endpoint which can be used to listen with the parameters
        given by C{description}.

    @rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>}

    @raise ValueError: when the 'description' string cannot be parsed.

    @since: 10.2
    """
    nameOrPlugin, args, kw = _parseServer(description, None)
    if type(nameOrPlugin) is str:
        # Built-in type: drop the unused factory placeholder (args[1]) and
        # invoke the matching endpoint constructor.
        trimmed = args[:1] + args[2:]
        return _endpointServerFactories[nameOrPlugin](reactor, *trimmed, **kw)
    # A plugin was matched instead of a name; let it build the endpoint.
    return nameOrPlugin.parseStreamServer(reactor, *args, **kw)
+
+
def quoteStringArgument(argument):
    """
    Quote an argument to L{serverFromString} and L{clientFromString}.

    Arguments are separated with colons and colons are escaped with
    backslashes, so interpolating a raw value (for example a Windows path
    name containing ':') into a description string is unsafe::

        serverFromString(reactor, "ssl:443:privateKey=%s" % (myPathName,))

    Usually you should construct the appropriate endpoint type directly
    (here L{SSL4ServerEndpoint}) rather than building strings; but when you
    must generate a description string - e.g. a tool that manipulates a
    configuration file of strports descriptions - quote each argument::

        serverFromString(reactor, "ssl:443:privateKey=%s" %
                         (quoteStringArgument(myPathName),))

    @param argument: The part of the endpoint description string you want to
        pass through.

    @type argument: C{str}

    @return: The quoted argument.

    @rtype: C{str}
    """
    backslash = "\\"
    # Escape the escape character first so already-inserted backslashes are
    # not doubled again by the colon pass.
    for special in (backslash, ":"):
        argument = argument.replace(special, backslash + special)
    return argument
+
+
+def _parseClientTCP(*args, **kwargs):
+ """
+ Perform any argument value coercion necessary for TCP client parameters.
+
+ Valid positional arguments to this function are host and port.
+
+ Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
+ arguments.
+
+ @return: The coerced values as a C{dict}.
+ """
+
+ if len(args) == 2:
+ kwargs["port"] = int(args[1])
+ kwargs["host"] = args[0]
+ elif len(args) == 1:
+ if "host" in kwargs:
+ kwargs["port"] = int(args[0])
+ else:
+ kwargs["host"] = args[0]
+
+ try:
+ kwargs["port"] = int(kwargs["port"])
+ except KeyError:
+ pass
+
+ try:
+ kwargs["timeout"] = int(kwargs["timeout"])
+ except KeyError:
+ pass
+
+ try:
+ kwargs["bindAddress"] = (kwargs["bindAddress"], 0)
+ except KeyError:
+ pass
+
+ return kwargs
+
+
def _loadCAsFromDir(directoryPath):
    """
    Load certificate-authority certificate objects in a given directory.

    @param directoryPath: a L{FilePath} pointing at a directory to load .pem
        files from.

    @return: an L{IOpenSSLTrustRoot} provider.
    """
    # Key by digest so duplicate certificates collapse to one entry.
    certificatesByDigest = {}
    for child in directoryPath.children():
        if child.asTextMode().basename().split(".")[-1].lower() != "pem":
            continue
        try:
            pemData = child.getContent()
        except OSError:
            # Unreadable file (permission denied, disk error, ...): skip.
            continue
        try:
            certificate = Certificate.loadPEM(pemData)
        except SSLError:
            # Not a parseable certificate; skip it.
            continue
        certificatesByDigest[certificate.digest()] = certificate
    return trustRootFromCertificates(certificatesByDigest.values())
+
+
def _parseTrustRootPath(pathName):
    """
    Parse a string referring to a directory full of certificate authorities
    into a trust root.

    @param pathName: path name
    @type pathName: L{unicode} or L{bytes} or L{None}

    @return: L{None} or L{IOpenSSLTrustRoot}
    """
    return None if pathName is None else _loadCAsFromDir(FilePath(pathName))
+
+
def _privateCertFromPaths(certificatePath, keyPath):
    """
    Parse a certificate path and key path, either or both of which might be
    L{None}, into a certificate object.

    @param certificatePath: the certificate path
    @type certificatePath: L{bytes} or L{unicode} or L{None}

    @param keyPath: the private key path
    @type keyPath: L{bytes} or L{unicode} or L{None}

    @return: a L{PrivateCertificate} or L{None}
    """
    if certificatePath is None:
        return None
    certBytes = FilePath(certificatePath).getContent()
    if keyPath is None:
        # Without a separate key file, the certificate PEM is expected to
        # contain the key as well.
        return PrivateCertificate.loadPEM(certBytes)
    keyBytes = FilePath(keyPath).getContent()
    # NOTE(review): the literal 1 looks like crypto.FILETYPE_PEM - confirm.
    return PrivateCertificate.fromCertificateAndKeyPair(
        Certificate.loadPEM(certBytes),
        KeyPair.load(keyBytes, 1),
    )
+
+
def _parseClientSSLOptions(kwargs):
    """
    Parse common arguments for SSL endpoints, creating an L{CertificateOptions}
    instance.

    @param kwargs: A dict of keyword arguments to be parsed, potentially
        containing keys C{certKey}, C{privateKey}, C{caCertsDir}, and
        C{hostname}. See L{_parseClientSSL}.
    @type kwargs: L{dict}

    @return: The remaining arguments, including a new key C{sslContextFactory}.
    """
    hostname = kwargs.pop("hostname", None)
    certificatePath = kwargs.pop("certKey", None)
    keyPath = kwargs.pop("privateKey", None)
    clientCertificate = _privateCertFromPaths(certificatePath, keyPath)
    trustRoot = _parseTrustRootPath(kwargs.pop("caCertsDir", None))

    if hostname is None:
        # No hostname means no service-identity verification; fall back to
        # plain CertificateOptions.  (Callers _really_ should pass one.)
        if clientCertificate is None:
            keyOpenSSL = certOpenSSL = None
        else:
            keyOpenSSL = clientCertificate.privateKey.original
            certOpenSSL = clientCertificate.original
        configuration = CertificateOptions(
            trustRoot=trustRoot,
            privateKey=keyOpenSSL,
            certificate=certOpenSSL,
        )
    else:
        configuration = optionsForClientTLS(
            _idnaText(hostname),
            trustRoot=trustRoot,
            clientCertificate=clientCertificate,
        )

    kwargs["sslContextFactory"] = configuration
    return kwargs
+
+
def _parseClientSSL(*args, **kwargs):
    """
    Perform any argument value coercion necessary for SSL client parameters.

    Valid keyword arguments to this function are all L{IReactorSSL.connectSSL}
    arguments except for C{contextFactory}. Instead, C{certKey} (the path name
    of the certificate file) C{privateKey} (the path name of the private key
    associated with the certificate) are accepted and used to construct a
    context factory.

    Valid positional arguments to this function are host and port.

    @keyword caCertsDir: The one parameter which is not part of
        L{IReactorSSL.connectSSL}'s signature, this is a path name used to
        construct a list of certificate authority certificates. The directory
        will be scanned for files ending in C{.pem}, all of which will be
        considered valid certificate authorities for this connection.
    @type caCertsDir: L{str}

    @keyword hostname: The hostname to use for validating the server's
        certificate.
    @type hostname: L{unicode}

    @return: The coerced values as a L{dict}.
    """
    # TCP coercion first (host/port/timeout/bindAddress), then the
    # SSL-specific keys are folded into an sslContextFactory entry.
    return _parseClientSSLOptions(_parseClientTCP(*args, **kwargs))
+
+
+def _parseClientUNIX(*args, **kwargs):
+ """
+ Perform any argument value coercion necessary for UNIX client parameters.
+
+ Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
+ keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted
+ and has the same meaning. Also C{path} is used instead of C{address}.
+
+ Valid positional arguments to this function are C{path}.
+
+ @return: The coerced values as a C{dict}.
+ """
+ if len(args) == 1:
+ kwargs["path"] = args[0]
+
+ try:
+ kwargs["checkPID"] = bool(int(kwargs.pop("lockfile")))
+ except KeyError:
+ pass
+ try:
+ kwargs["timeout"] = int(kwargs["timeout"])
+ except KeyError:
+ pass
+ return kwargs
+
+
# Argument-coercion functions for the built-in client endpoint types, keyed
# by upper-cased prefix; consulted by clientFromString.
_clientParsers = {
    "TCP": _parseClientTCP,
    "SSL": _parseClientSSL,
    "UNIX": _parseClientUNIX,
}
+
+
def clientFromString(reactor, description):
    """
    Construct a client endpoint from a description string.

    Client description strings are much like server description strings,
    although they take all of their arguments as keywords, aside from host
    and port.

    A TCP client endpoint accepts 'host' and 'port', as keywords or
    positionally, in any combination::

        clientFromString(reactor, "tcp:host=www.example.com:port=80")
        clientFromString(reactor, "tcp:www.example.com:80")
        clientFromString(reactor, "tcp:host=www.example.com:80")
        clientFromString(reactor, "tcp:www.example.com:port=80")

    An SSL client endpoint takes those arguments plus the server-SSL
    arguments for a client certificate::

        clientFromString(reactor, "ssl:web.example.com:443:"
                                  "privateKey=foo.pem:certKey=foo.pem")

    Certificate trust roots may be loaded from a directory of PEM files with
    the C{caCertsDir} argument::

        clientFromString(reactor, "ssl:host=web.example.com:port=443:"
                                  "caCertsDir=/etc/ssl/certs")

    Both TCP and SSL client endpoint description strings can include a
    'bindAddress' keyword argument, whose value should be a local IPv4
    address. This fixes the client socket to that IP address::

        clientFromString(reactor, "tcp:www.example.com:80:"
                                  "bindAddress=192.0.2.100")

    NB: Fixed client ports are not currently supported in TCP or SSL
    client endpoints. The client socket will always use an ephemeral
    port assigned by the operating system

    A UNIX client endpoint takes a 'path' (positionally or as a keyword) and
    optional 'lockfile' and 'timeout' arguments::

        clientFromString(
            reactor, b"unix:path=/var/foo/bar:lockfile=1:timeout=9")
        clientFromString(reactor, "unix:/var/foo/bar")
        clientFromString(reactor, "unix:/var/foo/bar:lockfile=1:timeout=9")

    This function is also extensible; new endpoint types may be registered as
    L{IStreamClientEndpointStringParserWithReactor} plugins. See that
    interface for more information.

    @param reactor: The client endpoint will be constructed with this reactor.

    @param description: The strports description to parse.
    @type description: L{str}

    @return: A new endpoint which can be used to connect with the parameters
        given by C{description}.
    @rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>}

    @since: 10.2
    """
    args, kwargs = _parse(description)
    aname = args.pop(0)
    name = aname.upper()
    if name in _clientParsers:
        # Built-in type: coerce the string arguments, then construct.
        kwargs = _clientParsers[name](*args, **kwargs)
        return _endpointClientFactories[name](reactor, **kwargs)
    # Unknown prefix: look for a matching plugin and delegate to it.
    plugin = _matchPluginToPrefix(
        getPlugins(IStreamClientEndpointStringParserWithReactor), name
    )
    return plugin.parseStreamClient(reactor, *args, **kwargs)
+
+
def connectProtocol(endpoint, protocol):
    """
    Connect a protocol instance to an endpoint.

    This allows using a client endpoint without having to create a factory.

    @param endpoint: A client endpoint to connect to.

    @param protocol: A protocol instance.

    @return: The result of calling C{connect} on the endpoint, i.e. a
        L{Deferred} that will fire with the protocol when connected, or an
        appropriate error.

    @since: 13.1
    """

    class _SingleUseFactory(Factory):
        # Always hand back the caller's already-constructed protocol.
        def buildProtocol(self, addr):
            return protocol

    return endpoint.connect(_SingleUseFactory())
+
+
@implementer(interfaces.IStreamClientEndpoint)
class _WrapperEndpoint:
    """
    An endpoint that wraps another endpoint.
    """

    def __init__(self, wrappedEndpoint, wrapperFactory):
        """
        Construct a L{_WrapperEndpoint}.
        """
        self._wrappedEndpoint = wrappedEndpoint
        self._wrapperFactory = wrapperFactory

    def connect(self, protocolFactory):
        """
        Connect using the wrapper factory, then unwrap the resulting
        protocol before handing it to the caller.
        """
        wrapping = self._wrapperFactory(protocolFactory)
        connected = self._wrappedEndpoint.connect(wrapping)
        return connected.addCallback(lambda proto: proto.wrappedProtocol)
+
+
@implementer(interfaces.IStreamServerEndpoint)
class _WrapperServerEndpoint:
    """
    A server endpoint that wraps another server endpoint.
    """

    def __init__(self, wrappedEndpoint, wrapperFactory):
        """
        Construct a L{_WrapperServerEndpoint}.
        """
        self._wrappedEndpoint = wrappedEndpoint
        self._wrapperFactory = wrapperFactory

    def listen(self, protocolFactory):
        """
        Listen with the wrapped endpoint, interposing the wrapper factory
        around the given protocol factory.
        """
        wrapping = self._wrapperFactory(protocolFactory)
        return self._wrappedEndpoint.listen(wrapping)
+
+
def wrapClientTLS(connectionCreator, wrappedEndpoint):
    """
    Wrap an endpoint which upgrades to TLS as soon as the connection is
    established.

    @since: 16.0

    @param connectionCreator: The TLS options to use when connecting; see
        L{twisted.internet.ssl.optionsForClientTLS} for how to construct this.
    @type connectionCreator:
        L{twisted.internet.interfaces.IOpenSSLClientConnectionCreator}

    @param wrappedEndpoint: The endpoint to wrap.
    @type wrappedEndpoint: An L{IStreamClientEndpoint} provider.

    @return: an endpoint that provides transport level encryption layered on
        top of C{wrappedEndpoint}
    @rtype: L{twisted.internet.interfaces.IStreamClientEndpoint}

    @raise NotImplementedError: if OpenSSL support is not installed.
    """
    if TLSMemoryBIOFactory is None:
        raise NotImplementedError(
            "OpenSSL not available. Try `pip install twisted[tls]`."
        )

    def wrapFactory(protocolFactory):
        # True == act as the client side of the TLS handshake.
        return TLSMemoryBIOFactory(connectionCreator, True, protocolFactory)

    return _WrapperEndpoint(wrappedEndpoint, wrapFactory)
+
+
def _parseClientTLS(
    reactor,
    host,
    port,
    timeout=b"30",
    bindAddress=None,
    certificate=None,
    privateKey=None,
    trustRoots=None,
    endpoint=None,
    **kwargs,
):
    """
    Internal method to construct an endpoint from string parameters.

    @param reactor: The reactor passed to L{clientFromString}.

    @param host: The hostname to connect to.
    @type host: L{bytes} or L{unicode}

    @param port: The port to connect to.
    @type port: L{bytes} or L{unicode}

    @param timeout: For each individual connection attempt, the number of
        seconds to wait before assuming the connection has failed.
    @type timeout: L{bytes} or L{unicode}

    @param bindAddress: The address to which to bind outgoing connections.
    @type bindAddress: L{bytes} or L{unicode}

    @param certificate: a string representing a filesystem path to a
        PEM-encoded certificate.
    @type certificate: L{bytes} or L{unicode}

    @param privateKey: a string representing a filesystem path to a
        PEM-encoded certificate.
    @type privateKey: L{bytes} or L{unicode}

    @param trustRoots: a string representing a filesystem path to a directory
        of PEM-encoded certificate-authority certificates.
    @type trustRoots: L{bytes} or L{unicode}

    @param endpoint: an optional string endpoint description of an endpoint to
        wrap; if this is passed then C{host} is used only for certificate
        verification.
    @type endpoint: L{bytes} or L{unicode}

    @return: a client TLS endpoint
    @rtype: L{IStreamClientEndpoint}

    @raise TypeError: if unrecognized keyword arguments are present.
    """
    if kwargs:
        raise TypeError("unrecognized keyword arguments present", list(kwargs.keys()))
    # Normalize bytes arguments to text.
    if not isinstance(host, str):
        host = host.decode("utf-8")
    if bindAddress is not None and not isinstance(bindAddress, str):
        bindAddress = bindAddress.decode("utf-8")
    port = int(port)
    timeout = int(timeout)
    connectionCreator = optionsForClientTLS(
        host,
        trustRoot=_parseTrustRootPath(trustRoots),
        clientCertificate=_privateCertFromPaths(certificate, privateKey),
    )
    if endpoint is None:
        wrapped = HostnameEndpoint(
            reactor, _idnaBytes(host), port, timeout, bindAddress
        )
    else:
        # An explicit inner endpoint was requested; 'host' then only names
        # the certificate to verify against.
        wrapped = clientFromString(reactor, endpoint)
    return wrapClientTLS(connectionCreator, wrapped)
+
+
@implementer(IPlugin, IStreamClientEndpointStringParserWithReactor)
class _TLSClientEndpointParser:
    """
    Stream client endpoint string parser for L{wrapClientTLS} with
    L{HostnameEndpoint}.

    @ivar prefix: See
        L{IStreamClientEndpointStringParserWithReactor.prefix}.
    """

    prefix = "tls"

    @staticmethod
    def parseStreamClient(reactor, *args, **kwargs):
        """
        Delegate to L{_parseClientTLS}.  The generic
        (C{reactor}, C{*args}, C{**kwargs}) signature exists to satisfy
        zope.interface; see L{_parseClientTLS} for the real parameters
        accepted by this endpoint parser.

        @param reactor: The reactor passed to L{clientFromString}.

        @param args: The positional arguments in the endpoint description.
        @type args: L{tuple}

        @param kwargs: The named arguments in the endpoint description.
        @type kwargs: L{dict}

        @return: a client TLS endpoint
        @rtype: L{IStreamClientEndpoint}
        """
        return _parseClientTLS(reactor, *args, **kwargs)
diff --git a/contrib/python/Twisted/py3/twisted/internet/epollreactor.py b/contrib/python/Twisted/py3/twisted/internet/epollreactor.py
new file mode 100644
index 0000000000..c13e50be01
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/epollreactor.py
@@ -0,0 +1,259 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+An epoll() based implementation of the twisted main loop.
+
+To install the event loop (and you should do this before any connections,
+listeners or connectors are added)::
+
+ from twisted.internet import epollreactor
+ epollreactor.install()
+"""
+
+import errno
+import select
+
+from zope.interface import implementer
+
+from twisted.internet import posixbase
+from twisted.internet.interfaces import IReactorFDSet
+from twisted.python import log
+
try:
    # This is to keep mypy from complaining.
    # We don't use type: ignore[attr-defined] on import, because mypy only
    # complains on some platforms, and then the unused ignore is an issue if
    # the undefined attribute isn't.
    epoll = getattr(select, "epoll")
    EPOLLHUP = getattr(select, "EPOLLHUP")
    EPOLLERR = getattr(select, "EPOLLERR")
    EPOLLIN = getattr(select, "EPOLLIN")
    EPOLLOUT = getattr(select, "EPOLLOUT")
except AttributeError as e:
    # Surface "no epoll here" as ImportError so reactor selection treats
    # this module as unavailable rather than broken.
    raise ImportError(e)
+
+
@implementer(IReactorFDSet)
class EPollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    A reactor that uses epoll(7).

    @ivar _poller: A C{epoll} which will be used to check for I/O
        readiness.

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of C{FileDescriptor} which have been registered with the
        reactor.  All C{FileDescriptors} which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A set containing integer file descriptors.  Values in this
        set will be registered with C{_poller} for read readiness notifications
        which will be dispatched to the corresponding C{FileDescriptor}
        instances in C{_selectables}.

    @ivar _writes: A set containing integer file descriptors.  Values in this
        set will be registered with C{_poller} for write readiness
        notifications which will be dispatched to the corresponding
        C{FileDescriptor} instances in C{_selectables}.

    @ivar _continuousPolling: A L{_ContinuousPolling} instance, used to handle
        file descriptors (e.g. filesystem files) that are not supported by
        C{epoll(7)}.
    """

    # Attributes for _PollLikeMixin
    _POLL_DISCONNECTED = EPOLLHUP | EPOLLERR
    _POLL_IN = EPOLLIN
    _POLL_OUT = EPOLLOUT

    def __init__(self):
        """
        Initialize epoll object, file descriptor tracking dictionaries, and the
        base class.
        """
        # Create the poller we're going to use.  The 1024 here is just a hint
        # to the kernel, it is not a hard maximum.  After Linux 2.6.8, the size
        # argument is completely ignored.
        self._poller = epoll(1024)
        self._reads = set()
        self._writes = set()
        self._selectables = {}
        # Fallback for descriptors epoll refuses (EPERM), e.g. regular files.
        self._continuousPolling = posixbase._ContinuousPolling(self)
        posixbase.PosixReactorBase.__init__(self)

    def _add(self, xer, primary, other, selectables, event, antievent):
        """
        Private method for adding a descriptor from the event loop.

        It takes care of adding it if new or modifying it if already added
        for another state (read -> read/write for example).

        @param xer: the selectable (anything with C{fileno()}) to register.
        @param primary: the tracking set (C{_reads} or C{_writes}) for the
            event being added.
        @param other: the complementary tracking set.
        @param selectables: the fd -> selectable mapping to update.
        @param event: the epoll event mask bit being added.
        @param antievent: the mask bit for the complementary event.
        """
        fd = xer.fileno()
        if fd not in primary:
            flags = event
            # epoll_ctl can raise all kinds of IOErrors, and every one
            # indicates a bug either in the reactor or application-code.
            # Let them all through so someone sees a traceback and fixes
            # something.  We'll do the same thing for every other call to
            # this method in this file.
            if fd in other:
                flags |= antievent
                self._poller.modify(fd, flags)
            else:
                self._poller.register(fd, flags)

            # Update our own tracking state *only* after the epoll call has
            # succeeded.  Otherwise we may get out of sync.
            primary.add(fd)
            selectables[fd] = xer

    def addReader(self, reader):
        """
        Add a FileDescriptor for notification of data available to read.
        """
        try:
            self._add(
                reader, self._reads, self._writes, self._selectables, EPOLLIN, EPOLLOUT
            )
        except OSError as e:
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addReader(reader)
            else:
                raise

    def addWriter(self, writer):
        """
        Add a FileDescriptor for notification of data available to write.
        """
        try:
            self._add(
                writer, self._writes, self._reads, self._selectables, EPOLLOUT, EPOLLIN
            )
        except OSError as e:
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addWriter(writer)
            else:
                raise

    def _remove(self, xer, primary, other, selectables, event, antievent):
        """
        Private method for removing a descriptor from the event loop.

        It does the inverse job of _add, and also add a check in case of the fd
        has gone away.
        """
        fd = xer.fileno()
        if fd == -1:
            # The descriptor has already been closed; recover the fd we
            # registered it under by searching _selectables by identity.
            for fd, fdes in selectables.items():
                if xer is fdes:
                    break
            else:
                # Not tracked at all; nothing to do.
                return
        if fd in primary:
            if fd in other:
                # Still wanted for the complementary event: narrow the mask
                # rather than unregistering.
                flags = antievent
                # See comment above modify call in _add.
                self._poller.modify(fd, flags)
            else:
                del selectables[fd]
                # See comment above _control call in _add.
                self._poller.unregister(fd)
            primary.remove(fd)

    def removeReader(self, reader):
        """
        Remove a Selectable for notification of data available to read.
        """
        if self._continuousPolling.isReading(reader):
            self._continuousPolling.removeReader(reader)
            return
        self._remove(
            reader, self._reads, self._writes, self._selectables, EPOLLIN, EPOLLOUT
        )

    def removeWriter(self, writer):
        """
        Remove a Selectable for notification of data available to write.
        """
        if self._continuousPolling.isWriting(writer):
            self._continuousPolling.removeWriter(writer)
            return
        self._remove(
            writer, self._writes, self._reads, self._selectables, EPOLLOUT, EPOLLIN
        )

    def removeAll(self):
        """
        Remove all selectables, and return a list of them.
        """
        return (
            self._removeAll(
                [self._selectables[fd] for fd in self._reads],
                [self._selectables[fd] for fd in self._writes],
            )
            + self._continuousPolling.removeAll()
        )

    def getReaders(self):
        # Readers tracked by epoll plus those on the EPERM fallback path.
        return [
            self._selectables[fd] for fd in self._reads
        ] + self._continuousPolling.getReaders()

    def getWriters(self):
        # Writers tracked by epoll plus those on the EPERM fallback path.
        return [
            self._selectables[fd] for fd in self._writes
        ] + self._continuousPolling.getWriters()

    def doPoll(self, timeout):
        """
        Poll the poller for new events.

        @param timeout: maximum seconds to block, or L{None} to wait forever.
        """
        if timeout is None:
            timeout = -1  # Wait indefinitely.

        try:
            # Limit the number of events to the number of io objects we're
            # currently tracking (because that's maybe a good heuristic) and
            # the amount of time we block to the value specified by our
            # caller.
            l = self._poller.poll(timeout, len(self._selectables))
        except OSError as err:
            if err.errno == errno.EINTR:
                # Interrupted by a signal; just return and let the reactor
                # loop come around again.
                return
            # See epoll_wait(2) for documentation on the other conditions
            # under which this can fail.  They can only be due to a serious
            # programming error on our part, so let's just announce them
            # loudly.
            raise

        _drdw = self._doReadOrWrite
        for fd, event in l:
            try:
                selectable = self._selectables[fd]
            except KeyError:
                # The selectable was removed by an earlier event's handler
                # during this same pass; skip it.
                pass
            else:
                log.callWithLogger(selectable, _drdw, selectable, fd, event)

    # The base class drives the loop through doIteration.
    doIteration = doPoll
+
+
def install():
    """
    Install the epoll() reactor.
    """
    from twisted.internet.main import installReactor

    reactor = EPollReactor()
    installReactor(reactor)
+
+
+__all__ = ["EPollReactor", "install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/error.py b/contrib/python/Twisted/py3/twisted/internet/error.py
new file mode 100644
index 0000000000..e66a194f14
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/error.py
@@ -0,0 +1,510 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Exceptions and errors for use in twisted.internet modules.
+"""
+
+
+import socket
+
+from incremental import Version
+
+from twisted.python import deprecate
+
+
class BindError(Exception):
    __doc__ = MESSAGE = "An error occurred binding to an interface"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        description = self.MESSAGE
        if self.args:
            description = "{}: {}".format(description, " ".join(self.args))
        return description + "."
+
+
class CannotListenError(BindError):
    """
    This gets raised by a call to startListening, when the object cannot start
    listening.

    @ivar interface: the interface I tried to listen on
    @ivar port: the port I tried to listen on
    @ivar socketError: the exception I got when I tried to listen
    @type socketError: L{socket.error}
    """

    def __init__(self, interface, port, socketError):
        BindError.__init__(self, interface, port, socketError)
        self.interface = interface
        self.port = port
        self.socketError = socketError

    def __str__(self) -> str:
        # An empty/None interface means "listen on all interfaces".
        iface = self.interface or "any"
        return "Couldn't listen on {}:{}: {}.".format(
            iface, self.port, self.socketError
        )
+
+
class MulticastJoinError(Exception):
    """
    Raised when an attempt to join a multicast group fails.
    """
+
+
class MessageLengthError(Exception):
    __doc__ = MESSAGE = "Message is too long to send"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        text = self.MESSAGE
        if self.args:
            text += ": " + " ".join(self.args)
        return text + "."
+
+
class DNSLookupError(IOError):
    __doc__ = MESSAGE = "DNS lookup failed"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        if self.args:
            return "{}: {}.".format(self.MESSAGE, " ".join(self.args))
        return self.MESSAGE + "."
+
+
class ConnectInProgressError(Exception):
    """
    A connect operation was started and has not finished yet.
    """
+
+
+# connection errors
+
+
class ConnectError(Exception):
    __doc__ = MESSAGE = "An error occurred while connecting"

    def __init__(self, osError=None, string=""):
        """
        @param osError: The OS-level error (typically an errno) that caused
            the failure, if any.
        @param string: A human-readable detail string.
        """
        self.osError = osError
        Exception.__init__(self, string)

    def __str__(self) -> str:
        """
        Join the message, the OS error and the detail string with colons.
        """
        parts = [self.MESSAGE]
        if self.osError:
            parts.append(str(self.osError))
        if self.args[0]:
            parts.append(str(self.args[0]))
        return ": ".join(parts) + "."
+
+
class ConnectBindError(ConnectError):
    # Raised when binding the local endpoint of an outgoing connection fails.
    __doc__ = MESSAGE = "Couldn't bind"
+
+
class UnknownHostError(ConnectError):
    # Raised when name resolution for the target host fails.
    __doc__ = MESSAGE = "Hostname couldn't be looked up"
+
+
class NoRouteError(ConnectError):
    # Maps ENETUNREACH (see errnoMapping below).
    __doc__ = MESSAGE = "No route to host"
+
+
class ConnectionRefusedError(ConnectError):
    # Maps ECONNREFUSED (see errnoMapping below).
    __doc__ = MESSAGE = "Connection was refused by other side"
+
+
class TCPTimedOutError(ConnectError):
    # Maps ETIMEDOUT (see errnoMapping below).
    __doc__ = MESSAGE = "TCP connection timed out"
+
+
class BadFileError(ConnectError):
    # Raised when the path used for a UNIX-domain socket is unusable.
    __doc__ = MESSAGE = "File used for UNIX socket is no good"
+
+
class ServiceNameUnknownError(ConnectError):
    # Raised when a symbolic service name cannot be resolved to a port number.
    __doc__ = MESSAGE = "Service name given as port is unknown"
+
+
class UserError(ConnectError):
    # Raised when the application itself aborts the connection attempt.
    __doc__ = MESSAGE = "User aborted connection"
+
+
class TimeoutError(UserError):
    # NOTE(review): this intentionally shadows the builtin TimeoutError;
    # it represents an application-level (user-configured) timeout.
    __doc__ = MESSAGE = "User timeout caused connection failure"
+
+
class SSLError(ConnectError):
    # Raised for TLS/SSL failures during connection establishment.
    __doc__ = MESSAGE = "An SSL error occurred"
+
+
class VerifyError(Exception):
    # Raised when verification of signed material fails.
    __doc__ = MESSAGE = "Could not verify something that was supposed to be signed."
+
+
class PeerVerifyError(VerifyError):
    # NOTE(review): the message wording ("rejected our verify error") is
    # odd but preserved from upstream; the class signals that the peer
    # rejected our credentials during verification.
    __doc__ = MESSAGE = "The peer rejected our verify error."
+
+
class CertificateError(Exception):
    # Raised when an expected certificate is absent.
    __doc__ = MESSAGE = "We did not find a certificate where we expected to find one."
+
+
try:
    import errno

    # Map well-known connect(2) errnos to specific ConnectError subclasses;
    # getConnectError consults this to pick a meaningful exception type.
    errnoMapping = {
        errno.ENETUNREACH: NoRouteError,
        errno.ECONNREFUSED: ConnectionRefusedError,
        errno.ETIMEDOUT: TCPTimedOutError,
    }
    if hasattr(errno, "WSAECONNREFUSED"):
        # Windows (Winsock) reports its own error numbers for the same
        # conditions; map them to the same exception types.
        errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
        errnoMapping[errno.WSAENETUNREACH] = NoRouteError  # type: ignore[attr-defined]
except ImportError:
    # No errno module available: fall back to generic ConnectError always.
    errnoMapping = {}
+
+
def getConnectError(e):
    """
    Given a socket exception, return the matching connection error.

    @param e: Either an exception instance whose C{args} are
        C{(errno, message)}, or a bare C{(errno, message)} tuple.
    @return: An instance of the most specific applicable L{ConnectError}
        subclass.
    """
    if isinstance(e, Exception):
        args = e.args
    else:
        args = e
    try:
        number, string = args
    except ValueError:
        # args did not unpack into exactly (errno, message); wrap the
        # original value in a generic ConnectError instead.
        return ConnectError(string=e)

    if hasattr(socket, "gaierror") and isinstance(e, socket.gaierror):
        # Only works in 2.2 in newer. Really that means always; #5978 covers
        # this and other weirdnesses in this function.
        klass = UnknownHostError
    else:
        klass = errnoMapping.get(number, ConnectError)
    return klass(number, string)
+
+
class ConnectionClosed(Exception):
    """
    The connection was closed, whether cleanly or non-cleanly.
    """
+
+
class ConnectionLost(ConnectionClosed):
    __doc__ = MESSAGE = """
    Connection to the other side was lost in a non-clean fashion
    """

    def __str__(self) -> str:
        """
        Use only the first line of the (multi-line) message, then append
        any positional args and a period.
        """
        pieces = self.MESSAGE.strip().splitlines()[:1]
        if self.args:
            pieces += [": ", " ".join(self.args)]
        pieces.append(".")
        return "".join(pieces)
+
+
class ConnectionAborted(ConnectionLost):
    """
    The connection was aborted on the local side, via
    L{twisted.internet.interfaces.ITCPTransport.abortConnection}.

    @since: 11.1
    """

    MESSAGE = (
        "Connection was aborted locally using ITCPTransport.abortConnection"
    )
+
+
class ConnectionDone(ConnectionClosed):
    __doc__ = MESSAGE = "Connection was closed cleanly"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        text = self.MESSAGE
        if self.args:
            text = "{}: {}".format(text, " ".join(self.args))
        return text + "."
+
+
class FileDescriptorOverrun(ConnectionLost):
    """
    The connection was closed because L{IUNIXTransport.sendFileDescriptor}
    was mis-used.

    Every file descriptor sent via C{sendFileDescriptor} must be paired with
    at least one byte written via L{ITransport.write}; whenever more
    descriptors than bytes have been sent, the connection is closed with
    this exception.
    """

    MESSAGE = (
        "A mis-use of IUNIXTransport.sendFileDescriptor caused "
        "the connection to be closed."
    )
+
+
class ConnectionFdescWentAway(ConnectionLost):
    # NOTE(review): the "Uh" message is a placeholder carried over from
    # upstream Twisted (the TODO is theirs); presumably this signals that
    # the file descriptor backing the connection disappeared — confirm
    # against callers before relying on the message text.
    __doc__ = MESSAGE = "Uh"  # TODO
+
+
class AlreadyCalled(ValueError):
    __doc__ = MESSAGE = "Tried to cancel an already-called event"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        details = " ".join(self.args) if self.args else None
        if details is None:
            return f"{self.MESSAGE}."
        return f"{self.MESSAGE}: {details}."
+
+
class AlreadyCancelled(ValueError):
    __doc__ = MESSAGE = "Tried to cancel an already-cancelled event"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        suffix = ""
        if self.args:
            suffix = ": " + " ".join(self.args)
        return self.MESSAGE + suffix + "."
+
+
class PotentialZombieWarning(Warning):
    """
    Emitted when L{IReactorProcess.spawnProcess} is invoked in a way that
    may leave the created child process's termination unreported.

    Deprecated in Twisted 10.0.
    """

    MESSAGE = (
        "spawnProcess called, but the SIGCHLD handler is not installed. "
        "This probably means you have not yet called reactor.run, or "
        "called reactor.run(installSignalHandler=0). You will probably "
        "never see this process finish, and it may become a zombie "
        "process."
    )
+
+
# Emit a DeprecationWarning whenever PotentialZombieWarning is accessed
# through this module; it has been obsolete since Twisted 10.0.
deprecate.deprecatedModuleAttribute(
    Version("Twisted", 10, 0, 0),
    "There is no longer any potential for zombie process.",
    __name__,
    "PotentialZombieWarning",
)
+
+
class ProcessDone(ConnectionDone):
    __doc__ = MESSAGE = "A process has ended without apparent errors"

    def __init__(self, status):
        """
        @param status: The platform-specific exit status of the process.
        """
        Exception.__init__(self, "process finished with exit code 0")
        self.status = status
        self.exitCode = 0
        self.signal = None
+
+
class ProcessTerminated(ConnectionLost):
    __doc__ = MESSAGE = """
    A process has ended with a probable error condition

    @ivar exitCode: See L{__init__}
    @ivar signal: See L{__init__}
    @ivar status: See L{__init__}
    """

    def __init__(self, exitCode=None, signal=None, status=None):
        """
        @param exitCode: The exit status of the process. This is roughly like
            the value you might pass to L{os._exit}. This is L{None} if the
            process exited due to a signal.
        @type exitCode: L{int} or L{None}

        @param signal: The exit signal of the process. This is L{None} if the
            process did not exit due to a signal.
        @type signal: L{int} or L{None}

        @param status: The exit code of the process. This is a platform
            specific combination of the exit code and the exit signal. See
            L{os.WIFEXITED} and related functions.
        @type status: L{int}
        """
        self.exitCode = exitCode
        self.signal = signal
        self.status = status
        message = "process ended"
        if exitCode is not None:
            message += " with exit code %s" % exitCode
        if signal is not None:
            message += " by signal %s" % signal
        Exception.__init__(self, message)
+
+
class ProcessExitedAlready(Exception):
    """
    Raised when an operation is attempted on a process that has already
    exited and so can no longer be performed.
    """
+
+
class NotConnectingError(RuntimeError):
    __doc__ = (
        MESSAGE
    ) = "The Connector was not connecting when it was asked to stop connecting"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        parts = [self.MESSAGE]
        if self.args:
            parts.append(": ")
            parts.append(" ".join(self.args))
        parts.append(".")
        return "".join(parts)
+
+
class NotListeningError(RuntimeError):
    __doc__ = MESSAGE = "The Port was not listening when it was asked to stop listening"

    def __str__(self) -> str:
        """
        Render the message, appending any positional args and a period.
        """
        if not self.args:
            return self.MESSAGE + "."
        return "{}: {}.".format(self.MESSAGE, " ".join(self.args))
+
+
class ReactorNotRunning(RuntimeError):
    """
    Raised when asked to stop a reactor that is not running.
    """
+
+
class ReactorNotRestartable(RuntimeError):
    """
    Raised when attempting to run a reactor that has already been stopped.
    """
+
+
class ReactorAlreadyRunning(RuntimeError):
    """
    Raised when the reactor is started while it is already running.
    """
+
+
class ReactorAlreadyInstalledError(AssertionError):
    """
    Installing a reactor failed because another one is already installed.
    """
+
+
class ConnectingCancelledError(Exception):
    """
    An C{Exception} raised when an L{IStreamClientEndpoint} is cancelled
    before it finishes connecting.

    @ivar address: The L{IAddress} that is the destination of the
        cancelled L{IStreamClientEndpoint}.
    """

    def __init__(self, address):
        """
        @param address: The L{IAddress} that is the destination of the
            L{IStreamClientEndpoint} that was cancelled.
        """
        self.address = address
        Exception.__init__(self, address)
+
+
class NoProtocol(Exception):
    """
    Raised when the factory given to a L{IStreamClientEndpoint} returns
    L{None} from C{buildProtocol}.
    """
+
+
class UnsupportedAddressFamily(Exception):
    """
    Raised when a socket's address family (e.g. I{AF_INET}, I{AF_INET6})
    is not supported by the reactor.
    """
+
+
class UnsupportedSocketType(Exception):
    """
    Raised when a socket's type (e.g. I{SOCK_STREAM}, I{SOCK_DGRAM}) is
    not supported by the reactor.
    """
+
+
class AlreadyListened(Exception):
    """
    Raised on a second attempt to listen on a file descriptor which may
    only be listened on once.
    """
+
+
class InvalidAddressError(ValueError):
    """
    An invalid address was specified (i.e. neither IPv4 or IPv6, or expected
    one and got the other).

    @ivar address: See L{__init__}
    @ivar message: See L{__init__}
    """

    def __init__(self, address, message):
        """
        @param address: The address that was provided.
        @type address: L{bytes}
        @param message: A native string of additional information provided by
            the calling context.
        @type message: L{str}
        """
        # NOTE(review): ValueError.__init__ is deliberately not called
        # (matching upstream), so self.args stays empty and str() of this
        # exception is "" — read .address/.message instead.
        self.address = address
        self.message = message
+
+
+__all__ = [
+ "BindError",
+ "CannotListenError",
+ "MulticastJoinError",
+ "MessageLengthError",
+ "DNSLookupError",
+ "ConnectInProgressError",
+ "ConnectError",
+ "ConnectBindError",
+ "UnknownHostError",
+ "NoRouteError",
+ "ConnectionRefusedError",
+ "TCPTimedOutError",
+ "BadFileError",
+ "ServiceNameUnknownError",
+ "UserError",
+ "TimeoutError",
+ "SSLError",
+ "VerifyError",
+ "PeerVerifyError",
+ "CertificateError",
+ "getConnectError",
+ "ConnectionClosed",
+ "ConnectionLost",
+ "ConnectionDone",
+ "ConnectionFdescWentAway",
+ "AlreadyCalled",
+ "AlreadyCancelled",
+ "PotentialZombieWarning",
+ "ProcessDone",
+ "ProcessTerminated",
+ "ProcessExitedAlready",
+ "NotConnectingError",
+ "NotListeningError",
+ "ReactorNotRunning",
+ "ReactorAlreadyRunning",
+ "ReactorAlreadyInstalledError",
+ "ConnectingCancelledError",
+ "UnsupportedAddressFamily",
+ "UnsupportedSocketType",
+ "InvalidAddressError",
+]
diff --git a/contrib/python/Twisted/py3/twisted/internet/fdesc.py b/contrib/python/Twisted/py3/twisted/internet/fdesc.py
new file mode 100644
index 0000000000..b95755222d
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/fdesc.py
@@ -0,0 +1,121 @@
+# -*- test-case-name: twisted.test.test_fdesc -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+Utility functions for dealing with POSIX file descriptors.
+"""
+
+import errno
+import os
+
+try:
+ import fcntl as _fcntl
+except ImportError:
+ fcntl = None
+else:
+ fcntl = _fcntl
+
+# twisted imports
+from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
+
+
def setNonBlocking(fd):
    """
    Put the given file descriptor into non-blocking mode.

    @param fd: An integer file descriptor.
    """
    currentFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, currentFlags | os.O_NONBLOCK)
+
+
def setBlocking(fd):
    """
    Clear C{O_NONBLOCK} so the given file descriptor blocks again.

    @param fd: An integer file descriptor.
    """
    currentFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, currentFlags & ~os.O_NONBLOCK)
+
+
if fcntl is None:
    # fcntl isn't available on Windows. By default, handles aren't
    # inherited on Windows, so we can do nothing here.
    _setCloseOnExec = _unsetCloseOnExec = lambda fd: None
else:

    def _setCloseOnExec(fd):
        """
        Make a file descriptor close-on-exec.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags | fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)

    def _unsetCloseOnExec(fd):
        """
        Make a file descriptor not close-on-exec, so it is inherited
        across exec.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags & ~fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
+
+
def readFromFD(fd, callback):
    """
    Read from file descriptor, calling callback with resulting data.

    If successful, call 'callback' with a single argument: the
    resulting data.

    Returns same thing FileDescriptor.doRead would: CONNECTION_LOST,
    CONNECTION_DONE, or None.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be read from.
    @param callback: a callable which accepts a single argument. If
        data is read from the file descriptor it will be called with this
        data. Handling exceptions from calling the callback is up to the
        caller.

    Note that if the descriptor is still connected but no data is read,
    None will be returned but callback will not be called.

    @return: CONNECTION_LOST on error, CONNECTION_DONE when fd is
        closed, otherwise None.
    """
    try:
        data = os.read(fd, 8192)
    except OSError as readError:
        if readError.args[0] in (errno.EAGAIN, errno.EINTR):
            # Nothing to read right now (or interrupted); not an error.
            return None
        return CONNECTION_LOST
    if not data:
        # A successful zero-byte read means end-of-file.
        return CONNECTION_DONE
    callback(data)
+
+
def writeToFD(fd, data):
    """
    Write data to file descriptor.

    Returns same thing FileDescriptor.writeSomeData would.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be written to.
    @type data: C{str} or C{buffer}
    @param data: bytes to write to fd.

    @return: number of bytes written, or CONNECTION_LOST.
    """
    try:
        bytesWritten = os.write(fd, data)
    except OSError as writeError:
        if writeError.errno in (errno.EAGAIN, errno.EINTR):
            # Temporarily unwritable (or interrupted): report zero progress.
            return 0
        return CONNECTION_LOST
    return bytesWritten
+
+
+__all__ = ["setNonBlocking", "setBlocking", "readFromFD", "writeToFD"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/gireactor.py b/contrib/python/Twisted/py3/twisted/internet/gireactor.py
new file mode 100644
index 0000000000..e9c072f41a
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/gireactor.py
@@ -0,0 +1,122 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module provides support for Twisted to interact with the glib
+mainloop via GObject Introspection.
+
+In order to use this support, simply do the following::
+
+ from twisted.internet import gireactor
+ gireactor.install()
+
+If you wish to use a GApplication, register it with the reactor::
+
+ from twisted.internet import reactor
+ reactor.registerGApplication(app)
+
+Then use twisted.internet APIs as usual.
+
+On Python 3, pygobject v3.4 or later is required.
+"""
+
+
+from typing import Union
+
+from gi.repository import GLib # type:ignore[import]
+
+from twisted.internet import _glibbase
+from twisted.internet.error import ReactorAlreadyRunning
+from twisted.python import runtime
+
# Older PyGObject releases require an explicit GLib.threads_init() before
# GLib may be used from multiple threads; newer releases removed the
# function, hence the getattr guard.
if getattr(GLib, "threads_init", None) is not None:
    GLib.threads_init()
+
+
class GIReactor(_glibbase.GlibReactorBase):
    """
    GObject-introspection event loop reactor.

    @ivar _gapplication: A C{Gio.Application} instance that was registered
        with C{registerGApplication}.
    """

    # By default no Application is registered:
    _gapplication = None

    def __init__(self, useGtk=False):
        # useGtk is accepted for signature compatibility with the other
        # glib-based reactors but is not used by this implementation.
        _glibbase.GlibReactorBase.__init__(self, GLib, None)

    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.

        We will C{hold} the application so it doesn't exit on its own. In
        versions of C{python-gi} 3.2 and later, we exit the event loop using
        the C{app.quit} method which overrides any holds. Older versions are
        not supported.

        @raise RuntimeError: If an application is already registered, or the
            given application lacks a C{quit} method (PyGObject < 3.2).
        @raise ReactorAlreadyRunning: If the reactor has already started.
        """
        if self._gapplication is not None:
            raise RuntimeError("Can't register more than one application instance.")
        if self._started:
            raise ReactorAlreadyRunning(
                "Can't register application after reactor was started."
            )
        if not hasattr(app, "quit"):
            raise RuntimeError(
                "Application registration is not supported in"
                " versions of PyGObject prior to 3.2."
            )
        self._gapplication = app

        def run():
            # Hold the application so it keeps running until the reactor
            # stops it, then enter the application's own main loop.
            app.hold()
            app.run(None)

        # Replace the base reactor's main-loop entry and exit hooks with
        # the application's loop control.
        self._run = run

        self._crash = app.quit
+
+
class PortableGIReactor(_glibbase.GlibReactorBase):
    """
    Portable GObject Introspection event loop reactor, used on non-POSIX
    platforms.
    """

    def __init__(self, useGtk=False):
        super().__init__(GLib, None, useGtk=useGtk)

    def registerGApplication(self, app):
        """
        Registering a C{Gio.Application} or C{Gtk.Application} is only
        available on the non-portable reactor; this one refuses it.
        """
        raise NotImplementedError("GApplication is not currently supported on Windows.")

    def simulate(self) -> None:
        """
        For compatibility only. Do nothing.
        """
+
+
def install(useGtk: bool = False) -> Union[GIReactor, PortableGIReactor]:
    """
    Configure the twisted mainloop to be run inside the glib mainloop.

    @param useGtk: A hint that the Gtk GUI will or will not be used. Currently
        does not modify any behavior.
    """
    if runtime.platform.getType() == "posix":
        newReactor: Union[GIReactor, PortableGIReactor] = GIReactor(useGtk=useGtk)
    else:
        newReactor = PortableGIReactor(useGtk=useGtk)

    from twisted.internet.main import installReactor

    installReactor(newReactor)
    return newReactor
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/glib2reactor.py b/contrib/python/Twisted/py3/twisted/internet/glib2reactor.py
new file mode 100644
index 0000000000..9a11bec02a
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/glib2reactor.py
@@ -0,0 +1,50 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module provides support for Twisted to interact with the glib mainloop.
+This is like gtk2, but slightly faster and does not require a working
+$DISPLAY. However, you cannot run GUIs under this reactor: for that you must
+use the gtk2reactor instead.
+
+In order to use this support, simply do the following::
+
+ from twisted.internet import glib2reactor
+ glib2reactor.install()
+
+Then use twisted.internet APIs as usual. The other methods here are not
+intended to be called directly.
+"""
+
+from incremental import Version
+
+from ._deprecate import deprecatedGnomeReactor
+
+deprecatedGnomeReactor("glib2reactor", Version("Twisted", 23, 8, 0))
+
+from twisted.internet import gtk2reactor
+
+
class Glib2Reactor(gtk2reactor.Gtk2Reactor):
    """
    A reactor driven by the plain glib mainloop (no GTK event sources).
    """

    def __init__(self):
        """
        Initialise the underlying GTK2 reactor with C{useGtk=False}.
        """
        super().__init__(useGtk=False)
+
+
def install():
    """
    Configure the twisted mainloop to be run inside the glib mainloop.
    """
    newReactor = Glib2Reactor()
    from twisted.internet.main import installReactor

    installReactor(newReactor)
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/gtk2reactor.py b/contrib/python/Twisted/py3/twisted/internet/gtk2reactor.py
new file mode 100644
index 0000000000..b4e0c4c4e1
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/gtk2reactor.py
@@ -0,0 +1,119 @@
+# -*- test-case-name: twisted.internet.test -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+This module provides support for Twisted to interact with the glib/gtk2
+mainloop.
+
+In order to use this support, simply do the following::
+
+ from twisted.internet import gtk2reactor
+ gtk2reactor.install()
+
+Then use twisted.internet APIs as usual. The other methods here are not
+intended to be called directly.
+"""
+
+from incremental import Version
+
+from ._deprecate import deprecatedGnomeReactor
+
+deprecatedGnomeReactor("gtk2reactor", Version("Twisted", 23, 8, 0))
+
+# System Imports
+import sys
+
+# Twisted Imports
+from twisted.internet import _glibbase
+from twisted.python import runtime
+
+# Certain old versions of pygtk and gi crash if imported at the same
+# time. This is a problem when running Twisted's unit tests, since they will
+# attempt to run both gtk2 and gtk3/gi tests. However, gireactor makes sure
+# that if we are in such an old version, and gireactor was imported,
+# gtk2reactor will not be importable. So we don't *need* to enforce that here
+# as well; whichever is imported first will still win. Moreover, additional
+# enforcement in this module is unnecessary in modern versions, and downright
+# problematic in certain versions where for some reason importing gtk also
+# imports some subset of gi. So we do nothing here, relying on gireactor to
+# prevent the crash.
+
+try:
+ if not hasattr(sys, "frozen"):
+ # Don't want to check this for py2exe
+ import pygtk # type: ignore[import]
+
+ pygtk.require("2.0")
+except (ImportError, AttributeError):
+ pass # maybe we're using pygtk before this hack existed.
+
+import gobject # type: ignore[import]
+
+if not hasattr(gobject, "IO_HUP"):
+ # gi.repository's legacy compatibility helper raises an AttributeError with
+ # a custom error message rather than a useful ImportError, so things tend
+ # to fail loudly. Things that import this module expect an ImportError if,
+ # well, something failed to import, and treat an AttributeError as an
+ # arbitrary application code failure, so we satisfy that expectation here.
+ raise ImportError("pygobject 2.x is not installed. Use the `gi` reactor.")
+
+if hasattr(gobject, "threads_init"):
+ # recent versions of python-gtk expose this. python-gtk=2.4.1
+ # (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
+ # glib-2.2.3) does not.
+ gobject.threads_init()
+
+
class Gtk2Reactor(_glibbase.GlibReactorBase):
    """
    PyGTK+ 2 event loop reactor.
    """

    def __init__(self, useGtk=True):
        gtkModule = None
        # The check is deliberately `is True` (not truthiness), matching
        # the historical behavior.
        if useGtk is True:
            import gtk as gtkModule  # type: ignore[import]

        _glibbase.GlibReactorBase.__init__(self, gobject, gtkModule, useGtk=useGtk)
+
+
# We don't bother deprecating the PortableGtkReactor: the original code was
# removed and replaced with this backward-compatible alias for the generic
# GTK reactor.
PortableGtkReactor = Gtk2Reactor
+
+
def install(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: should glib rather than GTK+ event loop be
        used (this will be slightly faster but does not support GUI).
    """
    newReactor = Gtk2Reactor(useGtk)
    from twisted.internet.main import installReactor

    installReactor(newReactor)
    return newReactor
+
+
def portableInstall(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: Accepted for signature compatibility only; the portable
        reactor is constructed without it.
    """
    newReactor = PortableGtkReactor()
    from twisted.internet.main import installReactor

    installReactor(newReactor)
    return newReactor
+
+
# Select the platform-appropriate installer. On POSIX the native install()
# defined above is already correct, so only the non-POSIX case needs to
# rebind the name (the original code had a no-op `install = install`
# branch, removed here).
if runtime.platform.getType() != "posix":
    install = portableInstall
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/gtk3reactor.py b/contrib/python/Twisted/py3/twisted/internet/gtk3reactor.py
new file mode 100644
index 0000000000..a2c60f9fec
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/gtk3reactor.py
@@ -0,0 +1,22 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module is a legacy compatibility alias for L{twisted.internet.gireactor}.
+See that module instead.
+"""
+
+from incremental import Version
+
+from ._deprecate import deprecatedGnomeReactor
+
+deprecatedGnomeReactor("gtk3reactor", Version("Twisted", 23, 8, 0))
+
+from twisted.internet import gireactor
+
# Backwards-compatible aliases: the GTK3 reactor is now implemented by
# twisted.internet.gireactor.
Gtk3Reactor = gireactor.GIReactor
PortableGtk3Reactor = gireactor.PortableGIReactor

# install() is re-exported unchanged from gireactor.
install = gireactor.install

__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/inotify.py b/contrib/python/Twisted/py3/twisted/internet/inotify.py
new file mode 100644
index 0000000000..0fd8fd681c
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/inotify.py
@@ -0,0 +1,426 @@
+# -*- test-case-name: twisted.internet.test.test_inotify -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module provides support for Twisted to linux inotify API.
+
+In order to use this support, simply do the following (and start a reactor
+at some point)::
+
+ from twisted.internet import inotify
+ from twisted.python import filepath
+
+ def notify(ignored, filepath, mask):
+ \"""
+ For historical reasons, an opaque handle is passed as first
+ parameter. This object should never be used.
+
+ @param filepath: FilePath on which the event happened.
+ @param mask: inotify event as hexadecimal masks
+ \"""
+ print("event %s on %s" % (
+ ', '.join(inotify.humanReadableMask(mask)), filepath))
+
+ notifier = inotify.INotify()
+ notifier.startReading()
+ notifier.watch(filepath.FilePath("/some/directory"), callbacks=[notify])
+ notifier.watch(filepath.FilePath(b"/some/directory2"), callbacks=[notify])
+
+Note that in the above example, a L{FilePath} which is a L{bytes} path name
+or L{str} path name may be used. However, no matter what type of
+L{FilePath} is passed to this module, internally the L{FilePath} is
+converted to L{bytes} according to L{sys.getfilesystemencoding}.
+For any L{FilePath} returned by this module, the caller is responsible for
+converting from a L{bytes} path name to a L{str} path name.
+
+@since: 10.1
+"""
+
+
+import os
+import struct
+
+from twisted.internet import fdesc
+from twisted.internet.abstract import FileDescriptor
+from twisted.python import _inotify, log
+
+# from /usr/src/linux/include/linux/inotify.h
+
IN_ACCESS = 0x00000001  # File was accessed
IN_MODIFY = 0x00000002  # File was modified
IN_ATTRIB = 0x00000004  # Metadata changed
IN_CLOSE_WRITE = 0x00000008  # Writeable file was closed
IN_CLOSE_NOWRITE = 0x00000010  # Unwriteable file closed
IN_OPEN = 0x00000020  # File was opened
IN_MOVED_FROM = 0x00000040  # File was moved from X
IN_MOVED_TO = 0x00000080  # File was moved to Y
IN_CREATE = 0x00000100  # Subfile was created
IN_DELETE = 0x00000200  # Subfile was deleted
IN_DELETE_SELF = 0x00000400  # Self was deleted
IN_MOVE_SELF = 0x00000800  # Self was moved
IN_UNMOUNT = 0x00002000  # Backing fs was unmounted
IN_Q_OVERFLOW = 0x00004000  # Event queued overflowed
IN_IGNORED = 0x00008000  # File was ignored

IN_ONLYDIR = 0x01000000  # only watch the path if it is a directory
IN_DONT_FOLLOW = 0x02000000  # don't follow a sym link
IN_MASK_ADD = 0x20000000  # add to the mask of an already existing watch
IN_ISDIR = 0x40000000  # event occurred against dir
IN_ONESHOT = 0x80000000  # only send event once

# Convenience unions of related events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE  # closes
IN_MOVED = IN_MOVED_FROM | IN_MOVED_TO  # moves
IN_CHANGED = IN_MODIFY | IN_ATTRIB  # changes

# Default mask used by INotify.watch when the caller does not supply one.
IN_WATCH_MASK = (
    IN_MODIFY
    | IN_ATTRIB
    | IN_CREATE
    | IN_DELETE
    | IN_DELETE_SELF
    | IN_MOVE_SELF
    | IN_UNMOUNT
    | IN_MOVED_FROM
    | IN_MOVED_TO
)


# Ordered (flag, name) pairs used by humanReadableMask; the order here
# determines the order of names in its result.
_FLAG_TO_HUMAN = [
    (IN_ACCESS, "access"),
    (IN_MODIFY, "modify"),
    (IN_ATTRIB, "attrib"),
    (IN_CLOSE_WRITE, "close_write"),
    (IN_CLOSE_NOWRITE, "close_nowrite"),
    (IN_OPEN, "open"),
    (IN_MOVED_FROM, "moved_from"),
    (IN_MOVED_TO, "moved_to"),
    (IN_CREATE, "create"),
    (IN_DELETE, "delete"),
    (IN_DELETE_SELF, "delete_self"),
    (IN_MOVE_SELF, "move_self"),
    (IN_UNMOUNT, "unmount"),
    (IN_Q_OVERFLOW, "queue_overflow"),
    (IN_IGNORED, "ignored"),
    (IN_ONLYDIR, "only_dir"),
    (IN_DONT_FOLLOW, "dont_follow"),
    (IN_MASK_ADD, "mask_add"),
    (IN_ISDIR, "is_dir"),
    (IN_ONESHOT, "one_shot"),
]


def humanReadableMask(mask):
    """
    Convert an inotify event mask into a list of human readable flag names.

    @param mask: a bitwise OR of C{IN_*} event constants.
    @type mask: L{int}

    @return: the name of every flag set in C{mask}, in the order they
        appear in L{_FLAG_TO_HUMAN}.
    @rtype: L{list} of L{str}
    """
    return [name for flag, name in _FLAG_TO_HUMAN if flag & mask]
+
+
class _Watch:
    """
    A single watch point in the filesystem.  These objects are created and
    owned by L{INotify}; users should not instantiate them directly.

    @ivar path: the path being monitored, stored in bytes mode.
    @ivar mask: the inotify event mask monitored at this watch point.
    @ivar autoAdd: whether newly created subdirectories should be watched
        automatically.
    @ivar callbacks: L{list} of callables invoked for each event that
        occurs on this watch.
    """

    def __init__(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None):
        self.path = path.asBytesMode()
        self.mask = mask
        self.autoAdd = autoAdd
        self.callbacks = [] if callbacks is None else callbacks

    def _notify(self, filepath, events):
        """
        Dispatch C{events} on C{filepath} to every registered callback.
        Used by L{INotify}.
        """
        bytesPath = filepath.asBytesMode()
        for callback in self.callbacks:
            callback(self, bytesPath, events)
+
+
class INotify(FileDescriptor):
    """
    The INotify file descriptor, it basically does everything related
    to INotify, from reading to notifying watch points.

    @ivar _buffer: a L{bytes} containing the data read from the inotify fd.

    @ivar _watchpoints: a L{dict} that maps from inotify watch ids to
        watchpoints objects

    @ivar _watchpaths: a L{dict} that maps from watched paths to the
        inotify watch ids
    """

    # Class-level alias so tests can substitute a fake low-level module.
    _inotify = _inotify

    def __init__(self, reactor=None):
        FileDescriptor.__init__(self, reactor=reactor)

        # Smart way to allow parametrization of libc so I can override
        # it and test for the system errors.
        self._fd = self._inotify.init()

        fdesc.setNonBlocking(self._fd)
        fdesc._setCloseOnExec(self._fd)

        # The next 2 lines are needed to have self.loseConnection()
        # to call connectionLost() on us. Since we already created the
        # fd that talks to inotify we want to be notified even if we
        # haven't yet started reading.
        self.connected = 1
        self._writeDisconnected = True

        self._buffer = b""
        self._watchpoints = {}
        self._watchpaths = {}

    def _addWatch(self, path, mask, autoAdd, callbacks):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API and checks for any errors after the
        call. If there's an error L{INotify._addWatch} can raise an
        INotifyError. If there's no error it proceeds creating a watchpoint and
        adding a watchpath for inverse lookup of the file descriptor from the
        path.

        @return: the inotify watch descriptor for C{path}.
        """
        path = path.asBytesMode()
        wd = self._inotify.add(self._fd, path, mask)

        iwp = _Watch(path, mask, autoAdd, callbacks)

        # Maintain both directions of the wd <-> path mapping.
        self._watchpoints[wd] = iwp
        self._watchpaths[path] = wd

        return wd

    def _rmWatch(self, wd):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API to remove an fd from inotify then
        removes the corresponding watchpoint from the internal mapping together
        with the file descriptor from the watchpath.
        """
        self._inotify.remove(self._fd, wd)
        iwp = self._watchpoints.pop(wd)
        self._watchpaths.pop(iwp.path)

    def connectionLost(self, reason):
        """
        Release the inotify file descriptor and do the necessary cleanup
        """
        FileDescriptor.connectionLost(self, reason)
        if self._fd >= 0:
            try:
                os.close(self._fd)
            except OSError as e:
                # Best effort: log but do not propagate a close failure.
                log.err(e, "Couldn't close INotify file descriptor.")

    def fileno(self):
        """
        Get the underlying file descriptor from this inotify observer.
        Required by L{abstract.FileDescriptor} subclasses.
        """
        return self._fd

    def doRead(self):
        """
        Read some data from the observed file descriptors
        """
        fdesc.readFromFD(self._fd, self._doRead)

    def _doRead(self, in_):
        """
        Work on the data just read from the file descriptor.

        @param in_: raw bytes read from the inotify fd; appended to the
            internal buffer and parsed as a stream of inotify events.
        """
        self._buffer += in_
        # Each event starts with a fixed 16-byte header:
        # (watch descriptor, mask, cookie, length of the name field).
        while len(self._buffer) >= 16:
            wd, mask, cookie, size = struct.unpack("=LLLL", self._buffer[0:16])

            if size:
                # The name field is NUL-padded up to the declared size.
                name = self._buffer[16 : 16 + size].rstrip(b"\0")
            else:
                name = None

            # Consume this event (header plus name) from the buffer.
            self._buffer = self._buffer[16 + size :]

            try:
                iwp = self._watchpoints[wd]
            except KeyError:
                # Watch was removed while events for it were still queued.
                continue

            path = iwp.path.asBytesMode()
            if name:
                path = path.child(name)
            iwp._notify(path, mask)

            if iwp.autoAdd and mask & IN_ISDIR and mask & IN_CREATE:
                # mask & IN_ISDIR already guarantees that the path is a
                # directory. There's no way you can get here without a
                # directory anyway, so no point in checking for that again.
                new_wd = self.watch(
                    path, mask=iwp.mask, autoAdd=True, callbacks=iwp.callbacks
                )
                # This is very very very hacky and I'd rather not do this but
                # we have no other alternative that is less hacky other than
                # surrender. We use callLater because we don't want to have
                # too many events waiting while we process these subdirs, we
                # must always answer events as fast as possible or the overflow
                # might come.
                self.reactor.callLater(0, self._addChildren, self._watchpoints[new_wd])
            if mask & IN_DELETE_SELF:
                self._rmWatch(wd)
                # NOTE(review): this tears down the whole INotify observer as
                # soon as any watched path deletes itself — confirm that is
                # intended rather than only removing the single watch.
                self.loseConnection()

    def _addChildren(self, iwp):
        """
        This is a very private method, please don't even think about using it.

        Note that this is a fricking hack... it's because we cannot be fast
        enough in adding a watch to a directory and so we basically end up
        getting here too late if some operations have already been going on in
        the subdir, we basically need to catchup. This eventually ends up
        meaning that we generate double events, your app must be resistant.
        """
        try:
            listdir = iwp.path.children()
        except OSError:
            # Somebody or something (like a test) removed this directory while
            # we were in the callLater(0...) waiting. It doesn't make sense to
            # process it anymore
            return

        # note that it's true that listdir will only see the subdirs inside
        # path at the moment of the call but path is monitored already so if
        # something is created we will receive an event.
        for f in listdir:
            # It's a directory, watch it and then add its children
            if f.isdir():
                wd = self.watch(f, mask=iwp.mask, autoAdd=True, callbacks=iwp.callbacks)
                iwp._notify(f, IN_ISDIR | IN_CREATE)
                # now f is watched, we can add its children the callLater is to
                # avoid recursion
                self.reactor.callLater(0, self._addChildren, self._watchpoints[wd])

            # It's a file and we notify it.
            if f.isfile():
                iwp._notify(f, IN_CREATE | IN_CLOSE_WRITE)

    def watch(
        self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False
    ):
        """
        Watch the 'mask' events in given path. Can raise C{INotifyError} when
        there's a problem while adding a directory.

        @param path: The path needing monitoring
        @type path: L{FilePath}

        @param mask: The events that should be watched
        @type mask: L{int}

        @param autoAdd: if True automatically add newly created
            subdirectories
        @type autoAdd: L{bool}

        @param callbacks: A list of callbacks that should be called
            when an event happens in the given path.
            The callback should accept 3 arguments:
            (ignored, filepath, mask)
        @type callbacks: L{list} of callables

        @param recursive: Also add all the subdirectories in this path
        @type recursive: L{bool}
        """
        if recursive:
            # This behavior is needed to be compatible with the windows
            # interface for filesystem changes:
            # http://msdn.microsoft.com/en-us/library/aa365465(VS.85).aspx
            # ReadDirectoryChangesW can do bWatchSubtree so it doesn't
            # make sense to implement this at a higher abstraction
            # level when other platforms support it already
            for child in path.walk():
                if child.isdir():
                    self.watch(child, mask, autoAdd, callbacks, recursive=False)
        else:
            # Re-use an existing watch for this path if there is one.
            wd = self._isWatched(path)
            if wd:
                return wd

            mask = mask | IN_DELETE_SELF  # need this to remove the watch

            return self._addWatch(path, mask, autoAdd, callbacks)

    def ignore(self, path):
        """
        Remove the watch point monitoring the given path

        @param path: The path that should be ignored
        @type path: L{FilePath}

        @raise KeyError: if C{path} is not currently watched.
        """
        path = path.asBytesMode()
        wd = self._isWatched(path)
        if wd is None:
            raise KeyError(f"{path!r} is not watched")
        else:
            self._rmWatch(wd)

    def _isWatched(self, path):
        """
        Helper function that checks if the path is already monitored
        and returns its watchdescriptor if so or None otherwise.

        @param path: The path that should be checked
        @type path: L{FilePath}
        """
        path = path.asBytesMode()
        return self._watchpaths.get(path, None)
+
+
# Re-export the exception raised by the low-level wrapper so users need not
# import twisted.python._inotify directly.
INotifyError = _inotify.INotifyError


__all__ = [
    "INotify",
    "humanReadableMask",
    "IN_WATCH_MASK",
    "IN_ACCESS",
    "IN_MODIFY",
    "IN_ATTRIB",
    "IN_CLOSE_NOWRITE",
    "IN_CLOSE_WRITE",
    "IN_OPEN",
    "IN_MOVED_FROM",
    "IN_MOVED_TO",
    "IN_CREATE",
    "IN_DELETE",
    "IN_DELETE_SELF",
    "IN_MOVE_SELF",
    "IN_UNMOUNT",
    "IN_Q_OVERFLOW",
    "IN_IGNORED",
    "IN_ONLYDIR",
    "IN_DONT_FOLLOW",
    "IN_MASK_ADD",
    "IN_ISDIR",
    "IN_ONESHOT",
    "IN_CLOSE",
    "IN_MOVED",
    "IN_CHANGED",
]
diff --git a/contrib/python/Twisted/py3/twisted/internet/interfaces.py b/contrib/python/Twisted/py3/twisted/internet/interfaces.py
new file mode 100644
index 0000000000..78380ccc39
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/interfaces.py
@@ -0,0 +1,2756 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Interface documentation.
+
+Maintainer: Itamar Shtull-Trauring
+"""
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ AnyStr,
+ Callable,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+
+from zope.interface import Attribute, Interface
+
+from twisted.python.failure import Failure
+
+if TYPE_CHECKING:
+ from socket import AddressFamily
+
+ try:
+ from OpenSSL.SSL import (
+ Connection as OpenSSLConnection,
+ Context as OpenSSLContext,
+ )
+ except ImportError:
+ OpenSSLConnection = OpenSSLContext = object # type: ignore[misc,assignment]
+
+ from twisted.internet.abstract import FileDescriptor
+ from twisted.internet.address import IPv4Address, IPv6Address, UNIXAddress
+ from twisted.internet.defer import Deferred
+ from twisted.internet.protocol import (
+ ClientFactory,
+ ConnectedDatagramProtocol,
+ DatagramProtocol,
+ Factory,
+ ServerFactory,
+ )
+ from twisted.internet.ssl import ClientContextFactory
+ from twisted.names.dns import Query, RRHeader
+ from twisted.protocols.tls import TLSMemoryBIOProtocol
+ from twisted.python.runtime import platform
+
+ if platform.supportsThreads():
+ from twisted.python.threadpool import ThreadPool
+ else:
+ ThreadPool = object # type: ignore[misc, assignment]
+
+
class IAddress(Interface):
    """
    An address, e.g. a TCP C{(host, port)}.

    This is a pure marker interface: implementations carry whatever
    attributes their transport needs.  Default implementations are in
    L{twisted.internet.address}.
    """
+
+
+### Reactor Interfaces
+
+
class IConnector(Interface):
    """
    Object used to interface between connections and protocols.

    Each L{IConnector} manages one connection.  (Methods are declared in
    zope.interface style, without C{self}.)
    """

    def stopConnecting() -> None:
        """
        Stop attempting to connect.
        """

    def disconnect() -> None:
        """
        Disconnect regardless of the connection state.

        If we are connected, disconnect, if we are trying to connect,
        stop trying.
        """

    def connect() -> None:
        """
        Try to connect to remote address.
        """

    def getDestination() -> IAddress:
        """
        Return destination this will try to connect to.

        @return: An object which provides L{IAddress}.
        """
+
+
class IResolverSimple(Interface):
    """
    A simplified resolver that can look up an IP address for a DNS name.
    """

    def getHostByName(name: str, timeout: Sequence[int] = ()) -> "Deferred[str]":
        """
        Resolve the domain name C{name} into an IP address.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: The callback of the Deferred that is returned will be
            passed a string that represents the IP address of the
            specified name, or the errback will be called if the
            lookup times out.  If multiple types of address records
            are associated with the name, A6 records will be returned
            in preference to AAAA records, which will be returned in
            preference to A records.  If there are multiple records of
            the type to be returned, one will be selected at random.

        @raise twisted.internet.defer.TimeoutError: Raised
            (asynchronously) if the name cannot be resolved within the
            specified timeout period.
        """
+
+
class IHostResolution(Interface):
    """
    An L{IHostResolution} represents an in-progress recursive query
    for a DNS name.

    @since: Twisted 17.1.0
    """

    name = Attribute(
        """
        L{unicode}; the name of the host being resolved.
        """
    )

    def cancel() -> None:
        """
        Stop the hostname resolution in progress.
        """
+
+
class IResolutionReceiver(Interface):
    """
    An L{IResolutionReceiver} receives the results of a hostname resolution in
    progress, initiated by an L{IHostnameResolver}.

    @since: Twisted 17.1.0
    """

    def resolutionBegan(resolutionInProgress: IHostResolution) -> None:
        """
        A hostname resolution began.

        @param resolutionInProgress: an L{IHostResolution}.
        """

    def addressResolved(address: IAddress) -> None:
        """
        An internet address. This is called when an address for the given name
        is discovered. In the current implementation this practically means
        L{IPv4Address} or L{IPv6Address}, but implementations of this interface
        should be lenient to other types being passed to this interface as
        well, for future-proofing.

        @param address: An address object.
        """

    def resolutionComplete() -> None:
        """
        Resolution has completed; no further addresses will be relayed to
        L{IResolutionReceiver.addressResolved}.
        """
+
+
class IHostnameResolver(Interface):
    """
    An L{IHostnameResolver} can resolve a host name and port number into a
    series of L{IAddress} objects.

    @since: Twisted 17.1.0
    """

    def resolveHostName(
        resolutionReceiver: IResolutionReceiver,
        hostName: str,
        portNumber: int = 0,
        addressTypes: Optional[Sequence[Type[IAddress]]] = None,
        transportSemantics: str = "TCP",
    ) -> IHostResolution:
        """
        Initiate a hostname resolution.

        @param resolutionReceiver: an object that will receive each resolved
            address as it arrives.
        @param hostName: The name of the host to resolve. If this contains
            non-ASCII code points, they will be converted to IDNA first.
        @param portNumber: The port number that the returned addresses should
            include.
        @param addressTypes: An iterable of implementors of L{IAddress} that
            are acceptable values for C{resolutionReceiver} to receive to its
            L{addressResolved <IResolutionReceiver.addressResolved>}. In
            practice, this means an iterable containing
            L{twisted.internet.address.IPv4Address},
            L{twisted.internet.address.IPv6Address}, both, or neither.
        @param transportSemantics: A string describing the semantics of the
            transport; either C{'TCP'} for stream-oriented transports or
            C{'UDP'} for datagram-oriented; see
            L{twisted.internet.address.IPv6Address.type} and
            L{twisted.internet.address.IPv4Address.type}.

        @return: The resolution in progress.
        """
+
+
+class IResolver(IResolverSimple):
+ def query(
+ query: "Query", timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Dispatch C{query} to the method which can handle its type.
+
+ @param query: The DNS query being issued, to which a response is to be
+ generated.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupAddress(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an A record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupAddress6(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an A6 record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupIPV6Address(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an AAAA record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupMailExchange(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an MX record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupNameservers(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an NS record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupCanonicalName(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform a CNAME record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupMailBox(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an MB record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupMailGroup(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an MG record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupMailRename(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an MR record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupPointer(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform a PTR record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupAuthority(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an SOA record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupNull(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform a NULL record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupWellKnownServices(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform a WKS record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupHostInfo(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform a HINFO record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupMailboxInfo(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform an MINFO record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
+ def lookupText(
+ name: str, timeout: Sequence[int]
+ ) -> "Deferred[Tuple[RRHeader, RRHeader, RRHeader]]":
+ """
+ Perform a TXT record lookup.
+
+ @param name: DNS name to resolve.
+ @param timeout: Number of seconds after which to reissue the query.
+ When the last timeout expires, the query is considered failed.
+
+ @return: A L{Deferred} which fires with a three-tuple of lists of
+ L{twisted.names.dns.RRHeader} instances. The first element of the
+ tuple gives answers. The second element of the tuple gives
+ authorities. The third element of the tuple gives additional
+ information. The L{Deferred} may instead fail with one of the
+ exceptions defined in L{twisted.names.error} or with
+ C{NotImplementedError}.
+ """
+
    def lookupResponsibility(
        name: str, timeout: Sequence[int]
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform an RP record lookup.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
            authorities. The third element of the tuple gives additional
            information. The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
    def lookupAFSDatabase(
        name: str, timeout: Sequence[int]
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform an AFSDB record lookup.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
            authorities. The third element of the tuple gives additional
            information. The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
    def lookupService(
        name: str, timeout: Sequence[int]
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform an SRV record lookup.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
            authorities. The third element of the tuple gives additional
            information. The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
    def lookupAllRecords(
        name: str, timeout: Sequence[int]
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform an ALL_RECORD lookup.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
            authorities. The third element of the tuple gives additional
            information. The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
    def lookupSenderPolicy(
        name: str, timeout: Sequence[int]
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform a SPF record lookup.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
            authorities. The third element of the tuple gives additional
            information. The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
    def lookupNamingAuthorityPointer(
        name: str, timeout: Sequence[int]
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform a NAPTR record lookup.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances. The first element of the
            tuple gives answers. The second element of the tuple gives
            authorities. The third element of the tuple gives additional
            information. The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
    def lookupZone(
        name: str, timeout: int
    ) -> "Deferred[Tuple[List[RRHeader], List[RRHeader], List[RRHeader]]]":
        """
        Perform an AXFR record lookup.

        NB This is quite different from other DNS requests. See
        U{http://cr.yp.to/djbdns/axfr-notes.html} for more
        information.

        NB Unlike other C{lookup*} methods, the timeout here is not a
        list of ints, it is a single int.

        @param name: DNS name to resolve.
        @param timeout: When this timeout expires, the query is
            considered failed.

        @return: A L{Deferred} which fires with a three-tuple of lists of
            L{twisted.names.dns.RRHeader} instances.
            The first element of the tuple gives answers.
            The second and third elements are always empty.
            The L{Deferred} may instead fail with one of the
            exceptions defined in L{twisted.names.error} or with
            C{NotImplementedError}.
        """
+
+
class IReactorTCP(Interface):
    """
    TCP socket methods.
    """

    def listenTCP(
        port: int, factory: "ServerFactory", backlog: int, interface: str
    ) -> "IListeningPort":
        """
        Connects a given protocol factory to the given numeric TCP/IP port.

        @param port: a port number on which to listen
        @param factory: a L{twisted.internet.protocol.ServerFactory} instance
        @param backlog: size of the listen queue
        @param interface: The local IPv4 or IPv6 address to which to bind;
            defaults to '', i.e. all IPv4 addresses. To bind to all IPv4 and
            IPv6 addresses, you must call this method twice.

        @return: an object that provides L{IListeningPort}.

        @raise CannotListenError: as defined here
            L{twisted.internet.error.CannotListenError},
            if it cannot listen on this port (e.g., it
            cannot bind to the required port number)
        """

    def connectTCP(
        host: str,
        port: int,
        factory: "ClientFactory",
        timeout: float,
        bindAddress: Optional[Tuple[str, int]],
    ) -> IConnector:
        """
        Connect a TCP client.

        @param host: A hostname or an IPv4 or IPv6 address literal.
        @param port: a port number
        @param factory: a L{twisted.internet.protocol.ClientFactory} instance
        @param timeout: number of seconds to wait before assuming the
            connection has failed.
        @param bindAddress: a (host, port) tuple of local address to bind
            to, or None.

        @return: An object which provides L{IConnector}. This connector will
            call various callbacks on the factory when a connection is
            made, failed, or lost - see
            L{ClientFactory<twisted.internet.protocol.ClientFactory>}
            docs for details.
        """
+
+
class IReactorSSL(Interface):
    """
    SSL socket methods.
    """

    def connectSSL(
        host: str,
        port: int,
        factory: "ClientFactory",
        contextFactory: "ClientContextFactory",
        timeout: float,
        bindAddress: Optional[Tuple[str, int]],
    ) -> IConnector:
        """
        Connect a client Protocol to a remote SSL socket.

        @param host: a host name
        @param port: a port number
        @param factory: a L{twisted.internet.protocol.ClientFactory} instance
        @param contextFactory: a L{twisted.internet.ssl.ClientContextFactory} object.
        @param timeout: number of seconds to wait before assuming the
            connection has failed.
        @param bindAddress: a (host, port) tuple of local address to bind to,
            or L{None}.

        @return: An object which provides L{IConnector}.
        """

    def listenSSL(
        port: int,
        factory: "ServerFactory",
        contextFactory: "IOpenSSLContextFactory",
        backlog: int,
        interface: str,
    ) -> "IListeningPort":
        """
        Connects a given protocol factory to the given numeric TCP/IP port.
        The connection is a SSL one, using contexts created by the context
        factory.

        @param port: a port number on which to listen
        @param factory: a L{twisted.internet.protocol.ServerFactory} instance
        @param contextFactory: an implementor of L{IOpenSSLContextFactory}
        @param backlog: size of the listen queue
        @param interface: the hostname to bind to, defaults to '' (all)

        @return: an object that provides L{IListeningPort}.
        """
+
+
class IReactorUNIX(Interface):
    """
    Stream-oriented UNIX socket methods (client connect and server listen).
    """

    def connectUNIX(
        address: str, factory: "ClientFactory", timeout: float, checkPID: bool
    ) -> IConnector:
        """
        Connect a client protocol to a UNIX socket.

        @param address: a path to a unix socket on the filesystem.
        @param factory: a L{twisted.internet.protocol.ClientFactory} instance
        @param timeout: number of seconds to wait before assuming the connection
            has failed.
        @param checkPID: if True, check for a pid file to verify that a server
            is listening. If C{address} is a Linux abstract namespace path,
            this must be C{False}.

        @return: An object which provides L{IConnector}.
        """

    def listenUNIX(
        address: str, factory: "Factory", backlog: int, mode: int, wantPID: bool
    ) -> "IListeningPort":
        """
        Listen on a UNIX socket.

        @param address: a path to a unix socket on the filesystem.
        @param factory: a L{twisted.internet.protocol.Factory} instance.
        @param backlog: number of connections to allow in backlog.
        @param mode: The mode (B{not} umask) to set on the unix socket. See
            platform specific documentation for information about how this
            might affect connection attempts.
        @param wantPID: if True, create a pidfile for the socket. If C{address}
            is a Linux abstract namespace path, this must be C{False}.

        @return: An object which provides L{IListeningPort}.
        """
+
+
class IReactorUNIXDatagram(Interface):
    """
    Datagram UNIX socket methods.
    """

    def connectUNIXDatagram(
        address: str,
        protocol: "ConnectedDatagramProtocol",
        maxPacketSize: int,
        mode: int,
        bindAddress: Optional[Tuple[str, int]],
    ) -> IConnector:
        """
        Connect a client protocol to a datagram UNIX socket.

        @param address: a path to a unix socket on the filesystem.
        @param protocol: a L{twisted.internet.protocol.ConnectedDatagramProtocol} instance
        @param maxPacketSize: maximum packet size to accept
        @param mode: The mode (B{not} umask) to set on the unix socket. See
            platform specific documentation for information about how this
            might affect connection attempts.
        @param bindAddress: address to bind to

        @return: An object which provides L{IConnector}.
        """

    def listenUNIXDatagram(
        address: str, protocol: "DatagramProtocol", maxPacketSize: int, mode: int
    ) -> "IListeningPort":
        """
        Listen on a datagram UNIX socket.

        @param address: a path to a unix socket on the filesystem.
        @param protocol: a L{twisted.internet.protocol.DatagramProtocol} instance.
        @param maxPacketSize: maximum packet size to accept
        @param mode: The mode (B{not} umask) to set on the unix socket. See
            platform specific documentation for information about how this
            might affect connection attempts.

        @return: An object which provides L{IListeningPort}.
        """
+
+
class IReactorWin32Events(Interface):
    """
    Win32 Event API methods.

    @since: 10.2
    """

    def addEvent(event: object, fd: "FileDescriptor", action: str) -> None:
        """
        Add a new win32 event to the event loop.

        @param event: a Win32 event object created using win32event.CreateEvent()
        @param fd: an instance of L{twisted.internet.abstract.FileDescriptor}
        @param action: a string that is a method name of the fd instance.
            This method is called in response to the event.
        """

    def removeEvent(event: object) -> None:
        """
        Remove an event.

        @param event: a Win32 event object added using L{IReactorWin32Events.addEvent}

        @return: None
        """
+
+
class IReactorUDP(Interface):
    """
    UDP socket methods.
    """

    def listenUDP(
        port: int, protocol: "DatagramProtocol", interface: str, maxPacketSize: int
    ) -> "IListeningPort":
        """
        Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @param port: A port number on which to listen.
        @param protocol: A L{DatagramProtocol} instance which will be
            connected to the given C{port}.
        @param interface: The local IPv4 or IPv6 address to which to bind;
            defaults to '', i.e. all IPv4 addresses.
        @param maxPacketSize: The maximum packet size to accept.

        @return: object which provides L{IListeningPort}.
        """
+
+
class IReactorMulticast(Interface):
    """
    UDP socket methods that support multicast.

    IMPORTANT: This is an experimental new interface. It may change
    without backwards compatibility. Suggestions are welcome.
    """

    def listenMulticast(
        port: int,
        protocol: "DatagramProtocol",
        interface: str,
        maxPacketSize: int,
        listenMultiple: bool,
    ) -> "IListeningPort":
        """
        Connects a given
        L{DatagramProtocol<twisted.internet.protocol.DatagramProtocol>} to the
        given numeric UDP port.

        @param port: A port number on which to listen.
        @param protocol: A L{DatagramProtocol} instance which will be
            connected to the given C{port}.
        @param interface: The local IPv4 or IPv6 address to which to bind;
            defaults to '', i.e. all IPv4 addresses.
        @param maxPacketSize: The maximum packet size to accept.
        @param listenMultiple: If set to True, allows multiple sockets to
            bind to the same address and port number at the same time.

        @returns: An object which provides L{IListeningPort}.

        @see: L{twisted.internet.interfaces.IMulticastTransport}
        @see: U{http://twistedmatrix.com/documents/current/core/howto/udp.html}
        """
+
+
class IReactorSocket(Interface):
    """
    Methods which allow a reactor to use externally created sockets.

    For example, to use C{adoptStreamPort} to implement behavior equivalent
    to that of L{IReactorTCP.listenTCP}, you might write code like this::

        from socket import SOMAXCONN, AF_INET, SOCK_STREAM, socket
        portSocket = socket(AF_INET, SOCK_STREAM)
        # Set FD_CLOEXEC on port, left as an exercise.  Then make it into a
        # non-blocking listening port:
        portSocket.setblocking(False)
        portSocket.bind(('192.168.1.2', 12345))
        portSocket.listen(SOMAXCONN)

        # Now have the reactor use it as a TCP port
        port = reactor.adoptStreamPort(
            portSocket.fileno(), AF_INET, YourFactory())

        # portSocket itself is no longer necessary, and needs to be cleaned
        # up by us.
        portSocket.close()

        # Whenever the server is no longer needed, stop it as usual.
        stoppedDeferred = port.stopListening()

    Another potential use is to inherit a listening descriptor from a parent
    process (for example, systemd or launchd), or to receive one over a UNIX
    domain socket.

    Some plans for extending this interface exist.  See:

        - U{http://twistedmatrix.com/trac/ticket/6594}: AF_UNIX SOCK_DGRAM ports
    """

    def adoptStreamPort(
        fileDescriptor: int, addressFamily: "AddressFamily", factory: "ServerFactory"
    ) -> "IListeningPort":
        """
        Add an existing listening I{SOCK_STREAM} socket to the reactor to
        monitor for new connections to accept and handle.

        @param fileDescriptor: A file descriptor associated with a socket which
            is already bound to an address and marked as listening.  The socket
            must be set non-blocking.  Any additional flags (for example,
            close-on-exec) must also be set by application code.  Application
            code is responsible for closing the file descriptor, which may be
            done as soon as C{adoptStreamPort} returns.
        @param addressFamily: The address family (or I{domain}) of the socket.
            For example, L{socket.AF_INET6}.
        @param factory: A L{ServerFactory} instance to use to create new
            protocols to handle connections accepted via this socket.

        @return: An object providing L{IListeningPort}.

        @raise twisted.internet.error.UnsupportedAddressFamily: If the
            given address family is not supported by this reactor, or
            not supported with the given socket type.
        @raise twisted.internet.error.UnsupportedSocketType: If the
            given socket type is not supported by this reactor, or not
            supported with the given address family.
        """

    def adoptStreamConnection(
        fileDescriptor: int, addressFamily: "AddressFamily", factory: "ServerFactory"
    ) -> None:
        """
        Add an existing connected I{SOCK_STREAM} socket to the reactor to
        monitor for data.

        Note that the given factory won't have its C{startFactory} and
        C{stopFactory} methods called, as there is no sensible time to call
        them in this situation.

        @param fileDescriptor: A file descriptor associated with a socket which
            is already connected.  The socket must be set non-blocking.  Any
            additional flags (for example, close-on-exec) must also be set by
            application code.  Application code is responsible for closing the
            file descriptor, which may be done as soon as
            C{adoptStreamConnection} returns.
        @param addressFamily: The address family (or I{domain}) of the socket.
            For example, L{socket.AF_INET6}.
        @param factory: A L{ServerFactory} instance to use to create a new
            protocol to handle the connection via this socket.

        @raise UnsupportedAddressFamily: If the given address family is not
            supported by this reactor, or not supported with the given socket
            type.
        @raise UnsupportedSocketType: If the given socket type is not supported
            by this reactor, or not supported with the given address family.
        """

    def adoptDatagramPort(
        fileDescriptor: int,
        addressFamily: "AddressFamily",
        protocol: "DatagramProtocol",
        maxPacketSize: int,
    ) -> "IListeningPort":
        """
        Add an existing listening I{SOCK_DGRAM} socket to the reactor to
        monitor for read and write readiness.

        @param fileDescriptor: A file descriptor associated with a socket which
            is already bound to an address.  The socket must be set
            non-blocking.  Any additional flags (for example, close-on-exec)
            must also be set by application code.  Application code is
            responsible for closing the file descriptor, which may be done as
            soon as C{adoptDatagramPort} returns.
        @param addressFamily: The address family or I{domain} of the socket.
            For example, L{socket.AF_INET6}.
        @param protocol: A L{DatagramProtocol} instance to connect to
            a UDP transport.
        @param maxPacketSize: The maximum packet size to accept.

        @return: An object providing L{IListeningPort}.

        @raise UnsupportedAddressFamily: If the given address family is not
            supported by this reactor, or not supported with the given socket
            type.
        @raise UnsupportedSocketType: If the given socket type is not supported
            by this reactor, or not supported with the given address family.
        """
+
+
class IReactorProcess(Interface):
    """
    Methods for launching and monitoring child processes.
    """

    def spawnProcess(
        processProtocol: "IProcessProtocol",
        executable: Union[bytes, str],
        args: Sequence[Union[bytes, str]],
        env: Optional[Mapping[AnyStr, AnyStr]] = None,
        path: Union[None, bytes, str] = None,
        uid: Optional[int] = None,
        gid: Optional[int] = None,
        usePTY: bool = False,
        childFDs: Optional[Mapping[int, Union[int, str]]] = None,
    ) -> "IProcessTransport":
        """
        Spawn a process, with a process protocol.

        Arguments given to this function that are listed as L{bytes} or
        L{unicode} may be encoded or decoded depending on the platform and the
        argument type given.  On UNIX systems (Linux, FreeBSD, macOS) and
        Python 2 on Windows, L{unicode} arguments will be encoded down to
        L{bytes} using the encoding given by L{sys.getfilesystemencoding}, to be
        used with the "narrow" OS APIs.  On Python 3 on Windows, L{bytes}
        arguments will be decoded up to L{unicode} using the encoding given by
        L{sys.getfilesystemencoding} (C{utf8}) and given to Windows's native "wide" APIs.

        @param processProtocol: An object which will be notified of all events
            related to the created process.

        @param executable: the file name to spawn - the full path should be
            used.

        @param args: the command line arguments to pass to the process; a
            sequence of strings.  The first string should be the executable's
            name.

        @param env: the environment variables to pass to the child process.
            The resulting behavior varies between platforms.  If:

                - C{env} is not set:
                    - On POSIX: pass an empty environment.
                    - On Windows: pass L{os.environ}.
                - C{env} is L{None}:
                    - On POSIX: pass L{os.environ}.
                    - On Windows: pass L{os.environ}.
                - C{env} is a L{dict}:
                    - On POSIX: pass the key/value pairs in C{env} as the
                      complete environment.
                    - On Windows: update L{os.environ} with the key/value
                      pairs in the L{dict} before passing it.  As a
                      consequence of U{bug #1640
                      <http://twistedmatrix.com/trac/ticket/1640>}, passing
                      keys with empty values in an effort to unset
                      environment variables I{won't} unset them.

        @param path: the path to run the subprocess in - defaults to the
            current directory.

        @param uid: user ID to run the subprocess as.  (Only available on POSIX
            systems.)

        @param gid: group ID to run the subprocess as.  (Only available on
            POSIX systems.)

        @param usePTY: if true, run this process in a pseudo-terminal.
            optionally a tuple of C{(masterfd, slavefd, ttyname)}, in which
            case use those file descriptors.  (Not available on all systems.)

        @param childFDs: A dictionary mapping file descriptors in the new child
            process to an integer or to the string 'r' or 'w'.

            If the value is an integer, it specifies a file descriptor in the
            parent process which will be mapped to a file descriptor (specified
            by the key) in the child process.  This is useful for things like
            inetd and shell-like file redirection.

            If it is the string 'r', a pipe will be created and attached to the
            child at that file descriptor: the child will be able to write to
            that file descriptor and the parent will receive read notification
            via the L{IProcessProtocol.childDataReceived} callback.  This is
            useful for the child's stdout and stderr.

            If it is the string 'w', similar setup to the previous case will
            occur, with the pipe being readable by the child instead of
            writeable.  The parent process can write to that file descriptor
            using L{IProcessTransport.writeToChild}.  This is useful for the
            child's stdin.

            If childFDs is not passed, the default behaviour is to use a
            mapping that opens the usual stdin/stdout/stderr pipes.

        @see: L{twisted.internet.protocol.ProcessProtocol}

        @return: An object which provides L{IProcessTransport}.

        @raise OSError: Raised with errno C{EAGAIN} or C{ENOMEM} if there are
            insufficient system resources to create a new process.
        """
+
+
class IReactorTime(Interface):
    """
    Time methods that a Reactor should implement.
    """

    def seconds() -> float:
        """
        Get the current time in seconds.

        @return: A number-like object of some sort.
        """

    def callLater(
        delay: float, callable: Callable[..., Any], *args: object, **kwargs: object
    ) -> "IDelayedCall":
        """
        Call a function later.

        @param delay: the number of seconds to wait.
        @param callable: the callable object to call later.
        @param args: the arguments to call it with.
        @param kwargs: the keyword arguments to call it with.

        @return: An object which provides L{IDelayedCall} and can be used to
            cancel the scheduled call, by calling its C{cancel()} method.
            It also may be rescheduled by calling its C{delay()} or
            C{reset()} methods.
        """

    def getDelayedCalls() -> Sequence["IDelayedCall"]:
        """
        Retrieve all currently scheduled delayed calls.

        @return: A sequence of L{IDelayedCall} providers representing the
            outstanding calls scheduled with L{callLater} that have not yet
            been called or cancelled.
        """
+
+
class IDelayedCall(Interface):
    """
    A scheduled call.

    There are probably other useful methods we can add to this interface;
    suggestions are welcome.
    """

    def getTime() -> float:
        """
        Get time when delayed call will happen.

        @return: time in seconds since epoch (a float).
        """

    def cancel() -> None:
        """
        Cancel the scheduled call.

        @raises twisted.internet.error.AlreadyCalled: if the call has already
            happened.
        @raises twisted.internet.error.AlreadyCancelled: if the call has already
            been cancelled.
        """

    def delay(secondsLater: float) -> None:
        """
        Delay the scheduled call.

        @param secondsLater: how many seconds from its current firing time to delay

        @raises twisted.internet.error.AlreadyCalled: if the call has already
            happened.
        @raises twisted.internet.error.AlreadyCancelled: if the call has already
            been cancelled.
        """

    def reset(secondsFromNow: float) -> None:
        """
        Reset the scheduled call's timer.

        @param secondsFromNow: how many seconds from now it should fire,
            equivalent to C{.cancel()} and then doing another
            C{reactor.callLater(secondsLater, ...)}

        @raises twisted.internet.error.AlreadyCalled: if the call has already
            happened.
        @raises twisted.internet.error.AlreadyCancelled: if the call has already
            been cancelled.
        """

    def active() -> bool:
        """
        Determine whether this call is still pending.

        @return: True if this call is still active, False if it has been
            called or cancelled.
        """
+
+
class IReactorFromThreads(Interface):
    """
    This interface is the set of thread-safe methods which may be invoked on
    the reactor from other threads.

    @since: 15.4
    """

    def callFromThread(
        callable: Callable[..., Any], *args: object, **kwargs: object
    ) -> None:
        """
        Cause a function to be executed by the reactor thread.

        Use this method when you want to run a function in the reactor's thread
        from another thread.  Calling L{callFromThread} should wake up the main
        thread (where L{reactor.run() <IReactorCore.run>} is executing) and run
        the given callable in that thread.

        If you're writing a multi-threaded application the C{callable}
        may need to be thread safe, but this method doesn't require it as such.
        If you want to call a function in the next mainloop iteration, but
        you're in the same thread, use L{IReactorTime.callLater} with a delay
        of 0.
        """
+
+
class IReactorInThreads(Interface):
    """
    This interface contains the methods exposed by a reactor which will let you
    run functions in another thread.

    @since: 15.4
    """

    def callInThread(
        callable: Callable[..., Any], *args: object, **kwargs: object
    ) -> None:
        """
        Run the given callable object in a separate thread, with the given
        arguments and keyword arguments.

        @param callable: the callable object to run in another thread.
        @param args: the positional arguments to call it with.
        @param kwargs: the keyword arguments to call it with.
        """
+
+
class IReactorThreads(IReactorFromThreads, IReactorInThreads):
    """
    Dispatch methods to be run in threads.

    Internally, this should use a thread pool and dispatch methods to them.
    """

    def getThreadPool() -> "ThreadPool":
        """
        Return the threadpool used by L{IReactorInThreads.callInThread}.
        Create it first if necessary.

        @return: The reactor's thread pool.
        """

    def suggestThreadPoolSize(size: int) -> None:
        """
        Suggest the size of the internal threadpool used to dispatch functions
        passed to L{IReactorInThreads.callInThread}.

        @param size: the suggested size of the pool.
        """
+
+
class IReactorCore(Interface):
    """
    Core methods that a Reactor must implement.
    """

    running = Attribute(
        "A C{bool} which is C{True} from I{during startup} to "
        "I{during shutdown} and C{False} the rest of the time."
    )

    def resolve(name: str, timeout: Sequence[int]) -> "Deferred[str]":
        """
        Return a L{twisted.internet.defer.Deferred} that will resolve
        a hostname.

        @param name: DNS name to resolve.
        @param timeout: Number of seconds after which to reissue the query.
            When the last timeout expires, the query is considered failed.

        @return: A L{Deferred} which fires with the resolved address as a
            C{str}.
        """

    def run() -> None:
        """
        Fire 'startup' System Events, move the reactor to the 'running'
        state, then run the main loop until it is stopped with C{stop()} or
        C{crash()}.
        """

    def stop() -> None:
        """
        Fire 'shutdown' System Events, which will move the reactor to the
        'stopped' state and cause C{reactor.run()} to exit.
        """

    def crash() -> None:
        """
        Stop the main loop *immediately*, without firing any system events.

        This is named as it is because this is an extremely "rude" thing to do;
        it is possible to lose data and put your system in an inconsistent
        state by calling this.  However, it is necessary, as sometimes a system
        can become wedged in a pre-shutdown call.
        """

    def iterate(delay: float) -> None:
        """
        Run the main loop's I/O polling function for a period of time.

        This is most useful in applications where the UI is being drawn "as
        fast as possible", such as games.  All pending L{IDelayedCall}s will
        be called.

        @param delay: the amount of time, in seconds, to spend in the poll.
            (NOTE(review): exact blocking semantics depend on the concrete
            reactor implementation -- confirm before relying on them.)

        The reactor must have been started (via the C{run()} method) prior to
        any invocations of this method.  It must also be stopped manually
        after the last call to this method (via the C{stop()} method).  This
        method is not re-entrant: you must not call it recursively; in
        particular, you must not call it while the reactor is running.
        """

    def fireSystemEvent(eventType: str) -> None:
        """
        Fire a system-wide event.

        System-wide events are things like 'startup', 'shutdown', and
        'persist'.
        """

    def addSystemEventTrigger(
        phase: str,
        eventType: str,
        callable: Callable[..., Any],
        *args: object,
        **kwargs: object,
    ) -> Any:
        """
        Add a function to be called when a system event occurs.

        Each "system event" in Twisted, such as 'startup', 'shutdown', and
        'persist', has 3 phases: 'before', 'during', and 'after' (in that
        order, of course).  These events will be fired internally by the
        Reactor.

        An implementor of this interface must only implement those events
        described here.

        Callbacks registered for the "before" phase may return either None or a
        Deferred.  The "during" phase will not execute until all of the
        Deferreds from the "before" phase have fired.

        Once the "during" phase is running, all of the remaining triggers must
        execute; their return values must be ignored.

        @param phase: a time to call the event -- either the string 'before',
            'after', or 'during', describing when to call it
            relative to the event's execution.
        @param eventType: this is a string describing the type of event.
        @param callable: the object to call before shutdown.
        @param args: the arguments to call it with.
        @param kwargs: the keyword arguments to call it with.

        @return: an ID that can be used to remove this call with
            removeSystemEventTrigger.
        """

    def removeSystemEventTrigger(triggerID: Any) -> None:
        """
        Removes a trigger added with addSystemEventTrigger.

        @param triggerID: a value returned from addSystemEventTrigger.

        @raise KeyError: If there is no system event trigger for the given
            C{triggerID}.
        @raise ValueError: If there is no system event trigger for the given
            C{triggerID}.
        @raise TypeError: If there is no system event trigger for the given
            C{triggerID}.
        """

    def callWhenRunning(
        callable: Callable[..., Any], *args: object, **kwargs: object
    ) -> Optional[Any]:
        """
        Call a function when the reactor is running.

        If the reactor has not started, the callable will be scheduled
        to run when it does start.  Otherwise, the callable will be invoked
        immediately.

        @param callable: the callable object to call later.
        @param args: the arguments to call it with.
        @param kwargs: the keyword arguments to call it with.

        @return: None if the callable was invoked, otherwise a system
            event id for the scheduled call.
        """
+
+
class IReactorPluggableResolver(Interface):
    """
    An L{IReactorPluggableResolver} is a reactor which can be customized with
    an L{IResolverSimple}.  This is a fairly limited interface, that supports
    only IPv4; you should use L{IReactorPluggableNameResolver} instead.

    @see: L{IReactorPluggableNameResolver}
    """

    def installResolver(resolver: IResolverSimple) -> IResolverSimple:
        """
        Set the internal resolver to use for name lookups.

        @param resolver: The new resolver to use.

        @return: The previously installed resolver.
        """
+
+
class IReactorPluggableNameResolver(Interface):
    """
    An L{IReactorPluggableNameResolver} is a reactor whose name resolver can be
    set to a user-supplied object.

    @see: L{IHostnameResolver}
    """

    nameResolver = Attribute(
        """
        Read-only attribute; the resolver installed with L{installResolver}.
        An L{IHostnameResolver}.
        """
    )

    def installNameResolver(resolver: IHostnameResolver) -> IHostnameResolver:
        """
        Set the internal resolver to use for name lookups.

        @param resolver: The new resolver to use.

        @return: The previously installed resolver.
        """
+
+
class IReactorDaemonize(Interface):
    """
    A reactor which provides hooks that need to be called before and after
    daemonization.

    Notes:
        - This interface SHOULD NOT be called by applications.
        - This interface should only be implemented by reactors as a workaround
          (in particular, it's implemented currently only by kqueue()).
          For details please see the comments on ticket #1918.
    """

    def beforeDaemonize() -> None:
        """
        Hook to be called immediately before daemonization.  No reactor methods
        may be called until L{afterDaemonize} is called.
        """

    def afterDaemonize() -> None:
        """
        Hook to be called immediately after daemonization.  This may only be
        called after L{beforeDaemonize} has been called previously.
        """
+
+
class IReactorFDSet(Interface):
    """
    Implement me to be able to use L{IFileDescriptor} type resources.

    This assumes that your main-loop uses UNIX-style numeric file descriptors
    (or at least similarly opaque IDs returned from a .fileno() method)
    """

    def addReader(reader: "IReadDescriptor") -> None:
        """
        I add reader to the set of file descriptors to get read events for.

        @param reader: An L{IReadDescriptor} provider that will be checked for
            read events until it is removed from the reactor with
            L{removeReader}.
        """

    def addWriter(writer: "IWriteDescriptor") -> None:
        """
        I add writer to the set of file descriptors to get write events for.

        @param writer: An L{IWriteDescriptor} provider that will be checked for
            write events until it is removed from the reactor with
            L{removeWriter}.
        """

    def removeReader(reader: "IReadDescriptor") -> None:
        """
        Removes an object previously added with L{addReader}.

        @param reader: The L{IReadDescriptor} provider to stop monitoring.
        """

    def removeWriter(writer: "IWriteDescriptor") -> None:
        """
        Removes an object previously added with L{addWriter}.

        @param writer: The L{IWriteDescriptor} provider to stop monitoring.
        """

    def removeAll() -> List[Union["IReadDescriptor", "IWriteDescriptor"]]:
        """
        Remove all readers and writers.

        Should not remove reactor internal reactor connections (like a waker).

        @return: A list of L{IReadDescriptor} and L{IWriteDescriptor} providers
            which were removed.
        """

    def getReaders() -> List["IReadDescriptor"]:
        """
        Return the list of file descriptors currently monitored for input
        events by the reactor.

        @return: the list of file descriptors monitored for input events.
        """

    def getWriters() -> List["IWriteDescriptor"]:
        """
        Return the list of file descriptors currently monitored for output
        events by the reactor.

        @return: the list of file descriptors monitored for output events.
        """
+
+
+class IListeningPort(Interface):
+ """
+ A listening port.
+ """
+
+ def startListening() -> None:
+ """
+ Start listening on this port.
+
+ @raise CannotListenError: If it cannot listen on this port (e.g., it is
+ a TCP port and it cannot bind to the required
+ port number).
+ """
+
+ def stopListening() -> Optional["Deferred[None]"]:
+ """
+ Stop listening on this port.
+
+ If it does not complete immediately, will return Deferred that fires
+ upon completion.
+ """
+
+ def getHost() -> IAddress:
+ """
+ Get the host that this port is listening for.
+
+ @return: An L{IAddress} provider.
+ """
+
+
+class ILoggingContext(Interface):
+ """
+ Give context information that will be used to log events generated by
+ this item.
+ """
+
+ def logPrefix() -> str:
+ """
+ @return: Prefix used during log formatting to indicate context.
+ """
+
+
+class IFileDescriptor(ILoggingContext):
+ """
+ An interface representing a UNIX-style numeric file descriptor.
+ """
+
+ def fileno() -> object:
+ """
+ @return: The platform-specified representation of a file descriptor
+ number. Or C{-1} if the descriptor no longer has a valid file
+ descriptor number associated with it. As long as the descriptor
+ is valid, calls to this method on a particular instance must
+ return the same value.
+ """
+
+ def connectionLost(reason: Failure) -> None:
+ """
+ Called when the connection was lost.
+
+ This is called when the connection on a selectable object has been
+ lost. It will be called whether the connection was closed explicitly,
+ an exception occurred in an event handler, or the other end of the
+ connection closed it first.
+
+ See also L{IHalfCloseableDescriptor} if your descriptor wants to be
+ notified separately of the two halves of the connection being closed.
+
+ @param reason: A failure instance indicating the reason why the
+ connection was lost. L{error.ConnectionLost} and
+ L{error.ConnectionDone} are of special note, but the
+ failure may be of other classes as well.
+ """
+
+
+class IReadDescriptor(IFileDescriptor):
+ """
+ An L{IFileDescriptor} that can read.
+
+ This interface is generally used in conjunction with L{IReactorFDSet}.
+ """
+
+ def doRead() -> Optional[Failure]:
+ """
+ Some data is available for reading on your descriptor.
+
+ @return: If an error is encountered which causes the descriptor to
+ no longer be valid, a L{Failure} should be returned. Otherwise,
+ L{None}.
+ """
+
+
+class IWriteDescriptor(IFileDescriptor):
+ """
+ An L{IFileDescriptor} that can write.
+
+ This interface is generally used in conjunction with L{IReactorFDSet}.
+ """
+
+ def doWrite() -> Optional[Failure]:
+ """
+ Some data can be written to your descriptor.
+
+ @return: If an error is encountered which causes the descriptor to
+ no longer be valid, a L{Failure} should be returned. Otherwise,
+ L{None}.
+ """
+
+
+class IReadWriteDescriptor(IReadDescriptor, IWriteDescriptor):
+ """
+ An L{IFileDescriptor} that can both read and write.
+ """
+
+
+class IHalfCloseableDescriptor(Interface):
+ """
+ A descriptor that can be half-closed.
+ """
+
+ def writeConnectionLost(reason: Failure) -> None:
+ """
+ Indicates write connection was lost.
+ """
+
+ def readConnectionLost(reason: Failure) -> None:
+ """
+ Indicates read connection was lost.
+ """
+
+
+class ISystemHandle(Interface):
+ """
+ An object that wraps a networking OS-specific handle.
+ """
+
+ def getHandle() -> object:
+ """
+ Return a system- and reactor-specific handle.
+
+ This might be a socket.socket() object, or some other type of
+ object, depending on which reactor is being used. Use and
+ manipulate at your own risk.
+
+ This might be used in cases where you want to set specific
+ options not exposed by the Twisted APIs.
+ """
+
+
+class IConsumer(Interface):
+ """
+ A consumer consumes data from a producer.
+ """
+
+ def registerProducer(producer: "IProducer", streaming: bool) -> None:
+ """
+ Register to receive data from a producer.
+
+ This sets self to be a consumer for a producer. When this object runs
+ out of data (as when a send(2) call on a socket succeeds in moving the
+ last data from a userspace buffer into a kernelspace buffer), it will
+ ask the producer to resumeProducing().
+
+ For L{IPullProducer} providers, C{resumeProducing} will be called once
+ each time data is required.
+
+ For L{IPushProducer} providers, C{pauseProducing} will be called
+ whenever the write buffer fills up and C{resumeProducing} will only be
+ called when it empties. The consumer will only call C{resumeProducing}
+ to balance a previous C{pauseProducing} call; the producer is assumed
+ to start in an un-paused state.
+
+ @param streaming: C{True} if C{producer} provides L{IPushProducer},
+ C{False} if C{producer} provides L{IPullProducer}.
+
+ @raise RuntimeError: If a producer is already registered.
+ """
+
+ def unregisterProducer() -> None:
+ """
+ Stop consuming data from a producer, without disconnecting.
+ """
+
+ def write(data: bytes) -> None:
+ """
+ The producer will write data by calling this method.
+
+ The implementation must be non-blocking and perform whatever
+ buffering is necessary. If the producer has provided enough data
+ for now and it is a L{IPushProducer}, the consumer may call its
+ C{pauseProducing} method.
+ """
+
+
+class IProducer(Interface):
+ """
+ A producer produces data for a consumer.
+
+ Typically producing is done by calling the C{write} method of a class
+ implementing L{IConsumer}.
+ """
+
+ def stopProducing() -> None:
+ """
+ Stop producing data.
+
+ This tells a producer that its consumer has died, so it must stop
+ producing data for good.
+ """
+
+
+class IPushProducer(IProducer):
+ """
+ A push producer, also known as a streaming producer is expected to
+ produce (write to this consumer) data on a continuous basis, unless
+ it has been paused. A paused push producer will resume producing
+ after its C{resumeProducing()} method is called. For a push producer
+ which is not pauseable, these functions may be noops.
+ """
+
+ def pauseProducing() -> None:
+ """
+ Pause producing data.
+
+ Tells a producer that it has produced too much data to process for
+ the time being, and to stop until C{resumeProducing()} is called.
+ """
+
+ def resumeProducing() -> None:
+ """
+ Resume producing data.
+
+ This tells a producer to re-add itself to the main loop and produce
+ more data for its consumer.
+ """
+
+
+class IPullProducer(IProducer):
+ """
+ A pull producer, also known as a non-streaming producer, is
+ expected to produce data each time L{resumeProducing()} is called.
+ """
+
+ def resumeProducing() -> None:
+ """
+ Produce data for the consumer a single time.
+
+ This tells a producer to produce data for the consumer once
+ (not repeatedly, once only). Typically this will be done
+ by calling the consumer's C{write} method a single time with
+ produced data. The producer should produce data before returning
+ from C{resumeProducing()}, that is, it should not schedule a deferred
+ write.
+ """
+
+
+class IProtocol(Interface):
+ def dataReceived(data: bytes) -> None:
+ """
+ Called whenever data is received.
+
+ Use this method to translate to a higher-level message. Usually, some
+ callback will be made upon the receipt of each complete protocol
+ message.
+
+ Please keep in mind that you will probably need to buffer some data
+ as partial (or multiple) protocol messages may be received! We
+ recommend that unit tests for protocols call through to this method
+ with differing chunk sizes, down to one byte at a time.
+
+ @param data: bytes of indeterminate length
+ """
+
+ def connectionLost(reason: Failure) -> None:
+ """
+ Called when the connection is shut down.
+
+ Clear any circular references here, and any external references
+ to this Protocol. The connection has been closed. The C{reason}
+ Failure wraps a L{twisted.internet.error.ConnectionDone} or
+ L{twisted.internet.error.ConnectionLost} instance (or a subclass
+ of one of those).
+ """
+
+ def makeConnection(transport: "ITransport") -> None:
+ """
+ Make a connection to a transport and a server.
+ """
+
+ def connectionMade() -> None:
+ """
+ Called when a connection is made.
+
+ This may be considered the initializer of the protocol, because
+ it is called when the connection is completed. For clients,
+ this is called once the connection to the server has been
+ established; for servers, this is called after an accept() call
+ stops blocking and a socket has been received. If you need to
+ send any greeting or initial message, do it here.
+ """
+
+
+class IProcessProtocol(Interface):
+ """
+ Interface for process-related event handlers.
+ """
+
+ def makeConnection(process: "IProcessTransport") -> None:
+ """
+ Called when the process has been created.
+
+ @param process: An object representing the process which has been
+ created and associated with this protocol.
+ """
+
+ def childDataReceived(childFD: int, data: bytes) -> None:
+ """
+ Called when data arrives from the child process.
+
+ @param childFD: The file descriptor from which the data was
+ received.
+ @param data: The data read from the child's file descriptor.
+ """
+
+ def childConnectionLost(childFD: int) -> None:
+ """
+ Called when a file descriptor associated with the child process is
+ closed.
+
+ @param childFD: The file descriptor which was closed.
+ """
+
+ def processExited(reason: Failure) -> None:
+ """
+ Called when the child process exits.
+
+ @param reason: A failure giving the reason the child process
+ terminated. The type of exception for this failure is either
+ L{twisted.internet.error.ProcessDone} or
+ L{twisted.internet.error.ProcessTerminated}.
+
+ @since: 8.2
+ """
+
+ def processEnded(reason: Failure) -> None:
+ """
+ Called when the child process exits and all file descriptors associated
+ with it have been closed.
+
+ @param reason: A failure giving the reason the child process
+ terminated. The type of exception for this failure is either
+ L{twisted.internet.error.ProcessDone} or
+ L{twisted.internet.error.ProcessTerminated}.
+ """
+
+
+class IHalfCloseableProtocol(Interface):
+ """
+ Implemented to indicate they want notification of half-closes.
+
+ TCP supports the notion of half-closing the connection, e.g.
+ closing the write side but still not stopping reading. A protocol
+ that implements this interface will be notified of such events,
+ instead of having connectionLost called.
+ """
+
+ def readConnectionLost() -> None:
+ """
+ Notification of the read connection being closed.
+
+ This indicates peer did half-close of write side. It is now
+ the responsibility of the this protocol to call
+ loseConnection(). In addition, the protocol MUST make sure a
+ reference to it still exists (i.e. by doing a callLater with
+ one of its methods, etc.) as the reactor will only have a
+ reference to it if it is writing.
+
+ If the protocol does not do so, it might get garbage collected
+ without the connectionLost method ever being called.
+ """
+
+ def writeConnectionLost() -> None:
+ """
+ Notification of the write connection being closed.
+
+ This will never be called for TCP connections as TCP does not
+ support notification of this type of half-close.
+ """
+
+
+class IHandshakeListener(Interface):
+ """
+ An interface implemented by a L{IProtocol} to indicate that it would like
+ to be notified when TLS handshakes complete when run over a TLS-based
+ transport.
+
+ This interface is only guaranteed to be called when run over a TLS-based
+ transport: non TLS-based transports will not respect this interface.
+ """
+
+ def handshakeCompleted() -> None:
+ """
+ Notification of the TLS handshake being completed.
+
+ This notification fires when OpenSSL has completed the TLS handshake.
+ At this point the TLS connection is established, and the protocol can
+ interrogate its transport (usually an L{ISSLTransport}) for details of
+ the TLS connection.
+
+ This notification *also* fires whenever the TLS session is
+ renegotiated. As a result, protocols that have certain minimum security
+ requirements should implement this interface to ensure that they are
+ able to re-evaluate the security of the TLS session if it changes.
+ """
+
+
+class IFileDescriptorReceiver(Interface):
+ """
+ Protocols may implement L{IFileDescriptorReceiver} to receive file
+ descriptors sent to them. This is useful in conjunction with
+ L{IUNIXTransport}, which allows file descriptors to be sent between
+ processes on a single host.
+ """
+
+ def fileDescriptorReceived(descriptor: int) -> None:
+ """
+ Called when a file descriptor is received over the connection.
+
+ @param descriptor: The descriptor which was received.
+
+ @return: L{None}
+ """
+
+
+class IProtocolFactory(Interface):
+ """
+ Interface for protocol factories.
+ """
+
+ def buildProtocol(addr: IAddress) -> Optional[IProtocol]:
+ """
+ Called when a connection has been established to addr.
+
+ If None is returned, the connection is assumed to have been refused,
+ and the Port will close the connection.
+
+ @param addr: The address of the newly-established connection
+
+ @return: None if the connection was refused, otherwise an object
+ providing L{IProtocol}.
+ """
+
+ def doStart() -> None:
+ """
+ Called every time this is connected to a Port or Connector.
+ """
+
+ def doStop() -> None:
+ """
+ Called every time this is unconnected from a Port or Connector.
+ """
+
+
+class ITransport(Interface):
+ """
+ I am a transport for bytes.
+
+ I represent (and wrap) the physical connection and synchronicity
+ of the framework which is talking to the network. I make no
+ representations about whether calls to me will happen immediately
+ or require returning to a control loop, or whether they will happen
+ in the same or another thread. Consider methods of this class
+ (aside from getPeer) to be 'thrown over the wall', to happen at some
+ indeterminate time.
+ """
+
+ def write(data: bytes) -> None:
+ """
+ Write some data to the physical connection, in sequence, in a
+ non-blocking fashion.
+
+ If possible, make sure that it is all written. No data will
+ ever be lost, although (obviously) the connection may be closed
+ before it all gets through.
+
+ @param data: The data to write.
+ """
+
+ def writeSequence(data: Iterable[bytes]) -> None:
+ """
+ Write an iterable of byte strings to the physical connection.
+
+ If possible, make sure that all of the data is written to
+ the socket at once, without first copying it all into a
+ single byte string.
+
+ @param data: The data to write.
+ """
+
+ def loseConnection() -> None:
+ """
+ Close my connection, after writing all pending data.
+
+ Note that if there is a registered producer on a transport it
+ will not be closed until the producer has been unregistered.
+ """
+
+ def getPeer() -> IAddress:
+ """
+ Get the remote address of this connection.
+
+ Treat this method with caution. It is the unfortunate result of the
+ CGI and Jabber standards, but should not be considered reliable for
+ the usual host of reasons; port forwarding, proxying, firewalls, IP
+ masquerading, etc.
+
+ @return: An L{IAddress} provider.
+ """
+
+ def getHost() -> IAddress:
+ """
+ Similar to getPeer, but returns an address describing this side of the
+ connection.
+
+ @return: An L{IAddress} provider.
+ """
+
+
+class ITCPTransport(ITransport):
+ """
+ A TCP based transport.
+ """
+
+ def loseWriteConnection() -> None:
+ """
+ Half-close the write side of a TCP connection.
+
+ If the protocol instance this is attached to provides
+ IHalfCloseableProtocol, it will get notified when the operation is
+ done. When closing write connection, as with loseConnection this will
+ only happen when buffer has emptied and there is no registered
+ producer.
+ """
+
+ def abortConnection() -> None:
+ """
+ Close the connection abruptly.
+
+ Discards any buffered data, stops any registered producer,
+ and, if possible, notifies the other end of the unclean
+ closure.
+
+ @since: 11.1
+ """
+
+ def getTcpNoDelay() -> bool:
+ """
+ Return if C{TCP_NODELAY} is enabled.
+ """
+
+ def setTcpNoDelay(enabled: bool) -> None:
+ """
+ Enable/disable C{TCP_NODELAY}.
+
+ Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are
+ sent sooner, possibly at the expense of overall throughput.
+ """
+
+ def getTcpKeepAlive() -> bool:
+ """
+ Return if C{SO_KEEPALIVE} is enabled.
+ """
+
+ def setTcpKeepAlive(enabled: bool) -> None:
+ """
+ Enable/disable C{SO_KEEPALIVE}.
+
+ Enabling C{SO_KEEPALIVE} sends packets periodically when the connection
+ is otherwise idle, usually once every two hours. They are intended
+ to allow detection of lost peers in a non-infinite amount of time.
+ """
+
+ def getHost() -> Union["IPv4Address", "IPv6Address"]:
+ """
+ Returns L{IPv4Address} or L{IPv6Address}.
+ """
+
+ def getPeer() -> Union["IPv4Address", "IPv6Address"]:
+ """
+ Returns L{IPv4Address} or L{IPv6Address}.
+ """
+
+
+class IUNIXTransport(ITransport):
+ """
+ Transport for stream-oriented unix domain connections.
+ """
+
+ def sendFileDescriptor(descriptor: int) -> None:
+ """
+ Send a duplicate of this (file, socket, pipe, etc) descriptor to the
+ other end of this connection.
+
+ The send is non-blocking and will be queued if it cannot be performed
+ immediately. The send will be processed in order with respect to other
+ C{sendFileDescriptor} calls on this transport, but not necessarily with
+ respect to C{write} calls on this transport. The send can only be
+ processed if there are also bytes in the normal connection-oriented send
+ buffer (ie, you must call C{write} at least as many times as you call
+ C{sendFileDescriptor}).
+
+ @param descriptor: An C{int} giving a valid file descriptor in this
+ process. Note that a I{file descriptor} may actually refer to a
+ socket, a pipe, or anything else POSIX tries to treat in the same
+ way as a file.
+ """
+
+
+class IOpenSSLServerConnectionCreator(Interface):
+ """
+ A provider of L{IOpenSSLServerConnectionCreator} can create
+ L{OpenSSL.SSL.Connection} objects for TLS servers.
+
+ @see: L{twisted.internet.ssl}
+
+ @note: Creating OpenSSL connection objects is subtle, error-prone, and
+ security-critical. Before implementing this interface yourself,
+ consider using L{twisted.internet.ssl.CertificateOptions} as your
+ C{contextFactory}. (For historical reasons, that class does not
+ actually I{implement} this interface; nevertheless it is usable in all
+ Twisted APIs which require a provider of this interface.)
+ """
+
+ def serverConnectionForTLS(
+ tlsProtocol: "TLSMemoryBIOProtocol",
+ ) -> "OpenSSLConnection":
+ """
+ Create a connection for the given server protocol.
+
+ @return: an OpenSSL connection object configured appropriately for the
+ given Twisted protocol.
+ """
+
+
+class IOpenSSLClientConnectionCreator(Interface):
+ """
+ A provider of L{IOpenSSLClientConnectionCreator} can create
+ L{OpenSSL.SSL.Connection} objects for TLS clients.
+
+ @see: L{twisted.internet.ssl}
+
+ @note: Creating OpenSSL connection objects is subtle, error-prone, and
+ security-critical. Before implementing this interface yourself,
+ consider using L{twisted.internet.ssl.optionsForClientTLS} as your
+ C{contextFactory}.
+ """
+
+ def clientConnectionForTLS(
+ tlsProtocol: "TLSMemoryBIOProtocol",
+ ) -> "OpenSSLConnection":
+ """
+ Create a connection for the given client protocol.
+
+ @param tlsProtocol: the client protocol making the request.
+
+ @return: an OpenSSL connection object configured appropriately for the
+ given Twisted protocol.
+ """
+
+
+class IProtocolNegotiationFactory(Interface):
+ """
+ A provider of L{IProtocolNegotiationFactory} can provide information about
+ the various protocols that the factory can create implementations of. This
+ can be used, for example, to provide protocol names for Next Protocol
+ Negotiation and Application Layer Protocol Negotiation.
+
+ @see: L{twisted.internet.ssl}
+ """
+
+ def acceptableProtocols() -> List[bytes]:
+ """
+ Returns a list of protocols that can be spoken by the connection
+ factory in the form of ALPN tokens, as laid out in the IANA registry
+ for ALPN tokens.
+
+ @return: a list of ALPN tokens in order of preference.
+ """
+
+
+class IOpenSSLContextFactory(Interface):
+ """
+ A provider of L{IOpenSSLContextFactory} is capable of generating
+ L{OpenSSL.SSL.Context} classes suitable for configuring TLS on a
+ connection. A provider will store enough state to be able to generate these
+ contexts as needed for individual connections.
+
+ @see: L{twisted.internet.ssl}
+ """
+
+ def getContext() -> "OpenSSLContext":
+ """
+ Returns a TLS context object, suitable for securing a TLS connection.
+ This context object will be appropriately customized for the connection
+ based on the state in this object.
+
+ @return: A TLS context object.
+ """
+
+
+class ITLSTransport(ITCPTransport):
+ """
+ A TCP transport that supports switching to TLS midstream.
+
+ Once TLS mode is started the transport will implement L{ISSLTransport}.
+ """
+
+ def startTLS(
+ contextFactory: Union[
+ IOpenSSLClientConnectionCreator, IOpenSSLServerConnectionCreator
+ ]
+ ) -> None:
+ """
+ Initiate TLS negotiation.
+
+ @param contextFactory: An object which creates appropriately configured
+ TLS connections.
+
+ For clients, use L{twisted.internet.ssl.optionsForClientTLS}; for
+ servers, use L{twisted.internet.ssl.CertificateOptions}.
+
+ @type contextFactory: L{IOpenSSLClientConnectionCreator} or
+ L{IOpenSSLServerConnectionCreator}, depending on whether this
+ L{ITLSTransport} is a server or not. If the appropriate interface
+ is not provided by the value given for C{contextFactory}, it must
+ be an implementor of L{IOpenSSLContextFactory}.
+ """
+
+
+class ISSLTransport(ITCPTransport):
+ """
+ A SSL/TLS based transport.
+ """
+
+ def getPeerCertificate() -> object:
+ """
+ Return an object with the peer's certificate info.
+ """
+
+
+class INegotiated(ISSLTransport):
+ """
+ A TLS based transport that supports using ALPN/NPN to negotiate the
+ protocol to be used inside the encrypted tunnel.
+ """
+
+ negotiatedProtocol = Attribute(
+ """
+ The protocol selected to be spoken using ALPN/NPN. The result from ALPN
+ is preferred to the result from NPN if both were used. If the remote
+ peer does not support ALPN or NPN, or neither NPN or ALPN are available
+ on this machine, will be L{None}. Otherwise, will be the name of the
+ selected protocol as C{bytes}. Note that until the handshake has
+ completed this property may incorrectly return L{None}: wait until data
+ has been received before trusting it (see
+ https://twistedmatrix.com/trac/ticket/6024).
+ """
+ )
+
+
+class ICipher(Interface):
+ """
+ A TLS cipher.
+ """
+
+ fullName = Attribute("The fully qualified name of the cipher in L{unicode}.")
+
+
+class IAcceptableCiphers(Interface):
+ """
+ A list of acceptable ciphers for a TLS context.
+ """
+
+ def selectCiphers(availableCiphers: Tuple[ICipher]) -> Tuple[ICipher]:
+ """
+ Choose which ciphers to allow to be negotiated on a TLS connection.
+
+ @param availableCiphers: A L{tuple} of L{ICipher} which gives the names
+ of all ciphers supported by the TLS implementation in use.
+
+ @return: A L{tuple} of L{ICipher} which represents the ciphers
+ which may be negotiated on the TLS connection. The result is
+ ordered by preference with more preferred ciphers appearing
+ earlier.
+ """
+
+
+class IProcessTransport(ITransport):
+ """
+ A process transport.
+ """
+
+ pid = Attribute(
+ "From before L{IProcessProtocol.makeConnection} is called to before "
+ "L{IProcessProtocol.processEnded} is called, C{pid} is an L{int} "
+ "giving the platform process ID of this process. C{pid} is L{None} "
+ "at all other times."
+ )
+
+ def closeStdin() -> None:
+ """
+ Close stdin after all data has been written out.
+ """
+
+ def closeStdout() -> None:
+ """
+ Close stdout.
+ """
+
+ def closeStderr() -> None:
+ """
+ Close stderr.
+ """
+
+ def closeChildFD(descriptor: int) -> None:
+ """
+ Close a file descriptor which is connected to the child process, identified
+ by its FD in the child process.
+ """
+
+ def writeToChild(childFD: int, data: bytes) -> None:
+ """
+ Similar to L{ITransport.write} but also allows the file descriptor in
+ the child process which will receive the bytes to be specified.
+
+ @param childFD: The file descriptor to which to write.
+ @param data: The bytes to write.
+
+ @raise KeyError: If C{childFD} is not a file descriptor that was mapped
+ in the child when L{IReactorProcess.spawnProcess} was used to create
+ it.
+ """
+
+ def loseConnection() -> None:
+ """
+ Close stdin, stderr and stdout.
+ """
+
+ def signalProcess(signalID: Union[str, int]) -> None:
+ """
+ Send a signal to the process.
+
+ @param signalID: can be
+ - one of C{"KILL"}, C{"TERM"}, or C{"INT"}.
+ These will be implemented in a
+ cross-platform manner, and so should be used
+ if possible.
+ - an integer, where it represents a POSIX
+ signal ID.
+
+ @raise twisted.internet.error.ProcessExitedAlready: If the process has
+ already exited.
+ @raise OSError: If the C{os.kill} call fails with an errno different
+ from C{ESRCH}.
+ """
+
+
+class IServiceCollection(Interface):
+ """
+ An object which provides access to a collection of services.
+ """
+
+ def getServiceNamed(serviceName: str) -> object:
+ """
+ Retrieve the named service from this application.
+
+ Raise a C{KeyError} if there is no such service name.
+ """
+
+ def addService(service: object) -> None:
+ """
+ Add a service to this collection.
+ """
+
+ def removeService(service: object) -> None:
+ """
+ Remove a service from this collection.
+ """
+
+
+class IUDPTransport(Interface):
+ """
+ Transport for UDP DatagramProtocols.
+ """
+
+ def write(packet: bytes, addr: Optional[Tuple[str, int]]) -> None:
+ """
+ Write packet to given address.
+
+ @param addr: a tuple of (ip, port). For connected transports must
+ be the address the transport is connected to, or None.
+ In non-connected mode this is mandatory.
+
+ @raise twisted.internet.error.MessageLengthError: C{packet} was too
+ long.
+ """
+
+ def connect(host: str, port: int) -> None:
+ """
+ Connect the transport to an address.
+
+ This changes it to connected mode. Datagrams can only be sent to
+ this address, and will only be received from this address. In addition
+ the protocol's connectionRefused method might get called if destination
+ is not receiving datagrams.
+
+ @param host: an IP address, not a domain name ('127.0.0.1', not 'localhost')
+ @param port: port to connect to.
+ """
+
+ def getHost() -> Union["IPv4Address", "IPv6Address"]:
+ """
+ Get this port's host address.
+
+ @return: an address describing the listening port.
+ """
+
+ def stopListening() -> Optional["Deferred[None]"]:
+ """
+ Stop listening on this port.
+
+ If it does not complete immediately, will return L{Deferred} that fires
+ upon completion.
+ """
+
+ def setBroadcastAllowed(enabled: bool) -> None:
+ """
+ Set whether this port may broadcast.
+
+ @param enabled: Whether the port may broadcast.
+ """
+
+ def getBroadcastAllowed() -> bool:
+ """
+ Checks if broadcast is currently allowed on this port.
+
+ @return: Whether this port may broadcast.
+ """
+
+
+class IUNIXDatagramTransport(Interface):
+ """
+ Transport for UDP PacketProtocols.
+ """
+
+ def write(packet: bytes, addr: str) -> None:
+ """
+ Write packet to given address.
+ """
+
+ def getHost() -> "UNIXAddress":
+ """
+ Returns L{UNIXAddress}.
+ """
+
+
+class IUNIXDatagramConnectedTransport(Interface):
+ """
+ Transport for UDP ConnectedPacketProtocols.
+ """
+
+ def write(packet: bytes) -> None:
+ """
+ Write packet to address we are connected to.
+ """
+
+ def getHost() -> "UNIXAddress":
+ """
+ Returns L{UNIXAddress}.
+ """
+
+ def getPeer() -> "UNIXAddress":
+ """
+ Returns L{UNIXAddress}.
+ """
+
+
+class IMulticastTransport(Interface):
+ """
+ Additional functionality for multicast UDP.
+ """
+
+ def getOutgoingInterface() -> str:
+ """
+ Return interface of outgoing multicast packets.
+ """
+
+ def setOutgoingInterface(addr: str) -> None:
+ """
+ Set interface for outgoing multicast packets.
+
+ Returns Deferred of success.
+ """
+
+ def getLoopbackMode() -> bool:
+ """
+ Return if loopback mode is enabled.
+ """
+
+ def setLoopbackMode(mode: bool) -> None:
+ """
+ Set if loopback mode is enabled.
+ """
+
+ def getTTL() -> int:
+ """
+ Get time to live for multicast packets.
+ """
+
+ def setTTL(ttl: int) -> None:
+ """
+ Set time to live on multicast packets.
+ """
+
+ def joinGroup(addr: str, interface: str) -> "Deferred[None]":
+ """
+ Join a multicast group. Returns L{Deferred} of success or failure.
+
+ If an error occurs, the returned L{Deferred} will fail with
+ L{error.MulticastJoinError}.
+ """
+
+ def leaveGroup(addr: str, interface: str) -> "Deferred[None]":
+ """
+ Leave multicast group, return L{Deferred} of success.
+ """
+
+
+class IStreamClientEndpoint(Interface):
+ """
+ A stream client endpoint is a place that L{ClientFactory} can connect to.
+ For example, a remote TCP host/port pair would be a TCP client endpoint.
+
+ @since: 10.1
+ """
+
+ def connect(protocolFactory: IProtocolFactory) -> "Deferred[IProtocol]":
+ """
+ Connect the C{protocolFactory} to the location specified by this
+ L{IStreamClientEndpoint} provider.
+
+ @param protocolFactory: A provider of L{IProtocolFactory}
+
+ @return: A L{Deferred} that results in an L{IProtocol} upon successful
+ connection otherwise a L{Failure} wrapping L{ConnectError} or
+ L{NoProtocol <twisted.internet.error.NoProtocol>}.
+ """
+
+
+class IStreamServerEndpoint(Interface):
+ """
+ A stream server endpoint is a place that a L{Factory} can listen for
+ incoming connections.
+
+ @since: 10.1
+ """
+
+ def listen(protocolFactory: IProtocolFactory) -> "Deferred[IListeningPort]":
+ """
+ Listen with C{protocolFactory} at the location specified by this
+ L{IStreamServerEndpoint} provider.
+
+ @param protocolFactory: A provider of L{IProtocolFactory}
+
+ @return: A L{Deferred} that results in an L{IListeningPort} or an
+ L{CannotListenError}
+ """
+
+
+class IStreamServerEndpointStringParser(Interface):
+ """
+ An L{IStreamServerEndpointStringParser} is like an
+ L{IStreamClientEndpointStringParserWithReactor}, except for
+ L{IStreamServerEndpoint}s instead of clients. It integrates with
+ L{endpoints.serverFromString} in much the same way.
+ """
+
+ prefix = Attribute(
+ """
+ A C{str}, the description prefix to respond to. For example, an
+ L{IStreamServerEndpointStringParser} plugin which had C{"foo"} for its
+ C{prefix} attribute would be called for endpoint descriptions like
+ C{"foo:bar:baz"} or C{"foo:"}.
+ """
+ )
+
+ def parseStreamServer(
+ reactor: IReactorCore, *args: object, **kwargs: object
+ ) -> IStreamServerEndpoint:
+ """
+ Parse a stream server endpoint from a reactor and string-only arguments
+ and keyword arguments.
+
+ @see: L{IStreamClientEndpointStringParserWithReactor.parseStreamClient}
+
+ @return: a stream server endpoint
+ """
+
+
+class IStreamClientEndpointStringParserWithReactor(Interface):
+ """
+ An L{IStreamClientEndpointStringParserWithReactor} is a parser which can
+ convert a set of string C{*args} and C{**kwargs} into an
+ L{IStreamClientEndpoint} provider.
+
+ This interface is really only useful in the context of the plugin system
+ for L{endpoints.clientFromString}. See the document entitled "I{The
+ Twisted Plugin System}" for more details on how to write a plugin.
+
+ If you place an L{IStreamClientEndpointStringParserWithReactor} plugin in
+ the C{twisted.plugins} package, that plugin's C{parseStreamClient} method
+ will be used to produce endpoints for any description string that begins
+ with the result of that L{IStreamClientEndpointStringParserWithReactor}'s
+ prefix attribute.
+ """
+
+ prefix = Attribute(
+ """
+ L{bytes}, the description prefix to respond to. For example, an
+ L{IStreamClientEndpointStringParserWithReactor} plugin which had
+ C{b"foo"} for its C{prefix} attribute would be called for endpoint
+ descriptions like C{b"foo:bar:baz"} or C{b"foo:"}.
+ """
+ )
+
+ def parseStreamClient(
+ reactor: IReactorCore, *args: object, **kwargs: object
+ ) -> IStreamClientEndpoint:
+ """
+ This method is invoked by L{endpoints.clientFromString}, if the type of
+ endpoint matches the return value from this
+ L{IStreamClientEndpointStringParserWithReactor}'s C{prefix} method.
+
+ @param reactor: The reactor passed to L{endpoints.clientFromString}.
+ @param args: The byte string arguments, minus the endpoint type, in the
+ endpoint description string, parsed according to the rules
+ described in L{endpoints.quoteStringArgument}. For example, if the
+ description were C{b"my-type:foo:bar:baz=qux"}, C{args} would be
+ C{(b'foo', b'bar')}
+ @param kwargs: The byte string arguments from the endpoint description
+ passed as keyword arguments. For example, if the description were
+ C{b"my-type:foo:bar:baz=qux"}, C{kwargs} would be
+ C{dict(baz=b'qux')}.
+
+ @return: a client endpoint
+ """
+
+
+class _ISupportsExitSignalCapturing(Interface):
+ """
+ An implementor of L{_ISupportsExitSignalCapturing} will capture the
+ value of any delivered exit signal (SIGINT, SIGTERM, SIGBREAK) for which
+ it has installed a handler. The caught signal number is made available in
+ the _exitSignal attribute.
+ """
+
+ _exitSignal = Attribute(
+ """
+ C{int} or C{None}, the integer exit signal delivered to the
+ application, or None if no signal was delivered.
+ """
+ )
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/__init__.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/__init__.py
new file mode 100644
index 0000000000..d1881d4fe3
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+I/O Completion Ports reactor
+"""
+
+from twisted.internet.iocpreactor.reactor import install
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/abstract.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/abstract.py
new file mode 100644
index 0000000000..818c86068d
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/abstract.py
@@ -0,0 +1,387 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Abstract file handle class
+"""
+
+import errno
+
+from zope.interface import implementer
+
+from twisted.internet import error, interfaces, main
+from twisted.internet.abstract import _ConsumerMixin, _dataMustBeBytes, _LogOwner
+from twisted.internet.iocpreactor import iocpsupport as _iocp
+from twisted.internet.iocpreactor.const import ERROR_HANDLE_EOF, ERROR_IO_PENDING
+from twisted.python import failure
+
+
@implementer(
    interfaces.IPushProducer,
    interfaces.IConsumer,
    interfaces.ITransport,
    interfaces.IHalfCloseableDescriptor,
)
class FileHandle(_ConsumerMixin, _LogOwner):
    """
    File handle that can read and write asynchronously via overlapped I/O.

    Reads are delivered out of a pool of reusable C{bytearray} buffers that
    grows and shrinks with demand (up to C{maxReadBuffers}); writes are
    coalesced into a single C{bytes} buffer flushed through
    L{writeToHandle}.
    """

    # read stuff
    maxReadBuffers = 16  # upper bound on the size of the read-buffer pool
    readBufferSize = 4096
    reading = False
    dynamicReadBuffers = True  # set this to false if subclass doesn't do iovecs
    _readNextBuffer = 0
    _readSize = 0  # how much data we have in the read buffer
    _readScheduled = None
    _readScheduledInOS = False

    def startReading(self):
        """
        Begin delivering read events to the protocol; schedules a read on the
        next reactor iteration unless one is already pending.
        """
        self.reactor.addActiveHandle(self)
        if not self._readScheduled and not self.reading:
            self.reading = True
            self._readScheduled = self.reactor.callLater(0, self._resumeReading)

    def stopReading(self):
        """
        Stop delivering read events, cancelling any scheduled resume.
        """
        if self._readScheduled:
            self._readScheduled.cancel()
            self._readScheduled = None
        self.reading = False

    def _resumeReading(self):
        """
        Deliver buffered data; if the protocol still wants more and no read
        is currently outstanding in the OS, issue a new one.
        """
        self._readScheduled = None
        if self._dispatchData() and not self._readScheduledInOS:
            self.doRead()

    def _dispatchData(self):
        """
        Dispatch previously read data. Return True if self.reading and we don't
        have any more data
        """
        if not self._readSize:
            return self.reading
        size = self._readSize
        full_buffers = size // self.readBufferSize
        # hand every completely-filled buffer to the protocol first
        while self._readNextBuffer < full_buffers:
            self.dataReceived(self._readBuffers[self._readNextBuffer])
            self._readNextBuffer += 1
            # dataReceived may have called stopReading()/loseConnection()
            if not self.reading:
                return False
        remainder = size % self.readBufferSize
        if remainder:
            self.dataReceived(self._readBuffers[full_buffers][0:remainder])
        if self.dynamicReadBuffers:
            total_buffer_size = self.readBufferSize * len(self._readBuffers)
            # we have one buffer too many
            if size < total_buffer_size - self.readBufferSize:
                del self._readBuffers[-1]
            # we filled all buffers, so allocate one more
            elif (
                size == total_buffer_size
                and len(self._readBuffers) < self.maxReadBuffers
            ):
                self._readBuffers.append(bytearray(self.readBufferSize))
        self._readNextBuffer = 0
        self._readSize = 0
        return self.reading

    def _cbRead(self, rc, data, evt):
        # Completion callback for an overlapped read.
        self._readScheduledInOS = False
        if self._handleRead(rc, data, evt):
            self.doRead()

    def _handleRead(self, rc, data, evt):
        """
        Returns False if we should stop reading for now
        """
        if self.disconnected:
            return False
        # graceful disconnection
        if (not (rc or data)) or rc in (errno.WSAEDISCON, ERROR_HANDLE_EOF):
            self.reactor.removeActiveHandle(self)
            self.readConnectionLost(failure.Failure(main.CONNECTION_DONE))
            return False
        # XXX: not handling WSAEWOULDBLOCK
        # ("too many outstanding overlapped I/O requests")
        elif rc:
            self.connectionLost(
                failure.Failure(
                    error.ConnectionLost(
                        "read error -- %s (%s)"
                        % (errno.errorcode.get(rc, "unknown"), rc)
                    )
                )
            )
            return False
        else:
            assert self._readSize == 0
            assert self._readNextBuffer == 0
            self._readSize = data
            return self._dispatchData()

    def doRead(self):
        """
        Issue an overlapped read into the buffer pool.
        """
        evt = _iocp.Event(self._cbRead, self)

        evt.buff = buff = self._readBuffers
        rc, numBytesRead = self.readFromHandle(buff, evt)

        if not rc or rc == ERROR_IO_PENDING:
            # the OS accepted the request; _cbRead fires on completion
            self._readScheduledInOS = True
        else:
            self._handleRead(rc, numBytesRead, evt)

    def readFromHandle(self, bufflist, evt):
        raise NotImplementedError()  # TODO: this should default to ReadFile

    def dataReceived(self, data):
        raise NotImplementedError

    def readConnectionLost(self, reason):
        self.connectionLost(reason)

    # write stuff
    dataBuffer = b""
    offset = 0
    writing = False
    _writeScheduled = None
    _writeDisconnecting = False
    _writeDisconnected = False
    writeBufferSize = 2**2**2**2  # 2 ** 16 == 65536 bytes

    def loseWriteConnection(self):
        """
        Half-close: flush buffered data, then shut down only the write side.
        """
        self._writeDisconnecting = True
        self.startWriting()

    def _closeWriteConnection(self):
        # override in subclasses
        pass

    def writeConnectionLost(self, reason):
        # in current code should never be called
        self.connectionLost(reason)

    def startWriting(self):
        """
        Begin flushing the write buffer; schedules a write on the next
        reactor iteration unless one is already pending.
        """
        self.reactor.addActiveHandle(self)

        if not self._writeScheduled and not self.writing:
            self.writing = True
            self._writeScheduled = self.reactor.callLater(0, self._resumeWriting)

    def stopWriting(self):
        """
        Stop flushing the write buffer, cancelling any scheduled resume.
        """
        if self._writeScheduled:
            self._writeScheduled.cancel()
            self._writeScheduled = None
        self.writing = False

    def _resumeWriting(self):
        self._writeScheduled = None
        self.doWrite()

    def _cbWrite(self, rc, numBytesWritten, evt):
        # Completion callback for an overlapped write.
        if self._handleWrite(rc, numBytesWritten, evt):
            self.doWrite()

    def _handleWrite(self, rc, numBytesWritten, evt):
        """
        Returns false if we should stop writing for now
        """
        if self.disconnected or self._writeDisconnected:
            return False
        # XXX: not handling WSAEWOULDBLOCK
        # ("too many outstanding overlapped I/O requests")
        if rc:
            self.connectionLost(
                failure.Failure(
                    error.ConnectionLost(
                        "write error -- %s (%s)"
                        % (errno.errorcode.get(rc, "unknown"), rc)
                    )
                )
            )
            return False
        else:
            self.offset += numBytesWritten
            # If there is nothing left to send,
            if self.offset == len(self.dataBuffer) and not self._tempDataLen:
                self.dataBuffer = b""
                self.offset = 0
                # stop writing
                self.stopWriting()
                # If I've got a producer who is supposed to supply me with data
                if self.producer is not None and (
                    (not self.streamingProducer) or self.producerPaused
                ):
                    # tell them to supply some more.
                    # Fixed: the producer is being resumed, so clear the
                    # paused flag; this previously set it to True, leaving
                    # the pause/resume bookkeeping permanently out of sync.
                    self.producerPaused = False
                    self.producer.resumeProducing()
                elif self.disconnecting:
                    # But if I was previously asked to let the connection die,
                    # do so.
                    self.connectionLost(failure.Failure(main.CONNECTION_DONE))
                elif self._writeDisconnecting:
                    # I was previously asked to half-close the connection.
                    self._writeDisconnected = True
                    self._closeWriteConnection()
                return False
            else:
                return True

    def doWrite(self):
        """
        Issue an overlapped write for as much buffered data as possible.
        """
        if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
            # If there is currently less than SEND_LIMIT bytes left to send
            # in the string, extend it with the array data.
            self.dataBuffer = self.dataBuffer[self.offset :] + b"".join(
                self._tempDataBuffer
            )
            self.offset = 0
            self._tempDataBuffer = []
            self._tempDataLen = 0

        evt = _iocp.Event(self._cbWrite, self)

        # Send as much data as you can.
        if self.offset:
            sendView = memoryview(self.dataBuffer)
            evt.buff = buff = sendView[self.offset :]
        else:
            evt.buff = buff = self.dataBuffer
        rc, data = self.writeToHandle(buff, evt)
        if rc and rc != ERROR_IO_PENDING:
            self._handleWrite(rc, data, evt)

    def writeToHandle(self, buff, evt):
        raise NotImplementedError()  # TODO: this should default to WriteFile

    def write(self, data):
        """Reliably write some data.

        The data is buffered until this file descriptor is ready for writing.
        """
        _dataMustBeBytes(data)
        if not self.connected or self._writeDisconnected:
            return
        if data:
            self._tempDataBuffer.append(data)
            self._tempDataLen += len(data)
            # pause a streaming producer once the buffered data exceeds
            # writeBufferSize
            if self.producer is not None and self.streamingProducer:
                if len(self.dataBuffer) + self._tempDataLen > self.writeBufferSize:
                    self.producerPaused = True
                    self.producer.pauseProducing()
            self.startWriting()

    def writeSequence(self, iovec):
        """
        Reliably write an iterable of byte strings as one buffered unit.
        """
        for i in iovec:
            _dataMustBeBytes(i)
        if not self.connected or not iovec or self._writeDisconnected:
            return
        self._tempDataBuffer.extend(iovec)
        for i in iovec:
            self._tempDataLen += len(i)
        if self.producer is not None and self.streamingProducer:
            if len(self.dataBuffer) + self._tempDataLen > self.writeBufferSize:
                self.producerPaused = True
                self.producer.pauseProducing()
        self.startWriting()

    # general stuff
    connected = False
    disconnected = False
    disconnecting = False
    logstr = "Uninitialized"

    SEND_LIMIT = 128 * 1024

    def __init__(self, reactor=None):
        """
        @param reactor: the reactor to use; defaults to the global reactor.
        """
        if not reactor:
            from twisted.internet import reactor
        self.reactor = reactor
        self._tempDataBuffer = []  # will be added to dataBuffer in doWrite
        self._tempDataLen = 0
        self._readBuffers = [bytearray(self.readBufferSize)]

    def connectionLost(self, reason):
        """
        The connection was lost.

        This is called when the connection on a selectable object has been
        lost. It will be called whether the connection was closed explicitly,
        an exception occurred in an event handler, or the other end of the
        connection closed it first.

        Clean up state here, but make sure to call back up to FileDescriptor.
        """

        self.disconnected = True
        self.connected = False
        if self.producer is not None:
            self.producer.stopProducing()
            self.producer = None
        self.stopReading()
        self.stopWriting()
        self.reactor.removeActiveHandle(self)

    def getFileHandle(self):
        return -1

    def loseConnection(self, _connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Close the connection at the next available opportunity.

        Call this to cause this FileDescriptor to lose its connection. It will
        first write any data that it has buffered.

        If there is data buffered yet to be written, this method will cause the
        transport to lose its connection as soon as it's done flushing its
        write buffer. If you have a producer registered, the connection won't
        be closed until the producer is finished. Therefore, make sure you
        unregister your producer when it's finished, or the connection will
        never close.
        """

        if self.connected and not self.disconnecting:
            if self._writeDisconnected:
                # doWrite won't trigger the connection close anymore
                self.stopReading()
                # Fixed: this was a bare method reference (self.stopWriting)
                # and never actually cancelled the scheduled write.
                self.stopWriting()
                self.connectionLost(_connDone)
            else:
                self.stopReading()
                self.startWriting()
                self.disconnecting = 1

    # Producer/consumer implementation

    def stopConsuming(self):
        """
        Stop consuming data.

        This is called when a producer has lost its connection, to tell the
        consumer to go lose its connection (and break potential circular
        references).
        """
        self.unregisterProducer()
        self.loseConnection()

    # producer interface implementation

    def resumeProducing(self):
        if self.connected and not self.disconnecting:
            self.startReading()

    def pauseProducing(self):
        self.stopReading()

    def stopProducing(self):
        self.loseConnection()

    def getHost(self):
        # ITransport.getHost
        raise NotImplementedError()

    def getPeer(self):
        # ITransport.getPeer
        raise NotImplementedError()
+
+
+__all__ = ["FileHandle"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/const.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/const.py
new file mode 100644
index 0000000000..4814425af9
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/const.py
@@ -0,0 +1,25 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+Windows constants for IOCP
+"""
+
+
# this stuff should really be gotten from Windows headers via pyrex, but it
# probably is not going to change

# Win32 system error codes (winerror.h values):
ERROR_PORT_UNREACHABLE = 1234  # ICMP port unreachable reported on the socket
ERROR_NETWORK_UNREACHABLE = 1231  # network location cannot be reached
ERROR_CONNECTION_REFUSED = 1225  # remote host actively refused the connection
ERROR_IO_PENDING = 997  # overlapped operation in progress -- not a failure
ERROR_OPERATION_ABORTED = 995  # I/O cancelled (thread exit or CancelIo)
WAIT_TIMEOUT = 258  # wait completed because the timeout elapsed
ERROR_NETNAME_DELETED = 64  # network name deleted -- peer went away
ERROR_HANDLE_EOF = 38  # reached end of file/stream

# -1 is reinterpreted as the unsigned 0xFFFFFFFF "wait forever" value by the
# Win32 wait APIs.
INFINITE = -1

# setsockopt() options applied after ConnectEx/AcceptEx so the socket becomes
# usable with the ordinary socket functions (getpeername() etc.).
SO_UPDATE_CONNECT_CONTEXT = 0x7010
SO_UPDATE_ACCEPT_CONTEXT = 0x700B
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/interfaces.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/interfaces.py
new file mode 100644
index 0000000000..b161341efa
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/interfaces.py
@@ -0,0 +1,42 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+Interfaces for iocpreactor
+"""
+
+
+from zope.interface import Interface
+
+
class IReadHandle(Interface):
    """
    A handle from which data can be read with overlapped (asynchronous) I/O.
    """

    def readFromHandle(bufflist, evt):
        """
        Read into the given buffers from this handle.

        @param bufflist: the buffers to read into
        @type bufflist: list of objects implementing the read/write buffer protocol

        @param evt: an IOCP Event object

        @return: tuple (return code, number of bytes read)
        """
+
+
class IWriteHandle(Interface):
    """
    A handle to which data can be written with overlapped (asynchronous) I/O.
    """

    def writeToHandle(buff, evt):
        """
        Write the given buffer to this handle.

        @param buff: the buffer to write
        @type buff: any object implementing the buffer protocol

        @param evt: an IOCP Event object

        @return: tuple (return code, number of bytes written)
        """
+
+
class IReadWriteHandle(IReadHandle, IWriteHandle):
    """
    A handle supporting both overlapped reads and overlapped writes.
    """

    pass
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/iocpsupport.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/iocpsupport.py
new file mode 100644
index 0000000000..826c976487
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/iocpsupport.py
@@ -0,0 +1,27 @@
+__all__ = [
+ "CompletionPort",
+ "Event",
+ "accept",
+ "connect",
+ "get_accept_addrs",
+ "have_connectex",
+ "makesockaddr",
+ "maxAddrLen",
+ "recv",
+ "recvfrom",
+ "send",
+]
+
+from twisted_iocpsupport.iocpsupport import ( # type: ignore[import]
+ CompletionPort,
+ Event,
+ accept,
+ connect,
+ get_accept_addrs,
+ have_connectex,
+ makesockaddr,
+ maxAddrLen,
+ recv,
+ recvfrom,
+ send,
+)
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/notes.txt b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/notes.txt
new file mode 100644
index 0000000000..4caffb882f
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/notes.txt
@@ -0,0 +1,24 @@
+test specifically:
+failed accept error message -- similar to test_tcp_internals
+immediate success on accept/connect/recv, including Event.ignore
+parametrize iocpsupport somehow -- via reactor?
+
+do:
+break handling -- WaitForSingleObject on the IOCP handle?
+iovecs for write buffer
+do not wait for a mainloop iteration if resumeProducing (in _handleWrite) does startWriting
+don't addActiveHandle in every call to startWriting/startReading
+iocpified process support
+ win32er-in-a-thread (or run GQCS in a thread -- it can't receive SIGBREAK)
+blocking in sendto() -- I think Windows can do that, especially with local UDP
+
+buildbot:
+run in vmware
+start from a persistent snapshot
+
+use a stub inside the vm to svnup/run tests/collect stdio
+lift logs through SMB? or ship them via tcp beams to the VM host
+
+have a timeout on the test run
+if we time out, take a screenshot, save it, kill the VM
+
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/reactor.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/reactor.py
new file mode 100644
index 0000000000..e9c3716219
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/reactor.py
@@ -0,0 +1,285 @@
+# -*- test-case-name: twisted.internet.test.test_iocp -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Reactor that uses IO completion ports
+"""
+
+
+import socket
+import sys
+import warnings
+from typing import Tuple, Type
+
+from zope.interface import implementer
+
+from twisted.internet import base, error, interfaces, main
+from twisted.internet._dumbwin32proc import Process
+from twisted.internet.iocpreactor import iocpsupport as _iocp, tcp, udp
+from twisted.internet.iocpreactor.const import WAIT_TIMEOUT
+from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
+from twisted.python import failure, log
+
+try:
+ from twisted.protocols.tls import TLSMemoryBIOFactory as _TLSMemoryBIOFactory
+except ImportError:
+ TLSMemoryBIOFactory = None
+ # Either pyOpenSSL isn't installed, or it is too old for this code to work.
+ # The reactor won't provide IReactorSSL.
+ _extraInterfaces: Tuple[Type[interfaces.IReactorSSL], ...] = ()
+ warnings.warn(
+ "pyOpenSSL 0.10 or newer is required for SSL support in iocpreactor. "
+ "It is missing, so the reactor will not support SSL APIs."
+ )
+else:
+ TLSMemoryBIOFactory = _TLSMemoryBIOFactory
+ _extraInterfaces = (interfaces.IReactorSSL,)
+
+MAX_TIMEOUT = 2000 # 2 seconds, see doIteration for explanation
+
+EVENTS_PER_LOOP = 1000 # XXX: what's a good value here?
+
+# keys to associate with normal and waker events
+KEY_NORMAL, KEY_WAKEUP = range(2)
+
+_NO_GETHANDLE = error.ConnectionFdescWentAway("Handler has no getFileHandle method")
+_NO_FILEDESC = error.ConnectionFdescWentAway("Filedescriptor went away")
+
+
@implementer(
    interfaces.IReactorTCP,
    interfaces.IReactorUDP,
    interfaces.IReactorMulticast,
    interfaces.IReactorProcess,
    *_extraInterfaces,
)
class IOCPReactor(base.ReactorBase, _ThreadedWin32EventsMixin):
    """
    A reactor that dispatches all socket I/O through a single Windows I/O
    completion port (see L{doIteration}).
    """

    # The CompletionPort; replaced by a real instance in __init__.
    port = None

    def __init__(self):
        base.ReactorBase.__init__(self)
        self.port = _iocp.CompletionPort()
        # set of handles currently registered, drained by removeAll()
        self.handles = set()

    def addActiveHandle(self, handle):
        """
        Track C{handle} so it can be cleaned up by C{removeAll}.
        """
        self.handles.add(handle)

    def removeActiveHandle(self, handle):
        """
        Stop tracking C{handle}; a no-op if it was not tracked.
        """
        self.handles.discard(handle)

    def doIteration(self, timeout):
        """
        Poll the IO completion port for new events.
        """
        # This function sits and waits for an IO completion event.
        #
        # There are two requirements: process IO events as soon as they arrive
        # and process ctrl-break from the user in a reasonable amount of time.
        #
        # There are three kinds of waiting.
        # 1) GetQueuedCompletionStatus (self.port.getEvent) to wait for IO
        # events only.
        # 2) Msg* family of wait functions that can stop waiting when
        # ctrl-break is detected (then, I think, Python converts it into a
        # KeyboardInterrupt)
        # 3) *Ex family of wait functions that put the thread into an
        # "alertable" wait state which is supposedly triggered by IO completion
        #
        # 2) and 3) can be combined. Trouble is, my IO completion is not
        # causing 3) to trigger, possibly because I do not use an IO completion
        # callback. Windows is weird.
        # There are two ways to handle this. I could use MsgWaitForSingleObject
        # here and GetQueuedCompletionStatus in a thread. Or I could poll with
        # a reasonable interval. Guess what! Threads are hard.

        processed_events = 0
        if timeout is None:
            timeout = MAX_TIMEOUT
        else:
            # getEvent takes milliseconds, capped so ctrl-break stays responsive
            timeout = min(MAX_TIMEOUT, int(1000 * timeout))
        rc, numBytes, key, evt = self.port.getEvent(timeout)
        while 1:
            if rc == WAIT_TIMEOUT:
                break
            if key != KEY_WAKEUP:
                assert key == KEY_NORMAL
                log.callWithLogger(
                    evt.owner, self._callEventCallback, rc, numBytes, evt
                )
                processed_events += 1
                if processed_events >= EVENTS_PER_LOOP:
                    # yield back to the reactor loop so timed calls still run
                    break
            # subsequent polls do not block; drain whatever is already queued
            rc, numBytes, key, evt = self.port.getEvent(0)

    def _callEventCallback(self, rc, numBytes, evt):
        """
        Invoke an event's completion callback, dropping the connection if the
        owner's handle has gone away or the callback raised.
        """
        owner = evt.owner
        why = None
        try:
            evt.callback(rc, numBytes, evt)
            handfn = getattr(owner, "getFileHandle", None)
            if not handfn:
                why = _NO_GETHANDLE
            elif handfn() == -1:
                why = _NO_FILEDESC
            if why:
                return  # ignore handles that were closed
        except BaseException:
            why = sys.exc_info()[1]
            log.err()
        if why:
            owner.loseConnection(failure.Failure(why))

    def installWaker(self):
        # No waker object is needed: wakeUp() posts directly to the
        # completion port, which is safe from other threads.
        pass

    def wakeUp(self):
        self.port.postEvent(0, KEY_WAKEUP, None)

    def registerHandle(self, handle):
        """
        Associate a raw OS handle with the completion port.
        """
        self.port.addHandle(handle, KEY_NORMAL)

    def createSocket(self, af, stype):
        """
        Create a socket already registered with the completion port.
        """
        skt = socket.socket(af, stype)
        self.registerHandle(skt.fileno())
        return skt

    def listenTCP(self, port, factory, backlog=50, interface=""):
        """
        @see: twisted.internet.interfaces.IReactorTCP.listenTCP
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        @see: twisted.internet.interfaces.IReactorTCP.connectTCP
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    if TLSMemoryBIOFactory is not None:

        def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
            """
            @see: twisted.internet.interfaces.IReactorSSL.listenSSL
            """
            port = self.listenTCP(
                port,
                TLSMemoryBIOFactory(contextFactory, False, factory),
                backlog,
                interface,
            )
            port._type = "TLS"
            return port

        def connectSSL(
            self, host, port, factory, contextFactory, timeout=30, bindAddress=None
        ):
            """
            @see: twisted.internet.interfaces.IReactorSSL.connectSSL
            """
            return self.connectTCP(
                host,
                port,
                TLSMemoryBIOFactory(contextFactory, True, factory),
                timeout,
                bindAddress,
            )

    else:

        def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
            """
            Non-implementation of L{IReactorSSL.listenSSL}. Some dependency
            is not satisfied. This implementation always raises
            L{NotImplementedError}.
            """
            raise NotImplementedError(
                "pyOpenSSL 0.10 or newer is required for SSL support in "
                "iocpreactor. It is missing, so the reactor does not support "
                "SSL APIs."
            )

        def connectSSL(
            self, host, port, factory, contextFactory, timeout=30, bindAddress=None
        ):
            """
            Non-implementation of L{IReactorSSL.connectSSL}. Some dependency
            is not satisfied. This implementation always raises
            L{NotImplementedError}.
            """
            raise NotImplementedError(
                "pyOpenSSL 0.10 or newer is required for SSL support in "
                "iocpreactor. It is missing, so the reactor does not support "
                "SSL APIs."
            )

    def listenUDP(self, port, protocol, interface="", maxPacketSize=8192):
        """
        Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    def listenMulticast(
        self, port, protocol, interface="", maxPacketSize=8192, listenMultiple=False
    ):
        """
        Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(
            port, protocol, interface, maxPacketSize, self, listenMultiple
        )
        p.startListening()
        return p

    # NOTE(review): env={} is a shared mutable default; it appears to be
    # passed through unmodified to Process -- confirm it is never mutated.
    def spawnProcess(
        self,
        processProtocol,
        executable,
        args=(),
        env={},
        path=None,
        uid=None,
        gid=None,
        usePTY=0,
        childFDs=None,
    ):
        """
        Spawn a process.
        """
        # uid/gid/PTY/childFD features are POSIX-only; reject them explicitly
        if uid is not None:
            raise ValueError("Setting UID is unsupported on this platform.")
        if gid is not None:
            raise ValueError("Setting GID is unsupported on this platform.")
        if usePTY:
            raise ValueError("PTYs are unsupported on this platform.")
        if childFDs is not None:
            raise ValueError(
                "Custom child file descriptor mappings are unsupported on "
                "this platform."
            )
        return Process(self, processProtocol, executable, args, env, path)

    def removeAll(self):
        """
        Return (and forget) every tracked handle; used during shutdown.
        """
        res = list(self.handles)
        self.handles.clear()
        return res
+
+
def install():
    """
    Create an L{IOCPReactor} and install it as the global Twisted reactor.
    """
    main.installReactor(IOCPReactor())
+
+
+__all__ = ["IOCPReactor", "install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/tcp.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/tcp.py
new file mode 100644
index 0000000000..aadd685269
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/tcp.py
@@ -0,0 +1,608 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+TCP support for IOCP reactor
+"""
+
+import errno
+import socket
+import struct
+from typing import Optional
+
+from zope.interface import classImplements, implementer
+
+from twisted.internet import address, defer, error, interfaces, main
+from twisted.internet.abstract import _LogOwner, isIPv6Address
+from twisted.internet.iocpreactor import abstract, iocpsupport as _iocp
+from twisted.internet.iocpreactor.const import (
+ ERROR_CONNECTION_REFUSED,
+ ERROR_IO_PENDING,
+ ERROR_NETWORK_UNREACHABLE,
+ SO_UPDATE_ACCEPT_CONTEXT,
+ SO_UPDATE_CONNECT_CONTEXT,
+)
+from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
+from twisted.internet.protocol import Protocol
+from twisted.internet.tcp import (
+ Connector as TCPConnector,
+ _AbortingMixin,
+ _BaseBaseClient,
+ _BaseTCPClient,
+ _getsockname,
+ _resolveIPv6,
+ _SocketCloser,
+)
+from twisted.python import failure, log, reflect
+
+try:
+ from twisted.internet._newtls import startTLS as __startTLS
+except ImportError:
+ _startTLS = None
+else:
+ _startTLS = __startTLS
+
+
+# ConnectEx returns these. XXX: find out what it does for timeout
+connectExErrors = {
+ ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED, # type: ignore[attr-defined]
+ ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH, # type: ignore[attr-defined]
+}
+
+
@implementer(IReadWriteHandle, interfaces.ITCPTransport, interfaces.ISystemHandle)
class Connection(abstract.FileHandle, _SocketCloser, _AbortingMixin):
    """
    @ivar TLS: C{False} to indicate the connection is in normal TCP mode,
        C{True} to indicate that TLS has been started and that operations must
        be routed through the L{TLSMemoryBIOProtocol} instance.
    """

    TLS = False

    def __init__(self, sock, proto, reactor=None):
        """
        @param sock: the connected socket object.
        @param proto: the protocol that receives C{dataReceived} calls.
        @param reactor: the IOCP reactor; defaults to the global reactor.
        """
        abstract.FileHandle.__init__(self, reactor)
        self.socket = sock
        # bound method: getFileHandle() returns the socket's descriptor
        self.getFileHandle = sock.fileno
        self.protocol = proto

    def getHandle(self):
        """
        Return the underlying socket object (ISystemHandle).
        """
        return self.socket

    def dataReceived(self, rbuffer):
        """
        @param rbuffer: Data received.
        @type rbuffer: L{bytes} or L{bytearray}
        """
        if isinstance(rbuffer, bytes):
            pass
        elif isinstance(rbuffer, bytearray):
            # XXX: some day, we'll have protocols that can handle raw buffers
            rbuffer = bytes(rbuffer)
        else:
            # Fixed: previously concatenated str with a type object, which
            # itself raised "can only concatenate str" and hid this message.
            raise TypeError(f"data must be bytes or bytearray, not {type(rbuffer)}")

        self.protocol.dataReceived(rbuffer)

    def readFromHandle(self, bufflist, evt):
        """
        Start an overlapped receive into C{bufflist} via C{_iocp.recv}.
        """
        return _iocp.recv(self.getFileHandle(), bufflist, evt)

    def writeToHandle(self, buff, evt):
        """
        Send C{buff} to current file handle using C{_iocp.send}. The buffer
        sent is limited to a size of C{self.SEND_LIMIT}.
        """
        writeView = memoryview(buff)
        return _iocp.send(
            self.getFileHandle(), writeView[0 : self.SEND_LIMIT].tobytes(), evt
        )

    def _closeWriteConnection(self):
        """
        Shut down the write side of the socket and notify a half-closeable
        protocol, if the protocol provides that interface.
        """
        try:
            self.socket.shutdown(1)
        except OSError:
            pass
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except BaseException:
                f = failure.Failure()
                log.err()
                self.connectionLost(f)

    def readConnectionLost(self, reason):
        """
        Notify a half-closeable protocol of the read side closing; otherwise
        drop the whole connection.
        """
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except BaseException:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """
        Close the socket and break the reference cycle with the protocol.
        """
        if self.disconnected:
            return
        abstract.FileHandle.connectionLost(self, reason)
        # an aborted connection is closed without the lingering cleanup
        isClean = reason is None or not reason.check(error.ConnectionAborted)
        self._closeSocket(isClean)
        protocol = self.protocol
        del self.protocol
        del self.socket
        del self.getFileHandle
        protocol.connectionLost(reason)

    def logPrefix(self):
        """
        Return the prefix to log with when I own the logging thread.
        """
        return self.logstr

    def getTcpNoDelay(self):
        # ITCPTransport.getTcpNoDelay
        return bool(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    def setTcpNoDelay(self, enabled):
        # ITCPTransport.setTcpNoDelay
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)

    def getTcpKeepAlive(self):
        # ITCPTransport.getTcpKeepAlive
        return bool(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE))

    def setTcpKeepAlive(self, enabled):
        # ITCPTransport.setTcpKeepAlive
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)

    if _startTLS is not None:

        def startTLS(self, contextFactory, normal=True):
            """
            @see: L{ITLSTransport.startTLS}
            """
            _startTLS(self, contextFactory, normal, abstract.FileHandle)

    def write(self, data):
        """
        Write some data, either directly to the underlying handle or, if TLS
        has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
        send.

        @see: L{twisted.internet.interfaces.ITransport.write}
        """
        if self.disconnected:
            return
        if self.TLS:
            self.protocol.write(data)
        else:
            abstract.FileHandle.write(self, data)

    def writeSequence(self, iovec):
        """
        Write some data, either directly to the underlying handle or, if TLS
        has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
        send.

        @see: L{twisted.internet.interfaces.ITransport.writeSequence}
        """
        if self.disconnected:
            return
        if self.TLS:
            self.protocol.writeSequence(iovec)
        else:
            abstract.FileHandle.writeSequence(self, iovec)

    def loseConnection(self, reason=None):
        """
        Close the underlying handle or, if TLS has been started, first shut it
        down.

        @see: L{twisted.internet.interfaces.ITransport.loseConnection}
        """
        if self.TLS:
            if self.connected and not self.disconnecting:
                self.protocol.loseConnection()
        else:
            abstract.FileHandle.loseConnection(self, reason)

    def registerProducer(self, producer, streaming):
        """
        Register a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if self.TLS:
            # Registering a producer before we're connected shouldn't be a
            # problem. If we end up with a write(), that's already handled in
            # the write() code above, and there are no other potential
            # side-effects.
            self.protocol.registerProducer(producer, streaming)
        else:
            abstract.FileHandle.registerProducer(self, producer, streaming)

    def unregisterProducer(self):
        """
        Unregister a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if self.TLS:
            self.protocol.unregisterProducer()
        else:
            abstract.FileHandle.unregisterProducer(self)

    def getHost(self):
        # ITCPTransport.getHost
        # NOTE(review): intentionally returns None here; address lookup
        # appears to be supplied by subclasses -- confirm before relying on it.
        pass

    def getPeer(self):
        # ITCPTransport.getPeer
        # NOTE(review): intentionally returns None here; address lookup
        # appears to be supplied by subclasses -- confirm before relying on it.
        pass
+
+
+if _startTLS is not None:
+ classImplements(Connection, interfaces.ITLSTransport)
+
+
class Client(_BaseBaseClient, _BaseTCPClient, Connection):
    """
    @ivar _tlsClientDefault: Always C{True}, indicating that this is a client
    connection, and by default when TLS is negotiated this class will act as
    a TLS client.
    """

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    _tlsClientDefault = True
    _commonConnection = Connection

    def __init__(self, host, port, bindAddress, connector, reactor):
        """
        @param host: the host to connect to.
        @param port: the port number to connect to.
        @param bindAddress: local (host, port) to bind to, or C{None}.
        @param connector: the L{Connector} that initiated this connection.
        @param reactor: the IOCP reactor driving this connection.
        """
        # ConnectEx documentation says socket _has_ to be bound
        if bindAddress is None:
            bindAddress = ("", 0)
        self.reactor = reactor  # createInternetSocket needs this
        _BaseTCPClient.__init__(self, host, port, bindAddress, connector, reactor)

    def createInternetSocket(self):
        """
        Create a socket registered with the IOCP reactor.

        @see: L{_BaseTCPClient}
        """
        return self.reactor.createSocket(self.addressFamily, self.socketType)

    def _collectSocketDetails(self):
        """
        Clean up potentially circular references to the socket and to its
        C{getFileHandle} method.

        @see: L{_BaseBaseClient}
        """
        del self.socket, self.getFileHandle

    def _stopReadingAndWriting(self):
        """
        Remove the active handle from the reactor.

        @see: L{_BaseBaseClient}
        """
        self.reactor.removeActiveHandle(self)

    def cbConnect(self, rc, data, evt):
        """
        Completion callback for ConnectEx: on failure, translate the error
        code and fail the connection attempt; on success, finish the socket
        setup, build the protocol and start reading.
        """
        if rc:
            # map ConnectEx-specific error codes to their winsock equivalents
            rc = connectExErrors.get(rc, rc)
            self.failIfNotConnected(
                error.getConnectError((rc, errno.errorcode.get(rc, "Unknown error")))
            )
        else:
            # required after ConnectEx so the socket behaves like one
            # connected via connect() (getpeername() etc.)
            self.socket.setsockopt(
                socket.SOL_SOCKET,
                SO_UPDATE_CONNECT_CONTEXT,
                struct.pack("P", self.socket.fileno()),
            )
            self.protocol = self.connector.buildProtocol(self.getPeer())
            self.connected = True
            logPrefix = self._getLogPrefix(self.protocol)
            self.logstr = logPrefix + ",client"
            if self.protocol is None:
                # Factory.buildProtocol is allowed to return None. In that
                # case, make up a protocol to satisfy the rest of the
                # implementation; connectionLost is going to be called on
                # something, for example. This is easier than adding special
                # case support for a None protocol throughout the rest of the
                # transport implementation.
                self.protocol = Protocol()
                # But dispose of the connection quickly.
                self.loseConnection()
            else:
                self.protocol.makeConnection(self)
                self.startReading()

    def doConnect(self):
        """
        Issue the overlapped ConnectEx call; C{cbConnect} fires either from
        the completion port or directly on immediate failure.
        """
        if not hasattr(self, "connector"):
            # this happens if we connector.stopConnecting in
            # factory.startedConnecting
            return
        assert _iocp.have_connectex
        self.reactor.addActiveHandle(self)
        evt = _iocp.Event(self.cbConnect, self)

        rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
        if rc and rc != ERROR_IO_PENDING:
            self.cbConnect(rc, 0, evt)
+
+
+class Server(Connection):
+ """
+ Serverside socket-stream connection class.
+
+ I am a serverside network connection transport; a socket which came from an
+ accept() on a server.
+
+ @ivar _tlsClientDefault: Always C{False}, indicating that this is a server
+ connection, and by default when TLS is negotiated this class will act as
+ a TLS server.
+ """
+
+ _tlsClientDefault = False
+
+ def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor):
+ """
+ Server(sock, protocol, client, server, sessionno)
+
+ Initialize me with a socket, a protocol, a descriptor for my peer (a
+ tuple of host, port describing the other end of the connection), an
+ instance of Port, and a session number.
+ """
+ Connection.__init__(self, sock, protocol, reactor)
+ self.serverAddr = serverAddr
+ self.clientAddr = clientAddr
+ self.sessionno = sessionno
+ logPrefix = self._getLogPrefix(self.protocol)
+ self.logstr = f"{logPrefix},{sessionno},{self.clientAddr.host}"
+ self.repstr: str = "<{} #{} on {}>".format(
+ self.protocol.__class__.__name__,
+ self.sessionno,
+ self.serverAddr.port,
+ )
+ self.connected = True
+ self.startReading()
+
+ def __repr__(self) -> str:
+ """
+ A string representation of this connection.
+ """
+ return self.repstr
+
+ def getHost(self):
+ """
+ Returns an IPv4Address.
+
+ This indicates the server's address.
+ """
+ return self.serverAddr
+
+ def getPeer(self):
+ """
+ Returns an IPv4Address.
+
+ This indicates the client's address.
+ """
+ return self.clientAddr
+
+
+class Connector(TCPConnector):
+    """
+    IOCP-flavoured TCP connector: builds L{Client} transports.
+    """
+
+    def _makeTransport(self):
+        # Hook used by the base TCPConnector to create the actual transport.
+        return Client(self.host, self.port, self.bindAddress, self, self.reactor)
+
+
+@implementer(interfaces.IListeningPort)
+class Port(_SocketCloser, _LogOwner):
+    """
+    A TCP server port, accepting connections via the Windows AcceptEx
+    extension on the IOCP reactor.
+    """
+
+    connected = False
+    disconnected = False
+    disconnecting = False
+    addressFamily = socket.AF_INET
+    socketType = socket.SOCK_STREAM
+    _addressType = address.IPv4Address
+    sessionno = 0
+
+    # Actual port number being listened on, only set to a non-None
+    # value when we are actually listening.
+    _realPortNumber: Optional[int] = None
+
+    # A string describing the connections which will be created by this port.
+    # Normally this is C{"TCP"}, since this is a TCP port, but when the TLS
+    # implementation re-uses this class it overrides the value with C{"TLS"}.
+    # Only used for logging.
+    _type = "TCP"
+
+    def __init__(self, port, factory, backlog=50, interface="", reactor=None):
+        """
+        @param port: numeric port to listen on.
+        @param factory: protocol factory used to build a protocol per
+            accepted connection.
+        @param backlog: listen() backlog size.
+        @param interface: local interface address; an IPv6 literal switches
+            the port to AF_INET6.
+        """
+        self.port = port
+        self.factory = factory
+        self.backlog = backlog
+        self.interface = interface
+        self.reactor = reactor
+        if isIPv6Address(interface):
+            self.addressFamily = socket.AF_INET6
+            self._addressType = address.IPv6Address
+
+    def __repr__(self) -> str:
+        if self._realPortNumber is not None:
+            return "<{} of {} on {}>".format(
+                self.__class__,
+                self.factory.__class__,
+                self._realPortNumber,
+            )
+        else:
+            return "<{} of {} (not listening)>".format(
+                self.__class__,
+                self.factory.__class__,
+            )
+
+    def startListening(self):
+        """
+        Bind the socket, start the factory and begin accepting connections.
+
+        @raise CannotListenError: if the socket cannot be bound.
+        """
+        try:
+            skt = self.reactor.createSocket(self.addressFamily, self.socketType)
+            # TODO: resolve self.interface if necessary
+            if self.addressFamily == socket.AF_INET6:
+                addr = _resolveIPv6(self.interface, self.port)
+            else:
+                addr = (self.interface, self.port)
+            skt.bind(addr)
+        except OSError as le:
+            raise error.CannotListenError(self.interface, self.port, le)
+
+        # Size of the sockaddr buffers AcceptEx will need for this family.
+        self.addrLen = _iocp.maxAddrLen(skt.fileno())
+
+        # Make sure that if we listened on port 0, we update that to
+        # reflect what the OS actually assigned us.
+        self._realPortNumber = skt.getsockname()[1]
+
+        log.msg(
+            "%s starting on %s"
+            % (self._getLogPrefix(self.factory), self._realPortNumber)
+        )
+
+        self.factory.doStart()
+        skt.listen(self.backlog)
+        self.connected = True
+        self.disconnected = False
+        self.reactor.addActiveHandle(self)
+        self.socket = skt
+        self.getFileHandle = self.socket.fileno
+        self.doAccept()
+
+    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
+        """
+        Stop accepting connections on this port.
+
+        This will shut down my socket and call self.connectionLost().
+        It returns a deferred which will fire successfully when the
+        port is actually closed.
+        """
+        # NOTE: the default Failure is created once at import time and shared
+        # by all calls (standard Python default-argument semantics).
+        self.disconnecting = True
+        if self.connected:
+            self.deferred = defer.Deferred()
+            self.reactor.callLater(0, self.connectionLost, connDone)
+        return self.deferred
+
+    stopListening = loseConnection
+
+    def _logConnectionLostMsg(self):
+        """
+        Log message for closing port
+        """
+        log.msg(f"({self._type} Port {self._realPortNumber} Closed)")
+
+    def connectionLost(self, reason):
+        """
+        Cleans up the socket.
+        """
+        self._logConnectionLostMsg()
+        self._realPortNumber = None
+        d = None
+        if hasattr(self, "deferred"):
+            d = self.deferred
+            del self.deferred
+
+        self.disconnected = True
+        self.reactor.removeActiveHandle(self)
+        self.connected = False
+        self._closeSocket(True)
+        # Break the reference cycle created in startListening().
+        del self.socket
+        del self.getFileHandle
+
+        try:
+            self.factory.doStop()
+        except BaseException:
+            # Report factory shutdown failures through the deferred if a
+            # caller is waiting on one, otherwise propagate.
+            self.disconnecting = False
+            if d is not None:
+                d.errback(failure.Failure())
+            else:
+                raise
+        else:
+            self.disconnecting = False
+            if d is not None:
+                d.callback(None)
+
+    def logPrefix(self):
+        """
+        Returns the name of my class, to prefix log entries with.
+        """
+        return reflect.qual(self.factory.__class__)
+
+    def getHost(self):
+        """
+        Returns an IPv4Address or IPv6Address.
+
+        This indicates the server's address.
+        """
+        return self._addressType("TCP", *_getsockname(self.socket))
+
+    def cbAccept(self, rc, data, evt):
+        # IOCP completion callback: handle the finished accept, then queue
+        # the next one unless we are shutting down.
+        self.handleAccept(rc, evt)
+        if not (self.disconnecting or self.disconnected):
+            self.doAccept()
+
+    def handleAccept(self, rc, evt):
+        """
+        Process one completed AcceptEx operation.
+
+        @return: C{True} if a connection was accepted, C{False} otherwise.
+        """
+        if self.disconnecting or self.disconnected:
+            return False
+
+        # possible errors:
+        # (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
+        if rc:
+            log.msg(
+                "Could not accept new connection -- %s (%s)"
+                % (errno.errorcode.get(rc, "unknown error"), rc)
+            )
+            return False
+        else:
+            # Inherit the properties from the listening port socket as
+            # documented in the `Remarks` section of AcceptEx.
+            # https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
+            # In this way we can call getsockname and getpeername on the
+            # accepted socket.
+            evt.newskt.setsockopt(
+                socket.SOL_SOCKET,
+                SO_UPDATE_ACCEPT_CONTEXT,
+                struct.pack("P", self.socket.fileno()),
+            )
+            family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(), evt.buff)
+            assert family == self.addressFamily
+
+            # Build an IPv6 address that includes the scopeID, if necessary
+            if "%" in lAddr[0]:
+                scope = int(lAddr[0].split("%")[1])
+                lAddr = (lAddr[0], lAddr[1], 0, scope)
+            if "%" in rAddr[0]:
+                scope = int(rAddr[0].split("%")[1])
+                rAddr = (rAddr[0], rAddr[1], 0, scope)
+
+            protocol = self.factory.buildProtocol(self._addressType("TCP", *rAddr))
+            if protocol is None:
+                # The factory declined the connection.
+                evt.newskt.close()
+            else:
+                s = self.sessionno
+                self.sessionno = s + 1
+                transport = Server(
+                    evt.newskt,
+                    protocol,
+                    self._addressType("TCP", *rAddr),
+                    self._addressType("TCP", *lAddr),
+                    s,
+                    self.reactor,
+                )
+                protocol.makeConnection(transport)
+            return True
+
+    def doAccept(self):
+        """
+        Queue an overlapped AcceptEx with a freshly created socket.
+        """
+        evt = _iocp.Event(self.cbAccept, self)
+
+        # see AcceptEx documentation
+        evt.buff = buff = bytearray(2 * (self.addrLen + 16))
+
+        evt.newskt = newskt = self.reactor.createSocket(
+            self.addressFamily, self.socketType
+        )
+        rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)
+
+        if rc and rc != ERROR_IO_PENDING:
+            # Immediate failure or completion: Windows will not fire the
+            # callback itself, so handle it here.
+            self.handleAccept(rc, evt)
diff --git a/contrib/python/Twisted/py3/twisted/internet/iocpreactor/udp.py b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/udp.py
new file mode 100644
index 0000000000..59c5fefb4b
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/iocpreactor/udp.py
@@ -0,0 +1,428 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+UDP support for IOCP reactor
+"""
+
+import errno
+import socket
+import struct
+import warnings
+from typing import Optional
+
+from zope.interface import implementer
+
+from twisted.internet import address, defer, error, interfaces
+from twisted.internet.abstract import isIPAddress, isIPv6Address
+from twisted.internet.iocpreactor import abstract, iocpsupport as _iocp
+from twisted.internet.iocpreactor.const import (
+ ERROR_CONNECTION_REFUSED,
+ ERROR_IO_PENDING,
+ ERROR_PORT_UNREACHABLE,
+)
+from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
+from twisted.python import failure, log
+
+
+@implementer(
+ IReadWriteHandle,
+ interfaces.IListeningPort,
+ interfaces.IUDPTransport,
+ interfaces.ISystemHandle,
+)
+class Port(abstract.FileHandle):
+ """
+ UDP port, listening for packets.
+
+ @ivar addressFamily: L{socket.AF_INET} or L{socket.AF_INET6}, depending on
+ whether this port is listening on an IPv4 address or an IPv6 address.
+ """
+
+ addressFamily = socket.AF_INET
+ socketType = socket.SOCK_DGRAM
+ dynamicReadBuffers = False
+
+ # Actual port number being listened on, only set to a non-None
+ # value when we are actually listening.
+ _realPortNumber: Optional[int] = None
+
+ def __init__(self, port, proto, interface="", maxPacketSize=8192, reactor=None):
+ """
+ Initialize with a numeric port to listen on.
+ """
+ self.port = port
+ self.protocol = proto
+ self.readBufferSize = maxPacketSize
+ self.interface = interface
+ self.setLogStr()
+ self._connectedAddr = None
+ self._setAddressFamily()
+
+ abstract.FileHandle.__init__(self, reactor)
+
+ skt = socket.socket(self.addressFamily, self.socketType)
+ addrLen = _iocp.maxAddrLen(skt.fileno())
+ self.addressBuffer = bytearray(addrLen)
+ # WSARecvFrom takes an int
+ self.addressLengthBuffer = bytearray(struct.calcsize("i"))
+
+ def _setAddressFamily(self):
+ """
+ Resolve address family for the socket.
+ """
+ if isIPv6Address(self.interface):
+ self.addressFamily = socket.AF_INET6
+ elif isIPAddress(self.interface):
+ self.addressFamily = socket.AF_INET
+ elif self.interface:
+ raise error.InvalidAddressError(
+ self.interface, "not an IPv4 or IPv6 address"
+ )
+
+ def __repr__(self) -> str:
+ if self._realPortNumber is not None:
+ return f"<{self.protocol.__class__} on {self._realPortNumber}>"
+ else:
+ return f"<{self.protocol.__class__} not connected>"
+
+ def getHandle(self):
+ """
+ Return a socket object.
+ """
+ return self.socket
+
+ def startListening(self):
+ """
+ Create and bind my socket, and begin listening on it.
+
+ This is called on unserialization, and must be called after creating a
+ server to begin listening on the specified port.
+ """
+ self._bindSocket()
+ self._connectToProtocol()
+
+ def createSocket(self):
+ return self.reactor.createSocket(self.addressFamily, self.socketType)
+
+ def _bindSocket(self):
+ try:
+ skt = self.createSocket()
+ skt.bind((self.interface, self.port))
+ except OSError as le:
+ raise error.CannotListenError(self.interface, self.port, le)
+
+ # Make sure that if we listened on port 0, we update that to
+ # reflect what the OS actually assigned us.
+ self._realPortNumber = skt.getsockname()[1]
+
+ log.msg(
+ "%s starting on %s"
+ % (self._getLogPrefix(self.protocol), self._realPortNumber)
+ )
+
+ self.connected = True
+ self.socket = skt
+ self.getFileHandle = self.socket.fileno
+
+ def _connectToProtocol(self):
+ self.protocol.makeConnection(self)
+ self.startReading()
+ self.reactor.addActiveHandle(self)
+
+ def cbRead(self, rc, data, evt):
+ if self.reading:
+ self.handleRead(rc, data, evt)
+ self.doRead()
+
+ def handleRead(self, rc, data, evt):
+ if rc in (
+ errno.WSAECONNREFUSED,
+ errno.WSAECONNRESET,
+ ERROR_CONNECTION_REFUSED,
+ ERROR_PORT_UNREACHABLE,
+ ):
+ if self._connectedAddr:
+ self.protocol.connectionRefused()
+ elif rc:
+ log.msg(
+ "error in recvfrom -- %s (%s)"
+ % (errno.errorcode.get(rc, "unknown error"), rc)
+ )
+ else:
+ try:
+ self.protocol.datagramReceived(
+ bytes(evt.buff[:data]), _iocp.makesockaddr(evt.addr_buff)
+ )
+ except BaseException:
+ log.err()
+
+ def doRead(self):
+ evt = _iocp.Event(self.cbRead, self)
+
+ evt.buff = buff = self._readBuffers[0]
+ evt.addr_buff = addr_buff = self.addressBuffer
+ evt.addr_len_buff = addr_len_buff = self.addressLengthBuffer
+ rc, data = _iocp.recvfrom(
+ self.getFileHandle(), buff, addr_buff, addr_len_buff, evt
+ )
+
+ if rc and rc != ERROR_IO_PENDING:
+ # If the error was not 0 or IO_PENDING then that means recvfrom() hit a
+ # failure condition. In this situation recvfrom() gives us our response
+ # right away and we don't need to wait for Windows to call the callback
+ # on our event. In fact, windows will not call it for us so we must call it
+ # ourselves manually
+ self.reactor.callLater(0, self.cbRead, rc, data, evt)
+
+ def write(self, datagram, addr=None):
+ """
+ Write a datagram.
+
+ @param addr: should be a tuple (ip, port), can be None in connected
+ mode.
+ """
+ if self._connectedAddr:
+ assert addr in (None, self._connectedAddr)
+ try:
+ return self.socket.send(datagram)
+ except OSError as se:
+ no = se.args[0]
+ if no == errno.WSAEINTR:
+ return self.write(datagram)
+ elif no == errno.WSAEMSGSIZE:
+ raise error.MessageLengthError("message too long")
+ elif no in (
+ errno.WSAECONNREFUSED,
+ errno.WSAECONNRESET,
+ ERROR_CONNECTION_REFUSED,
+ ERROR_PORT_UNREACHABLE,
+ ):
+ self.protocol.connectionRefused()
+ else:
+ raise
+ else:
+ assert addr != None
+ if (
+ not isIPAddress(addr[0])
+ and not isIPv6Address(addr[0])
+ and addr[0] != "<broadcast>"
+ ):
+ raise error.InvalidAddressError(
+ addr[0], "write() only accepts IP addresses, not hostnames"
+ )
+ if isIPAddress(addr[0]) and self.addressFamily == socket.AF_INET6:
+ raise error.InvalidAddressError(
+ addr[0], "IPv6 port write() called with IPv4 address"
+ )
+ if isIPv6Address(addr[0]) and self.addressFamily == socket.AF_INET:
+ raise error.InvalidAddressError(
+ addr[0], "IPv4 port write() called with IPv6 address"
+ )
+ try:
+ return self.socket.sendto(datagram, addr)
+ except OSError as se:
+ no = se.args[0]
+ if no == errno.WSAEINTR:
+ return self.write(datagram, addr)
+ elif no == errno.WSAEMSGSIZE:
+ raise error.MessageLengthError("message too long")
+ elif no in (
+ errno.WSAECONNREFUSED,
+ errno.WSAECONNRESET,
+ ERROR_CONNECTION_REFUSED,
+ ERROR_PORT_UNREACHABLE,
+ ):
+ # in non-connected UDP ECONNREFUSED is platform dependent,
+ # I think and the info is not necessarily useful.
+ # Nevertheless maybe we should call connectionRefused? XXX
+ return
+ else:
+ raise
+
+ def writeSequence(self, seq, addr):
+ self.write(b"".join(seq), addr)
+
+ def connect(self, host, port):
+ """
+ 'Connect' to remote server.
+ """
+ if self._connectedAddr:
+ raise RuntimeError(
+ "already connected, reconnecting is not currently supported "
+ "(talk to itamar if you want this)"
+ )
+ if not isIPAddress(host) and not isIPv6Address(host):
+ raise error.InvalidAddressError(host, "not an IPv4 or IPv6 address.")
+ self._connectedAddr = (host, port)
+ self.socket.connect((host, port))
+
+ def _loseConnection(self):
+ self.stopReading()
+ self.reactor.removeActiveHandle(self)
+ if self.connected: # actually means if we are *listening*
+ self.reactor.callLater(0, self.connectionLost)
+
+ def stopListening(self):
+ if self.connected:
+ result = self.d = defer.Deferred()
+ else:
+ result = None
+ self._loseConnection()
+ return result
+
+ def loseConnection(self):
+ warnings.warn(
+ "Please use stopListening() to disconnect port",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.stopListening()
+
+ def connectionLost(self, reason=None):
+ """
+ Cleans up my socket.
+ """
+ log.msg("(UDP Port %s Closed)" % self._realPortNumber)
+ self._realPortNumber = None
+ abstract.FileHandle.connectionLost(self, reason)
+ self.protocol.doStop()
+ self.socket.close()
+ del self.socket
+ del self.getFileHandle
+ if hasattr(self, "d"):
+ self.d.callback(None)
+ del self.d
+
+ def setLogStr(self):
+ """
+ Initialize the C{logstr} attribute to be used by C{logPrefix}.
+ """
+ logPrefix = self._getLogPrefix(self.protocol)
+ self.logstr = "%s (UDP)" % logPrefix
+
+ def logPrefix(self):
+ """
+ Returns the name of my class, to prefix log entries with.
+ """
+ return self.logstr
+
+ def getHost(self):
+ """
+ Return the local address of the UDP connection
+
+ @returns: the local address of the UDP connection
+ @rtype: L{IPv4Address} or L{IPv6Address}
+ """
+ addr = self.socket.getsockname()
+ if self.addressFamily == socket.AF_INET:
+ return address.IPv4Address("UDP", *addr)
+ elif self.addressFamily == socket.AF_INET6:
+ return address.IPv6Address("UDP", *(addr[:2]))
+
+ def setBroadcastAllowed(self, enabled):
+ """
+ Set whether this port may broadcast. This is disabled by default.
+
+ @param enabled: Whether the port may broadcast.
+ @type enabled: L{bool}
+ """
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, enabled)
+
+ def getBroadcastAllowed(self):
+ """
+ Checks if broadcast is currently allowed on this port.
+
+ @return: Whether this port may broadcast.
+ @rtype: L{bool}
+ """
+ return bool(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST))
+
+
+class MulticastMixin:
+    """
+    Implement multicast functionality.
+
+    NOTE(review): uses inet_aton/inet_ntoa and IPPROTO_IP options throughout,
+    so this mixin handles IPv4 multicast only -- confirm before using with
+    AF_INET6 ports.
+    """
+
+    def getOutgoingInterface(self):
+        # Read IP_MULTICAST_IF (packed in a native int) and render it dotted.
+        i = self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF)
+        return socket.inet_ntoa(struct.pack("@i", i))
+
+    def setOutgoingInterface(self, addr):
+        """
+        Returns Deferred of success.
+        """
+        return self.reactor.resolve(addr).addCallback(self._setInterface)
+
+    def _setInterface(self, addr):
+        # Callback: set IP_MULTICAST_IF from a resolved dotted-quad address.
+        i = socket.inet_aton(addr)
+        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
+        return 1
+
+    def getLoopbackMode(self):
+        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP)
+
+    def setLoopbackMode(self, mode):
+        # Option takes a packed byte, not a Python bool.
+        mode = struct.pack("b", bool(mode))
+        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, mode)
+
+    def getTTL(self):
+        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL)
+
+    def setTTL(self, ttl):
+        ttl = struct.pack("B", ttl)
+        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
+
+    def joinGroup(self, addr, interface=""):
+        """
+        Join a multicast group. Returns Deferred of success.
+        """
+        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 1)
+
+    def _joinAddr1(self, addr, interface, join):
+        # Resolve the interface too before applying the membership change.
+        return self.reactor.resolve(interface).addCallback(self._joinAddr2, addr, join)
+
+    def _joinAddr2(self, interface, addr, join):
+        addr = socket.inet_aton(addr)
+        interface = socket.inet_aton(interface)
+        if join:
+            cmd = socket.IP_ADD_MEMBERSHIP
+        else:
+            cmd = socket.IP_DROP_MEMBERSHIP
+        try:
+            self.socket.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
+        except OSError as e:
+            # Returning a Failure from a callback propagates it down the
+            # Deferred chain as an errback.
+            return failure.Failure(error.MulticastJoinError(addr, interface, *e.args))
+
+    def leaveGroup(self, addr, interface=""):
+        """
+        Leave multicast group, return Deferred of success.
+        """
+        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 0)
+
+
+@implementer(interfaces.IMulticastTransport)
+class MulticastPort(MulticastMixin, Port):
+ """
+ UDP Port that supports multicasting.
+ """
+
+ def __init__(
+ self,
+ port,
+ proto,
+ interface="",
+ maxPacketSize=8192,
+ reactor=None,
+ listenMultiple=False,
+ ):
+ Port.__init__(self, port, proto, interface, maxPacketSize, reactor)
+ self.listenMultiple = listenMultiple
+
+ def createSocket(self):
+ skt = Port.createSocket(self)
+ if self.listenMultiple:
+ skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ if hasattr(socket, "SO_REUSEPORT"):
+ skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ return skt
diff --git a/contrib/python/Twisted/py3/twisted/internet/kqreactor.py b/contrib/python/Twisted/py3/twisted/internet/kqreactor.py
new file mode 100644
index 0000000000..a4863b183c
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/kqreactor.py
@@ -0,0 +1,324 @@
+# -*- test-case-name: twisted.test.test_kqueuereactor -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+A kqueue()/kevent() based implementation of the Twisted main loop.
+
+To use this reactor, start your application specifying the kqueue reactor::
+
+ twistd --reactor kqueue ...
+
+To install the event loop from code (and you should do this before any
+connections, listeners or connectors are added)::
+
+ from twisted.internet import kqreactor
+ kqreactor.install()
+"""
+
+import errno
+import select
+
+from zope.interface import Attribute, Interface, declarations, implementer
+
+from twisted.internet import main, posixbase
+from twisted.internet.interfaces import IReactorDaemonize, IReactorFDSet
+from twisted.python import failure, log
+
+try:
+    # This is to keep mypy from complaining
+    # We don't use type: ignore[attr-defined] on import, because mypy only complains
+    # on some platforms, and then the unused ignore is an issue if the undefined
+    # attribute isn't.
+    KQ_EV_ADD = getattr(select, "KQ_EV_ADD")
+    KQ_EV_DELETE = getattr(select, "KQ_EV_DELETE")
+    KQ_EV_EOF = getattr(select, "KQ_EV_EOF")
+    KQ_FILTER_READ = getattr(select, "KQ_FILTER_READ")
+    KQ_FILTER_WRITE = getattr(select, "KQ_FILTER_WRITE")
+except AttributeError as e:
+    # The select module lacks these constants on platforms without kqueue;
+    # re-raise as ImportError so importing this reactor fails cleanly there.
+    raise ImportError(e)
+
+
+class _IKQueue(Interface):
+    """
+    An interface for KQueue implementations.
+
+    Lets tests substitute a fake kqueue/kevent implementation (see the
+    C{_kqueueImpl} hook on L{KQueueReactor}).
+    """
+
+    kqueue = Attribute("An implementation of kqueue(2).")
+    kevent = Attribute("An implementation of kevent(2).")
+
+
+declarations.directlyProvides(select, _IKQueue)
+
+
+@implementer(IReactorFDSet, IReactorDaemonize)
+class KQueueReactor(posixbase.PosixReactorBase):
+    """
+    A reactor that uses kqueue(2)/kevent(2) and relies on Python 2.6 or higher
+    which has built in support for kqueue in the select module.
+
+    @ivar _kq: A C{kqueue} which will be used to check for I/O readiness.
+
+    @ivar _impl: The implementation of L{_IKQueue} to use.
+
+    @ivar _selectables: A dictionary mapping integer file descriptors to
+        instances of L{FileDescriptor} which have been registered with the
+        reactor. All L{FileDescriptor}s which are currently receiving read or
+        write readiness notifications will be present as values in this
+        dictionary.
+
+    @ivar _reads: A set containing integer file descriptors. Values in this
+        set will be registered with C{_kq} for read readiness notifications
+        which will be dispatched to the corresponding L{FileDescriptor}
+        instances in C{_selectables}.
+
+    @ivar _writes: A set containing integer file descriptors. Values in this
+        set will be registered with C{_kq} for write readiness notifications
+        which will be dispatched to the corresponding L{FileDescriptor}
+        instances in C{_selectables}.
+    """
+
+    def __init__(self, _kqueueImpl=select):
+        """
+        Initialize kqueue object, file descriptor tracking dictionaries, and
+        the base class.
+
+        See:
+            - http://docs.python.org/library/select.html
+            - www.freebsd.org/cgi/man.cgi?query=kqueue
+            - people.freebsd.org/~jlemon/papers/kqueue.pdf
+
+        @param _kqueueImpl: The implementation of L{_IKQueue} to use. A
+            hook for testing.
+        """
+        self._impl = _kqueueImpl
+        self._kq = self._impl.kqueue()
+        self._reads = set()
+        self._writes = set()
+        self._selectables = {}
+        posixbase.PosixReactorBase.__init__(self)
+
+    def _updateRegistration(self, fd, filter, op):
+        """
+        Private method for changing kqueue registration on a given FD
+        filtering for events given filter/op. This will never block and
+        returns nothing.
+        """
+        self._kq.control([self._impl.kevent(fd, filter, op)], 0, 0)
+
+    def beforeDaemonize(self):
+        """
+        Implement L{IReactorDaemonize.beforeDaemonize}.
+        """
+        # Twisted-internal method called during daemonization (when application
+        # is started via twistd). This is called right before the magic double
+        # forking done for daemonization. We cleanly close the kqueue() and later
+        # recreate it. This is needed since a) kqueue() are not inherited across
+        # forks and b) twistd will create the reactor already before daemonization
+        # (and will also add at least 1 reader to the reactor, an instance of
+        # twisted.internet.posixbase._UnixWaker).
+        #
+        # See: twisted.scripts._twistd_unix.daemonize()
+        self._kq.close()
+        self._kq = None
+
+    def afterDaemonize(self):
+        """
+        Implement L{IReactorDaemonize.afterDaemonize}.
+        """
+        # Twisted-internal method called during daemonization. This is called right
+        # after daemonization and recreates the kqueue() and any readers/writers
+        # that were added before. Note that you MUST NOT call any reactor methods
+        # in between beforeDaemonize() and afterDaemonize()!
+        self._kq = self._impl.kqueue()
+        for fd in self._reads:
+            self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
+        for fd in self._writes:
+            self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)
+
+    def addReader(self, reader):
+        """
+        Implement L{IReactorFDSet.addReader}.
+        """
+        fd = reader.fileno()
+        if fd not in self._reads:
+            try:
+                self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
+            except OSError:
+                pass
+            finally:
+                # Track the selectable even if kqueue registration failed, so
+                # bookkeeping stays consistent with removeReader/removeAll.
+                self._selectables[fd] = reader
+                self._reads.add(fd)
+
+    def addWriter(self, writer):
+        """
+        Implement L{IReactorFDSet.addWriter}.
+        """
+        fd = writer.fileno()
+        if fd not in self._writes:
+            try:
+                self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)
+            except OSError:
+                pass
+            finally:
+                # Same bookkeeping rationale as addReader.
+                self._selectables[fd] = writer
+                self._writes.add(fd)
+
+    def removeReader(self, reader):
+        """
+        Implement L{IReactorFDSet.removeReader}.
+        """
+        wasLost = False
+        try:
+            fd = reader.fileno()
+        except BaseException:
+            fd = -1
+        if fd == -1:
+            # Descriptor already gone: find the selectable by identity so
+            # our tracking structures can still be cleaned up.
+            for fd, fdes in self._selectables.items():
+                if reader is fdes:
+                    wasLost = True
+                    break
+            else:
+                return
+        if fd in self._reads:
+            self._reads.remove(fd)
+            if fd not in self._writes:
+                del self._selectables[fd]
+            if not wasLost:
+                try:
+                    self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DELETE)
+                except OSError:
+                    pass
+
+    def removeWriter(self, writer):
+        """
+        Implement L{IReactorFDSet.removeWriter}.
+        """
+        wasLost = False
+        try:
+            fd = writer.fileno()
+        except BaseException:
+            fd = -1
+        if fd == -1:
+            # See removeReader: locate a lost descriptor by identity.
+            for fd, fdes in self._selectables.items():
+                if writer is fdes:
+                    wasLost = True
+                    break
+            else:
+                return
+        if fd in self._writes:
+            self._writes.remove(fd)
+            if fd not in self._reads:
+                del self._selectables[fd]
+            if not wasLost:
+                try:
+                    self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DELETE)
+                except OSError:
+                    pass
+
+    def removeAll(self):
+        """
+        Implement L{IReactorFDSet.removeAll}.
+        """
+        return self._removeAll(
+            [self._selectables[fd] for fd in self._reads],
+            [self._selectables[fd] for fd in self._writes],
+        )
+
+    def getReaders(self):
+        """
+        Implement L{IReactorFDSet.getReaders}.
+        """
+        return [self._selectables[fd] for fd in self._reads]
+
+    def getWriters(self):
+        """
+        Implement L{IReactorFDSet.getWriters}.
+        """
+        return [self._selectables[fd] for fd in self._writes]
+
+    def doKEvent(self, timeout):
+        """
+        Poll the kqueue for new events.
+        """
+        if timeout is None:
+            # kevent needs a numeric timeout; poll in 1-second slices when
+            # no timeout was specified.
+            timeout = 1
+
+        try:
+            events = self._kq.control([], len(self._selectables), timeout)
+        except OSError as e:
+            # Since this command blocks for potentially a while, it's possible
+            # EINTR can be raised for various reasons (for example, if the user
+            # hits ^C).
+            if e.errno == errno.EINTR:
+                return
+            else:
+                raise
+
+        _drdw = self._doWriteOrRead
+        for event in events:
+            fd = event.ident
+            try:
+                selectable = self._selectables[fd]
+            except KeyError:
+                # Handles the infrequent case where one selectable's
+                # handler disconnects another.
+                continue
+            else:
+                log.callWithLogger(selectable, _drdw, selectable, fd, event)
+
+    def _doWriteOrRead(self, selectable, fd, event):
+        """
+        Private method called when a FD is ready for reading, writing or was
+        lost. Do the work and raise errors where necessary.
+        """
+        why = None
+        inRead = False
+        (filter, flags, data, fflags) = (
+            event.filter,
+            event.flags,
+            event.data,
+            event.fflags,
+        )
+
+        # NOTE(review): EOF with non-zero data and fflags is treated as a
+        # lost connection; per kqueue(2) fflags carries the socket error on
+        # EV_EOF -- confirm against the target platform's man page.
+        if flags & KQ_EV_EOF and data and fflags:
+            why = main.CONNECTION_LOST
+        else:
+            try:
+                if selectable.fileno() == -1:
+                    inRead = False
+                    why = posixbase._NO_FILEDESC
+                else:
+                    if filter == KQ_FILTER_READ:
+                        inRead = True
+                        why = selectable.doRead()
+                    if filter == KQ_FILTER_WRITE:
+                        inRead = False
+                        why = selectable.doWrite()
+            except BaseException:
+                # Any exception from application code gets logged and will
+                # cause us to disconnect the selectable.
+                why = failure.Failure()
+                log.err(
+                    why,
+                    "An exception was raised from application code"
+                    " while processing a reactor selectable",
+                )
+
+        if why:
+            self._disconnectSelectable(selectable, why, inRead)
+
+    doIteration = doKEvent
+
+
+def install():
+ """
+ Install the kqueue() reactor.
+ """
+ p = KQueueReactor()
+ from twisted.internet.main import installReactor
+
+ installReactor(p)
+
+
+__all__ = ["KQueueReactor", "install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/main.py b/contrib/python/Twisted/py3/twisted/internet/main.py
new file mode 100644
index 0000000000..2a05ac9c6d
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/main.py
@@ -0,0 +1,37 @@
+# -*- test-case-name: twisted.internet.test.test_main -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+Backwards compatibility, and utility functions.
+
+In general, this module should not be used, other than by reactor authors
+who need to use the 'installReactor' method.
+"""
+
+
+from twisted.internet import error
+
+CONNECTION_DONE = error.ConnectionDone("Connection done")
+CONNECTION_LOST = error.ConnectionLost("Connection lost")
+
+
+def installReactor(reactor):
+ """
+ Install reactor C{reactor}.
+
+ @param reactor: An object that provides one or more IReactor* interfaces.
+ """
+ # this stuff should be common to all reactors.
+ import sys
+
+ import twisted.internet
+
+ if "twisted.internet.reactor" in sys.modules:
+ raise error.ReactorAlreadyInstalledError("reactor already installed")
+ twisted.internet.reactor = reactor
+ sys.modules["twisted.internet.reactor"] = reactor
+
+
+__all__ = ["CONNECTION_LOST", "CONNECTION_DONE", "installReactor"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/pollreactor.py b/contrib/python/Twisted/py3/twisted/internet/pollreactor.py
new file mode 100644
index 0000000000..b9f1fb8402
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/pollreactor.py
@@ -0,0 +1,189 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+A poll() based implementation of the twisted main loop.
+
+To install the event loop (and you should do this before any connections,
+listeners or connectors are added)::
+
+ from twisted.internet import pollreactor
+ pollreactor.install()
+"""
+
+
+# System imports
+import errno
+from select import (
+ POLLERR,
+ POLLHUP,
+ POLLIN,
+ POLLNVAL,
+ POLLOUT,
+ error as SelectError,
+ poll,
+)
+
+from zope.interface import implementer
+
+from twisted.internet import posixbase
+from twisted.internet.interfaces import IReactorFDSet
+
+# Twisted imports
+from twisted.python import log
+
+
@implementer(IReactorFDSet)
class PollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    A reactor that uses poll(2).

    @ivar _poller: A L{select.poll} which will be used to check for I/O
        readiness.

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of L{FileDescriptor} which have been registered with the
        reactor. All L{FileDescriptor}s which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A dictionary mapping integer file descriptors to arbitrary
        values (this is essentially a set). Keys in this dictionary will be
        registered with C{_poller} for read readiness notifications which will
        be dispatched to the corresponding L{FileDescriptor} instances in
        C{_selectables}.

    @ivar _writes: A dictionary mapping integer file descriptors to arbitrary
        values (this is essentially a set). Keys in this dictionary will be
        registered with C{_poller} for write readiness notifications which will
        be dispatched to the corresponding L{FileDescriptor} instances in
        C{_selectables}.
    """

    # Event masks required by posixbase._PollLikeMixin._doReadOrWrite.
    _POLL_DISCONNECTED = POLLHUP | POLLERR | POLLNVAL
    _POLL_IN = POLLIN
    _POLL_OUT = POLLOUT

    def __init__(self):
        """
        Initialize polling object, file descriptor tracking dictionaries, and
        the base class.
        """
        self._poller = poll()
        self._selectables = {}
        self._reads = {}
        self._writes = {}
        posixbase.PosixReactorBase.__init__(self)

    def _updateRegistration(self, fd):
        """
        Register/unregister an fd with the poller.

        The poll mask is rebuilt from scratch based on the fd's current
        membership in C{_reads} and C{_writes}.
        """
        try:
            self._poller.unregister(fd)
        except KeyError:
            # The fd was not previously registered with the poller; fine.
            pass

        mask = 0
        if fd in self._reads:
            mask = mask | POLLIN
        if fd in self._writes:
            mask = mask | POLLOUT
        if mask != 0:
            self._poller.register(fd, mask)
        else:
            # Neither read nor write interest remains, so forget the
            # selectable entirely.
            if fd in self._selectables:
                del self._selectables[fd]

    def _dictRemove(self, selectable, mdict):
        # Remove `selectable` from the given interest dict (_reads or
        # _writes) and refresh its poller registration.
        try:
            # the easy way
            fd = selectable.fileno()
            # make sure the fd is actually real. In some situations we can get
            # -1 here.
            mdict[fd]
        except BaseException:
            # the hard way: necessary because fileno() may disappear at any
            # moment, thanks to python's underlying sockets impl
            for fd, fdes in self._selectables.items():
                if selectable is fdes:
                    break
            else:
                # Hmm, maybe not the right course of action? This method can't
                # fail, because it happens inside error detection...
                return
        if fd in mdict:
            del mdict[fd]
            self._updateRegistration(fd)

    def addReader(self, reader):
        """Add a FileDescriptor for notification of data available to read."""
        fd = reader.fileno()
        if fd not in self._reads:
            self._selectables[fd] = reader
            self._reads[fd] = 1
            self._updateRegistration(fd)

    def addWriter(self, writer):
        """Add a FileDescriptor for notification of data available to write."""
        fd = writer.fileno()
        if fd not in self._writes:
            self._selectables[fd] = writer
            self._writes[fd] = 1
            self._updateRegistration(fd)

    def removeReader(self, reader):
        """Remove a Selectable for notification of data available to read."""
        return self._dictRemove(reader, self._reads)

    def removeWriter(self, writer):
        """Remove a Selectable for notification of data available to write."""
        return self._dictRemove(writer, self._writes)

    def removeAll(self):
        """
        Remove all selectables, and return a list of them.
        """
        return self._removeAll(
            [self._selectables[fd] for fd in self._reads],
            [self._selectables[fd] for fd in self._writes],
        )

    def doPoll(self, timeout):
        """
        Poll the poller for new events.

        @param timeout: maximum time to wait, in seconds (or L{None} to block
            indefinitely); poll(2) itself takes milliseconds.
        """
        if timeout is not None:
            timeout = int(timeout * 1000)  # convert seconds to milliseconds

        try:
            l = self._poller.poll(timeout)
        except SelectError as e:
            if e.args[0] == errno.EINTR:
                # Interrupted by a signal; let the reactor loop come around
                # again rather than treating this as an error.
                return
            else:
                raise
        _drdw = self._doReadOrWrite
        for fd, event in l:
            try:
                selectable = self._selectables[fd]
            except KeyError:
                # Handles the infrequent case where one selectable's
                # handler disconnects another.
                continue
            # callWithLogger attributes any logged errors to the selectable.
            log.callWithLogger(selectable, _drdw, selectable, fd, event)

    doIteration = doPoll

    def getReaders(self):
        return [self._selectables[fd] for fd in self._reads]

    def getWriters(self):
        return [self._selectables[fd] for fd in self._writes]
+
+
def install():
    """
    Create a L{PollReactor} and register it as the global reactor.
    """
    reactor = PollReactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)


__all__ = ["PollReactor", "install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/posixbase.py b/contrib/python/Twisted/py3/twisted/internet/posixbase.py
new file mode 100644
index 0000000000..bd160ec865
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/posixbase.py
@@ -0,0 +1,653 @@
+# -*- test-case-name: twisted.test.test_internet,twisted.internet.test.test_posixbase -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Posix reactor base class
+"""
+
+
+import socket
+import sys
+from typing import Sequence
+
+from zope.interface import classImplements, implementer
+
+from twisted.internet import error, tcp, udp
+from twisted.internet.base import ReactorBase
+from twisted.internet.interfaces import (
+ IHalfCloseableDescriptor,
+ IReactorFDSet,
+ IReactorMulticast,
+ IReactorProcess,
+ IReactorSocket,
+ IReactorSSL,
+ IReactorTCP,
+ IReactorUDP,
+ IReactorUNIX,
+ IReactorUNIXDatagram,
+)
+from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
+from twisted.python import failure, log
+from twisted.python.runtime import platform, platformType
+from ._signals import (
+ SignalHandling,
+ _ChildSignalHandling,
+ _IWaker,
+ _MultiSignalHandling,
+ _Waker,
+)
+
+# Exceptions that doSelect might return frequently
+_NO_FILENO = error.ConnectionFdescWentAway("Handler has no fileno method")
+_NO_FILEDESC = error.ConnectionFdescWentAway("File descriptor lost")
+
+
+try:
+ from twisted.protocols import tls as _tls
+except ImportError:
+ tls = None
+else:
+ tls = _tls
+
+try:
+ from twisted.internet import ssl as _ssl
+except ImportError:
+ ssl = None
+else:
+ ssl = _ssl
+
+unixEnabled = platformType == "posix"
+
+processEnabled = False
+if unixEnabled:
+ from twisted.internet import process, unix
+
+ processEnabled = True
+
+
+if platform.isWindows():
+ try:
+ import win32process # type: ignore[import]
+
+ processEnabled = True
+ except ImportError:
+ win32process = None
+
+
class _DisconnectSelectableMixin:
    """
    Mixin providing the C{_disconnectSelectable} method.
    """

    def _disconnectSelectable(
        self,
        selectable,
        why,
        isRead,
        faildict={
            error.ConnectionDone: failure.Failure(error.ConnectionDone()),
            error.ConnectionLost: failure.Failure(error.ConnectionLost()),
        },
    ):
        """
        Disconnect a selectable, with support for half-close notification.

        @param why: the exception describing why the connection ended.
        @param isRead: whether the error came out of C{doRead()}; only a
            clean C{ConnectionDone} during a read can be a half-close.
        @param faildict: cache of pre-built L{failure.Failure}s for the two
            common disconnect reasons; the mutable default is intentional
            and never modified.
        """
        self.removeReader(selectable)
        cachedFailure = faildict.get(why.__class__)
        if cachedFailure is None:
            # Uncommon disconnect reason: full shutdown, wrapping the
            # exception in a fresh Failure.
            self.removeWriter(selectable)
            selectable.connectionLost(failure.Failure(why))
            return
        halfClose = (
            isRead
            and why.__class__ == error.ConnectionDone
            and IHalfCloseableDescriptor.providedBy(selectable)
        )
        if halfClose:
            # Read side closed cleanly; the write side stays usable.
            selectable.readConnectionLost(cachedFailure)
        else:
            self.removeWriter(selectable)
            selectable.connectionLost(cachedFailure)
+
+
@implementer(IReactorTCP, IReactorUDP, IReactorMulticast)
class PosixReactorBase(_DisconnectSelectableMixin, ReactorBase):
    """
    A basis for reactors that use file descriptors.

    @ivar _childWaker: L{None} or a reference to the L{_SIGCHLDWaker}
        which is used to properly notice child process termination.
    """

    _childWaker = None

    # Callable that creates a waker, overrideable so that subclasses can
    # substitute their own implementation:
    def _wakerFactory(self) -> _IWaker:
        return _Waker()

    def installWaker(self):
        """
        Install a `waker' to allow threads and signals to wake up the IO thread.

        We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
        the reactor. On Windows we use a pair of sockets.
        """
        if not self.waker:
            self.waker = self._wakerFactory()
            # Track the waker as an internal reader so _removeAll() does not
            # hand it back to application code.
            self._internalReaders.add(self.waker)
            self.addReader(self.waker)

    def _signalsFactory(self) -> SignalHandling:
        """
        Customize reactor signal handling to support child processes on POSIX
        platforms.

        @return: the base class's signal handling, composed with SIGCHLD
            handling on POSIX.
        """
        baseHandling = super()._signalsFactory()
        # If we're on a platform that uses signals for process event signaling
        if platformType == "posix":
            # Compose ...
            return _MultiSignalHandling(
                (
                    # the base signal handling behavior ...
                    baseHandling,
                    # with our extra SIGCHLD handling behavior.
                    _ChildSignalHandling(
                        self._addInternalReader,
                        self._removeInternalReader,
                    ),
                )
            )

        # Otherwise just use the base behavior
        return baseHandling

    # IReactorProcess

    def spawnProcess(
        self,
        processProtocol,
        executable,
        args=(),
        env={},
        path=None,
        uid=None,
        gid=None,
        usePTY=0,
        childFDs=None,
    ):
        """
        Spawn a child process.

        On POSIX this creates a L{process.PTYProcess} (when C{usePTY}) or a
        L{process.Process}; on Windows it uses the pywin32-based
        implementation when available.

        @raise ValueError: if an unsupported option (or combination, such as
            C{usePTY} together with C{childFDs}) is requested for this
            platform.
        @raise NotImplementedError: if process support is unavailable here.
        """
        if platformType == "posix":
            if usePTY:
                if childFDs is not None:
                    raise ValueError(
                        "Using childFDs is not supported with usePTY=True."
                    )
                return process.PTYProcess(
                    self, executable, args, env, path, processProtocol, uid, gid, usePTY
                )
            else:
                return process.Process(
                    self,
                    executable,
                    args,
                    env,
                    path,
                    processProtocol,
                    uid,
                    gid,
                    childFDs,
                )
        elif platformType == "win32":
            # None of these features are implementable with the win32 API.
            if uid is not None:
                raise ValueError("Setting UID is unsupported on this platform.")
            if gid is not None:
                raise ValueError("Setting GID is unsupported on this platform.")
            if usePTY:
                raise ValueError("The usePTY parameter is not supported on Windows.")
            if childFDs:
                raise ValueError("Customizing childFDs is not supported on Windows.")

            if win32process:
                from twisted.internet._dumbwin32proc import Process

                return Process(self, processProtocol, executable, args, env, path)
            else:
                raise NotImplementedError(
                    "spawnProcess not available since pywin32 is not installed."
                )
        else:
            raise NotImplementedError(
                "spawnProcess only available on Windows or POSIX."
            )

    # IReactorUDP

    def listenUDP(self, port, protocol, interface="", maxPacketSize=8192):
        """Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    # IReactorMulticast

    def listenMulticast(
        self, port, protocol, interface="", maxPacketSize=8192, listenMultiple=False
    ):
        """Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(
            port, protocol, interface, maxPacketSize, self, listenMultiple
        )
        p.startListening()
        return p

    # IReactorUNIX

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """
        Make an outgoing connection to a UNIX socket at C{address}.
        """
        assert unixEnabled, "UNIX support is not present"
        c = unix.Connector(address, factory, timeout, self, checkPID)
        c.connect()
        return c

    def listenUNIX(self, address, factory, backlog=50, mode=0o666, wantPID=0):
        """
        Listen on a UNIX socket at path C{address}.
        """
        assert unixEnabled, "UNIX support is not present"
        p = unix.Port(address, factory, backlog, mode, self, wantPID)
        p.startListening()
        return p

    # IReactorUNIXDatagram

    def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192, mode=0o666):
        """
        Connects a given L{DatagramProtocol} to the given path.

        EXPERIMENTAL.

        @returns: object conforming to L{IListeningPort}.
        """
        assert unixEnabled, "UNIX support is not present"
        p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
        p.startListening()
        return p

    def connectUNIXDatagram(
        self, address, protocol, maxPacketSize=8192, mode=0o666, bindAddress=None
    ):
        """
        Connects a L{ConnectedDatagramProtocol} instance to a path.

        EXPERIMENTAL.
        """
        assert unixEnabled, "UNIX support is not present"
        p = unix.ConnectedDatagramPort(
            address, protocol, maxPacketSize, mode, bindAddress, self
        )
        p.startListening()
        return p

    # IReactorSocket (no AF_UNIX on Windows)

    if unixEnabled:
        _supportedAddressFamilies: Sequence[socket.AddressFamily] = (
            socket.AF_INET,
            socket.AF_INET6,
            socket.AF_UNIX,
        )
    else:
        _supportedAddressFamilies = (
            socket.AF_INET,
            socket.AF_INET6,
        )

    def adoptStreamPort(self, fileDescriptor, addressFamily, factory):
        """
        Create a new L{IListeningPort} from an already-initialized socket.

        This just dispatches to a suitable port implementation (eg from
        L{IReactorTCP}, etc) based on the specified C{addressFamily}.

        @see: L{twisted.internet.interfaces.IReactorSocket.adoptStreamPort}
        """
        if addressFamily not in self._supportedAddressFamilies:
            raise error.UnsupportedAddressFamily(addressFamily)

        if unixEnabled and addressFamily == socket.AF_UNIX:
            p = unix.Port._fromListeningDescriptor(self, fileDescriptor, factory)
        else:
            p = tcp.Port._fromListeningDescriptor(
                self, fileDescriptor, addressFamily, factory
            )
        p.startListening()
        return p

    def adoptStreamConnection(self, fileDescriptor, addressFamily, factory):
        """
        @see:
            L{twisted.internet.interfaces.IReactorSocket.adoptStreamConnection}
        """
        if addressFamily not in self._supportedAddressFamilies:
            raise error.UnsupportedAddressFamily(addressFamily)

        if unixEnabled and addressFamily == socket.AF_UNIX:
            return unix.Server._fromConnectedSocket(fileDescriptor, factory, self)
        else:
            return tcp.Server._fromConnectedSocket(
                fileDescriptor, addressFamily, factory, self
            )

    def adoptDatagramPort(
        self, fileDescriptor, addressFamily, protocol, maxPacketSize=8192
    ):
        """
        Adopt an existing bound datagram socket given as a file descriptor.

        @raise error.UnsupportedAddressFamily: for any family other than
            AF_INET or AF_INET6.
        """
        if addressFamily not in (socket.AF_INET, socket.AF_INET6):
            raise error.UnsupportedAddressFamily(addressFamily)

        p = udp.Port._fromListeningDescriptor(
            self, fileDescriptor, addressFamily, protocol, maxPacketSize=maxPacketSize
        )
        p.startListening()
        return p

    # IReactorTCP

    def listenTCP(self, port, factory, backlog=50, interface=""):
        """
        Create and start a TCP listening port.  See L{IReactorTCP.listenTCP}.
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        Begin an outgoing TCP connection.  See L{IReactorTCP.connectTCP}.
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    # IReactorSSL (sometimes, not implemented)

    def connectSSL(
        self, host, port, factory, contextFactory, timeout=30, bindAddress=None
    ):
        """
        Begin an outgoing SSL/TLS connection, preferring the memory-BIO based
        twisted.protocols.tls wrapper over TCP, falling back to the older
        twisted.internet.ssl connector.
        """
        if tls is not None:
            tlsFactory = tls.TLSMemoryBIOFactory(contextFactory, True, factory)
            return self.connectTCP(host, port, tlsFactory, timeout, bindAddress)
        elif ssl is not None:
            c = ssl.Connector(
                host, port, factory, contextFactory, timeout, bindAddress, self
            )
            c.connect()
            return c
        else:
            assert False, "SSL support is not present"

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
        """
        Listen for SSL/TLS connections, with the same implementation
        preference order as L{connectSSL}.
        """
        if tls is not None:
            tlsFactory = tls.TLSMemoryBIOFactory(contextFactory, False, factory)
            port = self.listenTCP(port, tlsFactory, backlog, interface)
            port._type = "TLS"
            return port
        elif ssl is not None:
            p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
            p.startListening()
            return p
        else:
            assert False, "SSL support is not present"

    def _removeAll(self, readers, writers):
        """
        Remove all readers and writers, and list of removed L{IReadDescriptor}s
        and L{IWriteDescriptor}s.

        Meant for calling from subclasses, to implement removeAll, like::

            def removeAll(self):
                return self._removeAll(self._reads, self._writes)

        where C{self._reads} and C{self._writes} are iterables.
        """
        # Internal readers (such as the waker) are deliberately excluded so
        # reactor machinery keeps running.
        removedReaders = set(readers) - self._internalReaders
        for reader in removedReaders:
            self.removeReader(reader)

        removedWriters = set(writers)
        for writer in removedWriters:
            self.removeWriter(writer)

        return list(removedReaders | removedWriters)
+
+
class _PollLikeMixin:
    """
    Mixin for poll-like reactors.

    Subclasses must define the following attributes::

      - _POLL_DISCONNECTED - Bitmask for events indicating a connection was
        lost.
      - _POLL_IN - Bitmask for events indicating there is input to read.
      - _POLL_OUT - Bitmask for events indicating output can be written.

    Must be mixed in to a subclass of PosixReactorBase (for
    _disconnectSelectable).  A C{self._reads} container is also consulted,
    but only when a real disconnect event is reported by the poller.
    """

    def _doReadOrWrite(self, selectable, fd, event):
        """
        fd is available for read or write, do the work and raise errors if
        necessary.

        @param selectable: the registered object whose C{doRead}/C{doWrite}
            will be invoked.
        @param fd: the file descriptor the event was reported against.
        @param event: a bitmask of C{_POLL_*} flags describing the event.
        """
        # "why" accumulates the disconnect reason (if any); "inRead" records
        # whether it arose during a read, which controls half-close handling
        # in _disconnectSelectable.
        why = None
        inRead = False
        if event & self._POLL_DISCONNECTED and not (event & self._POLL_IN):
            # Handle disconnection. But only if we finished processing all
            # the pending input.
            if fd in self._reads:
                # If we were reading from the descriptor then this is a
                # clean shutdown. We know there are no read events pending
                # because we just checked above. It also might be a
                # half-close (which is why we have to keep track of inRead).
                inRead = True
                why = CONNECTION_DONE
            else:
                # If we weren't reading, this is an error shutdown of some
                # sort.
                why = CONNECTION_LOST
        else:
            # Any non-disconnect event turns into a doRead or a doWrite.
            try:
                # First check to see if the descriptor is still valid. This
                # gives fileno() a chance to raise an exception, too.
                # Ideally, disconnection would always be indicated by the
                # return value of doRead or doWrite (or an exception from
                # one of those methods), but calling fileno here helps make
                # buggy applications more transparent.
                if selectable.fileno() == -1:
                    # -1 is sort of a historical Python artifact. Python
                    # files and sockets used to change their file descriptor
                    # to -1 when they closed. For the time being, we'll
                    # continue to support this anyway in case applications
                    # replicated it, plus abstract.FileDescriptor.fileno
                    # returns -1. Eventually it'd be good to deprecate this
                    # case.
                    why = _NO_FILEDESC
                else:
                    if event & self._POLL_IN:
                        # Handle a read event.
                        why = selectable.doRead()
                        inRead = True
                    if not why and event & self._POLL_OUT:
                        # Handle a write event, as long as doRead didn't
                        # disconnect us.
                        why = selectable.doWrite()
                        inRead = False
            except BaseException:
                # Any exception from application code gets logged and will
                # cause us to disconnect the selectable.
                why = sys.exc_info()[1]
                log.err()
        if why:
            self._disconnectSelectable(selectable, why, inRead)
+
+
@implementer(IReactorFDSet)
class _ContinuousPolling(_PollLikeMixin, _DisconnectSelectableMixin):
    """
    Drive readers and writers from a timer rather than from OS readiness
    notification.

    C{epoll(7)} cannot watch ordinary filesystem files, so this class fakes
    readiness by periodically invoking C{doRead}/C{doWrite} on every
    registered descriptor via a C{LoopingCall}.

    Reusing L{_PollLikeMixin} here is a bit hacky, but re-implementing and
    testing the relevant code yet again is unappealing.

    @ivar _reactor: The L{EPollReactor} that is using this instance.

    @ivar _loop: A C{LoopingCall} that drives the polling, or L{None}.

    @ivar _readers: A C{set} of C{FileDescriptor} objects that should be read
        from.

    @ivar _writers: A C{set} of C{FileDescriptor} objects that should be
        written to.
    """

    # Event constants required by the _PollLikeMixin protocol.
    _POLL_DISCONNECTED = 1
    _POLL_IN = 2
    _POLL_OUT = 4

    def __init__(self, reactor):
        self._reactor = reactor
        self._loop = None
        self._readers = set()
        self._writers = set()

    def _checkLoop(self):
        """
        Start the polling C{LoopingCall} when the first descriptor appears
        and stop it once the last one is removed.
        """
        anyRegistered = bool(self._readers) or bool(self._writers)
        if anyRegistered:
            if self._loop is None:
                from twisted.internet.task import _EPSILON, LoopingCall

                self._loop = LoopingCall(self.iterate)
                self._loop.clock = self._reactor
                # LoopingCall seems unhappy with timeout of 0, so use very
                # small number:
                self._loop.start(_EPSILON, now=False)
        elif self._loop:
            self._loop.stop()
            self._loop = None

    def iterate(self):
        """
        Simulate one readiness pass: dispatch C{doRead} to every reader and
        C{doWrite} to every writer.
        """
        for fileDesc in list(self._readers):
            self._doReadOrWrite(fileDesc, fileDesc, self._POLL_IN)
        for fileDesc in list(self._writers):
            self._doReadOrWrite(fileDesc, fileDesc, self._POLL_OUT)

    def addReader(self, reader):
        """
        Begin polling the given C{FileDescriptor} for readability.
        """
        self._readers.add(reader)
        self._checkLoop()

    def addWriter(self, writer):
        """
        Begin polling the given C{FileDescriptor} for writability.
        """
        self._writers.add(writer)
        self._checkLoop()

    def removeReader(self, reader):
        """
        Stop polling the given C{FileDescriptor} for readability.
        """
        if reader not in self._readers:
            return
        self._readers.remove(reader)
        self._checkLoop()

    def removeWriter(self, writer):
        """
        Stop polling the given C{FileDescriptor} for writability.
        """
        if writer not in self._writers:
            return
        self._writers.remove(writer)
        self._checkLoop()

    def removeAll(self):
        """
        Forget every reader and writer, returning them as a list.
        """
        removed = list(self._readers | self._writers)
        # Don't reset to new value, since self.isWriting and .isReading refer
        # to the existing instance:
        self._readers.clear()
        self._writers.clear()
        return removed

    def getReaders(self):
        """
        Return a list of the readers.
        """
        return list(self._readers)

    def getWriters(self):
        """
        Return a list of the writers.
        """
        return list(self._writers)

    def isReading(self, fd):
        """
        Checks if the file descriptor is currently being observed for read
        readiness.

        @param fd: The file descriptor being checked.
        @type fd: L{twisted.internet.abstract.FileDescriptor}
        @return: C{True} if the file descriptor is being observed for read
            readiness, C{False} otherwise.
        @rtype: C{bool}
        """
        return fd in self._readers

    def isWriting(self, fd):
        """
        Checks if the file descriptor is currently being observed for write
        readiness.

        @param fd: The file descriptor being checked.
        @type fd: L{twisted.internet.abstract.FileDescriptor}
        @return: C{True} if the file descriptor is being observed for write
            readiness, C{False} otherwise.
        @rtype: C{bool}
        """
        return fd in self._writers
+
+
# Declare the optional reactor interfaces on PosixReactorBase after class
# creation, based on what this environment actually supports.
if tls is not None or ssl is not None:
    # Either twisted.protocols.tls or twisted.internet.ssl imported cleanly,
    # so the SSL/TLS methods are usable.
    classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
    # AF_UNIX sockets only exist on POSIX platforms.
    classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
    # Process support requires POSIX, or pywin32 on Windows.
    classImplements(PosixReactorBase, IReactorProcess)
if getattr(socket, "fromfd", None) is not None:
    # socket.fromfd is required to adopt pre-existing file descriptors.
    classImplements(PosixReactorBase, IReactorSocket)

__all__ = ["PosixReactorBase"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/process.py b/contrib/python/Twisted/py3/twisted/internet/process.py
new file mode 100644
index 0000000000..ef3b88d9f1
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/process.py
@@ -0,0 +1,1293 @@
+# -*- test-case-name: twisted.test.test_process -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+UNIX Process management.
+
+Do NOT use this module directly - use reactor.spawnProcess() instead.
+
+Maintainer: Itamar Shtull-Trauring
+"""
+from __future__ import annotations
+
+import errno
+import gc
+import io
+import os
+import signal
+import stat
+import sys
+import traceback
+from collections import defaultdict
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+
+_PS_CLOSE: int
+_PS_DUP2: int
+
+if not TYPE_CHECKING:
+ try:
+ from os import POSIX_SPAWN_CLOSE as _PS_CLOSE, POSIX_SPAWN_DUP2 as _PS_DUP2
+ except ImportError:
+ pass
+
+from zope.interface import implementer
+
+from twisted.internet import abstract, error, fdesc
+from twisted.internet._baseprocess import BaseProcess
+from twisted.internet.interfaces import IProcessTransport
+from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
+from twisted.python import failure, log
+from twisted.python.runtime import platform
+from twisted.python.util import switchUID
+
+if platform.isWindows():
+ raise ImportError(
+ "twisted.internet.process does not work on Windows. "
+ "Use the reactor.spawnProcess() API instead."
+ )
+
+try:
+ import pty as _pty
+except ImportError:
+ pty = None
+else:
+ pty = _pty
+
+try:
+ import fcntl as _fcntl
+ import termios
+except ImportError:
+ fcntl = None
+else:
+ fcntl = _fcntl
+
+# Some people were importing this, which is incorrect, just keeping it
+# here for backwards compatibility:
+ProcessExitedAlready = error.ProcessExitedAlready
+
# Registry of child pids still awaiting reaping, mapped to their handlers.
reapProcessHandlers: Dict[int, _BaseProcess] = {}


def reapAllProcesses() -> None:
    """
    Reap all registered processes.
    """
    # Coerce this to a list, as reaping the process changes the dictionary and
    # causes a "size changed during iteration" exception
    for process in list(reapProcessHandlers.values()):
        process.reapProcess()


def registerReapProcessHandler(pid, process):
    """
    Register a process handler for the given pid, in case L{reapAllProcesses}
    is called.

    If the child has already exited, its status is delivered to
    C{process.processEnded} immediately instead of registering a handler.

    @param pid: the pid of the process.
    @param process: a process handler.
    @raise RuntimeError: if C{pid} is already registered.
    """
    if pid in reapProcessHandlers:
        raise RuntimeError("Try to register an already registered process.")
    try:
        auxPID, status = os.waitpid(pid, os.WNOHANG)
    except BaseException:
        log.msg(f"Failed to reap {pid}:")
        log.err()

        if pid is None:
            return

        # FIX: this assignment (and the early return above) must live inside
        # the except handler.  Previously they ran unconditionally after the
        # try block, discarding waitpid()'s result, so an already-exited
        # child never had processEnded() called and was registered as a
        # zombie instead.
        auxPID = None
    if auxPID:
        # The child has already exited; deliver its exit status now.
        process.processEnded(status)
    else:
        # if auxPID is 0, there are children but none have exited
        reapProcessHandlers[pid] = process


def unregisterReapProcessHandler(pid, process):
    """
    Unregister a process handler previously registered with
    L{registerReapProcessHandler}.

    @raise RuntimeError: if C{pid} is not registered to C{process}.
    """
    if not (pid in reapProcessHandlers and reapProcessHandlers[pid] == process):
        raise RuntimeError("Try to unregister a process not registered.")
    del reapProcessHandlers[pid]
+
+
class ProcessWriter(abstract.FileDescriptor):
    """
    (Internal) Helper class to write into a Process's input pipe.

    I am a helper which describes a selectable asynchronous writer to a
    process's input pipe, including stdin.

    @ivar enableReadHack: A flag which determines how readability on this
        write descriptor will be handled. If C{True}, then readability may
        indicate the reader for this write descriptor has been closed (ie,
        the connection has been lost). If C{False}, then readability events
        are ignored.
    """

    connected = 1
    # NOTE(review): `ic` is not referenced anywhere in this class; its purpose
    # is unclear -- confirm against the rest of the module before relying on
    # (or removing) it.
    ic = 0
    enableReadHack = False

    def __init__(self, reactor, proc, name, fileno, forceReadHack=False):
        """
        Initialize, specifying a Process instance to connect to.

        @param proc: the owning Process; notified via childConnectionLost.
        @param name: the identifier for this pipe, passed back to C{proc}.
        @param fileno: the write end's file descriptor.
        @param forceReadHack: unconditionally enable the close-via-read hack
            (only honored when the fd is actually a FIFO).
        """
        abstract.FileDescriptor.__init__(self, reactor)
        fdesc.setNonBlocking(fileno)
        self.proc = proc
        self.name = name
        self.fd = fileno

        if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode):
            # If the fd is not a pipe, then the read hack is never
            # applicable. This case arises when ProcessWriter is used by
            # StandardIO and stdout is redirected to a normal file.
            self.enableReadHack = False
        elif forceReadHack:
            self.enableReadHack = True
        else:
            # Detect if this fd is actually a write-only fd. If it's
            # valid to read, don't try to detect closing via read.
            # This really only means that we cannot detect a TTY's write
            # pipe being closed.
            try:
                os.read(self.fileno(), 0)
            except OSError:
                # It's a write-only pipe end, enable hack
                self.enableReadHack = True

        if self.enableReadHack:
            self.startReading()

    def fileno(self):
        """
        Return the fileno() of my process's stdin.
        """
        return self.fd

    def writeSomeData(self, data):
        """
        Write some data to the open process.
        """
        rv = fdesc.writeToFD(self.fd, data)
        if rv == len(data) and self.enableReadHack:
            # If the send buffer is now empty and it is necessary to monitor
            # this descriptor for readability to detect close, try detecting
            # readability now.
            self.startReading()
        return rv

    def write(self, data):
        # Suspend the read hack while data is buffered for writing; see
        # doRead for why a non-empty write pipe can look "readable".
        self.stopReading()
        abstract.FileDescriptor.write(self, data)

    def doRead(self):
        """
        The only way a write pipe can become "readable" is at EOF, because the
        child has closed it, and we're using a reactor which doesn't
        distinguish between readable and closed (such as the select reactor).

        Except that's not true on linux < 2.6.11. It has the following
        characteristics: write pipe is completely empty => POLLOUT (writable in
        select), write pipe is not completely empty => POLLIN (readable in
        select), write pipe's reader closed => POLLIN|POLLERR (readable and
        writable in select)

        That's what this funky code is for. If linux was not broken, this
        function could be simply "return CONNECTION_LOST".
        """
        if self.enableReadHack:
            return CONNECTION_LOST
        else:
            self.stopReading()

    def connectionLost(self, reason):
        """
        See abstract.FileDescriptor.connectionLost.
        """
        # At least on macOS 10.4, exiting while stdout is non-blocking can
        # result in data loss. For some reason putting the file descriptor
        # back into blocking mode seems to resolve this issue.
        fdesc.setBlocking(self.fd)

        abstract.FileDescriptor.connectionLost(self, reason)
        self.proc.childConnectionLost(self.name, reason)
+
+
class ProcessReader(abstract.FileDescriptor):
    """
    (Internal) Selectable wrapper around one of a child process's output
    pipes, such as stdout or stderr.

    Incoming bytes are forwarded to the owning Process through
    C{childDataReceived}.
    """

    connected = True

    def __init__(self, reactor, proc, name, fileno):
        """
        Initialize, specifying a process to connect to.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        fdesc.setNonBlocking(fileno)
        self.proc = proc
        self.name = name
        self.fd = fileno
        self.startReading()

    def fileno(self):
        """
        Return the file descriptor being monitored.
        """
        return self.fd

    def writeSomeData(self, data):
        # Only ever invoked after .loseConnection; a real write would fail,
        # so report the connection as gone instead.  This hack lets
        # .loseConnection be used uniformly on readers and writers.
        assert data == b""
        return CONNECTION_LOST

    def doRead(self):
        """
        Called when the pipe becomes readable; pump available bytes through
        to C{dataReceived}.
        """
        return fdesc.readFromFD(self.fd, self.dataReceived)

    def dataReceived(self, data):
        # Forward to the owning process, tagged with this pipe's name.
        self.proc.childDataReceived(self.name, data)

    def loseConnection(self):
        if not self.connected or self.disconnecting:
            return
        self.disconnecting = 1
        self.stopReading()
        self.reactor.callLater(
            0, self.connectionLost, failure.Failure(CONNECTION_DONE)
        )

    def connectionLost(self, reason):
        """
        Close my end of the pipe, signal the Process (which signals the
        ProcessProtocol).
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self.proc.childConnectionLost(self.name, reason)
+
+
+class _BaseProcess(BaseProcess):
+ """
+ Base class for Process and PTYProcess.
+ """
+
+ status: Optional[int] = None
+ pid = None
+
    def reapProcess(self):
        """
        Try to reap a process (without blocking) via waitpid.

        This is called when sigchild is caught or a Process object loses its
        "connection" (stdout is closed) This ought to result in reaping all
        zombie processes, since it will be called twice as often as it needs
        to be.

        (Unfortunately, this is a slightly experimental approach, since
        UNIX has no way to be really sure that your process is going to
        go away w/o blocking. I don't want to block.)
        """
        try:
            try:
                pid, status = os.waitpid(self.pid, os.WNOHANG)
            except OSError as e:
                if e.errno == errno.ECHILD:
                    # no child process
                    pid = None
                else:
                    raise
        except BaseException:
            # Reaping runs during error handling and must never propagate a
            # failure of its own; log it and carry on.
            log.msg(f"Failed to reap {self.pid}:")
            log.err()
            pid = None
        # waitpid returns 0 (falsy) when the child has not exited yet, and we
        # set pid = None above on ECHILD/failure; only a real pid means the
        # child is gone and its status can be delivered.
        if pid:
            unregisterReapProcessHandler(pid, self)
            self.processEnded(status)
+
+ def _getReason(self, status):
+ exitCode = sig = None
+ if os.WIFEXITED(status):
+ exitCode = os.WEXITSTATUS(status)
+ else:
+ sig = os.WTERMSIG(status)
+ if exitCode or sig:
+ return error.ProcessTerminated(exitCode, sig, status)
+ return error.ProcessDone(status)
+
+ def signalProcess(self, signalID):
+ """
+ Send the given signal C{signalID} to the process. It'll translate a
+ few signals ('HUP', 'STOP', 'INT', 'KILL', 'TERM') from a string
+ representation to its int value, otherwise it'll pass directly the
+ value provided
+
+ @type signalID: C{str} or C{int}
+ """
+ if signalID in ("HUP", "STOP", "INT", "KILL", "TERM"):
+ signalID = getattr(signal, f"SIG{signalID}")
+ if self.pid is None:
+ raise ProcessExitedAlready()
+ try:
+ os.kill(self.pid, signalID)
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ raise ProcessExitedAlready()
+ else:
+ raise
+
+ def _resetSignalDisposition(self):
+ # The Python interpreter ignores some signals, and our child
+ # process will inherit that behaviour. To have a child process
+ # that responds to signals normally, we need to reset our
+ # child process's signal handling (just) after we fork and
+ # before we execvpe.
+ for signalnum in range(1, signal.NSIG):
+ if signal.getsignal(signalnum) == signal.SIG_IGN:
+ # Reset signal handling to the default
+ signal.signal(signalnum, signal.SIG_DFL)
+
+ def _trySpawnInsteadOfFork(
+ self, path, uid, gid, executable, args, environment, kwargs
+ ):
+ """
+ Try to use posix_spawnp() instead of fork(), if possible.
+
+ This implementation returns False because the non-PTY subclass
+ implements the actual logic; we can't yet use this for pty processes.
+
+ @return: a boolean indicating whether posix_spawnp() was used or not.
+ """
+ return False
+
+ def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
+ """
+ Fork and then exec sub-process.
+
+ @param path: the path where to run the new process.
+ @type path: L{bytes} or L{unicode}
+
+ @param uid: if defined, the uid used to run the new process.
+ @type uid: L{int}
+
+ @param gid: if defined, the gid used to run the new process.
+ @type gid: L{int}
+
+ @param executable: the executable to run in a new process.
+ @type executable: L{str}
+
+ @param args: arguments used to create the new process.
+ @type args: L{list}.
+
+ @param environment: environment used for the new process.
+ @type environment: L{dict}.
+
+ @param kwargs: keyword arguments to L{_setupChild} method.
+ """
+
+ if self._trySpawnInsteadOfFork(
+ path, uid, gid, executable, args, environment, kwargs
+ ):
+ return
+
+ collectorEnabled = gc.isenabled()
+ gc.disable()
+ try:
+ self.pid = os.fork()
+ except BaseException:
+ # Still in the parent process
+ if collectorEnabled:
+ gc.enable()
+ raise
+ else:
+ if self.pid == 0:
+ # A return value of 0 from fork() indicates that we are now
+ # executing in the child process.
+
+ # Do not put *ANY* code outside the try block. The child
+ # process must either exec or _exit. If it gets outside this
+ # block (due to an exception that is not handled here, but
+ # which might be handled higher up), there will be two copies
+ # of the parent running in parallel, doing all kinds of damage.
+
+ # After each change to this code, review it to make sure there
+ # are no exit paths.
+
+ try:
+ # Stop debugging. If I am, I don't care anymore.
+ sys.settrace(None)
+ self._setupChild(**kwargs)
+ self._execChild(path, uid, gid, executable, args, environment)
+ except BaseException:
+ # If there are errors, try to write something descriptive
+ # to stderr before exiting.
+
+ # The parent's stderr isn't *necessarily* fd 2 anymore, or
+ # even still available; however, even libc assumes that
+ # write(2, err) is a useful thing to attempt.
+
+ try:
+ # On Python 3, print_exc takes a text stream, but
+ # on Python 2 it still takes a byte stream. So on
+ # Python 3 we will wrap up the byte stream returned
+ # by os.fdopen using TextIOWrapper.
+
+ # We hard-code UTF-8 as the encoding here, rather
+ # than looking at something like
+ # getfilesystemencoding() or sys.stderr.encoding,
+ # because we want an encoding that will be able to
+ # encode the full range of code points. We are
+ # (most likely) talking to the parent process on
+ # the other end of this pipe and not the filesystem
+ # or the original sys.stderr, so there's no point
+ # in trying to match the encoding of one of those
+ # objects.
+
+ stderr = io.TextIOWrapper(os.fdopen(2, "wb"), encoding="utf-8")
+ msg = ("Upon execvpe {} {} in environment id {}" "\n:").format(
+ executable, str(args), id(environment)
+ )
+ stderr.write(msg)
+ traceback.print_exc(file=stderr)
+ stderr.flush()
+
+ for fd in range(3):
+ os.close(fd)
+ except BaseException:
+ # Handle all errors during the error-reporting process
+ # silently to ensure that the child terminates.
+ pass
+
+ # See comment above about making sure that we reach this line
+ # of code.
+ os._exit(1)
+
+ # we are now in parent process
+ if collectorEnabled:
+ gc.enable()
+ self.status = -1 # this records the exit status of the child
+
+ def _setupChild(self, *args, **kwargs):
+ """
+ Setup the child process. Override in subclasses.
+ """
+ raise NotImplementedError()
+
+ def _execChild(self, path, uid, gid, executable, args, environment):
+ """
+ The exec() which is done in the forked child.
+ """
+ if path:
+ os.chdir(path)
+ if uid is not None or gid is not None:
+ if uid is None:
+ uid = os.geteuid()
+ if gid is None:
+ gid = os.getegid()
+ # set the UID before I actually exec the process
+ os.setuid(0)
+ os.setgid(0)
+ switchUID(uid, gid)
+ os.execvpe(executable, args, environment)
+
+ def __repr__(self) -> str:
+ """
+ String representation of a process.
+ """
+ return "<{} pid={} status={}>".format(
+ self.__class__.__name__,
+ self.pid,
+ self.status,
+ )
+
+
class _FDDetector:
    """
    This class contains the logic necessary to decide which of the available
    system techniques should be used to detect the open file descriptors for
    the current process. The chosen technique gets monkey-patched into the
    _listOpenFDs method of this class so that the detection only needs to occur
    once.

    @ivar listdir: The implementation of listdir to use. This gets overwritten
        by the test cases.
    @ivar getpid: The implementation of getpid to use, returns the PID of the
        running process.
    @ivar openfile: The implementation of open() to use, by default the Python
        builtin.
    """

    # So that we can unit test this
    listdir = os.listdir
    getpid = os.getpid
    openfile = open

    def __init__(self):
        # Candidate strategies in order of preference; the first one that
        # demonstrably notices a newly-opened file wins (see
        # _getImplementation), with the last entry as a catch-all fallback.
        self._implementations = [
            self._procFDImplementation,
            self._devFDImplementation,
            self._fallbackFDImplementation,
        ]

    def _listOpenFDs(self):
        """
        Return an iterable of file descriptors which I{may} be open in this
        process.

        This will try to return the fewest possible descriptors without missing
        any.
        """
        # Replace this method on the instance with the selected
        # implementation so detection only runs once per detector.
        self._listOpenFDs = self._getImplementation()
        return self._listOpenFDs()

    def _getImplementation(self):
        """
        Pick a method which gives correct results for C{_listOpenFDs} in this
        runtime environment.

        This involves a lot of very platform-specific checks, some of which may
        be relatively expensive.  Therefore the returned method should be saved
        and re-used, rather than always calling this method to determine what it
        is.

        See the implementation for the details of how a method is selected.
        """
        for impl in self._implementations:
            try:
                before = impl()
            except BaseException:
                # This strategy doesn't work on this platform; try the next.
                continue
            # Open a throwaway file and check whether the strategy notices
            # the extra descriptor appearing.
            with self.openfile("/dev/null", "r"):
                after = impl()
            if before != after:
                return impl
        # If no implementation can detect the newly opened file above, then just
        # return the last one. The last one should therefore always be one
        # which makes a simple static guess which includes all possible open
        # file descriptors, but perhaps also many other values which do not
        # correspond to file descriptors. For example, the scheme implemented
        # by _fallbackFDImplementation is suitable to be the last entry.
        return impl

    def _devFDImplementation(self):
        """
        Simple implementation for systems where /dev/fd actually works.
        See: http://www.freebsd.org/cgi/man.cgi?fdescfs
        """
        dname = "/dev/fd"
        result = [int(fd) for fd in self.listdir(dname)]
        return result

    def _procFDImplementation(self):
        """
        Simple implementation for systems where /proc/pid/fd exists (we assume
        it works).
        """
        dname = "/proc/%d/fd" % (self.getpid(),)
        return [int(fd) for fd in self.listdir(dname)]

    def _fallbackFDImplementation(self):
        """
        Fallback implementation where either the resource module can inform us
        about the upper bound of how many FDs to expect, or where we just guess
        a constant maximum if there is no resource module.

        All possible file descriptors from 0 to that upper bound are returned
        with no attempt to exclude invalid file descriptor values.
        """
        try:
            import resource
        except ImportError:
            maxfds = 1024
        else:
            # OS-X reports 9223372036854775808. That's a lot of fds to close.
            # OS-X should get the /dev/fd implementation instead, so mostly
            # this check probably isn't necessary.
            maxfds = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])
        return range(maxfds)
+
+
# Module-level singleton: the FD-listing strategy is probed once per process
# and cached on this instance (see _FDDetector._listOpenFDs).
detector = _FDDetector()
+
+
def _listOpenFDs():
    """
    Use the global detector object to figure out which FD implementation to
    use.

    @return: an iterable of integers which I{may} be open file descriptors
        in this process.
    """
    return detector._listOpenFDs()
+
+
+def _getFileActions(
+ fdState: List[Tuple[int, bool]],
+ childToParentFD: Dict[int, int],
+ doClose: int,
+ doDup2: int,
+) -> List[Tuple[int, ...]]:
+ """
+ Get the C{file_actions} parameter for C{posix_spawn} based on the
+ parameters describing the current process state.
+
+ @param fdState: A list of 2-tuples of (file descriptor, close-on-exec
+ flag).
+
+ @param doClose: the integer to use for the 'close' instruction
+
+ @param doDup2: the integer to use for the 'dup2' instruction
+ """
+ fdStateDict = dict(fdState)
+ parentToChildren: Dict[int, List[int]] = defaultdict(list)
+ for inChild, inParent in childToParentFD.items():
+ parentToChildren[inParent].append(inChild)
+ allocated = set(fdStateDict)
+ allocated |= set(childToParentFD.values())
+ allocated |= set(childToParentFD.keys())
+ nextFD = 0
+
+ def allocateFD() -> int:
+ nonlocal nextFD
+ while nextFD in allocated:
+ nextFD += 1
+ allocated.add(nextFD)
+ return nextFD
+
+ result: List[Tuple[int, ...]] = []
+ relocations = {}
+ for inChild, inParent in sorted(childToParentFD.items()):
+ # The parent FD will later be reused by a child FD.
+ parentToChildren[inParent].remove(inChild)
+ if parentToChildren[inChild]:
+ new = relocations[inChild] = allocateFD()
+ result.append((doDup2, inChild, new))
+ if inParent in relocations:
+ result.append((doDup2, relocations[inParent], inChild))
+ if not parentToChildren[inParent]:
+ result.append((doClose, relocations[inParent]))
+ else:
+ if inParent == inChild:
+ if fdStateDict[inParent]:
+ # If the child is attempting to inherit the parent as-is,
+ # and it is not close-on-exec, the job is already done; we
+ # can bail. Otherwise...
+
+ tempFD = allocateFD()
+ # The child wants to inherit the parent as-is, so the
+ # handle must be heritable.. dup2 makes the new descriptor
+ # inheritable by default, *but*, per the man page, “if
+ # fildes and fildes2 are equal, then dup2() just returns
+ # fildes2; no other changes are made to the existing
+ # descriptor”, so we need to dup it somewhere else and dup
+ # it back before closing the temporary place we put it.
+ result.extend(
+ [
+ (doDup2, inParent, tempFD),
+ (doDup2, tempFD, inChild),
+ (doClose, tempFD),
+ ]
+ )
+ else:
+ result.append((doDup2, inParent, inChild))
+
+ for eachFD, uninheritable in fdStateDict.items():
+ if eachFD not in childToParentFD and not uninheritable:
+ result.append((doClose, eachFD))
+
+ return result
+
+
@implementer(IProcessTransport)
class Process(_BaseProcess):
    """
    An operating-system Process.

    This represents an operating-system process with arbitrary input/output
    pipes connected to it. Those pipes may represent standard input, standard
    output, and standard error, or any other file descriptor.

    On UNIX, this is implemented using posix_spawnp() when possible (or fork(),
    exec(), pipe() and fcntl() when not). These calls may not exist elsewhere
    so this code is not cross-platform. (also, windows can only select on
    sockets...)
    """

    # Set to True to print parent-side pipe plumbing details.
    debug = False
    # Set to True to have the child print _setupChild details to stderr.
    debug_child = False

    # -1 means "child running / not yet reaped".
    status = -1
    pid = None

    # Factories for the parent-side pipe wrappers; overridable for tests.
    processWriterFactory = ProcessWriter
    processReaderFactory = ProcessReader

    def __init__(
        self,
        reactor,
        executable,
        args,
        environment,
        path,
        proto,
        uid=None,
        gid=None,
        childFDs=None,
    ):
        """
        Spawn an operating-system process.

        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens. (This is
        executed automatically when a Process is instantiated.)

        This will also run the subprocess as a given user ID and group ID, if
        specified. (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        or real UID is 0.)
        """
        self._reactor = reactor
        if not proto:
            # With no protocol there is nothing to consume or feed pipes,
            # so reader/writer childFDs would be useless.
            assert "r" not in childFDs.values()
            assert "w" not in childFDs.values()
        _BaseProcess.__init__(self, proto)

        self.pipes = {}
        # keys are childFDs, we can sense them closing
        # values are ProcessReader/ProcessWriters

        helpers = {}
        # keys are childFDs
        # values are parentFDs

        if childFDs is None:
            # Default layout: standard input, output, and error.
            childFDs = {
                0: "w",  # we write to the child's stdin
                1: "r",  # we read from their stdout
                2: "r",  # and we read from their stderr
            }

        debug = self.debug
        if debug:
            print("childFDs", childFDs)

        _openedPipes = []

        def pipe():
            # Track every FD we open so all of them can be closed if
            # spawning fails partway through.
            r, w = os.pipe()
            _openedPipes.extend([r, w])
            return r, w

        # fdmap.keys() are filenos of pipes that are used by the child.
        fdmap = {}  # maps childFD to parentFD
        try:
            for childFD, target in childFDs.items():
                if debug:
                    print("[%d]" % childFD, target)
                if target == "r":
                    # we need a pipe that the parent can read from
                    readFD, writeFD = pipe()
                    if debug:
                        print("readFD=%d, writeFD=%d" % (readFD, writeFD))
                    fdmap[childFD] = writeFD  # child writes to this
                    helpers[childFD] = readFD  # parent reads from this
                elif target == "w":
                    # we need a pipe that the parent can write to
                    readFD, writeFD = pipe()
                    if debug:
                        print("readFD=%d, writeFD=%d" % (readFD, writeFD))
                    fdmap[childFD] = readFD  # child reads from this
                    helpers[childFD] = writeFD  # parent writes to this
                else:
                    assert type(target) == int, f"{target!r} should be an int"
                    fdmap[childFD] = target  # parent ignores this
            if debug:
                print("fdmap", fdmap)
            if debug:
                print("helpers", helpers)
            # the child only cares about fdmap.values()

            self._fork(path, uid, gid, executable, args, environment, fdmap=fdmap)
        except BaseException:
            # Spawning failed: release every pipe FD we created above.
            for pipe in _openedPipes:
                os.close(pipe)
            raise

        # we are the parent process:
        self.proto = proto

        # arrange for the parent-side pipes to be read and written
        for childFD, parentFD in helpers.items():
            # The child's end of each pipe is no longer needed in the parent.
            os.close(fdmap[childFD])
            if childFDs[childFD] == "r":
                reader = self.processReaderFactory(reactor, self, childFD, parentFD)
                self.pipes[childFD] = reader

            if childFDs[childFD] == "w":
                writer = self.processWriterFactory(
                    reactor, self, childFD, parentFD, forceReadHack=True
                )
                self.pipes[childFD] = writer

        try:
            # the 'transport' is used for some compatibility methods
            if self.proto is not None:
                self.proto.makeConnection(self)
        except BaseException:
            log.err()

        # The reactor might not be running yet. This might call back into
        # processEnded synchronously, triggering an application-visible
        # callback. That's probably not ideal. The replacement API for
        # spawnProcess should improve upon this situation.
        registerReapProcessHandler(self.pid, self)

    def _trySpawnInsteadOfFork(
        self, path, uid, gid, executable, args, environment, kwargs
    ):
        """
        Try to use posix_spawnp() instead of fork(), if possible.

        @return: a boolean indicating whether posix_spawnp() was used or not.
        """
        if (
            # no support for setuid/setgid anywhere but in QNX's
            # posix_spawnattr_setcred
            (uid is not None)
            or (gid is not None)
            or ((path is not None) and (os.path.abspath(path) != os.path.abspath(".")))
            or getattr(self._reactor, "_neverUseSpawn", False)
        ):
            return False
        fdmap = kwargs.get("fdmap")
        # Snapshot every open FD together with its close-on-exec flag so
        # _getFileActions can compute the dup2/close instruction list.
        fdState = []
        for eachFD in _listOpenFDs():
            try:
                isCloseOnExec = fcntl.fcntl(eachFD, fcntl.F_GETFD, fcntl.FD_CLOEXEC)
            except OSError:
                # Not actually an open descriptor; skip it.
                pass
            else:
                fdState.append((eachFD, isCloseOnExec))
        if environment is None:
            environment = {}

        # Signals the parent ignores must be reset to their defaults in the
        # child (the posix_spawn analogue of _resetSignalDisposition).
        setSigDef = [
            everySignal
            for everySignal in range(1, signal.NSIG)
            if signal.getsignal(everySignal) == signal.SIG_IGN
        ]

        self.pid = os.posix_spawnp(
            executable,
            args,
            environment,
            file_actions=_getFileActions(
                fdState, fdmap, doClose=_PS_CLOSE, doDup2=_PS_DUP2
            ),
            setsigdef=setSigDef,
        )
        self.status = -1
        return True

    if getattr(os, "posix_spawnp", None) is None:
        # If there's no posix_spawn implemented, let the superclass handle it
        del _trySpawnInsteadOfFork

    def _setupChild(self, fdmap):
        """
        fdmap[childFD] = parentFD

        The child wants to end up with 'childFD' attached to what used to be
        the parent's parentFD. As an example, a bash command run like
        'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.
        'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.

        This is accomplished in two steps::

            1. close all file descriptors that aren't values of fdmap. This
               means 0 .. maxfds (or just the open fds within that range, if
               the platform supports '/proc/<pid>/fd').

            2. for each childFD::

                 - if fdmap[childFD] == childFD, the descriptor is already in
                   place. Make sure the CLOEXEC flag is not set, then delete
                   the entry from fdmap.

                 - if childFD is in fdmap.values(), then the target descriptor
                   is busy. Use os.dup() to move it elsewhere, update all
                   fdmap[childFD] items that point to it, then close the
                   original. Then fall through to the next case.

                 - now fdmap[childFD] is not in fdmap.values(), and is free.
                   Use os.dup2() to move it to the right place, then close the
                   original.
        """
        debug = self.debug_child
        if debug:
            errfd = sys.stderr
            errfd.write("starting _setupChild\n")

        destList = fdmap.values()
        for fd in _listOpenFDs():
            if fd in destList:
                continue
            if debug and fd == errfd.fileno():
                # Keep the debug stream open while debugging.
                continue
            try:
                os.close(fd)
            except BaseException:
                pass

        # at this point, the only fds still open are the ones that need to
        # be moved to their appropriate positions in the child (the targets
        # of fdmap, i.e. fdmap.values() )

        if debug:
            print("fdmap", fdmap, file=errfd)
        for child in sorted(fdmap.keys()):
            target = fdmap[child]
            if target == child:
                # fd is already in place
                if debug:
                    print("%d already in place" % target, file=errfd)
                fdesc._unsetCloseOnExec(child)
            else:
                if child in fdmap.values():
                    # we can't replace child-fd yet, as some other mapping
                    # still needs the fd it wants to target. We must preserve
                    # that old fd by duping it to a new home.
                    newtarget = os.dup(child)  # give it a safe home
                    if debug:
                        print("os.dup(%d) -> %d" % (child, newtarget), file=errfd)
                    os.close(child)  # close the original
                    for c, p in list(fdmap.items()):
                        if p == child:
                            fdmap[c] = newtarget  # update all pointers
                # now it should be available
                if debug:
                    print("os.dup2(%d,%d)" % (target, child), file=errfd)
                os.dup2(target, child)

        # At this point, the child has everything it needs. We want to close
        # everything that isn't going to be used by the child, i.e.
        # everything not in fdmap.keys(). The only remaining fds open are
        # those in fdmap.values().

        # Any given fd may appear in fdmap.values() multiple times, so we
        # need to remove duplicates first.

        old = []
        for fd in fdmap.values():
            if fd not in old:
                if fd not in fdmap.keys():
                    old.append(fd)
        if debug:
            print("old", old, file=errfd)
        for fd in old:
            os.close(fd)

        self._resetSignalDisposition()

    def writeToChild(self, childFD, data):
        """
        Write C{data} to the pipe attached to the child's C{childFD}.
        """
        self.pipes[childFD].write(data)

    def closeChildFD(self, childFD):
        # for writer pipes, loseConnection tries to write the remaining data
        # out to the pipe before closing it
        # if childFD is not in the list of pipes, assume that it is already
        # closed
        if childFD in self.pipes:
            self.pipes[childFD].loseConnection()

    def pauseProducing(self):
        # Stop reading on every reader pipe; writers are unaffected.
        for p in self.pipes.values():
            if isinstance(p, ProcessReader):
                p.stopReading()

    def resumeProducing(self):
        # Resume reading on every reader pipe.
        for p in self.pipes.values():
            if isinstance(p, ProcessReader):
                p.startReading()

    # compatibility
    def closeStdin(self):
        """
        Call this to close standard input on this process.
        """
        self.closeChildFD(0)

    def closeStdout(self):
        """
        Call this to close standard output on this process.
        """
        self.closeChildFD(1)

    def closeStderr(self):
        """
        Call this to close standard error on this process.
        """
        self.closeChildFD(2)

    def loseConnection(self):
        """
        Close stdin, stderr and stdout.
        """
        self.closeStdin()
        self.closeStderr()
        self.closeStdout()

    def write(self, data):
        """
        Call this to write to standard input on this process.

        NOTE: This will silently lose data if there is no standard input.
        """
        if 0 in self.pipes:
            self.pipes[0].write(data)

    def registerProducer(self, producer, streaming):
        """
        Call this to register producer for standard input.

        If there is no standard input producer.stopProducing() will
        be called immediately.
        """
        if 0 in self.pipes:
            self.pipes[0].registerProducer(producer, streaming)
        else:
            producer.stopProducing()

    def unregisterProducer(self):
        """
        Call this to unregister producer for standard input."""
        if 0 in self.pipes:
            self.pipes[0].unregisterProducer()

    def writeSequence(self, seq):
        """
        Call this to write to standard input on this process.

        NOTE: This will silently lose data if there is no standard input.
        """
        if 0 in self.pipes:
            self.pipes[0].writeSequence(seq)

    def childDataReceived(self, name, data):
        # Relay output arriving on child FD `name` to the protocol.
        self.proto.childDataReceived(name, data)

    def childConnectionLost(self, childFD, reason):
        # this is called when one of the helpers (ProcessReader or
        # ProcessWriter) notices their pipe has been closed
        os.close(self.pipes[childFD].fileno())
        del self.pipes[childFD]
        try:
            self.proto.childConnectionLost(childFD)
        except BaseException:
            log.err()
        self.maybeCallProcessEnded()

    def maybeCallProcessEnded(self):
        # we don't call ProcessProtocol.processEnded until:
        #  the child has terminated, AND
        #  all writers have indicated an error status, AND
        #  all readers have indicated EOF
        # This insures that we've gathered all output from the process.
        if self.pipes:
            return
        if not self.lostProcess:
            self.reapProcess()
            return
        _BaseProcess.maybeCallProcessEnded(self)

    def getHost(self):
        # ITransport.getHost
        raise NotImplementedError()

    def getPeer(self):
        # ITransport.getPeer
        raise NotImplementedError()
+
+
@implementer(IProcessTransport)
class PTYProcess(abstract.FileDescriptor, _BaseProcess):
    """
    An operating-system Process that uses PTY support.
    """

    # -1 means "child running / not yet reaped".
    status = -1
    pid = None

    def __init__(
        self,
        reactor,
        executable,
        args,
        environment,
        path,
        proto,
        uid=None,
        gid=None,
        usePTY=None,
    ):
        """
        Spawn an operating-system process.

        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens. (This is
        executed automatically when a Process is instantiated.)

        This will also run the subprocess as a given user ID and group ID, if
        specified. (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        or real UID is 0.)
        """
        if pty is None and not isinstance(usePTY, (tuple, list)):
            # no pty module and we didn't get a pty to use
            raise NotImplementedError(
                "cannot use PTYProcess on platforms without the pty module."
            )
        abstract.FileDescriptor.__init__(self, reactor)
        _BaseProcess.__init__(self, proto)

        # usePTY may be a pre-opened (master, slave, name) tuple; otherwise
        # allocate a fresh pty pair here.
        if isinstance(usePTY, (tuple, list)):
            masterfd, slavefd, _ = usePTY
        else:
            masterfd, slavefd = pty.openpty()

        try:
            self._fork(
                path,
                uid,
                gid,
                executable,
                args,
                environment,
                masterfd=masterfd,
                slavefd=slavefd,
            )
        except BaseException:
            # Only close FDs we opened ourselves; caller-supplied ptys are
            # the caller's responsibility.
            if not isinstance(usePTY, (tuple, list)):
                os.close(masterfd)
                os.close(slavefd)
            raise

        # we are now in parent process:
        os.close(slavefd)
        fdesc.setNonBlocking(masterfd)
        self.fd = masterfd
        self.startReading()
        self.connected = 1
        self.status = -1
        try:
            self.proto.makeConnection(self)
        except BaseException:
            log.err()
        registerReapProcessHandler(self.pid, self)

    def _setupChild(self, masterfd, slavefd):
        """
        Set up child process after C{fork()} but before C{exec()}.

        This involves:

            - closing C{masterfd}, since it is not used in the subprocess

            - creating a new session with C{os.setsid}

            - changing the controlling terminal of the process (and the new
              session) to point at C{slavefd}

            - duplicating C{slavefd} to standard input, output, and error

            - closing all other open file descriptors (according to
              L{_listOpenFDs})

            - re-setting all signal handlers to C{SIG_DFL}

        @param masterfd: The master end of a PTY file descriptors opened with
            C{openpty}.
        @type masterfd: L{int}

        @param slavefd: The slave end of a PTY opened with C{openpty}.
        @type slavefd: L{int}
        """
        os.close(masterfd)
        os.setsid()
        # Make the pty slave this session's controlling terminal.
        fcntl.ioctl(slavefd, termios.TIOCSCTTY, "")

        for fd in range(3):
            if fd != slavefd:
                os.close(fd)

        os.dup2(slavefd, 0)  # stdin
        os.dup2(slavefd, 1)  # stdout
        os.dup2(slavefd, 2)  # stderr

        for fd in _listOpenFDs():
            if fd > 2:
                try:
                    os.close(fd)
                except BaseException:
                    pass

        self._resetSignalDisposition()

    def closeStdin(self):
        # PTYs do not have stdin/stdout/stderr. They only have in and out, just
        # like sockets. You cannot close one without closing off the entire PTY
        pass

    def closeStdout(self):
        # See closeStdin: a PTY cannot close individual standard streams.
        pass

    def closeStderr(self):
        # See closeStdin: a PTY cannot close individual standard streams.
        pass

    def doRead(self):
        """
        Called when my standard output stream is ready for reading.
        """
        # All pty output is reported to the protocol as child FD 1.
        return fdesc.readFromFD(
            self.fd, lambda data: self.proto.childDataReceived(1, data)
        )

    def fileno(self):
        """
        This returns the file number of standard output on this process.
        """
        return self.fd

    def maybeCallProcessEnded(self):
        # two things must happen before we call the ProcessProtocol's
        # processEnded method. 1: the child process must die and be reaped
        # (which calls our own processEnded method). 2: the child must close
        # their stdin/stdout/stderr fds, causing the pty to close, causing
        # our connectionLost method to be called. #2 can also be triggered
        # by calling .loseConnection().
        if self.lostProcess == 2:
            _BaseProcess.maybeCallProcessEnded(self)

    def connectionLost(self, reason):
        """
        I call this to clean up when one or all of my connections has died.
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        os.close(self.fd)
        # Count this as one of the two events maybeCallProcessEnded waits for.
        self.lostProcess += 1
        self.maybeCallProcessEnded()

    def writeSomeData(self, data):
        """
        Write some data to the open process.
        """
        return fdesc.writeToFD(self.fd, data)

    def closeChildFD(self, descriptor):
        # IProcessTransport; not meaningful for a pty (see closeStdin).
        raise NotImplementedError()

    def writeToChild(self, childFD, data):
        # IProcessTransport; a pty has a single stream, not per-FD pipes.
        raise NotImplementedError()
diff --git a/contrib/python/Twisted/py3/twisted/internet/protocol.py b/contrib/python/Twisted/py3/twisted/internet/protocol.py
new file mode 100644
index 0000000000..4fcf0e1038
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/protocol.py
@@ -0,0 +1,900 @@
+# -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Standard implementations of Twisted protocol-related interfaces.
+
+Start here if you are looking to write a new protocol implementation for
+Twisted. The Protocol class contains some introductory material.
+"""
+
+
+import random
+from typing import Any, Callable, Optional
+
+from zope.interface import implementer
+
+from twisted.internet import defer, error, interfaces
+from twisted.internet.interfaces import IAddress, ITransport
+from twisted.logger import _loggerFor
+from twisted.python import components, failure, log
+
+
+@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
+class Factory:
+ """
+ This is a factory which produces protocols.
+
+ By default, buildProtocol will create a protocol of the class given in
+ self.protocol.
+ """
+
+ protocol: "Optional[Callable[[], Protocol]]" = None
+
+ numPorts = 0
+ noisy = True
+
+ @classmethod
+ def forProtocol(cls, protocol, *args, **kwargs):
+ """
+ Create a factory for the given protocol.
+
+ It sets the C{protocol} attribute and returns the constructed factory
+ instance.
+
+ @param protocol: A L{Protocol} subclass
+
+ @param args: Positional arguments for the factory.
+
+ @param kwargs: Keyword arguments for the factory.
+
+ @return: A L{Factory} instance wired up to C{protocol}.
+ """
+ factory = cls(*args, **kwargs)
+ factory.protocol = protocol
+ return factory
+
+ def logPrefix(self):
+ """
+ Describe this factory for log messages.
+ """
+ return self.__class__.__name__
+
+ def doStart(self):
+ """
+ Make sure startFactory is called.
+
+ Users should not call this function themselves!
+ """
+ if not self.numPorts:
+ if self.noisy:
+ _loggerFor(self).info("Starting factory {factory!r}", factory=self)
+ self.startFactory()
+ self.numPorts = self.numPorts + 1
+
+ def doStop(self):
+ """
+ Make sure stopFactory is called.
+
+ Users should not call this function themselves!
+ """
+ if self.numPorts == 0:
+ # This shouldn't happen, but does sometimes and this is better
+ # than blowing up in assert as we did previously.
+ return
+ self.numPorts = self.numPorts - 1
+ if not self.numPorts:
+ if self.noisy:
+ _loggerFor(self).info("Stopping factory {factory!r}", factory=self)
+ self.stopFactory()
+
+ def startFactory(self):
+ """
+ This will be called before I begin listening on a Port or Connector.
+
+ It will only be called once, even if the factory is connected
+ to multiple ports.
+
+ This can be used to perform 'unserialization' tasks that
+ are best put off until things are actually running, such
+ as connecting to a database, opening files, etcetera.
+ """
+
+ def stopFactory(self):
+ """
+ This will be called before I stop listening on all Ports/Connectors.
+
+ This can be overridden to perform 'shutdown' tasks such as disconnecting
+ database connections, closing files, etc.
+
+ It will be called, for example, before an application shuts down,
+ if it was connected to a port. User code should not call this function
+ directly.
+ """
+
+ def buildProtocol(self, addr: IAddress) -> "Optional[Protocol]":
+ """
+ Create an instance of a subclass of Protocol.
+
+ The returned instance will handle input on an incoming server
+ connection, and an attribute "factory" pointing to the creating
+ factory.
+
+ Alternatively, L{None} may be returned to immediately close the
+ new connection.
+
+ Override this method to alter how Protocol instances get created.
+
+ @param addr: an object implementing L{IAddress}
+ """
+ assert self.protocol is not None
+ p = self.protocol()
+ p.factory = self
+ return p
+
+
+class ClientFactory(Factory):
+ """
+ A Protocol factory for clients.
+
+ This can be used together with the various connectXXX methods in
+ reactors.
+ """
+
+ def startedConnecting(self, connector):
+ """
+ Called when a connection has been started.
+
+ You can call connector.stopConnecting() to stop the connection attempt.
+
+ @param connector: a Connector object.
+ """
+
+ def clientConnectionFailed(self, connector, reason):
+ """
+ Called when a connection has failed to connect.
+
+ It may be useful to call connector.connect() - this will reconnect.
+
+ @type reason: L{twisted.python.failure.Failure}
+ """
+
+ def clientConnectionLost(self, connector, reason):
+ """
+ Called when an established connection is lost.
+
+ It may be useful to call connector.connect() - this will reconnect.
+
+ @type reason: L{twisted.python.failure.Failure}
+ """
+
+
+class _InstanceFactory(ClientFactory):
+ """
+ Factory used by ClientCreator.
+
+ @ivar deferred: The L{Deferred} which represents this connection attempt and
+ which will be fired when it succeeds or fails.
+
+ @ivar pending: After a connection attempt succeeds or fails, a delayed call
+ which will fire the L{Deferred} representing this connection attempt.
+ """
+
+ noisy = False
+ pending = None
+
+ def __init__(self, reactor, instance, deferred):
+ self.reactor = reactor
+ self.instance = instance
+ self.deferred = deferred
+
+ def __repr__(self) -> str:
+ return f"<ClientCreator factory: {self.instance!r}>"
+
+ def buildProtocol(self, addr):
+ """
+ Return the pre-constructed protocol instance and arrange to fire the
+ waiting L{Deferred} to indicate success establishing the connection.
+ """
+ self.pending = self.reactor.callLater(
+ 0, self.fire, self.deferred.callback, self.instance
+ )
+ self.deferred = None
+ return self.instance
+
+ def clientConnectionFailed(self, connector, reason):
+ """
+ Arrange to fire the waiting L{Deferred} with the given failure to
+ indicate the connection could not be established.
+ """
+ self.pending = self.reactor.callLater(
+ 0, self.fire, self.deferred.errback, reason
+ )
+ self.deferred = None
+
+ def fire(self, func, value):
+ """
+ Clear C{self.pending} to avoid a reference cycle and then invoke func
+ with the value.
+ """
+ self.pending = None
+ func(value)
+
+
+class ClientCreator:
+ """
+ Client connections that do not require a factory.
+
+ The various connect* methods create a protocol instance using the given
+ protocol class and arguments, and connect it, returning a Deferred of the
+ resulting protocol instance.
+
+ Useful for cases when we don't really need a factory. Mainly this
+ is when there is no shared state between protocol instances, and no need
+ to reconnect.
+
+ The C{connectTCP}, C{connectUNIX}, and C{connectSSL} methods each return a
+ L{Deferred} which will fire with an instance of the protocol class passed to
+ L{ClientCreator.__init__}. These Deferreds can be cancelled to abort the
+ connection attempt (in a very unlikely case, cancelling the Deferred may not
+ prevent the protocol from being instantiated and connected to a transport;
+ if this happens, it will be disconnected immediately afterwards and the
+ Deferred will still errback with L{CancelledError}).
+ """
+
+ def __init__(self, reactor, protocolClass, *args, **kwargs):
+ self.reactor = reactor
+ self.protocolClass = protocolClass
+ self.args = args
+ self.kwargs = kwargs
+
+ def _connect(self, method, *args, **kwargs):
+ """
+ Initiate a connection attempt.
+
+ @param method: A callable which will actually start the connection
+ attempt. For example, C{reactor.connectTCP}.
+
+ @param args: Positional arguments to pass to C{method}, excluding the
+ factory.
+
+ @param kwargs: Keyword arguments to pass to C{method}.
+
+ @return: A L{Deferred} which fires with an instance of the protocol
+ class passed to this L{ClientCreator}'s initializer or fails if the
+ connection cannot be set up for some reason.
+ """
+
+ def cancelConnect(deferred):
+ connector.disconnect()
+ if f.pending is not None:
+ f.pending.cancel()
+
+ d = defer.Deferred(cancelConnect)
+ f = _InstanceFactory(
+ self.reactor, self.protocolClass(*self.args, **self.kwargs), d
+ )
+ connector = method(factory=f, *args, **kwargs)
+ return d
+
+ def connectTCP(self, host, port, timeout=30, bindAddress=None):
+ """
+ Connect to a TCP server.
+
+ The parameters are all the same as to L{IReactorTCP.connectTCP} except
+ that the factory parameter is omitted.
+
+ @return: A L{Deferred} which fires with an instance of the protocol
+ class passed to this L{ClientCreator}'s initializer or fails if the
+ connection cannot be set up for some reason.
+ """
+ return self._connect(
+ self.reactor.connectTCP,
+ host,
+ port,
+ timeout=timeout,
+ bindAddress=bindAddress,
+ )
+
+ def connectUNIX(self, address, timeout=30, checkPID=False):
+ """
+ Connect to a Unix socket.
+
+ The parameters are all the same as to L{IReactorUNIX.connectUNIX} except
+ that the factory parameter is omitted.
+
+ @return: A L{Deferred} which fires with an instance of the protocol
+ class passed to this L{ClientCreator}'s initializer or fails if the
+ connection cannot be set up for some reason.
+ """
+ return self._connect(
+ self.reactor.connectUNIX, address, timeout=timeout, checkPID=checkPID
+ )
+
+ def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
+ """
+ Connect to an SSL server.
+
+ The parameters are all the same as to L{IReactorSSL.connectSSL} except
+ that the factory parameter is omitted.
+
+ @return: A L{Deferred} which fires with an instance of the protocol
+ class passed to this L{ClientCreator}'s initializer or fails if the
+ connection cannot be set up for some reason.
+ """
+ return self._connect(
+ self.reactor.connectSSL,
+ host,
+ port,
+ contextFactory=contextFactory,
+ timeout=timeout,
+ bindAddress=bindAddress,
+ )
+
+
+class ReconnectingClientFactory(ClientFactory):
+ """
+ Factory which auto-reconnects clients with an exponential back-off.
+
+ Note that clients should call my resetDelay method after they have
+ connected successfully.
+
+ @ivar maxDelay: Maximum number of seconds between connection attempts.
+ @ivar initialDelay: Delay for the first reconnection attempt.
+ @ivar factor: A multiplicative factor by which the delay grows.
+ @ivar jitter: Percentage of randomness to introduce into the delay length
+ to prevent stampeding.
+ @ivar clock: The clock used to schedule reconnection. It's mainly useful to
+ be parametrized in tests. If the factory is serialized, this attribute
+ will not be serialized, and the default value (the reactor) will be
+ restored when deserialized.
+ @type clock: L{IReactorTime}
+ @ivar maxRetries: Maximum number of consecutive unsuccessful connection
+ attempts, after which no further connection attempts will be made. If
+ this is not explicitly set, no maximum is applied.
+ """
+
+ maxDelay = 3600
+ initialDelay = 1.0
+ # Note: These highly sensitive factors have been precisely measured by
+ # the National Institute of Standards and Technology. Take extreme care
+ # in altering them, or you may damage your Internet!
+ # (Seriously: <http://physics.nist.gov/cuu/Constants/index.html>)
+ factor = 2.7182818284590451 # (math.e)
+ # Phi = 1.6180339887498948 # (Phi is acceptable for use as a
+ # factor if e is too large for your application.)
+
+ # This is the value of the molar Planck constant times c, joule
+ # meter/mole. The value is attributable to
+ # https://physics.nist.gov/cgi-bin/cuu/Value?nahc|search_for=molar+planck+constant+times+c
+ jitter = 0.119626565582
+
+ delay = initialDelay
+ retries = 0
+ maxRetries = None
+ _callID = None
+ connector = None
+ clock = None
+
+ continueTrying = 1
+
+ def clientConnectionFailed(self, connector, reason):
+ if self.continueTrying:
+ self.connector = connector
+ self.retry()
+
+ def clientConnectionLost(self, connector, unused_reason):
+ if self.continueTrying:
+ self.connector = connector
+ self.retry()
+
+ def retry(self, connector=None):
+ """
+ Have this connector connect again, after a suitable delay.
+ """
+ if not self.continueTrying:
+ if self.noisy:
+ log.msg(f"Abandoning {connector} on explicit request")
+ return
+
+ if connector is None:
+ if self.connector is None:
+ raise ValueError("no connector to retry")
+ else:
+ connector = self.connector
+
+ self.retries += 1
+ if self.maxRetries is not None and (self.retries > self.maxRetries):
+ if self.noisy:
+ log.msg("Abandoning %s after %d retries." % (connector, self.retries))
+ return
+
+ self.delay = min(self.delay * self.factor, self.maxDelay)
+ if self.jitter:
+ self.delay = random.normalvariate(self.delay, self.delay * self.jitter)
+
+ if self.noisy:
+ log.msg(
+ "%s will retry in %d seconds"
+ % (
+ connector,
+ self.delay,
+ )
+ )
+
+ def reconnector():
+ self._callID = None
+ connector.connect()
+
+ if self.clock is None:
+ from twisted.internet import reactor
+
+ self.clock = reactor
+ self._callID = self.clock.callLater(self.delay, reconnector)
+
+ def stopTrying(self):
+ """
+ Put a stop to any attempt to reconnect in progress.
+ """
+ # ??? Is this function really stopFactory?
+ if self._callID:
+ self._callID.cancel()
+ self._callID = None
+ self.continueTrying = 0
+ if self.connector:
+ try:
+ self.connector.stopConnecting()
+ except error.NotConnectingError:
+ pass
+
+ def resetDelay(self):
+ """
+ Call this method after a successful connection: it resets the delay and
+ the retry counter.
+ """
+ self.delay = self.initialDelay
+ self.retries = 0
+ self._callID = None
+ self.continueTrying = 1
+
+ def __getstate__(self):
+ """
+ Remove all of the state which is mutated by connection attempts and
+ failures, returning just the state which describes how reconnections
+ should be attempted. This will make the unserialized instance
+ behave just as this one did when it was first instantiated.
+ """
+ state = self.__dict__.copy()
+ for key in [
+ "connector",
+ "retries",
+ "delay",
+ "continueTrying",
+ "_callID",
+ "clock",
+ ]:
+ if key in state:
+ del state[key]
+ return state
+
+
+class ServerFactory(Factory):
+ """
+ Subclass this to indicate that your protocol.Factory is only usable for servers.
+ """
+
+
+class BaseProtocol:
+ """
+ This is the abstract superclass of all protocols.
+
+ Some methods have helpful default implementations here so that they can
+ easily be shared, but otherwise the direct subclasses of this class are more
+ interesting, L{Protocol} and L{ProcessProtocol}.
+ """
+
+ connected = 0
+ transport: Optional[ITransport] = None
+
+ def makeConnection(self, transport):
+ """
+ Make a connection to a transport and a server.
+
+ This sets the 'transport' attribute of this Protocol, and calls the
+ connectionMade() callback.
+ """
+ self.connected = 1
+ self.transport = transport
+ self.connectionMade()
+
+ def connectionMade(self):
+ """
+ Called when a connection is made.
+
+ This may be considered the initializer of the protocol, because
+ it is called when the connection is completed. For clients,
+ this is called once the connection to the server has been
+ established; for servers, this is called after an accept() call
+ stops blocking and a socket has been received. If you need to
+ send any greeting or initial message, do it here.
+ """
+
+
+connectionDone = failure.Failure(error.ConnectionDone())
+connectionDone.cleanFailure()
+
+
+@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
+class Protocol(BaseProtocol):
+ """
+ This is the base class for streaming connection-oriented protocols.
+
+ If you are going to write a new connection-oriented protocol for Twisted,
+ start here. Any protocol implementation, either client or server, should
+ be a subclass of this class.
+
+ The API is quite simple. Implement L{dataReceived} to handle both
+ event-based and synchronous input; output can be sent through the
+ 'transport' attribute, which is to be an instance that implements
+ L{twisted.internet.interfaces.ITransport}. Override C{connectionLost} to be
+ notified when the connection ends.
+
+ Some subclasses exist already to help you write common types of protocols:
+ see the L{twisted.protocols.basic} module for a few of them.
+ """
+
+ factory: Optional[Factory] = None
+
+ def logPrefix(self):
+ """
+ Return a prefix matching the class name, to identify log messages
+ related to this protocol instance.
+ """
+ return self.__class__.__name__
+
+ def dataReceived(self, data: bytes) -> None:
+ """
+ Called whenever data is received.
+
+ Use this method to translate to a higher-level message. Usually, some
+ callback will be made upon the receipt of each complete protocol
+ message.
+
+ @param data: a string of indeterminate length. Please keep in mind
+ that you will probably need to buffer some data, as partial
+ (or multiple) protocol messages may be received! I recommend
+ that unit tests for protocols call through to this method with
+ differing chunk sizes, down to one byte at a time.
+ """
+
+ def connectionLost(self, reason: failure.Failure = connectionDone) -> None:
+ """
+ Called when the connection is shut down.
+
+ Clear any circular references here, and any external references
+ to this Protocol. The connection has been closed.
+
+ @type reason: L{twisted.python.failure.Failure}
+ """
+
+
+@implementer(interfaces.IConsumer)
+class ProtocolToConsumerAdapter(components.Adapter):
+ def write(self, data: bytes) -> None:
+ self.original.dataReceived(data)
+
+ def registerProducer(self, producer, streaming):
+ pass
+
+ def unregisterProducer(self):
+ pass
+
+
+components.registerAdapter(
+ ProtocolToConsumerAdapter, interfaces.IProtocol, interfaces.IConsumer
+)
+
+
+@implementer(interfaces.IProtocol)
+class ConsumerToProtocolAdapter(components.Adapter):
+ def dataReceived(self, data: bytes) -> None:
+ self.original.write(data)
+
+ def connectionLost(self, reason: failure.Failure) -> None:
+ pass
+
+ def makeConnection(self, transport):
+ pass
+
+ def connectionMade(self):
+ pass
+
+
+components.registerAdapter(
+ ConsumerToProtocolAdapter, interfaces.IConsumer, interfaces.IProtocol
+)
+
+
+@implementer(interfaces.IProcessProtocol)
+class ProcessProtocol(BaseProtocol):
+ """
+ Base process protocol implementation which does simple dispatching for
+ stdin, stdout, and stderr file descriptors.
+ """
+
+ transport: Optional[interfaces.IProcessTransport] = None
+
+ def childDataReceived(self, childFD: int, data: bytes) -> None:
+ if childFD == 1:
+ self.outReceived(data)
+ elif childFD == 2:
+ self.errReceived(data)
+
+ def outReceived(self, data: bytes) -> None:
+ """
+ Some data was received from stdout.
+ """
+
+ def errReceived(self, data: bytes) -> None:
+ """
+ Some data was received from stderr.
+ """
+
+ def childConnectionLost(self, childFD: int) -> None:
+ if childFD == 0:
+ self.inConnectionLost()
+ elif childFD == 1:
+ self.outConnectionLost()
+ elif childFD == 2:
+ self.errConnectionLost()
+
+ def inConnectionLost(self):
+ """
+ This will be called when stdin is closed.
+ """
+
+ def outConnectionLost(self):
+ """
+ This will be called when stdout is closed.
+ """
+
+ def errConnectionLost(self):
+ """
+ This will be called when stderr is closed.
+ """
+
+ def processExited(self, reason: failure.Failure) -> None:
+ """
+ This will be called when the subprocess exits.
+
+ @type reason: L{twisted.python.failure.Failure}
+ """
+
+ def processEnded(self, reason: failure.Failure) -> None:
+ """
+ Called when the child process exits and all file descriptors
+ associated with it have been closed.
+
+ @type reason: L{twisted.python.failure.Failure}
+ """
+
+
+class AbstractDatagramProtocol:
+ """
+ Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP,
+ UDP.
+ """
+
+ transport = None
+ numPorts = 0
+ noisy = True
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ d["transport"] = None
+ return d
+
+ def doStart(self):
+ """
+ Make sure startProtocol is called.
+
+ This will be called by makeConnection(), users should not call it.
+ """
+ if not self.numPorts:
+ if self.noisy:
+ log.msg("Starting protocol %s" % self)
+ self.startProtocol()
+ self.numPorts = self.numPorts + 1
+
+ def doStop(self):
+ """
+ Make sure stopProtocol is called.
+
+ This will be called by the port, users should not call it.
+ """
+ assert self.numPorts > 0
+ self.numPorts = self.numPorts - 1
+ self.transport = None
+ if not self.numPorts:
+ if self.noisy:
+ log.msg("Stopping protocol %s" % self)
+ self.stopProtocol()
+
+ def startProtocol(self):
+ """
+ Called when a transport is connected to this protocol.
+
+ Will only be called once, even if multiple ports are connected.
+ """
+
+ def stopProtocol(self):
+ """
+ Called when the transport is disconnected.
+
+ Will only be called once, after all ports are disconnected.
+ """
+
+ def makeConnection(self, transport):
+ """
+ Make a connection to a transport and a server.
+
+ This sets the 'transport' attribute of this DatagramProtocol, and calls the
+ doStart() callback.
+ """
+ assert self.transport == None
+ self.transport = transport
+ self.doStart()
+
+ def datagramReceived(self, datagram: bytes, addr: Any) -> None:
+ """
+ Called when a datagram is received.
+
+ @param datagram: the bytes received from the transport.
+ @param addr: tuple of source of datagram.
+ """
+
+
+@implementer(interfaces.ILoggingContext)
+class DatagramProtocol(AbstractDatagramProtocol):
+ """
+ Protocol for datagram-oriented transport, e.g. UDP.
+
+ @type transport: L{None} or
+ L{IUDPTransport<twisted.internet.interfaces.IUDPTransport>} provider
+ @ivar transport: The transport with which this protocol is associated,
+ if it is associated with one.
+ """
+
+ def logPrefix(self):
+ """
+ Return a prefix matching the class name, to identify log messages
+ related to this protocol instance.
+ """
+ return self.__class__.__name__
+
+ def connectionRefused(self):
+ """
+ Called due to error from write in connected mode.
+
+ Note this is a result of ICMP message generated by *previous*
+ write.
+ """
+
+
+class ConnectedDatagramProtocol(DatagramProtocol):
+ """
+ Protocol for connected datagram-oriented transport.
+
+ No longer necessary for UDP.
+ """
+
+ def datagramReceived(self, datagram):
+ """
+ Called when a datagram is received.
+
+ @param datagram: the string received from the transport.
+ """
+
+ def connectionFailed(self, failure: failure.Failure) -> None:
+ """
+ Called if connecting failed.
+
+ Usually this will be due to a DNS lookup failure.
+ """
+
+
+@implementer(interfaces.ITransport)
+class FileWrapper:
+ """
+ A wrapper around a file-like object to make it behave as a Transport.
+
+ This doesn't actually stream the file to the attached protocol,
+ and is thus useful mainly as a utility for debugging protocols.
+ """
+
+ closed = 0
+ disconnecting = 0
+ producer = None
+ streamingProducer = 0
+
+ def __init__(self, file):
+ self.file = file
+
+ def write(self, data: bytes) -> None:
+ try:
+ self.file.write(data)
+ except BaseException:
+ self.handleException()
+
+ def _checkProducer(self):
+ # Cheating; this is called at "idle" times to allow producers to be
+ # found and dealt with
+ if self.producer:
+ self.producer.resumeProducing()
+
+ def registerProducer(self, producer, streaming):
+ """
+ From abstract.FileDescriptor
+ """
+ self.producer = producer
+ self.streamingProducer = streaming
+ if not streaming:
+ producer.resumeProducing()
+
+ def unregisterProducer(self):
+ self.producer = None
+
+ def stopConsuming(self):
+ self.unregisterProducer()
+ self.loseConnection()
+
+ def writeSequence(self, iovec):
+ self.write(b"".join(iovec))
+
+ def loseConnection(self):
+ self.closed = 1
+ try:
+ self.file.close()
+ except OSError:
+ self.handleException()
+
+ def getPeer(self):
+ # FIXME: https://twistedmatrix.com/trac/ticket/7820
+ # According to ITransport, this should return an IAddress!
+ return "file", "file"
+
+ def getHost(self):
+ # FIXME: https://twistedmatrix.com/trac/ticket/7820
+ # According to ITransport, this should return an IAddress!
+ return "file"
+
+ def handleException(self):
+ pass
+
+ def resumeProducing(self):
+ # Never sends data anyways
+ pass
+
+ def pauseProducing(self):
+ # Never sends data anyways
+ pass
+
+ def stopProducing(self):
+ self.loseConnection()
+
+
+__all__ = [
+ "Factory",
+ "ClientFactory",
+ "ReconnectingClientFactory",
+ "connectionDone",
+ "Protocol",
+ "ProcessProtocol",
+ "FileWrapper",
+ "ServerFactory",
+ "AbstractDatagramProtocol",
+ "DatagramProtocol",
+ "ConnectedDatagramProtocol",
+ "ClientCreator",
+]
diff --git a/contrib/python/Twisted/py3/twisted/internet/pyuisupport.py b/contrib/python/Twisted/py3/twisted/internet/pyuisupport.py
new file mode 100644
index 0000000000..bfbffb9cb4
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/pyuisupport.py
@@ -0,0 +1,39 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+This module integrates PyUI with twisted.internet's mainloop.
+
+Maintainer: Jp Calderone
+
+See doc/examples/pyuidemo.py for example usage.
+"""
+
+# System imports
+import pyui # type: ignore[import]
+
+
+def _guiUpdate(reactor, delay):
+ pyui.draw()
+ if pyui.update() == 0:
+ pyui.quit()
+ reactor.stop()
+ else:
+ reactor.callLater(delay, _guiUpdate, reactor, delay)
+
+
+def install(ms=10, reactor=None, args=(), kw={}):
+ """
+ Schedule PyUI's display to be updated approximately every C{ms}
+ milliseconds, and initialize PyUI with the specified arguments.
+ """
+ d = pyui.init(*args, **kw)
+
+ if reactor is None:
+ from twisted.internet import reactor
+ _guiUpdate(reactor, ms / 1000.0)
+ return d
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/reactor.py b/contrib/python/Twisted/py3/twisted/internet/reactor.py
new file mode 100644
index 0000000000..00f1ef6e01
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/reactor.py
@@ -0,0 +1,40 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+The reactor is the Twisted event loop within Twisted, the loop which drives
+applications using Twisted. The reactor provides APIs for networking,
+threading, dispatching events, and more.
+
+The default reactor depends on the platform and will be installed if this
+module is imported without another reactor being explicitly installed
+beforehand. Regardless of which reactor is installed, importing this module is
+the correct way to get a reference to it.
+
+New application code should prefer to pass and accept the reactor as a
+parameter where it is needed, rather than relying on being able to import this
+module to get a reference. This simplifies unit testing and may make it easier
+to one day support multiple reactors (as a performance enhancement), though
+this is not currently possible.
+
+@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
+@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
+@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
+@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
+@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
+@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
+@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
+@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
+@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
+@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
+@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
+@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
+"""
+
+
+import sys
+
+del sys.modules["twisted.internet.reactor"]
+from twisted.internet import default
+
+default.install()
diff --git a/contrib/python/Twisted/py3/twisted/internet/selectreactor.py b/contrib/python/Twisted/py3/twisted/internet/selectreactor.py
new file mode 100644
index 0000000000..199dc40671
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/selectreactor.py
@@ -0,0 +1,197 @@
+# -*- test-case-name: twisted.test.test_internet -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Select reactor
+"""
+
+
+import select
+import sys
+from errno import EBADF, EINTR
+from time import sleep
+from typing import Type
+
+from zope.interface import implementer
+
+from twisted.internet import posixbase
+from twisted.internet.interfaces import IReactorFDSet
+from twisted.python import log
+from twisted.python.runtime import platformType
+
+
+def win32select(r, w, e, timeout=None):
+ """Win32 select wrapper."""
+ if not (r or w):
+ # windows select() exits immediately when no sockets
+ if timeout is None:
+ timeout = 0.01
+ else:
+ timeout = min(timeout, 0.001)
+ sleep(timeout)
+ return [], [], []
+ # windows doesn't process 'signals' inside select(), so we set a max
+ # time or ctrl-c will never be recognized
+ if timeout is None or timeout > 0.5:
+ timeout = 0.5
+ r, w, e = select.select(r, w, w, timeout)
+ return r, w + e, []
+
+
+if platformType == "win32":
+ _select = win32select
+else:
+ _select = select.select
+
+
+try:
+ from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
+except ImportError:
+ _extraBase: Type[object] = object
+else:
+ _extraBase = _ThreadedWin32EventsMixin
+
+
+@implementer(IReactorFDSet)
+class SelectReactor(posixbase.PosixReactorBase, _extraBase): # type: ignore[misc,valid-type]
+ """
+ A select() based reactor - runs on all POSIX platforms and on Win32.
+
+ @ivar _reads: A set containing L{FileDescriptor} instances which will be
+ checked for read events.
+
+ @ivar _writes: A set containing L{FileDescriptor} instances which will be
+ checked for writability.
+ """
+
+ def __init__(self):
+ """
+ Initialize file descriptor tracking dictionaries and the base class.
+ """
+ self._reads = set()
+ self._writes = set()
+ posixbase.PosixReactorBase.__init__(self)
+
+ def _preenDescriptors(self):
+ log.msg("Malformed file descriptor found. Preening lists.")
+ readers = list(self._reads)
+ writers = list(self._writes)
+ self._reads.clear()
+ self._writes.clear()
+ for selSet, selList in ((self._reads, readers), (self._writes, writers)):
+ for selectable in selList:
+ try:
+ select.select([selectable], [selectable], [selectable], 0)
+ except Exception as e:
+ log.msg("bad descriptor %s" % selectable)
+ self._disconnectSelectable(selectable, e, False)
+ else:
+ selSet.add(selectable)
+
+ def doSelect(self, timeout):
+ """
+ Run one iteration of the I/O monitor loop.
+
+ This will run all selectables who had input or output readiness
+ waiting for them.
+ """
+ try:
+ r, w, ignored = _select(self._reads, self._writes, [], timeout)
+ except ValueError:
+ # Possibly a file descriptor has gone negative?
+ self._preenDescriptors()
+ return
+ except TypeError:
+ # Something *totally* invalid (object w/o fileno, non-integral
+ # result) was passed
+ log.err()
+ self._preenDescriptors()
+ return
+ except OSError as se:
+ # select(2) encountered an error, perhaps while calling the fileno()
+ # method of a socket. (On Python 3, socket.error is simply an alias
+ # for OSError, so such failures are caught here.)
+ if se.args[0] in (0, 2):
+ # windows does this if it got an empty list
+ if (not self._reads) and (not self._writes):
+ return
+ else:
+ raise
+ elif se.args[0] == EINTR:
+ return
+ elif se.args[0] == EBADF:
+ self._preenDescriptors()
+ return
+ else:
+ # OK, I really don't know what's going on. Blow up.
+ raise
+
+ _drdw = self._doReadOrWrite
+ _logrun = log.callWithLogger
+ for selectables, method, fdset in (
+ (r, "doRead", self._reads),
+ (w, "doWrite", self._writes),
+ ):
+ for selectable in selectables:
+ # if this was disconnected in another thread, kill it.
+ # ^^^^ --- what the !@#*? serious! -exarkun
+ if selectable not in fdset:
+ continue
+ # This for pausing input when we're not ready for more.
+ _logrun(selectable, _drdw, selectable, method)
+
+ doIteration = doSelect
+
+ def _doReadOrWrite(self, selectable, method):
+ try:
+ why = getattr(selectable, method)()
+ except BaseException:
+ why = sys.exc_info()[1]
+ log.err()
+ if why:
+ self._disconnectSelectable(selectable, why, method == "doRead")
+
+ def addReader(self, reader):
+ """
+ Add a FileDescriptor for notification of data available to read.
+ """
+ self._reads.add(reader)
+
+ def addWriter(self, writer):
+ """
+ Add a FileDescriptor for notification of data available to write.
+ """
+ self._writes.add(writer)
+
+ def removeReader(self, reader):
+ """
+ Remove a Selectable for notification of data available to read.
+ """
+ self._reads.discard(reader)
+
+ def removeWriter(self, writer):
+ """
+ Remove a Selectable for notification of data available to write.
+ """
+ self._writes.discard(writer)
+
+ def removeAll(self):
+ return self._removeAll(self._reads, self._writes)
+
+ def getReaders(self):
+ return list(self._reads)
+
+ def getWriters(self):
+ return list(self._writes)
+
+
+def install():
+ """Configure the twisted mainloop to be run using the select() reactor."""
+ reactor = SelectReactor()
+ from twisted.internet.main import installReactor
+
+ installReactor(reactor)
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/serialport.py b/contrib/python/Twisted/py3/twisted/internet/serialport.py
new file mode 100644
index 0000000000..d63d4ce435
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/serialport.py
@@ -0,0 +1,100 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+Serial Port Protocol
+"""
+
+
+# http://twistedmatrix.com/trac/ticket/3725#comment:24
+# Apparently applications use these names even though they should
+# be imported from pyserial
+__all__ = [
+ "serial",
+ "PARITY_ODD",
+ "PARITY_EVEN",
+ "PARITY_NONE",
+ "STOPBITS_TWO",
+ "STOPBITS_ONE",
+ "FIVEBITS",
+ "EIGHTBITS",
+ "SEVENBITS",
+ "SIXBITS",
+ # Name this module is actually trying to export
+ "SerialPort",
+]
+
+# all of them require pyserial at the moment, so check that first
+import serial # type: ignore[import]
+from serial import (
+ EIGHTBITS,
+ FIVEBITS,
+ PARITY_EVEN,
+ PARITY_NONE,
+ PARITY_ODD,
+ SEVENBITS,
+ SIXBITS,
+ STOPBITS_ONE,
+ STOPBITS_TWO,
+)
+
+from twisted.python.runtime import platform
+
+
+class BaseSerialPort:
+    """
+    Base class for Windows and POSIX serial ports.
+
+    Every method is a thin forwarder to the underlying pyserial object held
+    in C{self._serial}, which a platform-specific subclass is expected to
+    create via C{self._serialFactory}.
+
+    @ivar _serialFactory: a pyserial C{serial.Serial} factory, used to create
+        the instance stored in C{self._serial}. Overrideable to enable easier
+        testing.
+
+    @ivar _serial: a pyserial C{serial.Serial} instance used to manage the
+        options on the serial port.
+    """
+
+    _serialFactory = serial.Serial
+
+    def setBaudRate(self, baudrate):
+        # The setter's spelling has varied between pyserial versions; try
+        # "setBaudrate" first and fall back to "setBaudRate".
+        if hasattr(self._serial, "setBaudrate"):
+            self._serial.setBaudrate(baudrate)
+        else:
+            self._serial.setBaudRate(baudrate)
+
+    def inWaiting(self):
+        # Number of bytes currently waiting in the input buffer.
+        return self._serial.inWaiting()
+
+    def flushInput(self):
+        # Discard buffered, unread input.
+        self._serial.flushInput()
+
+    def flushOutput(self):
+        # Discard buffered, unsent output.
+        self._serial.flushOutput()
+
+    def sendBreak(self):
+        # Transmit a serial "break" condition.
+        self._serial.sendBreak()
+
+    def getDSR(self):
+        # State of the Data Set Ready line.
+        return self._serial.getDSR()
+
+    def getCD(self):
+        # State of the Carrier Detect line.
+        return self._serial.getCD()
+
+    def getRI(self):
+        # State of the Ring Indicator line.
+        return self._serial.getRI()
+
+    def getCTS(self):
+        # State of the Clear To Send line.
+        return self._serial.getCTS()
+
+    def setDTR(self, on=1):
+        # Assert (truthy) or deassert Data Terminal Ready.
+        self._serial.setDTR(on)
+
+    def setRTS(self, on=1):
+        # Assert (truthy) or deassert Request To Send.
+        self._serial.setRTS(on)
+
+
+# Export the appropriate platform implementation of SerialPort.
+if platform.isWindows():
+ from twisted.internet._win32serialport import SerialPort
+else:
+ from twisted.internet._posixserialport import SerialPort # type: ignore[assignment]
diff --git a/contrib/python/Twisted/py3/twisted/internet/ssl.py b/contrib/python/Twisted/py3/twisted/internet/ssl.py
new file mode 100644
index 0000000000..1ad02b3c09
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/ssl.py
@@ -0,0 +1,278 @@
+# -*- test-case-name: twisted.test.test_ssl -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module implements Transport Layer Security (TLS) support for Twisted. It
+requires U{PyOpenSSL <https://pypi.python.org/pypi/pyOpenSSL>}.
+
+If you wish to establish a TLS connection, please use one of the following
+APIs:
+
+ - SSL endpoints for L{servers
+ <twisted.internet.endpoints.SSL4ServerEndpoint>} and L{clients
+ <twisted.internet.endpoints.SSL4ClientEndpoint>}
+
+ - L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}
+
+ - L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}
+
+ - L{listenSSL <twisted.internet.interfaces.IReactorSSL.listenSSL>}
+
+These APIs all require a C{contextFactory} argument that specifies their
+security properties, such as certificate, private key, certificate authorities
+to verify the peer, allowed TLS protocol versions, cipher suites, and so on.
+The recommended value for this argument is a L{CertificateOptions} instance;
+see its documentation for an explanation of the available options.
+
+The C{contextFactory} name is a bit of an anachronism now, as context factories
+have been replaced with "connection creators", but these objects serve the same
+role.
+
+Be warned that implementing your own connection creator (i.e.: value for the
+C{contextFactory}) is both difficult and dangerous; the Twisted team has worked
+hard to make L{CertificateOptions}' API comprehensible and unsurprising, and
+the Twisted team is actively maintaining it to ensure that it becomes more
+secure over time.
+
+If you are really absolutely sure that you want to take on the risk of
+implementing your own connection creator based on the pyOpenSSL API, see the
+L{server connection creator
+<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} and L{client
+connection creator
+<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} interfaces.
+
+Developers using Twisted, please ignore the L{Port}, L{Connector}, and
+L{Client} classes defined here, as these are details of certain reactors' TLS
+implementations, exposed by accident (and remaining here only for compatibility
+reasons). If you wish to establish a TLS connection, please use one of the
+APIs listed above.
+
+@note: "SSL" (Secure Sockets Layer) is an antiquated synonym for "TLS"
+ (Transport Layer Security). You may see these terms used interchangeably
+ throughout the documentation.
+"""
+
+
+from zope.interface import implementedBy, implementer, implementer_only
+
+# System imports
+from OpenSSL import SSL
+
+# Twisted imports
+from twisted.internet import interfaces, tcp
+
+supported = True
+
+
+@implementer(interfaces.IOpenSSLContextFactory)
+class ContextFactory:
+    """
+    A factory for SSL context objects, for server SSL connections.
+    """
+
+    # Falsy flag: this factory is for the server side of a connection.
+    isClient = 0
+
+    def getContext(self):
+        """Return a SSL.Context object. override in subclasses."""
+        raise NotImplementedError
+
+
+class DefaultOpenSSLContextFactory(ContextFactory):
+    """
+    L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
+    objects.  These objects define certain parameters related to SSL
+    handshakes and the subsequent connection.
+
+    @ivar _contextFactory: A callable which will be used to create new
+        context objects.  This is typically L{OpenSSL.SSL.Context}.
+    """
+
+    # Cached SSL.Context; created once by cacheContext() and then reused.
+    _context = None
+
+    def __init__(
+        self,
+        privateKeyFileName,
+        certificateFileName,
+        sslmethod=SSL.TLS_METHOD,
+        _contextFactory=SSL.Context,
+    ):
+        """
+        @param privateKeyFileName: Name of a file containing a private key
+        @param certificateFileName: Name of a file containing a certificate
+        @param sslmethod: The SSL method to use
+        """
+        self.privateKeyFileName = privateKeyFileName
+        self.certificateFileName = certificateFileName
+        self.sslmethod = sslmethod
+        self._contextFactory = _contextFactory
+
+        # Create a context object right now.  This is to force validation of
+        # the given parameters so that errors are detected earlier rather
+        # than later.
+        self.cacheContext()
+
+    def cacheContext(self):
+        """
+        Create and cache the SSL context if it has not been created yet.
+        """
+        if self._context is None:
+            ctx = self._contextFactory(self.sslmethod)
+            # Disallow SSLv2!  It's insecure!  SSLv3 has been around since
+            # 1996.  It's time to move on.
+            ctx.set_options(SSL.OP_NO_SSLv2)
+            ctx.use_certificate_file(self.certificateFileName)
+            ctx.use_privatekey_file(self.privateKeyFileName)
+            self._context = ctx
+
+    def __getstate__(self):
+        # The OpenSSL context object is not picklable; drop it from state.
+        d = self.__dict__.copy()
+        del d["_context"]
+        return d
+
+    def __setstate__(self, state):
+        # NOTE(review): the context is not recreated here; getContext()
+        # returns the class-level None until cacheContext() runs again.
+        self.__dict__ = state
+
+    def getContext(self):
+        """
+        Return an SSL context.
+        """
+        return self._context
+
+
+@implementer(interfaces.IOpenSSLContextFactory)
+class ClientContextFactory:
+    """
+    A context factory for SSL clients.
+    """
+
+    # Truthy flag: this factory is for the client side of a connection.
+    isClient = 1
+
+    # TLS_METHOD allows negotiation of multiple TLS versions.
+    method = SSL.TLS_METHOD
+
+    _contextFactory = SSL.Context
+
+    def getContext(self):
+        """
+        Return a new SSL.Context with SSLv2, SSLv3, TLSv1 and TLSv1.1
+        disabled (only TLS 1.2+ remains negotiable).
+        """
+        ctx = self._contextFactory(self.method)
+        ctx.set_options(
+            SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
+        )
+        return ctx
+
+
+@implementer_only(
+    interfaces.ISSLTransport,
+    # Advertise everything tcp.Client provides except ITLSTransport,
+    # which is replaced by ISSLTransport above.
+    *(i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport),
+)
+class Client(tcp.Client):
+    """
+    I am an SSL client.
+    """
+
+    def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
+        # tcp.Client.__init__ depends on self.ctxFactory being set
+        self.ctxFactory = ctxFactory
+        tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
+
+    def _connectDone(self):
+        # Negotiate TLS as soon as the TCP connection is established.
+        self.startTLS(self.ctxFactory)
+        self.startWriting()
+        tcp.Client._connectDone(self)
+
+
+@implementer(interfaces.ISSLTransport)
+class Server(tcp.Server):
+    """
+    I am an SSL server.
+    """
+
+    def __init__(self, *args, **kwargs):
+        tcp.Server.__init__(self, *args, **kwargs)
+        # Start TLS immediately on accept, using the listening Port's
+        # context factory (self.server presumably set by tcp.Server —
+        # see Port.__init__ below, which stores ctxFactory).
+        self.startTLS(self.server.ctxFactory)
+
+    def getPeerCertificate(self):
+        # ISSLTransport.getPeerCertificate — deliberately unimplemented here.
+        raise NotImplementedError("Server.getPeerCertificate")
+
+
+class Port(tcp.Port):
+    """
+    I am an SSL port.
+    """
+
+    # Accepted connections use the SSL-aware Server transport.
+    transport = Server
+
+    _type = "TLS"
+
+    def __init__(
+        self, port, factory, ctxFactory, backlog=50, interface="", reactor=None
+    ):
+        """
+        @param ctxFactory: The context factory made available (via
+            C{self.ctxFactory}) to each accepted connection's transport.
+        """
+        tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
+        self.ctxFactory = ctxFactory
+
+    def _getLogPrefix(self, factory):
+        """
+        Override the normal prefix to include an annotation indicating this is a
+        port for TLS connections.
+        """
+        return tcp.Port._getLogPrefix(self, factory) + " (TLS)"
+
+
+class Connector(tcp.Connector):
+    """
+    An SSL variant of L{tcp.Connector} which creates L{Client} (SSL)
+    transports using the supplied context factory.
+    """
+
+    def __init__(
+        self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None
+    ):
+        self.contextFactory = contextFactory
+        tcp.Connector.__init__(self, host, port, factory, timeout, bindAddress, reactor)
+
+        # Force some parameter checking in pyOpenSSL. It's better to fail now
+        # than after we've set up the transport.
+        contextFactory.getContext()
+
+    def _makeTransport(self):
+        # Hand our context factory to the SSL-aware Client transport.
+        return Client(
+            self.host,
+            self.port,
+            self.bindAddress,
+            self.contextFactory,
+            self,
+            self.reactor,
+        )
+
+
+from twisted.internet._sslverify import (
+ DN,
+ Certificate,
+ CertificateRequest,
+ DistinguishedName,
+ KeyPair,
+ OpenSSLAcceptableCiphers as AcceptableCiphers,
+ OpenSSLCertificateOptions as CertificateOptions,
+ OpenSSLDefaultPaths,
+ OpenSSLDiffieHellmanParameters as DiffieHellmanParameters,
+ PrivateCertificate,
+ ProtocolNegotiationSupport,
+ TLSVersion,
+ VerificationError,
+ optionsForClientTLS,
+ platformTrust,
+ protocolNegotiationMechanisms,
+ trustRootFromCertificates,
+)
+
+__all__ = [
+ "ContextFactory",
+ "DefaultOpenSSLContextFactory",
+ "ClientContextFactory",
+ "DistinguishedName",
+ "DN",
+ "Certificate",
+ "CertificateRequest",
+ "PrivateCertificate",
+ "KeyPair",
+ "AcceptableCiphers",
+ "CertificateOptions",
+ "DiffieHellmanParameters",
+ "platformTrust",
+ "OpenSSLDefaultPaths",
+ "TLSVersion",
+ "VerificationError",
+ "optionsForClientTLS",
+ "ProtocolNegotiationSupport",
+ "protocolNegotiationMechanisms",
+ "trustRootFromCertificates",
+]
diff --git a/contrib/python/Twisted/py3/twisted/internet/stdio.py b/contrib/python/Twisted/py3/twisted/internet/stdio.py
new file mode 100644
index 0000000000..3196898bf6
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/stdio.py
@@ -0,0 +1,37 @@
+# -*- test-case-name: twisted.test.test_stdio -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Standard input/out/err support.
+
+This module exposes one name, StandardIO, which is a factory that takes an
+IProtocol provider as an argument. It connects that protocol to standard input
+and output on the current process.
+
+It should work on any UNIX and also on Win32 (with some caveats: due to
+platform limitations, it will perform very poorly on Win32).
+
+Future Plans::
+
+ support for stderr, perhaps
+ Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
+ protocols to transport.
+
+
+Maintainer: James Y Knight
+"""
+
+
+from twisted.python.runtime import platform
+
+if platform.isWindows():
+ from twisted.internet._win32stdio import StandardIO, Win32PipeAddress as PipeAddress
+
+else:
+ from twisted.internet._posixstdio import ( # type: ignore[assignment]
+ PipeAddress,
+ StandardIO,
+ )
+
+__all__ = ["StandardIO", "PipeAddress"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/task.py b/contrib/python/Twisted/py3/twisted/internet/task.py
new file mode 100644
index 0000000000..0319d24a7a
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/task.py
@@ -0,0 +1,976 @@
+# -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Scheduling utility methods and classes.
+"""
+
+
+import sys
+import time
+import warnings
+from typing import (
+ Callable,
+ Coroutine,
+ Iterable,
+ Iterator,
+ List,
+ NoReturn,
+ Optional,
+ Sequence,
+ TypeVar,
+ Union,
+ cast,
+)
+
+from zope.interface import implementer
+
+from incremental import Version
+
+from twisted.internet.base import DelayedCall
+from twisted.internet.defer import Deferred, ensureDeferred, maybeDeferred
+from twisted.internet.error import ReactorNotRunning
+from twisted.internet.interfaces import IDelayedCall, IReactorCore, IReactorTime
+from twisted.python import log, reflect
+from twisted.python.deprecate import _getDeprecationWarningString
+from twisted.python.failure import Failure
+
+_T = TypeVar("_T")
+
+
+class LoopingCall:
+    """Call a function repeatedly.
+
+    If C{f} returns a deferred, rescheduling will not take place until the
+    deferred has fired. The result value is ignored.
+
+    @ivar f: The function to call.
+    @ivar a: A tuple of arguments to pass the function.
+    @ivar kw: A dictionary of keyword arguments to pass to the function.
+    @ivar clock: A provider of
+        L{twisted.internet.interfaces.IReactorTime}. The default is
+        L{twisted.internet.reactor}. Feel free to set this to
+        something else, but it probably ought to be set *before*
+        calling L{start}.
+
+    @ivar running: A flag which is C{True} while C{f} is scheduled to be called
+        (or is currently being called). It is set to C{True} when L{start} is
+        called and set to C{False} when L{stop} is called or if C{f} raises an
+        exception. In either case, it will be C{False} by the time the
+        C{Deferred} returned by L{start} fires its callback or errback.
+
+    @ivar _realLastTime: When counting skips, the time at which the skip
+        counter was last invoked.
+
+    @ivar _runAtStart: A flag indicating whether the 'now' argument was passed
+        to L{LoopingCall.start}.
+    """
+
+    # Pending IDelayedCall for the next iteration; None while a call to f
+    # is in progress or the loop is stopped.
+    call: Optional[IDelayedCall] = None
+    running = False
+    # The Deferred returned by start(); fired by stop() or on failure.
+    _deferred: Optional[Deferred["LoopingCall"]] = None
+    interval: Optional[float] = None
+    _runAtStart = False
+    starttime: Optional[float] = None
+    _realLastTime: Optional[float] = None
+
+    def __init__(self, f: Callable[..., object], *a: object, **kw: object) -> None:
+        self.f = f
+        self.a = a
+        self.kw = kw
+        # NOTE(review): reactor imported lazily — presumably to avoid
+        # importing/installing the global reactor at module import time.
+        from twisted.internet import reactor
+
+        self.clock = cast(IReactorTime, reactor)
+
+    @property
+    def deferred(self) -> Optional[Deferred["LoopingCall"]]:
+        """
+        DEPRECATED. L{Deferred} fired when loop stops or fails.
+
+        Use the L{Deferred} returned by L{LoopingCall.start}.
+        """
+        warningString = _getDeprecationWarningString(
+            "twisted.internet.task.LoopingCall.deferred",
+            Version("Twisted", 16, 0, 0),
+            replacement="the deferred returned by start()",
+        )
+        warnings.warn(warningString, DeprecationWarning, stacklevel=2)
+
+        return self._deferred
+
+    @classmethod
+    def withCount(cls, countCallable: Callable[[int], object]) -> "LoopingCall":
+        """
+        An alternate constructor for L{LoopingCall} that makes available the
+        number of calls which should have occurred since it was last invoked.
+
+        Note that this number is an C{int} value; It represents the discrete
+        number of calls that should have been made. For example, if you are
+        using a looping call to display an animation with discrete frames, this
+        number would be the number of frames to advance.
+
+        The count is normally 1, but can be higher. For example, if the reactor
+        is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
+        returned from a previous call is not fired before an interval has
+        elapsed, or if the callable itself blocks for longer than an interval,
+        preventing I{itself} from being called.
+
+        When running with an interval of 0, count will be always 1.
+
+        @param countCallable: A callable that will be invoked each time the
+            resulting LoopingCall is run, with an integer specifying the number
+            of calls that should have been invoked.
+
+        @return: An instance of L{LoopingCall} with call counting enabled,
+            which provides the count as the first positional argument.
+
+        @since: 9.0
+        """
+
+        # counter closes over "self", which is bound only after cls(counter)
+        # below — it is the LoopingCall instance being constructed.
+        def counter() -> object:
+            now = self.clock.seconds()
+
+            if self.interval == 0:
+                # Interval 0: no intervals to count, always report 1.
+                self._realLastTime = now
+                return countCallable(1)
+
+            lastTime = self._realLastTime
+            if lastTime is None:
+                assert (
+                    self.starttime is not None
+                ), "LoopingCall called before it was started"
+                lastTime = self.starttime
+                if self._runAtStart:
+                    assert (
+                        self.interval is not None
+                    ), "Looping call called with None interval"
+                    lastTime -= self.interval
+            lastInterval = self._intervalOf(lastTime)
+            thisInterval = self._intervalOf(now)
+            count = thisInterval - lastInterval
+            if count > 0:
+                self._realLastTime = now
+                return countCallable(count)
+
+            return None
+
+        self = cls(counter)
+
+        return self
+
+    def _intervalOf(self, t: float) -> int:
+        """
+        Determine the number of intervals passed as of the given point in
+        time.
+
+        @param t: The specified time (from the start of the L{LoopingCall}) to
+            be measured in intervals
+
+        @return: The C{int} number of intervals which have passed as of the
+            given point in time.
+        """
+        assert self.starttime is not None
+        assert self.interval is not None
+        elapsedTime = t - self.starttime
+        intervalNum = int(elapsedTime / self.interval)
+        return intervalNum
+
+    def start(self, interval: float, now: bool = True) -> Deferred["LoopingCall"]:
+        """
+        Start running function every interval seconds.
+
+        @param interval: The number of seconds between calls. May be
+            less than one. Precision will depend on the underlying
+            platform, the available hardware, and the load on the system.
+
+        @param now: If True, run this call right now. Otherwise, wait
+            until the interval has elapsed before beginning.
+
+        @return: A Deferred whose callback will be invoked with
+            C{self} when C{self.stop} is called, or whose errback will be
+            invoked when the function raises an exception or returned a
+            deferred that has its errback invoked.
+        """
+        assert not self.running, "Tried to start an already running " "LoopingCall."
+        if interval < 0:
+            raise ValueError("interval must be >= 0")
+        self.running = True
+        # Loop might fail to start and then self._deferred will be cleared.
+        # This is why the local C{deferred} variable is used.
+        deferred = self._deferred = Deferred()
+        self.starttime = self.clock.seconds()
+        self.interval = interval
+        self._runAtStart = now
+        if now:
+            self()
+        else:
+            self._scheduleFrom(self.starttime)
+        return deferred
+
+    def stop(self) -> None:
+        """Stop running function."""
+        assert self.running, "Tried to stop a LoopingCall that was " "not running."
+        self.running = False
+        if self.call is not None:
+            self.call.cancel()
+            self.call = None
+        d, self._deferred = self._deferred, None
+        assert d is not None
+        d.callback(self)
+
+    def reset(self) -> None:
+        """
+        Skip the next iteration and reset the timer.
+
+        @since: 11.1
+        """
+        assert self.running, "Tried to reset a LoopingCall that was " "not running."
+        if self.call is not None:
+            self.call.cancel()
+            self.call = None
+        self.starttime = self.clock.seconds()
+        self._scheduleFrom(self.starttime)
+
+    def __call__(self) -> None:
+        """
+        Run one iteration: invoke C{f}, then reschedule (via C{cb}) or
+        terminate the loop's Deferred (via C{cb}/C{eb}).
+        """
+
+        def cb(result: object) -> None:
+            if self.running:
+                self._scheduleFrom(self.clock.seconds())
+            else:
+                # stop() was called while f ran; fire the start() Deferred.
+                d, self._deferred = self._deferred, None
+                assert d is not None
+                d.callback(self)
+
+        def eb(failure: Failure) -> None:
+            # A failure from f terminates the loop and errbacks start()'s
+            # Deferred.
+            self.running = False
+            d, self._deferred = self._deferred, None
+            assert d is not None
+            d.errback(failure)
+
+        self.call = None
+        d = maybeDeferred(self.f, *self.a, **self.kw)
+        d.addCallback(cb)
+        d.addErrback(eb)
+
+    def _scheduleFrom(self, when: float) -> None:
+        """
+        Schedule the next iteration of this looping call.
+
+        @param when: The present time from whence the call is scheduled.
+        """
+
+        def howLong() -> float:
+            # How long should it take until the next invocation of our
+            # callable? Split out into a function because there are multiple
+            # places we want to 'return' out of this.
+            if self.interval == 0:
+                # If the interval is 0, just go as fast as possible, always
+                # return zero, call ourselves ASAP.
+                return 0
+            # Compute the time until the next interval; how long has this call
+            # been running for?
+            assert self.starttime is not None
+            runningFor = when - self.starttime
+            # And based on that start time, when does the current interval end?
+            assert self.interval is not None
+            untilNextInterval = self.interval - (runningFor % self.interval)
+            # Now that we know how long it would be, we have to tell if the
+            # number is effectively zero. However, we can't just test against
+            # zero. If a number with a small exponent is added to a number
+            # with a large exponent, it may be so small that the digits just
+            # fall off the end, which means that adding the increment makes no
+            # difference; it's time to tick over into the next interval.
+            if when == when + untilNextInterval:
+                # If it's effectively zero, then we need to add another
+                # interval.
+                return self.interval
+            # Finally, if everything else is normal, we just return the
+            # computed delay.
+            return untilNextInterval
+
+        self.call = self.clock.callLater(howLong(), self)
+
+    def __repr__(self) -> str:
+        # This code should be replaced by a utility function in reflect;
+        # see ticket #6066:
+        func = getattr(self.f, "__qualname__", None)
+        if func is None:
+            func = getattr(self.f, "__name__", None)
+            if func is not None:
+                # Old-style bound methods: include the class name.
+                imClass = getattr(self.f, "im_class", None)
+                if imClass is not None:
+                    func = f"{imClass}.{func}"
+        if func is None:
+            func = reflect.safe_repr(self.f)
+
+        return "LoopingCall<{!r}>({}, *{}, **{})".format(
+            self.interval,
+            func,
+            reflect.safe_repr(self.a),
+            reflect.safe_repr(self.kw),
+        )
+
+
+class SchedulerError(Exception):
+    """
+    The operation could not be completed because the scheduler or one of its
+    tasks was in an invalid state. This exception should not be raised
+    directly, but is a superclass of various scheduler-state-related
+    exceptions. It is the root of the L{Cooperator}/L{CooperativeTask}
+    exception hierarchy defined below.
+    """
+
+
+class SchedulerStopped(SchedulerError):
+    """
+    The operation could not complete because the scheduler was stopped in
+    progress or was already stopped. Tasks added to a stopped L{Cooperator}
+    fail with this exception.
+    """
+
+
+class TaskFinished(SchedulerError):
+    """
+    The operation could not complete because the task was already completed,
+    stopped, encountered an error or otherwise permanently stopped running.
+    Base class of L{TaskDone}, L{TaskStopped} and L{TaskFailed}.
+    """
+
+
+class TaskDone(TaskFinished):
+    """
+    The operation could not complete because the task was already completed
+    (its iterator raised C{StopIteration}).
+    """
+
+
+class TaskStopped(TaskFinished):
+    """
+    The operation could not complete because the task was stopped via
+    L{CooperativeTask.stop}.
+    """
+
+
+class TaskFailed(TaskFinished):
+    """
+    The operation could not complete because the task died with an unhandled
+    error (its iterator raised an exception other than C{StopIteration}).
+    """
+
+
+class NotPaused(SchedulerError):
+    """
+    This exception is raised when a task is resumed which was not previously
+    paused; see L{CooperativeTask.resume}.
+    """
+
+
+class _Timer:
+    """
+    Default termination predicate for L{Cooperator}: an instance returns
+    C{True} once C{MAX_SLICE} seconds have elapsed since it was created.
+    """
+
+    # Maximum duration of one scheduler slice, in seconds.
+    MAX_SLICE = 0.01
+
+    def __init__(self) -> None:
+        self.end = time.time() + self.MAX_SLICE
+
+    def __call__(self) -> bool:
+        # True once the slice deadline has passed.
+        return time.time() >= self.end
+
+
+_EPSILON = 0.00000001
+
+
+def _defaultScheduler(callable: Callable[[], None]) -> IDelayedCall:
+    """
+    Default scheduler for L{Cooperator}: invoke C{callable} on the global
+    reactor after a tiny (C{_EPSILON}) delay.
+    """
+    from twisted.internet import reactor
+
+    return cast(IReactorTime, reactor).callLater(_EPSILON, callable)
+
+
+_TaskResultT = TypeVar("_TaskResultT")
+
+
+class CooperativeTask:
+    """
+    A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
+    paused, resumed, and stopped. It can also have its completion (or
+    termination) monitored.
+
+    @see: L{Cooperator.cooperate}
+
+    @ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
+        asked to do work.
+
+    @ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
+        participates in, which is used to re-insert it upon resume.
+
+    @ivar _deferreds: the list of L{Deferred}s to fire when this task
+        completes, fails, or finishes.
+
+    @ivar _pauseCount: the number of times that this L{CooperativeTask} has
+        been paused; if 0, it is running.
+
+    @ivar _completionState: The completion-state of this L{CooperativeTask}.
+        L{None} if the task is not yet completed, an instance of L{TaskStopped}
+        if C{stop} was called to stop this task early, of L{TaskFailed} if the
+        application code in the iterator raised an exception which caused it to
+        terminate, and of L{TaskDone} if it terminated normally via raising
+        C{StopIteration}.
+    """
+
+    def __init__(
+        self, iterator: Iterator[_TaskResultT], cooperator: "Cooperator"
+    ) -> None:
+        """
+        A private constructor: to create a new L{CooperativeTask}, see
+        L{Cooperator.cooperate}.
+        """
+        self._iterator = iterator
+        self._cooperator = cooperator
+        self._deferreds: List[Deferred[Iterator[_TaskResultT]]] = []
+        self._pauseCount = 0
+        self._completionState: Optional[SchedulerError] = None
+        self._completionResult: Optional[Union[Iterator[_TaskResultT], Failure]] = None
+        # Register with the cooperator immediately; it schedules the work.
+        cooperator._addTask(self)
+
+    def whenDone(self) -> Deferred[Iterator[_TaskResultT]]:
+        """
+        Get a L{Deferred} notification of when this task is complete.
+
+        @return: a L{Deferred} that fires with the C{iterator} that this
+            L{CooperativeTask} was created with when the iterator has been
+            exhausted (i.e. its C{next} method has raised C{StopIteration}), or
+            fails with the exception raised by C{next} if it raises some other
+            exception.
+
+        @rtype: L{Deferred}
+        """
+        d: Deferred[Iterator[_TaskResultT]] = Deferred()
+        if self._completionState is None:
+            # Still running: queue the Deferred for _completeWith to fire.
+            self._deferreds.append(d)
+        else:
+            # Already finished: fire immediately with the recorded result.
+            assert self._completionResult is not None
+            d.callback(self._completionResult)
+        return d
+
+    def pause(self) -> None:
+        """
+        Pause this L{CooperativeTask}. Stop doing work until
+        L{CooperativeTask.resume} is called. If C{pause} is called more than
+        once, C{resume} must be called an equal number of times to resume this
+        task.
+
+        @raise TaskFinished: if this task has already finished or completed.
+        """
+        self._checkFinish()
+        self._pauseCount += 1
+        if self._pauseCount == 1:
+            # First pause: withdraw from the cooperator's work list.
+            self._cooperator._removeTask(self)
+
+    def resume(self) -> None:
+        """
+        Resume processing of a paused L{CooperativeTask}.
+
+        @raise NotPaused: if this L{CooperativeTask} is not paused.
+        """
+        if self._pauseCount == 0:
+            raise NotPaused()
+        self._pauseCount -= 1
+        if self._pauseCount == 0 and self._completionState is None:
+            self._cooperator._addTask(self)
+
+    def _completeWith(
+        self,
+        completionState: SchedulerError,
+        deferredResult: Union[Iterator[_TaskResultT], Failure],
+    ) -> None:
+        """
+        @param completionState: a L{SchedulerError} exception or a subclass
+            thereof, indicating what exception should be raised when subsequent
+            operations are performed.
+
+        @param deferredResult: the result to fire all the deferreds with.
+        """
+        self._completionState = completionState
+        self._completionResult = deferredResult
+        if not self._pauseCount:
+            self._cooperator._removeTask(self)
+
+        # The Deferreds need to be invoked after all this is completed, because
+        # a Deferred may want to manipulate other tasks in a Cooperator. For
+        # example, if you call "stop()" on a cooperator in a callback on a
+        # Deferred returned from whenDone(), this CooperativeTask must be gone
+        # from the Cooperator by that point so that _completeWith is not
+        # invoked reentrantly; that would cause these Deferreds to blow up with
+        # an AlreadyCalledError, or the _removeTask to fail with a ValueError.
+        for d in self._deferreds:
+            d.callback(deferredResult)
+
+    def stop(self) -> None:
+        """
+        Stop further processing of this task.
+
+        @raise TaskFinished: if this L{CooperativeTask} has previously
+            completed, via C{stop}, completion, or failure.
+        """
+        self._checkFinish()
+        self._completeWith(TaskStopped(), Failure(TaskStopped()))
+
+    def _checkFinish(self) -> None:
+        """
+        If this task has been stopped, raise the appropriate subclass of
+        L{TaskFinished}.
+        """
+        if self._completionState is not None:
+            raise self._completionState
+
+    def _oneWorkUnit(self) -> None:
+        """
+        Perform one unit of work for this task, retrieving one item from its
+        iterator, stopping if there are no further items in the iterator, and
+        pausing if the result was a L{Deferred}.
+        """
+        try:
+            result = next(self._iterator)
+        except StopIteration:
+            # Normal exhaustion: complete with the iterator as the result.
+            self._completeWith(TaskDone(), self._iterator)
+        except BaseException:
+            self._completeWith(TaskFailed(), Failure())
+        else:
+            if isinstance(result, Deferred):
+                # The iterator yielded a Deferred: pause until it fires, then
+                # resume, or fail the task if the Deferred fails.
+                self.pause()
+
+                def failLater(failure: Failure) -> None:
+                    self._completeWith(TaskFailed(), failure)
+
+                result.addCallbacks(lambda result: self.resume(), failLater)
+
+
+class Cooperator:
+ """
+ Cooperative task scheduler.
+
+ A cooperative task is an iterator where each iteration represents an
+ atomic unit of work. When the iterator yields, it allows the
+ L{Cooperator} to decide which of its tasks to execute next. If the
+ iterator yields a L{Deferred} then work will pause until the
+ L{Deferred} fires and completes its callback chain.
+
+ When a L{Cooperator} has more than one task, it distributes work between
+ all tasks.
+
+ There are two ways to add tasks to a L{Cooperator}, L{cooperate} and
+ L{coiterate}. L{cooperate} is the more useful of the two, as it returns a
+ L{CooperativeTask}, which can be L{paused<CooperativeTask.pause>},
+ L{resumed<CooperativeTask.resume>} and L{waited
+ on<CooperativeTask.whenDone>}. L{coiterate} has the same effect, but
+ returns only a L{Deferred} that fires when the task is done.
+
+ L{Cooperator} can be used for many things, including but not limited to:
+
+ - running one or more computationally intensive tasks without blocking
+ - limiting parallelism by running a subset of the total tasks
+ simultaneously
+ - doing one thing, waiting for a L{Deferred} to fire,
+ doing the next thing, repeat (i.e. serializing a sequence of
+ asynchronous tasks)
+
+ Multiple L{Cooperator}s do not cooperate with each other, so for most
+ cases you should use the L{global cooperator<task.cooperate>}.
+ """
+
+ def __init__(
+ self,
+ terminationPredicateFactory: Callable[[], Callable[[], bool]] = _Timer,
+ scheduler: Callable[[Callable[[], None]], IDelayedCall] = _defaultScheduler,
+ started: bool = True,
+ ):
+ """
+ Create a scheduler-like object to which iterators may be added.
+
+ @param terminationPredicateFactory: A no-argument callable which will
+ be invoked at the beginning of each step and should return a
+ no-argument callable which will return True when the step should be
+ terminated. The default factory is time-based and allows iterators to
+ run for 1/100th of a second at a time.
+
+ @param scheduler: A one-argument callable which takes a no-argument
+ callable and should invoke it at some future point. This will be used
+ to schedule each step of this Cooperator.
+
+ @param started: A boolean which indicates whether iterators should be
+ stepped as soon as they are added, or if they will be queued up until
+ L{Cooperator.start} is called.
+ """
+ self._tasks: List[CooperativeTask] = []
+ self._metarator: Iterator[CooperativeTask] = iter(())
+ self._terminationPredicateFactory = terminationPredicateFactory
+ self._scheduler = scheduler
+ self._delayedCall: Optional[IDelayedCall] = None
+ self._stopped = False
+ self._started = started
+
    def coiterate(
        self,
        iterator: Iterator[_TaskResultT],
        doneDeferred: Optional[Deferred[Iterator[_TaskResultT]]] = None,
    ) -> Deferred[Iterator[_TaskResultT]]:
        """
        Add an iterator to the list of iterators this L{Cooperator} is
        currently running.

        Equivalent to L{cooperate}, but returns a L{Deferred} that will
        be fired when the task is done.

        @param iterator: the iterator to invoke.

        @param doneDeferred: If specified, this will be the Deferred used as
        the completion deferred. It is suggested that you use the default,
        which creates a new Deferred for you.

        @return: a Deferred that will fire when the iterator finishes.
        """
        if doneDeferred is None:
            doneDeferred = Deferred()
        # Creating the CooperativeTask registers it with this Cooperator;
        # chain its completion Deferred into the caller-visible one.
        whenDone: Deferred[Iterator[_TaskResultT]] = CooperativeTask(
            iterator, self
        ).whenDone()
        whenDone.chainDeferred(doneDeferred)
        return doneDeferred
+
    def cooperate(self, iterator: Iterator[_TaskResultT]) -> CooperativeTask:
        """
        Start running the given iterator as a long-running cooperative task, by
        calling next() on it as a periodic timed event.

        @param iterator: the iterator to invoke.

        @return: a L{CooperativeTask} object representing this task.
        """
        # CooperativeTask.__init__ adds itself to this Cooperator.
        return CooperativeTask(iterator, self)
+
    def _addTask(self, task: CooperativeTask) -> None:
        """
        Add a L{CooperativeTask} object to this L{Cooperator}, failing it
        immediately if this L{Cooperator} has already been stopped.
        """
        if self._stopped:
            self._tasks.append(task)  # XXX silly, I know, but _completeWith
            # does the inverse
            task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
        else:
            self._tasks.append(task)
            self._reschedule()
+
    def _removeTask(self, task: CooperativeTask) -> None:
        """
        Remove a L{CooperativeTask} from this L{Cooperator}.
        """
        self._tasks.remove(task)
        # If no work left to do, cancel the delayed call:
        if not self._tasks and self._delayedCall:
            self._delayedCall.cancel()
            self._delayedCall = None
+
    def _tasksWhileNotStopped(self) -> Iterable[CooperativeTask]:
        """
        Yield all L{CooperativeTask} objects in a loop as long as this
        L{Cooperator}'s termination condition has not been met.

        The iteration position is kept in C{self._metarator} so that a step
        which terminates mid-way resumes with the next task on the following
        tick, giving all tasks a fair share of runtime.
        """
        terminator = self._terminationPredicateFactory()
        while self._tasks:
            for t in self._metarator:
                yield t
                # Check the termination predicate *after* yielding so that
                # at least one task gets a work unit per step.
                if terminator():
                    return
            self._metarator = iter(self._tasks)
+
    def _tick(self) -> None:
        """
        Run one scheduler tick: give each task a unit of work until the
        termination predicate fires, then schedule the next tick.
        """
        # The delayed call that invoked us has fired; forget it so
        # _reschedule will create a new one if there is remaining work.
        self._delayedCall = None
        for taskObj in self._tasksWhileNotStopped():
            taskObj._oneWorkUnit()
        self._reschedule()
+
    # Set to True when _reschedule() is called while this Cooperator is not
    # yet started; start() checks it to kick off the first tick.
    _mustScheduleOnStart = False

    def _reschedule(self) -> None:
        """
        Arrange for L{_tick} to be invoked via the configured scheduler,
        unless a tick is already pending or there is no work to do.
        """
        if not self._started:
            self._mustScheduleOnStart = True
            return
        if self._delayedCall is None and self._tasks:
            self._delayedCall = self._scheduler(self._tick)
+
    def start(self) -> None:
        """
        Begin scheduling steps.
        """
        self._stopped = False
        self._started = True
        if self._mustScheduleOnStart:
            # Deleting the instance attribute reverts to the class-level
            # default of False.
            del self._mustScheduleOnStart
            self._reschedule()
+
    def stop(self) -> None:
        """
        Stop scheduling steps. Errback the completion Deferreds of all
        iterators which have been added and forget about them.
        """
        self._stopped = True
        for taskObj in self._tasks:
            taskObj._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
        self._tasks = []
        if self._delayedCall is not None:
            self._delayedCall.cancel()
            self._delayedCall = None
+
    @property
    def running(self) -> bool:
        """
        Is this L{Cooperator} currently running?

        @return: C{True} if the L{Cooperator} is running, C{False} otherwise.
        @rtype: C{bool}
        """
        return self._started and not self._stopped
+
+
+_theCooperator = Cooperator()
+
+
def coiterate(iterator: Iterator[_T]) -> Deferred[Iterator[_T]]:
    """
    Cooperatively iterate over the given iterator, dividing runtime between it
    and all other iterators which have been passed to this function and not yet
    exhausted.

    Uses the global module-level L{Cooperator}.

    @param iterator: the iterator to invoke.

    @return: a Deferred that will fire when the iterator finishes.
    """
    return _theCooperator.coiterate(iterator)
+
+
def cooperate(iterator: Iterator[_T]) -> CooperativeTask:
    """
    Start running the given iterator as a long-running cooperative task, by
    calling next() on it as a periodic timed event.

    This is very useful if you have computationally expensive tasks that you
    want to run without blocking the reactor. Just break each task up so that
    it yields frequently, pass it in here and the global L{Cooperator} will
    make sure work is distributed between them without blocking longer than a
    single iteration of a single task.

    @param iterator: the iterator to invoke.

    @return: a L{CooperativeTask} object representing this task.
    """
    return _theCooperator.cooperate(iterator)
+
+
@implementer(IReactorTime)
class Clock:
    """
    Provide a deterministic, easily-controlled implementation of
    L{IReactorTime.callLater}. This is commonly useful for writing
    deterministic unit tests for code which schedules events using this API.
    """

    # The current notion of "now", in seconds; advanced only via advance().
    rightNow = 0.0

    def __init__(self) -> None:
        # Pending DelayedCall objects, kept ordered by _sortCalls().
        self.calls: List[DelayedCall] = []

    def seconds(self) -> float:
        """
        Pretend to be time.time(). This is used internally when an operation
        such as L{IDelayedCall.reset} needs to determine a time value
        relative to the current time.

        @return: The time which should be considered the current time.
        """
        return self.rightNow

    def _sortCalls(self) -> None:
        """
        Sort the pending calls according to the time they are scheduled.
        """
        self.calls.sort(key=lambda a: a.getTime())

    def callLater(
        self, delay: float, callable: Callable[..., object], *args: object, **kw: object
    ) -> IDelayedCall:
        """
        See L{twisted.internet.interfaces.IReactorTime.callLater}.
        """
        dc = DelayedCall(
            self.seconds() + delay,
            callable,
            args,
            kw,
            # Cancellation simply removes the call from our queue; there is
            # no real reactor bookkeeping to reset, hence the no-op.
            self.calls.remove,
            lambda c: None,
            self.seconds,
        )
        self.calls.append(dc)
        self._sortCalls()
        return dc

    def getDelayedCalls(self) -> Sequence[IDelayedCall]:
        """
        See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
        """
        return self.calls

    def advance(self, amount: float) -> None:
        """
        Move time on this clock forward by the given amount and run whatever
        pending calls should be run.

        @param amount: The number of seconds which to advance this clock's
        time.
        """
        self.rightNow += amount
        self._sortCalls()
        while self.calls and self.calls[0].getTime() <= self.seconds():
            call = self.calls.pop(0)
            call.called = 1
            call.func(*call.args, **call.kw)
            # The invoked call may have scheduled further calls; re-sort so
            # the loop condition inspects the earliest pending call.
            self._sortCalls()

    def pump(self, timings: Iterable[float]) -> None:
        """
        Advance incrementally by the given set of times.
        """
        for amount in timings:
            self.advance(amount)
+
+
def deferLater(
    clock: IReactorTime,
    delay: float,
    callable: Optional[Callable[..., _T]] = None,
    *args: object,
    **kw: object,
) -> Deferred[_T]:
    """
    Call the given function after a certain period of time has passed.

    @param clock: The object which will be used to schedule the delayed
    call.

    @param delay: The number of seconds to wait before calling the function.

    @param callable: The callable to call after the delay, or C{None}.

    @param args: The positional arguments to pass to C{callable}.

    @param kw: The keyword arguments to pass to C{callable}.

    @return: A deferred that fires with the result of the callable when the
    specified time has elapsed.
    """

    def deferLaterCancel(deferred: Deferred[object]) -> None:
        # Cancelling the Deferred cancels the underlying timed call.
        delayedCall.cancel()

    def cb(result: object) -> _T:
        # ``result`` is always the placeholder None fired by the timer; the
        # interesting value comes from invoking ``callable``.
        if callable is None:
            return None  # type: ignore[return-value]
        return callable(*args, **kw)

    d: Deferred[_T] = Deferred(deferLaterCancel)
    d.addCallback(cb)
    delayedCall = clock.callLater(delay, d.callback, None)
    return d
+
+
def react(
    main: Callable[
        ...,
        Union[Deferred[_T], Coroutine["Deferred[_T]", object, _T]],
    ],
    argv: Iterable[object] = (),
    _reactor: Optional[IReactorCore] = None,
) -> NoReturn:
    """
    Call C{main} and run the reactor until the L{Deferred} it returns fires or
    the coroutine it returns completes.

    This is intended as the way to start up an application with a well-defined
    completion condition. Use it to write clients or one-off asynchronous
    operations. Prefer this to calling C{reactor.run} directly, as this
    function will also:

    - Take care to call C{reactor.stop} once and only once, and at the right
    time.
    - Log any failures from the C{Deferred} returned by C{main}.
    - Exit the application when done, with exit code 0 in case of success and
    1 in case of failure. If C{main} fails with a C{SystemExit} error, the
    code returned is used.

    The following demonstrates the signature of a C{main} function which can be
    used with L{react}::

        async def main(reactor, username, password):
            return "ok"

        task.react(main, ("alice", "secret"))

    @param main: A callable which returns a L{Deferred} or
    coroutine. It should take the reactor as its first
    parameter, followed by the elements of C{argv}.

    @param argv: A list of arguments to pass to C{main}. If omitted the
    callable will be invoked with no additional arguments.

    @param _reactor: An implementation detail to allow easier unit testing. Do
    not supply this parameter.

    @since: 12.3
    """
    if _reactor is None:
        from twisted.internet import reactor

        _reactor = cast(IReactorCore, reactor)

    finished = ensureDeferred(main(_reactor, *argv))
    code = 0

    # True once reactor shutdown has begun; in that case the result handler
    # must not call reactor.stop() again.
    stopping = False

    def onShutdown() -> None:
        nonlocal stopping
        stopping = True

    _reactor.addSystemEventTrigger("before", "shutdown", onShutdown)

    def stop(result: object, stopReactor: bool) -> None:
        if stopReactor:
            assert _reactor is not None
            try:
                _reactor.stop()
            except ReactorNotRunning:
                pass

        if isinstance(result, Failure):
            nonlocal code
            if result.check(SystemExit) is not None:
                # Respect an explicit exit code raised by main().
                code = result.value.code
            else:
                log.err(result, "main function encountered error")
                code = 1

    def cbFinish(result: object) -> None:
        if stopping:
            # The reactor is already shutting down; just record the result.
            stop(result, False)
        else:
            # Defer the stop until the reactor is running, in case main()'s
            # Deferred fired synchronously before reactor.run().
            assert _reactor is not None
            _reactor.callWhenRunning(stop, result, True)

    finished.addBoth(cbFinish)
    _reactor.run()
    sys.exit(code)
+
+
# Public names exported by this module.
# NOTE(review): L{cooperate} is defined above but not listed here -- confirm
# whether its omission from __all__ is intentional.
__all__ = [
    "LoopingCall",
    "Clock",
    "SchedulerStopped",
    "Cooperator",
    "coiterate",
    "deferLater",
    "react",
]
diff --git a/contrib/python/Twisted/py3/twisted/internet/tcp.py b/contrib/python/Twisted/py3/twisted/internet/tcp.py
new file mode 100644
index 0000000000..c87b5b7333
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/tcp.py
@@ -0,0 +1,1523 @@
+# -*- test-case-name: twisted.test.test_tcp -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Various asynchronous TCP/IP classes.
+
+End users shouldn't use this module directly - use the reactor APIs instead.
+"""
+
+import os
+
+# System Imports
+import socket
+import struct
+import sys
+from typing import Callable, ClassVar, List, Optional
+
+from zope.interface import Interface, implementer
+
+import attr
+import typing_extensions
+
+from twisted.internet.interfaces import (
+ IHalfCloseableProtocol,
+ IListeningPort,
+ ISystemHandle,
+ ITCPTransport,
+)
+from twisted.logger import ILogObserver, LogEvent, Logger
+from twisted.python import deprecate, versions
+from twisted.python.compat import lazyByteSlice
+from twisted.python.runtime import platformType
+
+try:
+ # Try to get the memory BIO based startTLS implementation, available since
+ # pyOpenSSL 0.10
+ from twisted.internet._newtls import (
+ ClientMixin as _TLSClientMixin,
+ ConnectionMixin as _TLSConnectionMixin,
+ ServerMixin as _TLSServerMixin,
+ )
+ from twisted.internet.interfaces import ITLSTransport
+except ImportError:
+ # There is no version of startTLS available
+ ITLSTransport = Interface # type: ignore[misc,assignment]
+
+ class _TLSConnectionMixin: # type: ignore[no-redef]
+ TLS = False
+
+ class _TLSClientMixin: # type: ignore[no-redef]
+ pass
+
+ class _TLSServerMixin: # type: ignore[no-redef]
+ pass
+
+
+if platformType == "win32":
+ # no such thing as WSAEPERM or error code 10001
+ # according to winsock.h or MSDN
+ EPERM = object()
+ from errno import ( # type: ignore[attr-defined]
+ WSAEALREADY as EALREADY,
+ WSAEINPROGRESS as EINPROGRESS,
+ WSAEINVAL as EINVAL,
+ WSAEISCONN as EISCONN,
+ WSAEMFILE as EMFILE,
+ WSAENOBUFS as ENOBUFS,
+ WSAEWOULDBLOCK as EWOULDBLOCK,
+ )
+
+ # No such thing as WSAENFILE, either.
+ ENFILE = object()
+ # Nor ENOMEM
+ ENOMEM = object()
+ EAGAIN = EWOULDBLOCK
+ from errno import WSAECONNRESET as ECONNABORTED # type: ignore[attr-defined]
+
+ from twisted.python.win32 import formatError as strerror
+else:
+ from errno import EPERM
+ from errno import EINVAL
+ from errno import EWOULDBLOCK
+ from errno import EINPROGRESS
+ from errno import EALREADY
+ from errno import EISCONN
+ from errno import ENOBUFS
+ from errno import EMFILE
+ from errno import ENFILE
+ from errno import ENOMEM
+ from errno import EAGAIN
+ from errno import ECONNABORTED
+
+ from os import strerror
+
+from errno import errorcode
+
+# Twisted Imports
+from twisted.internet import abstract, address, base, error, fdesc, main
+from twisted.internet.error import CannotListenError
+from twisted.internet.protocol import Protocol
+from twisted.internet.task import deferLater
+from twisted.python import failure, log, reflect
+from twisted.python.util import untilConcludes
+
+# Not all platforms have, or support, this flag.
+_AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
+
+
+def _getrealname(addr):
+ """
+ Return a 2-tuple of socket IP and port for IPv4 and a 4-tuple of
+ socket IP, port, flowInfo, and scopeID for IPv6. For IPv6, it
+ returns the interface portion (the part after the %) as a part of
+ the IPv6 address, which Python 3.7+ does not include.
+
+ @param addr: A 2-tuple for IPv4 information or a 4-tuple for IPv6
+ information.
+ """
+ if len(addr) == 4:
+ # IPv6
+ host = socket.getnameinfo(addr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)[
+ 0
+ ]
+ return tuple([host] + list(addr[1:]))
+ else:
+ return addr[:2]
+
+
def _getpeername(skt):
    """
    Return the normalized remote address of C{skt}; see L{_getrealname}.
    """
    peerAddress = skt.getpeername()
    return _getrealname(peerAddress)
+
+
def _getsockname(skt):
    """
    Return the normalized local address of C{skt}; see L{_getrealname}.
    """
    localAddress = skt.getsockname()
    return _getrealname(localAddress)
+
+
class _SocketCloser:
    """
    Mixin providing a uniform way to close the underlying socket, either
    with an orderly shutdown or an abortive reset.

    @ivar _shouldShutdown: Set to C{True} if C{shutdown} should be called
        before calling C{close} on the underlying socket.
    @type _shouldShutdown: C{bool}
    """

    _shouldShutdown = True

    def _closeSocket(self, orderly):
        # The call to shutdown() before close() isn't really necessary, because
        # we set FD_CLOEXEC now, which will ensure this is the only process
        # holding the FD, thus ensuring close() really will shutdown the TCP
        # socket. However, do it anyways, just to be safe.
        skt = self.socket
        try:
            if orderly:
                if self._shouldShutdown:
                    skt.shutdown(2)
            else:
                # Set SO_LINGER to 1,0 which, by convention, causes a
                # connection reset to be sent when close is called,
                # instead of the standard FIN shutdown sequence.
                self.socket.setsockopt(
                    socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 1, 0)
                )

        except OSError:
            # Best-effort: the socket may already be in an unusable state.
            pass
        try:
            skt.close()
        except OSError:
            pass
+
+
class _AbortingMixin:
    """
    Common implementation of C{abortConnection}.

    @ivar _aborting: Set to C{True} when C{abortConnection} is called.
    @type _aborting: C{bool}
    """

    _aborting = False

    def abortConnection(self):
        """
        Aborts the connection immediately, dropping any buffered data.

        @since: 11.1
        """
        if self.disconnected or self._aborting:
            return
        self._aborting = True
        self.stopReading()
        self.stopWriting()
        # Replace the event handlers with no-ops so any already-queued
        # read/write notifications are discarded before connectionLost runs.
        self.doRead = lambda *args, **kwargs: None
        self.doWrite = lambda *args, **kwargs: None
        self.reactor.callLater(
            0, self.connectionLost, failure.Failure(error.ConnectionAborted())
        )
+
+
@implementer(ITLSTransport, ITCPTransport, ISystemHandle)
class Connection(
    _TLSConnectionMixin, abstract.FileDescriptor, _SocketCloser, _AbortingMixin
):
    """
    Superclass of all socket-based FileDescriptors.

    This is an abstract superclass of all objects which represent a TCP/IP
    connection based socket.

    @ivar logstr: prefix used when logging events related to this connection.
    @type logstr: C{str}
    """

    def __init__(self, skt, protocol, reactor=None):
        abstract.FileDescriptor.__init__(self, reactor=reactor)
        self.socket = skt
        self.socket.setblocking(0)
        self.fileno = skt.fileno
        self.protocol = protocol

    def getHandle(self):
        """Return the socket for this connection."""
        return self.socket

    def doRead(self):
        """Calls self.protocol.dataReceived with all available data.

        This reads up to self.bufferSize bytes of data from its socket, then
        calls self.dataReceived(data) to process it. If the connection is not
        lost through an error in the physical recv(), this function will return
        the result of the dataReceived call.
        """
        try:
            data = self.socket.recv(self.bufferSize)
        except OSError as se:
            if se.args[0] == EWOULDBLOCK:
                # Spurious readability notification; nothing to do.
                return
            else:
                return main.CONNECTION_LOST

        return self._dataReceived(data)

    def _dataReceived(self, data):
        # An empty read means the peer performed an orderly close.
        if not data:
            return main.CONNECTION_DONE
        rval = self.protocol.dataReceived(data)
        if rval is not None:
            # Returning non-None from dataReceived has been deprecated since
            # Twisted 11.0; warn the offending protocol's author.
            offender = self.protocol.dataReceived
            warningFormat = (
                "Returning a value other than None from %(fqpn)s is "
                "deprecated since %(version)s."
            )
            warningString = deprecate.getDeprecationWarningString(
                offender, versions.Version("Twisted", 11, 0, 0), format=warningFormat
            )
            deprecate.warnAboutFunction(offender, warningString)
        return rval

    def writeSomeData(self, data):
        """
        Write as much as possible of the given data to this TCP connection.

        This sends up to C{self.SEND_LIMIT} bytes from C{data}. If the
        connection is lost, an exception is returned. Otherwise, the number
        of bytes successfully written is returned.
        """
        # Limit length of buffer to try to send, because some OSes are too
        # stupid to do so themselves (ahem windows)
        limitedData = lazyByteSlice(data, 0, self.SEND_LIMIT)

        try:
            return untilConcludes(self.socket.send, limitedData)
        except OSError as se:
            if se.args[0] in (EWOULDBLOCK, ENOBUFS):
                # Kernel buffers full; report zero bytes written and retry
                # on the next writability notification.
                return 0
            else:
                return main.CONNECTION_LOST

    def _closeWriteConnection(self):
        try:
            # Half-close: shut down only the write side (SHUT_WR == 1).
            self.socket.shutdown(1)
        except OSError:
            pass
        p = IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except BaseException:
                f = failure.Failure()
                log.err()
                self.connectionLost(f)

    def readConnectionLost(self, reason):
        p = IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except BaseException:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            # Protocol does not support half-close; treat it as a full loss.
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """See abstract.FileDescriptor.connectionLost()."""
        # Make sure we're not called twice, which can happen e.g. if
        # abortConnection() is called from protocol's dataReceived and then
        # code immediately after throws an exception that reaches the
        # reactor. We can't rely on "disconnected" attribute for this check
        # since twisted.internet._oldtls does evil things to it:
        if not hasattr(self, "socket"):
            return
        abstract.FileDescriptor.connectionLost(self, reason)
        # An aborted connection gets a reset (non-orderly) close.
        self._closeSocket(not reason.check(error.ConnectionAborted))
        protocol = self.protocol
        del self.protocol
        del self.socket
        del self.fileno
        protocol.connectionLost(reason)

    logstr = "Uninitialized"

    def logPrefix(self):
        """Return the prefix to log with when I own the logging thread."""
        return self.logstr

    def getTcpNoDelay(self):
        return bool(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    def setTcpNoDelay(self, enabled):
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)

    def getTcpKeepAlive(self):
        return bool(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE))

    def setTcpKeepAlive(self, enabled):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
+
+
class _BaseBaseClient:
    """
    Code shared with other (non-POSIX) reactors for management of general
    outgoing connections.

    Requirements upon subclasses are documented as instance variables rather
    than abstract methods, in order to avoid MRO confusion, since this base is
    mixed in to unfortunately weird and distinctive multiple-inheritance
    hierarchies and many of these attributes are provided by peer classes
    rather than descendant classes in those hierarchies.

    @ivar addressFamily: The address family constant (C{socket.AF_INET},
        C{socket.AF_INET6}, C{socket.AF_UNIX}) of the underlying socket of this
        client connection.
    @type addressFamily: C{int}

    @ivar socketType: The socket type constant (C{socket.SOCK_STREAM} or
        C{socket.SOCK_DGRAM}) of the underlying socket.
    @type socketType: C{int}

    @ivar _requiresResolution: A flag indicating whether the address of this
        client will require name resolution. C{True} if the hostname of said
        address indicates a name that must be resolved by hostname lookup,
        C{False} if it indicates an IP address literal.
    @type _requiresResolution: C{bool}

    @cvar _commonConnection: Subclasses must provide this attribute, which
        indicates the L{Connection}-alike class to invoke C{__init__} and
        C{connectionLost} on.
    @type _commonConnection: C{type}

    @ivar _stopReadingAndWriting: Subclasses must implement in order to remove
        this transport from its reactor's notifications in response to a
        terminated connection attempt.
    @type _stopReadingAndWriting: 0-argument callable returning L{None}

    @ivar _closeSocket: Subclasses must implement in order to close the socket
        in response to a terminated connection attempt.
    @type _closeSocket: 1-argument callable; see L{_SocketCloser._closeSocket}

    @ivar _collectSocketDetails: Clean up references to the attached socket in
        its underlying OS resource (such as a file descriptor or file handle),
        as part of post connection-failure cleanup.
    @type _collectSocketDetails: 0-argument callable returning L{None}.

    @ivar reactor: The class pointed to by C{_commonConnection} should set this
        attribute in its constructor.
    @type reactor: L{twisted.internet.interfaces.IReactorTime},
        L{twisted.internet.interfaces.IReactorCore},
        L{twisted.internet.interfaces.IReactorFDSet}
    """

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    def _finishInit(self, whenDone, skt, error, reactor):
        """
        Called by subclasses to continue to the stage of initialization where
        the socket connect attempt is made.

        @param whenDone: A 0-argument callable to invoke once the connection is
            set up. This is L{None} if the connection could not be prepared
            due to a previous error.

        @param skt: The socket object to use to perform the connection.
        @type skt: C{socket._socketobject}

        @param error: The error to fail the connection with.

        @param reactor: The reactor to use for this client.
        @type reactor: L{twisted.internet.interfaces.IReactorTime}
        """
        if whenDone:
            self._commonConnection.__init__(self, skt, None, reactor)
            # Defer via callLater so the caller's stack unwinds before the
            # connection attempt proceeds.
            reactor.callLater(0, whenDone)
        else:
            reactor.callLater(0, self.failIfNotConnected, error)

    def resolveAddress(self):
        """
        Resolve the name that was passed to this L{_BaseBaseClient}, if
        necessary, and then move on to attempting the connection once an
        address has been determined. (The connection will be attempted
        immediately within this function if either name resolution can be
        synchronous or the address was an IP address literal.)

        @note: You don't want to call this method from outside, as it won't do
            anything useful; it's just part of the connection bootstrapping
            process. Also, although this method is on L{_BaseBaseClient} for
            historical reasons, it's not used anywhere except for L{Client}
            itself.

        @return: L{None}
        """
        if self._requiresResolution:
            d = self.reactor.resolve(self.addr[0])
            # Substitute the resolved IP for the hostname, keeping port etc.
            d.addCallback(lambda n: (n,) + self.addr[1:])
            d.addCallbacks(self._setRealAddress, self.failIfNotConnected)
        else:
            self._setRealAddress(self.addr)

    def _setRealAddress(self, address):
        """
        Set the resolved address of this L{_BaseBaseClient} and initiate the
        connection attempt.

        @param address: Depending on whether this is an IPv4 or IPv6 connection
            attempt, a 2-tuple of C{(host, port)} or a 4-tuple of C{(host,
            port, flow, scope)}. At this point it is a fully resolved address,
            and the 'host' portion will always be an IP address, not a DNS
            name.
        """
        if len(address) == 4:
            # IPv6, make sure we have the scopeID associated
            hostname = socket.getnameinfo(
                address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
            )[0]
            self.realAddress = tuple([hostname] + list(address[1:]))
        else:
            self.realAddress = address
        self.doConnect()

    def failIfNotConnected(self, err):
        """
        Generic method called when the attempts to connect failed. It basically
        cleans everything it can: call connectionFailed, stop read and write,
        delete socket related members.
        """
        # "connector" is removed below, so its absence means this already ran.
        if self.connected or self.disconnected or not hasattr(self, "connector"):
            return

        self._stopReadingAndWriting()
        try:
            self._closeSocket(True)
        except AttributeError:
            # The socket attributes may never have been set up.
            pass
        else:
            self._collectSocketDetails()
        self.connector.connectionFailed(failure.Failure(err))
        del self.connector

    def stopConnecting(self):
        """
        If a connection attempt is still outstanding (i.e. no connection is
        yet established), immediately stop attempting to connect.
        """
        self.failIfNotConnected(error.UserError())

    def connectionLost(self, reason):
        """
        Invoked by lower-level logic when it's time to clean the socket up.
        Depending on the state of the connection, either inform the attached
        L{Connector} that the connection attempt has failed, or inform the
        connected L{IProtocol} that the established connection has been lost.

        @param reason: the reason that the connection was terminated
        @type reason: L{Failure}
        """
        if not self.connected:
            self.failIfNotConnected(error.ConnectError(string=reason))
        else:
            self._commonConnection.connectionLost(self, reason)
            self.connector.connectionLost(reason)
+
+
class BaseClient(_BaseBaseClient, _TLSClientMixin, Connection):
    """
    A base class for client TCP (and similar) sockets.

    @ivar realAddress: The address object that will be used for socket.connect;
        this address is an address tuple (the number of elements dependent upon
        the address family) which does not contain any names which need to be
        resolved.
    @type realAddress: C{tuple}

    @ivar _base: L{Connection}, which is the base class of this class which has
        all of the useful file descriptor methods. This is used by
        L{_TLSServerMixin} to call the right methods to directly manipulate the
        transport, as is necessary for writing TLS-encrypted bytes (whereas
        those methods on L{Server} will go through another layer of TLS if it
        has been enabled).
    """

    _base = Connection
    _commonConnection = Connection

    def _stopReadingAndWriting(self):
        """
        Implement the POSIX-ish (i.e.
        L{twisted.internet.interfaces.IReactorFDSet}) method of detaching this
        socket from the reactor for L{_BaseBaseClient}.
        """
        if hasattr(self, "reactor"):
            # this doesn't happen if we failed in __init__
            self.stopReading()
            self.stopWriting()

    def _collectSocketDetails(self):
        """
        Clean up references to the socket and its file descriptor.

        @see: L{_BaseBaseClient}
        """
        del self.socket, self.fileno

    def createInternetSocket(self):
        """(internal) Create a non-blocking socket using
        self.addressFamily, self.socketType.
        """
        s = socket.socket(self.addressFamily, self.socketType)
        s.setblocking(0)
        # Mark close-on-exec so child processes cannot inherit the FD.
        fdesc._setCloseOnExec(s.fileno())
        return s

    def doConnect(self):
        """
        Initiate the outgoing connection attempt.

        @note: Applications do not need to call this method; it will be invoked
            internally as part of L{IReactorTCP.connectTCP}.
        """
        # Until the connection completes, both readability and writability
        # notifications re-enter this method.
        self.doWrite = self.doConnect
        self.doRead = self.doConnect
        if not hasattr(self, "connector"):
            # this happens when connection failed but doConnect
            # was scheduled via a callLater in self._finishInit
            return

        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err:
            self.failIfNotConnected(error.getConnectError((err, strerror(err))))
            return

        # doConnect gets called twice. The first time we actually need to
        # start the connection attempt. The second time we don't really
        # want to (SO_ERROR above will have taken care of any errors, and if
        # it reported none, the mere fact that doConnect was called again is
        # sufficient to indicate that the connection has succeeded), but it
        # is not /particularly/ detrimental to do so. This should get
        # cleaned up some day, though.
        try:
            connectResult = self.socket.connect_ex(self.realAddress)
        except OSError as se:
            connectResult = se.args[0]
        if connectResult:
            if connectResult == EISCONN:
                pass
            # on Windows EINVAL means sometimes that we should keep trying:
            # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/winsock/connect_2.asp
            elif (connectResult in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (
                connectResult == EINVAL and platformType == "win32"
            ):
                self.startReading()
                self.startWriting()
                return
            else:
                self.failIfNotConnected(
                    error.getConnectError((connectResult, strerror(connectResult)))
                )
                return

        # If I have reached this point without raising or returning, that means
        # that the socket is connected.
        del self.doWrite
        del self.doRead
        # we first stop and then start, to reset any references to the old doRead
        self.stopReading()
        self.stopWriting()
        self._connectDone()

    def _connectDone(self):
        """
        This is a hook for when a connection attempt has succeeded.

        Here, we build the protocol from the
        L{twisted.internet.protocol.ClientFactory} that was passed in, compute
        a log string, begin reading so as to send traffic to the newly built
        protocol, and finally hook up the protocol itself.

        This hook is overridden by L{ssl.Client} to initiate the TLS protocol.
        """
        self.protocol = self.connector.buildProtocol(self.getPeer())
        self.connected = 1
        logPrefix = self._getLogPrefix(self.protocol)
        self.logstr = "%s,client" % logPrefix
        if self.protocol is None:
            # Factory.buildProtocol is allowed to return None. In that case,
            # make up a protocol to satisfy the rest of the implementation;
            # connectionLost is going to be called on something, for example.
            # This is easier than adding special case support for a None
            # protocol throughout the rest of the transport implementation.
            self.protocol = Protocol()
            # But dispose of the connection quickly.
            self.loseConnection()
        else:
            self.startReading()
            self.protocol.makeConnection(self)
+
+
+_NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
+
+
def _resolveIPv6(ip, port):
    """
    Resolve an IPv6 literal into an IPv6 address.

    This is necessary to resolve any embedded scope identifiers to the relevant
    C{sin6_scope_id} for use with C{socket.connect()}, C{socket.listen()}, or
    C{socket.bind()}; see U{RFC 3493 <https://tools.ietf.org/html/rfc3493>} for
    more information.

    @param ip: An IPv6 address literal.
    @type ip: C{str}

    @param port: A port number.
    @type port: C{int}

    @return: a 4-tuple of C{(host, port, flow, scope)}, suitable for use as an
        IPv6 address.

    @raise socket.gaierror: if either the IP or port is not numeric as it
        should be.
    """
    # _NUMERIC_ONLY forbids any actual name resolution: both host and
    # service must already be numeric.
    return socket.getaddrinfo(ip, port, 0, 0, 0, _NUMERIC_ONLY)[0][4]
+
+
class _BaseTCPClient:
    """
    Code shared with other (non-POSIX) reactors for management of outgoing TCP
    connections (both TCPv4 and TCPv6).

    @note: In order to be functional, this class must be mixed into the same
        hierarchy as L{_BaseBaseClient}. It would subclass L{_BaseBaseClient}
        directly, but the class hierarchy here is divided in strange ways out
        of the need to share code along multiple axes; specifically, with the
        IOCP reactor and also with UNIX clients in other reactors.

    @ivar _addressType: The Twisted _IPAddress implementation for this client
    @type _addressType: L{IPv4Address} or L{IPv6Address}

    @ivar connector: The L{Connector} which is driving this L{_BaseTCPClient}'s
        connection attempt.

    @ivar addr: The address that this socket will be connecting to.
    @type addr: If IPv4, a 2-C{tuple} of C{(str host, int port)}. If IPv6, a
        4-C{tuple} of (C{str host, int port, int ignored, int scope}).

    @ivar createInternetSocket: Subclasses must implement this as a method to
        create a python socket object of the appropriate address family and
        socket type.
    @type createInternetSocket: 0-argument callable returning
        C{socket._socketobject}.
    """

    _addressType = address.IPv4Address

    def __init__(self, host, port, bindAddress, connector, reactor=None):
        # BaseClient.__init__ is invoked later
        self.connector = connector
        self.addr = (host, port)

        whenDone = self.resolveAddress
        err = None
        skt = None

        if abstract.isIPAddress(host):
            self._requiresResolution = False
        elif abstract.isIPv6Address(host):
            # IPv6 literals are normalized eagerly and switch this client to
            # the AF_INET6 family and IPv6Address reporting.
            self._requiresResolution = False
            self.addr = _resolveIPv6(host, port)
            self.addressFamily = socket.AF_INET6
            self._addressType = address.IPv6Address
        else:
            self._requiresResolution = True
        try:
            skt = self.createInternetSocket()
        except OSError as se:
            err = error.ConnectBindError(se.args[0], se.args[1])
            # A None whenDone tells _finishInit to fail the connection.
            whenDone = None
        if whenDone and bindAddress is not None:
            try:
                if abstract.isIPv6Address(bindAddress[0]):
                    bindinfo = _resolveIPv6(*bindAddress)
                else:
                    bindinfo = bindAddress
                skt.bind(bindinfo)
            except OSError as se:
                err = error.ConnectBindError(se.args[0], se.args[1])
                whenDone = None
        self._finishInit(whenDone, skt, err, reactor)

    def getHost(self):
        """
        Returns an L{IPv4Address} or L{IPv6Address}.

        This indicates the address from which I am connecting.
        """
        return self._addressType("TCP", *_getsockname(self.socket))

    def getPeer(self):
        """
        Returns an L{IPv4Address} or L{IPv6Address}.

        This indicates the address that I am connected to.
        """
        return self._addressType("TCP", *self.realAddress)

    def __repr__(self) -> str:
        s = f"<{self.__class__} to {self.addr} at {id(self):x}>"
        return s
+
+
class Client(_BaseTCPClient, BaseClient):
    """
    A TCP connection transport, for either TCPv4 or TCPv6.

    All behavior comes from the two base classes; this class only binds
    them together.  Do not instantiate directly; use
    L{IReactorTCP.connectTCP} instead.
    """
+
+
class Server(_TLSServerMixin, Connection):
    """
    Serverside socket-stream connection class.

    This is a serverside network connection transport; a socket which came from
    an accept() on a server.

    @ivar _base: L{Connection}, which is the base class of this class which has
        all of the useful file descriptor methods.  This is used by
        L{_TLSServerMixin} to call the right methods to directly manipulate the
        transport, as is necessary for writing TLS-encrypted bytes (whereas
        those methods on L{Server} will go through another layer of TLS if it
        has been enabled).
    """

    _base = Connection

    _addressType = address.IPv4Address

    def __init__(self, sock, protocol, client, server, sessionno, reactor):
        """
        Server(sock, protocol, client, server, sessionno)

        Initialize it with a socket, a protocol, a descriptor for my peer (a
        tuple of host, port describing the other end of the connection), an
        instance of Port, and a session number.
        """
        Connection.__init__(self, sock, protocol, reactor)
        # An IPv6 peer address is a 4-tuple (host, port, flow, scope), so
        # anything other than a 2-tuple means IPv6.
        if len(client) != 2:
            self._addressType = address.IPv6Address
        self.server = server
        self.client = client
        self.sessionno = sessionno
        self.hostname = client[0]

        logPrefix = self._getLogPrefix(self.protocol)
        self.logstr = f"{logPrefix},{sessionno},{self.hostname}"
        # repstr is only pre-computed when there is an owning Port;
        # _fromConnectedSocket sets it separately for port-less servers.
        if self.server is not None:
            self.repstr: str = "<{} #{} on {}>".format(
                self.protocol.__class__.__name__,
                self.sessionno,
                self.server._realPortNumber,
            )
        self.startReading()
        self.connected = 1

    def __repr__(self) -> str:
        """
        A string representation of this connection.
        """
        return self.repstr

    @classmethod
    def _fromConnectedSocket(cls, fileDescriptor, addressFamily, factory, reactor):
        """
        Create a new L{Server} based on an existing connected I{SOCK_STREAM}
        socket.

        Arguments are the same as to L{Server.__init__}, except where noted.

        @param fileDescriptor: An integer file descriptor associated with a
            connected socket.  The socket must be in non-blocking mode.  Any
            additional attributes desired, such as I{FD_CLOEXEC}, must also be
            set already.

        @param addressFamily: The address family (sometimes called I{domain})
            of the existing socket.  For example, L{socket.AF_INET}.

        @return: A new instance of C{cls} wrapping the socket given by
            C{fileDescriptor}, or L{None} if the factory declined the
            connection.
        """
        addressType = address.IPv4Address
        if addressFamily == socket.AF_INET6:
            addressType = address.IPv6Address
        skt = socket.fromfd(fileDescriptor, addressFamily, socket.SOCK_STREAM)
        addr = _getpeername(skt)
        protocolAddr = addressType("TCP", *addr)
        localPort = skt.getsockname()[1]

        protocol = factory.buildProtocol(protocolAddr)
        if protocol is None:
            # The factory declined the connection; drop the socket.
            skt.close()
            return

        # There is no owning Port (server=None), and the peer's port number
        # (addr[1]) stands in for the session number.
        self = cls(skt, protocol, addr, None, addr[1], reactor)
        self.repstr = "<{} #{} on {}>".format(
            self.protocol.__class__.__name__,
            self.sessionno,
            localPort,
        )
        protocol.makeConnection(self)
        return self

    def getHost(self):
        """
        Returns an L{IPv4Address} or L{IPv6Address}.

        This indicates the server's address.
        """
        addr = _getsockname(self.socket)
        return self._addressType("TCP", *addr)

    def getPeer(self):
        """
        Returns an L{IPv4Address} or L{IPv6Address}.

        This indicates the client's address.
        """
        return self._addressType("TCP", *self.client)
+
+
class _IFileDescriptorReservation(Interface):
    """
    An open file that represents an emergency reservation in the
    process' file descriptor table.  If L{Port} encounters C{EMFILE}
    on C{accept(2)}, it can close this file descriptor, retry the
    C{accept} so that the incoming connection occupies this file
    descriptor's space, and then close that connection and reopen this
    one.

    Calling L{_IFileDescriptorReservation.reserve} attempts to open
    the reserve file descriptor if it is not already open.
    L{_IFileDescriptorReservation.available} returns L{True} if the
    underlying file is open and its descriptor claimed.

    L{_IFileDescriptorReservation} instances are context managers;
    entering them releases the underlying file descriptor, while
    exiting them attempts to reacquire it.  The block can take
    advantage of the free slot in the process' file descriptor table
    to accept and close a client connection.

    Because another thread might open a file descriptor between the
    time the context manager is entered and the time C{accept} is
    called, opening the reserve descriptor is best-effort only.
    """

    # NOTE: per zope.interface convention, method declarations on an
    # Interface take no ``self`` parameter.

    def available():
        """
        Is the reservation available?

        @return: L{True} if the reserved file descriptor is open and
            can thus be closed to allow a new file to be opened in its
            place; L{False} if it is not open.
        """

    def reserve():
        """
        Attempt to open the reserved file descriptor; if this fails
        because of C{EMFILE}, internal state is reset so that another
        reservation attempt can be made.

        @raises Exception: Any exception except an L{OSError} whose
            errno is L{EMFILE}.
        """

    def __enter__():
        """
        Release the underlying file descriptor so that code within the
        context manager can open a new file.
        """

    def __exit__(excType, excValue, traceback):
        """
        Attempt to re-open the reserved file descriptor.  See
        L{reserve} for caveats.

        @param excType: See L{object.__exit__}
        @param excValue: See L{object.__exit__}
        @param traceback: See L{object.__exit__}
        """
+
+
class _HasClose(typing_extensions.Protocol):
    """
    Structural type for any object with a C{close} method (e.g. an open
    file); used to annotate L{_FileDescriptorReservation}'s file factory.
    """

    def close(self) -> object:
        ...
+
+
@implementer(_IFileDescriptorReservation)
@attr.s(auto_attribs=True)
class _FileDescriptorReservation:
    """
    Concrete L{_IFileDescriptorReservation} that keeps one file open as an
    emergency slot in the process' file descriptor table.

    @ivar fileFactory: A factory that will be called to reserve a
        file descriptor.
    @type fileFactory: A L{callable} that accepts no arguments and
        returns an object with a C{close} method.
    """

    _log: ClassVar[Logger] = Logger()

    _fileFactory: Callable[[], _HasClose]
    _fileDescriptor: Optional[_HasClose] = attr.ib(init=False, default=None)

    def available(self):
        """
        See L{_IFileDescriptorReservation.available}.

        @return: L{True} if the reserved file descriptor is open and
            can thus be closed to allow a new file to be opened in its
            place; L{False} if it is not open.
        """
        return self._fileDescriptor is not None

    def reserve(self):
        """
        See L{_IFileDescriptorReservation.reserve}.
        """
        if self._fileDescriptor is not None:
            # Already holding the reservation; nothing to do.
            return
        try:
            self._fileDescriptor = self._fileFactory()
        except OSError as e:
            if e.errno != EMFILE:
                raise
            # EMFILE is the very condition we exist to mitigate; log it
            # and leave state untouched so a later attempt can retry.
            self._log.failure("Could not reserve EMFILE recovery file descriptor.")

    def __enter__(self):
        """
        See L{_IFileDescriptorReservation.__enter__}.
        """
        held = self._fileDescriptor
        if held is None:
            raise RuntimeError("No file reserved. Have you called my reserve method?")
        # Free the descriptor-table slot for the caller's use.
        held.close()
        self._fileDescriptor = None

    def __exit__(self, excType, excValue, traceback):
        """
        See L{_IFileDescriptorReservation.__exit__}.
        """
        # Best-effort re-acquisition; failures are logged, never raised.
        try:
            self.reserve()
        except Exception:
            self._log.failure("Could not re-reserve EMFILE recovery file descriptor.")
+
+
@implementer(_IFileDescriptorReservation)
class _NullFileDescriptorReservation:
    """
    A do-nothing L{_IFileDescriptorReservation}, for platforms where
    holding a reserve file descriptor cannot help.
    """

    def available(self):
        """
        The null reservation never holds a file.  See
        L{_IFileDescriptorReservation.available}.

        @return: L{False}
        """
        return False

    def reserve(self):
        """
        No-op.  See L{_IFileDescriptorReservation.reserve}.
        """

    def __enter__(self):
        """
        No-op; returns nothing.  See
        L{_IFileDescriptorReservation.__enter__}.
        """

    def __exit__(self, excType, excValue, traceback):
        """
        No-op.  See L{_IFileDescriptorReservation.__exit__}.

        @param excType: See L{object.__exit__}
        @param excValue: See L{object.__exit__}
        @param traceback: See L{object.__exit__}
        """
+
+
+# Don't keep a reserve file descriptor for coping with file descriptor
+# exhaustion on Windows.
+
+# WSAEMFILE occurs when a process has run out of memory, not when a
+# specific limit has been reached. Windows sockets are handles, which
+# differ from UNIX's file descriptors in that they can refer to any
+# "named kernel object", including user interface resources like menu
+# and icons. The generality of handles results in a much higher limit
+# than UNIX imposes on file descriptors: a single Windows process can
+# allocate up to 16,777,216 handles. Because they're indexes into a
+# three level table whose upper two layers are allocated from
+# swappable pages, handles compete for heap space with other kernel
+# objects, not with each other. Closing a given socket handle may not
+# release enough memory to allow the process to make progress.
+#
+# This fundamental difference between file descriptors and handles
+# makes a reserve file descriptor useless on Windows. Note that other
+# event loops, such as libuv and libevent, also do not special case
+# WSAEMFILE.
+#
+# For an explanation of handles, see the "Object Manager"
+# (pp. 140-175) section of
+#
+# Windows Internals, Part 1: Covering Windows Server 2008 R2 and
+# Windows 7 (6th ed.)
+# Mark E. Russinovich, David A. Solomon, and Alex
+# Ionescu. 2012. Microsoft Press.
if platformType == "win32":
    # Windows handles do not behave like POSIX file descriptors (see the
    # discussion directly above), so no reservation is kept there.
    _reservedFD = _NullFileDescriptorReservation()
else:
    # os.devnull is a cheap, always-available file to park in the slot.
    _reservedFD = _FileDescriptorReservation(lambda: open(os.devnull))  # type: ignore[assignment]


# Linux and other UNIX-like operating systems return EMFILE when a
# process has reached its soft limit of file descriptors. *BSD and
# Win32 raise (WSA)ENOBUFS when socket limits are reached. Linux can
# give ENFILE if the system is out of inodes, or ENOMEM if there is
# insufficient memory to allocate a new dentry. ECONNABORTED is
# documented as possible on all relevant platforms (Linux, Windows,
# macOS, and the BSDs) but occurs only on the BSDs. It occurs when a
# client sends a FIN or RST after the server sends a SYN|ACK but
# before application code calls accept(2). On Linux, calling
# accept(2) on such a listener returns a connection that fails as
# though the it were terminated after being fully established. This
# appears to be an implementation choice (see inet_accept in
# inet/ipv4/af_inet.c). On macOS, such a listener is not considered
# readable, so accept(2) will never be called. Calling accept(2) on
# such a listener, however, does not return at all.
_ACCEPT_ERRORS = (EMFILE, ENOBUFS, ENFILE, ENOMEM, ECONNABORTED)
+
+
@attr.s(auto_attribs=True)
class _BuffersLogs:
    """
    A context manager that buffers any log events until after its
    block exits.

    @ivar _namespace: The namespace of the buffered events.
    @type _namespace: L{str}.

    @ivar _observer: The observer to which buffered log events will be
        written.
    @type _observer: L{twisted.logger.ILogObserver}.
    """

    _namespace: str
    _observer: ILogObserver
    _logs: List[LogEvent] = attr.ib(default=attr.Factory(list))

    def __enter__(self):
        """
        Enter a log buffering context.

        @return: A logger that buffers log events.
        @rtype: L{Logger}.
        """
        return Logger(namespace=self._namespace, observer=self._logs.append)

    def __exit__(self, excType, excValue, traceback):
        """
        Exit a log buffering context and log all buffered events to
        the provided observer.

        The first two parameters were previously declared in the swapped
        order C{(excValue, excType)}; the context-manager protocol always
        passes C{(type, value, traceback)} positionally, so behavior was
        unaffected, but the names now match the values they receive.

        @param excType: See L{object.__exit__}
        @param excValue: See L{object.__exit__}
        @param traceback: See L{object.__exit__}
        """
        # Flush unconditionally, even if the block raised: buffering must
        # never swallow log events on error paths.
        for event in self._logs:
            self._observer(event)
+
+
+def _accept(logger, accepts, listener, reservedFD):
+ """
+ Return a generator that yields client sockets from the provided
+ listening socket until there are none left or an unrecoverable
+ error occurs.
+
+ @param logger: A logger to which C{accept}-related events will be
+ logged. This should not log to arbitrary observers that might
+ open a file descriptor to avoid claiming the C{EMFILE} file
+ descriptor on UNIX-like systems.
+ @type logger: L{Logger}
+
+ @param accepts: An iterable iterated over to limit the number
+ consecutive C{accept}s.
+ @type accepts: An iterable.
+
+ @param listener: The listening socket.
+ @type listener: L{socket.socket}
+
+ @param reservedFD: A reserved file descriptor that can be used to
+ recover from C{EMFILE} on UNIX-like systems.
+ @type reservedFD: L{_IFileDescriptorReservation}
+
+ @return: A generator that yields C{(socket, addr)} tuples from
+ L{socket.socket.accept}
+ """
+ for _ in accepts:
+ try:
+ client, address = listener.accept()
+ except OSError as e:
+ if e.args[0] in (EWOULDBLOCK, EAGAIN):
+ # No more clients.
+ return
+ elif e.args[0] == EPERM:
+ # Netfilter on Linux may have rejected the
+ # connection, but we get told to try to accept()
+ # anyway.
+ continue
+ elif e.args[0] == EMFILE and reservedFD.available():
+ # Linux and other UNIX-like operating systems return
+ # EMFILE when a process has reached its soft limit of
+ # file descriptors. The reserved file descriptor is
+ # available, so it can be released to free up a
+ # descriptor for use by listener.accept()'s clients.
+ # Each client socket will be closed until the listener
+ # returns EAGAIN.
+ logger.info(
+ "EMFILE encountered;" " releasing reserved file descriptor."
+ )
+ # The following block should not run arbitrary code
+ # that might acquire its own file descriptor.
+ with reservedFD:
+ clientsToClose = _accept(logger, accepts, listener, reservedFD)
+ for clientToClose, closedAddress in clientsToClose:
+ clientToClose.close()
+ logger.info(
+ "EMFILE recovery:" " Closed socket from {address}",
+ address=closedAddress,
+ )
+ logger.info("Re-reserving EMFILE recovery file descriptor.")
+ return
+ elif e.args[0] in _ACCEPT_ERRORS:
+ logger.info(
+ "Could not accept new connection ({acceptError})",
+ acceptError=errorcode[e.args[0]],
+ )
+ return
+ else:
+ raise
+ else:
+ yield client, address
+
+
@implementer(IListeningPort)
class Port(base.BasePort, _SocketCloser):
    """
    A TCP server port, listening for connections.

    When a connection is accepted, this will call a factory's buildProtocol
    with the incoming address as an argument, according to the specification
    described in L{twisted.internet.interfaces.IProtocolFactory}.

    If you wish to change the sort of transport that will be used, the
    C{transport} attribute will be called with the signature expected for
    C{Server.__init__}, so it can be replaced.

    @ivar deferred: a deferred created when L{stopListening} is called, and
        that will fire when connection is lost. This is not to be used it
        directly: prefer the deferred returned by L{stopListening} instead.
    @type deferred: L{defer.Deferred}

    @ivar disconnecting: flag indicating that the L{stopListening} method has
        been called and that no connections should be accepted anymore.
    @type disconnecting: C{bool}

    @ivar connected: flag set once the listen has successfully been called on
        the socket.
    @type connected: C{bool}

    @ivar _type: A string describing the connections which will be created by
        this port.  Normally this is C{"TCP"}, since this is a TCP port, but
        when the TLS implementation re-uses this class it overrides the value
        with C{"TLS"}.  Only used for logging.

    @ivar _preexistingSocket: If not L{None}, a L{socket.socket} instance which
        was created and initialized outside of the reactor and will be used to
        listen for connections (instead of a new socket being created by this
        L{Port}).
    """

    socketType = socket.SOCK_STREAM

    # The transport class instantiated per accepted connection; replaceable
    # (see class docstring).
    transport = Server
    sessionno = 0
    interface = ""
    backlog = 50

    _type = "TCP"

    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber: Optional[int] = None

    # An externally initialized socket that we will use, rather than creating
    # our own.
    _preexistingSocket = None

    addressFamily = socket.AF_INET
    _addressType = address.IPv4Address
    _logger = Logger()

    def __init__(self, port, factory, backlog=50, interface="", reactor=None):
        """Initialize with a numeric port to listen on."""
        base.BasePort.__init__(self, reactor=reactor)
        self.port = port
        self.factory = factory
        self.backlog = backlog
        # An IPv6 literal interface switches the whole port to AF_INET6.
        if abstract.isIPv6Address(interface):
            self.addressFamily = socket.AF_INET6
            self._addressType = address.IPv6Address
        self.interface = interface

    @classmethod
    def _fromListeningDescriptor(cls, reactor, fd, addressFamily, factory):
        """
        Create a new L{Port} based on an existing listening I{SOCK_STREAM}
        socket.

        Arguments are the same as to L{Port.__init__}, except where noted.

        @param fd: An integer file descriptor associated with a listening
            socket.  The socket must be in non-blocking mode.  Any additional
            attributes desired, such as I{FD_CLOEXEC}, must also be set already.

        @param addressFamily: The address family (sometimes called I{domain}) of
            the existing socket.  For example, L{socket.AF_INET}.

        @return: A new instance of C{cls} wrapping the socket given by C{fd}.
        """
        port = socket.fromfd(fd, addressFamily, cls.socketType)
        interface = _getsockname(port)[0]
        # port=None and backlog=None are never used here: startListening()
        # takes the pre-existing-socket branch, which neither binds nor
        # calls listen().
        self = cls(None, factory, None, interface, reactor)
        self._preexistingSocket = port
        return self

    def __repr__(self) -> str:
        if self._realPortNumber is not None:
            return "<{} of {} on {}>".format(
                self.__class__,
                self.factory.__class__,
                self._realPortNumber,
            )
        else:
            return "<{} of {} (not listening)>".format(
                self.__class__,
                self.factory.__class__,
            )

    def createInternetSocket(self):
        # SO_REUSEADDR lets a restarted server rebind immediately; only set
        # on POSIX platforms other than cygwin.
        s = base.BasePort.createInternetSocket(self)
        if platformType == "posix" and sys.platform != "cygwin":
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s

    def startListening(self):
        """Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        # Claim the EMFILE-recovery descriptor before accepting connections.
        _reservedFD.reserve()
        if self._preexistingSocket is None:
            # Create a new socket and make it listen
            try:
                skt = self.createInternetSocket()
                if self.addressFamily == socket.AF_INET6:
                    addr = _resolveIPv6(self.interface, self.port)
                else:
                    addr = (self.interface, self.port)
                skt.bind(addr)
            except OSError as le:
                raise CannotListenError(self.interface, self.port, le)
            skt.listen(self.backlog)
        else:
            # Re-use the externally specified socket
            skt = self._preexistingSocket
            self._preexistingSocket = None
            # Avoid shutting it down at the end.
            self._shouldShutdown = False

        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]

        log.msg(
            "%s starting on %s"
            % (self._getLogPrefix(self.factory), self._realPortNumber)
        )

        # The order of the next 5 lines is kind of bizarre.  If no one
        # can explain it, perhaps we should re-arrange them.
        self.factory.doStart()
        self.connected = True
        self.socket = skt
        self.fileno = self.socket.fileno
        self.numberAccepts = 100

        self.startReading()

    def _buildAddr(self, address):
        # Wrap a raw sockaddr tuple in the appropriate IPv4/IPv6 address type.
        return self._addressType("TCP", *address)

    def doRead(self):
        """
        Called when my socket is ready for reading.

        This accepts a connection and calls self.protocol() to handle the
        wire-level protocol.
        """
        try:
            if platformType == "posix":
                numAccepts = self.numberAccepts
            else:
                # win32 event loop breaks if we do more than one accept()
                # in an iteration of the event loop.
                numAccepts = 1

            # Buffer log events so no observer can grab a file descriptor
            # while the EMFILE recovery descriptor is temporarily released.
            with _BuffersLogs(
                self._logger.namespace, self._logger.observer
            ) as bufferingLogger:
                accepted = 0
                clients = _accept(
                    bufferingLogger, range(numAccepts), self.socket, _reservedFD
                )

                for accepted, (skt, addr) in enumerate(clients, 1):
                    fdesc._setCloseOnExec(skt.fileno())

                    if len(addr) == 4:
                        # IPv6, make sure we get the scopeID if it
                        # exists
                        host = socket.getnameinfo(
                            addr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
                        )
                        addr = tuple([host[0]] + list(addr[1:]))

                    protocol = self.factory.buildProtocol(self._buildAddr(addr))
                    if protocol is None:
                        # Factory declined this connection.
                        skt.close()
                        continue
                    s = self.sessionno
                    self.sessionno = s + 1
                    transport = self.transport(
                        skt, protocol, addr, self, s, self.reactor
                    )
                    protocol.makeConnection(transport)

                # Scale our synchronous accept loop according to traffic
                # Reaching our limit on consecutive accept calls indicates
                # there might be still more clients to serve the next time
                # the reactor calls us.  Prepare to accept some more.
                if accepted == self.numberAccepts:
                    self.numberAccepts += 20
                # Otherwise, don't attempt to accept any more clients than
                # we just accepted or any less than 1.
                else:
                    self.numberAccepts = max(1, accepted)
        except BaseException:
            # Note that in TLS mode, this will possibly catch SSL.Errors
            # raised by self.socket.accept()
            #
            # There is no "except SSL.Error:" above because SSL may be
            # None if there is no SSL support.  In any case, all the
            # "except SSL.Error:" suite would probably do is log.deferr()
            # and return, so handling it here works just as well.
            log.deferr()

    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Stop accepting connections on this port.

        This will shut down the socket and call self.connectionLost().  It
        returns a deferred which will fire successfully when the port is
        actually closed, or with a failure if an error occurs shutting down.
        """
        # Note: the default Failure is constructed once, at class-definition
        # time, and shared across calls.
        self.disconnecting = True
        self.stopReading()
        if self.connected:
            self.deferred = deferLater(self.reactor, 0, self.connectionLost, connDone)
            return self.deferred

    # IListeningPort's public name for loseConnection.
    stopListening = loseConnection

    def _logConnectionLostMsg(self):
        """
        Log message for closing port
        """
        log.msg(f"({self._type} Port {self._realPortNumber} Closed)")

    def connectionLost(self, reason):
        """
        Cleans up the socket.
        """
        self._logConnectionLostMsg()
        self._realPortNumber = None

        base.BasePort.connectionLost(self, reason)
        self.connected = False
        self._closeSocket(True)
        del self.socket
        del self.fileno

        try:
            self.factory.doStop()
        finally:
            self.disconnecting = False

    def logPrefix(self):
        """Returns the name of my class, to prefix log entries with."""
        return reflect.qual(self.factory.__class__)

    def getHost(self):
        """
        Return an L{IPv4Address} or L{IPv6Address} indicating the listening
        address of this port.
        """
        addr = _getsockname(self.socket)
        return self._addressType("TCP", *addr)
+
+
class Connector(base.BaseConnector):
    """
    The L{twisted.internet.interfaces.IConnector} implementation shared by
    all POSIX-style reactors.

    @ivar _addressType: the type returned by L{Connector.getDestination}.
        Either L{IPv4Address} or L{IPv6Address}, depending on the type of
        address.
    @type _addressType: C{type}
    """

    _addressType = address.IPv4Address

    def __init__(self, host, port, factory, timeout, bindAddress, reactor=None):
        # A service name such as "http" is translated to its numeric port;
        # an unknown name is reported as ServiceNameUnknownError.
        if isinstance(port, str):
            try:
                port = socket.getservbyname(port, "tcp")
            except OSError as e:
                raise error.ServiceNameUnknownError(string=f"{e} ({port!r})")
        self.host = host
        self.port = port
        if abstract.isIPv6Address(host):
            self._addressType = address.IPv6Address
        self.bindAddress = bindAddress
        base.BaseConnector.__init__(self, factory, timeout, reactor)

    def _makeTransport(self):
        """
        Build the L{Client} transport that will carry out this connection
        attempt.

        @return: a new L{Client}
        @rtype: L{Client}
        """
        return Client(self.host, self.port, self.bindAddress, self, self.reactor)

    def getDestination(self):
        """
        @see: L{twisted.internet.interfaces.IConnector.getDestination}.
        """
        destination = self._addressType("TCP", self.host, self.port)
        return destination
diff --git a/contrib/python/Twisted/py3/twisted/internet/testing.py b/contrib/python/Twisted/py3/twisted/internet/testing.py
new file mode 100644
index 0000000000..2c37249584
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/testing.py
@@ -0,0 +1,969 @@
+# -*- test-case-name: twisted.internet.test.test_testing -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Assorted functionality which is commonly useful when writing unit tests.
+"""
+from __future__ import annotations
+
+from io import BytesIO
+from socket import AF_INET, AF_INET6
+from typing import Callable, Iterator, Sequence, overload
+
+from zope.interface import implementedBy, implementer
+from zope.interface.verify import verifyClass
+
+from typing_extensions import ParamSpec, Self
+
+from twisted.internet import address, error, protocol, task
+from twisted.internet.abstract import _dataMustBeBytes, isIPv6Address
+from twisted.internet.address import IPv4Address, IPv6Address, UNIXAddress
+from twisted.internet.defer import Deferred
+from twisted.internet.error import UnsupportedAddressFamily
+from twisted.internet.interfaces import (
+ IConnector,
+ IConsumer,
+ IListeningPort,
+ IProtocol,
+ IPushProducer,
+ IReactorCore,
+ IReactorFDSet,
+ IReactorSocket,
+ IReactorSSL,
+ IReactorTCP,
+ IReactorUNIX,
+ ITransport,
+)
+from twisted.internet.task import Clock
+from twisted.logger import ILogObserver, LogEvent, LogPublisher
+from twisted.protocols import basic
+from twisted.python import failure
+from twisted.trial.unittest import TestCase
+
+__all__ = [
+ "AccumulatingProtocol",
+ "LineSendingProtocol",
+ "FakeDatagramTransport",
+ "StringTransport",
+ "StringTransportWithDisconnection",
+ "StringIOWithoutClosing",
+ "_FakeConnector",
+ "_FakePort",
+ "MemoryReactor",
+ "MemoryReactorClock",
+ "RaisingMemoryReactor",
+ "NonStreamingProducer",
+ "waitUntilAllDisconnected",
+ "EventLoggingObserver",
+]
+
+_P = ParamSpec("_P")
+
+
class AccumulatingProtocol(protocol.Protocol):
    """
    An L{IProtocol} implementation which buffers every byte delivered to it
    and can fire L{Deferred}s when the connection is made or lost.

    @ivar made: A flag indicating whether C{connectionMade} has been called.
    @ivar data: Bytes giving all the data passed to C{dataReceived}.
    @ivar closed: A flag indicating whether C{connectionLost} has been called.
    @ivar closedReason: The value of the I{reason} parameter passed to
        C{connectionLost}.
    @ivar closedDeferred: If set to a L{Deferred}, this will be fired when
        C{connectionLost} is called.
    """

    made = closed = 0
    closedReason = None
    closedDeferred = None
    data = b""
    factory = None

    def connectionMade(self):
        self.made = 1
        factory = self.factory
        if factory is not None and factory.protocolConnectionMade is not None:
            # Fire the factory's one-shot connection notification, consuming
            # it so it cannot fire twice.
            deferred = factory.protocolConnectionMade
            factory.protocolConnectionMade = None
            deferred.callback(self)

    def dataReceived(self, data):
        # Accumulate everything; tests inspect self.data afterwards.
        self.data += data

    def connectionLost(self, reason):
        self.closed = 1
        self.closedReason = reason
        deferred = self.closedDeferred
        if deferred is not None:
            self.closedDeferred = None
            deferred.callback(None)
+
+
class LineSendingProtocol(basic.LineReceiver):
    """
    A L{LineReceiver} which sends a fixed list of lines and records every
    line it receives.

    @ivar lines: the lines still waiting to be sent.
    @ivar response: lines recorded by C{lineReceived}.
    @ivar start: if true, send all lines as soon as the connection is made;
        otherwise send them upon receiving the first line.
    @ivar lostConn: whether the connection has been lost.
    """

    lostConn = False

    def __init__(self, lines, start=True):
        # Copy so the caller's list is not mutated when we clear self.lines.
        self.lines = lines[:]
        self.response = []
        self.start = start

    def connectionMade(self):
        if self.start:
            for line in self.lines:
                self.sendLine(line)

    def lineReceived(self, line):
        if not self.start:
            # NOTE(review): this loop variable shadows the ``line`` parameter,
            # so when lines were pending the append below records the *last
            # line sent* rather than the line received -- confirm this is
            # intentional before changing it.
            for line in self.lines:
                self.sendLine(line)
            self.lines = []
        self.response.append(line)

    def connectionLost(self, reason):
        self.lostConn = True
+
+
class FakeDatagramTransport:
    """
    A datagram transport stand-in which records each packet written to it
    instead of sending it anywhere.

    @ivar written: a list of C{(packet, address)} two-tuples, one per call
        to L{write}.
    @cvar noAddr: sentinel recorded as the address when none was supplied.
    """

    noAddr = object()

    def __init__(self):
        self.written = []

    def write(self, packet, addr=noAddr):
        # Record the packet for later inspection by tests.
        self.written.append((packet, addr))
+
+
@implementer(ITransport, IConsumer, IPushProducer)
class StringTransport:
    """
    A transport that records written bytes in an in-memory buffer and tracks
    connection/producer state without performing any real I/O.

    Several attributes exist purely for inspection by tests and are not part
    of any implemented interface; implementation code should not rely on
    them:

    @ivar disconnecting: C{False} until L{loseConnection} is called, then
        C{True}.
    @ivar disconnected: C{False} until L{abortConnection} is called, then
        C{True}.
    @ivar producer: the currently registered producer, or L{None}.
    @ivar streaming: the C{streaming} flag passed to C{registerProducer}, or
        L{None} when no producer is registered.
    @ivar hostAddr: the object L{getHost} returns, or L{None} to return a
        placeholder IPv4 address instead.
    @ivar peerAddr: the object L{getPeer} returns, or L{None} to return a
        placeholder IPv4 address instead.
    @ivar producerState: this transport's state in its capacity as an
        L{IPushProducer}: one of C{'producing'}, C{'paused'} or C{'stopped'}.
    @ivar io: the L{BytesIO} holding everything written since the last
        L{clear}; prefer L{value} over touching this directly.
    @ivar _lenient: when true, calling L{resumeProducing} after the
        connection has been lost is permitted instead of raising
        L{RuntimeError}.  Defaults to C{False}.
    """

    disconnecting = False
    disconnected = False

    producer = None
    streaming = None

    hostAddr = None
    peerAddr = None

    producerState = "producing"

    def __init__(self, hostAddress=None, peerAddress=None, lenient=False):
        self.clear()
        if hostAddress is not None:
            self.hostAddr = hostAddress
        if peerAddress is not None:
            self.peerAddr = peerAddress
        self.connected = True
        self._lenient = lenient

    def clear(self):
        """
        Forget all bytes written so far.

        Test helper only; not a transport method.
        """
        self.io = BytesIO()

    def value(self):
        """
        Return every byte written to this transport since the last call to
        L{clear}.

        Test helper only; not a transport method.

        @rtype: C{bytes}
        """
        return self.io.getvalue()

    # ITransport
    def write(self, data):
        _dataMustBeBytes(data)
        self.io.write(data)

    def writeSequence(self, data):
        for chunk in data:
            self.io.write(chunk)

    def loseConnection(self):
        """
        Mark the transport as disconnecting; there is no other effect.
        """
        self.disconnecting = True

    def abortConnection(self):
        """
        Mark the transport as disconnected, then behave as
        L{loseConnection}.
        """
        self.disconnected = True
        self.loseConnection()

    def getPeer(self):
        if self.peerAddr is not None:
            return self.peerAddr
        return address.IPv4Address("TCP", "192.168.1.1", 54321)

    def getHost(self):
        if self.hostAddr is not None:
            return self.hostAddr
        return address.IPv4Address("TCP", "10.0.0.1", 12345)

    # IConsumer
    def registerProducer(self, producer, streaming):
        if self.producer is not None:
            raise RuntimeError("Cannot register two producers")
        self.producer = producer
        self.streaming = streaming

    def unregisterProducer(self):
        if self.producer is None:
            raise RuntimeError("Cannot unregister a producer unless one is registered")
        self.producer = None
        self.streaming = None

    # IPushProducer
    def _checkState(self):
        # Guard against misuse: resuming after disconnect (unless lenient),
        # or resuming a producer that was stopped outright.
        if self.disconnecting and not self._lenient:
            raise RuntimeError("Cannot resume producing after loseConnection")
        if self.producerState == "stopped":
            raise RuntimeError("Cannot resume a stopped producer")

    def pauseProducing(self):
        self._checkState()
        self.producerState = "paused"

    def stopProducing(self):
        self.producerState = "stopped"

    def resumeProducing(self):
        self._checkState()
        self.producerState = "producing"
+
+
class StringTransportWithDisconnection(StringTransport):
    """
    A L{StringTransport} which, when asked to disconnect, also delivers
    C{connectionLost} to the protocol attached to it.
    """

    protocol: IProtocol

    def loseConnection(self):
        if not self.connected:
            return
        self.connected = False
        reason = failure.Failure(error.ConnectionDone("Bye."))
        self.protocol.connectionLost(reason)
+
+
class StringIOWithoutClosing(BytesIO):
    """
    A L{BytesIO} whose C{close} is a no-op, so the buffered data remains
    accessible even after a user "closes" it.
    """

    def close(self):
        """
        Ignore the request to close this buffer.
        """
+
+
@implementer(IListeningPort)
class _FakePort:
    """
    An L{IListeningPort} for tests which does nothing except report a fixed
    host address.

    @ivar _hostAddress: the L{IAddress} returned by L{getHost}.
    """

    def __init__(self, hostAddress):
        """
        @param hostAddress: the L{IAddress} this port should pretend to be
            listening on.
        """
        self._hostAddress = hostAddress

    def startListening(self):
        """
        No-op implementation of L{IListeningPort.startListening}.
        """

    def stopListening(self):
        """
        No-op implementation of L{IListeningPort.stopListening}.
        """

    def getHost(self):
        """
        Return the L{IAddress} given to C{__init__}.
        """
        return self._hostAddress
+
+
@implementer(IConnector)
class _FakeConnector:
    """
    An L{IConnector} for tests which merely records whether
    C{stopConnecting} or C{disconnect} has been invoked.

    @ivar stoppedConnecting: whether L{stopConnecting} has been called.
    @ivar _disconnected: whether L{disconnect} has been called.
    @ivar _address: the L{IAddress} provider returned by L{getDestination}.
    """

    _disconnected = False
    stoppedConnecting = False

    def __init__(self, address):
        """
        @param address: An L{IAddress} provider that represents this
            connector's destination.
        """
        self._address = address

    def stopConnecting(self):
        """
        Record that connecting was stopped by setting
        L{_FakeConnector.stoppedConnecting} to C{True}.
        """
        self.stoppedConnecting = True

    def disconnect(self):
        """
        Record the disconnection request; otherwise a no-op.
        """
        self._disconnected = True

    def connect(self):
        """
        No-op implementation of L{IConnector.connect}.
        """

    def getDestination(self):
        """
        Return the address given to C{__init__}.
        """
        return self._address
+
+
@implementer(
    IReactorCore, IReactorTCP, IReactorSSL, IReactorUNIX, IReactorSocket, IReactorFDSet
)
class MemoryReactor:
    """
    A fake reactor to be used in tests. This reactor doesn't actually do
    much that's useful yet. It accepts TCP connection setup attempts, but
    they will never succeed.

    @ivar hasInstalled: Keeps track of whether this reactor has been installed.
    @type hasInstalled: L{bool}

    @ivar running: Keeps track of whether this reactor is running.
    @type running: L{bool}

    @ivar hasStopped: Keeps track of whether this reactor has been stopped.
    @type hasStopped: L{bool}

    @ivar hasCrashed: Keeps track of whether this reactor has crashed.
    @type hasCrashed: L{bool}

    @ivar whenRunningHooks: Keeps track of hooks registered with
        C{callWhenRunning}.
    @type whenRunningHooks: L{list}

    @ivar triggers: Keeps track of hooks registered with
        C{addSystemEventTrigger}.
    @type triggers: L{dict}

    @ivar tcpClients: Keeps track of connection attempts (ie, calls to
        C{connectTCP}).
    @type tcpClients: L{list}

    @ivar tcpServers: Keeps track of server listen attempts (ie, calls to
        C{listenTCP}).
    @type tcpServers: L{list}

    @ivar sslClients: Keeps track of connection attempts (ie, calls to
        C{connectSSL}).
    @type sslClients: L{list}

    @ivar sslServers: Keeps track of server listen attempts (ie, calls to
        C{listenSSL}).
    @type sslServers: L{list}

    @ivar unixClients: Keeps track of connection attempts (ie, calls to
        C{connectUNIX}).
    @type unixClients: L{list}

    @ivar unixServers: Keeps track of server listen attempts (ie, calls to
        C{listenUNIX}).
    @type unixServers: L{list}

    @ivar adoptedPorts: Keeps track of server listen attempts (ie, calls to
        C{adoptStreamPort}).

    @ivar adoptedStreamConnections: Keeps track of stream-oriented
        connections added using C{adoptStreamConnection}.
    """

    def __init__(self):
        """
        Initialize the tracking lists.
        """
        self.hasInstalled = False

        self.running = False
        # NOTE(review): hasRun/hasStopped/hasCrashed start out True even
        # though nothing has happened yet, which contradicts the class
        # docstring.  Existing tests may rely on these initial values --
        # confirm before "fixing".
        self.hasRun = True
        self.hasStopped = True
        self.hasCrashed = True

        self.whenRunningHooks = []
        self.triggers = {}

        self.tcpClients = []
        self.tcpServers = []
        self.sslClients = []
        self.sslServers = []
        self.unixClients = []
        self.unixServers = []
        self.adoptedPorts = []
        self.adoptedStreamConnections = []
        self.connectors = []

        self.readers = set()
        self.writers = set()

    def install(self):
        """
        Fake install callable to emulate reactor module installation.
        """
        self.hasInstalled = True

    def resolve(self, name, timeout=10):
        """
        Not implemented; raises L{NotImplementedError}.
        """
        raise NotImplementedError()

    def run(self):
        """
        Fake L{IReactorCore.run}.
        Sets C{self.running} to L{True}, runs all of the hooks passed to
        C{self.callWhenRunning}, then calls C{self.stop} to simulate a request
        to stop the reactor.
        Sets C{self.hasRun} to L{True}.
        """
        assert self.running is False
        self.running = True
        self.hasRun = True

        for f, args, kwargs in self.whenRunningHooks:
            f(*args, **kwargs)

        self.stop()
        # That we stopped means we can return, phew.

    def stop(self):
        """
        Fake L{IReactorCore.run}.
        Sets C{self.running} to L{False}.
        Sets C{self.hasStopped} to L{True}.
        """
        self.running = False
        self.hasStopped = True

    def crash(self):
        """
        Fake L{IReactorCore.crash}.
        Sets C{self.running} to L{None}, because that feels crashy.
        Sets C{self.hasCrashed} to L{True}.
        """
        self.running = None
        self.hasCrashed = True

    def iterate(self, delay=0):
        """
        Not implemented; raises L{NotImplementedError}.
        """
        raise NotImplementedError()

    def fireSystemEvent(self, eventType):
        """
        Not implemented; raises L{NotImplementedError}.
        """
        raise NotImplementedError()

    def addSystemEventTrigger(
        self,
        phase: str,
        eventType: str,
        callable: Callable[_P, object],
        *args: _P.args,
        **kw: _P.kwargs,
    ) -> None:
        """
        Fake L{IReactorCore.run}.
        Keep track of trigger by appending it to
        self.triggers[phase][eventType].
        """
        phaseTriggers = self.triggers.setdefault(phase, {})
        eventTypeTriggers = phaseTriggers.setdefault(eventType, [])
        eventTypeTriggers.append((callable, args, kw))

    def removeSystemEventTrigger(self, triggerID):
        """
        Not implemented; raises L{NotImplementedError}.
        """
        raise NotImplementedError()

    def callWhenRunning(
        self, callable: Callable[_P, object], *args: _P.args, **kw: _P.kwargs
    ) -> None:
        """
        Fake L{IReactorCore.callWhenRunning}.
        Keeps a list of invocations to make in C{self.whenRunningHooks}.
        """
        self.whenRunningHooks.append((callable, args, kw))

    def adoptStreamPort(self, fileno, addressFamily, factory):
        """
        Fake L{IReactorSocket.adoptStreamPort}, that logs the call and returns
        an L{IListeningPort}.
        """
        if addressFamily == AF_INET:
            addr = IPv4Address("TCP", "0.0.0.0", 1234)
        elif addressFamily == AF_INET6:
            addr = IPv6Address("TCP", "::", 1234)
        else:
            raise UnsupportedAddressFamily()

        self.adoptedPorts.append((fileno, addressFamily, factory))
        return _FakePort(addr)

    def adoptStreamConnection(self, fileDescriptor, addressFamily, factory):
        """
        Record the given stream connection in C{adoptedStreamConnections}.

        @see:
            L{twisted.internet.interfaces.IReactorSocket.adoptStreamConnection}
        """
        self.adoptedStreamConnections.append((fileDescriptor, addressFamily, factory))

    def adoptDatagramPort(self, fileno, addressFamily, protocol, maxPacketSize=8192):
        """
        Fake L{IReactorSocket.adoptDatagramPort}, that logs the call and
        returns a fake L{IListeningPort}.

        @see: L{twisted.internet.interfaces.IReactorSocket.adoptDatagramPort}
        """
        if addressFamily == AF_INET:
            addr = IPv4Address("UDP", "0.0.0.0", 1234)
        elif addressFamily == AF_INET6:
            addr = IPv6Address("UDP", "::", 1234)
        else:
            raise UnsupportedAddressFamily()

        # Datagram adoptions are recorded in the same adoptedPorts list used
        # by adoptStreamPort (as 4-tuples rather than 3-tuples).
        self.adoptedPorts.append((fileno, addressFamily, protocol, maxPacketSize))
        return _FakePort(addr)

    def listenTCP(self, port, factory, backlog=50, interface=""):
        """
        Fake L{IReactorTCP.listenTCP}, that logs the call and
        returns an L{IListeningPort}.
        """
        self.tcpServers.append((port, factory, backlog, interface))
        if isIPv6Address(interface):
            address = IPv6Address("TCP", interface, port)
        else:
            address = IPv4Address("TCP", "0.0.0.0", port)
        return _FakePort(address)

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        Fake L{IReactorTCP.connectTCP}, that logs the call and
        returns an L{IConnector}.
        """
        self.tcpClients.append((host, port, factory, timeout, bindAddress))
        if isIPv6Address(host):
            conn = _FakeConnector(IPv6Address("TCP", host, port))
        else:
            conn = _FakeConnector(IPv4Address("TCP", host, port))
        factory.startedConnecting(conn)
        self.connectors.append(conn)
        return conn

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
        """
        Fake L{IReactorSSL.listenSSL}, that logs the call and
        returns an L{IListeningPort}.
        """
        self.sslServers.append((port, factory, contextFactory, backlog, interface))
        return _FakePort(IPv4Address("TCP", "0.0.0.0", port))

    def connectSSL(
        self, host, port, factory, contextFactory, timeout=30, bindAddress=None
    ):
        """
        Fake L{IReactorSSL.connectSSL}, that logs the call and returns an
        L{IConnector}.
        """
        self.sslClients.append(
            (host, port, factory, contextFactory, timeout, bindAddress)
        )
        conn = _FakeConnector(IPv4Address("TCP", host, port))
        factory.startedConnecting(conn)
        self.connectors.append(conn)
        return conn

    def listenUNIX(self, address, factory, backlog=50, mode=0o666, wantPID=0):
        """
        Fake L{IReactorUNIX.listenUNIX}, that logs the call and returns an
        L{IListeningPort}.
        """
        self.unixServers.append((address, factory, backlog, mode, wantPID))
        return _FakePort(UNIXAddress(address))

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """
        Fake L{IReactorUNIX.connectUNIX}, that logs the call and returns an
        L{IConnector}.
        """
        self.unixClients.append((address, factory, timeout, checkPID))
        conn = _FakeConnector(UNIXAddress(address))
        factory.startedConnecting(conn)
        self.connectors.append(conn)
        return conn

    def addReader(self, reader):
        """
        Fake L{IReactorFDSet.addReader} which adds the reader to a local set.
        """
        self.readers.add(reader)

    def removeReader(self, reader):
        """
        Fake L{IReactorFDSet.removeReader} which removes the reader from a
        local set.
        """
        self.readers.discard(reader)

    def addWriter(self, writer):
        """
        Fake L{IReactorFDSet.addWriter} which adds the writer to a local set.
        """
        self.writers.add(writer)

    def removeWriter(self, writer):
        """
        Fake L{IReactorFDSet.removeWriter} which removes the writer from a
        local set.
        """
        self.writers.discard(writer)

    def getReaders(self):
        """
        Fake L{IReactorFDSet.getReaders} which returns a list of readers from
        the local set.
        """
        return list(self.readers)

    def getWriters(self):
        """
        Fake L{IReactorFDSet.getWriters} which returns a list of writers from
        the local set.
        """
        return list(self.writers)

    def removeAll(self):
        """
        Fake L{IReactorFDSet.removeAll} which removed all readers and writers
        from the local sets.
        """
        self.readers.clear()
        self.writers.clear()
+
+
# Verify at import time that MemoryReactor really provides every interface
# it declares; verifyClass raises if a method is missing or has the wrong
# signature.
for iface in implementedBy(MemoryReactor):
    verifyClass(iface, MemoryReactor)
+
+
class MemoryReactorClock(MemoryReactor, Clock):
    """
    A L{MemoryReactor} which is also a L{Clock}, so tests can drive
    scheduled calls deterministically.
    """

    def __init__(self):
        MemoryReactor.__init__(self)
        Clock.__init__(self)
+
+
@implementer(IReactorTCP, IReactorSSL, IReactorUNIX, IReactorSocket)
class RaisingMemoryReactor:
    """
    A fake reactor for tests whose listen and connect methods always fail by
    raising preconfigured exceptions.

    @ivar _listenException: the exception instance raised by every
        C{listen*} method and C{adoptStreamPort}.
    @ivar _connectException: the exception instance raised by every
        C{connect*} method and the adopt-connection methods.
    """

    def __init__(self, listenException=None, connectException=None):
        """
        @param listenException: An instance of an L{Exception} to raise
            when any C{listen} method is called.

        @param connectException: An instance of an L{Exception} to raise
            when any C{connect} method is called.
        """
        self._listenException = listenException
        self._connectException = connectException

    def adoptStreamPort(self, fileno, addressFamily, factory):
        """
        Fake L{IReactorSocket.adoptStreamPort}: always raises
        L{_listenException}.
        """
        raise self._listenException

    def listenTCP(self, port, factory, backlog=50, interface=""):
        """
        Fake L{IReactorTCP.listenTCP}: always raises L{_listenException}.
        """
        raise self._listenException

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        Fake L{IReactorTCP.connectTCP}: always raises L{_connectException}.
        """
        raise self._connectException

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=""):
        """
        Fake L{IReactorSSL.listenSSL}: always raises L{_listenException}.
        """
        raise self._listenException

    def connectSSL(
        self, host, port, factory, contextFactory, timeout=30, bindAddress=None
    ):
        """
        Fake L{IReactorSSL.connectSSL}: always raises L{_connectException}.
        """
        raise self._connectException

    def listenUNIX(self, address, factory, backlog=50, mode=0o666, wantPID=0):
        """
        Fake L{IReactorUNIX.listenUNIX}: always raises L{_listenException}.
        """
        raise self._listenException

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """
        Fake L{IReactorUNIX.connectUNIX}: always raises L{_connectException}.
        """
        raise self._connectException

    def adoptDatagramPort(self, fileDescriptor, addressFamily, protocol, maxPacketSize):
        """
        Fake L{IReactorSocket.adoptDatagramPort}: always raises
        L{_connectException}.
        """
        raise self._connectException

    def adoptStreamConnection(self, fileDescriptor, addressFamily, factory):
        """
        Fake L{IReactorSocket.adoptStreamConnection}: always raises
        L{_connectException}.
        """
        raise self._connectException
+
+
class NonStreamingProducer:
    """
    A pull producer which writes its counter value exactly ten times, then
    unregisters itself from the consumer and fires L{result}.

    @ivar counter: number of writes performed so far.
    @ivar stopped: whether L{stopProducing} has been called.
    @ivar result: a L{Deferred} fired with L{None} once production ends.
    """

    counter = 0
    stopped = False

    def __init__(self, consumer):
        self.consumer = consumer
        self.result = Deferred()

    def resumeProducing(self):
        """
        Write the counter value once; after the tenth write, unregister from
        the consumer and finish.
        """
        consumer = self.consumer
        if consumer is None or self.counter >= 10:
            raise RuntimeError("BUG: resume after unregister/stop.")
        consumer.write(b"%d" % (self.counter,))
        self.counter += 1
        if self.counter == 10:
            consumer.unregisterProducer()
            self._done()

    def pauseProducing(self):
        """
        An implementation of C{IPushProducer.pauseProducing}.  A pull
        producer must never be paused, so this is always an error.
        """
        raise RuntimeError("BUG: pause should never be called.")

    def _done(self):
        """
        Detach from the consumer and fire L{result} so waiters know that
        production is complete.
        """
        self.consumer = None
        finished = self.result
        del self.result
        finished.callback(None)

    def stopProducing(self):
        """
        Stop all production.
        """
        self.stopped = True
        self._done()
+
+
def waitUntilAllDisconnected(reactor, protocols):
    """
    Take a list of disconnecting protocols, callback a L{Deferred} when they're
    all done.

    This is a hack to make some older tests less flaky, as
    L{ITransport.loseConnection} is not atomic on all reactors (for example,
    the CoreFoundation, which sometimes takes a reactor turn for CFSocket to
    realise). New tests should either not use real sockets in testing, or take
    the advice in
    I{https://jml.io/pages/how-to-disconnect-in-twisted-really.html} to heart.

    @param reactor: The reactor to schedule the checks on.
    @type reactor: L{IReactorTime}

    @param protocols: The protocols to wait for disconnecting.
    @type protocols: A L{list} of L{IProtocol}s.

    @return: the L{Deferred} returned by L{task.LoopingCall.start}, which
        fires once every protocol's transport reports it is no longer
        connected.
    """
    lc = None

    def _check():
        # Stop polling as soon as no transport reports itself connected.
        # any() short-circuits instead of materializing a list as the old
        # ``True not in [...]`` formulation did.
        if not any(proto.transport.connected for proto in protocols):
            lc.stop()

    lc = task.LoopingCall(_check)
    lc.clock = reactor
    # Poll every 10ms, starting immediately; stopping the loop fires the
    # returned Deferred.
    return lc.start(0.01, now=True)
+
+
@implementer(ILogObserver)
class EventLoggingObserver(Sequence[LogEvent]):
    """
    An L{ILogObserver} which appends every event it observes to a public
    list and exposes those events through the sequence protocol.  Similar to
    L{LimitedHistoryLogObserver}, except the buffer is intended for external
    inspection by tests.

    @ivar _events: The events captured by this observer
    @type _events: L{list}
    """

    def __init__(self) -> None:
        self._events: list[LogEvent] = []

    def __len__(self) -> int:
        return len(self._events)

    @overload
    def __getitem__(self, index: int) -> LogEvent:
        ...

    @overload
    def __getitem__(self, index: slice) -> Sequence[LogEvent]:
        ...

    def __getitem__(self, index: int | slice) -> LogEvent | Sequence[LogEvent]:
        return self._events[index]

    def __iter__(self) -> Iterator[LogEvent]:
        return iter(self._events)

    def __call__(self, event: LogEvent) -> None:
        """
        Record the observed event.

        @see: L{ILogObserver}
        """
        self._events.append(event)

    @classmethod
    def createWithCleanup(cls, testInstance: TestCase, publisher: LogPublisher) -> Self:
        """
        Create an L{EventLoggingObserver} observing C{publisher}, and
        arrange for it to be removed again via C{testInstance.addCleanup}.

        @param testInstance: Test instance in which this logger is used.
        @type testInstance: L{twisted.trial.unittest.TestCase}

        @param publisher: Log publisher to observe.
        @type publisher: twisted.logger.LogPublisher

        @return: An EventLoggingObserver configured to observe the provided
            publisher.
        @rtype: L{twisted.test.proto_helpers.EventLoggingObserver}
        """
        observer = cls()
        publisher.addObserver(observer)
        # addCleanup forwards extra arguments to the callable, so no lambda
        # is needed here.
        testInstance.addCleanup(publisher.removeObserver, observer)
        return observer
diff --git a/contrib/python/Twisted/py3/twisted/internet/threads.py b/contrib/python/Twisted/py3/twisted/internet/threads.py
new file mode 100644
index 0000000000..e9a49cbea8
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/threads.py
@@ -0,0 +1,144 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Extended thread dispatching support.
+
+For basic support see reactor threading API docs.
+"""
+
+from __future__ import annotations
+
+import queue as Queue
+from typing import Callable, TypeVar
+
+from typing_extensions import ParamSpec
+
+from twisted.internet import defer
+from twisted.internet.interfaces import IReactorFromThreads
+from twisted.python import failure
+from twisted.python.threadpool import ThreadPool
+
+_P = ParamSpec("_P")
+_R = TypeVar("_R")
+
+
def deferToThreadPool(
    reactor: IReactorFromThreads,
    threadpool: ThreadPool,
    f: Callable[_P, _R],
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> defer.Deferred[_R]:
    """
    Call C{f} on a thread from C{threadpool} and return its result as a
    L{defer.Deferred}.

    This is for client code maintaining its own threadpool; to use the
    reactor's pool, call C{deferToThread} instead.

    @param reactor: The reactor in whose main thread the Deferred will be
        invoked.

    @param threadpool: An object which supports the
        C{callInThreadWithCallback} method of
        C{twisted.python.threadpool.ThreadPool}.

    @param f: The function to call.
    @param args: positional arguments to pass to f.
    @param kwargs: keyword arguments to pass to f.

    @return: A Deferred which fires a callback with the result of f, or an
        errback with a L{twisted.python.failure.Failure} if f throws an
        exception.
    """
    deferred: defer.Deferred[_R] = defer.Deferred()

    def onResult(success: bool, result: _R | BaseException) -> None:
        # Hop back onto the reactor thread; only there is it safe to fire
        # the Deferred.
        fire = deferred.callback if success else deferred.errback
        reactor.callFromThread(fire, result)

    threadpool.callInThreadWithCallback(onResult, f, *args, **kwargs)

    return deferred
+
+
def deferToThread(
    f: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs
) -> defer.Deferred[_R]:
    """
    Run a function in a thread and return the result as a Deferred.

    @param f: The function to call.
    @param args: positional arguments to pass to f.
    @param kwargs: keyword arguments to pass to f.

    @return: A Deferred which fires a callback with the result of f,
        or an errback with a L{twisted.python.failure.Failure} if f throws
        an exception.
    """
    # Imported here to avoid installing a reactor at module import time.
    from twisted.internet import reactor

    return deferToThreadPool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
+
+
+def _runMultiple(tupleList):
+ """
+ Run a list of functions.
+ """
+ for f, args, kwargs in tupleList:
+ f(*args, **kwargs)
+
+
def callMultipleInThread(tupleList):
    """
    Run a list of functions in the same thread.

    tupleList should be a list of (function, argsList, kwargsDict) tuples.
    """
    # Imported here to avoid installing a reactor at module import time.
    from twisted.internet import reactor

    # All triples run sequentially in a single reactor-pool thread.
    reactor.callInThread(_runMultiple, tupleList)
+
+
def blockingCallFromThread(reactor, f, *a, **kw):
    """
    Run a function in the reactor from a thread, and wait for the result
    synchronously. If the function returns a L{Deferred}, wait for its
    result and return that.

    @param reactor: The L{IReactorThreads} provider which will be used to
        schedule the function call.
    @param f: the callable to run in the reactor thread
    @type f: any callable.
    @param a: the arguments to pass to C{f}.
    @param kw: the keyword arguments to pass to C{f}.

    @return: the result of the L{Deferred} returned by C{f}, or the result
        of C{f} if it returns anything other than a L{Deferred}.

    @raise Exception: If C{f} raises a synchronous exception,
        C{blockingCallFromThread} will raise that exception. If C{f}
        returns a L{Deferred} which fires with a L{Failure},
        C{blockingCallFromThread} will raise that failure's exception (see
        L{Failure.raiseException}).
    """
    resultQueue = Queue.Queue()

    def _callFromThread():
        # maybeDeferred absorbs synchronous results, exceptions, and
        # Deferreds alike; addBoth hands whatever arrives (value or
        # Failure) to the queue.
        defer.maybeDeferred(f, *a, **kw).addBoth(resultQueue.put)

    reactor.callFromThread(_callFromThread)
    # Block the calling (non-reactor) thread until the reactor delivers a
    # result.
    result = resultQueue.get()
    if isinstance(result, failure.Failure):
        result.raiseException()
    return result
+
+
+__all__ = [
+ "deferToThread",
+ "deferToThreadPool",
+ "callMultipleInThread",
+ "blockingCallFromThread",
+]
diff --git a/contrib/python/Twisted/py3/twisted/internet/tksupport.py b/contrib/python/Twisted/py3/twisted/internet/tksupport.py
new file mode 100644
index 0000000000..35550e0a48
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/tksupport.py
@@ -0,0 +1,78 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+This module integrates Tkinter with twisted.internet's mainloop.
+
+Maintainer: Itamar Shtull-Trauring
+
+To use, do::
+
+ | tksupport.install(rootWidget)
+
+and then run your reactor as usual - do *not* call Tk's mainloop(),
+use Twisted's regular mechanism for running the event loop.
+
+Likewise, to stop your program you will need to stop Twisted's
+event loop. For example, if you want closing your root widget to
+stop Twisted::
+
+ | root.protocol('WM_DELETE_WINDOW', reactor.stop)
+
+When using Aqua Tcl/Tk on macOS the standard Quit menu item in
+your application might become unresponsive without the additional
+fix::
+
+ | root.createcommand("::tk::mac::Quit", reactor.stop)
+
+@see: U{Tcl/TkAqua FAQ for more info<http://wiki.tcl.tk/12987>}
+"""
+
+import tkinter.messagebox as tkMessageBox
+import tkinter.simpledialog as tkSimpleDialog
+
+from twisted.internet import task
+
+_task = None
+
+
def install(widget, ms=10, reactor=None):
    """
    Install a Tkinter.Tk() object into the reactor.

    @param widget: The root Tk widget whose C{update} method will be driven
        periodically from the event loop.
    @param ms: Polling interval, in milliseconds, between C{widget.update}
        calls.
    @param reactor: The reactor used to schedule the polling loop; defaults
        to the global reactor.
    """
    installTkFunctions()
    global _task
    _task = task.LoopingCall(widget.update)
    if reactor is not None:
        # Previously the reactor parameter was accepted but silently
        # ignored; honour it by scheduling the LoopingCall on it.
        _task.clock = reactor
    _task.start(ms / 1000.0, False)
+
+
def uninstall():
    """
    Stop driving the root Tk widget from the reactor.

    Call this before destroy()ing the root widget.
    """
    global _task
    _task.stop()
    _task = None
+
+
def installTkFunctions():
    """
    Replace console-oriented utility hooks with Tk dialog equivalents.
    """
    import twisted.python.util

    # Route password prompts through the Tk dialog defined below.
    twisted.python.util.getPassword = getPassword
+
+
def getPassword(prompt="", confirm=0):
    """
    Prompt for a password with a Tk dialog; when C{confirm} is true, ask
    twice and loop until both entries match.
    """
    while True:
        first = tkSimpleDialog.askstring("Password Dialog", prompt, show="*")
        if not confirm:
            return first
        second = tkSimpleDialog.askstring(
            "Password Dialog", "Confirm Password", show="*"
        )
        if first == second:
            return first
        tkMessageBox.showerror(
            "Password Mismatch", "Passwords did not match, starting over"
        )
+
+
+__all__ = ["install", "uninstall"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/udp.py b/contrib/python/Twisted/py3/twisted/internet/udp.py
new file mode 100644
index 0000000000..7601f2dc84
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/udp.py
@@ -0,0 +1,533 @@
+# -*- test-case-name: twisted.test.test_udp -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Various asynchronous UDP classes.
+
+Please do not use this module directly.
+
+@var _sockErrReadIgnore: list of symbolic error constants (from the C{errno}
+ module) representing socket errors where the error is temporary and can be
+ ignored.
+
+@var _sockErrReadRefuse: list of symbolic error constants (from the C{errno}
+ module) representing socket errors that indicate connection refused.
+"""
+
+
+# System Imports
+import socket
+import struct
+import warnings
+from typing import Optional
+
+from zope.interface import implementer
+
+from twisted.python.runtime import platformType
+
+if platformType == "win32":
+ from errno import WSAEINPROGRESS # type: ignore[attr-defined]
+ from errno import WSAEWOULDBLOCK # type: ignore[attr-defined]
+ from errno import ( # type: ignore[attr-defined]
+ WSAECONNREFUSED,
+ WSAECONNRESET,
+ WSAEINTR,
+ WSAEMSGSIZE,
+ WSAENETRESET,
+ WSAENOPROTOOPT as ENOPROTOOPT,
+ WSAETIMEDOUT,
+ )
+
+ # Classify read and write errors
+ _sockErrReadIgnore = [WSAEINTR, WSAEWOULDBLOCK, WSAEMSGSIZE, WSAEINPROGRESS]
+ _sockErrReadRefuse = [WSAECONNREFUSED, WSAECONNRESET, WSAENETRESET, WSAETIMEDOUT]
+
+ # POSIX-compatible write errors
+ EMSGSIZE = WSAEMSGSIZE
+ ECONNREFUSED = WSAECONNREFUSED
+ EAGAIN = WSAEWOULDBLOCK
+ EINTR = WSAEINTR
+else:
+ from errno import EAGAIN, ECONNREFUSED, EINTR, EMSGSIZE, ENOPROTOOPT, EWOULDBLOCK
+
+ _sockErrReadIgnore = [EAGAIN, EINTR, EWOULDBLOCK]
+ _sockErrReadRefuse = [ECONNREFUSED]
+
+# Twisted Imports
+from twisted.internet import abstract, address, base, defer, error, interfaces
+from twisted.python import failure, log
+
+
@implementer(
    interfaces.IListeningPort, interfaces.IUDPTransport, interfaces.ISystemHandle
)
class Port(base.BasePort):
    """
    UDP port, listening for packets.

    @ivar maxThroughput: Maximum number of bytes read in one event
        loop iteration.

    @ivar addressFamily: L{socket.AF_INET} or L{socket.AF_INET6}, depending on
        whether this port is listening on an IPv4 address or an IPv6 address.

    @ivar _realPortNumber: Actual port number being listened on. The
        value will be L{None} until this L{Port} is listening.

    @ivar _preexistingSocket: If not L{None}, a L{socket.socket} instance which
        was created and initialized outside of the reactor and will be used to
        listen for connections (instead of a new socket being created by this
        L{Port}).
    """

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_DGRAM
    maxThroughput = 256 * 1024

    _realPortNumber: Optional[int] = None
    _preexistingSocket = None

    def __init__(self, port, proto, interface="", maxPacketSize=8192, reactor=None):
        """
        @param port: A port number on which to listen.
        @type port: L{int}

        @param proto: A C{DatagramProtocol} instance which will be
            connected to the given C{port}.
        @type proto: L{twisted.internet.protocol.DatagramProtocol}

        @param interface: The local IPv4 or IPv6 address to which to bind;
            defaults to '', ie all IPv4 addresses.
        @type interface: L{str}

        @param maxPacketSize: The maximum packet size to accept.
        @type maxPacketSize: L{int}

        @param reactor: A reactor which will notify this C{Port} when
            its socket is ready for reading or writing. Defaults to
            L{None}, ie the default global reactor.
        @type reactor: L{interfaces.IReactorFDSet}
        """
        base.BasePort.__init__(self, reactor)
        self.port = port
        self.protocol = proto
        self.maxPacketSize = maxPacketSize
        self.interface = interface
        self.setLogStr()
        self._connectedAddr = None
        self._setAddressFamily()

    @classmethod
    def _fromListeningDescriptor(
        cls, reactor, fd, addressFamily, protocol, maxPacketSize
    ):
        """
        Create a new L{Port} based on an existing listening
        I{SOCK_DGRAM} socket.

        @param reactor: A reactor which will notify this L{Port} when
            its socket is ready for reading or writing. Defaults to
            L{None}, ie the default global reactor.
        @type reactor: L{interfaces.IReactorFDSet}

        @param fd: An integer file descriptor associated with a listening
            socket. The socket must be in non-blocking mode. Any additional
            attributes desired, such as I{FD_CLOEXEC}, must also be set already.
        @type fd: L{int}

        @param addressFamily: The address family (sometimes called I{domain}) of
            the existing socket. For example, L{socket.AF_INET}.
        @type addressFamily: L{int}

        @param protocol: A C{DatagramProtocol} instance which will be
            connected to the C{port}.
        @type protocol: L{twisted.internet.protocol.DatagramProtocol}

        @param maxPacketSize: The maximum packet size to accept.
        @type maxPacketSize: L{int}

        @return: A new instance of C{cls} wrapping the socket given by C{fd}.
        @rtype: L{Port}
        """
        port = socket.fromfd(fd, addressFamily, cls.socketType)
        interface = port.getsockname()[0]
        self = cls(
            None,
            protocol,
            interface=interface,
            reactor=reactor,
            maxPacketSize=maxPacketSize,
        )
        self._preexistingSocket = port
        return self

    def __repr__(self) -> str:
        if self._realPortNumber is not None:
            return f"<{self.protocol.__class__} on {self._realPortNumber}>"
        else:
            return f"<{self.protocol.__class__} not connected>"

    def getHandle(self):
        """
        Return a socket object.
        """
        return self.socket

    def startListening(self):
        """
        Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        self._bindSocket()
        self._connectToProtocol()

    def _bindSocket(self):
        """
        Prepare and assign a L{socket.socket} instance to
        C{self.socket}.

        Either creates a new SOCK_DGRAM L{socket.socket} bound to
        C{self.interface} and C{self.port} or takes an existing
        L{socket.socket} provided via the
        L{interfaces.IReactorSocket.adoptDatagramPort} interface.
        """
        if self._preexistingSocket is None:
            # Create a new socket and make it listen
            try:
                skt = self.createInternetSocket()
                skt.bind((self.interface, self.port))
            except OSError as le:
                raise error.CannotListenError(self.interface, self.port, le)
        else:
            # Re-use the externally specified socket
            skt = self._preexistingSocket
            self._preexistingSocket = None

        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]

        log.msg(
            "%s starting on %s"
            % (self._getLogPrefix(self.protocol), self._realPortNumber)
        )

        self.connected = 1
        self.socket = skt
        self.fileno = self.socket.fileno

    def _connectToProtocol(self):
        # Tell the protocol we are its transport, then start watching the
        # socket for readability.
        self.protocol.makeConnection(self)
        self.startReading()

    def doRead(self):
        """
        Called when my socket is ready for reading.
        """
        read = 0
        while read < self.maxThroughput:
            try:
                data, addr = self.socket.recvfrom(self.maxPacketSize)
            except OSError as se:
                no = se.args[0]
                if no in _sockErrReadIgnore:
                    return
                if no in _sockErrReadRefuse:
                    if self._connectedAddr:
                        self.protocol.connectionRefused()
                    return
                raise
            else:
                read += len(data)
                if self.addressFamily == socket.AF_INET6:
                    # Remove the flow and scope ID from the address tuple,
                    # reducing it to a tuple of just (host, port).
                    #
                    # TODO: This should be amended to return an object that can
                    # unpack to (host, port) but also includes the flow info
                    # and scope ID. See http://tm.tl/6826
                    addr = addr[:2]
                try:
                    self.protocol.datagramReceived(data, addr)
                except BaseException:
                    log.err()

    def write(self, datagram, addr=None):
        """
        Write a datagram.

        @type datagram: L{bytes}
        @param datagram: The datagram to be sent.

        @type addr: L{tuple} containing L{str} as first element and L{int} as
            second element, or L{None}
        @param addr: A tuple of (I{stringified IPv4 or IPv6 address},
            I{integer port number}); can be L{None} in connected mode.
        """
        if self._connectedAddr:
            assert addr in (None, self._connectedAddr)
            try:
                return self.socket.send(datagram)
            except OSError as se:
                no = se.args[0]
                if no == EINTR:
                    # Retry interrupted system calls.
                    return self.write(datagram)
                elif no == EMSGSIZE:
                    raise error.MessageLengthError("message too long")
                elif no == ECONNREFUSED:
                    self.protocol.connectionRefused()
                else:
                    raise
        else:
            # Identity comparison per PEP 8 (the former "addr != None" could
            # be fooled by an __eq__ override on addr).
            assert addr is not None
            if (
                not abstract.isIPAddress(addr[0])
                and not abstract.isIPv6Address(addr[0])
                and addr[0] != "<broadcast>"
            ):
                raise error.InvalidAddressError(
                    addr[0], "write() only accepts IP addresses, not hostnames"
                )
            if (
                abstract.isIPAddress(addr[0]) or addr[0] == "<broadcast>"
            ) and self.addressFamily == socket.AF_INET6:
                raise error.InvalidAddressError(
                    addr[0], "IPv6 port write() called with IPv4 or broadcast address"
                )
            if abstract.isIPv6Address(addr[0]) and self.addressFamily == socket.AF_INET:
                raise error.InvalidAddressError(
                    addr[0], "IPv4 port write() called with IPv6 address"
                )
            try:
                return self.socket.sendto(datagram, addr)
            except OSError as se:
                no = se.args[0]
                if no == EINTR:
                    return self.write(datagram, addr)
                elif no == EMSGSIZE:
                    raise error.MessageLengthError("message too long")
                elif no == ECONNREFUSED:
                    # in non-connected UDP ECONNREFUSED is platform dependent, I
                    # think and the info is not necessarily useful. Nevertheless
                    # maybe we should call connectionRefused? XXX
                    return
                else:
                    raise

    def writeSequence(self, seq, addr):
        """
        Write a datagram constructed from an iterable of L{bytes}.

        @param seq: The data that will make up the complete datagram to be
            written.
        @type seq: an iterable of L{bytes}

        @type addr: L{tuple} containing L{str} as first element and L{int} as
            second element, or L{None}
        @param addr: A tuple of (I{stringified IPv4 or IPv6 address},
            I{integer port number}); can be L{None} in connected mode.
        """
        self.write(b"".join(seq), addr)

    def connect(self, host, port):
        """
        'Connect' to remote server.
        """
        if self._connectedAddr:
            raise RuntimeError(
                "already connected, reconnecting is not currently supported"
            )
        if not abstract.isIPAddress(host) and not abstract.isIPv6Address(host):
            raise error.InvalidAddressError(host, "not an IPv4 or IPv6 address.")
        self._connectedAddr = (host, port)
        self.socket.connect((host, port))

    def _loseConnection(self):
        self.stopReading()
        if self.connected:  # actually means if we are *listening*
            self.reactor.callLater(0, self.connectionLost)

    def stopListening(self):
        # Returns a Deferred that fires from connectionLost when the port
        # is actually closed, or None if we were never listening.
        if self.connected:
            result = self.d = defer.Deferred()
        else:
            result = None
        self._loseConnection()
        return result

    def loseConnection(self):
        warnings.warn(
            "Please use stopListening() to disconnect port",
            DeprecationWarning,
            stacklevel=2,
        )
        self.stopListening()

    def connectionLost(self, reason=None):
        """
        Cleans up my socket.
        """
        log.msg("(UDP Port %s Closed)" % self._realPortNumber)
        self._realPortNumber = None
        self.maxThroughput = -1
        base.BasePort.connectionLost(self, reason)
        self.protocol.doStop()
        self.socket.close()
        del self.socket
        del self.fileno
        if hasattr(self, "d"):
            # Fire the Deferred handed out by stopListening().
            self.d.callback(None)
            del self.d

    def setLogStr(self):
        """
        Initialize the C{logstr} attribute to be used by C{logPrefix}.
        """
        logPrefix = self._getLogPrefix(self.protocol)
        self.logstr = "%s (UDP)" % logPrefix

    def _setAddressFamily(self):
        """
        Resolve address family for the socket.
        """
        if abstract.isIPv6Address(self.interface):
            self.addressFamily = socket.AF_INET6
        elif abstract.isIPAddress(self.interface):
            self.addressFamily = socket.AF_INET
        elif self.interface:
            raise error.InvalidAddressError(
                self.interface, "not an IPv4 or IPv6 address."
            )

    def logPrefix(self):
        """
        Return the prefix to log with.
        """
        return self.logstr

    def getHost(self):
        """
        Return the local address of the UDP connection

        @returns: the local address of the UDP connection
        @rtype: L{IPv4Address} or L{IPv6Address}
        """
        addr = self.socket.getsockname()
        if self.addressFamily == socket.AF_INET:
            return address.IPv4Address("UDP", *addr)
        elif self.addressFamily == socket.AF_INET6:
            return address.IPv6Address("UDP", *(addr[:2]))

    def setBroadcastAllowed(self, enabled):
        """
        Set whether this port may broadcast. This is disabled by default.

        @param enabled: Whether the port may broadcast.
        @type enabled: L{bool}
        """
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, enabled)

    def getBroadcastAllowed(self):
        """
        Checks if broadcast is currently allowed on this port.

        @return: Whether this port may broadcast.
        @rtype: L{bool}
        """
        return bool(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST))
+
+
class MulticastMixin:
    """
    Implement multicast functionality.
    """

    def getOutgoingInterface(self):
        """
        Return the dotted-quad address of the interface used for outgoing
        multicast traffic.
        """
        # IP_MULTICAST_IF comes back as a packed native int; render it.
        i = self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF)
        return socket.inet_ntoa(struct.pack("@i", i))

    def setOutgoingInterface(self, addr):
        """Returns Deferred of success."""
        return self.reactor.resolve(addr).addCallback(self._setInterface)

    def _setInterface(self, addr):
        # addr is a resolved dotted-quad string; pack it for setsockopt.
        i = socket.inet_aton(addr)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
        return 1

    def getLoopbackMode(self):
        """
        Return whether outgoing multicast datagrams are looped back.
        """
        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP)

    def setLoopbackMode(self, mode):
        """
        Enable or disable loopback of outgoing multicast datagrams.
        """
        mode = struct.pack("b", bool(mode))
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, mode)

    def getTTL(self):
        """
        Return the time-to-live of outgoing multicast datagrams.
        """
        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL)

    def setTTL(self, ttl):
        """
        Set the time-to-live for outgoing multicast datagrams.
        """
        ttl = struct.pack("B", ttl)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)

    def joinGroup(self, addr, interface=""):
        """Join a multicast group. Returns Deferred of success."""
        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 1)

    def _joinAddr1(self, addr, interface, join):
        # Resolve the interface as well before issuing the setsockopt.
        return self.reactor.resolve(interface).addCallback(self._joinAddr2, addr, join)

    def _joinAddr2(self, interface, addr, join):
        addr = socket.inet_aton(addr)
        interface = socket.inet_aton(interface)
        if join:
            cmd = socket.IP_ADD_MEMBERSHIP
        else:
            cmd = socket.IP_DROP_MEMBERSHIP
        try:
            # Option value is the packed group address followed by the
            # packed interface address.
            self.socket.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
        except OSError as e:
            # Returned (not raised) so the Deferred chain started by
            # joinGroup/leaveGroup errbacks with the Failure.
            return failure.Failure(error.MulticastJoinError(addr, interface, *e.args))

    def leaveGroup(self, addr, interface=""):
        """Leave multicast group, return Deferred of success."""
        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 0)
+
+
@implementer(interfaces.IMulticastTransport)
class MulticastPort(MulticastMixin, Port):
    """
    UDP Port that supports multicasting.
    """

    def __init__(
        self,
        port,
        proto,
        interface="",
        maxPacketSize=8192,
        reactor=None,
        listenMultiple=False,
    ):
        """
        @see: L{twisted.internet.interfaces.IReactorMulticast.listenMulticast}
        """
        Port.__init__(self, port, proto, interface, maxPacketSize, reactor)
        # When true, SO_REUSEADDR (and SO_REUSEPORT where available) are set
        # in createInternetSocket so multiple sockets can bind this port.
        self.listenMultiple = listenMultiple

    def createInternetSocket(self):
        skt = Port.createInternetSocket(self)
        if self.listenMultiple:
            skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            if hasattr(socket, "SO_REUSEPORT"):
                try:
                    skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
                except OSError as le:
                    # RHEL6 defines SO_REUSEPORT but it doesn't work
                    if le.errno == ENOPROTOOPT:
                        pass
                    else:
                        raise
        return skt
diff --git a/contrib/python/Twisted/py3/twisted/internet/unix.py b/contrib/python/Twisted/py3/twisted/internet/unix.py
new file mode 100644
index 0000000000..c3fe62b22d
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/unix.py
@@ -0,0 +1,645 @@
+# -*- test-case-name: twisted.test.test_unix,twisted.internet.test.test_unix,twisted.internet.test.test_posixbase -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+UNIX socket support for Twisted.
+
+End users shouldn't use this module directly - use the reactor APIs instead.
+
+Maintainer: Itamar Shtull-Trauring
+"""
+
+
+import os
+import socket
+import stat
+import struct
+from errno import EAGAIN, ECONNREFUSED, EINTR, EMSGSIZE, ENOBUFS, EWOULDBLOCK
+from typing import Optional, Type
+
+from zope.interface import implementedBy, implementer, implementer_only
+
+from twisted.internet import address, base, error, interfaces, main, protocol, tcp, udp
+from twisted.internet.abstract import FileDescriptor
+from twisted.python import failure, lockfile, log, reflect
+from twisted.python.compat import lazyByteSlice
+from twisted.python.filepath import _coerceToFilesystemEncoding
+from twisted.python.util import untilConcludes
+
try:
    from twisted.python import sendmsg as _sendmsg
except ImportError:
    # Without the sendmsg extension, file-descriptor passing is disabled
    # (see the _SendmsgMixin selection further down).
    sendmsg = None
else:
    sendmsg = _sendmsg

if not hasattr(socket, "AF_UNIX"):
    raise ImportError("UNIX sockets not supported on this platform")
+
+
def _ancillaryDescriptor(fd):
    """
    Pack an integer file descriptor into an ancillary data structure
    suitable for use with L{sendmsg.sendmsg}.
    """
    encoded = struct.pack("i", fd)
    return [(socket.SOL_SOCKET, sendmsg.SCM_RIGHTS, encoded)]
+
+
class _SendmsgMixin:
    """
    Mixin for stream-oriented UNIX transports which uses sendmsg and recvmsg to
    offer additional functionality, such as copying file descriptors into other
    processes.

    @ivar _writeSomeDataBase: The class which provides the basic implementation
        of C{writeSomeData}. Ultimately this should be a subclass of
        L{twisted.internet.abstract.FileDescriptor}. Subclasses which mix in
        L{_SendmsgMixin} must define this.

    @ivar _sendmsgQueue: A C{list} of C{int} holding file descriptors which are
        currently buffered before being sent.

    @ivar _fileDescriptorBufferSize: An C{int} giving the maximum number of file
        descriptors to accept and queue for sending before pausing the
        registered producer, if there is one.
    """

    _writeSomeDataBase: Optional[Type[FileDescriptor]] = None
    _fileDescriptorBufferSize = 64

    def __init__(self):
        self._sendmsgQueue = []

    def _isSendBufferFull(self):
        """
        Determine whether the user-space send buffer for this transport is full
        or not.

        This extends the base determination by adding consideration of how many
        file descriptors need to be sent using L{sendmsg.sendmsg}. When there
        are more than C{self._fileDescriptorBufferSize}, the buffer is
        considered full.

        @return: C{True} if it is full, C{False} otherwise.
        """
        # There must be some bytes in the normal send buffer, checked by
        # _writeSomeDataBase._isSendBufferFull, in order to send file
        # descriptors from _sendmsgQueue. That means that the buffer will
        # eventually be considered full even without this additional logic.
        # However, since we send only one byte per file descriptor, having lots
        # of elements in _sendmsgQueue incurs more overhead and perhaps slows
        # things down. Anyway, try this for now, maybe rethink it later.
        return len(
            self._sendmsgQueue
        ) > self._fileDescriptorBufferSize or self._writeSomeDataBase._isSendBufferFull(
            self
        )

    def sendFileDescriptor(self, fileno):
        """
        Queue the given file descriptor to be sent and start trying to send it.
        """
        self._sendmsgQueue.append(fileno)
        self._maybePauseProducer()
        self.startWriting()

    def writeSomeData(self, data):
        """
        Send as much of C{data} as possible. Also send any pending file
        descriptors.
        """
        # Make it a programming error to send more file descriptors than you
        # send regular bytes. Otherwise, due to the limitation mentioned
        # below, we could end up with file descriptors left, but no bytes to
        # send with them, therefore no way to send those file descriptors.
        if len(self._sendmsgQueue) > len(data):
            return error.FileDescriptorOverrun()

        # If there are file descriptors to send, try sending them first, using
        # a little bit of data from the stream-oriented write buffer too. It
        # is not possible to send a file descriptor without sending some
        # regular data.
        index = 0
        try:
            while index < len(self._sendmsgQueue):
                fd = self._sendmsgQueue[index]
                try:
                    # One byte of payload accompanies each descriptor.
                    untilConcludes(
                        sendmsg.sendmsg,
                        self.socket,
                        data[index : index + 1],
                        _ancillaryDescriptor(fd),
                    )
                except OSError as se:
                    if se.args[0] in (EWOULDBLOCK, ENOBUFS):
                        return index
                    else:
                        return main.CONNECTION_LOST
                else:
                    index += 1
        finally:
            # Drop whatever was successfully sent, even on early return.
            del self._sendmsgQueue[:index]

        # Hand the remaining data to the base implementation. Avoid slicing in
        # favor of a buffer, in case that happens to be any faster.
        limitedData = lazyByteSlice(data, index)
        result = self._writeSomeDataBase.writeSomeData(self, limitedData)
        try:
            return index + result
        except TypeError:
            # result may be a ConnectionLost marker rather than a count.
            return result

    def doRead(self):
        """
        Calls L{IProtocol.dataReceived} with all available data and
        L{IFileDescriptorReceiver.fileDescriptorReceived} once for each
        received file descriptor in ancillary data.

        This reads up to C{self.bufferSize} bytes of data from its socket, then
        dispatches the data to protocol callbacks to be handled. If the
        connection is not lost through an error in the underlying recvmsg(),
        this function will return the result of the dataReceived call.
        """
        try:
            data, ancillary, flags = untilConcludes(
                sendmsg.recvmsg, self.socket, self.bufferSize
            )
        except OSError as se:
            if se.args[0] == EWOULDBLOCK:
                return
            else:
                return main.CONNECTION_LOST

        for cmsgLevel, cmsgType, cmsgData in ancillary:
            if cmsgLevel == socket.SOL_SOCKET and cmsgType == sendmsg.SCM_RIGHTS:
                self._ancillaryLevelSOLSOCKETTypeSCMRIGHTS(cmsgData)
            else:
                log.msg(
                    format=(
                        "%(protocolName)s (on %(hostAddress)r) "
                        "received unsupported ancillary data "
                        "(level=%(cmsgLevel)r, type=%(cmsgType)r) "
                        "from %(peerAddress)r."
                    ),
                    hostAddress=self.getHost(),
                    peerAddress=self.getPeer(),
                    protocolName=self._getLogPrefix(self.protocol),
                    cmsgLevel=cmsgLevel,
                    cmsgType=cmsgType,
                )

        return self._dataReceived(data)

    def _ancillaryLevelSOLSOCKETTypeSCMRIGHTS(self, cmsgData):
        """
        Processes ancillary data with level SOL_SOCKET and type SCM_RIGHTS,
        indicating that the ancillary data payload holds file descriptors.

        Calls L{IFileDescriptorReceiver.fileDescriptorReceived} once for each
        received file descriptor or logs a message if the protocol does not
        implement L{IFileDescriptorReceiver}.

        @param cmsgData: Ancillary data payload.
        @type cmsgData: L{bytes}
        """

        # The payload is a packed array of native ints (4 bytes each).
        fdCount = len(cmsgData) // 4
        fds = struct.unpack("i" * fdCount, cmsgData)
        if interfaces.IFileDescriptorReceiver.providedBy(self.protocol):
            for fd in fds:
                self.protocol.fileDescriptorReceived(fd)
        else:
            log.msg(
                format=(
                    "%(protocolName)s (on %(hostAddress)r) does not "
                    "provide IFileDescriptorReceiver; closing file "
                    "descriptor received (from %(peerAddress)r)."
                ),
                hostAddress=self.getHost(),
                peerAddress=self.getPeer(),
                protocolName=self._getLogPrefix(self.protocol),
            )
            # Close unwanted descriptors so they do not leak into this process.
            for fd in fds:
                os.close(fd)
+
+
+class _UnsupportedSendmsgMixin:
+ """
+ Behaviorless placeholder used when C{twisted.python.sendmsg} is not
+ available, preventing L{IUNIXTransport} from being supported.
+ """
+
+
# Select the real sendmsg-based mixin when the extension imported above is
# available; otherwise fall back to the inert placeholder.
if sendmsg:
    _SendmsgMixin = _SendmsgMixin
else:
    _SendmsgMixin = _UnsupportedSendmsgMixin  # type: ignore[assignment,misc]
+
+
@implementer(interfaces.IUNIXTransport)
class Server(_SendmsgMixin, tcp.Server):
    """
    Server-side transport for an accepted UNIX stream connection, with
    file-descriptor passing supplied by L{_SendmsgMixin} when the sendmsg
    extension is available.
    """

    # Used by _SendmsgMixin.writeSomeData to delegate ordinary byte writes.
    _writeSomeDataBase = tcp.Server

    def __init__(self, sock, protocol, client, server, sessionno, reactor):
        _SendmsgMixin.__init__(self)
        # tcp.Server expects a (host, port)-style client tuple; UNIX sockets
        # have no port component, hence (client, None).
        tcp.Server.__init__(
            self, sock, protocol, (client, None), server, sessionno, reactor
        )

    @classmethod
    def _fromConnectedSocket(cls, fileDescriptor, factory, reactor):
        """
        Create a new L{Server} based on an existing connected I{SOCK_STREAM}
        socket.

        Arguments are the same as to L{Server.__init__}, except where noted.

        @param fileDescriptor: An integer file descriptor associated with a
            connected socket. The socket must be in non-blocking mode. Any
            additional attributes desired, such as I{FD_CLOEXEC}, must also be
            set already.

        @return: A new instance of C{cls} wrapping the socket given by
            C{fileDescriptor}.
        """
        skt = socket.fromfd(fileDescriptor, socket.AF_UNIX, socket.SOCK_STREAM)
        protocolAddr = address.UNIXAddress(skt.getsockname())

        proto = factory.buildProtocol(protocolAddr)
        if proto is None:
            # The factory declined the connection; release the socket.
            skt.close()
            return

        # FIXME: is this a suitable sessionno?
        sessionno = 0
        self = cls(skt, proto, skt.getpeername(), None, sessionno, reactor)
        self.repstr = "<{} #{} on {}>".format(
            self.protocol.__class__.__name__,
            self.sessionno,
            skt.getsockname(),
        )
        self.logstr = "{},{},{}".format(
            self.protocol.__class__.__name__,
            self.sessionno,
            skt.getsockname(),
        )
        proto.makeConnection(self)
        return self

    def getHost(self):
        return address.UNIXAddress(self.socket.getsockname())

    def getPeer(self):
        # hostname may be an empty string for unnamed peers; normalise to None.
        return address.UNIXAddress(self.hostname or None)
+
+
+def _inFilesystemNamespace(path):
+ """
+ Determine whether the given unix socket path is in a filesystem namespace.
+
+ While most PF_UNIX sockets are entries in the filesystem, Linux 2.2 and
+ above support PF_UNIX sockets in an "abstract namespace" that does not
+ correspond to any path. This function returns C{True} if the given socket
+ path is stored in the filesystem and C{False} if the path is in this
+ abstract namespace.
+ """
+ return path[:1] not in (b"\0", "\0")
+
+
class _UNIXPort:
    def getHost(self):
        """
        Return a UNIXAddress describing the path this server socket is
        bound to.
        """
        boundName = self.socket.getsockname()
        return address.UNIXAddress(boundName)
+
+
class Port(_UNIXPort, tcp.Port):
    """
    A UNIX-domain stream socket listening port.
    """

    addressFamily = socket.AF_UNIX
    socketType = socket.SOCK_STREAM

    transport = Server
    # FilesystemLock guarding the socket path when wantPID is set.
    lockFile = None

    def __init__(
        self, fileName, factory, backlog=50, mode=0o666, reactor=None, wantPID=0
    ):
        tcp.Port.__init__(
            self, self._buildAddr(fileName).name, factory, backlog, reactor=reactor
        )
        self.mode = mode
        self.wantPID = wantPID
        self._preexistingSocket = None

    @classmethod
    def _fromListeningDescriptor(cls, reactor, fd, factory):
        """
        Create a new L{Port} based on an existing listening I{SOCK_STREAM}
        socket.

        Arguments are the same as to L{Port.__init__}, except where noted.

        @param fd: An integer file descriptor associated with a listening
            socket. The socket must be in non-blocking mode. Any additional
            attributes desired, such as I{FD_CLOEXEC}, must also be set already.

        @return: A new instance of C{cls} wrapping the socket given by C{fd}.
        """
        port = socket.fromfd(fd, cls.addressFamily, cls.socketType)
        self = cls(port.getsockname(), factory, reactor=reactor)
        self._preexistingSocket = port
        return self

    def __repr__(self) -> str:
        factoryName = reflect.qual(self.factory.__class__)
        if hasattr(self, "socket"):
            return "<{} on {!r}>".format(
                factoryName,
                _coerceToFilesystemEncoding("", self.port),
            )
        else:
            return f"<{factoryName} (not listening)>"

    def _buildAddr(self, name):
        return address.UNIXAddress(name)

    def startListening(self):
        """
        Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        tcp._reservedFD.reserve()
        log.msg(
            "%s starting on %r"
            % (
                self._getLogPrefix(self.factory),
                _coerceToFilesystemEncoding("", self.port),
            )
        )
        if self.wantPID:
            # Guard the socket path with a lock file so stale sockets from a
            # crashed process can be detected and removed below.
            self.lockFile = lockfile.FilesystemLock(self.port + b".lock")
            if not self.lockFile.lock():
                raise error.CannotListenError(None, self.port, "Cannot acquire lock")
            else:
                if not self.lockFile.clean:
                    try:
                        # This is a best-attempt at cleaning up
                        # left-over unix sockets on the filesystem.
                        # If it fails, there's not much else we can
                        # do. The bind() below will fail with an
                        # exception that actually propagates.
                        if stat.S_ISSOCK(os.stat(self.port).st_mode):
                            os.remove(self.port)
                    except BaseException:
                        pass

        self.factory.doStart()

        try:
            if self._preexistingSocket is not None:
                # Adopted via _fromListeningDescriptor; already bound.
                skt = self._preexistingSocket
                self._preexistingSocket = None
            else:
                skt = self.createInternetSocket()
                skt.bind(self.port)
        except OSError as le:
            raise error.CannotListenError(None, self.port, le)
        else:
            if _inFilesystemNamespace(self.port):
                # Make the socket readable and writable to the world.
                os.chmod(self.port, self.mode)
            skt.listen(self.backlog)
            self.connected = True
            self.socket = skt
            self.fileno = self.socket.fileno
            self.numberAccepts = 100
            self.startReading()

    def _logConnectionLostMsg(self):
        """
        Log message for closing socket
        """
        log.msg(
            "(UNIX Port %s Closed)"
            % (
                _coerceToFilesystemEncoding(
                    "",
                    self.port,
                )
            )
        )

    def connectionLost(self, reason):
        # Abstract-namespace sockets have no filesystem entry to remove.
        if _inFilesystemNamespace(self.port):
            os.unlink(self.port)
        if self.lockFile is not None:
            self.lockFile.unlock()
        tcp.Port.connectionLost(self, reason)
+
+
@implementer(interfaces.IUNIXTransport)
class Client(_SendmsgMixin, tcp.BaseClient):
    """A client for Unix sockets."""

    addressFamily = socket.AF_UNIX
    socketType = socket.SOCK_STREAM
    # Used by _SendmsgMixin.writeSomeData to delegate ordinary byte writes.
    _writeSomeDataBase = tcp.BaseClient

    def __init__(self, filename, connector, reactor=None, checkPID=0):
        _SendmsgMixin.__init__(self)
        # Normalise the filename using UNIXAddress
        filename = address.UNIXAddress(filename).name
        self.connector = connector
        self.realAddress = self.addr = filename
        if checkPID and not lockfile.isLocked(filename + b".lock"):
            # NOTE(review): _finishInit is invoked with BadFileError here and
            # then unconditionally invoked again below (no early return), so
            # both calls run when the lock check fails — confirm intentional.
            self._finishInit(None, None, error.BadFileError(filename), reactor)
        self._finishInit(self.doConnect, self.createInternetSocket(), None, reactor)

    def getPeer(self):
        # The peer of a UNIX client is the path it connected to.
        return address.UNIXAddress(self.addr)

    def getHost(self):
        # Client sockets are unnamed; report a None path.
        return address.UNIXAddress(None)
+
+
class Connector(base.BaseConnector):
    """
    A connector which initiates client connections over UNIX sockets.
    """

    def __init__(self, address, factory, timeout, reactor, checkPID):
        super().__init__(factory, timeout, reactor)
        self.address = address
        self.checkPID = checkPID

    def _makeTransport(self):
        """
        Build the L{Client} transport for one connection attempt.
        """
        return Client(self.address, self, self.reactor, self.checkPID)

    def getDestination(self):
        """
        Return the destination of this connection as a UNIX address.
        """
        return address.UNIXAddress(self.address)
+
+
@implementer(interfaces.IUNIXDatagramTransport)
class DatagramPort(_UNIXPort, udp.Port):
    """
    Datagram UNIX port, listening for packets.
    """

    addressFamily = socket.AF_UNIX

    def __init__(self, addr, proto, maxPacketSize=8192, mode=0o666, reactor=None):
        """Initialize with address to listen on."""
        udp.Port.__init__(
            self, addr, proto, maxPacketSize=maxPacketSize, reactor=reactor
        )
        # Permission bits chmod'ed onto the socket file in _bindSocket()
        # (only for filesystem-namespace addresses).
        self.mode = mode

    def __repr__(self) -> str:
        protocolName = reflect.qual(
            self.protocol.__class__,
        )
        # The "socket" attribute only exists after a successful _bindSocket().
        if hasattr(self, "socket"):
            return f"<{protocolName} on {self.port!r}>"
        else:
            return f"<{protocolName} (not listening)>"

    def _bindSocket(self):
        """
        Create the datagram socket, bind it when a path was supplied, and
        record it on the instance.

        @raise error.CannotListenError: if the underlying bind() fails.
        """
        log.msg(f"{self.protocol.__class__} starting on {repr(self.port)}")
        try:
            skt = self.createInternetSocket()  # XXX: haha misnamed method
            if self.port:
                skt.bind(self.port)
        except OSError as le:
            raise error.CannotListenError(None, self.port, le)
        if self.port and _inFilesystemNamespace(self.port):
            # Make the socket readable and writable to the world.
            os.chmod(self.port, self.mode)
        self.connected = 1
        self.socket = skt
        self.fileno = self.socket.fileno

    def write(self, datagram, address):
        """Write a datagram."""
        try:
            return self.socket.sendto(datagram, address)
        except OSError as se:
            no = se.args[0]
            if no == EINTR:
                # Interrupted system call: retry the send.
                return self.write(datagram, address)
            elif no == EMSGSIZE:
                raise error.MessageLengthError("message too long")
            elif no == EAGAIN:
                # oh, well, drop the data. The only difference from UDP
                # is that UDP won't ever notice.
                # TODO: add TCP-like buffering
                pass
            else:
                raise

    def connectionLost(self, reason=None):
        """Cleans up my socket."""
        log.msg("(Port %s Closed)" % repr(self.port))
        base.BasePort.connectionLost(self, reason)
        if hasattr(self, "protocol"):
            # we won't have attribute in ConnectedPort, in cases
            # where there was an error in connection process
            self.protocol.doStop()
        self.connected = 0
        self.socket.close()
        del self.socket
        del self.fileno
        # "d" appears to be a deferred set elsewhere to observe the stop —
        # fired here once teardown is complete (confirm where it is set).
        if hasattr(self, "d"):
            self.d.callback(None)
            del self.d

    def setLogStr(self):
        # Human-readable identifier used by the logging system for this port.
        self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
+
+
@implementer_only(
    interfaces.IUNIXDatagramConnectedTransport, *(implementedBy(base.BasePort))
)
class ConnectedDatagramPort(DatagramPort):
    """
    A connected datagram UNIX socket.
    """

    def __init__(
        self,
        addr,
        proto,
        maxPacketSize=8192,
        mode=0o666,
        bindAddress=None,
        reactor=None,
    ):
        """
        @param addr: remote address this socket is connect()ed to.
        @param proto: a L{protocol.ConnectedDatagramProtocol} instance.
        @param maxPacketSize: largest datagram accepted in one read.
        @param mode: permission bits for the bound socket file, if bound.
        @param bindAddress: optional local address to bind before connecting.
        @param reactor: reactor to use; L{None} selects the default.
        """
        assert isinstance(proto, protocol.ConnectedDatagramProtocol)
        DatagramPort.__init__(self, bindAddress, proto, maxPacketSize, mode, reactor)
        self.remoteaddr = addr

    def startListening(self):
        """
        Bind, connect to the remote address, and hook up the protocol; any
        failure is routed to C{connectionFailed}.
        """
        try:
            self._bindSocket()
            self.socket.connect(self.remoteaddr)
            self._connectToProtocol()
        except BaseException:
            self.connectionFailed(failure.Failure())

    def connectionFailed(self, reason):
        """
        Called when a connection fails. Stop listening on the socket.

        @type reason: L{Failure}
        @param reason: Why the connection failed.
        """
        self.stopListening()
        self.protocol.connectionFailed(reason)
        del self.protocol

    def doRead(self):
        """
        Called when my socket is ready for reading.
        """
        read = 0
        # Cap the bytes consumed per reactor iteration at maxThroughput so a
        # chatty peer cannot starve other event sources.
        while read < self.maxThroughput:
            try:
                data, addr = self.socket.recvfrom(self.maxPacketSize)
                read += len(data)
                self.protocol.datagramReceived(data)
            except OSError as se:
                no = se.args[0]
                if no in (EAGAIN, EINTR, EWOULDBLOCK):
                    # No more data available right now.
                    return
                if no == ECONNREFUSED:
                    self.protocol.connectionRefused()
                else:
                    raise
            except BaseException:
                # Errors from the protocol callback are logged, not fatal.
                log.deferr()

    def write(self, data):
        """
        Write a datagram.
        """
        try:
            return self.socket.send(data)
        except OSError as se:
            no = se.args[0]
            if no == EINTR:
                # Interrupted system call: retry the send.
                return self.write(data)
            elif no == EMSGSIZE:
                raise error.MessageLengthError("message too long")
            elif no == ECONNREFUSED:
                self.protocol.connectionRefused()
            elif no == EAGAIN:
                # oh, well, drop the data. The only difference from UDP
                # is that UDP won't ever notice.
                # TODO: add TCP-like buffering
                pass
            else:
                raise

    def getPeer(self):
        # The remote address this socket was connect()ed to.
        return address.UNIXAddress(self.remoteaddr)
diff --git a/contrib/python/Twisted/py3/twisted/internet/utils.py b/contrib/python/Twisted/py3/twisted/internet/utils.py
new file mode 100644
index 0000000000..aaf00e169c
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/utils.py
@@ -0,0 +1,256 @@
+# -*- test-case-name: twisted.test.test_iutils -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Utility methods.
+"""
+
+
+import sys
+import warnings
+from functools import wraps
+from io import BytesIO
+
+from twisted.internet import defer, protocol
+from twisted.python import failure
+
+
def _callProtocolWithDeferred(
    protocol, executable, args, env, path, reactor=None, protoArgs=()
):
    """
    Spawn C{executable} with a fresh C{protocol} instance attached and return
    the L{Deferred} the protocol was constructed around.
    """
    if reactor is None:
        from twisted.internet import reactor

    deferredResult = defer.Deferred()
    processProtocol = protocol(deferredResult, *protoArgs)
    argv = (executable,) + tuple(args)
    reactor.spawnProcess(processProtocol, executable, argv, env, path)
    return deferredResult
+
+
+class _UnexpectedErrorOutput(IOError):
+ """
+ Standard error data was received where it was not expected. This is a
+ subclass of L{IOError} to preserve backward compatibility with the previous
+ error behavior of L{getProcessOutput}.
+
+ @ivar processEnded: A L{Deferred} which will fire when the process which
+ produced the data on stderr has ended (exited and all file descriptors
+ closed).
+ """
+
+ def __init__(self, text, processEnded):
+ IOError.__init__(self, f"got stderr: {text!r}")
+ self.processEnded = processEnded
+
+
class _BackRelay(protocol.ProcessProtocol):
    """
    Trivial protocol that collects a child process's stdout (and optionally
    stderr) and fires a L{Deferred} with the accumulated bytes.

    @ivar deferred: fires with all of stdout (plus stderr mixed in when
        C{errortoo} is true).  When C{errortoo} is false and any stderr data
        arrives, it is instead errbacked with L{_UnexpectedErrorOutput} and
        this attribute is set to L{None}.

    @ivar onProcessEnded: only set once unexpected stderr has been seen; a
        L{Deferred} which errbacks with the termination reason when the
        process ends, so users can still learn when the process finished.
    """

    def __init__(self, deferred, errortoo=0):
        self.deferred = deferred
        self.s = BytesIO()
        self.errReceived = (
            self.errReceivedIsGood if errortoo else self.errReceivedIsBad
        )

    def errReceivedIsBad(self, text):
        if self.deferred is None:
            return
        self.onProcessEnded = defer.Deferred()
        unexpected = _UnexpectedErrorOutput(text, self.onProcessEnded)
        self.deferred.errback(failure.Failure(unexpected))
        self.deferred = None
        self.transport.loseConnection()

    def errReceivedIsGood(self, text):
        self.s.write(text)

    def outReceived(self, text):
        self.s.write(text)

    def processEnded(self, reason):
        if self.deferred is not None:
            self.deferred.callback(self.s.getvalue())
        elif self.onProcessEnded is not None:
            self.onProcessEnded.errback(reason)
+
+
def getProcessOutput(executable, args=(), env={}, path=None, reactor=None, errortoo=0):
    """
    Spawn a process and return its output as a deferred returning a L{bytes}.

    @param executable: The file name to run and get the output of - the
        full path should be used.

    @param args: the command line arguments to pass to the process; a
        sequence of strings. The first string should B{NOT} be the
        executable's name.

    @param env: the environment variables to pass to the process; a
        dictionary of strings.

    @param path: the path to run the subprocess in - defaults to the
        current directory.

    @param reactor: the reactor to use - defaults to the default reactor

    @param errortoo: If true, include stderr in the result. If false, if
        stderr is received the returned L{Deferred} will errback with an
        L{IOError} instance with a C{processEnded} attribute. The
        C{processEnded} attribute refers to a L{Deferred} which fires when the
        executed process ends.
    """

    def buildProtocol(d):
        # Bind errortoo into the protocol factory passed downstream.
        return _BackRelay(d, errortoo=errortoo)

    return _callProtocolWithDeferred(
        buildProtocol, executable, args, env, path, reactor
    )
+
+
class _ValueGetter(protocol.ProcessProtocol):
    """
    Process protocol that ignores all output and fires a L{Deferred} with
    the child's exit code when the process ends.
    """

    def __init__(self, deferred):
        self.deferred = deferred

    def processEnded(self, reason):
        # reason.value carries the termination status; only its exitCode is
        # reported to the caller.
        self.deferred.callback(reason.value.exitCode)
+
+
def getProcessValue(executable, args=(), env={}, path=None, reactor=None):
    """
    Spawn a process and return a L{Deferred} that fires with its exit code.

    All output produced by the child is discarded; only the exit status is
    reported.  Parameters have the same meaning as in L{getProcessOutput}.
    """
    return _callProtocolWithDeferred(
        _ValueGetter, executable, args, env, path, reactor
    )
+
+
class _EverythingGetter(protocol.ProcessProtocol):
    """
    Collect stdout, stderr and the exit status of a child process.

    The deferred fires with C{(out, err, exitCode)} on a normal exit, or
    errbacks with C{(out, err, signalNum)} when the process is killed by a
    signal.
    """

    def __init__(self, deferred, stdinBytes=None):
        self.deferred = deferred
        self.outBuf = BytesIO()
        self.errBuf = BytesIO()
        self.outReceived = self.outBuf.write
        self.errReceived = self.errBuf.write
        self.stdinBytes = stdinBytes

    def connectionMade(self):
        if self.stdinBytes is None:
            return
        self.transport.writeToChild(0, self.stdinBytes)
        # The only compelling reason not to _always_ close stdin here is
        # backwards compatibility.
        self.transport.closeStdin()

    def processEnded(self, reason):
        out = self.outBuf.getvalue()
        err = self.errBuf.getvalue()
        status = reason.value
        if status.signal:
            self.deferred.errback((out, err, status.signal))
        else:
            self.deferred.callback((out, err, status.exitCode))
+
+
def getProcessOutputAndValue(
    executable, args=(), env={}, path=None, reactor=None, stdinBytes=None
):
    """
    Spawn a process and return a Deferred that fires with its output and
    exit status.

    On a normal exit the Deferred is called back with a three-tuple
    C{(out, err, code)} of accumulated stdout, stderr and the exit code.
    If the process dies from a signal, the Deferred errbacks with
    C{(out, err, signalNum)}, carrying the output gathered up to that point.

    @param stdinBytes: optional bytes written to the child's stdin (which is
        then closed) as soon as the process starts.
    """
    return _callProtocolWithDeferred(
        _EverythingGetter,
        executable,
        args,
        env,
        path,
        reactor,
        protoArgs=(stdinBytes,),
    )
+
+
+def _resetWarningFilters(passthrough, addedFilters):
+ for f in addedFilters:
+ try:
+ warnings.filters.remove(f)
+ except ValueError:
+ pass
+ return passthrough
+
+
+def runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw):
+ """
+ Run the function I{f}, but with some warnings suppressed.
+
+ This calls L{warnings.filterwarnings} to add warning filters before
+ invoking I{f}. If I{f} returns a L{Deferred} then the added filters are
+ removed once the deferred fires. Otherwise they are removed immediately.
+
+ Note that the list of warning filters is a process-wide resource, so
+ calling this function will affect all threads.
+
+ @param suppressedWarnings:
+ A list of arguments to pass to L{warnings.filterwarnings}, a sequence
+ of (args, kwargs) 2-tuples.
+
+ @param f: A callable, which may return a L{Deferred}.
+
+ @param a: Positional arguments passed to I{f}
+
+ @param kw: Keyword arguments passed to I{f}
+
+ @return: The result of C{f(*a, **kw)}
+
+ @seealso: L{twisted.python.util.runWithWarningsSuppressed}
+ functions similarly, but doesn't handled L{Deferred}s.
+ """
+ for args, kwargs in suppressedWarnings:
+ warnings.filterwarnings(*args, **kwargs)
+ addedFilters = warnings.filters[: len(suppressedWarnings)]
+ try:
+ result = f(*a, **kw)
+ except BaseException:
+ exc_info = sys.exc_info()
+ _resetWarningFilters(None, addedFilters)
+ raise exc_info[1].with_traceback(exc_info[2])
+ else:
+ if isinstance(result, defer.Deferred):
+ result.addBoth(_resetWarningFilters, addedFilters)
+ else:
+ _resetWarningFilters(None, addedFilters)
+ return result
+
+
def suppressWarnings(f, *suppressedWarnings):
    """
    Wrap C{f} in a callable which suppresses the indicated warnings before
    invoking C{f} and unsuppresses them afterwards. If f returns a Deferred,
    warnings will remain suppressed until the Deferred fires.
    """

    @wraps(f)
    def wrapper(*a, **kw):
        return runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw)

    return wrapper
+
+
+__all__ = [
+ "runWithWarningsSuppressed",
+ "suppressWarnings",
+ "getProcessOutput",
+ "getProcessValue",
+ "getProcessOutputAndValue",
+]
diff --git a/contrib/python/Twisted/py3/twisted/internet/win32eventreactor.py b/contrib/python/Twisted/py3/twisted/internet/win32eventreactor.py
new file mode 100644
index 0000000000..0e96012ea5
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/win32eventreactor.py
@@ -0,0 +1,425 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""
+A win32event based implementation of the Twisted main loop.
+
+This requires pywin32 (formerly win32all) or ActivePython to be installed.
+
+To install the event loop (and you should do this before any connections,
+listeners or connectors are added)::
+
+ from twisted.internet import win32eventreactor
+ win32eventreactor.install()
+
+LIMITATIONS:
+ 1. WaitForMultipleObjects and thus the event loop can only handle 64 objects.
+ 2. Process running has some problems (see L{twisted.internet.process} docstring).
+
+
+TODO:
+ 1. Event loop handling of writes is *very* problematic (this is causing failed tests).
+ Switch to doing it the correct way, whatever that means (see below).
+ 2. Replace icky socket loopback waker with event based waker (use dummyEvent object)
+ 3. Switch everyone to using Free Software so we don't have to deal with proprietary APIs.
+
+
+ALTERNATIVE SOLUTIONS:
+ - IIRC, sockets can only be registered once. So we switch to a structure
+ like the poll() reactor, thus allowing us to deal with write events in
+ a decent fashion. This should allow us to pass tests, but we're still
+ limited to 64 events.
+
+Or:
+
+ - Instead of doing a reactor, we make this an addon to the select reactor.
+ The WFMO event loop runs in a separate thread. This means no need to maintain
+ separate code for networking, 64 event limit doesn't apply to sockets,
+ we can run processes and other win32 stuff in default event loop. The
+ only problem is that we're stuck with the icky socket based waker.
+ Another benefit is that this could be extended to support >64 events
+ in a simpler manner than the previous solution.
+
+The 2nd solution is probably what will get implemented.
+"""
+
+import sys
+
+# System imports
+import time
+from threading import Thread
+from weakref import WeakKeyDictionary
+
+from zope.interface import implementer
+
+# Win32 imports
+from win32file import ( # type: ignore[import]
+ FD_ACCEPT,
+ FD_CLOSE,
+ FD_CONNECT,
+ FD_READ,
+ WSAEventSelect,
+)
+
+try:
+ # WSAEnumNetworkEvents was added in pywin32 215
+ from win32file import WSAEnumNetworkEvents
+except ImportError:
+ import warnings
+
+ warnings.warn(
+ "Reliable disconnection notification requires pywin32 215 or later",
+ category=UserWarning,
+ )
+
+ def WSAEnumNetworkEvents(fd, event):
+ return {FD_READ}
+
+
+import win32gui # type: ignore[import]
+from win32event import ( # type: ignore[import]
+ QS_ALLINPUT,
+ WAIT_OBJECT_0,
+ WAIT_TIMEOUT,
+ CreateEvent,
+ MsgWaitForMultipleObjects,
+)
+
+# Twisted imports
+from twisted.internet import posixbase
+from twisted.internet.interfaces import IReactorFDSet, IReactorWin32Events
+from twisted.internet.threads import blockingCallFromThread
+from twisted.python import failure, log, threadable
+
+
@implementer(IReactorFDSet, IReactorWin32Events)
class Win32Reactor(posixbase.PosixReactorBase):
    """
    Reactor that uses Win32 event APIs.

    @ivar _reads: A dictionary mapping L{FileDescriptor} instances to a
        win32 event object used to check for read events for that descriptor.

    @ivar _writes: A dictionary mapping L{FileDescriptor} instances to a
        arbitrary value. Keys in this dictionary will be given a chance to
        write out their data.

    @ivar _events: A dictionary mapping win32 event object to tuples of
        L{FileDescriptor} instances and event masks.

    @ivar _closedAndReading: Along with C{_closedAndNotReading}, keeps track of
        descriptors which have had close notification delivered from the OS but
        which we have not finished reading data from. MsgWaitForMultipleObjects
        will only deliver close notification to us once, so we remember it in
        these two dictionaries until we're ready to act on it. The OS has
        delivered close notification for each descriptor in this dictionary, and
        the descriptors are marked as allowed to handle read events in the
        reactor, so they can be processed. When a descriptor is marked as not
        allowed to handle read events in the reactor (ie, it is passed to
        L{IReactorFDSet.removeReader}), it is moved out of this dictionary and
        into C{_closedAndNotReading}. The descriptors are keys in this
        dictionary. The values are arbitrary.
    @type _closedAndReading: C{dict}

    @ivar _closedAndNotReading: These descriptors have had close notification
        delivered from the OS, but are not marked as allowed to handle read
        events in the reactor. They are saved here to record their closed
        state, but not processed at all. When one of these descriptors is
        passed to L{IReactorFDSet.addReader}, it is moved out of this dictionary
        and into C{_closedAndReading}. The descriptors are keys in this
        dictionary. The values are arbitrary. This is a weak key dictionary so
        that if an application tells the reactor to stop reading from a
        descriptor and then forgets about that descriptor itself, the reactor
        will also forget about it.
    @type _closedAndNotReading: C{WeakKeyDictionary}
    """

    # Placeholder handle so MsgWaitForMultipleObjects always has at least one
    # handle to wait on even when no real events are registered.
    dummyEvent = CreateEvent(None, 0, 0, None)

    def __init__(self):
        self._reads = {}
        self._writes = {}
        self._events = {}
        self._closedAndReading = {}
        self._closedAndNotReading = WeakKeyDictionary()
        posixbase.PosixReactorBase.__init__(self)

    def _makeSocketEvent(self, fd, action, why):
        """
        Make a win32 event object for a socket.
        """
        event = CreateEvent(None, 0, 0, None)
        # Associate the network event mask `why` with the new event handle.
        WSAEventSelect(fd, event, why)
        self._events[event] = (fd, action)
        return event

    def addEvent(self, event, fd, action):
        """
        Add a new win32 event to the event loop.
        """
        self._events[event] = (fd, action)

    def removeEvent(self, event):
        """
        Remove an event.
        """
        del self._events[event]

    def addReader(self, reader):
        """
        Add a socket FileDescriptor for notification of data available to read.
        """
        if reader not in self._reads:
            self._reads[reader] = self._makeSocketEvent(
                reader, "doRead", FD_READ | FD_ACCEPT | FD_CONNECT | FD_CLOSE
            )
        # If the reader is closed, move it over to the dictionary of reading
        # descriptors.
        if reader in self._closedAndNotReading:
            self._closedAndReading[reader] = True
            del self._closedAndNotReading[reader]

    def addWriter(self, writer):
        """
        Add a socket FileDescriptor for notification of data available to write.
        """
        if writer not in self._writes:
            self._writes[writer] = 1

    def removeReader(self, reader):
        """Remove a Selectable for notification of data available to read."""
        if reader in self._reads:
            del self._events[self._reads[reader]]
            del self._reads[reader]

            # If the descriptor is closed, move it out of the dictionary of
            # reading descriptors into the dictionary of waiting descriptors.
            if reader in self._closedAndReading:
                self._closedAndNotReading[reader] = True
                del self._closedAndReading[reader]

    def removeWriter(self, writer):
        """Remove a Selectable for notification of data available to write."""
        if writer in self._writes:
            del self._writes[writer]

    def removeAll(self):
        """
        Remove all selectables, and return a list of them.
        """
        return self._removeAll(self._reads, self._writes)

    def getReaders(self):
        return list(self._reads.keys())

    def getWriters(self):
        return list(self._writes.keys())

    def doWaitForMultipleEvents(self, timeout):
        """
        One reactor iteration: flush pending closes and writes, then wait up
        to C{timeout} seconds for win32 events or window messages.
        """
        log.msg(channel="system", event="iteration", reactor=self)
        if timeout is None:
            timeout = 100

        # Keep track of whether we run any application code before we get to the
        # MsgWaitForMultipleObjects.  If so, there's a chance it will schedule a
        # new timed call or stop the reactor or do something else that means we
        # shouldn't block in MsgWaitForMultipleObjects for the full timeout.
        ranUserCode = False

        # If any descriptors are trying to close, try to get them out of the way
        # first.
        for reader in list(self._closedAndReading.keys()):
            ranUserCode = True
            self._runAction("doRead", reader)

        for fd in list(self._writes.keys()):
            ranUserCode = True
            log.callWithLogger(fd, self._runWrite, fd)

        if ranUserCode:
            # If application code *might* have scheduled an event, assume it
            # did.  If we're wrong, we'll get back here shortly anyway.  If
            # we're right, we'll be sure to handle the event (including reactor
            # shutdown) in a timely manner.
            timeout = 0

        if not (self._events or self._writes):
            # sleep so we don't suck up CPU time
            # (timeout is still in seconds at this point; up to 100s when the
            # caller passed None)
            time.sleep(timeout)
            return

        handles = list(self._events.keys()) or [self.dummyEvent]
        # MsgWaitForMultipleObjects takes milliseconds.
        timeout = int(timeout * 1000)
        val = MsgWaitForMultipleObjects(handles, 0, timeout, QS_ALLINPUT)
        if val == WAIT_TIMEOUT:
            return
        elif val == WAIT_OBJECT_0 + len(handles):
            # Index one past the handles means a queued window message.
            exit = win32gui.PumpWaitingMessages()
            if exit:
                self.callLater(0, self.stop)
                return
        elif val >= WAIT_OBJECT_0 and val < WAIT_OBJECT_0 + len(handles):
            event = handles[val - WAIT_OBJECT_0]
            fd, action = self._events[event]

            if fd in self._reads:
                # Before anything, make sure it's still a valid file descriptor.
                fileno = fd.fileno()
                if fileno == -1:
                    self._disconnectSelectable(fd, posixbase._NO_FILEDESC, False)
                    return

                # Since it's a socket (not another arbitrary event added via
                # addEvent) and we asked for FD_READ | FD_CLOSE, check to see if
                # we actually got FD_CLOSE.  This needs a special check because
                # it only gets delivered once.  If we miss it, it's gone forever
                # and we'll never know that the connection is closed.
                events = WSAEnumNetworkEvents(fileno, event)
                if FD_CLOSE in events:
                    self._closedAndReading[fd] = True
            log.callWithLogger(fd, self._runAction, action, fd)

    def _runWrite(self, fd):
        """
        Invoke C{fd.doWrite}, disconnecting the descriptor if it reports or
        raises a connection-terminating condition.
        """
        closed = 0
        try:
            closed = fd.doWrite()
        except BaseException:
            closed = sys.exc_info()[1]
            log.deferr()

        if closed:
            self.removeReader(fd)
            self.removeWriter(fd)
            try:
                fd.connectionLost(failure.Failure(closed))
            except BaseException:
                log.deferr()
        elif closed is None:
            return 1

    def _runAction(self, action, fd):
        """
        Invoke the named C{action} method on C{fd}, disconnecting it if the
        action reports or raises a connection-terminating condition.
        """
        try:
            closed = getattr(fd, action)()
        except BaseException:
            closed = sys.exc_info()[1]
            log.deferr()
        if closed:
            self._disconnectSelectable(fd, closed, action == "doRead")

    doIteration = doWaitForMultipleEvents
+
+
+class _ThreadFDWrapper:
+ """
+ This wraps an event handler and translates notification in the helper
+ L{Win32Reactor} thread into a notification in the primary reactor thread.
+
+ @ivar _reactor: The primary reactor, the one to which event notification
+ will be sent.
+
+ @ivar _fd: The L{FileDescriptor} to which the event will be dispatched.
+
+ @ivar _action: A C{str} giving the method of C{_fd} which handles the event.
+
+ @ivar _logPrefix: The pre-fetched log prefix string for C{_fd}, so that
+ C{_fd.logPrefix} does not need to be called in a non-main thread.
+ """
+
+ def __init__(self, reactor, fd, action, logPrefix):
+ self._reactor = reactor
+ self._fd = fd
+ self._action = action
+ self._logPrefix = logPrefix
+
+ def logPrefix(self):
+ """
+ Return the original handler's log prefix, as it was given to
+ C{__init__}.
+ """
+ return self._logPrefix
+
+ def _execute(self):
+ """
+ Callback fired when the associated event is set. Run the C{action}
+ callback on the wrapped descriptor in the main reactor thread and raise
+ or return whatever it raises or returns to cause this event handler to
+ be removed from C{self._reactor} if appropriate.
+ """
+ return blockingCallFromThread(
+ self._reactor, lambda: getattr(self._fd, self._action)()
+ )
+
+ def connectionLost(self, reason):
+ """
+ Pass through to the wrapped descriptor, but in the main reactor thread
+ instead of the helper C{Win32Reactor} thread.
+ """
+ self._reactor.callFromThread(self._fd.connectionLost, reason)
+
+
@implementer(IReactorWin32Events)
class _ThreadedWin32EventsMixin:
    """
    This mixin implements L{IReactorWin32Events} for another reactor by running
    a L{Win32Reactor} in a separate thread and dispatching work to it.

    @ivar _reactor: The L{Win32Reactor} running in the other thread. This is
        L{None} until it is actually needed.

    @ivar _reactorThread: The L{threading.Thread} which is running the
        L{Win32Reactor}. This is L{None} until it is actually needed.
    """

    _reactor = None
    _reactorThread = None

    def _unmakeHelperReactor(self):
        """
        Stop and discard the reactor started by C{_makeHelperReactor}.
        """
        self._reactor.callFromThread(self._reactor.stop)
        self._reactor = None

    def _makeHelperReactor(self):
        """
        Create and (in a new thread) start a L{Win32Reactor} instance to use for
        the implementation of L{IReactorWin32Events}.
        """
        self._reactor = Win32Reactor()
        # This is a helper reactor, it is not the global reactor and its thread
        # is not "the" I/O thread. Prevent it from registering it as such.
        self._reactor._registerAsIOThread = False
        # run(False) — presumably disables signal-handler installation in the
        # helper thread; confirm against PosixReactorBase.run's signature.
        self._reactorThread = Thread(target=self._reactor.run, args=(False,))
        self.addSystemEventTrigger("after", "shutdown", self._unmakeHelperReactor)
        self._reactorThread.start()

    def addEvent(self, event, fd, action):
        """
        @see: L{IReactorWin32Events}
        """
        # The helper reactor is started lazily, on the first added event.
        if self._reactor is None:
            self._makeHelperReactor()
        # The helper reactor fires "_execute" on the wrapper, which relays the
        # real action back into this (the primary) reactor's thread.
        self._reactor.callFromThread(
            self._reactor.addEvent,
            event,
            _ThreadFDWrapper(self, fd, action, fd.logPrefix()),
            "_execute",
        )

    def removeEvent(self, event):
        """
        @see: L{IReactorWin32Events}
        """
        self._reactor.callFromThread(self._reactor.removeEvent, event)
+
+
def install():
    """
    Install a L{Win32Reactor} as the global reactor.
    """
    threadable.init(1)
    reactor = Win32Reactor()
    from . import main

    main.installReactor(reactor)
+
+
+__all__ = ["Win32Reactor", "install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/wxreactor.py b/contrib/python/Twisted/py3/twisted/internet/wxreactor.py
new file mode 100644
index 0000000000..b988724dfa
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/wxreactor.py
@@ -0,0 +1,188 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module provides wxPython event loop support for Twisted.
+
+In order to use this support, simply do the following::
+
+ | from twisted.internet import wxreactor
+ | wxreactor.install()
+
+Then, when your root wxApp has been created::
+
+ | from twisted.internet import reactor
+ | reactor.registerWxApp(yourApp)
+ | reactor.run()
+
+Then use twisted.internet APIs as usual. Stop the event loop using
+reactor.stop(), not yourApp.ExitMainLoop().
+
+IMPORTANT: tests will fail when run under this reactor. This is
+expected and probably does not reflect on the reactor's ability to run
+real applications.
+"""
+
+from queue import Empty, Queue
+
+try:
+ from wx import ( # type: ignore[import]
+ CallAfter as wxCallAfter,
+ PySimpleApp as wxPySimpleApp,
+ Timer as wxTimer,
+ )
+except ImportError:
+ # older version of wxPython:
+ from wxPython.wx import wxPySimpleApp, wxCallAfter, wxTimer # type: ignore[import]
+
+from twisted.internet import _threadedselect
+from twisted.python import log, runtime
+
+
class ProcessEventsTimer(wxTimer):
    """
    Timer that tells wx to process pending events.

    This is necessary on macOS, probably due to a bug in wx, if we want
    wxCallAfters to be handled when modal dialogs, menus, etc.  are open.
    """

    def __init__(self, wxapp):
        wxTimer.__init__(self)
        # The wxApp whose pending-event queue Notify() drains.
        self.wxapp = wxapp

    def Notify(self):
        """
        Called repeatedly by wx event loop.
        """
        self.wxapp.ProcessPendingEvents()
+
+
class WxReactor(_threadedselect.ThreadedSelectReactor):
    """
    wxPython reactor.

    wxPython drives the event loop, select() runs in a thread.
    """

    # Set once stop() has run, so shutdown is initiated at most once.
    _stopping = False

    def registerWxApp(self, wxapp):
        """
        Register wxApp instance with the reactor.
        """
        self.wxapp = wxapp

    def _installSignalHandlersAgain(self):
        """
        wx sometimes removes our own signal handlers, so re-add them.
        """
        try:
            # make _handleSignals happy:
            import signal

            signal.signal(signal.SIGINT, signal.default_int_handler)
        except ImportError:
            return
        self._signals.install()

    def stop(self):
        """
        Stop the reactor.
        """
        if self._stopping:
            return
        self._stopping = True
        _threadedselect.ThreadedSelectReactor.stop(self)

    def _runInMainThread(self, f):
        """
        Schedule function to run in main wx/Twisted thread.

        Called by the select() thread.
        """
        if hasattr(self, "wxapp"):
            wxCallAfter(f)
        else:
            # wx shutdown but twisted hasn't
            self._postQueue.put(f)

    def _stopWx(self):
        """
        Stop the wx event loop if it hasn't already been stopped.

        Called during Twisted event loop shutdown.
        """
        if hasattr(self, "wxapp"):
            self.wxapp.ExitMainLoop()

    def run(self, installSignalHandlers=True):
        """
        Start the reactor.
        """
        # Queue of callables posted by the select() thread once wx has shut
        # down; drained in the loop at the bottom of this method.
        self._postQueue = Queue()
        if not hasattr(self, "wxapp"):
            log.msg(
                "registerWxApp() was not called on reactor, "
                "registering my own wxApp instance."
            )
            self.registerWxApp(wxPySimpleApp())

        # start select() thread:
        self.interleave(
            self._runInMainThread, installSignalHandlers=installSignalHandlers
        )
        if installSignalHandlers:
            self.callLater(0, self._installSignalHandlersAgain)

        # add cleanup events:
        self.addSystemEventTrigger("after", "shutdown", self._stopWx)
        # The None sentinel posted at shutdown terminates the drain loop below.
        self.addSystemEventTrigger(
            "after", "shutdown", lambda: self._postQueue.put(None)
        )

        # On macOS, work around wx bug by starting timer to ensure
        # wxCallAfter calls are always processed. We don't wake up as
        # often as we could since that uses too much CPU.
        if runtime.platform.isMacOSX():
            t = ProcessEventsTimer(self.wxapp)
            t.Start(2)  # wake up every 2ms

        self.wxapp.MainLoop()
        wxapp = self.wxapp
        del self.wxapp

        if not self._stopping:
            # wx event loop exited without reactor.stop() being
            # called.  At this point events from select() thread will
            # be added to _postQueue, but some may still be waiting
            # unprocessed in wx, thus the ProcessPendingEvents()
            # below.
            self.stop()
        wxapp.ProcessPendingEvents()  # deal with any queued wxCallAfters
        # Drain callables posted by the select() thread until the shutdown
        # sentinel (None) arrives.
        while 1:
            try:
                f = self._postQueue.get(timeout=0.01)
            except Empty:
                continue
            else:
                if f is None:
                    break
                try:
                    f()
                except BaseException:
                    log.err()
+
+
def install():
    """
    Configure the twisted mainloop to be run inside the wxPython mainloop.
    """
    from twisted.internet.main import installReactor

    wxReactorInstance = WxReactor()
    installReactor(wxReactorInstance)
    return wxReactorInstance
+
+
+__all__ = ["install"]
diff --git a/contrib/python/Twisted/py3/twisted/internet/wxsupport.py b/contrib/python/Twisted/py3/twisted/internet/wxsupport.py
new file mode 100644
index 0000000000..a9fab83d37
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/internet/wxsupport.py
@@ -0,0 +1,57 @@
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+#
+"""Old method of wxPython support for Twisted.
+
+twisted.internet.wxreactor is probably a better choice.
+
+To use::
+
+ | # given a wxApp instance called myWxAppInstance:
+ | from twisted.internet import wxsupport
+ | wxsupport.install(myWxAppInstance)
+
+Use Twisted's APIs for running and stopping the event loop, don't use
+wxPython's methods.
+
+On Windows the Twisted event loop might block when dialogs are open
+or menus are selected.
+
+Maintainer: Itamar Shtull-Trauring
+"""
+
+import warnings
+
+warnings.warn("wxsupport is not fully functional on Windows, wxreactor is better.")
+
+from twisted.internet import reactor
+
+
class wxRunner:
    """Make sure GUI events are handled."""

    def __init__(self, app):
        self.app = app

    def run(self):
        """
        Execute pending WX events followed by WX idle events and
        reschedule.
        """
        app = self.app
        # Drain the pending wx event queue first...
        while app.Pending():
            app.Dispatch()
        # ...then let wx do its idle processing and come back in 20ms.
        app.ProcessIdle()
        reactor.callLater(0.02, self.run)
+
+
def install(app):
    """
    Install the wxPython support, given a wxApp instance.

    Starts the recurring event pump by scheduling L{wxRunner.run} on the
    global reactor.
    """
    reactor.callLater(0.02, wxRunner(app).run)
+
+
+__all__ = ["install"]