author | shmel1k <shmel1k@ydb.tech> | 2023-11-26 18:16:14 +0300 |
---|---|---|
committer | shmel1k <shmel1k@ydb.tech> | 2023-11-26 18:43:30 +0300 |
commit | b8cf9e88f4c5c64d9406af533d8948deb050d695 (patch) | |
tree | 218eb61fb3c3b96ec08b4d8cdfef383104a87d63 /contrib/python/Twisted/py3/twisted/_threads/_pool.py | |
parent | 523f645a83a0ec97a0332dbc3863bb354c92a328 (diff) | |
download | ydb-b8cf9e88f4c5c64d9406af533d8948deb050d695.tar.gz |
add kikimr_configure
Diffstat (limited to 'contrib/python/Twisted/py3/twisted/_threads/_pool.py')
-rw-r--r-- | contrib/python/Twisted/py3/twisted/_threads/_pool.py | 73 |
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/contrib/python/Twisted/py3/twisted/_threads/_pool.py b/contrib/python/Twisted/py3/twisted/_threads/_pool.py
new file mode 100644
index 0000000000..99c055d240
--- /dev/null
+++ b/contrib/python/Twisted/py3/twisted/_threads/_pool.py
@@ -0,0 +1,73 @@
+# -*- test-case-name: twisted._threads.test -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Top level thread pool interface, used to implement
+L{twisted.python.threadpool}.
+"""
+
+
+from queue import Queue
+from threading import Lock, Thread, local as LocalStorage
+from typing import Callable, Optional
+
+from typing_extensions import Protocol
+
+from twisted.python.log import err
+from ._ithreads import IWorker
+from ._team import Team
+from ._threadworker import LockWorker, ThreadWorker
+
+
+class _ThreadFactory(Protocol):
+    def __call__(self, *, target: Callable[..., object]) -> Thread:
+        ...
+
+
+def pool(
+    currentLimit: Callable[[], int], threadFactory: _ThreadFactory = Thread
+) -> Team:
+    """
+    Construct a L{Team} that spawns threads as a thread pool, with the given
+    limiting function.
+
+    @note: Future maintainers: while the public API for the eventual move to
+        twisted.threads should look I{something} like this, and while this
+        function is necessary to implement the API described by
+        L{twisted.python.threadpool}, I am starting to think the idea of a hard
+        upper limit on threadpool size is just bad (turning memory performance
+        issues into correctness issues well before we run into memory
+        pressure), and instead we should build something with reactor
+        integration for slowly releasing idle threads when they're not needed
+        and I{rate} limiting the creation of new threads rather than just
+        hard-capping it.
+
+    @param currentLimit: a callable that returns the current limit on the
+        number of workers that the returned L{Team} should create; if it
+        already has more workers than that value, no new workers will be
+        created.
+    @type currentLimit: 0-argument callable returning L{int}
+
+    @param threadFactory: Factory that, when given a C{target} keyword argument,
+        returns a L{threading.Thread} that will run that target.
+    @type threadFactory: callable returning a L{threading.Thread}
+
+    @return: a new L{Team}.
+    """
+
+    def startThread(target: Callable[..., object]) -> None:
+        return threadFactory(target=target).start()
+
+    def limitedWorkerCreator() -> Optional[IWorker]:
+        stats = team.statistics()
+        if stats.busyWorkerCount + stats.idleWorkerCount >= currentLimit():
+            return None
+        return ThreadWorker(startThread, Queue())
+
+    team = Team(
+        coordinator=LockWorker(Lock(), LocalStorage()),
+        createWorker=limitedWorkerCreator,
+        logException=err,
+    )
+    return team
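For readers unfamiliar with this vendored module, the sketch below shows how the `pool()` factory added in this diff is meant to be driven. It is not part of the commit: the worker limit of 4, the `demo()` wrapper, and the throwaway `work()` task are illustrative assumptions; only `twisted._threads.pool`, `Team.do()`, and `Team.quit()` come from the library itself.

```python
# Minimal usage sketch for the pool() factory in this diff (assumptions:
# a limit of 4 workers and a trivial work() task, chosen for illustration).
from twisted._threads import pool


def demo() -> None:
    # The limit callable is consulted whenever the Team considers creating
    # a new worker; returning 4 caps the pool at 4 threads.
    team = pool(lambda: 4)

    def work(n: int) -> None:
        print(f"task {n} done")

    # Team.do() routes each task through the coordinator, spawning new
    # ThreadWorkers (via the default threading.Thread factory) up to the limit.
    for i in range(8):
        team.do(lambda n=i: work(n))

    # Ask the team to wind down; workers exit after draining the tasks
    # already handed to them.
    team.quit()


if __name__ == "__main__":
    demo()
```

As the module docstring notes, the real consumer of this entry point is L{twisted.python.threadpool}, which builds its thread pool on top of this factory rather than being called directly as above.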