author     shmel1k <shmel1k@ydb.tech>  2023-11-26 18:16:14 +0300
committer  shmel1k <shmel1k@ydb.tech>  2023-11-26 18:43:30 +0300
commit     b8cf9e88f4c5c64d9406af533d8948deb050d695 (patch)
tree       218eb61fb3c3b96ec08b4d8cdfef383104a87d63 /contrib/python/Twisted/py2/twisted/_threads/_pool.py
parent     523f645a83a0ec97a0332dbc3863bb354c92a328 (diff)
download   ydb-b8cf9e88f4c5c64d9406af533d8948deb050d695.tar.gz
add kikimr_configure
Diffstat (limited to 'contrib/python/Twisted/py2/twisted/_threads/_pool.py')
-rw-r--r--  contrib/python/Twisted/py2/twisted/_threads/_pool.py  69
1 file changed, 69 insertions, 0 deletions
diff --git a/contrib/python/Twisted/py2/twisted/_threads/_pool.py b/contrib/python/Twisted/py2/twisted/_threads/_pool.py
new file mode 100644
index 0000000000..043c43ecf8
--- /dev/null
+++ b/contrib/python/Twisted/py2/twisted/_threads/_pool.py
@@ -0,0 +1,69 @@
+# -*- test-case-name: twisted._threads.test -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Top level thread pool interface, used to implement
+L{twisted.python.threadpool}.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from threading import Thread, Lock, local as LocalStorage
+try:
+    from Queue import Queue
+except ImportError:
+    from queue import Queue
+
+from twisted.python.log import err
+
+from ._threadworker import LockWorker
+from ._team import Team
+from ._threadworker import ThreadWorker
+
+
+def pool(currentLimit, threadFactory=Thread):
+ """
+ Construct a L{Team} that spawns threads as a thread pool, with the given
+ limiting function.
+
+ @note: Future maintainers: while the public API for the eventual move to
+ twisted.threads should look I{something} like this, and while this
+ function is necessary to implement the API described by
+ L{twisted.python.threadpool}, I am starting to think the idea of a hard
+ upper limit on threadpool size is just bad (turning memory performance
+ issues into correctness issues well before we run into memory
+ pressure), and instead we should build something with reactor
+ integration for slowly releasing idle threads when they're not needed
+ and I{rate} limiting the creation of new threads rather than just
+ hard-capping it.
+
+ @param currentLimit: a callable that returns the current limit on the
+ number of workers that the returned L{Team} should create; if it
+ already has more workers than that value, no new workers will be
+ created.
+ @type currentLimit: 0-argument callable returning L{int}
+
+    @param threadFactory: a callable that accepts a C{target} keyword argument
+        and returns a L{threading.Thread} that will run that target once
+        started; defaults to L{threading.Thread} itself.
+
+    @return: a new L{Team}.
+    """
+
+    def startThread(target):
+        return threadFactory(target=target).start()
+
+    def limitedWorkerCreator():
+        stats = team.statistics()
+        if stats.busyWorkerCount + stats.idleWorkerCount >= currentLimit():
+            return None
+        return ThreadWorker(startThread, Queue())
+
+    team = Team(coordinator=LockWorker(Lock(), LocalStorage()),
+                createWorker=limitedWorkerCreator,
+                logException=err)
+    return team
+
+
+
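
Usage note (not part of this commit): pool() only constructs the L{Team}; work is
submitted and the pool is shut down through the Team API. A minimal sketch, assuming
the package re-exports pool() from twisted._threads the way upstream Twisted does:

    from twisted._threads import pool

    def work():
        # runs on one of the pool's worker threads
        print("hello from a worker thread")

    team = pool(lambda: 4)   # never keep more than 4 workers (busy + idle)
    for _ in range(10):
        team.do(work)        # queue work; new threads are spawned up to the limit
    team.quit()              # stop accepting work and let the workers exit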
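
The limit is consulted on every dispatch: limitedWorkerCreator() calls currentLimit()
each time the Team wants another worker, so a changing limit takes effect on the next
do() call. A hedged sketch of that mechanism (the maxThreads holder below is
illustrative, not from this commit; twisted.python.threadpool appears to pass a
similar callable returning its configured maximum):

    from twisted._threads import pool

    maxThreads = [2]                     # mutable holder so the cap can change later
    team = pool(lambda: maxThreads[0])

    team.do(lambda: None)                # may create worker 1
    team.do(lambda: None)                # may create worker 2, but never a 3rd yet

    maxThreads[0] = 8                    # raise the cap; later do() calls may now
                                         # grow the pool up to 8 workers in total
    team.quit()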