path: root/contrib/tools/python3/src/Python/ceval_gil.h
author    shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/tools/python3/src/Python/ceval_gil.h
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/tools/python3/src/Python/ceval_gil.h')
-rw-r--r--  contrib/tools/python3/src/Python/ceval_gil.h | 320
1 file changed, 160 insertions(+), 160 deletions(-)
diff --git a/contrib/tools/python3/src/Python/ceval_gil.h b/contrib/tools/python3/src/Python/ceval_gil.h
index 3510675a69..3624d9462f 100644
--- a/contrib/tools/python3/src/Python/ceval_gil.h
+++ b/contrib/tools/python3/src/Python/ceval_gil.h
@@ -5,7 +5,7 @@
 #include <stdlib.h>
 #include <errno.h>
 
-#include "pycore_atomic.h"
+#include "pycore_atomic.h"
 
 
 /*
@@ -90,242 +90,242 @@
 #define DEFAULT_INTERVAL 5000
 
-static void _gil_initialize(struct _gil_runtime_state *gil)
+static void _gil_initialize(struct _gil_runtime_state *gil)
 {
     _Py_atomic_int uninitialized = {-1};
-    gil->locked = uninitialized;
-    gil->interval = DEFAULT_INTERVAL;
+    gil->locked = uninitialized;
+    gil->interval = DEFAULT_INTERVAL;
 }
 
-static int gil_created(struct _gil_runtime_state *gil)
+static int gil_created(struct _gil_runtime_state *gil)
 {
-    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
+    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
 }
 
-static void create_gil(struct _gil_runtime_state *gil)
+static void create_gil(struct _gil_runtime_state *gil)
 {
-    MUTEX_INIT(gil->mutex);
+    MUTEX_INIT(gil->mutex);
 #ifdef FORCE_SWITCHING
-    MUTEX_INIT(gil->switch_mutex);
+    MUTEX_INIT(gil->switch_mutex);
 #endif
-    COND_INIT(gil->cond);
+    COND_INIT(gil->cond);
 #ifdef FORCE_SWITCHING
-    COND_INIT(gil->switch_cond);
+    COND_INIT(gil->switch_cond);
 #endif
-    _Py_atomic_store_relaxed(&gil->last_holder, 0);
-    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
-    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
+    _Py_atomic_store_relaxed(&gil->last_holder, 0);
+    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
+    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
 }
 
-static void destroy_gil(struct _gil_runtime_state *gil)
+static void destroy_gil(struct _gil_runtime_state *gil)
 {
     /* some pthread-like implementations tie the mutex to the cond
      * and must have the cond destroyed first.
      */
-    COND_FINI(gil->cond);
-    MUTEX_FINI(gil->mutex);
+    COND_FINI(gil->cond);
+    MUTEX_FINI(gil->mutex);
 #ifdef FORCE_SWITCHING
-    COND_FINI(gil->switch_cond);
-    MUTEX_FINI(gil->switch_mutex);
+    COND_FINI(gil->switch_cond);
+    MUTEX_FINI(gil->switch_mutex);
 #endif
-    _Py_atomic_store_explicit(&gil->locked, -1,
+    _Py_atomic_store_explicit(&gil->locked, -1,
                               _Py_memory_order_release);
-    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
+    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
 }
 
-static void recreate_gil(struct _gil_runtime_state *gil)
+static void recreate_gil(struct _gil_runtime_state *gil)
 {
-    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
+    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
     /* XXX should we destroy the old OS resources here? */
-    create_gil(gil);
+    create_gil(gil);
 }
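
recreate_gil() exists for the case where the runtime must re-arm the GIL without tearing it down cleanly, e.g. in a child process after fork(), where the old mutex and condition variable may have been "held" by parent threads that no longer exist in the child. The XXX comment asks whether the old OS resources should be destroyed first; after a fork that is generally unsafe. A minimal standalone sketch of that "reinitialize, don't destroy" pattern under POSIX threads (not CPython code; my_lock and the handler names are hypothetical):

/* Illustrative sketch of the "reinitialize, don't destroy" pattern that
 * recreate_gil() follows. After fork(), a lock may still be held by a
 * parent thread that does not exist in the child, so destroying it is
 * unsafe; the child simply re-initializes fresh state on top of it. */
#include <pthread.h>

static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

static void reinit_after_fork(void)
{
    /* No pthread_mutex_destroy(): the owner may not exist in the child.
       Overwrite the old state with a freshly initialized mutex. */
    pthread_mutex_init(&my_lock, NULL);
}

static void install_fork_handler(void)
{
    /* Run reinit_after_fork() in the child after every fork(). */
    pthread_atfork(NULL, NULL, reinit_after_fork);
}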
 
-static void
-drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
-         PyThreadState *tstate)
+static void
+drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
+         PyThreadState *tstate)
 {
-    struct _gil_runtime_state *gil = &ceval->gil;
-    if (!_Py_atomic_load_relaxed(&gil->locked)) {
+    struct _gil_runtime_state *gil = &ceval->gil;
+    if (!_Py_atomic_load_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
-    }
-
+    }
+
     /* tstate is allowed to be NULL (early interpreter init) */
     if (tstate != NULL) {
         /* Sub-interpreter support: threads might have been switched
            under our feet using PyThreadState_Swap(). Fix the GIL last
            holder variable so that our heuristics work. */
-        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
+        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
     }
 
-    MUTEX_LOCK(gil->mutex);
-    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
-    _Py_atomic_store_relaxed(&gil->locked, 0);
-    COND_SIGNAL(gil->cond);
-    MUTEX_UNLOCK(gil->mutex);
+    MUTEX_LOCK(gil->mutex);
+    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
+    _Py_atomic_store_relaxed(&gil->locked, 0);
+    COND_SIGNAL(gil->cond);
+    MUTEX_UNLOCK(gil->mutex);
 
 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
-        MUTEX_LOCK(gil->switch_mutex);
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
+        MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
-        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
+        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
         {
-            assert(is_tstate_valid(tstate));
-            RESET_GIL_DROP_REQUEST(tstate->interp);
+            assert(is_tstate_valid(tstate));
+            RESET_GIL_DROP_REQUEST(tstate->interp);
             /* NOTE: if COND_WAIT does not atomically start waiting when
                releasing the mutex, another thread can run through, take
                the GIL and drop it again, and reset the condition
                before we even had a chance to wait for it. */
-            COND_WAIT(gil->switch_cond, gil->switch_mutex);
-        }
-        MUTEX_UNLOCK(gil->switch_mutex);
+            COND_WAIT(gil->switch_cond, gil->switch_mutex);
+        }
+        MUTEX_UNLOCK(gil->switch_mutex);
     }
 #endif
 }
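
For callers outside this file, drop_gil() and take_gil() are reached through PyEval_SaveThread() and PyEval_RestoreThread(), which is what the public Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS macros expand to. A minimal sketch of a C extension releasing the GIL around blocking work (example_read and do_blocking_io are hypothetical names, not part of CPython):

/* Sketch: a C extension function that releases the GIL around a blocking
 * call so other Python threads can run. Py_BEGIN_ALLOW_THREADS saves the
 * thread state and drops the GIL; Py_END_ALLOW_THREADS reacquires it. */
#include <Python.h>
#include <unistd.h>

static void do_blocking_io(void)
{
    sleep(1);   /* hypothetical stand-in for real blocking work */
}

static PyObject *
example_read(PyObject *self, PyObject *args)
{
    Py_BEGIN_ALLOW_THREADS      /* -> PyEval_SaveThread() -> drop_gil() */
    do_blocking_io();           /* GIL released: other threads may run */
    Py_END_ALLOW_THREADS        /* -> PyEval_RestoreThread() -> take_gil() */
    Py_RETURN_NONE;
}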
 
-
-/* Check if a Python thread must exit immediately, rather than taking the GIL
-   if Py_Finalize() has been called.
-
-   When this function is called by a daemon thread after Py_Finalize() has been
-   called, the GIL does no longer exist.
-
-   tstate must be non-NULL. */
-static inline int
-tstate_must_exit(PyThreadState *tstate)
-{
-    /* bpo-39877: Access _PyRuntime directly rather than using
-       tstate->interp->runtime to support calls from Python daemon threads.
-       After Py_Finalize() has been called, tstate can be a dangling pointer:
-       point to PyThreadState freed memory. */
-    PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
-    return (finalizing != NULL && finalizing != tstate);
-}
-
-
-/* Take the GIL.
-
-   The function saves errno at entry and restores its value at exit.
-
-   tstate must be non-NULL. */
-static void
-take_gil(PyThreadState *tstate)
+
+/* Check if a Python thread must exit immediately, rather than taking the GIL
+   if Py_Finalize() has been called.
+
+   When this function is called by a daemon thread after Py_Finalize() has been
+   called, the GIL does no longer exist.
+
+   tstate must be non-NULL. */
+static inline int
+tstate_must_exit(PyThreadState *tstate)
+{
+    /* bpo-39877: Access _PyRuntime directly rather than using
+       tstate->interp->runtime to support calls from Python daemon threads.
+       After Py_Finalize() has been called, tstate can be a dangling pointer:
+       point to PyThreadState freed memory. */
+    PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
+    return (finalizing != NULL && finalizing != tstate);
+}
+
+
+/* Take the GIL.
+
+   The function saves errno at entry and restores its value at exit.
+
+   tstate must be non-NULL. */
+static void
+take_gil(PyThreadState *tstate)
 {
-    int err = errno;
-
-    assert(tstate != NULL);
-
-    if (tstate_must_exit(tstate)) {
-        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
-           thread which called Py_Finalize(), exit immediately the thread.
-
-           This code path can be reached by a daemon thread after Py_Finalize()
-           completes. In this case, tstate is a dangling pointer: points to
-           PyThreadState freed memory. */
-        PyThread_exit_thread();
-    }
-
-    assert(is_tstate_valid(tstate));
-    PyInterpreterState *interp = tstate->interp;
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    struct _gil_runtime_state *gil = &ceval->gil;
-
-    /* Check that _PyEval_InitThreads() was called to create the lock */
-    assert(gil_created(gil));
-
-    MUTEX_LOCK(gil->mutex);
-
-    if (!_Py_atomic_load_relaxed(&gil->locked)) {
+    int err = errno;
+
+    assert(tstate != NULL);
+
+    if (tstate_must_exit(tstate)) {
+        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
+           thread which called Py_Finalize(), exit immediately the thread.
+
+           This code path can be reached by a daemon thread after Py_Finalize()
+           completes. In this case, tstate is a dangling pointer: points to
+           PyThreadState freed memory. */
+        PyThread_exit_thread();
+    }
+
+    assert(is_tstate_valid(tstate));
+    PyInterpreterState *interp = tstate->interp;
+    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
+    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _gil_runtime_state *gil = &ceval->gil;
+
+    /* Check that _PyEval_InitThreads() was called to create the lock */
+    assert(gil_created(gil));
+
+    MUTEX_LOCK(gil->mutex);
+
+    if (!_Py_atomic_load_relaxed(&gil->locked)) {
         goto _ready;
-    }
-
-    while (_Py_atomic_load_relaxed(&gil->locked)) {
-        unsigned long saved_switchnum = gil->switch_number;
-
-        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
-        int timed_out = 0;
-        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
+    }
+
+    while (_Py_atomic_load_relaxed(&gil->locked)) {
+        unsigned long saved_switchnum = gil->switch_number;
+
+        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
+        int timed_out = 0;
+        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
+
         /* If we timed out and no switch occurred in the meantime, it is time
            to ask the GIL-holding thread to drop it. */
         if (timed_out &&
-            _Py_atomic_load_relaxed(&gil->locked) &&
-            gil->switch_number == saved_switchnum)
-        {
-            if (tstate_must_exit(tstate)) {
-                MUTEX_UNLOCK(gil->mutex);
-                PyThread_exit_thread();
-            }
-            assert(is_tstate_valid(tstate));
-
-            SET_GIL_DROP_REQUEST(interp);
+            _Py_atomic_load_relaxed(&gil->locked) &&
+            gil->switch_number == saved_switchnum)
+        {
+            if (tstate_must_exit(tstate)) {
+                MUTEX_UNLOCK(gil->mutex);
+                PyThread_exit_thread();
+            }
+            assert(is_tstate_valid(tstate));
+
+            SET_GIL_DROP_REQUEST(interp);
         }
     }
-
+
 _ready:
 #ifdef FORCE_SWITCHING
-    /* This mutex must be taken before modifying gil->last_holder:
-       see drop_gil(). */
-    MUTEX_LOCK(gil->switch_mutex);
+    /* This mutex must be taken before modifying gil->last_holder:
+       see drop_gil(). */
+    MUTEX_LOCK(gil->switch_mutex);
 #endif
     /* We now hold the GIL */
-    _Py_atomic_store_relaxed(&gil->locked, 1);
-    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
+    _Py_atomic_store_relaxed(&gil->locked, 1);
+    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
 
-    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
-        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
-        ++gil->switch_number;
+    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
+        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
+        ++gil->switch_number;
     }
 
 #ifdef FORCE_SWITCHING
-    COND_SIGNAL(gil->switch_cond);
-    MUTEX_UNLOCK(gil->switch_mutex);
+    COND_SIGNAL(gil->switch_cond);
+    MUTEX_UNLOCK(gil->switch_mutex);
 #endif
-
-    if (tstate_must_exit(tstate)) {
-        /* bpo-36475: If Py_Finalize() has been called and tstate is not
-           the thread which called Py_Finalize(), exit immediately the
-           thread.
-
-           This code path can be reached by a daemon thread which was waiting
-           in take_gil() while the main thread called
-           wait_for_thread_shutdown() from Py_Finalize(). */
-        MUTEX_UNLOCK(gil->mutex);
-        drop_gil(ceval, ceval2, tstate);
-        PyThread_exit_thread();
-    }
-    assert(is_tstate_valid(tstate));
-
-    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST(interp);
-    }
-    else {
-        /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there
-           is a pending signal: signal received by another thread which cannot
-           handle signals.
-
-           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
-        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
-    }
-
-    /* Don't access tstate if the thread must exit */
+
+    if (tstate_must_exit(tstate)) {
+        /* bpo-36475: If Py_Finalize() has been called and tstate is not
+           the thread which called Py_Finalize(), exit immediately the
+           thread.
+
+           This code path can be reached by a daemon thread which was waiting
+           in take_gil() while the main thread called
+           wait_for_thread_shutdown() from Py_Finalize(). */
+        MUTEX_UNLOCK(gil->mutex);
+        drop_gil(ceval, ceval2, tstate);
+        PyThread_exit_thread();
+    }
+    assert(is_tstate_valid(tstate));
+
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
+        RESET_GIL_DROP_REQUEST(interp);
+    }
+    else {
+        /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there
+           is a pending signal: signal received by another thread which cannot
+           handle signals.
+
+           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
+        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    }
+
+    /* Don't access tstate if the thread must exit */
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc(tstate);
+        _PyEval_SignalAsyncExc(tstate);
     }
-    MUTEX_UNLOCK(gil->mutex);
-
+    MUTEX_UNLOCK(gil->mutex);
+
     errno = err;
 }
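
The loop in take_gil() is, at heart, a timed condition-variable wait plus a politeness flag: a waiter sleeps on gil->cond for one switch interval, and if it times out while the holder is unchanged (gil->switch_number did not move), it sets gil_drop_request so the holder's eval loop will yield. A standalone sketch of that handshake under POSIX threads follows; it is illustrative only (toy_gil and friends are hypothetical) and drops the FORCE_SWITCHING and finalization details:

/* Illustrative sketch of take_gil()'s acquisition handshake: wait with a
 * timeout, and if no ownership switch happened in the meantime, ask the
 * holder to drop the lock via a flag it polls at safe points. */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <time.h>

#define INTERVAL_US 5000L                /* mirrors DEFAULT_INTERVAL */

struct toy_gil {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int locked;                          /* guarded by mutex */
    unsigned long switch_number;         /* bumped on every acquisition here;
                                            CPython bumps it only when the
                                            holder actually changes */
    atomic_int drop_request;             /* polled by the holder */
};

static void toy_take(struct toy_gil *g)
{
    pthread_mutex_lock(&g->mutex);
    while (g->locked) {
        unsigned long saved = g->switch_number;
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += INTERVAL_US * 1000L;
        if (deadline.tv_nsec >= 1000000000L) {
            deadline.tv_sec += 1;
            deadline.tv_nsec -= 1000000000L;
        }
        int rc = pthread_cond_timedwait(&g->cond, &g->mutex, &deadline);
        /* Timed out with the same holder still in place: request a drop. */
        if (rc == ETIMEDOUT && g->locked && g->switch_number == saved)
            atomic_store(&g->drop_request, 1);
    }
    g->locked = 1;
    g->switch_number++;
    pthread_mutex_unlock(&g->mutex);
}

static void toy_drop(struct toy_gil *g)
{
    pthread_mutex_lock(&g->mutex);
    g->locked = 0;
    atomic_store(&g->drop_request, 0);
    pthread_cond_signal(&g->cond);       /* wake one waiting toy_take() */
    pthread_mutex_unlock(&g->mutex);
}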
 
 void _PyEval_SetSwitchInterval(unsigned long microseconds)
 {
-    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
-    gil->interval = microseconds;
+    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
+    gil->interval = microseconds;
 }
 
 unsigned long _PyEval_GetSwitchInterval()
 {
-    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
-    return gil->interval;
+    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
+    return gil->interval;
 }
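
These two accessors store the interval in microseconds; the Python-level sys.setswitchinterval() and sys.getswitchinterval() express it in seconds and convert at the boundary, so 0.005 s corresponds to the 5000 µs DEFAULT_INTERVAL above. A hedged sketch of that conversion, assuming access to these internal functions through the CPython headers (the wrapper names below are hypothetical, not CPython's sysmodule code):

/* Sketch: seconds <-> microseconds conversion around the two accessors
 * above, in the spirit of sys.setswitchinterval()/sys.getswitchinterval(). */
#include <Python.h>

static void set_switch_interval_seconds(double seconds)
{
    /* e.g. 0.005 s -> 5000 us, the DEFAULT_INTERVAL */
    _PyEval_SetSwitchInterval((unsigned long)(seconds * 1e6));
}

static double get_switch_interval_seconds(void)
{
    return 1e-6 * _PyEval_GetSwitchInterval();
}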