author     pg <pg@yandex-team.com>  2024-02-08 21:53:38 +0300
committer  pg <pg@yandex-team.com>  2024-02-08 22:09:55 +0300
commit     8f0f06e7e5ea421375dc26ae5146934f64c21a5c (patch)
tree       c3999cdb83dfb8022079d7a631c835b2c3700916  /contrib/go/_std_1.21/src/runtime/proc.go
parent     675108d8da834c3551181bb62db4d19e012d4941 (diff)
Diffstat (limited to 'contrib/go/_std_1.21/src/runtime/proc.go')
-rw-r--r--  contrib/go/_std_1.21/src/runtime/proc.go | 43
 1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/contrib/go/_std_1.21/src/runtime/proc.go b/contrib/go/_std_1.21/src/runtime/proc.go
index afb33c1e8b..b408337015 100644
--- a/contrib/go/_std_1.21/src/runtime/proc.go
+++ b/contrib/go/_std_1.21/src/runtime/proc.go
@@ -703,6 +703,8 @@ func schedinit() {
lockInit(&reflectOffs.lock, lockRankReflectOffs)
lockInit(&finlock, lockRankFin)
lockInit(&cpuprof.lock, lockRankCpuprof)
+ allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
+ execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
traceLockInit()
// Enforce that this lock is always a leaf lock.
// All of this lock's critical sections should be
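
The two lines added to schedinit register lock-rank metadata for allocmLock and execLock. Both are runtime-internal rwmutexes, and each init call supplies three ranks: one for the read side, one for the rwmutex's own internal lock, and one for the write side. As a rough illustration of the read/write pattern these locks serve, here is a standalone sketch using sync.RWMutex from the standard library rather than the runtime's rwmutex (the names createThread and doExec are hypothetical):

	package main

	import (
		"fmt"
		"sync"
	)

	// threadExecLock plays the role of execLock: creating a thread takes
	// the read side, so many creations can proceed concurrently, while
	// exec takes the write side and excludes all concurrent creation.
	var threadExecLock sync.RWMutex

	func createThread(id int) {
		threadExecLock.RLock()
		defer threadExecLock.RUnlock()
		fmt.Println("creating thread", id)
	}

	func doExec() {
		threadExecLock.Lock()
		defer threadExecLock.Unlock()
		fmt.Println("exec: no thread creation can race with this")
	}

	func main() {
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				createThread(id)
			}(i)
		}
		wg.Wait()
		doExec()
	}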
@@ -2037,30 +2039,10 @@ func needm(signal bool) {
osSetupTLS(mp)
// Install g (= m->g0) and set the stack bounds
- // to match the current stack. If we don't actually know
- // how big the stack is, like we don't know how big any
- // scheduling stack is, but we assume there's at least 32 kB.
- // If we can get a more accurate stack bound from pthread,
- // use that.
+ // to match the current stack.
setg(mp.g0)
- gp := getg()
- gp.stack.hi = getcallersp() + 1024
- gp.stack.lo = getcallersp() - 32*1024
- if !signal && _cgo_getstackbound != nil {
- // Don't adjust if called from the signal handler.
- // We are on the signal stack, not the pthread stack.
- // (We could get the stack bounds from sigaltstack, but
- // we're getting out of the signal handler very soon
- // anyway. Not worth it.)
- var bounds [2]uintptr
- asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
- // getstackbound is an unsupported no-op on Windows.
- if bounds[0] != 0 {
- gp.stack.lo = bounds[0]
- gp.stack.hi = bounds[1]
- }
- }
- gp.stackguard0 = gp.stack.lo + stackGuard
+ sp := getcallersp()
+ callbackUpdateSystemStack(mp, sp, signal)
// Should mark we are already in Go now.
// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
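
This hunk is the core of the change: instead of estimating g0's stack bounds inline, needm now hands the caller's SP to callbackUpdateSystemStack, which can also run again on later reentries of the same M. Below is a sketch of what that helper plausibly does, reconstructed from the removed lines above; it is runtime-internal code, so it is not compilable on its own, and the early-return check is an assumption based on the dropm changes further down (every other identifier appears in the deleted code):

	func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
		g0 := mp.g0
		if sp > g0.stack.lo && sp < g0.stack.hi {
			// SP already falls inside the recorded bounds: the M is
			// being reused on the same thread stack, nothing to do.
			return
		}
		// Otherwise assume at least 32 kB of stack below the caller
		// and a little headroom above, as the removed needm code did.
		g0.stack.hi = sp + 1024
		g0.stack.lo = sp - 32*1024
		if !signal && _cgo_getstackbound != nil {
			// Not on the signal stack, so pthread can supply exact
			// bounds. (_cgo_getstackbound is a no-op on Windows.)
			var bounds [2]uintptr
			asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
			if bounds[0] != 0 {
				g0.stack.lo = bounds[0]
				g0.stack.hi = bounds[1]
			}
		}
		g0.stackguard0 = g0.stack.lo + stackGuard
	}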
@@ -2177,9 +2159,14 @@ func oneNewExtraM() {
// So that the destructor would invoke dropm while the non-Go thread is exiting.
// This is much faster since it avoids expensive signal-related syscalls.
//
-// NOTE: this always runs without a P, so, nowritebarrierrec required.
+// This always runs without a P, so //go:nowritebarrierrec is required.
+//
+// This may run with a different stack than was recorded in g0 (there is no
+// call to callbackUpdateSystemStack prior to dropm), so this must be
+// //go:nosplit to avoid the stack bounds check.
//
//go:nowritebarrierrec
+//go:nosplit
func dropm() {
// Clear m and g, and return m to the extra list.
// After the call to setg we can only call nosplit functions
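
For context on why //go:nosplit matters here: the compiler emits a stack check into the prologue of every splittable function, and if g0's recorded bounds no longer describe the stack dropm is actually running on, that check could misfire. Roughly, in Go-flavoured pseudocode (the real check is per-architecture assembly, so this is illustrative only):

	// implicit prologue of any function NOT marked //go:nosplit
	if sp < gp.stackguard0 {
		morestack() // grows the stack; on g0 this aborts instead
	}

Marking dropm //go:nosplit suppresses this prologue, so stale g0 bounds cannot trigger a spurious stack overflow.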
@@ -2201,6 +2188,14 @@ func dropm() {
setg(nil)
+ // Clear g0 stack bounds to ensure that needm always refreshes the
+ // bounds when reusing this M.
+ g0 := mp.g0
+ g0.stack.hi = 0
+ g0.stack.lo = 0
+ g0.stackguard0 = 0
+ g0.stackguard1 = 0
+
putExtraM(mp)
msigrestore(sigmask)
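
The zeroed bounds are what make the needm side safe to simplify: with g0.stack.lo and g0.stack.hi cleared, an in-bounds early return in callbackUpdateSystemStack (sketched above) can never be taken, so the next needm on this M must recompute the bounds for whichever thread picks it up. A condensed view of the reuse cycle (hypothetical outline, not actual runtime code):

	// C thread enters Go:
	//   needm  -> setg(mp.g0)
	//          -> callbackUpdateSystemStack(mp, sp, signal) // fresh bounds
	// ...Go callback runs...
	// C thread leaves Go:
	//   dropm  -> setg(nil)
	//          -> zero g0 stack bounds and guards           // poison for reuse
	//          -> putExtraM(mp)                             // M back on the list
	// A different thread may grab this M later; the zeroed bounds force a
	// refresh instead of trusting values recorded for the old stack.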