author     pg <pg@yandex-team.com>  2024-02-08 21:53:38 +0300
committer  pg <pg@yandex-team.com>  2024-02-08 22:09:55 +0300
commit     8f0f06e7e5ea421375dc26ae5146934f64c21a5c (patch)
tree       c3999cdb83dfb8022079d7a631c835b2c3700916
parent     675108d8da834c3551181bb62db4d19e012d4941 (diff)
-rw-r--r--  contrib/go/_std_1.21/src/crypto/internal/boring/notboring.go    1
-rw-r--r--  contrib/go/_std_1.21/src/crypto/tls/cipher_suites.go            8
-rw-r--r--  contrib/go/_std_1.21/src/crypto/tls/handshake_client.go         4
-rw-r--r--  contrib/go/_std_1.21/src/crypto/tls/handshake_client_tls13.go   4
-rw-r--r--  contrib/go/_std_1.21/src/crypto/tls/handshake_server_tls13.go   7
-rw-r--r--  contrib/go/_std_1.21/src/crypto/tls/notboring.go                2
-rw-r--r--  contrib/go/_std_1.21/src/go/types/subst.go                      1
-rw-r--r--  contrib/go/_std_1.21/src/go/types/typeset.go                    1
-rw-r--r--  contrib/go/_std_1.21/src/internal/buildcfg/zbootstrap.go        2
-rw-r--r--  contrib/go/_std_1.21/src/runtime/cgocall.go                    70
-rw-r--r--  contrib/go/_std_1.21/src/runtime/lockrank.go                  212
-rw-r--r--  contrib/go/_std_1.21/src/runtime/pinner.go                      5
-rw-r--r--  contrib/go/_std_1.21/src/runtime/proc.go                       43
-rw-r--r--  contrib/go/_std_1.21/src/runtime/rwmutex.go                    52
-rw-r--r--  contrib/go/_std_1.21/src/runtime/traceback.go                   9
15 files changed, 269 insertions, 152 deletions
diff --git a/contrib/go/_std_1.21/src/crypto/internal/boring/notboring.go b/contrib/go/_std_1.21/src/crypto/internal/boring/notboring.go
index e478791217..1c5e4c742d 100644
--- a/contrib/go/_std_1.21/src/crypto/internal/boring/notboring.go
+++ b/contrib/go/_std_1.21/src/crypto/internal/boring/notboring.go
@@ -50,7 +50,6 @@ func NewHMAC(h func() hash.Hash, key []byte) hash.Hash { panic("boringcrypto: no
func NewAESCipher(key []byte) (cipher.Block, error) { panic("boringcrypto: not available") }
func NewGCMTLS(cipher.Block) (cipher.AEAD, error) { panic("boringcrypto: not available") }
-func NewGCMTLS13(cipher.Block) (cipher.AEAD, error) { panic("boringcrypto: not available") }
type PublicKeyECDSA struct{ _ int }
type PrivateKeyECDSA struct{ _ int }
diff --git a/contrib/go/_std_1.21/src/crypto/tls/cipher_suites.go b/contrib/go/_std_1.21/src/crypto/tls/cipher_suites.go
index fd538da0f7..589e8b6faf 100644
--- a/contrib/go/_std_1.21/src/crypto/tls/cipher_suites.go
+++ b/contrib/go/_std_1.21/src/crypto/tls/cipher_suites.go
@@ -532,13 +532,7 @@ func aeadAESGCMTLS13(key, nonceMask []byte) aead {
if err != nil {
panic(err)
}
- var aead cipher.AEAD
- if boring.Enabled {
- aead, err = boring.NewGCMTLS13(aes)
- } else {
- boring.Unreachable()
- aead, err = cipher.NewGCM(aes)
- }
+ aead, err := cipher.NewGCM(aes)
if err != nil {
panic(err)
}
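
With the BoringCrypto TLS 1.3 path reverted here, aeadAESGCMTLS13 always goes through the standard AES-GCM constructor. For reference, a minimal standalone sketch of that construction outside the TLS internals (key size and plaintext are illustrative, not taken from this patch):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 16) // TLS_AES_128_GCM_SHA256 uses a 16-byte key
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	// Same constructor the patched code now uses unconditionally.
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}
	// Demo only: never reuse a nonce with the same key in real code.
	nonce := make([]byte, aead.NonceSize())
	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	fmt.Printf("ciphertext+tag: %d bytes\n", len(ct))
}
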
diff --git a/contrib/go/_std_1.21/src/crypto/tls/handshake_client.go b/contrib/go/_std_1.21/src/crypto/tls/handshake_client.go
index e81764a912..4649f36dea 100644
--- a/contrib/go/_std_1.21/src/crypto/tls/handshake_client.go
+++ b/contrib/go/_std_1.21/src/crypto/tls/handshake_client.go
@@ -139,9 +139,7 @@ func (c *Conn) makeClientHello() (*clientHelloMsg, *ecdh.PrivateKey, error) {
if len(hello.supportedVersions) == 1 {
hello.cipherSuites = nil
}
- if needFIPS() {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13FIPS...)
- } else if hasAESGCMHardwareSupport {
+ if hasAESGCMHardwareSupport {
hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
} else {
hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
diff --git a/contrib/go/_std_1.21/src/crypto/tls/handshake_client_tls13.go b/contrib/go/_std_1.21/src/crypto/tls/handshake_client_tls13.go
index a84cede1b0..2f59f6888c 100644
--- a/contrib/go/_std_1.21/src/crypto/tls/handshake_client_tls13.go
+++ b/contrib/go/_std_1.21/src/crypto/tls/handshake_client_tls13.go
@@ -41,6 +41,10 @@ type clientHandshakeStateTLS13 struct {
func (hs *clientHandshakeStateTLS13) handshake() error {
c := hs.c
+ if needFIPS() {
+ return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode")
+ }
+
// The server must not select TLS 1.3 in a renegotiation. See RFC 8446,
// sections 4.1.2 and 4.1.3.
if c.handshakes > 0 {
diff --git a/contrib/go/_std_1.21/src/crypto/tls/handshake_server_tls13.go b/contrib/go/_std_1.21/src/crypto/tls/handshake_server_tls13.go
index dd5298b728..07b1a3851e 100644
--- a/contrib/go/_std_1.21/src/crypto/tls/handshake_server_tls13.go
+++ b/contrib/go/_std_1.21/src/crypto/tls/handshake_server_tls13.go
@@ -45,6 +45,10 @@ type serverHandshakeStateTLS13 struct {
func (hs *serverHandshakeStateTLS13) handshake() error {
c := hs.c
+ if needFIPS() {
+ return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode")
+ }
+
// For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2.
if err := hs.processClientHello(); err != nil {
return err
@@ -159,9 +163,6 @@ func (hs *serverHandshakeStateTLS13) processClientHello() error {
if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
preferenceList = defaultCipherSuitesTLS13NoAES
}
- if needFIPS() {
- preferenceList = defaultCipherSuitesTLS13FIPS
- }
for _, suiteID := range preferenceList {
hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
if hs.suite != nil {
diff --git a/contrib/go/_std_1.21/src/crypto/tls/notboring.go b/contrib/go/_std_1.21/src/crypto/tls/notboring.go
index edccb44d87..7d85b39c59 100644
--- a/contrib/go/_std_1.21/src/crypto/tls/notboring.go
+++ b/contrib/go/_std_1.21/src/crypto/tls/notboring.go
@@ -18,5 +18,3 @@ func fipsCurvePreferences(c *Config) []CurveID { panic("fipsCurvePreferences") }
func fipsCipherSuites(c *Config) []uint16 { panic("fipsCipherSuites") }
var fipsSupportedSignatureAlgorithms []SignatureScheme
-
-var defaultCipherSuitesTLS13FIPS []uint16
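
Taken together, the TLS hunks above stop advertising FIPS-specific TLS 1.3 suites, drop the TLS 1.3 FIPS preference list, and turn a TLS 1.3 handshake in FIPS mode into an internal error on both client and server. A hedged implication for callers relying on FIPS-restricted configurations is to cap negotiation at TLS 1.2; a minimal client sketch using only the public crypto/tls API (the endpoint is just an example):

package main

import (
	"crypto/tls"
	"log"
)

func main() {
	cfg := &tls.Config{
		// Keep negotiation at TLS 1.2 so the FIPS-restricted suite list
		// still applies; TLS 1.3 is rejected under FIPS by this patch.
		MinVersion: tls.VersionTLS12,
		MaxVersion: tls.VersionTLS12,
	}
	conn, err := tls.Dial("tcp", "example.com:443", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Printf("negotiated version: %x", conn.ConnectionState().Version)
}
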
diff --git a/contrib/go/_std_1.21/src/go/types/subst.go b/contrib/go/_std_1.21/src/go/types/subst.go
index 30c48e1bad..96bc341a5f 100644
--- a/contrib/go/_std_1.21/src/go/types/subst.go
+++ b/contrib/go/_std_1.21/src/go/types/subst.go
@@ -171,6 +171,7 @@ func (subst *subster) typ(typ Type) Type {
if mcopied || ecopied {
iface := subst.check.newInterface()
iface.embeddeds = embeddeds
+ iface.embedPos = t.embedPos
iface.implicit = t.implicit
iface.complete = t.complete
// If we've changed the interface type, we may need to replace its
diff --git a/contrib/go/_std_1.21/src/go/types/typeset.go b/contrib/go/_std_1.21/src/go/types/typeset.go
index 206aa3da08..4cd118a226 100644
--- a/contrib/go/_std_1.21/src/go/types/typeset.go
+++ b/contrib/go/_std_1.21/src/go/types/typeset.go
@@ -302,7 +302,6 @@ func computeInterfaceTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_T
// separately. Here we only need to intersect the term lists and comparable bits.
allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
}
- ityp.embedPos = nil // not needed anymore (errors have been reported)
ityp.tset.comparable = allComparable
if len(allMethods) != 0 {
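
The two go/types hunks keep embedPos alive on substituted interfaces instead of clearing it during type-set computation, so later errors about embedded terms can still point at source positions. A small driver that type-checks a generic function whose constraint embeds another interface exercises this substitution path (the package name and source string are made up for illustration):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type Number interface{ ~int | ~float64 }

type Ordered interface{ Number } // embedded interface, substituted on instantiation

func Sum[T Ordered](xs []T) T {
	var s T
	for _, x := range xs {
		s += x
	}
	return s
}

var _ = Sum([]int{1, 2, 3})
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{f}, nil); err != nil {
		panic(err)
	}
	fmt.Println("type-checked OK")
}
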
diff --git a/contrib/go/_std_1.21/src/internal/buildcfg/zbootstrap.go b/contrib/go/_std_1.21/src/internal/buildcfg/zbootstrap.go
index b83b3ead43..7b084247c2 100644
--- a/contrib/go/_std_1.21/src/internal/buildcfg/zbootstrap.go
+++ b/contrib/go/_std_1.21/src/internal/buildcfg/zbootstrap.go
@@ -13,6 +13,6 @@ const defaultGOPPC64 = `power8`
const defaultGOEXPERIMENT = ``
const defaultGO_EXTLINK_ENABLED = ``
const defaultGO_LDSO = ``
-const version = `go1.21.6`
+const version = `go1.21.7`
const defaultGOOS = runtime.GOOS
const defaultGOARCH = runtime.GOARCH
diff --git a/contrib/go/_std_1.21/src/runtime/cgocall.go b/contrib/go/_std_1.21/src/runtime/cgocall.go
index 1da7249abc..d226c2e907 100644
--- a/contrib/go/_std_1.21/src/runtime/cgocall.go
+++ b/contrib/go/_std_1.21/src/runtime/cgocall.go
@@ -206,6 +206,73 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
return errno
}
+// Set or reset the system stack bounds for a callback on sp.
+//
+// Must be nosplit because it is called by needm prior to fully initializing
+// the M.
+//
+//go:nosplit
+func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
+ g0 := mp.g0
+ if sp > g0.stack.lo && sp <= g0.stack.hi {
+ // Stack already in bounds, nothing to do.
+ return
+ }
+
+ if mp.ncgo > 0 {
+ // ncgo > 0 indicates that this M was in Go further up the stack
+ // (it called C and is now receiving a callback). It is not
+ // safe for the C call to change the stack out from under us.
+
+ // Note that this case isn't possible for signal == true, as
+ // that is always passing a new M from needm.
+
+ // Stack is bogus, but reset the bounds anyway so we can print.
+ hi := g0.stack.hi
+ lo := g0.stack.lo
+ g0.stack.hi = sp + 1024
+ g0.stack.lo = sp - 32*1024
+ g0.stackguard0 = g0.stack.lo + stackGuard
+
+ print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]")
+ print("\n")
+ exit(2)
+ }
+
+ // This M does not have Go further up the stack. However, it may have
+ // previously called into Go, initializing the stack bounds. Between
+ // that call returning and now the stack may have changed (perhaps the
+ // C thread is running a coroutine library). We need to update the
+ // stack bounds for this case.
+ //
+ // Set the stack bounds to match the current stack. If we don't
+ // actually know how big the stack is, like we don't know how big any
+ // scheduling stack is, but we assume there's at least 32 kB. If we
+ // can get a more accurate stack bound from pthread, use that, provided
+	// it actually contains SP.
+ g0.stack.hi = sp + 1024
+ g0.stack.lo = sp - 32*1024
+ if !signal && _cgo_getstackbound != nil {
+ // Don't adjust if called from the signal handler.
+ // We are on the signal stack, not the pthread stack.
+ // (We could get the stack bounds from sigaltstack, but
+ // we're getting out of the signal handler very soon
+ // anyway. Not worth it.)
+ var bounds [2]uintptr
+ asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
+ // getstackbound is an unsupported no-op on Windows.
+ //
+ // Don't use these bounds if they don't contain SP. Perhaps we
+ // were called by something not using the standard thread
+ // stack.
+ if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
+ g0.stack.lo = bounds[0]
+ g0.stack.hi = bounds[1]
+ }
+ }
+ g0.stackguard0 = g0.stack.lo + stackGuard
+}
+
// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
//
//go:nosplit
@@ -216,6 +283,9 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
exit(2)
}
+ sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
+ callbackUpdateSystemStack(gp.m, sp, false)
+
// The call from C is on gp.m's g0 stack, so we must ensure
// that we stay on that M. We have to do this before calling
// exitsyscall, since it would otherwise be free to move us to
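
The new callbackUpdateSystemStack above boils down to a small decision: keep the recorded g0 bounds if SP is already inside them, abort if Go frames are on the stack (ncgo > 0) because C must not have switched stacks underneath them, and otherwise guess a 32 kB window around SP, tightened only if pthread reports bounds that actually contain SP. A standalone toy model of that decision, in plain Go rather than runtime code (the struct, the getBounds callback standing in for _cgo_getstackbound, and the omission of the signal case are all simplifications):

package main

import "fmt"

type stackBounds struct{ lo, hi uintptr }

// updateBounds mimics the callbackUpdateSystemStack policy on a toy struct.
// getBounds stands in for _cgo_getstackbound; pass nil to skip it (the real
// code skips it when called from the signal handler).
func updateBounds(b *stackBounds, sp uintptr, goOnStack bool, getBounds func() (lo, hi uintptr)) error {
	if sp > b.lo && sp <= b.hi {
		return nil // SP already inside the recorded bounds: nothing to do
	}
	if goOnStack {
		// Corresponds to ncgo > 0: the stack must not change under us.
		return fmt.Errorf("callback sp=%#x outside [%#x, %#x) with Go frames on stack", sp, b.lo, b.hi)
	}
	// Assume at least 32 kB below SP and a little headroom above it.
	b.hi = sp + 1024
	b.lo = sp - 32*1024
	if getBounds != nil {
		if lo, hi := getBounds(); lo != 0 && sp > lo && sp <= hi {
			b.lo, b.hi = lo, hi // trust pthread only if its range contains SP
		}
	}
	return nil
}

func main() {
	b := stackBounds{}
	_ = updateBounds(&b, 0x7fff_0000, false, func() (uintptr, uintptr) { return 0x7ffe_0000, 0x7fff_1000 })
	fmt.Printf("bounds: [%#x, %#x)\n", b.lo, b.hi)
}
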
diff --git a/contrib/go/_std_1.21/src/runtime/lockrank.go b/contrib/go/_std_1.21/src/runtime/lockrank.go
index 4d661e93dc..fb99536a57 100644
--- a/contrib/go/_std_1.21/src/runtime/lockrank.go
+++ b/contrib/go/_std_1.21/src/runtime/lockrank.go
@@ -18,8 +18,15 @@ const (
lockRankSweepWaiters
lockRankAssistQueue
lockRankSweep
- lockRankPollDesc
+ lockRankTestR
+ lockRankTestW
+ lockRankAllocmW
+ lockRankExecW
lockRankCpuprof
+ lockRankPollDesc
+ // SCHED
+ lockRankAllocmR
+ lockRankExecR
lockRankSched
lockRankAllg
lockRankAllp
@@ -28,8 +35,6 @@ const (
lockRankHchan
lockRankNotifyList
lockRankSudog
- lockRankRwmutexW
- lockRankRwmutexR
lockRankRoot
lockRankItab
lockRankReflectOffs
@@ -63,6 +68,9 @@ const (
lockRankPanic
lockRankDeadlock
lockRankRaceFini
+ lockRankAllocmRInternal
+ lockRankExecRInternal
+ lockRankTestRInternal
)
// lockRankLeafRank is the rank of lock that does not have a declared rank,
@@ -71,52 +79,59 @@ const lockRankLeafRank lockRank = 1000
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
- lockRankSysmon: "sysmon",
- lockRankScavenge: "scavenge",
- lockRankForcegc: "forcegc",
- lockRankDefer: "defer",
- lockRankSweepWaiters: "sweepWaiters",
- lockRankAssistQueue: "assistQueue",
- lockRankSweep: "sweep",
- lockRankPollDesc: "pollDesc",
- lockRankCpuprof: "cpuprof",
- lockRankSched: "sched",
- lockRankAllg: "allg",
- lockRankAllp: "allp",
- lockRankTimers: "timers",
- lockRankNetpollInit: "netpollInit",
- lockRankHchan: "hchan",
- lockRankNotifyList: "notifyList",
- lockRankSudog: "sudog",
- lockRankRwmutexW: "rwmutexW",
- lockRankRwmutexR: "rwmutexR",
- lockRankRoot: "root",
- lockRankItab: "itab",
- lockRankReflectOffs: "reflectOffs",
- lockRankUserArenaState: "userArenaState",
- lockRankTraceBuf: "traceBuf",
- lockRankTraceStrings: "traceStrings",
- lockRankFin: "fin",
- lockRankSpanSetSpine: "spanSetSpine",
- lockRankMspanSpecial: "mspanSpecial",
- lockRankGcBitsArenas: "gcBitsArenas",
- lockRankProfInsert: "profInsert",
- lockRankProfBlock: "profBlock",
- lockRankProfMemActive: "profMemActive",
- lockRankProfMemFuture: "profMemFuture",
- lockRankGscan: "gscan",
- lockRankStackpool: "stackpool",
- lockRankStackLarge: "stackLarge",
- lockRankHchanLeaf: "hchanLeaf",
- lockRankWbufSpans: "wbufSpans",
- lockRankMheap: "mheap",
- lockRankMheapSpecial: "mheapSpecial",
- lockRankGlobalAlloc: "globalAlloc",
- lockRankTrace: "trace",
- lockRankTraceStackTab: "traceStackTab",
- lockRankPanic: "panic",
- lockRankDeadlock: "deadlock",
- lockRankRaceFini: "raceFini",
+ lockRankSysmon: "sysmon",
+ lockRankScavenge: "scavenge",
+ lockRankForcegc: "forcegc",
+ lockRankDefer: "defer",
+ lockRankSweepWaiters: "sweepWaiters",
+ lockRankAssistQueue: "assistQueue",
+ lockRankSweep: "sweep",
+ lockRankTestR: "testR",
+ lockRankTestW: "testW",
+ lockRankAllocmW: "allocmW",
+ lockRankExecW: "execW",
+ lockRankCpuprof: "cpuprof",
+ lockRankPollDesc: "pollDesc",
+ lockRankAllocmR: "allocmR",
+ lockRankExecR: "execR",
+ lockRankSched: "sched",
+ lockRankAllg: "allg",
+ lockRankAllp: "allp",
+ lockRankTimers: "timers",
+ lockRankNetpollInit: "netpollInit",
+ lockRankHchan: "hchan",
+ lockRankNotifyList: "notifyList",
+ lockRankSudog: "sudog",
+ lockRankRoot: "root",
+ lockRankItab: "itab",
+ lockRankReflectOffs: "reflectOffs",
+ lockRankUserArenaState: "userArenaState",
+ lockRankTraceBuf: "traceBuf",
+ lockRankTraceStrings: "traceStrings",
+ lockRankFin: "fin",
+ lockRankSpanSetSpine: "spanSetSpine",
+ lockRankMspanSpecial: "mspanSpecial",
+ lockRankGcBitsArenas: "gcBitsArenas",
+ lockRankProfInsert: "profInsert",
+ lockRankProfBlock: "profBlock",
+ lockRankProfMemActive: "profMemActive",
+ lockRankProfMemFuture: "profMemFuture",
+ lockRankGscan: "gscan",
+ lockRankStackpool: "stackpool",
+ lockRankStackLarge: "stackLarge",
+ lockRankHchanLeaf: "hchanLeaf",
+ lockRankWbufSpans: "wbufSpans",
+ lockRankMheap: "mheap",
+ lockRankMheapSpecial: "mheapSpecial",
+ lockRankGlobalAlloc: "globalAlloc",
+ lockRankTrace: "trace",
+ lockRankTraceStackTab: "traceStackTab",
+ lockRankPanic: "panic",
+ lockRankDeadlock: "deadlock",
+ lockRankRaceFini: "raceFini",
+ lockRankAllocmRInternal: "allocmRInternal",
+ lockRankExecRInternal: "execRInternal",
+ lockRankTestRInternal: "testRInternal",
}
func (rank lockRank) String() string {
@@ -138,50 +153,57 @@ func (rank lockRank) String() string {
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
- lockRankSysmon: {},
- lockRankScavenge: {lockRankSysmon},
- lockRankForcegc: {lockRankSysmon},
- lockRankDefer: {},
- lockRankSweepWaiters: {},
- lockRankAssistQueue: {},
- lockRankSweep: {},
- lockRankPollDesc: {},
- lockRankCpuprof: {},
- lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof},
- lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
- lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
- lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
- lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
- lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan},
- lockRankNotifyList: {},
- lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan, lockRankNotifyList},
- lockRankRwmutexW: {},
- lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
- lockRankRoot: {},
- lockRankItab: {},
- lockRankReflectOffs: {lockRankItab},
- lockRankUserArenaState: {},
- lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
- lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
- lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
- lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
- lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
- lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
- lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
- lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
- lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
- lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
- lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
- lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
- lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
- lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
- lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
- lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
- lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
- lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
- lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
- lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
- lockRankPanic: {},
- lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
- lockRankRaceFini: {lockRankPanic},
+ lockRankSysmon: {},
+ lockRankScavenge: {lockRankSysmon},
+ lockRankForcegc: {lockRankSysmon},
+ lockRankDefer: {},
+ lockRankSweepWaiters: {},
+ lockRankAssistQueue: {},
+ lockRankSweep: {},
+ lockRankTestR: {},
+ lockRankTestW: {},
+ lockRankAllocmW: {},
+ lockRankExecW: {},
+ lockRankCpuprof: {},
+ lockRankPollDesc: {},
+ lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc},
+ lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc},
+ lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR},
+ lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched},
+ lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched},
+ lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers},
+ lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers},
+ lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankHchan},
+ lockRankNotifyList: {},
+ lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankHchan, lockRankNotifyList},
+ lockRankRoot: {},
+ lockRankItab: {},
+ lockRankReflectOffs: {lockRankItab},
+ lockRankUserArenaState: {},
+ lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
+ lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
+ lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
+ lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
+ lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
+ lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+ lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+ lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
+ lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+ lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
+ lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+ lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
+ lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+ lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
+ lockRankPanic: {},
+ lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
+ lockRankRaceFini: {lockRankPanic},
+ lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankAllocmR},
+ lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankExecR},
+ lockRankTestRInternal: {lockRankTestR, lockRankTestW},
}
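
lockrank.go is generated from the runtime's lock-rank graph (mklockrank.go), so this hunk mostly re-renders the tables with the new testR/testW, allocm and exec ranks replacing the old rwmutexR/rwmutexW pair. The invariant the table encodes — a lock may only be acquired while every lock already held appears in its allowed-predecessor list — can be sketched outside the runtime like this (the rank names and the tiny checker are illustrative only, not runtime code):

package main

import "fmt"

type rank int

const (
	rankSched rank = iota
	rankAllg
	rankHchan
)

// allowed lists, per rank, which ranks may already be held when it is
// acquired, mirroring the shape of lockPartialOrder in lockrank.go.
var allowed = map[rank][]rank{
	rankSched: {},
	rankAllg:  {rankSched},
	rankHchan: {},
}

type checker struct{ held []rank }

func (c *checker) acquire(r rank) error {
	for _, h := range c.held {
		ok := false
		for _, a := range allowed[r] {
			if h == a {
				ok = true
				break
			}
		}
		if !ok {
			return fmt.Errorf("lock ordering violation: rank %d held while acquiring rank %d", h, r)
		}
	}
	c.held = append(c.held, r)
	return nil
}

func main() {
	var c checker
	fmt.Println(c.acquire(rankSched)) // <nil>
	fmt.Println(c.acquire(rankAllg))  // <nil>: sched is an allowed predecessor of allg
	fmt.Println(c.acquire(rankHchan)) // error: sched is held but not allowed before hchan here
}
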
diff --git a/contrib/go/_std_1.21/src/runtime/pinner.go b/contrib/go/_std_1.21/src/runtime/pinner.go
index 8bb351eb8f..75de8be02a 100644
--- a/contrib/go/_std_1.21/src/runtime/pinner.go
+++ b/contrib/go/_std_1.21/src/runtime/pinner.go
@@ -25,10 +25,7 @@ type Pinner struct {
// objects, these objects must be pinned separately if they are going to be
// accessed from C code.
//
-// The argument must be a pointer of any type or an
-// unsafe.Pointer. It must be the result of calling new,
-// taking the address of a composite literal, or taking the address of a
-// local variable. If one of these conditions is not met, Pin will panic.
+// The argument must be a pointer of any type or an unsafe.Pointer.
func (p *Pinner) Pin(pointer any) {
if p.pinner == nil {
// Check the pinner cache first.
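
The doc change above loosens Pin's documented precondition (the old text limited it to results of new, addresses of composite literals, and addresses of local variables). For context, basic usage of the public runtime.Pinner API looks like this (the payload struct is just an example):

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

type payload struct {
	n int
}

func main() {
	p := &payload{n: 42}

	var pinner runtime.Pinner
	pinner.Pin(p) // the pointee will not be moved or freed until Unpin
	defer pinner.Unpin()

	// While pinned, the address could safely be handed to C via cgo as an
	// unsafe.Pointer; here we only print it.
	fmt.Printf("pinned object at %p (%d bytes)\n", p, unsafe.Sizeof(*p))
}
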
diff --git a/contrib/go/_std_1.21/src/runtime/proc.go b/contrib/go/_std_1.21/src/runtime/proc.go
index afb33c1e8b..b408337015 100644
--- a/contrib/go/_std_1.21/src/runtime/proc.go
+++ b/contrib/go/_std_1.21/src/runtime/proc.go
@@ -703,6 +703,8 @@ func schedinit() {
lockInit(&reflectOffs.lock, lockRankReflectOffs)
lockInit(&finlock, lockRankFin)
lockInit(&cpuprof.lock, lockRankCpuprof)
+ allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
+ execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
traceLockInit()
// Enforce that this lock is always a leaf lock.
// All of this lock's critical sections should be
@@ -2037,30 +2039,10 @@ func needm(signal bool) {
osSetupTLS(mp)
// Install g (= m->g0) and set the stack bounds
- // to match the current stack. If we don't actually know
- // how big the stack is, like we don't know how big any
- // scheduling stack is, but we assume there's at least 32 kB.
- // If we can get a more accurate stack bound from pthread,
- // use that.
+ // to match the current stack.
setg(mp.g0)
- gp := getg()
- gp.stack.hi = getcallersp() + 1024
- gp.stack.lo = getcallersp() - 32*1024
- if !signal && _cgo_getstackbound != nil {
- // Don't adjust if called from the signal handler.
- // We are on the signal stack, not the pthread stack.
- // (We could get the stack bounds from sigaltstack, but
- // we're getting out of the signal handler very soon
- // anyway. Not worth it.)
- var bounds [2]uintptr
- asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
- // getstackbound is an unsupported no-op on Windows.
- if bounds[0] != 0 {
- gp.stack.lo = bounds[0]
- gp.stack.hi = bounds[1]
- }
- }
- gp.stackguard0 = gp.stack.lo + stackGuard
+ sp := getcallersp()
+ callbackUpdateSystemStack(mp, sp, signal)
// Should mark we are already in Go now.
// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
@@ -2177,9 +2159,14 @@ func oneNewExtraM() {
// So that the destructor would invoke dropm while the non-Go thread is exiting.
// This is much faster since it avoids expensive signal-related syscalls.
//
-// NOTE: this always runs without a P, so, nowritebarrierrec required.
+// This always runs without a P, so //go:nowritebarrierrec is required.
+//
+// This may run with a different stack than was recorded in g0 (there is no
+// call to callbackUpdateSystemStack prior to dropm), so this must be
+// //go:nosplit to avoid the stack bounds check.
//
//go:nowritebarrierrec
+//go:nosplit
func dropm() {
// Clear m and g, and return m to the extra list.
// After the call to setg we can only call nosplit functions
@@ -2201,6 +2188,14 @@ func dropm() {
setg(nil)
+ // Clear g0 stack bounds to ensure that needm always refreshes the
+ // bounds when reusing this M.
+ g0 := mp.g0
+ g0.stack.hi = 0
+ g0.stack.lo = 0
+ g0.stackguard0 = 0
+ g0.stackguard1 = 0
+
putExtraM(mp)
msigrestore(sigmask)
diff --git a/contrib/go/_std_1.21/src/runtime/rwmutex.go b/contrib/go/_std_1.21/src/runtime/rwmutex.go
index ede3d13599..34d8f675c1 100644
--- a/contrib/go/_std_1.21/src/runtime/rwmutex.go
+++ b/contrib/go/_std_1.21/src/runtime/rwmutex.go
@@ -25,6 +25,43 @@ type rwmutex struct {
readerCount atomic.Int32 // number of pending readers
readerWait atomic.Int32 // number of departing readers
+
+ readRank lockRank // semantic lock rank for read locking
+}
+
+// Lock ranking an rwmutex has two aspects:
+//
+// Semantic ranking: this rwmutex represents some higher level lock that
+// protects some resource (e.g., allocmLock protects creation of new Ms). The
+// read and write locks of that resource need to be represented in the lock
+// rank.
+//
+// Internal ranking: as an implementation detail, rwmutex uses two mutexes:
+// rLock and wLock. These have lock order requirements: wLock must be locked
+// before rLock. This also needs to be represented in the lock rank.
+//
+// Semantic ranking is represented by acquiring readRank during read lock and
+// writeRank during write lock.
+//
+// wLock is held for the duration of a write lock, so it uses writeRank
+// directly, both for semantic and internal ranking. rLock is only held
+// temporarily inside the rlock/lock methods, so it uses readRankInternal to
+// represent internal ranking. Semantic ranking is represented by a separate
+// acquire of readRank for the duration of a read lock.
+//
+// The lock ranking must document this ordering:
+// - readRankInternal is a leaf lock.
+// - readRank is taken before readRankInternal.
+// - writeRank is taken before readRankInternal.
+// - readRank is placed in the lock order wherever a read lock of this rwmutex
+// belongs.
+// - writeRank is placed in the lock order wherever a write lock of this
+// rwmutex belongs.
+func (rw *rwmutex) init(readRank, readRankInternal, writeRank lockRank) {
+ rw.readRank = readRank
+
+ lockInit(&rw.rLock, readRankInternal)
+ lockInit(&rw.wLock, writeRank)
}
const rwmutexMaxReaders = 1 << 30
@@ -36,10 +73,14 @@ func (rw *rwmutex) rlock() {
// deadlock (issue #20903). Alternatively, we could drop the P
// while sleeping.
acquirem()
+
+ acquireLockRank(rw.readRank)
+ lockWithRankMayAcquire(&rw.rLock, getLockRank(&rw.rLock))
+
if rw.readerCount.Add(1) < 0 {
// A writer is pending. Park on the reader queue.
systemstack(func() {
- lockWithRank(&rw.rLock, lockRankRwmutexR)
+ lock(&rw.rLock)
if rw.readerPass > 0 {
// Writer finished.
rw.readerPass -= 1
@@ -67,7 +108,7 @@ func (rw *rwmutex) runlock() {
// A writer is pending.
if rw.readerWait.Add(-1) == 0 {
// The last reader unblocks the writer.
- lockWithRank(&rw.rLock, lockRankRwmutexR)
+ lock(&rw.rLock)
w := rw.writer.ptr()
if w != nil {
notewakeup(&w.park)
@@ -75,18 +116,19 @@ func (rw *rwmutex) runlock() {
unlock(&rw.rLock)
}
}
+ releaseLockRank(rw.readRank)
releasem(getg().m)
}
// lock locks rw for writing.
func (rw *rwmutex) lock() {
// Resolve competition with other writers and stick to our P.
- lockWithRank(&rw.wLock, lockRankRwmutexW)
+ lock(&rw.wLock)
m := getg().m
// Announce that there is a pending writer.
r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
// Wait for any active readers to complete.
- lockWithRank(&rw.rLock, lockRankRwmutexR)
+ lock(&rw.rLock)
if r != 0 && rw.readerWait.Add(r) != 0 {
// Wait for reader to wake us up.
systemstack(func() {
@@ -108,7 +150,7 @@ func (rw *rwmutex) unlock() {
throw("unlock of unlocked rwmutex")
}
// Unblock blocked readers.
- lockWithRank(&rw.rLock, lockRankRwmutexR)
+ lock(&rw.rLock)
for rw.readers.ptr() != nil {
reader := rw.readers.ptr()
rw.readers = reader.schedlink
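
The comment block added above spells out the ordering rules: wLock is acquired before rLock, rLock is an internal leaf, and the semantic read rank is held for the whole read-side critical section. A toy, non-runtime model of the two-mutex shape shows why "wLock before rLock" is the order that must be declared; it mirrors only the acquisition order, not the real parking behavior (everything here is illustrative):

package main

import (
	"fmt"
	"sync"
)

// toyRWLock mirrors only the locking order of runtime.rwmutex: writers hold
// wLock for the whole write section and take rLock briefly; readers take
// rLock briefly to update shared state. wLock therefore ranks before rLock.
type toyRWLock struct {
	wLock   sync.Mutex // held for the duration of a write lock
	rLock   sync.Mutex // protects readers; only held momentarily (leaf)
	readers int
}

func (l *toyRWLock) rlock() {
	l.rLock.Lock() // leaf: nothing else is acquired while it is held
	l.readers++
	l.rLock.Unlock()
}

func (l *toyRWLock) runlock() {
	l.rLock.Lock()
	l.readers--
	l.rLock.Unlock()
}

func (l *toyRWLock) lock() {
	l.wLock.Lock() // acquired first: "wLock before rLock" in the rank order
	l.rLock.Lock()
	pending := l.readers
	l.rLock.Unlock()
	_ = pending // the real runtime parks here until readers drain
}

func (l *toyRWLock) unlock() {
	l.wLock.Unlock()
}

func main() {
	var l toyRWLock
	l.rlock()
	l.runlock()
	l.lock()
	l.unlock()
	fmt.Println("acquisition order: wLock -> rLock; rLock is a leaf")
}
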
diff --git a/contrib/go/_std_1.21/src/runtime/traceback.go b/contrib/go/_std_1.21/src/runtime/traceback.go
index 72200d436f..32a538526c 100644
--- a/contrib/go/_std_1.21/src/runtime/traceback.go
+++ b/contrib/go/_std_1.21/src/runtime/traceback.go
@@ -359,15 +359,12 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) {
//
// uSE uPE inn | action
// T _ _ | frame.lr = 0
- // F T F | frame.lr = 0; print
- // F T T | frame.lr = 0
+ // F T _ | frame.lr = 0
// F F F | print; panic
// F F T | ignore SPWrite
- if u.flags&unwindSilentErrors == 0 && !innermost {
+ if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && !innermost {
println("traceback: unexpected SPWRITE function", funcname(f))
- if u.flags&unwindPrintErrors == 0 {
- throw("traceback")
- }
+ throw("traceback")
}
frame.lr = 0
} else {
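
The traceback hunk folds the SPWRITE truth table: with unwindPrintErrors set (and silent errors not set), a non-innermost SPWRITE frame no longer prints at all, and the message plus throw now happens only when neither flag is set. A tiny table driver makes the before/after behavior explicit (the flag constants are local stand-ins for the runtime's unwind flags):

package main

import "fmt"

const (
	printErrors  = 1 << 0 // stand-in for unwindPrintErrors
	silentErrors = 1 << 1 // stand-in for unwindSilentErrors
)

func oldAction(flags int, innermost bool) string {
	if flags&silentErrors == 0 && !innermost {
		if flags&printErrors == 0 {
			return "print; throw"
		}
		return "print; frame.lr = 0"
	}
	return "frame.lr = 0"
}

func newAction(flags int, innermost bool) string {
	if flags&(printErrors|silentErrors) == 0 && !innermost {
		return "print; throw"
	}
	return "frame.lr = 0"
}

func main() {
	for _, flags := range []int{0, printErrors, silentErrors} {
		for _, innermost := range []bool{false, true} {
			fmt.Printf("flags=%02b innermost=%-5v old=%-20s new=%s\n",
				flags, innermost, oldAction(flags, innermost), newAction(flags, innermost))
		}
	}
	// Only the (printErrors set, not innermost) row changes: the old code
	// printed without throwing, the new code does neither.
}
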