path: root/contrib/go/_std_1.21/src/runtime/debugcall.go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64

package runtime

import (
	"internal/abi"
	"unsafe"
)

const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)

// debugCallV2 and debugCallPanicked are implemented in assembly
// (see asm_amd64.s and asm_arm64.s).
func debugCallV2()
func debugCallPanicked(val any)

// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why; if so, it returns the empty string.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		if pc != f.entry() {
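			// pc is a return PC, so back it up into the call it
			// follows (the same convention stack walking uses for
			// return PCs) before looking up its PCDATA.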
			pc--
		}
		up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc, nil)
		if up != abi.UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}

// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	var lockedExt uint32
	callerpc := getcallerpc()
	gp := getg()

	// Lock ourselves to the OS thread.
	//
	// Debuggers rely on us running on the same thread until we get to
	// dispatch the function they asked us to.
	//
	// We're going to transfer this lock to the new goroutine we
	// create below.
	lockOSThread()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
		fn := debugCallWrap1
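		// A func value is represented as a *funcval; reinterpret
		// fn's representation to extract the *funcval that
		// newproc1 expects.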
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
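		// newproc1 can't pass arguments to the new goroutine,
		// so hand them to debugCallWrap1 through newg.param.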
		newg.param = unsafe.Pointer(args)

		// Transfer locked-ness to the new goroutine.
		// Save lock state to restore later.
		mp := gp.m
		if mp != gp.lockedm.ptr() {
			throw("inconsistent lockedm")
		}
		// Save the external lock count and clear it so
		// that it can't be unlocked from the debug call.
		// Note: we already locked internally to the thread,
		// so if we were locked before we're still locked now.
		lockedExt = mp.lockedExt
		mp.lockedExt = 0

		mp.lockedg.set(newg)
		newg.lockedm.set(mp)
		gp.lockedm = 0

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		if traceEnabled() {
			traceGoPark(traceBlockDebugCall, 1)
		}
		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state.
	mp := gp.m
	mp.lockedExt = lockedExt
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)

	// Undo the lockOSThread we did earlier.
	unlockOSThread()

	gp.asyncSafePoint = false
}

type debugCallWrapArgs struct {
	dispatch uintptr
	callingG *g
}

// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1() {
	gp := getg()
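	// Pick up the arguments debugCallWrap stashed in gp.param.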
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
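	// As in debugCallWrap, stash the target G in schedlink, since
	// mcall's closure can't capture anything.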
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		if traceEnabled() {
			traceGoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		dropg()
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		if traceEnabled() {
			traceGoUnpark(callingG, 0)
		}
		casgstatus(callingG, _Gwaiting, _Grunnable)
		execute(callingG, true)
	})
}

func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
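	// A func value is a pointer to a funcval, so wrapping the raw
	// dispatch PC in a funcval and storing its address into
	// dispatchF's representation makes the PC callable as an
	// ordinary function. noescape hides the pointer from escape
	// analysis so dispatchFV can stay on the stack.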
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	var ok bool
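	// ok is set only if dispatchF returns normally. If the deferred
	// function runs with !ok, a panic is unwinding: recover it and
	// report the value to the debugger via debugCallPanicked.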
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}