path: root/contrib/go/_std_1.22/src/runtime/libfuzzer_arm64.s
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build libfuzzer

#include "go_asm.h"
#include "textflag.h"

// Based on race_arm64.s; see commentary there.

#define RARG0 R0
#define RARG1 R1
#define RARG2 R2
#define RARG3 R3

#define REPEAT_2(a) a a
#define REPEAT_8(a) REPEAT_2(REPEAT_2(REPEAT_2(a)))
#define REPEAT_128(a) REPEAT_2(REPEAT_8(REPEAT_8(a)))
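// REPEAT_128 expands its argument 2 * 8 * 8 = 128 times and is used below to
// emit the 128-entry return sled.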

// void runtime·libfuzzerCallTraceIntCmp(fn, arg0, arg1, fakePC uintptr)
// Calls C function fn from libFuzzer and passes 2 arguments to it after
// manipulating the return address so that libfuzzer's integer compare hooks
// work.
// The problem statement and solution are documented in detail in libfuzzer_amd64.s.
// See commentary there.
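// The Go-side callers in libfuzzer.go are assumed to reduce fakePC modulo the
// sled size before calling in; a rough sketch (not a verbatim copy):
//
//	func libfuzzerTraceCmp8(arg0, arg1 uint64, fakePC uint) {
//		fakePC = fakePC % retSledSize
//		libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp8, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
//	}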
TEXT	runtime·libfuzzerCallTraceIntCmp(SB), NOSPLIT, $8-32
	MOVD	fn+0(FP), R9
	MOVD	arg0+8(FP), RARG0
	MOVD	arg1+16(FP), RARG1
	MOVD	fakePC+24(FP), R8
	// Save the original return address in a local variable
	MOVD	R30, savedRetAddr-8(SP)

	MOVD	g_m(g), R10

	// Switch to g0 stack.
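	// The C hook may use more stack than this goroutine's stack guarantees
	// (this is a NOSPLIT function), so run it on the system (g0) stack.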
	MOVD	RSP, R19	// callee-saved, preserved across the CALL
	MOVD	m_g0(R10), R11
	CMP	R11, g
	BEQ	call	// already on g0
	MOVD	(g_sched+gobuf_sp)(R11), R12
	MOVD	R12, RSP
call:
	// Load address of the ret sled into the default register for the return
	// address.
	ADR	ret_sled, R30
	// Clear the lowest 2 bits of fakePC. All ARM64 instructions are four
	// bytes long, so we cannot get better return address granularity than
	// multiples of 4.
	AND	$-4, R8, R8
	// Add the offset of the fake_pc-th ret.
	ADD	R8, R30, R30
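	// For example, fakePC 13 is rounded down to 12, so R30 points at the
	// fourth JMP of the sled (byte offset 12).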
	// Call the function by jumping to it and reusing all registers except
	// for the modified return address register R30.
	JMP	(R9)

// The ret sled for ARM64 consists of 128 br instructions jumping to the
// end of the function. Each instruction is 4 bytes long. The sled thus
// has the same byte length of 4 * 128 = 512 as the x86_64 sled, but
// coarser granularity.
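// With 4-byte entries, the sled can distinguish 512 / 4 = 128 fake PC values,
// whereas the x86_64 sled fits one-byte rets into the same 512 bytes.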
#define RET_SLED \
	JMP	end_of_function;

ret_sled:
	REPEAT_128(RET_SLED);

end_of_function:
	MOVD	R19, RSP
	MOVD	savedRetAddr-8(SP), R30
	RET

// void runtime·libfuzzerCall4(fn, hookId int, s1, s2 unsafe.Pointer, result uintptr)
// Calls C function fn from libFuzzer and passes 4 arguments to it.
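// The Go side is assumed to use this for libFuzzer's weak comparison hooks,
// roughly (sketch based on libfuzzer.go, not a verbatim copy):
//
//	libfuzzerCall4(&__sanitizer_weak_hook_strcmp, uintptr(fakePC), cstring(s1), cstring(s2), uintptr(1))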
TEXT	runtime·libfuzzerCall4(SB), NOSPLIT, $0-40
	MOVD	fn+0(FP), R9
	MOVD	hookId+8(FP), RARG0
	MOVD	s1+16(FP), RARG1
	MOVD	s2+24(FP), RARG2
	MOVD	result+32(FP), RARG3

	MOVD	g_m(g), R10

	// Switch to g0 stack.
	MOVD	RSP, R19	// callee-saved, preserved across the CALL
	MOVD	m_g0(R10), R11
	CMP	R11, g
	BEQ	call	// already on g0
	MOVD	(g_sched+gobuf_sp)(R11), R12
	MOVD	R12, RSP
call:
	BL	R9
	MOVD	R19, RSP
	RET

// void runtime·libfuzzerCallWithTwoByteBuffers(fn, start, end *byte)
// Calls C function fn from libFuzzer and passes 2 arguments of type *byte to it.
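// This is assumed to back the coverage-initialization calls made at startup,
// e.g. passing the [start, end) range of the 8-bit counter section to
// __sanitizer_cov_8bit_counters_init (see libfuzzer.go).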
TEXT	runtime·libfuzzerCallWithTwoByteBuffers(SB), NOSPLIT, $0-24
	MOVD	fn+0(FP), R9
	MOVD	start+8(FP), R0
	MOVD	end+16(FP), R1

	MOVD	g_m(g), R10

	// Switch to g0 stack.
	MOVD	RSP, R19	// callee-saved, preserved across the CALL
	MOVD	m_g0(R10), R11
	CMP	R11, g
	BEQ	call	// already on g0
	MOVD	(g_sched+gobuf_sp)(R11), R12
	MOVD	R12, RSP
call:
	BL	R9
	MOVD	R19, RSP
	RET