forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/arm64/kernel/entry-ftrace.S
@@ -1,18 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * arch/arm64/kernel/entry-ftrace.S
  *
  * Copyright (C) 2013 Linaro Limited
  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/linkage.h>
+#include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/ftrace.h>
 #include <asm/insn.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+/*
+ * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
+ * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
+ * ftrace_make_call() have patched those NOPs to:
+ *
+ *	MOV	X9, LR
+ *	BL	<entry>
+ *
+ * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
+ *
+ * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
+ * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
+ * clobber.
+ *
+ * We save the callsite's context into a pt_regs before invoking any ftrace
+ * callbacks. So that we can get a sensible backtrace, we create a stack record
+ * for the callsite and the ftrace entry assembly. This is not sufficient for
+ * reliable stacktrace: until we create the callsite stack record, its caller
+ * is missing from the LR and existing chain of frame records.
+ */
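For readers who haven't met -fpatchable-function-entry before, here is a minimal user-space illustration of the mechanism (the function foo and the disassembly shape are illustrative, not part of this patch):

	/*
	 * foo.c -- build with: gcc -O2 -fpatchable-function-entry=2 -c foo.c
	 *
	 * The compiler reserves two NOPs at the very start of foo, before any
	 * real prologue instruction. In a disassembly the entry looks roughly
	 * like:
	 *
	 *   foo:
	 *     nop          // patched by ftrace to: mov x9, x30
	 *     nop          // patched by ftrace to: bl <ftrace entry>
	 *     ...          // regular prologue and body
	 *
	 * While the callsite is not traced, the NOPs fall straight through at
	 * negligible cost.
	 */
	int foo(int x)
	{
		return x + 1;
	}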
+	.macro	ftrace_regs_entry, allregs=0
+	/* Make room for pt_regs, plus a callee frame */
+	sub	sp, sp, #(S_FRAME_SIZE + 16)
+
+	/* Save function arguments (and x9 for simplicity) */
+	stp	x0, x1, [sp, #S_X0]
+	stp	x2, x3, [sp, #S_X2]
+	stp	x4, x5, [sp, #S_X4]
+	stp	x6, x7, [sp, #S_X6]
+	stp	x8, x9, [sp, #S_X8]
+
+	/* Optionally save the callee-saved registers, always save the FP */
+	.if \allregs == 1
+	stp	x10, x11, [sp, #S_X10]
+	stp	x12, x13, [sp, #S_X12]
+	stp	x14, x15, [sp, #S_X14]
+	stp	x16, x17, [sp, #S_X16]
+	stp	x18, x19, [sp, #S_X18]
+	stp	x20, x21, [sp, #S_X20]
+	stp	x22, x23, [sp, #S_X22]
+	stp	x24, x25, [sp, #S_X24]
+	stp	x26, x27, [sp, #S_X26]
+	stp	x28, x29, [sp, #S_X28]
+	.else
+	str	x29, [sp, #S_FP]
+	.endif
+
+	/* Save the callsite's SP and LR */
+	add	x10, sp, #(S_FRAME_SIZE + 16)
+	stp	x9, x10, [sp, #S_LR]
+
+	/* Save the PC after the ftrace callsite */
+	str	x30, [sp, #S_PC]
+
+	/* Create a frame record for the callsite above pt_regs */
+	stp	x29, x9, [sp, #S_FRAME_SIZE]
+	add	x29, sp, #S_FRAME_SIZE
+
+	/* Create our frame record within pt_regs. */
+	stp	x29, x30, [sp, #S_STACKFRAME]
+	add	x29, sp, #S_STACKFRAME
+	.endm
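The S_* offsets are generated by asm-offsets.c from struct pt_regs, with S_FRAME_SIZE being sizeof(struct pt_regs). A trimmed sketch of the arm64 layout this macro fills in (see arch/arm64/include/asm/ptrace.h for the authoritative definition; middle fields elided):

	struct pt_regs {
		__u64 regs[31];		/* x0..x30: S_X0 = regs[0], S_FP = regs[29],
					 * S_LR = regs[30] */
		__u64 sp;		/* the "stp x9, x10, [sp, #S_LR]" above writes
					 * LR into regs[30] and the callsite SP here */
		__u64 pc;		/* S_PC: address after the callsite's BL */
		__u64 pstate;
		/* ... syscall and bookkeeping fields elided ... */
		__u64 stackframe[2];	/* S_STACKFRAME: frame record {fp, lr} */
	};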
+
+SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
+	ftrace_regs_entry	1
+	b	ftrace_common
+SYM_CODE_END(ftrace_regs_caller)
+
+SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
+	ftrace_regs_entry	0
+	b	ftrace_common
+SYM_CODE_END(ftrace_caller)
+
+SYM_CODE_START(ftrace_common)
+	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
+	mov	x1, x9				// parent_ip (callsite's LR)
+	ldr_l	x2, function_trace_op		// op
+	mov	x3, sp				// regs
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
+	nop				// If enabled, this will be replaced
+					// "b ftrace_graph_caller"
+#endif
+
+/*
+ * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
+ * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
+ * to restore x0-x8, x29, and x30.
+ */
+ftrace_common_return:
+	/* Restore function arguments */
+	ldp	x0, x1, [sp]
+	ldp	x2, x3, [sp, #S_X2]
+	ldp	x4, x5, [sp, #S_X4]
+	ldp	x6, x7, [sp, #S_X6]
+	ldr	x8, [sp, #S_X8]
+
+	/* Restore the callsite's FP, LR, PC */
+	ldr	x29, [sp, #S_FP]
+	ldr	x30, [sp, #S_LR]
+	ldr	x9, [sp, #S_PC]
+
+	/* Restore the callsite's SP */
+	add	sp, sp, #S_FRAME_SIZE + 16
+
+	ret	x9
+SYM_CODE_END(ftrace_common)
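Everything below runs with x0 = ip, x1 = parent_ip, x2 = op and x3 = regs, which is the pt_regs-flavoured ftrace callback signature of this kernel generation. As a sketch, a module probe that would be entered via ftrace_regs_caller; the probe body and the filtered function name are illustrative only:

	#include <linux/ftrace.h>
	#include <linux/module.h>
	#include <linux/string.h>

	/* Parameters correspond one-to-one to x0-x3 as loaded by ftrace_common. */
	static void notrace my_probe(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* With SAVE_REGS, regs points at the pt_regs built by
		 * ftrace_regs_entry; argument registers and pc are writable. */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_probe,
		/* Request a full pt_regs, i.e. the ftrace_regs_caller path. */
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	static int __init my_probe_init(void)
	{
		/* Trace one function rather than every callsite. */
		ftrace_set_filter(&my_ops, "do_nanosleep",
				  strlen("do_nanosleep"), 0);
		return register_ftrace_function(&my_ops);
	}
	module_init(my_probe_init);
	MODULE_LICENSE("GPL");

Without FTRACE_OPS_FL_SAVE_REGS, the same callback would instead be reached through the lighter ftrace_caller path above, with only the argument registers and FP preserved.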
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_CODE_START(ftrace_graph_caller)
+	ldr	x0, [sp, #S_PC]
+	sub	x0, x0, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
+	add	x1, sp, #S_LR			// parent_ip (callsite's LR)
+	ldr	x2, [sp, #S_FRAME_SIZE]		// parent fp (callsite's FP)
+	bl	prepare_ftrace_return
+	b	ftrace_common_return
+SYM_CODE_END(ftrace_graph_caller)
+#endif
+
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
 /*
  * Gcc with -pg will put the following code in the beginning of each function:
@@ -95,7 +225,7 @@
  * - tracer function to probe instrumented function's entry,
  * - ftrace_graph_caller to set up an exit hook
  */
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
 	mcount_enter
 
 	ldr_l	x2, ftrace_trace_function
@@ -119,7 +249,9 @@
 	b.ne	ftrace_graph_caller	// ftrace_graph_caller();
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 	mcount_exit
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
 
 #else /* CONFIG_DYNAMIC_FTRACE */
 /*
@@ -128,9 +260,11 @@
  * and later on, NOP to branch to ftrace_caller() when enabled or branch to
  * NOP when disabled per-function base.
  */
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
 	ret
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
 
 /*
  * void ftrace_caller(unsigned long return_address)
@@ -141,51 +275,27 @@
  * - tracer function to probe instrumented function's entry,
  * - ftrace_graph_caller to set up an exit hook
  */
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
 	mcount_enter
 
 	mcount_get_pc0	x0		// function's pc
 	mcount_get_lr	x1		// function's lr
 
-	.global ftrace_call
-ftrace_call:				// tracer(pc, lr);
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)	// tracer(pc, lr);
 	nop				// This will be replaced with "bl xxx"
 					// where xxx can be any kind of tracer.
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	.global ftrace_graph_call
-ftrace_graph_call:			// ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
 	nop				// If enabled, this will be replaced
 					// "b ftrace_graph_caller"
 #endif
 
 	mcount_exit
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-ENTRY(ftrace_stub)
-	ret
-ENDPROC(ftrace_stub)
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* save return value regs*/
-	.macro save_return_regs
-	sub sp, sp, #64
-	stp x0, x1, [sp]
-	stp x2, x3, [sp, #16]
-	stp x4, x5, [sp, #32]
-	stp x6, x7, [sp, #48]
-	.endm
-
-	/* restore return value regs*/
-	.macro restore_return_regs
-	ldp x0, x1, [sp]
-	ldp x2, x3, [sp, #16]
-	ldp x4, x5, [sp, #32]
-	ldp x6, x7, [sp, #48]
-	add sp, sp, #64
-	.endm
-
 /*
  * void ftrace_graph_caller(void)
  *
@@ -195,28 +305,47 @@
  * the call stack in order to intercept instrumented function's return path
  * and run return_to_handler() later on its exit.
  */
-ENTRY(ftrace_graph_caller)
-	mcount_get_lr_addr	x0	// pointer to function's saved lr
-	mcount_get_pc		x1	// function's pc
+SYM_FUNC_START(ftrace_graph_caller)
+	mcount_get_pc		x0	// function's pc
+	mcount_get_lr_addr	x1	// pointer to function's saved lr
 	mcount_get_parent_fp	x2	// parent's fp
-	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)
+	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)
 
 	mcount_exit
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
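Note the argument order: both ftrace_graph_caller variants now load pc into x0 and the address of the saved lr into x1. This matches the reordered prototype of prepare_ftrace_return() in arch/arm64/kernel/ftrace.c; both orders are documented by the inline comments in this file (sketch):

	/*
	 * Old order, as the removed mcount-based code loaded it:
	 *     void prepare_ftrace_return(unsigned long *parent,
	 *                                unsigned long self_addr,
	 *                                unsigned long frame_pointer);
	 * New order, matching x0-x2 in both callers above:
	 */
	void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
				   unsigned long frame_pointer);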
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
+SYM_FUNC_START(ftrace_stub)
+	ret
+SYM_FUNC_END(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * void return_to_handler(void)
  *
  * Run ftrace_return_to_handler() before going back to parent.
- * @fp is checked against the value passed by ftrace_graph_caller()
- * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+ * @fp is checked against the value passed by ftrace_graph_caller().
  */
-ENTRY(return_to_handler)
-	save_return_regs
+SYM_CODE_START(return_to_handler)
+	/* save return value regs */
+	sub	sp, sp, #64
+	stp	x0, x1, [sp]
+	stp	x2, x3, [sp, #16]
+	stp	x4, x5, [sp, #32]
+	stp	x6, x7, [sp, #48]
+
 	mov	x0, x29				// parent's fp
 	bl	ftrace_return_to_handler	// addr = ftrace_return_to_handler(fp);
 	mov	x30, x0				// restore the original return address
-	restore_return_regs
+
+	/* restore return value regs */
+	ldp	x0, x1, [sp]
+	ldp	x2, x3, [sp, #16]
+	ldp	x4, x5, [sp, #32]
+	ldp	x6, x7, [sp, #48]
+	add	sp, sp, #64
+
 	ret
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
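To close the loop on the graph tracer: prepare_ftrace_return() swaps the saved lr for the address of return_to_handler, so the traced function "returns" into the code above. x0-x7 are saved and restored around the call because they may carry the traced function's return value. A conceptual sketch of the round trip, simplified from kernel/trace/fgraph.c and arch/arm64/kernel/ftrace.c:

	/*
	 * Entry, via ftrace_graph_caller:
	 *     old = *parent;                        // callsite's real lr
	 *     if (!function_graph_enter(old, self_addr, fp, parent))
	 *             *parent = (unsigned long)&return_to_handler;
	 *
	 * Exit, via return_to_handler: recover the address recorded at entry.
	 * @fp is compared with the value captured on the way in, to detect a
	 * mismatched entry/exit pair.
	 */
	unsigned long ftrace_return_to_handler(unsigned long frame_pointer);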