forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/arm64/kernel/probes/kprobes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * arch/arm64/kernel/probes/kprobes.c
  *
@@ -5,16 +6,6 @@
  *
  * Copyright (C) 2013 Linaro Limited.
  * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
 */
 #include <linux/kasan.h>
 #include <linux/kernel.h>
@@ -30,6 +21,7 @@
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
 #include <asm/system_misc.h>
 #include <asm/insn.h>
 #include <linux/uaccess.h>
@@ -44,25 +36,16 @@
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
-{
-	void *addrs[1];
-	u32 insns[1];
-
-	addrs[0] = addr;
-	insns[0] = opcode;
-
-	return aarch64_insn_patch_text(addrs, insns, 1);
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
-	/* prepare insn slot */
-	patch_text(p->ainsn.api.insn, p->opcode);
+	kprobe_opcode_t *addr = p->ainsn.api.insn;
+	void *addrs[] = {addr, addr + 1};
+	u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
 
-	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
-			   (uintptr_t) (p->ainsn.api.insn) +
-			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	/* prepare insn slot */
+	aarch64_insn_patch_text(addrs, insns, 2);
+
+	flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
 
 	/*
 	 * Needs restoring of return address after stepping xol.
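
With this hunk the out-of-line (XOL) slot stops depending on hardware
single-step: the copied instruction is followed by a second BRK, so control
returns to the kprobe core through an ordinary debug exception. A hedged
sketch of the invariant the hunk establishes (the helper is illustrative,
not part of the commit; assumes MAX_INSN_SIZE >= 2):

	/* Illustrative only: the slot layout after arch_prepare_ss_slot(). */
	static bool sketch_ss_slot_is_prepared(const struct kprobe *p)
	{
		const u32 *slot = (const u32 *)p->ainsn.api.insn;

		/* slot[0] holds the copied probed instruction; slot[1] holds
		 * the BRK that traps back into the kprobe core once slot[0]
		 * has executed. */
		return slot[0] == p->opcode &&
		       slot[1] == BRK64_OPCODE_KPROBES_SS;
	}
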
@@ -91,8 +74,6 @@
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long probe_addr = (unsigned long)p->addr;
-	extern char __start_rodata[];
-	extern char __end_rodata[];
 
 	if (probe_addr & 0x3)
 		return -EINVAL;
@@ -100,10 +81,7 @@
 	/* copy instruction */
 	p->opcode = le32_to_cpu(*p->addr);
 
-	if (in_exception_text(probe_addr))
-		return -EINVAL;
-	if (probe_addr >= (unsigned long) __start_rodata &&
-	    probe_addr <= (unsigned long) __end_rodata)
+	if (search_exception_tables(probe_addr))
 		return -EINVAL;
 
 	/* decode instruction */
@@ -120,7 +98,7 @@
 		if (!p->ainsn.api.insn)
 			return -ENOMEM;
 		break;
-	};
+	}
 
 	/* prepare the instruction */
 	if (p->ainsn.api.insn)
@@ -133,25 +111,26 @@
 
 void *alloc_insn_page(void)
 {
-	void *page;
-
-	page = vmalloc_exec(PAGE_SIZE);
-	if (page)
-		set_memory_ro((unsigned long)page, 1);
-
-	return page;
+	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+			GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+			NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 /* arm kprobe: install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, BRK64_OPCODE_KPROBES);
+	void *addr = p->addr;
+	u32 insn = BRK64_OPCODE_KPROBES;
+
+	aarch64_insn_patch_text(&addr, &insn, 1);
 }
 
 /* disarm kprobe: remove breakpoint from text */
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, p->opcode);
+	void *addr = p->addr;
+
+	aarch64_insn_patch_text(&addr, &p->opcode, 1);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
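
The slot page is now mapped read-only-executable from the moment it is
allocated, so there is never a window where it is both writable and
executable, and VM_FLUSH_RESET_PERMS makes vfree() reset the direct-map
permissions and flush the TLB when the page is freed. The writes above still
work because aarch64_insn_patch_text() pokes the page through a temporary
fixmap alias rather than its vmalloc mapping. A simplified sketch of that
path (based on aarch64_insn_write() in arch/arm64/kernel/insn.c; locking,
error handling and exact helper names vary by kernel version):

	/* Hedged sketch: how a read-only page gets patched. */
	static int sketch_insn_write(void *addr, u32 insn)
	{
		/* Map the target page at a writable fixmap alias ... */
		void *waddr = patch_map(addr, FIX_TEXT_POKE0);
		/* ... write the instruction through the alias ... */
		int ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

		/* ... then tear the alias down again. */
		patch_unmap(FIX_TEXT_POKE0);
		return ret;
	}
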
@@ -180,54 +159,22 @@
 }
 
 /*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
-	unsigned long spsr = regs->pstate;
-
-	if (mask)
-		spsr |= PSR_D_BIT;
-	else
-		spsr &= ~PSR_D_BIT;
-
-	regs->pstate = spsr;
-}
-
-/*
- * Interrupts need to be disabled before single-step mode is set, and not
- * reenabled until after single-step mode ends.
- * Without disabling interrupt on local CPU, there is a chance of
- * interrupt occurrence in the period of exception return and start of
- * out-of-line single-step, that result in wrongly single stepping
- * into the interrupt handler.
+ * Mask all of DAIF while executing the instruction out-of-line, to keep things
+ * simple and avoid nesting exceptions. Interrupts do have to be disabled since
+ * the kprobe state is per-CPU and doesn't get migrated.
  */
 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
-	kcb->saved_irqflag = regs->pstate;
-	regs->pstate |= PSR_I_BIT;
+	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
+	regs->pstate |= DAIF_MASK;
 }
 
 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
-	if (kcb->saved_irqflag & PSR_I_BIT)
-		regs->pstate |= PSR_I_BIT;
-	else
-		regs->pstate &= ~PSR_I_BIT;
+	regs->pstate &= ~DAIF_MASK;
+	regs->pstate |= kcb->saved_irqflag;
 }
 
 static void __kprobes
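
DAIF_MASK covers all four PSTATE exception-mask bits, so the new helpers
save and mask debug, SError, IRQ and FIQ in one go instead of toggling
PSR_I_BIT alone. For reference, the bit values involved (as defined in
arch/arm64/include/uapi/asm/ptrace.h and asm/ptrace.h):

	#define PSR_F_BIT	0x00000040	/* FIQ masked */
	#define PSR_I_BIT	0x00000080	/* IRQ masked */
	#define PSR_A_BIT	0x00000100	/* SError masked */
	#define PSR_D_BIT	0x00000200	/* Debug masked */
	#define DAIF_MASK	GENMASK(9, 6)	/* D | A | I | F */
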
@@ -263,12 +210,7 @@
 		slot = (unsigned long)p->ainsn.api.insn;
 
 		set_ss_context(kcb, slot);	/* mark pending ss */
-
-		spsr_set_debug_flag(regs, 0);
-
-		/* IRQs and single stepping do not mix well. */
 		kprobes_save_local_irqflag(kcb, regs);
-		kernel_enable_single_step(regs);
 		instruction_pointer_set(regs, slot);
 	} else {
 		/* insn simulation */
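
Taken together with the two-instruction slot above, a probe hit now flows
entirely through BRK exceptions; nothing arms PSTATE.SS any more. As a
sketch (function names from this file):

	/*
	 * 1. Probed address executes BRK64_OPCODE_KPROBES
	 *      -> kprobe_breakpoint_handler()
	 * 2. setup_singlestep(): save and mask DAIF, point the PC at the slot
	 * 3. slot[0] (the copied instruction) executes out-of-line
	 * 4. slot[1] (BRK64_OPCODE_KPROBES_SS) traps
	 *      -> kprobe_breakpoint_ss_handler()
	 * 5. DAIF restored, post_kprobe_handler() runs, execution resumes
	 *    after the probed instruction
	 */
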
@@ -319,12 +261,8 @@
 	}
 	/* call post handler */
 	kcb->kprobe_status = KPROBE_HIT_SSDONE;
-	if (cur->post_handler) {
-		/* post_handler can hit breakpoint and single step
-		 * again, so we enable D-flag for recursive exception.
-		 */
+	if (cur->post_handler)
 		cur->post_handler(cur, regs, 0);
-	}
 
 	reset_current_kprobe();
 }
@@ -348,12 +286,12 @@
 		if (!instruction_pointer(regs))
 			BUG();
 
-		kernel_disable_single_step();
-
-		if (kcb->kprobe_status == KPROBE_REENTER)
+		if (kcb->kprobe_status == KPROBE_REENTER) {
 			restore_previous_kprobe(kcb);
-		else
+		} else {
+			kprobes_restore_local_irqflag(kcb, regs);
 			reset_current_kprobe();
+		}
 
 		break;
 	case KPROBE_HIT_ACTIVE:
@@ -411,10 +349,6 @@
 		 * pre-handler and it returned non-zero, it will
 		 * modify the execution path and no need to single
 		 * stepping. Let's just reset current kprobe and exit.
-		 *
-		 * pre_handler can hit a breakpoint and can step thru
-		 * before return, keep PSTATE D-flag enabled until
-		 * pre_handler return back.
 		 */
 		if (!p->pre_handler || !p->pre_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
@@ -444,143 +378,80 @@
 	return DBG_HOOK_ERROR;
 }
 
-int __kprobes
-kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
+static int __kprobes
+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	int retval;
-
-	if (user_mode(regs))
-		return DBG_HOOK_ERROR;
 
 	/* return error if this is not our step */
 	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
 
 	if (retval == DBG_HOOK_HANDLED) {
 		kprobes_restore_local_irqflag(kcb, regs);
-		kernel_disable_single_step();
-
 		post_kprobe_handler(kcb, regs);
 	}
 
 	return retval;
 }
 
-int __kprobes
+static struct break_hook kprobes_break_ss_hook = {
+	.imm = KPROBES_BRK_SS_IMM,
+	.fn = kprobe_breakpoint_ss_handler,
+};
+
+static int __kprobes
 kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
 {
-	if (user_mode(regs))
-		return DBG_HOOK_ERROR;
-
 	kprobe_handler(regs);
 	return DBG_HOOK_HANDLED;
 }
 
-bool arch_within_kprobe_blacklist(unsigned long addr)
+static struct break_hook kprobes_break_hook = {
+	.imm = KPROBES_BRK_IMM,
+	.fn = kprobe_breakpoint_handler,
+};
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
 {
-	if ((addr >= (unsigned long)__kprobes_text_start &&
-	    addr < (unsigned long)__kprobes_text_end) ||
-	    (addr >= (unsigned long)__entry_text_start &&
-	    addr < (unsigned long)__entry_text_end) ||
-	    (addr >= (unsigned long)__idmap_text_start &&
-	    addr < (unsigned long)__idmap_text_end) ||
-	    (addr >= (unsigned long)__hyp_text_start &&
-	    addr < (unsigned long)__hyp_text_end) ||
-	    !!search_exception_tables(addr))
-		return true;
+	int ret;
 
-	if (!is_kernel_in_hyp_mode()) {
-		if ((addr >= (unsigned long)__hyp_idmap_text_start &&
-		    addr < (unsigned long)__hyp_idmap_text_end))
-			return true;
-	}
-
-	return false;
+	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+					(unsigned long)__entry_text_end);
+	if (ret)
+		return ret;
+	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+					(unsigned long)__irqentry_text_end);
+	if (ret)
+		return ret;
+	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
+					(unsigned long)__idmap_text_end);
+	if (ret)
+		return ret;
+	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
+					(unsigned long)__hyp_text_end);
+	if (ret || is_kernel_in_hyp_mode())
+		return ret;
+	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
+					(unsigned long)__hyp_idmap_text_end);
+	return ret;
 }
 
 void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri = NULL;
-	struct hlist_head *head, empty_rp;
-	struct hlist_node *tmp;
-	unsigned long flags, orig_ret_address = 0;
-	unsigned long trampoline_address =
-		(unsigned long)&kretprobe_trampoline;
-	kprobe_opcode_t *correct_ret_addr = NULL;
-
-	INIT_HLIST_HEAD(&empty_rp);
-	kretprobe_hash_lock(current, &head, &flags);
-
-	/*
-	 * It is possible to have multiple instances associated with a given
-	 * task either because multiple functions in the call path have
-	 * return probes installed on them, and/or more than one
-	 * return probe was registered for a target function.
-	 *
-	 * We can handle this because:
-	 * - instances are always pushed into the head of the list
-	 * - when multiple return probes are registered for the same
-	 *   function, the (chronologically) first instance's ret_addr
-	 *   will be the real return address, and all the rest will
-	 *   point to kretprobe_trampoline.
-	 */
-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-
-		if (orig_ret_address != trampoline_address)
-			/*
-			 * This is the real return address. Any other
-			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
-	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-		if (ri->rp && ri->rp->handler) {
-			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-			ri->ret_addr = correct_ret_addr;
-			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, NULL);
-		}
-
-		recycle_rp_inst(ri, &empty_rp);
-
-		if (orig_ret_address != trampoline_address)
-			/*
			 * This is the real return address. Any other
-			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_hash_unlock(current, &flags);
-
-	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-		hlist_del(&ri->hlist);
-		kfree(ri);
-	}
-	return (void *)orig_ret_address;
+	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline,
+					(void *)kernel_stack_pointer(regs));
 }
 
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
 	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
+	ri->fp = (void *)kernel_stack_pointer(regs);
 
 	/* replace return addr (x30) with trampoline */
 	regs->regs[30] = (long)&kretprobe_trampoline;
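
Two things are going on in this hunk. First, the handlers are reached
through registered break_hooks instead of the old hard-wired
kprobe_breakpoint_handler()/kprobe_single_step_handler() entry points, so
the explicit user_mode() checks can go: register_kernel_break_hook() only
wires a hook up for kernel-mode BRKs, and each hook is selected by the
16-bit immediate encoded in the BRK instruction, recovered from ESR_EL1. A
simplified sketch of that dispatch (based on call_break_hook() in
arch/arm64/kernel/debug-monitors.c; locking and hook masks elided):

	static int sketch_call_break_hook(struct pt_regs *regs, unsigned int esr)
	{
		struct break_hook *hook;
		unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

		/* kernel_break_hook is the list register_kernel_break_hook()
		 * adds to; user-mode BRKs consult a separate list. */
		list_for_each_entry(hook, &kernel_break_hook, node)
			if (comment == hook->imm)
				return hook->fn(regs, esr);

		return DBG_HOOK_ERROR;
	}

Second, the hand-rolled kretprobe return-address walk is replaced by the
generic kretprobe_trampoline_handler(), which uses the frame pointer
recorded in ri->fp (saved in arch_prepare_kretprobe() above) to match each
returning frame with the right kretprobe instance.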
@@ -593,5 +464,8 @@
 
 int __init arch_init_kprobes(void)
 {
+	register_kernel_break_hook(&kprobes_break_hook);
+	register_kernel_break_hook(&kprobes_break_ss_hook);
+
 	return 0;
 }
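
For anyone wanting to exercise the new path end to end, a minimal test
module in the style of samples/kprobes/kprobe_example.c (the probed symbol
is only an example; any kernel function that is not blacklisted will do):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/kprobes.h>

	static struct kprobe kp = {
		.symbol_name = "do_sys_open",	/* example target */
	};

	/* Runs from kprobe_breakpoint_handler(), i.e. the first BRK. */
	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("pre: pc = 0x%lx\n", (unsigned long)regs->pc);
		return 0;
	}

	/* Runs from post_kprobe_handler(), after the BRK in the XOL slot. */
	static void handler_post(struct kprobe *p, struct pt_regs *regs,
				 unsigned long flags)
	{
		pr_info("post: pc = 0x%lx\n", (unsigned long)regs->pc);
	}

	static int __init kprobe_test_init(void)
	{
		kp.pre_handler = handler_pre;
		kp.post_handler = handler_post;
		return register_kprobe(&kp);
	}

	static void __exit kprobe_test_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(kprobe_test_init);
	module_exit(kprobe_test_exit);
	MODULE_LICENSE("GPL");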