.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later
1 | 2 | /*
2 | 3 | * Kernel Probes (KProbes)
3 | | - *
4 | | - * This program is free software; you can redistribute it and/or modify
5 | | - * it under the terms of the GNU General Public License as published by
6 | | - * the Free Software Foundation; either version 2 of the License, or
7 | | - * (at your option) any later version.
8 | | - *
9 | | - * This program is distributed in the hope that it will be useful,
10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | | - * GNU General Public License for more details.
13 | | - *
14 | | - * You should have received a copy of the GNU General Public License
15 | | - * along with this program; if not, write to the Free Software
16 | | - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 | 4 | *
18 | 5 | * Copyright (C) IBM Corporation, 2002, 2004
19 | 6 | *
.. | ..
46 | 33 | #include <linux/hardirq.h>
47 | 34 | #include <linux/preempt.h>
48 | 35 | #include <linux/sched/debug.h>
| 36 | +#include <linux/perf_event.h>
49 | 37 | #include <linux/extable.h>
50 | 38 | #include <linux/kdebug.h>
51 | 39 | #include <linux/kallsyms.h>
52 | 40 | #include <linux/ftrace.h>
53 | | -#include <linux/frame.h>
54 | 41 | #include <linux/kasan.h>
55 | 42 | #include <linux/moduleloader.h>
| 43 | +#include <linux/objtool.h>
| 44 | +#include <linux/vmalloc.h>
| 45 | +#include <linux/pgtable.h>
56 | 46 |
57 | 47 | #include <asm/text-patching.h>
58 | 48 | #include <asm/cacheflush.h>
59 | 49 | #include <asm/desc.h>
60 | | -#include <asm/pgtable.h>
61 | 50 | #include <linux/uaccess.h>
62 | 51 | #include <asm/alternative.h>
63 | 52 | #include <asm/insn.h>
.. | ..
69 | 58 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
70 | 59 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
71 | 60 |
72 | | -#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
| 61 | +#define stack_addr(regs) ((unsigned long *)regs->sp)
73 | 62 |
74 | 63 | #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
75 | 64 | (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
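The dropped kernel_stack_pointer() helper existed because 32-bit x86 historically did not record the trap-time stack pointer in pt_regs for same-privilege exceptions; after the 32-bit entry rework, regs->sp is the real stack pointer on both 32- and 64-bit kernels, so the macro can dereference it directly. A runnable toy showing what stack_addr() yields (fake_pt_regs is a stand-in for this sketch, not the kernel's struct):

    /* Toy model of stack_addr(): reinterpret the saved stack pointer
     * as a pointer to the top-of-stack slot, which holds the return
     * address immediately after a CALL. */
    #include <stdio.h>

    struct fake_pt_regs { unsigned long sp; };

    #define stack_addr(regs) ((unsigned long *)(regs)->sp)

    int main(void)
    {
        unsigned long slot = 0x1234;   /* pretend return address */
        struct fake_pt_regs regs = { .sp = (unsigned long)&slot };

        printf("*stack_addr() = %#lx\n", *stack_addr(&regs));
        return 0;
    }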
.. | ..
132 | 121 | /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
133 | 122 | void synthesize_reljump(void *dest, void *from, void *to)
134 | 123 | {
135 | | - __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
| 124 | + __synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
136 | 125 | }
137 | 126 | NOKPROBE_SYMBOL(synthesize_reljump);
138 | 127 |
139 | 128 | /* Insert a call instruction at address 'from', which calls address 'to'.*/
140 | 129 | void synthesize_relcall(void *dest, void *from, void *to)
141 | 130 | {
142 | | - __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
| 131 | + __synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
143 | 132 | }
144 | 133 | NOKPROBE_SYMBOL(synthesize_relcall);
145 | 134 |
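Both helpers emit a 5-byte rel32 instruction: one opcode byte, 0xE9 for JMP32_INSN_OPCODE or 0xE8 for CALL_INSN_OPCODE (the renamed constants keep the same values as the old RELATIVEJUMP/RELATIVECALL ones), followed by a 32-bit displacement measured from the first byte after the instruction. A standalone sketch of the arithmetic that __synthesize_relative_insn() performs (the helper name below is mine):

    /* Emit "op rel32" into buf as if it lived at 'from', targeting 'to'. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void synth_rel32(uint8_t buf[5], uintptr_t from, uintptr_t to, uint8_t op)
    {
        int32_t disp = (int32_t)(to - (from + 5));

        buf[0] = op;                    /* 0xE9 = jmp rel32, 0xE8 = call rel32 */
        memcpy(&buf[1], &disp, sizeof(disp));
    }

    int main(void)
    {
        uint8_t insn[5];
        int32_t disp;

        synth_rel32(insn, 0x1000, 0x2000, 0xE9);
        memcpy(&disp, &insn[1], sizeof(disp));
        printf("disp = %#x\n", disp);   /* 0x2000 - 0x1005 = 0xffb */
        return 0;
    }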
.. | ..
262 | 251 | * Fortunately, we know that the original code is the ideal 5-byte
263 | 252 | * long NOP.
264 | 253 | */
265 | | - if (probe_kernel_read(buf, (void *)addr,
| 254 | + if (copy_from_kernel_nofault(buf, (void *)addr,
266 | 255 | MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
267 | 256 | return 0UL;
268 | 257 |
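probe_kernel_read() was renamed tree-wide to copy_from_kernel_nofault(); the semantics are unchanged: copy bytes from a kernel address, returning an error instead of taking a page fault when the source is unmapped. The call sites in this file only reflect the rename. A kernel-context usage sketch (not standalone):

    /* Kernel-context sketch: the nofault copy returns -EFAULT instead
     * of faulting when 'addr' is unreadable.  MAX_INSN_SIZE is 15 on x86. */
    kprobe_opcode_t buf[MAX_INSN_SIZE];

    if (copy_from_kernel_nofault(buf, (void *)addr, sizeof(buf)) < 0)
        return 0UL;        /* source unreadable: recovery impossible */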
.. | ..
321 | 310 | * Another debugging subsystem might insert this breakpoint.
322 | 311 | * In that case, we can't recover it.
323 | 312 | */
324 | | - if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
| 313 | + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
325 | 314 | return 0;
326 | 315 | addr += insn.length;
327 | 316 | }
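BREAKPOINT_INSTRUCTION was kprobes' private name for the one-byte int3 opcode; this comparison (and the later ones in the file) now uses the INT3_INSN_OPCODE definition from <asm/text-patching.h> shared with the rest of the text-poking code. Both expand to 0xCC. A trivial standalone sketch of the check (the kernel compares the decoded insn.opcode.bytes[0], not raw text):

    #include <stdint.h>
    #include <stdio.h>

    #define INT3_INSN_OPCODE 0xCC    /* same value as the kernel define */

    int main(void)
    {
        uint8_t opcode = 0xCC;       /* pretend decoded first opcode byte */

        if (opcode == INT3_INSN_OPCODE)
            printf("another subsystem already planted a breakpoint\n");
        return 0;
    }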
.. | ..
365 | 354 | return 0;
366 | 355 |
367 | 356 | /* This can access kernel text if given address is not recovered */
368 | | - if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
| 357 | + if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
| 358 | + MAX_INSN_SIZE))
369 | 359 | return 0;
370 | 360 |
371 | 361 | kernel_insn_init(insn, dest, MAX_INSN_SIZE);
372 | 362 | insn_get_length(insn);
373 | 363 |
| 364 | + /* We cannot probe an instruction that has a forced-emulation prefix */
| 365 | + if (insn_has_emulate_prefix(insn))
| 366 | + return 0;
| 367 | +
374 | 368 | /* Another subsystem puts a breakpoint, failed to recover */
375 | | - if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
| 369 | + if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
376 | 370 | return 0;
377 | 371 |
378 | 372 | /* We should not singlestep on the exception masking instructions */
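The new insn_has_emulate_prefix() check refuses to probe instructions that begin with a hypervisor "emulate prefix": a magic byte sequence that asks the hypervisor to emulate the instruction following it. For Xen this is believed to be ud2 followed by the ASCII bytes "xen" (0f 0b 78 65 6e); treat the exact bytes here as an assumption and verify against asm/xen/interface.h. Overwriting part of such a sequence with int3 would corrupt the magic, so these sites must stay unprobed. A standalone sketch of the idea:

    /* Sketch of emulate-prefix detection.  The Xen byte pattern below
     * is an assumption for illustration; the kernel's insn decoder
     * does this properly via insn_has_emulate_prefix(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const uint8_t xen_emulate_prefix[] = { 0x0f, 0x0b, 'x', 'e', 'n' };

    static bool has_emulate_prefix(const uint8_t *insn, size_t len)
    {
        return len >= sizeof(xen_emulate_prefix) &&
               !memcmp(insn, xen_emulate_prefix, sizeof(xen_emulate_prefix));
    }

    int main(void)
    {
        const uint8_t code[] = { 0x0f, 0x0b, 'x', 'e', 'n', 0x0f, 0xa2 };

        printf("%s\n", has_emulate_prefix(code, sizeof(code)) ?
               "unprobeable: emulate prefix" : "probeable");
        return 0;
    }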
.. | ..
416 | 410 | int len = insn->length;
417 | 411 |
418 | 412 | if (can_boost(insn, p->addr) &&
419 | | - MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
| 413 | + MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
420 | 414 | /*
421 | 415 | * These instructions can be executed directly if it
422 | 416 | * jumps back to correct address.
423 | 417 | */
424 | 418 | synthesize_reljump(buf + len, p->ainsn.insn + len,
425 | 419 | p->addr + insn->length);
426 | | - len += RELATIVEJUMP_SIZE;
| 420 | + len += JMP32_INSN_SIZE;
427 | 421 | p->ainsn.boostable = true;
428 | 422 | } else {
429 | 423 | p->ainsn.boostable = false;
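A "boosted" kprobe appends a relative jump right behind the copied instruction, so after the int3 fires the copy can be executed in the slot and control jumps straight back to the original stream, skipping the single-step trap. The renamed JMP32_INSN_SIZE is still 5 bytes and the copy slot is MAX_INSN_SIZE (15) bytes, so only instructions of 10 bytes or fewer leave room for the back-jump. A runnable check of that arithmetic:

    /* Which instruction lengths leave room for the 5-byte back-jump
     * in the 15-byte copy slot?  (Answer: len <= 10.) */
    #include <stdio.h>

    #define MAX_INSN_SIZE   15
    #define JMP32_INSN_SIZE 5

    int main(void)
    {
        for (int len = 1; len <= MAX_INSN_SIZE; len++)
            printf("len %2d -> %s\n", len,
                   MAX_INSN_SIZE - len >= JMP32_INSN_SIZE ?
                   "boostable" : "not boostable");
        return 0;
    }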
.. | ..
441 | 435 | if (!page)
442 | 436 | return NULL;
443 | 437 |
| 438 | + set_vm_flush_reset_perms(page);
444 | 439 | /*
445 | 440 | * First make the page read-only, and only then make it executable to
446 | 441 | * prevent it from being W+X in between.
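set_vm_flush_reset_perms() marks the vmalloc'ed area so that vfree() restores default page permissions (including the direct-map alias) and flushes the TLB when the slot is released. That is what lets the next hunk delete free_insn_page()'s manual set_memory_nx()/set_memory_rw() sequence: teardown is now centralized, closing the window in which a freed page could linger writable and executable. A kernel-context sketch of the resulting allocation pattern (the helper name is mine):

    /* Kernel-context sketch (not standalone): ROX buffer allocation.
     * RO is set before X so the mapping is never writable+executable. */
    static void *alloc_rox_buffer(void)
    {
        void *page = module_alloc(PAGE_SIZE);

        if (!page)
            return NULL;

        set_vm_flush_reset_perms(page); /* vfree() resets perms + flushes TLB */
        set_memory_ro((unsigned long)page, 1);
        set_memory_x((unsigned long)page, 1);
        return page;
    }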
.. | ..
459 | 454 | /* Recover page to RW mode before releasing it */
460 | 455 | void free_insn_page(void *page)
461 | 456 | {
462 | | - /*
463 | | - * First make the page non-executable, and only then make it writable to
464 | | - * prevent it from being W+X in between.
465 | | - */
466 | | - set_memory_nx((unsigned long)page, 1);
467 | | - set_memory_rw((unsigned long)page, 1);
468 | 457 | module_memfree(page);
469 | 458 | }
470 | 459 |
.. | ..
490 | 479 |
491 | 480 | /* Also, displacement change doesn't affect the first byte */
492 | 481 | p->opcode = buf[0];
| 482 | +
| 483 | + p->ainsn.tp_len = len;
| 484 | + perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);
493 | 485 |
494 | 486 | /* OK, write back the instruction(s) into ROX insn buffer */
495 | 487 | text_poke(p->ainsn.insn, buf, len);
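perf_event_text_poke() emits a PERF_RECORD_TEXT_POKE event carrying the old and new bytes at an address, letting trace decoders (notably Intel PT tooling) keep a coherent view of self-modifying kernel text. Here the instruction slot transitions from nothing (old length 0) to the copied instruction; the new tp_len field remembers the length so arch_remove_kprobe() can emit the mirror event when the slot is freed. The prototype, as these hunks use it:

    /* Declared in <linux/perf_event.h>: report that 'addr' changed
     * from (old_bytes, old_len) to (new_bytes, new_len); a zero
     * length means "no bytes" on that side. */
    void perf_event_text_poke(const void *addr,
                              const void *old_bytes, size_t old_len,
                              const void *new_bytes, size_t new_len);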
.. | ..
522 | 514 |
523 | 515 | void arch_arm_kprobe(struct kprobe *p)
524 | 516 | {
525 | | - text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
| 517 | + u8 int3 = INT3_INSN_OPCODE;
| 518 | +
| 519 | + text_poke(p->addr, &int3, 1);
| 520 | + text_poke_sync();
| 521 | + perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
526 | 522 | }
527 | 523 |
528 | 524 | void arch_disarm_kprobe(struct kprobe *p)
529 | 525 | {
| 526 | + u8 int3 = INT3_INSN_OPCODE;
| 527 | +
| 528 | + perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
530 | 529 | text_poke(p->addr, &p->opcode, 1);
| 530 | + text_poke_sync();
531 | 531 | }
532 | 532 |
533 | 533 | void arch_remove_kprobe(struct kprobe *p)
534 | 534 | {
535 | 535 | if (p->ainsn.insn) {
| 536 | + /* Record the perf event before freeing the slot */
| 537 | + perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
| 538 | + p->ainsn.tp_len, NULL, 0);
536 | 539 | free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
537 | 540 | p->ainsn.insn = NULL;
538 | 541 | }
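text_poke_sync() forces a serializing operation (an IPI-driven sync_core()) on every CPU, so all cores are guaranteed to observe the poked byte before arming or disarming completes. Note the deliberate ordering of the perf notifications around the pokes: when arming, the event is sent after the int3 is globally visible; when disarming, it is sent before the original opcode is restored. That way a decoder's view of the text contains the int3 for exactly the interval in which the probe is live.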
.. | ..
605 | 608 | if (setup_detour_execution(p, regs, reenter))
606 | 609 | return;
607 | 610 |
608 | | -#if !defined(CONFIG_PREEMPT)
| 611 | +#if !defined(CONFIG_PREEMPTION)
609 | 612 | if (p->ainsn.boostable && !p->post_handler) {
610 | 613 | /* Boost up -- we can execute copied instructions directly */
611 | 614 | if (!reenter)
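CONFIG_PREEMPTION is the umbrella symbol selected by both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, so this guard keeps its old meaning on preempt-RT kernels as well. Boosting remains restricted to non-preemptible kernels because a task preempted while executing out of the instruction slot could be scheduled back in after the slot has been recycled.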
.. | ..
630 | 633 | regs->flags |= X86_EFLAGS_TF;
631 | 634 | regs->flags &= ~X86_EFLAGS_IF;
632 | 635 | /* single step inline if the instruction is an int3 */
633 | | - if (p->opcode == BREAKPOINT_INSTRUCTION)
| 636 | + if (p->opcode == INT3_INSN_OPCODE)
634 | 637 | regs->ip = (unsigned long)p->addr;
635 | 638 | else
636 | 639 | regs->ip = (unsigned long)p->ainsn.insn;
.. | ..
716 | 719 | reset_current_kprobe();
717 | 720 | return 1;
718 | 721 | }
719 | | - } else if (*addr != BREAKPOINT_INSTRUCTION) {
| 722 | + } else if (*addr != INT3_INSN_OPCODE) {
720 | 723 | /*
721 | 724 | * The breakpoint instruction was removed right
722 | 725 | * after we hit it. Another cpu has removed
.. | ..
739 | 742 | * calls trampoline_handler() runs, which calls the kretprobe's handler.
740 | 743 | */
741 | 744 | asm(
| 745 | + ".text\n"
742 | 746 | ".global kretprobe_trampoline\n"
743 | 747 | ".type kretprobe_trampoline, @function\n"
744 | 748 | "kretprobe_trampoline:\n"
745 | | -#ifdef CONFIG_X86_64
746 | 749 | /* We don't bother saving the ss register */
| 750 | +#ifdef CONFIG_X86_64
747 | 751 | " pushq %rsp\n"
748 | 752 | " pushfq\n"
749 | 753 | SAVE_REGS_STRING
750 | 754 | " movq %rsp, %rdi\n"
751 | 755 | " call trampoline_handler\n"
752 | 756 | /* Replace saved sp with true return address. */
753 | | - " movq %rax, 152(%rsp)\n"
| 757 | + " movq %rax, 19*8(%rsp)\n"
754 | 758 | RESTORE_REGS_STRING
755 | 759 | " popfq\n"
756 | 760 | #else
757 | | - " pushf\n"
| 761 | + " pushl %esp\n"
| 762 | + " pushfl\n"
758 | 763 | SAVE_REGS_STRING
759 | 764 | " movl %esp, %eax\n"
760 | 765 | " call trampoline_handler\n"
761 | | - /* Move flags to cs */
762 | | - " movl 56(%esp), %edx\n"
763 | | - " movl %edx, 52(%esp)\n"
764 | | - /* Replace saved flags with true return address. */
765 | | - " movl %eax, 56(%esp)\n"
| 766 | + /* Replace saved sp with true return address. */
| 767 | + " movl %eax, 15*4(%esp)\n"
766 | 768 | RESTORE_REGS_STRING
767 | | - " popf\n"
| 769 | + " popfl\n"
768 | 770 | #endif
769 | | - " ret\n"
| 771 | + ASM_RET
770 | 772 | ".size kretprobe_trampoline, .-kretprobe_trampoline\n"
771 | 773 | );
772 | 774 | NOKPROBE_SYMBOL(kretprobe_trampoline);
773 | 775 | STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
774 | 776 |
| 777 | +
775 | 778 | /*
776 | 779 | * Called from kretprobe_trampoline
777 | 780 | */
778 | | -__visible __used void *trampoline_handler(struct pt_regs *regs)
| 781 | +__used __visible void *trampoline_handler(struct pt_regs *regs)
779 | 782 | {
780 | | - struct kretprobe_instance *ri = NULL;
781 | | - struct hlist_head *head, empty_rp;
782 | | - struct hlist_node *tmp;
783 | | - unsigned long flags, orig_ret_address = 0;
784 | | - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
785 | | - kprobe_opcode_t *correct_ret_addr = NULL;
786 | | - void *frame_pointer;
787 | | - bool skipped = false;
788 | | -
789 | | - /*
790 | | - * Set a dummy kprobe for avoiding kretprobe recursion.
791 | | - * Since kretprobe never run in kprobe handler, kprobe must not
792 | | - * be running at this point.
793 | | - */
794 | | - kprobe_busy_begin();
795 | | -
796 | | - INIT_HLIST_HEAD(&empty_rp);
797 | | - kretprobe_hash_lock(current, &head, &flags);
798 | 783 | /* fixup registers */
799 | | -#ifdef CONFIG_X86_64
800 | 784 | regs->cs = __KERNEL_CS;
801 | | - /* On x86-64, we use pt_regs->sp for return address holder. */
802 | | - frame_pointer = &regs->sp;
803 | | -#else
804 | | - regs->cs = __KERNEL_CS | get_kernel_rpl();
| 785 | +#ifdef CONFIG_X86_32
805 | 786 | regs->gs = 0;
806 | | - /* On x86-32, we use pt_regs->flags for return address holder. */
807 | | - frame_pointer = &regs->flags;
808 | 787 | #endif
809 | | - regs->ip = trampoline_address;
| 788 | + regs->ip = (unsigned long)&kretprobe_trampoline;
810 | 789 | regs->orig_ax = ~0UL;
811 | 790 |
812 | | - /*
813 | | - * It is possible to have multiple instances associated with a given
814 | | - * task either because multiple functions in the call path have
815 | | - * return probes installed on them, and/or more than one
816 | | - * return probe was registered for a target function.
817 | | - *
818 | | - * We can handle this because:
819 | | - * - instances are always pushed into the head of the list
820 | | - * - when multiple return probes are registered for the same
821 | | - * function, the (chronologically) first instance's ret_addr
822 | | - * will be the real return address, and all the rest will
823 | | - * point to kretprobe_trampoline.
824 | | - */
825 | | - hlist_for_each_entry(ri, head, hlist) {
826 | | - if (ri->task != current)
827 | | - /* another task is sharing our hash bucket */
828 | | - continue;
829 | | - /*
830 | | - * Return probes must be pushed on this hash list correct
831 | | - * order (same as return order) so that it can be poped
832 | | - * correctly. However, if we find it is pushed it incorrect
833 | | - * order, this means we find a function which should not be
834 | | - * probed, because the wrong order entry is pushed on the
835 | | - * path of processing other kretprobe itself.
836 | | - */
837 | | - if (ri->fp != frame_pointer) {
838 | | - if (!skipped)
839 | | - pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
840 | | - skipped = true;
841 | | - continue;
842 | | - }
843 | | -
844 | | - orig_ret_address = (unsigned long)ri->ret_addr;
845 | | - if (skipped)
846 | | - pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
847 | | - ri->rp->kp.addr);
848 | | -
849 | | - if (orig_ret_address != trampoline_address)
850 | | - /*
851 | | - * This is the real return address. Any other
852 | | - * instances associated with this task are for
853 | | - * other calls deeper on the call stack
854 | | - */
855 | | - break;
856 | | - }
857 | | -
858 | | - kretprobe_assert(ri, orig_ret_address, trampoline_address);
859 | | -
860 | | - correct_ret_addr = ri->ret_addr;
861 | | - hlist_for_each_entry_safe(ri, tmp, head, hlist) {
862 | | - if (ri->task != current)
863 | | - /* another task is sharing our hash bucket */
864 | | - continue;
865 | | - if (ri->fp != frame_pointer)
866 | | - continue;
867 | | -
868 | | - orig_ret_address = (unsigned long)ri->ret_addr;
869 | | - if (ri->rp && ri->rp->handler) {
870 | | - __this_cpu_write(current_kprobe, &ri->rp->kp);
871 | | - ri->ret_addr = correct_ret_addr;
872 | | - ri->rp->handler(ri, regs);
873 | | - __this_cpu_write(current_kprobe, &kprobe_busy);
874 | | - }
875 | | -
876 | | - recycle_rp_inst(ri, &empty_rp);
877 | | -
878 | | - if (orig_ret_address != trampoline_address)
879 | | - /*
880 | | - * This is the real return address. Any other
881 | | - * instances associated with this task are for
882 | | - * other calls deeper on the call stack
883 | | - */
884 | | - break;
885 | | - }
886 | | -
887 | | - kretprobe_hash_unlock(current, &flags);
888 | | -
889 | | - kprobe_busy_end();
890 | | -
891 | | - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
892 | | - hlist_del(&ri->hlist);
893 | | - kfree(ri);
894 | | - }
895 | | - return (void *)orig_ret_address;
| 791 | + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, &regs->sp);
896 | 792 | }
897 | 793 | NOKPROBE_SYMBOL(trampoline_handler);
898 | 794 |
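Several things happen at once in this hunk. The new ".text" directive pins the top-level asm blob to the text section instead of whatever section the compiler happened to be emitting. The magic stack offsets become self-documenting: the pushes build a struct pt_regs without the ss slot, so the store target is just offsetof(struct pt_regs, sp), which is 19*8 = 152 on x86-64 and, now that 32-bit also pushes %esp first, 15*4 on 32-bit; that is also why the old 32-bit dance of shuffling flags into the cs slot and stashing the return address in the flags slot disappears. The bare " ret" becomes ASM_RET, the shared macro for returns in asm blobs (on mitigated configurations it is believed to expand to more than a lone ret). Finally, the long open-coded hash-list walk in trampoline_handler() collapses into the generic kretprobe_trampoline_handler(), which takes &regs->sp as the frame pointer used to match kretprobe instances. A runnable check of the 64-bit offset arithmetic (fake_pt_regs copies the x86-64 pt_regs field order for illustration only):

    /* Verify 19*8 == offsetof(pt_regs, sp) for the x86-64 layout. */
    #include <stddef.h>
    #include <stdio.h>

    struct fake_pt_regs {
        unsigned long r15, r14, r13, r12, bp, bx;
        unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
        unsigned long orig_ax, ip, cs, flags, sp, ss;
    };

    int main(void)
    {
        printf("offsetof(sp) = %zu, 19*8 = %d\n",
               offsetof(struct fake_pt_regs, sp), 19 * 8);  /* both 152 */
        return 0;
    }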
.. | ..
1082 | 978 | */
1083 | 979 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
1084 | 980 | return 1;
1085 | | -
1086 | | - /*
1087 | | - * In case the user-specified fault handler returned
1088 | | - * zero, try to fix up.
1089 | | - */
1090 | | - if (fixup_exception(regs, trapnr))
1091 | | - return 1;
1092 | | -
1093 | | - /*
1094 | | - * fixup routine could not handle it,
1095 | | - * Let do_page_fault() fix it.
1096 | | - */
1097 | 981 | }
1098 | 982 |
1099 | 983 | return 0;
1100 | 984 | }
1101 | 985 | NOKPROBE_SYMBOL(kprobe_fault_handler);
1102 | | -
1103 | | -/*
1104 | | - * Wrapper routine for handling exceptions.
1105 | | - */
1106 | | -int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
1107 | | - void *data)
1108 | | -{
1109 | | - struct die_args *args = data;
1110 | | - int ret = NOTIFY_DONE;
1111 | | -
1112 | | - if (args->regs && user_mode(args->regs))
1113 | | - return ret;
1114 | | -
1115 | | - if (val == DIE_GPF) {
1116 | | - /*
1117 | | - * To be potentially processing a kprobe fault and to
1118 | | - * trust the result from kprobe_running(), we have
1119 | | - * be non-preemptible.
1120 | | - */
1121 | | - if (!preemptible() && kprobe_running() &&
1122 | | - kprobe_fault_handler(args->regs, args->trapnr))
1123 | | - ret = NOTIFY_STOP;
1124 | | - }
1125 | | - return ret;
1126 | | -}
1127 | | -NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1128 | | -
1129 | | -bool arch_within_kprobe_blacklist(unsigned long addr)
1130 | | -{
1131 | | - bool is_in_entry_trampoline_section = false;
1132 | | -
1133 | | -#ifdef CONFIG_X86_64
1134 | | - is_in_entry_trampoline_section =
1135 | | - (addr >= (unsigned long)__entry_trampoline_start &&
1136 | | - addr < (unsigned long)__entry_trampoline_end);
1137 | | -#endif
1138 | | - return (addr >= (unsigned long)__kprobes_text_start &&
1139 | | - addr < (unsigned long)__kprobes_text_end) ||
1140 | | - (addr >= (unsigned long)__entry_text_start &&
1141 | | - addr < (unsigned long)__entry_text_end) ||
1142 | | - is_in_entry_trampoline_section;
1143 | | -}
1144 | 986 |
1145 | 987 | int __init arch_populate_kprobe_blacklist(void)
1146 | 988 | {
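The deleted tail of kprobe_fault_handler() reflects that exception fixup is no longer kprobes' job: the generic fault path applies fixup_exception() itself, so the handler only consults the user-supplied fault_handler. kprobe_exceptions_notify() goes away because the notifier-chain hand-off was replaced by direct calls from the trap handling code, and the open-coded arch_within_kprobe_blacklist() gives way to arch_populate_kprobe_blacklist(), which registers forbidden address ranges once at boot instead of testing every address at registration time. A hedged sketch of what such a populate hook looks like (the ranges registered by the real function may differ):

    /* Kernel-context sketch (not standalone): blacklist the entry
     * text once at boot via the generic area helper. */
    int __init arch_populate_kprobe_blacklist(void)
    {
        return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
                                         (unsigned long)__entry_text_end);
    }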