```diff
@@ -18,7 +18,14 @@
 #define __parainstructions_end NULL
 #endif
 
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+/*
+ * Currently, the max observed size in the kernel code is
+ * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
+ * Raise it if needed.
+ */
+#define POKE_MAX_OPCODE_SIZE	5
+
+extern void text_poke_early(void *addr, const void *opcode, size_t len);
 
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
@@ -31,42 +38,133 @@
  * no thread can be preempted in the instructions being modified (no iret to an
  * invalid instruction possible) or if the instructions are changed from a
  * consistent state to another consistent state atomically.
- * On the local CPU you need to be protected again NMI or MCE handlers seeing an
- * inconsistent instruction while you patch.
+ * On the local CPU you need to be protected against NMI or MCE handlers seeing
+ * an inconsistent instruction while you patch.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void text_poke_sync(void);
+extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
-extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
+
+extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
+extern void text_poke_finish(void);
+
```
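The queue/finish pair exists so that several patch sites can share a single INT3-installation and CPU-synchronization cycle, where a lone text_poke_bp() pays that cost per site. As a hedged sketch of the intended call pattern (site1, site2 and new_target are hypothetical names, and as kernel-internal API this fragment cannot be built standalone):

```c
/*
 * Illustrative sketch only: 'site1', 'site2' and 'new_target' are
 * made-up names.  text_gen_insn() (defined later in this header)
 * assembles the 5-byte CALL; text_poke_queue() snapshots the bytes at
 * queue time, so reusing text_gen_insn()'s static buffer is fine here.
 */
static void retarget_two_calls(void *site1, void *site2, void *new_target)
{
	text_poke_queue(site1, text_gen_insn(CALL_INSN_OPCODE, site1, new_target),
			CALL_INSN_SIZE, NULL);
	text_poke_queue(site2, text_gen_insn(CALL_INSN_OPCODE, site2, new_target),
			CALL_INSN_SIZE, NULL);
	text_poke_finish();	/* one INT3/sync cycle covers both sites */
}
```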
```diff
+#define INT3_INSN_SIZE		1
+#define INT3_INSN_OPCODE	0xCC
+
+#define RET_INSN_SIZE		1
+#define RET_INSN_OPCODE		0xC3
+
+#define CALL_INSN_SIZE		5
+#define CALL_INSN_OPCODE	0xE8
+
+#define JMP32_INSN_SIZE		5
+#define JMP32_INSN_OPCODE	0xE9
+
+#define JMP8_INSN_SIZE		2
+#define JMP8_INSN_OPCODE	0xEB
+
+#define DISP32_SIZE		4
+
+static __always_inline int text_opcode_size(u8 opcode)
+{
+	int size = 0;
+
+#define __CASE(insn) \
+	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break
+
+	switch(opcode) {
+	__CASE(INT3);
+	__CASE(RET);
+	__CASE(CALL);
+	__CASE(JMP32);
+	__CASE(JMP8);
+	}
+
+#undef __CASE
+
+	return size;
+}
+
```
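text_opcode_size() is a plain opcode-to-length map generated by a small x-macro, returning 0 for opcodes it does not know. As a sanity check, here is a self-contained userspace mirror of the construction (my own test harness, not kernel code) that compiles and runs as-is:

```c
#include <assert.h>
#include <stdint.h>

typedef uint8_t u8;

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC
#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3
#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8
#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9
#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

/* Same x-macro construction as the kernel's text_opcode_size(). */
static int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn) \
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

int main(void)
{
	assert(text_opcode_size(0xCC) == 1);	/* INT3 */
	assert(text_opcode_size(0xC3) == 1);	/* RET */
	assert(text_opcode_size(0xE8) == 5);	/* CALL rel32 */
	assert(text_opcode_size(0xE9) == 5);	/* JMP rel32 */
	assert(text_opcode_size(0xEB) == 2);	/* JMP rel8 */
	assert(text_opcode_size(0x90) == 0);	/* unknown opcode -> 0 */
	return 0;
}
```

The zero return for unrecognized opcodes lets callers treat "unknown" as an error without a separate flag.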
```diff
+union text_poke_insn {
+	u8 text[POKE_MAX_OPCODE_SIZE];
+	struct {
+		u8 opcode;
+		s32 disp;
+	} __attribute__((packed));
+};
```
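The union gives two views of the same five bytes: a raw buffer to hand to text_poke*(), and an opcode byte followed by a rel32 displacement. `__attribute__((packed))` is what places the s32 at offset 1 rather than at its natural alignment. A quick userspace layout check (my own snippet, not kernel code):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint8_t u8;
typedef int32_t s32;

#define POKE_MAX_OPCODE_SIZE 5

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

int main(void)
{
	/* Packed: disp sits right after the opcode byte, at offset 1. */
	assert(offsetof(union text_poke_insn, disp) == 1);
	/* Without 'packed' the struct view would pad disp out to offset 4
	 * and the union would grow to 8 bytes. */
	assert(sizeof(union text_poke_insn) == POKE_MAX_OPCODE_SIZE);
	return 0;
}
```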
```diff
+
+static __always_inline
+void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
+{
+	static union text_poke_insn insn; /* per instance */
+	int size = text_opcode_size(opcode);
+
+	insn.opcode = opcode;
+
+	if (size > 1) {
+		insn.disp = (long)dest - (long)(addr + size);
+		if (size == 2) {
+			/*
+			 * Ensure that for JMP8 the displacement
+			 * actually fits the signed byte.
+			 */
+			BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
+		}
+	}
+
+	return &insn.text;
+}
+
```
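Two details here are worth unpacking. The displacement is computed as `dest - (addr + size)` because x86 relative branches encode the offset from the *end* of the instruction, not its start. And the JMP8 range check compares arithmetic shifts: `disp >> 31` replicates the sign bit, `disp >> 7` replicates bit 7 upward, and the two are equal exactly when bits 7..31 all match bit 7, i.e. when disp fits in a signed byte. The standalone Linux/x86-64 userspace demo below (my own sketch: it borrows only the union layout and the displacement math, not the kernel's INT3 cross-modification protocol, and needs a system that permits mprotect(PROT_WRITE|PROT_EXEC) on text) assembles a JMP32 and redirects one function to another:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define JMP32_INSN_OPCODE 0xE9
#define JMP32_INSN_SIZE   5

union text_poke_insn {
	uint8_t text[JMP32_INSN_SIZE];
	struct {
		uint8_t opcode;
		int32_t disp;
	} __attribute__((packed));
};

__attribute__((noinline)) static int old_fn(void) { return 1; }
__attribute__((noinline)) static int new_fn(void) { return 2; }

int main(void)
{
	int (*volatile fp)(void) = old_fn;	/* defeat constant folding */
	uint8_t *addr = (uint8_t *)old_fn;
	long page = sysconf(_SC_PAGESIZE);
	uintptr_t first = (uintptr_t)addr & ~(uintptr_t)(page - 1);
	size_t len = ((uintptr_t)addr + JMP32_INSN_SIZE) - first;
	union text_poke_insn insn;

	insn.opcode = JMP32_INSN_OPCODE;
	/* rel32 is measured from the *end* of the 5-byte instruction. */
	insn.disp = (int32_t)((intptr_t)new_fn - (intptr_t)(addr + JMP32_INSN_SIZE));

	/* Crude stand-in for text_poke(): make the page(s) writable,
	 * patch, then restore read/execute. */
	if (mprotect((void *)first, len, PROT_READ | PROT_WRITE | PROT_EXEC))
		return 1;
	memcpy(addr, insn.text, JMP32_INSN_SIZE);
	mprotect((void *)first, len, PROT_READ | PROT_EXEC);

	printf("old_fn() now returns %d\n", fp());	/* prints 2 */
	return 0;
}
```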
```diff
 extern int after_bootmem;
+extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init unsigned long poking_addr;
 
 #ifndef CONFIG_UML_X86
-static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+static __always_inline
+void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
 {
 	regs->ip = ip;
 }
 
-#define INT3_INSN_SIZE 1
-#define CALL_INSN_SIZE 5
-
-#ifdef CONFIG_X86_64
-static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+static __always_inline
+void int3_emulate_push(struct pt_regs *regs, unsigned long val)
 {
 	/*
 	 * The int3 handler in entry_64.S adds a gap between the
 	 * stack where the break point happened, and the saving of
 	 * pt_regs. We can extend the original stack because of
 	 * this gap. See the idtentry macro's create_gap option.
+	 *
+	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
+	 * exception and pt_regs; see FIXUP_FRAME.
 	 */
 	regs->sp -= sizeof(unsigned long);
 	*(unsigned long *)regs->sp = val;
 }
 
-static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+static __always_inline
+unsigned long int3_emulate_pop(struct pt_regs *regs)
+{
+	unsigned long val = *(unsigned long *)regs->sp;
+	regs->sp += sizeof(unsigned long);
+	return val;
+}
+
+static __always_inline
+void int3_emulate_call(struct pt_regs *regs, unsigned long func)
 {
 	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
 	int3_emulate_jmp(regs, func);
 }
-#endif /* CONFIG_X86_64 */
+
+static __always_inline
+void int3_emulate_ret(struct pt_regs *regs)
+{
+	unsigned long ip = int3_emulate_pop(regs);
+	int3_emulate_jmp(regs, ip);
+}
 #endif /* !CONFIG_UML_X86 */
```
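The int3_emulate_*() helpers replay, on the trapped register state, exactly what the instruction being patched would have done: CALL pushes a return address and branches, RET pops one and branches. The one subtle constant is the return address: at the time of the trap, regs->ip already points one byte past the INT3, so the address a real 5-byte CALL would have pushed is `regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE`. A runnable userspace model with a toy pt_regs (my own sketch; a plain array stands in for the stack, and no entry-path gap handling is needed outside the kernel):

```c
#include <assert.h>
#include <stdio.h>

#define INT3_INSN_SIZE	1
#define CALL_INSN_SIZE	5

/* Toy stand-in for the kernel's struct pt_regs: just ip and sp. */
struct fake_regs {
	unsigned long ip;
	unsigned long sp;
};

static void emulate_jmp(struct fake_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static void emulate_push(struct fake_regs *regs, unsigned long val)
{
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static unsigned long emulate_pop(struct fake_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static void emulate_call(struct fake_regs *regs, unsigned long func)
{
	/* ip points past the 1-byte INT3; a real CALL at the patch site
	 * would have pushed patch_site + CALL_INSN_SIZE. */
	emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	emulate_jmp(regs, func);
}

static void emulate_ret(struct fake_regs *regs)
{
	emulate_jmp(regs, emulate_pop(regs));
}

int main(void)
{
	unsigned long stack[16];
	struct fake_regs regs = {
		.ip = 0x1000 + INT3_INSN_SIZE,	 /* INT3 trapped at site 0x1000 */
		.sp = (unsigned long)&stack[16], /* empty descending stack */
	};

	emulate_call(&regs, 0x2000);
	assert(regs.ip == 0x2000);			/* branched to the callee */
	assert(*(unsigned long *)regs.sp == 0x1005);	/* pushed site + 5 */

	emulate_ret(&regs);				/* callee "returns" */
	assert(regs.ip == 0x1000 + CALL_INSN_SIZE);
	assert(regs.sp == (unsigned long)&stack[16]);

	puts("int3 call/ret emulation model OK");
	return 0;
}
```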
```diff
 
 #endif /* _ASM_X86_TEXT_PATCHING_H */
```