@@ -11,6 +11,7 @@
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/ftrace.h>
 #include <asm/patch.h>
 
 struct patch_insn {
@@ -18,6 +19,8 @@
         u32 insn;
         atomic_t cpu_count;
 };
+
+int riscv_patch_in_stop_machine = false;
 
 #ifdef CONFIG_MMU
 static void *patch_map(void *addr, int fixmap)
@@ -55,8 +58,15 @@
          * Before reaching here, it was expected to lock the text_mutex
          * already, so we don't need to give another lock here and could
          * ensure that it was safe between each cores.
+         *
+         * We're currently using stop_machine() for ftrace & kprobes, and while
+         * that ensures text_mutex is held before installing the mappings it
+         * does not ensure text_mutex is held by the calling thread.  That's
+         * safe but triggers a lockdep failure, so just elide it for that
+         * specific case.
          */
-        lockdep_assert_held(&text_mutex);
+        if (!riscv_patch_in_stop_machine)
+                lockdep_assert_held(&text_mutex);
 
         if (across_pages)
                 patch_map(addr + len, FIX_TEXT_POKE1);
@@ -117,13 +127,25 @@
 
 int patch_text(void *addr, u32 insn)
 {
+        int ret;
         struct patch_insn patch = {
                 .addr = addr,
                 .insn = insn,
                 .cpu_count = ATOMIC_INIT(0),
         };
 
-        return stop_machine_cpuslocked(patch_text_cb,
-                                       &patch, cpu_online_mask);
+        /*
+         * kprobes takes text_mutex before calling patch_text(), but as we
+         * call stop_machine() here, the lockdep assertion in
+         * patch_insn_write() gets confused by the context in which the lock
+         * is taken.  Instead, ensure the lock is held before calling
+         * stop_machine(), and set riscv_patch_in_stop_machine to skip the
+         * check in patch_insn_write().
+         */
+        lockdep_assert_held(&text_mutex);
+        riscv_patch_in_stop_machine = true;
+        ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
+        riscv_patch_in_stop_machine = false;
+        return ret;
 }
 NOKPROBE_SYMBOL(patch_text);
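With this change patch_text() itself asserts text_mutex and brackets the stop_machine_cpuslocked() call with riscv_patch_in_stop_machine, so the contract on callers is simply: hold text_mutex (and the CPU hotplug read lock, which is what allows the _cpuslocked variant) before calling patch_text(). A rough sketch of a kprobes-style arm path under that contract; the helper name is hypothetical and this follows the generic kprobes arming sequence as I understand it, it is not part of this diff:

#include <linux/cpu.h>           /* cpus_read_lock()/cpus_read_unlock() */
#include <linux/memory.h>        /* text_mutex */
#include <linux/mutex.h>
#include <asm/patch.h>           /* patch_text() */

/* Hypothetical arm helper modelled on the generic kprobes arm path. */
static void example_arm_breakpoint(void *addr, u32 bkpt_insn)
{
        /*
         * cpus_read_lock() is what lets patch_text() use
         * stop_machine_cpuslocked(); text_mutex is what the new
         * lockdep_assert_held() in patch_text() checks for.
         */
        cpus_read_lock();
        mutex_lock(&text_mutex);

        patch_text(addr, bkpt_insn);    /* rendezvous all CPUs and patch */

        mutex_unlock(&text_mutex);
        cpus_read_unlock();
}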