2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/kernel/cfi.c
@@ -1,16 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * CFI (Control Flow Integrity) error and slowpath handling
+ * Clang Control Flow Integrity (CFI) error and slowpath handling.
  *
- * Copyright (C) 2017 Google, Inc.
+ * Copyright (C) 2019 Google LLC
  */

-#include <linux/gfp.h>
+#include <linux/hardirq.h>
+#include <linux/kallsyms.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/printk.h>
 #include <linux/ratelimit.h>
 #include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <asm/bug.h>
+#include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>

@@ -25,74 +27,85 @@

 static inline void handle_cfi_failure(void *ptr)
 {
-#ifdef CONFIG_CFI_PERMISSIVE
-	WARN_RATELIMIT(1, "CFI failure (target: %pF):\n", ptr);
-#else
-	pr_err("CFI failure (target: %pF):\n", ptr);
-	BUG();
-#endif
+	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
+		WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
+	else
+		panic("CFI failure (target: %pS)\n", ptr);
 }

 #ifdef CONFIG_MODULES
 #ifdef CONFIG_CFI_CLANG_SHADOW
-struct shadow_range {
-	/* Module address range */
-	unsigned long mod_min_addr;
-	unsigned long mod_max_addr;
-	/* Module page range */
-	unsigned long min_page;
-	unsigned long max_page;
-};
-
-#define SHADOW_ORDER	1
-#define SHADOW_PAGES	(1 << SHADOW_ORDER)
-#define SHADOW_SIZE \
-	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
-#define SHADOW_INVALID	0xFFFF
+/*
+ * Index type. A 16-bit index can address at most (2^16)-2 pages (taking
+ * into account SHADOW_INVALID), i.e. ~256M with 4k pages.
+ */
+typedef u16 shadow_t;
+#define SHADOW_INVALID		((shadow_t)~0UL)

 struct cfi_shadow {
-	/* Page range covered by the shadow */
-	struct shadow_range r;
-	/* Page offsets to __cfi_check functions in modules */
-	u16 shadow[SHADOW_SIZE];
-};
+	/* Page index for the beginning of the shadow */
+	unsigned long base;
+	/* An array of __cfi_check locations (as indices to the shadow) */
+	shadow_t shadow[1];
+} __packed;

-static DEFINE_SPINLOCK(shadow_update_lock);
-static struct cfi_shadow __rcu *cfi_shadow __read_mostly = NULL;
+/*
+ * The shadow covers ~128M from the beginning of the module region. If
+ * the region is larger, we fall back to __module_address for the rest.
+ */
+#define __SHADOW_RANGE		(_UL(SZ_128M) >> PAGE_SHIFT)

+/* The in-memory size of struct cfi_shadow, always at least one page */
+#define __SHADOW_PAGES		((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT)
+#define SHADOW_PAGES		max(1UL, __SHADOW_PAGES)
+#define SHADOW_SIZE		(SHADOW_PAGES << PAGE_SHIFT)
+
+/* The actual size of the shadow array, minus metadata */
+#define SHADOW_ARR_SIZE		(SHADOW_SIZE - offsetof(struct cfi_shadow, shadow))
+#define SHADOW_ARR_SLOTS	(SHADOW_ARR_SIZE / sizeof(shadow_t))
+
+static DEFINE_MUTEX(shadow_update_lock);
+static struct cfi_shadow __rcu *cfi_shadow __read_mostly;
+
+/* Returns the index in the shadow for the given address */
 static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
 {
 	unsigned long index;
 	unsigned long page = ptr >> PAGE_SHIFT;

-	if (unlikely(page < s->r.min_page))
+	if (unlikely(page < s->base))
 		return -1; /* Outside of module area */

-	index = page - s->r.min_page;
+	index = page - s->base;

-	if (index >= SHADOW_SIZE)
+	if (index >= SHADOW_ARR_SLOTS)
 		return -1; /* Cannot be addressed with shadow */

 	return (int)index;
 }

+/* Returns the page address for an index in the shadow */
 static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
 	int index)
 {
-	BUG_ON(index < 0 || index >= SHADOW_SIZE);
+	if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
+		return 0;
+
+	return (s->base + index) << PAGE_SHIFT;
+}
+
+/* Returns the __cfi_check function address for the given shadow location */
+static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s,
+	int index)
+{
+	if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
+		return 0;

 	if (unlikely(s->shadow[index] == SHADOW_INVALID))
 		return 0;

-	return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT;
-}
-
-static inline unsigned long shadow_to_page(const struct cfi_shadow *s,
-	int index)
-{
-	BUG_ON(index < 0 || index >= SHADOW_SIZE);
-
-	return (s->r.min_page + index) << PAGE_SHIFT;
+	/* __cfi_check is always page aligned */
+	return (s->base + s->shadow[index]) << PAGE_SHIFT;
 }

 static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
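
For reference, the sizing math behind the new macros works out as follows on a typical 64-bit configuration with 4K pages. The snippet below is a standalone userspace sketch, not kernel code: PAGE_SHIFT, the shadow_t stand-in, and the assumption that offsetof(struct cfi_shadow, shadow) equals sizeof(unsigned long) (one base word in a __packed struct) are all illustrative.

/*
 * Standalone sketch of the __SHADOW_RANGE / SHADOW_PAGES /
 * SHADOW_ARR_SLOTS arithmetic. Compile with: cc -o shadow_math shadow_math.c
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL			/* assumes 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SZ_128M		(128UL << 20)

typedef unsigned short shadow_t;		/* stand-in for the kernel's u16 */

int main(void)
{
	unsigned long range = SZ_128M >> PAGE_SHIFT;	/* pages covered: 32768 */
	unsigned long pages = (range * sizeof(shadow_t)) >> PAGE_SHIFT;
	unsigned long shadow_pages = pages ? pages : 1;	/* max(1UL, ...) */
	unsigned long size = shadow_pages << PAGE_SHIFT;
	/* assumed: offsetof(struct cfi_shadow, shadow) == sizeof(unsigned long) */
	unsigned long slots = (size - sizeof(unsigned long)) / sizeof(shadow_t);

	printf("shadow: %lu pages (%lu bytes), %lu slots for %lu module pages\n",
	       shadow_pages, size, slots, range);
	return 0;
}

Running it prints 16 shadow pages (65536 bytes) and 32764 usable slots, i.e. the array addresses slightly less than the full 128M range because the base word eats a few slots; addresses past the end fall back to __module_address(), as the comment in the patch notes.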
@@ -101,128 +114,123 @@
 	int i, index, check;

 	/* Mark everything invalid */
-	memset(next->shadow, 0xFF, sizeof(next->shadow));
+	memset(next->shadow, 0xFF, SHADOW_ARR_SIZE);

 	if (!prev)
 		return; /* No previous shadow */

-	/* If the base address didn't change, update is not needed */
-	if (prev->r.min_page == next->r.min_page) {
-		memcpy(next->shadow, prev->shadow, sizeof(next->shadow));
+	/* If the base address didn't change, an update is not needed */
+	if (prev->base == next->base) {
+		memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE);
 		return;
 	}

 	/* Convert the previous shadow to the new address range */
-	for (i = 0; i < SHADOW_SIZE; ++i) {
+	for (i = 0; i < SHADOW_ARR_SLOTS; ++i) {
 		if (prev->shadow[i] == SHADOW_INVALID)
 			continue;

-		index = ptr_to_shadow(next, shadow_to_page(prev, i));
+		index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
 		if (index < 0)
 			continue;

 		check = ptr_to_shadow(next,
-			shadow_to_ptr(prev, prev->shadow[i]));
+			shadow_to_check_fn(prev, prev->shadow[i]));
 		if (check < 0)
 			continue;

-		next->shadow[index] = (u16)check;
+		next->shadow[index] = (shadow_t)check;
 	}
 }

-static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
+static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod,
+	unsigned long min_addr, unsigned long max_addr)
 {
-	unsigned long ptr;
-	unsigned long min_page_addr;
-	unsigned long max_page_addr;
+	int check_index;
 	unsigned long check = (unsigned long)mod->cfi_check;
-	int check_index = ptr_to_shadow(s, check);
+	unsigned long ptr;

-	BUG_ON((check & PAGE_MASK) != check); /* Must be page aligned */
+	if (unlikely(!PAGE_ALIGNED(check))) {
+		pr_warn("cfi: not using shadow for module %s\n", mod->name);
+		return;
+	}

+	check_index = ptr_to_shadow(s, check);
 	if (check_index < 0)
 		return; /* Module not addressable with shadow */

-	min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
-	max_page_addr = (unsigned long)mod->core_layout.base +
-		mod->core_layout.text_size;
-	max_page_addr &= PAGE_MASK;
-
 	/* For each page, store the check function index in the shadow */
-	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
+	for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
 		int index = ptr_to_shadow(s, ptr);
+
 		if (index >= 0) {
-			/* Assume a page only contains code for one module */
-			BUG_ON(s->shadow[index] != SHADOW_INVALID);
-			s->shadow[index] = (u16)check_index;
+			/* Each page must only contain one module */
+			WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID);
+			s->shadow[index] = (shadow_t)check_index;
 		}
 	}
 }

-static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod)
+static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod,
+	unsigned long min_addr, unsigned long max_addr)
 {
 	unsigned long ptr;
-	unsigned long min_page_addr;
-	unsigned long max_page_addr;

-	min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
-	max_page_addr = (unsigned long)mod->core_layout.base +
-		mod->core_layout.text_size;
-	max_page_addr &= PAGE_MASK;
-
-	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
+	for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
 		int index = ptr_to_shadow(s, ptr);
+
 		if (index >= 0)
 			s->shadow[index] = SHADOW_INVALID;
 	}
 }

-typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *);
+typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *,
+	unsigned long min_addr, unsigned long max_addr);

-static void update_shadow(struct module *mod, unsigned long min_addr,
-	unsigned long max_addr, update_shadow_fn fn)
+static void update_shadow(struct module *mod, unsigned long base_addr,
+	update_shadow_fn fn)
 {
 	struct cfi_shadow *prev;
-	struct cfi_shadow *next = (struct cfi_shadow *)
-		__get_free_pages(GFP_KERNEL, SHADOW_ORDER);
+	struct cfi_shadow *next;
+	unsigned long min_addr, max_addr;

-	BUG_ON(!next);
+	next = (struct cfi_shadow *)vmalloc(SHADOW_SIZE);
+	WARN_ON(!next);

-	next->r.mod_min_addr = min_addr;
-	next->r.mod_max_addr = max_addr;
-	next->r.min_page = min_addr >> PAGE_SHIFT;
-	next->r.max_page = max_addr >> PAGE_SHIFT;
+	mutex_lock(&shadow_update_lock);
+	prev = rcu_dereference_protected(cfi_shadow,
+		mutex_is_locked(&shadow_update_lock));

-	spin_lock(&shadow_update_lock);
-	prev = rcu_dereference_protected(cfi_shadow, 1);
-	prepare_next_shadow(prev, next);
+	if (next) {
+		next->base = base_addr >> PAGE_SHIFT;
+		prepare_next_shadow(prev, next);

-	fn(next, mod);
-	set_memory_ro((unsigned long)next, SHADOW_PAGES);
+		min_addr = (unsigned long)mod->core_layout.base;
+		max_addr = min_addr + mod->core_layout.text_size;
+		fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK);
+
+		set_memory_ro((unsigned long)next, SHADOW_PAGES);
+	}
+
 	rcu_assign_pointer(cfi_shadow, next);
-
-	spin_unlock(&shadow_update_lock);
-	synchronize_rcu();
+	mutex_unlock(&shadow_update_lock);
+	synchronize_rcu_expedited();

 	if (prev) {
 		set_memory_rw((unsigned long)prev, SHADOW_PAGES);
-		free_pages((unsigned long)prev, SHADOW_ORDER);
+		vfree(prev);
 	}
 }

-void cfi_module_add(struct module *mod, unsigned long min_addr,
-	unsigned long max_addr)
+void cfi_module_add(struct module *mod, unsigned long base_addr)
 {
-	update_shadow(mod, min_addr, max_addr, add_module_to_shadow);
+	update_shadow(mod, base_addr, add_module_to_shadow);
 }
-EXPORT_SYMBOL_GPL(cfi_module_add);

-void cfi_module_remove(struct module *mod, unsigned long min_addr,
-	unsigned long max_addr)
+void cfi_module_remove(struct module *mod, unsigned long base_addr)
 {
-	update_shadow(mod, min_addr, max_addr, remove_module_from_shadow);
+	update_shadow(mod, base_addr, remove_module_from_shadow);
 }
-EXPORT_SYMBOL_GPL(cfi_module_remove);

 static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
 	unsigned long ptr)
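
The add/remove paths in this hunk reduce to simple per-page bookkeeping: every page of a module's text maps to the index of the page holding its __cfi_check, and removal resets those entries to SHADOW_INVALID. Below is a minimal userspace sketch of that loop; the names (ptr_to_index, SLOTS, mod_base) are hypothetical stand-ins, and none of the kernel's locking, RCU publication, or read-only remapping is modeled.

#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLOTS		64			/* toy shadow capacity */
#define INVALID		((unsigned short)~0U)

static unsigned short shadow[SLOTS];		/* stand-in for s->shadow[] */
static unsigned long base;			/* first page index covered */

static int ptr_to_index(unsigned long ptr)
{
	unsigned long page = ptr >> PAGE_SHIFT;

	if (page < base || page - base >= SLOTS)
		return -1;			/* not addressable with the shadow */
	return (int)(page - base);
}

int main(void)
{
	/* Hypothetical module: __cfi_check on its first (page-aligned) page */
	unsigned long mod_base = 0x100000UL;
	unsigned long text_end = mod_base + 3 * PAGE_SIZE;
	unsigned long ptr;
	int check_index, i;

	for (i = 0; i < SLOTS; i++)
		shadow[i] = INVALID;
	base = mod_base >> PAGE_SHIFT;

	/* add_module_to_shadow(): each text page points at the check page */
	check_index = ptr_to_index(mod_base);
	for (ptr = mod_base; ptr <= text_end; ptr += PAGE_SIZE) {
		i = ptr_to_index(ptr);
		if (i >= 0)
			shadow[i] = (unsigned short)check_index;
	}

	/* lookup: any text address resolves to the __cfi_check page */
	ptr = mod_base + 2 * PAGE_SIZE + 0x42;
	i = ptr_to_index(ptr);
	if (i >= 0 && shadow[i] != INVALID)
		printf("%#lx -> check fn at %#lx\n", ptr,
		       (base + shadow[i]) << PAGE_SHIFT);

	/* remove_module_from_shadow(): mark the pages invalid again */
	for (ptr = mod_base; ptr <= text_end; ptr += PAGE_SIZE) {
		i = ptr_to_index(ptr);
		if (i >= 0)
			shadow[i] = INVALID;
	}
	return 0;
}

Because lookup is just an array index, the hot path stays O(1); update_shadow() pays the cost instead, rebuilding and republishing the whole array under the mutex on every module load or unload.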
@@ -232,73 +240,111 @@
 	if (unlikely(!s))
 		return NULL; /* No shadow available */

-	if (ptr < s->r.mod_min_addr || ptr > s->r.mod_max_addr)
-		return NULL; /* Not in a mapped module */
-
 	index = ptr_to_shadow(s, ptr);
 	if (index < 0)
 		return NULL; /* Cannot be addressed with shadow */

-	return (cfi_check_fn)shadow_to_ptr(s, index);
+	return (cfi_check_fn)shadow_to_check_fn(s, index);
 }
+
+static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
+{
+	cfi_check_fn fn;
+
+	rcu_read_lock_sched_notrace();
+	fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
+	rcu_read_unlock_sched_notrace();
+
+	return fn;
+}
+
+#else /* !CONFIG_CFI_CLANG_SHADOW */
+
+static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_CFI_CLANG_SHADOW */

-static inline cfi_check_fn find_module_cfi_check(void *ptr)
+static inline cfi_check_fn __find_module_check_fn(unsigned long ptr)
 {
+	cfi_check_fn fn = NULL;
 	struct module *mod;

-	preempt_disable();
-	mod = __module_address((unsigned long)ptr);
-	preempt_enable();
-
+	rcu_read_lock_sched_notrace();
+	mod = __module_address(ptr);
 	if (mod)
-		return mod->cfi_check;
+		fn = mod->cfi_check;
+	rcu_read_unlock_sched_notrace();

-	return CFI_CHECK_FN;
+	return fn;
 }

-static inline cfi_check_fn find_cfi_check(void *ptr)
+static inline cfi_check_fn find_check_fn(unsigned long ptr)
 {
-#ifdef CONFIG_CFI_CLANG_SHADOW
-	cfi_check_fn f;
-
-	if (!rcu_access_pointer(cfi_shadow))
-		return CFI_CHECK_FN; /* No loaded modules */
-
-	/* Look up the __cfi_check function to use */
-	rcu_read_lock();
-	f = ptr_to_check_fn(rcu_dereference(cfi_shadow), (unsigned long)ptr);
-	rcu_read_unlock();
-
-	if (f)
-		return f;
+	bool rcu;
+	cfi_check_fn fn = NULL;

 	/*
-	 * Fall back to find_module_cfi_check, which works also for a larger
-	 * module address space, but is slower.
+	 * Indirect call checks can happen when RCU is not watching. Both
+	 * the shadow and __module_address use RCU, so we need to wake it
+	 * up before proceeding. Use rcu_nmi_enter/exit() as these calls
+	 * can happen anywhere.
 	 */
-#endif /* CONFIG_CFI_CLANG_SHADOW */
+	rcu = rcu_is_watching();
+	if (!rcu)
+		rcu_nmi_enter();

-	return find_module_cfi_check(ptr);
+	if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) {
+		fn = __find_shadow_check_fn(ptr);
+		if (fn)
+			goto out;
+	}
+
+	if (is_kernel_text(ptr)) {
+		fn = __cfi_check;
+		goto out;
+	}
+
+	fn = __find_module_check_fn(ptr);
+
+out:
+	if (!rcu)
+		rcu_nmi_exit();
+
+	return fn;
 }

 void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
 {
-	cfi_check_fn check = find_cfi_check(ptr);
+	cfi_check_fn fn = find_check_fn((unsigned long)ptr);

-	if (likely(check))
-		check(id, ptr, diag);
+	if (!IS_ENABLED(CONFIG_CFI_PERMISSIVE))
+		diag = NULL;
+
+	if (likely(fn))
+		fn(id, ptr, diag);
 	else /* Don't allow unchecked modules */
 		handle_cfi_failure(ptr);
 }
-EXPORT_SYMBOL_GPL(cfi_slowpath_handler);
+
+#else /* !CONFIG_MODULES */
+
+void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
+{
+	handle_cfi_failure(ptr); /* No modules */
+}
+
 #endif /* CONFIG_MODULES */
+
+EXPORT_SYMBOL(cfi_slowpath_handler);

 void cfi_failure_handler(void *data, void *ptr, void *vtable)
 {
 	handle_cfi_failure(ptr);
 }
-EXPORT_SYMBOL_GPL(cfi_failure_handler);
+EXPORT_SYMBOL(cfi_failure_handler);

 void __cfi_check_fail(void *data, void *ptr)
 {
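
Taken together, the reworked find_check_fn() resolves a call target in three steps: the O(1) shadow lookup, then is_kernel_text() for the core kernel's own __cfi_check, then the slower __module_address() walk. The sketch below illustrates only that fallback order; the lookup helpers are hypothetical stand-ins, not kernel APIs, and the RCU-watching dance is omitted.

#include <stdio.h>

typedef void (*check_fn)(unsigned long);

static void kernel_check(unsigned long p) { printf("%#lx: kernel __cfi_check\n", p); }
static void module_check(unsigned long p) { printf("%#lx: module __cfi_check\n", p); }

/* Hypothetical stand-ins for the CFI shadow, is_kernel_text() and
 * __module_address(); the real versions are RCU-protected. */
static check_fn shadow_lookup(unsigned long p)  { return p == 1 ? module_check : NULL; }
static int in_kernel_text(unsigned long p)      { return p == 2; }
static check_fn module_lookup(unsigned long p)  { return p == 3 ? module_check : NULL; }

static check_fn find_check(unsigned long ptr)
{
	check_fn fn;

	fn = shadow_lookup(ptr);	/* fast path: O(1) shadow lookup */
	if (fn)
		return fn;
	if (in_kernel_text(ptr))	/* core kernel handles its own checks */
		return kernel_check;
	return module_lookup(ptr);	/* slow path: module address walk */
}

int main(void)
{
	unsigned long p;

	for (p = 1; p <= 4; p++) {
		check_fn fn = find_check(p);

		if (fn)
			fn(p);
		else
			printf("%#lx: no check fn -> handle_cfi_failure()\n", p);
	}
	return 0;
}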