@@ -5,10 +5,9 @@
 /* cpu_feature_enabled() cannot be used this early */
 #define USE_EARLY_PGTABLE_L5
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
-#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
@@ -18,21 +17,22 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
-#include <asm/pgtable.h>
 #include <asm/cpu_entry_area.h>
 
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
 {
-	if (panic)
-		return memblock_virt_alloc_try_nid(size, size,
-				__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
-	else
-		return memblock_virt_alloc_try_nid_nopanic(size, size,
-				__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+	void *ptr = memblock_alloc_try_nid(size, size,
+			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+	if (!ptr && should_panic)
+		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
@@ -244,22 +244,48 @@
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_KASAN_INLINE
-static int kasan_die_handler(struct notifier_block *self,
-			     unsigned long val,
-			     void *data)
+static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
+					       unsigned long addr,
+					       unsigned long end)
 {
-	if (val == DIE_GPF) {
-		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
-		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
-	}
-	return NOTIFY_OK;
+	p4d_t *p4d;
+	unsigned long next;
+	void *p;
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (p4d_none(*p4d)) {
+			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+			p4d_populate(&init_mm, p4d, p);
+		}
+	} while (p4d++, addr = next, addr != end);
 }
 
-static struct notifier_block kasan_die_notifier = {
-	.notifier_call = kasan_die_handler,
-};
-#endif
+static void __init kasan_shallow_populate_pgds(void *start, void *end)
+{
+	unsigned long addr, next;
+	pgd_t *pgd;
+	void *p;
+
+	addr = (unsigned long)start;
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, (unsigned long)end);
+
+		if (pgd_none(*pgd)) {
+			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+			pgd_populate(&init_mm, pgd, p);
+		}
+
+		/*
+		 * we need to populate p4ds to be synced when running in
+		 * four level mode - see sync_global_pgds_l4()
+		 */
+		kasan_shallow_populate_p4ds(pgd, addr, next);
+	} while (pgd++, addr = next, addr != (unsigned long)end);
+}
 
 void __init kasan_early_init(void)
 {
@@ -296,10 +322,6 @@
 {
 	int i;
 	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
-
-#ifdef CONFIG_KASAN_INLINE
-	register_die_notifier(&kasan_die_notifier);
-#endif
 
 	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
 
@@ -353,6 +375,24 @@
 
 	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+		kasan_mem_to_shadow((void *)VMALLOC_START));
+
+	/*
+	 * If we're in full vmalloc mode, don't back vmalloc space with early
+	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
+	 * the global table and we can populate the lower levels on demand.
+	 */
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_shallow_populate_pgds(
+			kasan_mem_to_shadow((void *)VMALLOC_START),
+			kasan_mem_to_shadow((void *)VMALLOC_END));
+	else
+		kasan_populate_early_shadow(
+			kasan_mem_to_shadow((void *)VMALLOC_START),
+			kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	kasan_populate_early_shadow(
+		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
 		shadow_cpu_entry_begin);
 
 	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,