forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/mm/kasan/init.c
@@ -1,21 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * This file contains some kasan initialization code.
+ * This file contains KASAN shadow initialization code.
  *
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 */
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
-#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/pfn.h>
 #include <linux/slab.h>
@@ -70,7 +64,8 @@
 	return false;
 }
 #endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__page_aligned_bss;
 
 static inline bool kasan_pte_table(pmd_t pmd)
 {
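Note on the hunk above: on 32-bit ARM each Linux PTE table is immediately followed by the hardware table the MMU actually walks, so the statically allocated early shadow PTE page has to reserve room for both. A minimal sketch of the definitions this change relies on; the ARM value matches arch/arm/include/asm/pgtable-2level.h, while the zero fallback for other architectures is an assumption about where this tree defines it:

/* 32-bit ARM, 2-level tables: Linux PTEs plus the hardware copy */
#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)

/* assumed generic fallback: no hardware copy, array size unchanged */
#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS	0
#endif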
@@ -84,8 +79,14 @@
 
 static __init void *early_alloc(size_t size, int node)
 {
-	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
-					BOOTMEM_ALLOC_ACCESSIBLE, node);
+	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+					   MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }
 
 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
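Note on the early_alloc() hunk above: this follows the bootmem removal. memblock_virt_alloc_try_nid() used to panic internally on failure, whereas memblock_alloc_try_nid() returns NULL, so the caller now panics explicitly. A sketch of the prototype being relied on (parameter names approximate), from include/linux/memblock.h:

void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

Passing MEMBLOCK_ALLOC_ACCESSIBLE as max_addr places no upper bound beyond what the kernel can currently address, and __pa(MAX_DMA_ADDRESS) as min_addr keeps the shadow allocations out of the DMA zone.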
@@ -124,7 +125,7 @@
 			pte_t *p;
 
 			if (slab_is_available())
-				p = pte_alloc_one_kernel(&init_mm, addr);
+				p = pte_alloc_one_kernel(&init_mm);
 			else
 				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
 			if (!p)
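Note on the hunk above: pte_alloc_one_kernel() dropped its unused address argument in a treewide cleanup, so only the call site changes; the allocation behaviour is the same. Assumed prototypes, before and after:

/* before */ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
/* after  */ pte_t *pte_alloc_one_kernel(struct mm_struct *mm);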
@@ -245,20 +246,9 @@
 			 * 3,2 - level page tables where we don't have
 			 * puds,pmds, so pgd_populate(), pud_populate()
 			 * is noops.
-			 *
-			 * The ifndef is required to avoid build breakage.
-			 *
-			 * With 5level-fixup.h, pgd_populate() is not nop and
-			 * we reference kasan_early_shadow_p4d. It's not defined
-			 * unless 5-level paging enabled.
-			 *
-			 * The ifndef can be dropped once all KASAN-enabled
-			 * architectures will switch to pgtable-nop4d.h.
 			 */
-#ifndef __ARCH_HAS_5LEVEL_HACK
 			pgd_populate(&init_mm, pgd,
 					lm_alias(kasan_early_shadow_p4d));
-#endif
 			p4d = p4d_offset(pgd, addr);
 			p4d_populate(&init_mm, p4d,
 					lm_alias(kasan_early_shadow_pud));
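Note on the hunk above: the removed comment said the __ARCH_HAS_5LEVEL_HACK guard could go once every KASAN-enabled architecture uses pgtable-nop4d.h, and this hunk drops it on that basis. With the p4d level folded, pgd_populate() degenerates to a no-op and p4d_offset() simply reinterprets the pgd entry, so calling them unconditionally is safe. A conceptual sketch (not code from this tree) of what the folding looks like:

/* folded p4d: a pgd entry *is* the p4d entry */
typedef struct { pgd_t pgd; } p4d_t;

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;	/* same slot, viewed one level down */
}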
@@ -455,9 +445,8 @@
 	addr = (unsigned long)kasan_mem_to_shadow(start);
 	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
-	if (WARN_ON((unsigned long)start %
-			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
+	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
 		return;
 
 	for (; addr < end; addr = next) {
@@ -491,9 +480,8 @@
 	shadow_start = kasan_mem_to_shadow(start);
 	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
-	if (WARN_ON((unsigned long)start %
-			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
+	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
 		return -EINVAL;
 
 	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
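Note on the last two hunks: these are intended as a pure rename, with KASAN_MEMORY_PER_SHADOW_PAGE expanding to the same quantity the open-coded expression computed, i.e. how much memory one page of shadow covers. Assumed definition (expected to live in mm/kasan/kasan.h in this tree):

#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)

With the default scale shift of 3 (one shadow byte per 8 bytes of memory) and 4 KiB pages, that is 32 KiB of memory per shadow page, so the WARN_ONs still enforce the same alignment on start and size as before.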