@@ -1,17 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * This file contains kasan initialization code for ARM64.
  *
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #define pr_fmt(fmt) "kasan: " fmt
-#include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/sched/task.h>
@@ -23,9 +18,10 @@
 #include <asm/kernel-pgtable.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
@@ -38,9 +34,27 @@
 
 static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 {
-	void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
 					      __pa(MAX_DMA_ADDRESS),
 					      MEMBLOCK_ALLOC_KASAN, node);
+	if (!p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE, node,
+		      __pa(MAX_DMA_ADDRESS));
+
+	return __pa(p);
+}
+
+static phys_addr_t __init kasan_alloc_raw_page(int node)
+{
+	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+					     __pa(MAX_DMA_ADDRESS),
+					     MEMBLOCK_ALLOC_KASAN, node);
+	if (!p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE, node,
+		      __pa(MAX_DMA_ADDRESS));
+
 	return __pa(p);
 }
 
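
The hunk above reworks the allocation helpers: kasan_alloc_zeroed_page() switches from memblock_virt_alloc_try_nid() to memblock_alloc_try_nid() and now panics explicitly on failure, and the new kasan_alloc_raw_page() wraps memblock_alloc_try_nid_raw(), which skips zero-initialisation. The raw page is safe only because its caller overwrites it at once. A minimal sketch of that pairing, using a hypothetical example_alloc_shadow_page() helper that is not part of the patch:

/*
 * Illustrative sketch only. An uninitialised (raw) memblock page is
 * acceptable for shadow memory because the caller fills it with
 * KASAN_SHADOW_INIT right away (0 for generic KASAN, a nonzero tag
 * pattern for the tag-based mode), so the memset(0) performed by the
 * non-raw allocator would be redundant work.
 */
static phys_addr_t __init example_alloc_shadow_page(int node)
{
	phys_addr_t page_phys = kasan_alloc_raw_page(node);

	memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
	return page_phys;
}

This is the same pattern kasan_pte_populate() follows in a later hunk.
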
@@ -71,17 +85,17 @@
 	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
 }
 
-static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pgd_none(READ_ONCE(*pgdp))) {
+	if (p4d_none(READ_ONCE(*p4dp))) {
 		phys_addr_t pud_phys = early ?
 				__pa_symbol(kasan_early_shadow_pud)
 					: kasan_alloc_zeroed_page(node);
-		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
+		__p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
+	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
 }
 
 static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
@@ -93,7 +107,7 @@
 	do {
 		phys_addr_t page_phys = early ?
 				__pa_symbol(kasan_early_shadow_page)
-					: kasan_alloc_zeroed_page(node);
+					: kasan_alloc_raw_page(node);
 		if (!early)
 			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
 		next = addr + PAGE_SIZE;
@@ -113,16 +127,28 @@
 	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
 }
 
-static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
+static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
+	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
 
 	do {
 		next = pud_addr_end(addr, end);
 		kasan_pmd_populate(pudp, addr, next, node, early);
 	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
+}
+
+static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+				      unsigned long end, int node, bool early)
+{
+	unsigned long next;
+	p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+	do {
+		next = p4d_addr_end(addr, end);
+		kasan_pud_populate(p4dp, addr, next, node, early);
+	} while (p4dp++, addr = next, addr != end);
 }
 
 static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
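
The hunk above threads the p4d level through the shadow-table population: kasan_pud_populate() now takes a p4d_t *, and the new kasan_p4d_populate() sits between the pgd and pud levels. On arm64 configurations that do not use five page-table levels the p4d is folded, so p4d_offset() simply reinterprets the pgd slot and the new loop body runs once for each pgd entry's range. A minimal sketch of the same top-down descent for a single kernel address, assuming already-populated tables and the folded-p4d case (example_walk is a hypothetical name, not part of the patch):

static pte_t *__init example_walk(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);	/* kernel (swapper) page table */
	p4d_t *p4dp = p4d_offset(pgdp, addr);	/* identity step when p4d is folded */
	pud_t *pudp = pud_offset(p4dp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);

	return pte_offset_kernel(pmdp, addr);	/* leaf entry covering addr */
}
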
@@ -134,7 +160,7 @@
 	pgdp = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		kasan_pud_populate(pgdp, addr, next, node, early);
+		kasan_p4d_populate(pgdp, addr, next, node, early);
 	} while (pgdp++, addr = next, addr != end);
 }
 
@@ -143,7 +169,8 @@
 {
 	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
 		     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
 			   true);
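
For context on the alignment checks above: KASAN derives a shadow address by scaling the kernel address and adding a constant offset, so KASAN_SHADOW_END and the candidate start addresses for each VA_BITS value must be PGDIR_SIZE-aligned for the pgd-level populate to cover them cleanly. The translation below mirrors kasan_mem_to_shadow() from include/linux/kasan.h; the example_ name is illustrative, not part of the patch:

/*
 * One shadow byte tracks (1 << KASAN_SHADOW_SCALE_SHIFT) bytes of kernel
 * memory; the scale shift is 3 for generic KASAN and 4 for the software
 * tag-based mode.
 */
static inline void *example_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

That relationship is what the first BUILD_BUG_ON() encodes: shifting the top of the 64-bit address space right by the scale shift and adding the offset must land exactly on KASAN_SHADOW_END, i.e. KASAN_SHADOW_OFFSET == KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)).
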
@@ -165,7 +192,7 @@
 
 	pgdp = pgd_offset_k(KASAN_SHADOW_START);
 	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
-	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
 	do {
 		set_pgd(pgdp_new, READ_ONCE(*pgdp));
 	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
@@ -183,18 +210,21 @@
 	set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
-void __init kasan_init(void)
+static void __init kasan_init_shadow(void)
 {
 	u64 kimg_shadow_start, kimg_shadow_end;
 	u64 mod_shadow_start, mod_shadow_end;
-	struct memblock_region *reg;
-	int i;
+	u64 vmalloc_shadow_end;
+	phys_addr_t pa_start, pa_end;
+	u64 i;
 
-	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
-	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
+	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
+	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
 
 	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
 	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
+
+	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
 
 	/*
 	 * We are going to perform proper setup of shadow memory.
@@ -210,20 +240,26 @@
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
-			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
 
-	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-				    (void *)mod_shadow_start);
-	kasan_populate_early_shadow((void *)kimg_shadow_end,
-				    kasan_mem_to_shadow((void *)PAGE_OFFSET));
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
+				    (void *)mod_shadow_start);
 
-	if (kimg_shadow_start > mod_shadow_end)
-		kasan_populate_early_shadow((void *)mod_shadow_end,
-					    (void *)kimg_shadow_start);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+	} else {
+		kasan_populate_early_shadow((void *)kimg_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+		if (kimg_shadow_start > mod_shadow_end)
+			kasan_populate_early_shadow((void *)mod_shadow_end,
+						    (void *)kimg_shadow_start);
+	}
 
-	for_each_memblock(memory, reg) {
-		void *start = (void *)__phys_to_virt(reg->base);
-		void *end = (void *)__phys_to_virt(reg->base + reg->size);
+	for_each_mem_range(i, &pa_start, &pa_end) {
+		void *start = (void *)__phys_to_virt(pa_start);
+		void *end = (void *)__phys_to_virt(pa_end);
 
 		if (start >= end)
 			break;
@@ -244,8 +280,21 @@
 
 	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
-
-	/* At this point kasan is fully initialized. Enable error messages */
-	init_task.kasan_depth = 0;
-	pr_info("KernelAddressSanitizer initialized\n");
 }
+
+static void __init kasan_init_depth(void)
+{
+	init_task.kasan_depth = 0;
+}
+
+void __init kasan_init(void)
+{
+	kasan_init_shadow();
+	kasan_init_depth();
+#if defined(CONFIG_KASAN_GENERIC)
+	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
+	pr_info("KernelAddressSanitizer initialized\n");
+#endif
+}
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */