--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -1,32 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/memory.h
  *
  * Copyright (C) 2000-2002 Russell King
  * Copyright (C) 2012 ARM Ltd.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
  * Note: this file should not be included by non-asm/.h files
  */
 #ifndef __ASM_MEMORY_H
 #define __ASM_MEMORY_H
 
-#include <linux/compiler.h>
 #include <linux/const.h>
-#include <linux/types.h>
-#include <asm/bug.h>
+#include <linux/sizes.h>
 #include <asm/page-def.h>
-#include <asm/sizes.h>
 
 /*
  * Size of the PCI I/O space. This must remain a power of two so that
@@ -35,56 +21,63 @@
 #define PCI_IO_SIZE		SZ_16M
 
 /*
- * Log2 of the upper bound of the size of a struct page. Used for sizing
- * the vmemmap region only, does not affect actual memory footprint.
- * We don't use sizeof(struct page) directly since taking its size here
- * requires its definition to be available at this point in the inclusion
- * chain, and it may not be a power of 2 in the first place.
- */
-#define STRUCT_PAGE_MAX_SHIFT	6
-
-/*
  * VMEMMAP_SIZE - allows the whole linear region to be covered by
  * a struct page array
+ *
+ * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
+ * needs to cover the memory region from the beginning of the 52-bit
+ * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
+ * keep a constant PAGE_OFFSET and "fallback" to using the higher end
+ * of the VMEMMAP where 52-bit support is not available in hardware.
  */
-#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SIZE	((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
+			 >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the linear map (top
- *		 (VA_BITS - 1))
- * KIMAGE_VADDR - the virtual address of the start of the kernel image
+ * PAGE_OFFSET - the virtual address of the start of the linear map, at the
+ *               start of the TTBR1 address space.
+ * PAGE_END - the end of the linear map, where all other kernel mappings begin.
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image.
  * VA_BITS - the maximum number of bits for virtual addresses.
- * VA_START - the first kernel virtual address.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
-#define VA_START		(UL(0xffffffffffffffff) - \
-	(UL(1) << VA_BITS) + 1)
-#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
-	(UL(1) << (VA_BITS - 1)) + 1)
+#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
+#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
 #define KIMAGE_VADDR		(MODULES_END)
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VADDR		(KASAN_SHADOW_END)
 #define MODULES_VSIZE		(SZ_128M)
-#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
+#define VMEMMAP_START		(-VMEMMAP_SIZE - SZ_2M)
+#define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
 
-#define KERNEL_START		_text
-#define KERNEL_END		_end
+#if VA_BITS > 48
+#define VA_BITS_MIN		(48)
+#else
+#define VA_BITS_MIN		(VA_BITS)
+#endif
+
+#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))
+
+#define KERNEL_START		_text
+#define KERNEL_END		_end
 
 /*
  * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
  * address space for the shadow region respectively. They can bloat the stack
  * significantly, so double the (minimum) stack size when they are in use.
  */
-#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+				 + KASAN_SHADOW_OFFSET)
 #define KASAN_THREAD_SHIFT	1
 #else
-#define KASAN_SHADOW_SIZE	(0)
 #define KASAN_THREAD_SHIFT	0
-#endif
+#define KASAN_SHADOW_END	(_PAGE_END(VA_BITS_MIN))
+#endif /* CONFIG_KASAN */
 
 #define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
 
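The sign-flip trick in this hunk deserves a second look: `_PAGE_OFFSET(va)` and `_PAGE_END(va)` rely on two's-complement negation of an unsigned constant, so the old `UL(0xffffffffffffffff) - ... + 1` spelling collapses to a single negation. To make the arithmetic concrete, here is a minimal standalone sketch (my own illustration, not part of the patch) that evaluates the new macros for the VA_BITS == 52, 4 KB page configuration on an LP64 host:

/* Sketch only: mirrors the patch's macros outside the kernel.
 * Assumes LP64; STRUCT_PAGE_MAX_SHIFT == 6 is the 64-byte upper
 * bound on sizeof(struct page) used for sizing the vmemmap.
 */
#include <stdio.h>

#define UL(x)			x##UL
#define VA_BITS			52
#define VA_BITS_MIN		48
#define PAGE_SHIFT		12
#define STRUCT_PAGE_MAX_SHIFT	6

#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define VMEMMAP_SIZE		((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
				 >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))

int main(void)
{
	printf("PAGE_OFFSET   = %#lx\n", PAGE_OFFSET);             /* 0xfff0000000000000 */
	printf("_PAGE_END(48) = %#lx\n", _PAGE_END(VA_BITS_MIN));  /* 0xffff800000000000 */
	printf("VMEMMAP_SIZE  = %#lx\n", VMEMMAP_SIZE);            /* 0x3e0000000000 */
	return 0;
}

Note how VMEMMAP_SIZE is now sized for the span from the 52-bit PAGE_OFFSET up to the 48-bit PAGE_END, exactly as the comment in the hunk describes: the compile-time layout stays the same whether or not the hardware actually provides 52-bit VAs.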
@@ -121,32 +114,28 @@
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
- */
-#if defined(CONFIG_DEBUG_ALIGN_RODATA)
-/*
- *  4 KB granule:   1 level 2 entry
- * 16 KB granule: 128 level 3 entries, with contiguous bit
- * 64 KB granule:  32 level 3 entries, with contiguous bit
- */
-#define SEGMENT_ALIGN		SZ_2M
-#else
-/*
+ *
  *  4 KB granule:  16 level 3 entries, with contiguous bit
  * 16 KB granule:   4 level 3 entries, without contiguous bit
  * 64 KB granule:   1 level 3 entry
  */
-#define SEGMENT_ALIGN		SZ_64K
-#endif
+#define SEGMENT_ALIGN		SZ_64K
 
 /*
  * Memory types available.
+ *
+ * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
+ *	      the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
+ *	      that protection_map[] only contains MT_NORMAL attributes.
  */
-#define MT_DEVICE_nGnRnE	0
-#define MT_DEVICE_nGnRE		1
-#define MT_DEVICE_GRE		2
-#define MT_NORMAL_NC		3
-#define MT_NORMAL		4
-#define MT_NORMAL_WT		5
+#define MT_NORMAL		0
+#define MT_NORMAL_TAGGED	1
+#define MT_NORMAL_NC		2
+#define MT_NORMAL_WT		3
+#define MT_DEVICE_nGnRnE	4
+#define MT_DEVICE_nGnRE		5
+#define MT_DEVICE_GRE		6
+#define MT_NORMAL_iNC_oWB	7
 
 /*
  * Memory types for Stage-2 translation
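One subtlety worth spelling out: the reordering of the MT_* indices is not cosmetic. As the new IMPORTANT comment says, vm_get_page_prot() builds a PROT_MTE mapping by or-ing MT_NORMAL_TAGGED into an attribute index that started out as MT_NORMAL, and that OR is only lossless when MT_NORMAL contributes no bits. A tiny illustration (my own, not kernel code):

#include <stdio.h>

#define MT_NORMAL		0	/* new numbering: index 0 */
#define MT_NORMAL_TAGGED	1

int main(void)
{
	/* 0 | 1 == 1: upgrading a MT_NORMAL entry to tagged is lossless. */
	printf("%d\n", MT_NORMAL | MT_NORMAL_TAGGED);

	/* Had MT_NORMAL kept its old index 4, the same OR would give
	 * 4 | 1 == 5 and silently select an unrelated attribute index. */
	printf("%d\n", 4 | 1);
	return 0;
}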
@@ -167,24 +156,22 @@
 #define IOREMAP_MAX_ORDER	(PMD_SHIFT)
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-#define __early_init_dt_declare_initrd(__start, __end)			\
-	do {								\
-		initrd_start = (__start);				\
-		initrd_end = (__end);					\
-	} while (0)
-#endif
-
 #ifndef __ASSEMBLY__
 
 #include <linux/bitops.h>
+#include <linux/compiler.h>
 #include <linux/mmdebug.h>
+#include <linux/types.h>
+#include <asm/bug.h>
+
+extern u64			vabits_actual;
+#define PAGE_END		(_PAGE_END(vabits_actual))
 
 extern s64			memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
 
-/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+/* the virtual base of the kernel image */
 extern u64			kimage_vaddr;
 
 /* the offset between the kernel virtual and physical mappings */
@@ -225,7 +212,7 @@
 	(__force __typeof__(addr))__addr;				\
 })
 
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 #define __tag_shifted(tag)	((u64)(tag) << 56)
 #define __tag_reset(addr)	__untagged_addr(addr)
 #define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
@@ -233,13 +220,24 @@
 #define __tag_shifted(tag)	0UL
 #define __tag_reset(addr)	(addr)
 #define __tag_get(addr)		0
-#endif
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
 
 static inline const void *__tag_set(const void *addr, u8 tag)
 {
 	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
 	return (const void *)(__addr | __tag_shifted(tag));
 }
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define arch_enable_tagging_sync()		mte_enable_kernel_sync()
+#define arch_enable_tagging_async()		mte_enable_kernel_async()
+#define arch_set_tagging_report_once(state)	mte_set_report_once(state)
+#define arch_force_async_tag_fault()		mte_check_tfsr_exit()
+#define arch_get_random_tag()			mte_get_random_tag()
+#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
+#define arch_set_mem_tag_range(addr, size, tag, init)	\
+			mte_set_mem_tag_range((addr), (size), (tag), (init))
+#endif /* CONFIG_KASAN_HW_TAGS */
 
 /*
  * Physical vs virtual RAM address space conversion. These are
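The tag helpers are plain bit-twiddling on bits 63:56 of the pointer, which makes them easy to sanity-check outside the kernel. A userspace round-trip sketch (mirrors of the macros above, illustrative only; assumes LP64, and the pointers are never dereferenced):

#include <stdint.h>
#include <stdio.h>

#define __tag_shifted(tag)	((uint64_t)(tag) << 56)
#define __tag_get(addr)		(uint8_t)((uint64_t)(addr) >> 56)

static const void *__tag_set(const void *addr, uint8_t tag)
{
	/* Clear the old tag in bits 63:56, then insert the new one. */
	uint64_t __addr = (uint64_t)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}

int main(void)
{
	/* An (untagged) kernel-style linear-map address, example value. */
	const void *p = (const void *)0xffff000012345678ULL;
	const void *q = __tag_set(p, 0x2a);

	printf("tagged = %#lx\n", (unsigned long)q);	/* 0x2aff000012345678 */
	printf("tag    = %#x\n", __tag_get(q));		/* 0x2a */
	return 0;
}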
@@ -249,19 +247,18 @@
 
 
 /*
- * The linear kernel range starts in the middle of the virtual adddress
- * space. Testing the top bit for the start of the region is a
- * sufficient check.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
  */
-#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr)	(((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
 
 #define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
-	phys_addr_t __x = (phys_addr_t)(x);				\
-	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
-			       __kimg_to_phys(__x);			\
+	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
+	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
 })
 
 #define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
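The new __is_lm_address() folds a two-sided range check into one compare: XOR-ing with PAGE_OFFSET clears the high bits exactly when the address shares them with the linear region, mapping [PAGE_OFFSET, PAGE_END) onto [0, PAGE_END - PAGE_OFFSET) so a single unsigned comparison tests both bounds. A standalone sketch with a fixed 48-bit layout (the kernel uses the runtime vabits_actual via PAGE_END instead):

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET	(-(1UL << 48))		/* 0xffff000000000000 */
#define PAGE_END	(-(1UL << 47))		/* 0xffff800000000000 */

#define __is_lm_address(addr) \
	(((uint64_t)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

int main(void)
{
	printf("%d\n", __is_lm_address(0xffff000012345678UL)); /* 1: linear map */
	printf("%d\n", __is_lm_address(0xffff800000000000UL)); /* 0: first address past it */
	printf("%d\n", __is_lm_address(0x0000000012345678UL)); /* 0: user address */
	return 0;
}

The old top-bit test was only valid while the linear map occupied exactly the bottom half of the kernel VA space; with a runtime PAGE_END that is no longer guaranteed.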
@@ -272,7 +269,7 @@
 #else
 #define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
 #define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
-#endif
+#endif /* CONFIG_DEBUG_VIRTUAL */
 
 #define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
 #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
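The conversions themselves stay branch-free arithmetic: __phys_to_virt() subtracts PHYS_OFFSET and ORs in PAGE_OFFSET (the OR is safe because the linear offset always fits below PAGE_OFFSET's set bits), and __lm_to_phys() reverses it. A quick round-trip check (48-bit layout again, PHYS_OFFSET picked arbitrarily for the example):

#include <assert.h>

#define PAGE_OFFSET		(-(1UL << 48))
#define PHYS_OFFSET		0x80000000UL	/* arbitrary example value */

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)

int main(void)
{
	unsigned long pa = 0x80012000UL;	/* some RAM page */
	unsigned long va = __phys_to_virt(pa);

	/* (0x80012000 - 0x80000000) | 0xffff000000000000 */
	assert(va == 0xffff000000012000UL);
	assert(__lm_to_phys(va) == pa);		/* the two directions invert */
	return 0;
}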
@@ -308,8 +305,8 @@
 #define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
-#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
+#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
 
 /*
  * With non-canonical CFI jump tables, the compiler replaces function
@@ -320,46 +317,62 @@
  * virtual address. Therefore, use inline assembly to ensure we are
  * always taking the address of the actual function.
  */
-#define __pa_function(x) ({						\
-	unsigned long addr;						\
+#define __va_function(x) ({						\
+	void *addr;							\
 	asm("adrp %0, " __stringify(x) "\n\t"				\
 	    "add  %0, %0, :lo12:" __stringify(x) : "=r" (addr));	\
-	__pa_symbol(addr);						\
+	addr;								\
 })
 
+#define __pa_function(x)	__pa_symbol(__va_function(x))
+
 /*
- * virt_to_page(k)	convert a _valid_ virtual address to struct page *
- * virt_addr_valid(k)	indicates whether a virtual address is valid
+ * virt_to_page(x)	convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(x)	indicates whether a virtual address is valid
  */
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	void *__addr = __va(page_to_phys(__page));			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
+#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
 #else
-#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-
-#define page_to_virt(page)	({					\
-	unsigned long __addr =						\
-		((__page_to_voff(page)) | PAGE_OFFSET);			\
-	const void *__addr_tag =					\
-		__tag_set((void *)__addr, page_kasan_tag(page));	\
-	((void *)__addr_tag);						\
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
+	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
 })
 
-#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+#define virt_to_page(x)	({						\
+	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
+	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
+	(struct page *)__addr;						\
+})
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
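Before the hunk resumes below, it is worth unpacking the new vmemmap arithmetic: a page's index into the linear map equals its struct page's index into the vmemmap array, so both directions become a divide and a multiply-add instead of the old mask-and-or games (which silently assumed power-of-two sizing). A standalone sketch under stated assumptions (struct page stubbed at its 64-byte upper bound, VMEMMAP_START chosen arbitrarily, KASAN tag handling omitted; pointers are never dereferenced):

#include <stdint.h>
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_OFFSET	(-(1UL << 48))
#define VMEMMAP_START	0xfffffdffffe00000UL	/* illustrative placement */

struct page { char pad[64]; };	/* stand-in: 2^STRUCT_PAGE_MAX_SHIFT bytes */

/* Same index arithmetic as the patch, minus the KASAN tag handling. */
static void *page_to_virt(struct page *page)
{
	uint64_t idx = ((uint64_t)page - VMEMMAP_START) / sizeof(struct page);
	return (void *)(PAGE_OFFSET + idx * PAGE_SIZE);
}

static struct page *virt_to_page(void *addr)
{
	uint64_t idx = ((uint64_t)addr - PAGE_OFFSET) / PAGE_SIZE;
	return (struct page *)(VMEMMAP_START + idx * sizeof(struct page));
}

int main(void)
{
	void *va = (void *)(PAGE_OFFSET + 123 * PAGE_SIZE);
	/* The two mappings are inverses for page-aligned addresses. */
	assert(page_to_virt(virt_to_page(va)) == va);
	return 0;
}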
 
-#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
-					   + PHYS_OFFSET) >> PAGE_SHIFT)
-#endif
-#endif
+#define virt_addr_valid(addr)	({					\
+	__typeof__(addr) __addr = __tag_reset(addr);			\
+	__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));	\
+})
 
-#define _virt_addr_is_linear(kaddr)	\
-	(__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr)		\
-	(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
+void dump_mem_limit(void);
+#endif /* !ASSEMBLY */
+
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with a LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
 
 #include <asm-generic/memory_model.h>
 
-#endif
+#endif /* __ASM_MEMORY_H */