From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] enable docker ppp
---
kernel/arch/arm64/include/asm/memory.h | 229 ++++++++++++++++++++++++++++++---------------------------
 1 file changed, 121 insertions(+), 108 deletions(-)
diff --git a/kernel/arch/arm64/include/asm/memory.h b/kernel/arch/arm64/include/asm/memory.h
index 3e0f9e3..a1f0e75 100644
--- a/kernel/arch/arm64/include/asm/memory.h
+++ b/kernel/arch/arm64/include/asm/memory.h
@@ -1,32 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Based on arch/arm/include/asm/memory.h
*
* Copyright (C) 2000-2002 Russell King
* Copyright (C) 2012 ARM Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Note: this file should not be included by non-asm/.h files
*/
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H
-#include <linux/compiler.h>
#include <linux/const.h>
-#include <linux/types.h>
-#include <asm/bug.h>
+#include <linux/sizes.h>
#include <asm/page-def.h>
-#include <asm/sizes.h>
/*
* Size of the PCI I/O space. This must remain a power of two so that
@@ -35,56 +21,63 @@
#define PCI_IO_SIZE SZ_16M
/*
- * Log2 of the upper bound of the size of a struct page. Used for sizing
- * the vmemmap region only, does not affect actual memory footprint.
- * We don't use sizeof(struct page) directly since taking its size here
- * requires its definition to be available at this point in the inclusion
- * chain, and it may not be a power of 2 in the first place.
- */
-#define STRUCT_PAGE_MAX_SHIFT 6
-
-/*
* VMEMMAP_SIZE - allows the whole linear region to be covered by
* a struct page array
+ *
+ * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
+ * needs to cover the memory region from the beginning of the 52-bit
+ * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
+ * keep a constant PAGE_OFFSET and "fallback" to using the higher end
+ * of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
+ >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
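As a cross-check of the new VMEMMAP_SIZE arithmetic, a standalone userspace sketch (not part of the patch; the 48-bit VA, 4K-page and 64-byte struct page values are assumed):

/* Standalone sketch: sanity-check VMEMMAP_SIZE for an assumed
 * VA_BITS_MIN=48, 4K-granule, 64-byte-struct-page configuration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned va = 48;			/* VA_BITS_MIN */
	const unsigned page_shift = 12;		/* 4K granule */
	const unsigned spmax = 6;		/* STRUCT_PAGE_MAX_SHIFT */

	uint64_t page_offset = -(UINT64_C(1) << va);		/* _PAGE_OFFSET(48) */
	uint64_t page_end    = -(UINT64_C(1) << (va - 1));	/* _PAGE_END(48)   */
	uint64_t vmemmap     = (page_end - page_offset) >> (page_shift - spmax);

	/* 2^47 B of linear map / 2^12 B pages * 2^6 B per struct page = 2^41 B */
	printf("vmemmap covers %llu GiB of struct pages\n",
	       (unsigned long long)(vmemmap >> 30));
	return 0;
}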
/*
- * PAGE_OFFSET - the virtual address of the start of the linear map (top
- * (VA_BITS - 1))
- * KIMAGE_VADDR - the virtual address of the start of the kernel image
+ * PAGE_OFFSET - the virtual address of the start of the linear map, at the
+ * start of the TTBR1 address space.
+ * PAGE_END - the end of the linear map, where all other kernel mappings begin.
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image.
* VA_BITS - the maximum number of bits for virtual addresses.
- * VA_START - the first kernel virtual address.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
-#define VA_START (UL(0xffffffffffffffff) - \
- (UL(1) << VA_BITS) + 1)
-#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
- (UL(1) << (VA_BITS - 1)) + 1)
+#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
+#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VADDR (KASAN_SHADOW_END)
#define MODULES_VSIZE (SZ_128M)
-#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
+#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
+#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
-#define KERNEL_START _text
-#define KERNEL_END _end
+#if VA_BITS > 48
+#define VA_BITS_MIN (48)
+#else
+#define VA_BITS_MIN (VA_BITS)
+#endif
+
+#define _PAGE_END(va) (-(UL(1) << ((va) - 1)))
+
+#define KERNEL_START _text
+#define KERNEL_END _end
/*
* Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
* address space for the shadow region respectively. They can bloat the stack
* significantly, so double the (minimum) stack size when they are in use.
*/
-#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
#define KASAN_THREAD_SHIFT 1
#else
-#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0
-#endif
+#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN))
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
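Taken together, the macros above place the linear map at the bottom of the TTBR1 space and everything else above it; the bases are all negative (sign-extended) constants. A standalone sketch of the layout they produce, assuming VA_BITS=48 and KASAN disabled (so KASAN_SHADOW_END falls back to _PAGE_END):

/* Standalone sketch: derive the VA layout these macros produce for an
 * assumed VA_BITS=48, KASAN-disabled configuration. */
#include <stdio.h>
#include <stdint.h>

#define SZ_128M (128ULL << 20)

int main(void)
{
	const unsigned va_bits = 48, va_bits_min = 48;

	uint64_t page_offset      = -(UINT64_C(1) << va_bits);		/* PAGE_OFFSET   */
	uint64_t kasan_shadow_end = -(UINT64_C(1) << (va_bits_min - 1));/* _PAGE_END     */
	uint64_t modules_vaddr    = kasan_shadow_end;			/* MODULES_VADDR */
	uint64_t modules_end      = modules_vaddr + SZ_128M;		/* MODULES_END   */
	uint64_t kimage_vaddr     = modules_end;			/* KIMAGE_VADDR  */

	printf("PAGE_OFFSET   = 0x%016llx\n", (unsigned long long)page_offset);
	printf("MODULES_VADDR = 0x%016llx\n", (unsigned long long)modules_vaddr);
	printf("KIMAGE_VADDR  = 0x%016llx\n", (unsigned long long)kimage_vaddr);
	return 0;
}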
@@ -121,32 +114,28 @@
/*
* Alignment of kernel segments (e.g. .text, .data).
- */
-#if defined(CONFIG_DEBUG_ALIGN_RODATA)
-/*
- * 4 KB granule: 1 level 2 entry
- * 16 KB granule: 128 level 3 entries, with contiguous bit
- * 64 KB granule: 32 level 3 entries, with contiguous bit
- */
-#define SEGMENT_ALIGN SZ_2M
-#else
-/*
+ *
* 4 KB granule: 16 level 3 entries, with contiguous bit
* 16 KB granule: 4 level 3 entries, without contiguous bit
* 64 KB granule: 1 level 3 entry
*/
-#define SEGMENT_ALIGN SZ_64K
-#endif
+#define SEGMENT_ALIGN SZ_64K
/*
* Memory types available.
+ *
+ * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
+ * the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
+ * that protection_map[] only contains MT_NORMAL attributes.
*/
-#define MT_DEVICE_nGnRnE 0
-#define MT_DEVICE_nGnRE 1
-#define MT_DEVICE_GRE 2
-#define MT_NORMAL_NC 3
-#define MT_NORMAL 4
-#define MT_NORMAL_WT 5
+#define MT_NORMAL 0
+#define MT_NORMAL_TAGGED 1
+#define MT_NORMAL_NC 2
+#define MT_NORMAL_WT 3
+#define MT_DEVICE_nGnRnE 4
+#define MT_DEVICE_nGnRE 5
+#define MT_DEVICE_GRE 6
+#define MT_NORMAL_iNC_oWB 7
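A standalone sketch of why the index-0 constraint matters (the PTE AttrIndx field layout and the vm_get_page_prot() behaviour are paraphrased here for illustration, not taken from this patch):

/* Standalone sketch: ORing MT_NORMAL_TAGGED into a protection value
 * only yields a valid attr index because the field already holds 0
 * (MT_NORMAL); with any other MT_NORMAL index the OR would corrupt it. */
#include <assert.h>
#include <stdint.h>

#define ATTRINDX_SHIFT 2			/* PTE AttrIndx[2:0] at bits 4:2 */
#define ATTRINDX(t)    ((uint64_t)(t) << ATTRINDX_SHIFT)

enum { MT_NORMAL = 0, MT_NORMAL_TAGGED = 1 };

int main(void)
{
	uint64_t prot = ATTRINDX(MT_NORMAL);	/* as encoded in protection_map[] */
	prot |= ATTRINDX(MT_NORMAL_TAGGED);	/* PROT_MTE upgrade */
	assert((prot >> ATTRINDX_SHIFT) == MT_NORMAL_TAGGED);
	return 0;
}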
/*
* Memory types for Stage-2 translation
@@ -167,24 +156,22 @@
#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif
-#ifdef CONFIG_BLK_DEV_INITRD
-#define __early_init_dt_declare_initrd(__start, __end) \
- do { \
- initrd_start = (__start); \
- initrd_end = (__end); \
- } while (0)
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
+#include <linux/compiler.h>
#include <linux/mmdebug.h>
+#include <linux/types.h>
+#include <asm/bug.h>
+
+extern u64 vabits_actual;
+#define PAGE_END (_PAGE_END(vabits_actual))
extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
-/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+/* the virtual base of the kernel image */
extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
@@ -225,7 +212,7 @@
(__force __typeof__(addr))__addr; \
})
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag) ((u64)(tag) << 56)
#define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
@@ -233,13 +220,24 @@
#define __tag_shifted(tag) 0UL
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
-#endif
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline const void *__tag_set(const void *addr, u8 tag)
{
u64 __addr = (u64)addr & ~__tag_shifted(0xff);
return (const void *)(__addr | __tag_shifted(tag));
}
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define arch_enable_tagging_sync() mte_enable_kernel_sync()
+#define arch_enable_tagging_async() mte_enable_kernel_async()
+#define arch_set_tagging_report_once(state) mte_set_report_once(state)
+#define arch_force_async_tag_fault() mte_check_tfsr_exit()
+#define arch_get_random_tag() mte_get_random_tag()
+#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
+#define arch_set_mem_tag_range(addr, size, tag, init) \
+ mte_set_mem_tag_range((addr), (size), (tag), (init))
+#endif /* CONFIG_KASAN_HW_TAGS */
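The arch_* hooks above simply forward to the MTE helpers; the __tag_* macros themselves are plain bit operations on bits 63:56 of a kernel pointer. A standalone sketch of how they compose (userspace stand-ins, not the kernel definitions):

/* Standalone sketch: set and read back a pointer tag in bits 63:56. */
#include <stdio.h>
#include <stdint.h>

#define __tag_shifted(tag) ((uint64_t)(tag) << 56)
#define __tag_get(addr)    (uint8_t)((uint64_t)(addr) >> 56)

static const void *__tag_set(const void *addr, uint8_t tag)
{
	uint64_t a = (uint64_t)addr & ~__tag_shifted(0xff);
	return (const void *)(a | __tag_shifted(tag));
}

int main(void)
{
	const void *p = (const void *)0xffff000012345678ULL;
	const void *t = __tag_set(p, 0x5a);
	printf("tagged=%p tag=0x%02x\n", (void *)t, __tag_get(t));
	return 0;
}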
/*
* Physical vs virtual RAM address space conversion. These are
@@ -249,19 +247,18 @@
/*
- * The linear kernel range starts in the middle of the virtual adddress
- * space. Testing the top bit for the start of the region is a
- * sufficient check.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
*/
-#define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
- phys_addr_t __x = (phys_addr_t)(x); \
- __is_lm_address(__x) ? __lm_to_phys(__x) : \
- __kimg_to_phys(__x); \
+ phys_addr_t __x = (phys_addr_t)(__tag_reset(x)); \
+ __is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x); \
})
#define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x))
@@ -272,7 +269,7 @@
#else
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
-#endif
+#endif /* CONFIG_DEBUG_VIRTUAL */
#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
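The XOR form of __is_lm_address() works because PAGE_OFFSET is aligned to the size of the linear region, so the XOR clears the shared high bits exactly when the address lies in [PAGE_OFFSET, PAGE_END). A standalone sketch with assumed 48-bit constants:

/* Standalone sketch: the XOR range check and the linear-map phys
 * conversion, with assumed 48-bit VA constants and PHYS_OFFSET. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_offset = 0xffff000000000000ULL;	/* PAGE_OFFSET */
	uint64_t page_end    = 0xffff800000000000ULL;	/* PAGE_END */
	uint64_t phys_offset = 0x80000000ULL;		/* PHYS_OFFSET (assumed) */

	/* __is_lm_address(): addr ^ PAGE_OFFSET < span  <=>  in [start, end) */
	#define IS_LM(a) (((a) ^ page_offset) < (page_end - page_offset))
	assert( IS_LM(page_offset));			/* first byte of linear map */
	assert(!IS_LM(page_end));			/* first byte past it */
	assert(!IS_LM(0xffff800000001000ULL));		/* kernel mapping, not linear */

	/* __lm_to_phys(): strip the VA base bits, add PHYS_OFFSET */
	uint64_t va = page_offset + 0x1000;
	assert(((va & ~page_offset) + phys_offset) == phys_offset + 0x1000);
	return 0;
}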
@@ -308,8 +305,8 @@
#define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
-#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
/*
* With non-canonical CFI jump tables, the compiler replaces function
@@ -320,46 +317,62 @@
* virtual address. Therefore, use inline assembly to ensure we are
* always taking the address of the actual function.
*/
-#define __pa_function(x) ({ \
- unsigned long addr; \
+#define __va_function(x) ({ \
+ void *addr; \
asm("adrp %0, " __stringify(x) "\n\t" \
"add %0, %0, :lo12:" __stringify(x) : "=r" (addr)); \
- __pa_symbol(addr); \
+ addr; \
})
+#define __pa_function(x) __pa_symbol(__va_function(x))
+
/*
- * virt_to_page(k) convert a _valid_ virtual address to struct page *
- * virt_addr_valid(k) indicates whether a virtual address is valid
+ * virt_to_page(x) converts a _valid_ virtual address to struct page *
+ * virt_addr_valid(x) indicates whether a virtual address is valid
*/
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x) ({ \
+ __typeof__(x) __page = x; \
+ void *__addr = __va(page_to_phys(__page)); \
+ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
+#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
#else
-#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-
-#define page_to_virt(page) ({ \
- unsigned long __addr = \
- ((__page_to_voff(page)) | PAGE_OFFSET); \
- const void *__addr_tag = \
- __tag_set((void *)__addr, page_kasan_tag(page)); \
- ((void *)__addr_tag); \
+#define page_to_virt(x) ({ \
+ __typeof__(x) __page = x; \
+ u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
+ u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \
+ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
-#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+#define virt_to_page(x) ({ \
+ u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \
+ u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \
+ (struct page *)__addr; \
+})
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
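Both directions are index arithmetic against the same two bases (PAGE_OFFSET and VMEMMAP_START), so they invert each other. A standalone sketch with assumed bases and a 64-byte stand-in for struct page:

/* Standalone sketch: the virt_to_page()/page_to_virt() arithmetic. */
#include <assert.h>
#include <stdint.h>

struct page { char pad[64]; };	/* stand-in, <= 1 << STRUCT_PAGE_MAX_SHIFT */

int main(void)
{
	const uint64_t page_offset   = 0xffff000000000000ULL;	/* PAGE_OFFSET */
	const uint64_t vmemmap_start = 0xfffffdffffe00000ULL;	/* assumed VMEMMAP_START */
	const uint64_t page_size     = 4096;

	uint64_t va = page_offset + 123 * page_size;

	/* virt_to_page(): linear VA -> page index -> struct page address */
	uint64_t idx  = (va - page_offset) / page_size;
	uint64_t page = vmemmap_start + idx * sizeof(struct page);

	/* page_to_virt(): struct page address -> index -> linear VA */
	uint64_t back = page_offset +
			((page - vmemmap_start) / sizeof(struct page)) * page_size;
	assert(back == va && idx == 123);
	return 0;
}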
-#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
- + PHYS_OFFSET) >> PAGE_SHIFT)
-#endif
-#endif
+#define virt_addr_valid(addr) ({ \
+ __typeof__(addr) __addr = __tag_reset(addr); \
+ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+})
-#define _virt_addr_is_linear(kaddr) \
- (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr) \
- (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
+void dump_mem_limit(void);
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with an LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
#include <asm-generic/memory_model.h>
-#endif
+#endif /* __ASM_MEMORY_H */
--
Gitblit v1.6.2