2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
--- a/kernel/arch/powerpc/include/asm/page.h
+++ b/kernel/arch/powerpc/include/asm/page.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _ASM_POWERPC_PAGE_H
 #define _ASM_POWERPC_PAGE_H
 
 /*
  * Copyright (C) 2001,2005 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef __ASSEMBLY__
@@ -20,28 +16,23 @@
 
 /*
  * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
- * on PPC44x). For PPC64 we support either 4K or 64K software
+ * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
  * page size. When using 64K pages however, whether we are really supporting
  * 64K pages in HW or not is irrelevant to those definitions.
  */
-#if defined(CONFIG_PPC_256K_PAGES)
-#define PAGE_SHIFT		18
-#elif defined(CONFIG_PPC_64K_PAGES)
-#define PAGE_SHIFT		16
-#elif defined(CONFIG_PPC_16K_PAGES)
-#define PAGE_SHIFT		14
-#else
-#define PAGE_SHIFT		12
-#endif
-
+#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
 #define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
 
 #ifndef __ASSEMBLY__
-#ifdef CONFIG_HUGETLB_PAGE
-extern bool hugetlb_disabled;
-extern unsigned int HPAGE_SHIFT;
-#else
+#ifndef CONFIG_HUGETLB_PAGE
 #define HPAGE_SHIFT PAGE_SHIFT
+#elif defined(CONFIG_PPC_BOOK3S_64)
+extern unsigned int hpage_shift;
+#define HPAGE_SHIFT hpage_shift
+#elif defined(CONFIG_PPC_8xx)
+#define HPAGE_SHIFT		19	/* 512k pages */
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define HPAGE_SHIFT		22	/* 4M pages */
 #endif
 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
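
Note: PAGE_SHIFT now comes straight from Kconfig, and the HPAGE_* family is derived by plain power-of-two arithmetic. A minimal user-space sketch of that derivation, using the FSL Book3E value from this hunk (the TOY_* names are local to the example, not kernel symbols):

    #include <assert.h>
    #include <stdio.h>

    /* mirrors HPAGE_SIZE/HPAGE_MASK above; 22 is the FSL Book3E case */
    #define TOY_HPAGE_SHIFT	22
    #define TOY_HPAGE_SIZE	(1UL << TOY_HPAGE_SHIFT)
    #define TOY_HPAGE_MASK	(~(TOY_HPAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x12745678UL;

        /* the mask rounds any address down to its 4M huge-page base */
        printf("size=%#lx base=%#lx\n", TOY_HPAGE_SIZE, addr & TOY_HPAGE_MASK);
        assert((addr & TOY_HPAGE_MASK) % TOY_HPAGE_SIZE == 0);
        return 0;
    }
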
@@ -141,18 +132,11 @@
 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/*
- * On hash the vmalloc and other regions alias to the kernel region when passed
- * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
- * return true for some vmalloc addresses, which is incorrect. So explicitly
- * check that the address is in the kernel region.
- */
-#define virt_addr_valid(kaddr)	(REGION_ID(kaddr) == KERNEL_REGION_ID && \
-				 pfn_valid(virt_to_pfn(kaddr)))
-#else
-#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
-#endif
+#define virt_addr_valid(vaddr)	({					\
+	unsigned long _addr = (unsigned long)vaddr;			\
+	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
+	pfn_valid(virt_to_pfn(_addr));					\
+})
 
 /*
  * On Book-E parts we need __va to parse the device tree and we can't
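
Note: the replacement folds both halves of the old conditional into one definition: a candidate address must fall inside the linear map (between PAGE_OFFSET and high_memory) before its pfn is even consulted, which also screens out the vmalloc aliases the removed Book3S-64 comment worried about. A stand-alone sketch of the same statement-expression pattern (a GCC/Clang extension), with toy constants standing in for the real memory-layout values:

    #include <stdio.h>

    /* toy stand-ins; the real values come from the kernel's memory layout */
    #define TOY_PAGE_OFFSET	0xc0000000UL
    static unsigned long toy_high_memory = 0xf0000000UL;

    static int toy_pfn_valid(unsigned long pfn) { return pfn < 0x30000; }

    /* same shape as the new virt_addr_valid(): range check, then pfn check */
    #define toy_virt_addr_valid(vaddr) ({				\
        unsigned long _addr = (unsigned long)(vaddr);			\
        _addr >= TOY_PAGE_OFFSET && _addr < toy_high_memory &&		\
        toy_pfn_valid((_addr - TOY_PAGE_OFFSET) >> 12);			\
    })

    int main(void)
    {
        printf("%d %d\n", toy_virt_addr_valid(0xc0001000UL),	/* 1 */
                          toy_virt_addr_valid(0x10000000UL));	/* 0: user range */
        return 0;
    }
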
@@ -229,15 +213,28 @@
  */
 #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
-#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
+#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
 #ifdef CONFIG_PPC64
+
+#define VIRTUAL_WARN_ON(x)	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
+
 /*
  * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
  * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ * This also results in better code generation.
  */
-#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
-#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+#define __va(x)								\
+({									\
+	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);		\
+	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
+})
+
+#define __pa(x)								\
+({									\
+	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);		\
+	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
+})
 
 #else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
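
Note: the | / & trick is sound because the 64-bit linear map starts at a high, power-of-two-aligned PAGE_OFFSET while physical addresses never reach bit 60, so OR-ing the offset in equals adding it and masking it off equals subtracting it. A quick self-contained check of that identity, assuming the usual Book3S-64 value of PAGE_OFFSET (an assumption of this sketch, not taken from the hunk):

    #include <assert.h>

    #define TOY_PAGE_OFFSET	0xc000000000000000UL
    #define TOY_PA_MASK		0x0fffffffffffffffUL

    int main(void)
    {
        /* any physical address below 2^60 */
        unsigned long pa = 0x0000000123456000UL;
        unsigned long va = pa | TOY_PAGE_OFFSET;

        assert(va == pa + TOY_PAGE_OFFSET);	/* | behaves as + */
        assert((va & TOY_PA_MASK) == pa);	/* & behaves as - */
        return 0;
    }

The new VIRTUAL_WARN_ON() lines catch callers that feed __va() a virtual address or __pa() a physical one, but only when CONFIG_DEBUG_VIRTUAL is enabled, so production builds pay nothing.
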
@@ -250,13 +247,8 @@
  * and needs to be executable.  This means the whole heap ends
  * up being executable.
  */
-#define VM_DATA_DEFAULT_FLAGS32 \
-	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-				 VM_READ | VM_WRITE | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
+#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC
 
 #ifdef __powerpc64__
 #include <asm/page_64.h>
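
Note: VM_DATA_FLAGS_TSK_EXEC and VM_DATA_FLAGS_NON_EXEC are the generic helpers from include/linux/mm.h that this hunk switches to; the TSK_EXEC variant still honours the task's READ_IMPLIES_EXEC personality, so behaviour is unchanged. Roughly, the generic definitions look like this (paraphrased from mainline, not guaranteed verbatim):

    #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

    #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
    #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
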
@@ -264,21 +256,16 @@
 #include <asm/page_32.h>
 #endif
 
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr, size)	__ALIGN_KERNEL(addr, size)
-#define _ALIGN_DOWN(addr, size)	((addr)&(~((typeof(addr))(size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
-
 /*
  * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
  * "kernelness", use is_kernel_addr() - it should do what you want.
  */
 #ifdef CONFIG_PPC_BOOK3E_64
 #define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
-#else
+#elif defined(CONFIG_PPC_BOOK3S_64)
 #define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
+#else
+#define is_kernel_addr(x)	((x) >= TASK_SIZE)
 #endif
 
 #ifndef CONFIG_PPC_BOOK3S_64
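
Note: the comment's advice matters because the user/kernel boundary is not PAGE_OFFSET on every platform, which is exactly why the 32-bit fallback now tests against TASK_SIZE. A toy illustration of the fallback's behaviour (the boundary constant is an illustrative placeholder, not the real per-platform value):

    #include <stdio.h>

    #define TOY_TASK_SIZE	0xc0000000UL	/* illustrative 32-bit split */

    static int toy_is_kernel_addr(unsigned long x)
    {
        return x >= TOY_TASK_SIZE;		/* the new 32-bit fallback */
    }

    int main(void)
    {
        printf("%d %d\n", toy_is_kernel_addr(0xc0100000UL),	/* 1 */
                          toy_is_kernel_addr(0x10000000UL));	/* 0 */
        return 0;
    }
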
@@ -289,7 +276,7 @@
  * page tables at arbitrary addresses, this breaks and will have to change.
  */
 #ifdef CONFIG_PPC64
-#define PD_HUGE 0x8000000000000000
+#define PD_HUGE 0x8000000000000000UL
 #else
 #define PD_HUGE 0x80000000
 #endif
@@ -305,8 +292,13 @@
 /*
  * Some number of bits at the level of the page table that points to
  * a hugepte are used to encode the size.  This masks those bits.
+ * On 8xx, HW assistance requires 4k alignment for the hugepte.
  */
+#ifdef CONFIG_PPC_8xx
+#define HUGEPD_SHIFT_MASK	0xfff
+#else
 #define HUGEPD_SHIFT_MASK	0x3f
+#endif
 
 #ifndef __ASSEMBLY__
 
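
Note: the mask exists so a huge-page directory entry can carry both a pointer and an encoded size in its low bits; widening it to 0xfff on 8xx reflects the 4k alignment the hardware assist requires. A simplified sketch of the pack/unpack round trip (the real encoding also involves PD_HUGE and friends; this only shows the masking):

    #include <assert.h>

    #define TOY_HUGEPD_SHIFT_MASK	0x3f	/* non-8xx case from the hunk */

    int main(void)
    {
        unsigned long hugepte = 0x7c2f1000UL;	/* aligned pointer, low bits free */
        unsigned long shift = 24;		/* e.g. 16M huge pages */

        /* pack pointer + size, then unpack; alignment keeps them disjoint */
        unsigned long pd = hugepte | shift;
        assert((pd & ~(unsigned long)TOY_HUGEPD_SHIFT_MASK) == hugepte);
        assert((pd & TOY_HUGEPD_SHIFT_MASK) == shift);
        return 0;
    }
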
@@ -326,7 +318,6 @@
 extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long vaddr,
 		struct page *p);
-extern int page_is_ram(unsigned long pfn);
 extern int devmem_is_allowed(unsigned long pfn);
 
 #ifdef CONFIG_PPC_SMLPAR
@@ -335,20 +326,13 @@
 #endif
 
 struct vm_area_struct;
-#ifdef CONFIG_PPC_BOOK3S_64
-/*
- * For BOOK3s 64 with 4k and 64K linux page size
- * we want to use pointers, because the page table
- * actually store pfn
- */
-typedef pte_t *pgtable_t;
-#else
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
-typedef pte_t *pgtable_t;
-#else
-typedef struct page *pgtable_t;
-#endif
-#endif
+
+extern unsigned long kernstart_virt_addr;
+
+static inline unsigned long kaslr_offset(void)
+{
+	return kernstart_virt_addr - KERNELBASE;
+}
 
 #include <asm-generic/memory_model.h>
 #endif /* __ASSEMBLY__ */
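
Note: kernstart_virt_addr records where the randomized kernel actually landed, so kaslr_offset() is just the delta from the compile-time KERNELBASE; debug paths can subtract it to recover link-time addresses when symbolizing. A minimal user-space analogue with placeholder values (the addresses below are made up for the sketch):

    #include <stdio.h>

    #define TOY_KERNELBASE	0xc000000000000000UL
    static unsigned long toy_kernstart_virt_addr = 0xc000000002a00000UL;

    static unsigned long toy_kaslr_offset(void)
    {
        return toy_kernstart_virt_addr - TOY_KERNELBASE;
    }

    int main(void)
    {
        /* a randomized address minus the offset gives the link-time one */
        unsigned long addr = toy_kernstart_virt_addr + 0x1234;
        printf("offset=%#lx unslid=%#lx\n", toy_kaslr_offset(),
               addr - toy_kaslr_offset());
        return 0;
    }
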