hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/microblaze/include/asm/pgtable.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
  */
 
 #ifndef _ASM_MICROBLAZE_PGTABLE_H
@@ -24,7 +21,6 @@
 #define pgd_bad(pgd)		(0)
 #define pgd_clear(pgdp)
 #define kern_addr_valid(addr)	(1)
-#define pmd_offset(a, b)	((void *) 0)
 
 #define PAGE_NONE	__pgprot(0) /* these mean nothing to non MMU */
 #define PAGE_SHARED	__pgprot(0) /* these mean nothing to non MMU */
@@ -46,8 +42,6 @@
 
 #define swapper_pg_dir ((pgd_t *) NULL)
 
-#define pgtable_cache_init()	do {} while (0)
-
 #define arch_enter_lazy_cpu_mode()	do {} while (0)
 
 #define pgprot_noncached_wc(prot)	prot
@@ -61,9 +55,7 @@
 
 #else /* CONFIG_MMU */
 
-#include <asm-generic/4level-fixup.h>
-
-#define __PAGETABLE_PMD_FOLDED 1
+#include <asm-generic/pgtable-nopmd.h>
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
@@ -83,10 +75,6 @@
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-
-static inline int pte_special(pte_t pte)	{ return 0; }
-
-static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 
 /* Start and end of the vmalloc area. */
 /* Make sure to map the vmalloc area above the pinned kernel memory area
@@ -140,13 +128,8 @@
  *
  */
 
-/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
-#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	PMD_SHIFT
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
....@@ -167,9 +150,6 @@
167150 #define pte_ERROR(e) \
168151 printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
169152 __FILE__, __LINE__, pte_val(e))
170
-#define pmd_ERROR(e) \
171
- printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
172
- __FILE__, __LINE__, pmd_val(e))
173153 #define pgd_ERROR(e) \
174154 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
175155 __FILE__, __LINE__, pgd_val(e))
@@ -200,7 +180,7 @@
  * is cleared in the TLB miss handler before the TLB entry is loaded.
  * - All other bits of the PTE are loaded into TLBLO without
  *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- * software PTE bits. We actually use use bits 21, 24, 25, and
+ * software PTE bits. We actually use bits 21, 24, 25, and
  * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
  * PRESENT.
  */
@@ -315,18 +295,6 @@
 	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
 
 #ifndef __ASSEMBLY__
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)		{ return 0; }
-static inline int pgd_bad(pgd_t pgd)		{ return 0; }
-static inline int pgd_present(pgd_t pgd)	{ return 1; }
-#define pgd_clear(xp)				do { } while (0)
-#define pgd_page(pgd) \
-	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -469,33 +437,15 @@
 /* Convert pmd entry to page */
 /* our pmd entry is an effective address of pte table*/
 /* returns effective address of the pmd entry*/
-#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
+}
 
 /* returns struct *page of the pmd entry*/
 #define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address)	((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
-{
-	return (pmd_t *) dir;
-}
-
 /* Find an entry in the third-level page table.. */
-#define pte_index(address) \
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
-	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-
-#define pte_unmap(pte)	kunmap_atomic(pte)
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
@@ -526,11 +476,6 @@
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)	(1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 void do_page_fault(struct pt_regs *regs, unsigned long address,
 		   unsigned long error_code);
 
@@ -549,11 +494,7 @@
 #endif /* CONFIG_MMU */
 
 #ifndef __ASSEMBLY__
-#include <asm-generic/pgtable.h>
-
 extern unsigned long ioremap_bot, ioremap_base;
-
-unsigned long consistent_virt_to_pfn(void *vaddr);
 
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
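
A minimal standalone sketch (not part of the patch) of the folded two-level address split this change moves microblaze to via <asm-generic/pgtable-nopmd.h>: with the pmd level folded away, PGDIR_SHIFT collapses to PAGE_SHIFT + PTE_SHIFT as in the hunk above, and the index formulas mirror the pgd_index()/pte_index() definitions the patch drops in favor of the generic ones. The PAGE_SHIFT and PTE_SHIFT values below are assumptions for a 4 KiB-page configuration, and the program runs in userspace purely for illustration.

/* Illustrative userspace sketch of the pmd-folded two-level address split. */
#include <stdio.h>

#define PAGE_SHIFT   12                        /* assumed: 4 KiB pages */
#define PTE_SHIFT    10                        /* assumed: 1024 PTEs per table */
#define PGDIR_SHIFT  (PAGE_SHIFT + PTE_SHIFT)  /* as defined by the patch */
#define PTRS_PER_PTE (1UL << PTE_SHIFT)

int main(void)
{
	unsigned long address = 0xc0123456UL;      /* arbitrary example address */

	/* Top-level index: bits above PGDIR_SHIFT select the pgd entry. */
	unsigned long pgd_idx = address >> PGDIR_SHIFT;
	/* With the pmd folded, the next level down is the pte table itself. */
	unsigned long pte_idx = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	/* Remaining low bits are the byte offset within the page. */
	unsigned long offset  = address & ((1UL << PAGE_SHIFT) - 1);

	printf("pgd index %lu, pte index %lu, page offset 0x%03lx\n",
	       pgd_idx, pte_idx, offset);
	return 0;
}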