2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/parisc/include/asm/pgalloc.h
@@ -10,148 +10,65 @@
 
 #include <asm/cache.h>
 
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PMD_FREE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
+/* Allocate the top level pgd (page directory) */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
-					       PGD_ALLOC_ORDER);
-	pgd_t *actual_pgd = pgd;
+	pgd_t *pgd;
 
-	if (likely(pgd != NULL)) {
-		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
-		actual_pgd += PTRS_PER_PGD;
-		/* Populate first pmd with allocated memory. We mark it
-		 * with PxD_FLAG_ATTACHED as a signal to the system that this
-		 * pmd entry may not be cleared. */
-		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
-					PxD_FLAG_VALID |
-					PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
-		 * a signal that this pmd may not be freed */
-		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
-#endif
-	}
-	return actual_pgd;
+	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (unlikely(pgd == NULL))
+		return NULL;
+
+	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	pgd -= PTRS_PER_PGD;
-#endif
-	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+	free_pages((unsigned long)pgd, PGD_ORDER);
 }
 
 #if CONFIG_PGTABLE_LEVELS == 3
 
 /* Three Level Page Table Support for pmd's */
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
-		(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
-	if (pmd)
-		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
+	pmd_t *pmd;
+
+	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	if (likely(pmd))
+		memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
 	return pmd;
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
-		/*
-		 * This is the permanent pmd attached to the pgd;
-		 * cannot free it.
-		 * Increment the counter to compensate for the decrement
-		 * done by generic mm code.
-		 */
-		mm_inc_nr_pmds(mm);
-		return;
-	}
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
-
-#else
-
-/* Two Level Page Table Support for pmd's */
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
-#define pmd_free(mm, x)			do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
-
 #endif
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	/* preserve the gateway marker if this is the beginning of
-	 * the permanent pmd */
-	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
-				PxD_FLAG_VALID |
-				PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
-	else
-#endif
-		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+	set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+		+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
 #define pmd_pgtable(pmd)	pmd_page(pmd)
-
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-	if (!page)
-		return NULL;
-	if (!pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	return page;
-}
-
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
-	pgtable_page_dtor(pte);
-	pte_free_kernel(mm, page_address(pte));
-}
-
-#define check_pgt_cache()	do { } while (0)
 
 #endif
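
Note on the mechanism: <asm-generic/pgalloc.h> compiles each of its fallback helpers only when the architecture has not claimed that helper with a __HAVE_ARCH_* define before the #include. Defining __HAVE_ARCH_PMD_ALLOC_ONE, __HAVE_ARCH_PMD_FREE and __HAVE_ARCH_PGD_FREE up front therefore lets parisc keep its own pmd_alloc_one(), pmd_free() and pgd_free() while inheriting the generic pte helpers that replace the deleted pte_alloc_one()/pte_free() code. Below is a minimal userspace sketch of that opt-out convention; the names mirror the kernel's, but the bodies are hypothetical stand-ins, not the real asm-generic implementations.

/* sketch.c: hypothetical userspace analogue of the __HAVE_ARCH_* opt-out.
 * Build and run: cc sketch.c -o sketch && ./sketch
 */
#include <stdio.h>
#include <stdlib.h>

/* The "arch" claims pmd_alloc_one before including the generic header,
 * just as pgalloc.h above defines __HAVE_ARCH_PMD_ALLOC_ONE before
 * #include <asm-generic/pgalloc.h>.
 */
#define __HAVE_ARCH_PMD_ALLOC_ONE

/* Stand-in for <asm-generic/pgalloc.h>: every fallback is guarded. */
#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
static void *pmd_alloc_one(void)	/* compiled out in this build */
{
	puts("generic pmd_alloc_one");
	return calloc(1, 4096);
}
#endif

#ifndef __HAVE_ARCH_PMD_FREE		/* not claimed, so this one stays */
static void pmd_free(void *pmd)
{
	puts("generic pmd_free");
	free(pmd);
}
#endif

/* The arch-specific override, analogous to pmd_alloc_one() in the patch. */
static void *pmd_alloc_one(void)
{
	puts("arch pmd_alloc_one");
	return calloc(1, 4096);		/* zeroed, like GFP_PGTABLE_KERNEL */
}

int main(void)
{
	void *pmd = pmd_alloc_one();	/* resolves to the arch version */
	pmd_free(pmd);			/* falls back to the generic one */
	return 0;
}

In the kernel itself, GFP_PGTABLE_KERNEL, used by the new pmd_alloc_one(), is likewise supplied by the generic header, as GFP_KERNEL | __GFP_ZERO.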