hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
 #define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
 /*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/slab.h>
@@ -19,37 +16,11 @@
 };
 extern struct vmemmap_backing *vmemmap_list;
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({		\
-		BUG_ON(!(shift));	\
-		pgtable_cache[(shift) - 1]; \
-	})
-
-extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
-extern void pte_fragment_free(unsigned long *, int);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
 extern void __tlb_remove_table(void *_table);
-#endif
+void pte_frag_destroy(void *pte_frag);
 
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 {
@@ -114,9 +85,9 @@
 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
 {
-	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
+	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -136,24 +107,33 @@
 	return pud;
 }
 
+static inline void __pud_free(pud_t *pud)
+{
+	struct page *page = virt_to_page(pud);
+
+	/*
+	 * Early pud pages allocated via memblock allocator
+	 * can't be directly freed to slab
+	 */
+	if (PageReserved(page))
+		free_reserved_page(page);
+	else
+		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+}
+
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+	return __pud_free(pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
 }
 
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 				  unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, pud, PUD_INDEX);
 }
 
@@ -170,65 +150,26 @@
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				  unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
 	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
 }
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
 {
-	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
+	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t pte_page)
 {
-	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
-}
-
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
-	return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	return (pte_t *)pte_fragment_alloc(mm, address, 1);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
-				      unsigned long address)
-{
-	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	pte_fragment_free((unsigned long *)pte, 1);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pte_fragment_free((unsigned long *)ptepage, 0);
+	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
-
-#define check_pgt_cache() do { } while (0)
 
 extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
 static inline void update_page_count(int psize, long count)
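
Note (not part of the patch): besides the SPDX and pte-fragment cleanup, the diff above introduces __pud_free(), which uses PageReserved() to distinguish pud pages set up from the early memblock allocator (freed with free_reserved_page()) from regular slab allocations (freed with kmem_cache_free()). The standalone user-space sketch below only models that decision; every name in it (fake_page, model_pud_free, the two free_* helpers) is invented for illustration and is not kernel code.

/*
 * Minimal model of the freeing decision made by __pud_free() in the diff:
 * reserved pages go back to the page allocator, everything else goes back
 * to its kmem_cache.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool reserved;			/* stands in for PageReserved(page) */
};

static void free_reserved_fake_page(struct fake_page *page)
{
	printf("early memblock page: returned to the page allocator\n");
}

static void kmem_cache_free_fake(struct fake_page *page)
{
	printf("slab page: returned to PGT_CACHE(PUD_CACHE_INDEX)\n");
}

static void model_pud_free(struct fake_page *page)
{
	if (page->reserved)		/* early pud page, can't go to slab */
		free_reserved_fake_page(page);
	else
		kmem_cache_free_fake(page);
}

int main(void)
{
	struct fake_page early = { .reserved = true };
	struct fake_page regular = { .reserved = false };

	model_pud_free(&early);
	model_pud_free(&regular);
	return 0;
}

This reserved-page check is also why pud_free() in the diff now forwards to __pud_free() instead of calling kmem_cache_free() directly.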