```diff
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
 #define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
 /*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/slab.h>
```
```diff
@@ -19,37 +16,11 @@
 };
 extern struct vmemmap_backing *vmemmap_list;
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE	0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({				\
-			BUG_ON(!(shift));		\
-			pgtable_cache[(shift) - 1];	\
-		})
-
-extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
-extern void pte_fragment_free(unsigned long *, int);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
 extern void __tlb_remove_table(void *_table);
-#endif
+void pte_frag_destroy(void *pte_frag);
 
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 {
```
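This hunk drops the long comment describing the "index_size" scheme, together with MAX_PGTABLE_INDEX_SIZE, the PGT_CACHE() lookup macro, and the pte fragment declarations; only the pmd fragment API and a new pte_frag_destroy() declaration remain here, with the rest presumably consolidated into common powerpc headers. The removed comment documents a trick worth keeping in mind: every page table is aligned strongly enough that the low bits of its pointer can carry its allocation index, and MAX_PGTABLE_INDEX_SIZE (one less than a power of two) doubles as the extraction mask. A minimal userspace sketch of that low-bit tagging, with all names invented for illustration:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors MAX_PGTABLE_INDEX_SIZE: one less than a power of two, so it
 * is both the largest storable index and the mask to extract it. */
#define MAX_INDEX_SIZE	0xf

/* Stash the table's index size in the free low bits of its pointer. */
static uintptr_t tag_table(void *table, unsigned int index_size)
{
	uintptr_t v = (uintptr_t)table;

	assert(index_size && index_size <= MAX_INDEX_SIZE);
	assert((v & MAX_INDEX_SIZE) == 0);	/* alignment keeps low bits zero */
	return v | index_size;
}

int main(void)
{
	unsigned int index_size = 9;	/* 2^9 pointers = one 4K table */
	void *table = aligned_alloc(4096, (1UL << index_size) * sizeof(void *));
	uintptr_t tagged;

	if (!table)
		return 1;
	tagged = tag_table(table, index_size);
	/* Both the pointer and its size index recover from one word. */
	printf("table %p, index_size %lu\n",
	       (void *)(tagged & ~(uintptr_t)MAX_INDEX_SIZE),
	       (unsigned long)(tagged & MAX_INDEX_SIZE));
	free(table);
	return 0;
}
```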
```diff
@@ -114,9 +85,9 @@
 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
 {
-	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
+	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
```
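The pgd_populate() to p4d_populate() change matches the kernel-wide conversion to folded five-level page tables: book3s/64 keeps four real levels, so the p4d level folds onto the pgd and this populate hook is now expressed at p4d level. The body also swaps the pgd_set() helper for a direct store of a typed entry built with __p4d(). A standalone model of that typed-store pattern, assuming nothing beyond standard C (the struct wrapper is what keeps one level's value from being assigned to another level's slot):

```c
#include <stdio.h>

/* Imitates the kernel's typed page-table entries: a one-word struct
 * plus a constructor macro, so levels cannot be mixed up silently. */
typedef struct { unsigned long p4d; } p4d_t;
#define __p4d(x)	((p4d_t){ (x) })

static void p4d_populate_model(p4d_t *p4dp, unsigned long pud_val,
			       unsigned long val_bits)
{
	/* A single aligned word store, as in the patched p4d_populate(). */
	*p4dp = __p4d(pud_val | val_bits);
}

int main(void)
{
	p4d_t entry = { 0 };

	p4d_populate_model(&entry, 0x1000, 0x1);
	printf("p4d entry = 0x%lx\n", entry.p4d);
	return 0;
}
```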
```diff
@@ -136,23 +107,32 @@
 	return pud;
 }
 
+static inline void __pud_free(pud_t *pud)
+{
+	struct page *page = virt_to_page(pud);
+
+	/*
+	 * Early pud pages allocated via memblock allocator
+	 * can't be directly freed to slab
+	 */
+	if (PageReserved(page))
+		free_reserved_page(page);
+	else
+		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+}
+
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+	return __pud_free(pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
 }
 
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 				  unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, pud, PUD_INDEX);
 }
```
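The new __pud_free() covers a case the old pud_free() could get wrong: PUD pages created early in boot come from the memblock allocator, not from the PGT_CACHE slab, and handing such a page to kmem_cache_free() would corrupt the slab. PageReserved() identifies those boot-time pages so they are returned via free_reserved_page() instead. The hunk also deletes the explicit flush_tlb_pgtable() call from __pud_free_tlb(); the page walk cache flush appears to have moved into the common mmu_gather teardown that pgtable_free_tlb() feeds. A userspace analogue of the two-path free, with all names illustrative:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct page: records where the allocation came from. */
struct table {
	bool boot_reserved;	/* plays the role of PageReserved() */
	void *mem;
};

static void table_free(struct table *t)
{
	if (t->boot_reserved) {
		/* Boot allocator path: kernel uses free_reserved_page(). */
		printf("returning boot-time table to the page allocator\n");
		free(t->mem);
	} else {
		/* Normal path: kernel uses kmem_cache_free(PGT_CACHE(...)). */
		printf("returning table to its slab cache\n");
		free(t->mem);
	}
}

int main(void)
{
	struct table boot = { true, malloc(64) };
	struct table run = { false, malloc(64) };

	table_free(&boot);
	table_free(&run);
	return 0;
}
```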
```diff
@@ -170,65 +150,26 @@
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				  unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
 	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
 }
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
 {
-	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
+	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t pte_page)
 {
-	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
-}
-
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
-	return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	return (pte_t *)pte_fragment_alloc(mm, address, 1);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
-				      unsigned long address)
-{
-	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	pte_fragment_free((unsigned long *)pte, 1);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pte_fragment_free((unsigned long *)ptepage, 0);
+	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
-
-#define check_pgt_cache()	do { } while (0)
 
 extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
 static inline void update_page_count(int psize, long count)
```