```diff
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This file contains ioremap and related functions for 64-bit machines.
+ * This file contains pgtable related functions for 64-bit machines.
  *
  * Derived from arch/ppc64/mm/init.c
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
...
  *
  * Dave Engebretsen <engebret@us.ibm.com>
  *      Rework for PPC64 port.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  */

 #include <linux/signal.h>
...
 #include <linux/slab.h>
 #include <linux/hugetlb.h>

-#include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
...
 #include <asm/firmware.h>
 #include <asm/dma.h>

-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>


 #ifdef CONFIG_PPC_BOOK3S_64
...
 EXPORT_SYMBOL(__pgd_val_bits);
 unsigned long __kernel_virt_start;
 EXPORT_SYMBOL(__kernel_virt_start);
-unsigned long __kernel_virt_size;
-EXPORT_SYMBOL(__kernel_virt_size);
 unsigned long __vmalloc_start;
 EXPORT_SYMBOL(__vmalloc_start);
 unsigned long __vmalloc_end;
 EXPORT_SYMBOL(__vmalloc_end);
 unsigned long __kernel_io_start;
 EXPORT_SYMBOL(__kernel_io_start);
+unsigned long __kernel_io_end;
 struct page *vmemmap;
 EXPORT_SYMBOL(vmemmap);
 unsigned long __pte_frag_nr;
 EXPORT_SYMBOL(__pte_frag_nr);
 unsigned long __pte_frag_size_shift;
 EXPORT_SYMBOL(__pte_frag_size_shift);
-unsigned long ioremap_bot;
-#else /* !CONFIG_PPC_BOOK3S_64 */
-unsigned long ioremap_bot = IOREMAP_BASE;
 #endif
```
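The hunk above drops the fixed `__kernel_virt_size` and adds `__kernel_io_end`, so the Book3S64 kernel address-space layout is described entirely by start/end variables chosen at boot. As a hedged sketch (loosely modeled on the hash MMU early-init path; the `H_*` names are assumed layout constants and may differ across kernel versions), these globals are filled in roughly like this:

```c
/*
 * Hedged sketch, not verbatim kernel code: how the Book3S64 layout
 * globals (including the new __kernel_io_end) get assigned at boot
 * when running with the hash MMU. H_* are assumed layout constants.
 */
static void __init hash_layout_init_sketch(void)
{
        __kernel_virt_start = H_KERN_VIRT_START;
        __vmalloc_start = H_VMALLOC_START;
        __vmalloc_end = H_VMALLOC_END;
        __kernel_io_start = H_KERN_IO_START;
        __kernel_io_end = H_KERN_IO_END;    /* replaces __kernel_virt_size */
        vmemmap = (struct page *)H_VMEMMAP_START;
}
```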
```diff
-
-/**
- * __ioremap_at - Low level function to establish the page tables
- *                for an IO mapping
- */
-void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
-                            unsigned long flags)
-{
-        unsigned long i;
-
-        /* Make sure we have the base flags */
-        if ((flags & _PAGE_PRESENT) == 0)
-                flags |= pgprot_val(PAGE_KERNEL);
-
-        /* We don't support the 4K PFN hack with ioremap */
-        if (flags & H_PAGE_4K_PFN)
-                return NULL;
-
-        WARN_ON(pa & ~PAGE_MASK);
-        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-        WARN_ON(size & ~PAGE_MASK);
-
-        for (i = 0; i < size; i += PAGE_SIZE)
-                if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
-                        return NULL;
-
-        return (void __iomem *)ea;
-}
-
-/**
- * __iounmap_from - Low level function to tear down the page tables
- *                  for an IO mapping. This is used for mappings that
- *                  are manipulated manually, like partial unmapping of
- *                  PCI IOs or ISA space.
- */
-void __iounmap_at(void *ea, unsigned long size)
-{
-        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-        WARN_ON(size & ~PAGE_MASK);
-
-        unmap_kernel_range((unsigned long)ea, size);
-}
-
-void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-                                unsigned long flags, void *caller)
-{
-        phys_addr_t paligned;
-        void __iomem *ret;
-
-        /*
-         * Choose an address to map it to.
-         * Once the imalloc system is running, we use it.
-         * Before that, we map using addresses going
-         * up from ioremap_bot. imalloc will use
-         * the addresses from ioremap_bot through
-         * IMALLOC_END
-         *
-         */
-        paligned = addr & PAGE_MASK;
-        size = PAGE_ALIGN(addr + size) - paligned;
-
-        if ((size == 0) || (paligned == 0))
-                return NULL;
-
-        if (slab_is_available()) {
-                struct vm_struct *area;
-
-                area = __get_vm_area_caller(size, VM_IOREMAP,
-                                            ioremap_bot, IOREMAP_END,
-                                            caller);
-                if (area == NULL)
-                        return NULL;
-
-                area->phys_addr = paligned;
-                ret = __ioremap_at(paligned, area->addr, size, flags);
-                if (!ret)
-                        vunmap(area->addr);
-        } else {
-                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
-                if (ret)
-                        ioremap_bot += size;
-        }
-
-        if (ret)
-                ret += addr & ~PAGE_MASK;
-        return ret;
-}
-
-void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
-                         unsigned long flags)
-{
-        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
-}
-
-void __iomem * ioremap(phys_addr_t addr, unsigned long size)
-{
-        unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
-        void *caller = __builtin_return_address(0);
-
-        if (ppc_md.ioremap)
-                return ppc_md.ioremap(addr, size, flags, caller);
-        return __ioremap_caller(addr, size, flags, caller);
-}
-
-void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
-{
-        unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
-        void *caller = __builtin_return_address(0);
-
-        if (ppc_md.ioremap)
-                return ppc_md.ioremap(addr, size, flags, caller);
-        return __ioremap_caller(addr, size, flags, caller);
-}
-
-void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
-                            unsigned long flags)
-{
-        void *caller = __builtin_return_address(0);
-
-        /* writeable implies dirty for kernel addresses */
-        if (flags & _PAGE_WRITE)
-                flags |= _PAGE_DIRTY;
-
-        /* we don't want to let _PAGE_EXEC leak out */
-        flags &= ~_PAGE_EXEC;
-        /*
-         * Force kernel mapping.
-         */
-        flags &= ~_PAGE_USER;
-        flags |= _PAGE_PRIVILEGED;
-
-        if (ppc_md.ioremap)
-                return ppc_md.ioremap(addr, size, flags, caller);
-        return __ioremap_caller(addr, size, flags, caller);
-}
-
-
-/*
- * Unmap an IO region and remove it from imalloc'd list.
- * Access to IO memory should be serialized by driver.
- */
-void __iounmap(volatile void __iomem *token)
-{
-        void *addr;
-
-        if (!slab_is_available())
-                return;
-
-        addr = (void *) ((unsigned long __force)
-                         PCI_FIX_ADDR(token) & PAGE_MASK);
-        if ((unsigned long)addr < ioremap_bot) {
-                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
-                       " at 0x%p\n", addr);
-                return;
-        }
-        vunmap(addr);
-}
-
-void iounmap(volatile void __iomem *token)
-{
-        if (ppc_md.iounmap)
-                ppc_md.iounmap(token);
-        else
-                __iounmap(token);
-}
-
-EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(ioremap_wc);
-EXPORT_SYMBOL(ioremap_prot);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(__ioremap_at);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(__iounmap);
-EXPORT_SYMBOL(__iounmap_at);

```
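Everything removed above is the old 64-bit `ioremap` implementation; upstream relocated this logic into a shared powerpc ioremap file rather than deleting the functionality, so driver-facing behavior is unchanged. For context, a minimal, hedged sketch of how a driver consumes the API (the MMIO base, size, and register offset below are made up; a real driver would take them from a `struct resource`):

```c
#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical MMIO window; real values come from a device resource. */
#define EXAMPLE_MMIO_BASE   0xfe000000UL
#define EXAMPLE_MMIO_SIZE   0x1000UL
#define EXAMPLE_CTRL_REG    0x04

static int example_probe(void)
{
        void __iomem *regs;

        regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + EXAMPLE_CTRL_REG);   /* start the device */
        (void)readl(regs + EXAMPLE_CTRL_REG);   /* read back to flush */

        iounmap(regs);
        return 0;
}
```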
```diff
 #ifndef __PAGETABLE_PUD_FOLDED
 /* 4 level page table */
-struct page *pgd_page(pgd_t pgd)
+struct page *p4d_page(p4d_t p4d)
 {
-        if (pgd_huge(pgd))
-                return pte_page(pgd_pte(pgd));
-        return virt_to_page(pgd_page_vaddr(pgd));
+        if (p4d_is_leaf(p4d)) {
+                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+                        VM_WARN_ON(!p4d_huge(p4d));
+                return pte_page(p4d_pte(p4d));
+        }
+        return virt_to_page(p4d_pgtable(p4d));
 }
 #endif

 struct page *pud_page(pud_t pud)
 {
-        if (pud_huge(pud))
+        if (pud_is_leaf(pud)) {
+                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+                        VM_WARN_ON(!pud_huge(pud));
                 return pte_page(pud_pte(pud));
-        return virt_to_page(pud_page_vaddr(pud));
+        }
+        return virt_to_page(pud_pgtable(pud));
 }

 /*
...
  */
 struct page *pmd_page(pmd_t pmd)
 {
-        if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+        if (pmd_is_leaf(pmd)) {
+                /*
+                 * vmalloc_to_page may be called on any vmap address (not only
+                 * vmalloc), and it uses pmd_page() etc., when huge vmap is
+                 * enabled so these checks can't be used.
+                 */
+                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+                        VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
                 return pte_page(pmd_pte(pmd));
+        }
         return virt_to_page(pmd_page_vaddr(pmd));
 }

```
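The reasoning in the new `pmd_page()` comment is why each helper now keys off `*_is_leaf()`: with `CONFIG_HAVE_ARCH_HUGE_VMAP`, a `vmalloc_to_page()`-style walk can hit a huge leaf at the P4D, PUD, or PMD level and still needs a sane `struct page` back. A hedged sketch of that kind of walk (simplified; the real `vmalloc_to_page()` also offsets into huge-page leaves and is more careful about concurrency):

```c
/*
 * Hedged sketch of a vmalloc_to_page()-style walk over a kernel
 * address, showing where the patched helpers are consulted. The real
 * function additionally offsets into huge-page leaves.
 */
static struct page *vmap_page_sketch(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep;

        if (pgd_none(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return NULL;
        if (p4d_is_leaf(*p4d))          /* huge vmap leaf at P4D level */
                return p4d_page(*p4d);
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return NULL;
        if (pud_is_leaf(*pud))          /* huge vmap leaf at PUD level */
                return pud_page(*pud);
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_is_leaf(*pmd))          /* huge vmap leaf at PMD level */
                return pmd_page(*pmd);
        ptep = pte_offset_kernel(pmd, addr);
        if (!pte_present(*ptep))
                return NULL;
        return pte_page(*ptep);
}
```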
```diff
...
                 radix__mark_rodata_ro();
         else
                 hash__mark_rodata_ro();
+
+        // mark_initmem_nx() should have already run by now
+        ptdump_check_wx();
 }

 void mark_initmem_nx(void)
```
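The added `ptdump_check_wx()` call has `mark_rodata_ro()` finish by scanning the kernel page tables for mappings that are simultaneously writable and executable. A minimal sketch of the usual declaration-side stub pattern that keeps the call site unconditional (assuming a debug-WX Kconfig option; the exact symbol name varies by tree):

```c
/* Hedged sketch, assuming a CONFIG_DEBUG_WX-style option; when the
 * option is off, the call in mark_rodata_ro() compiles away. */
#ifdef CONFIG_DEBUG_WX
void ptdump_check_wx(void);
#else
static inline void ptdump_check_wx(void) { }
#endif
```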
---|