2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/powerpc/mm/pgtable_64.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This file contains ioremap and related functions for 64-bit machines.
+ * This file contains pgtable related functions for 64-bit machines.
  *
  * Derived from arch/ppc64/mm/init.c
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -13,12 +14,6 @@
  *
  * Dave Engebretsen <engebret@us.ibm.com>
  *      Rework for PPC64 port.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  */
 
 #include <linux/signal.h>
@@ -36,12 +31,9 @@
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
 
-#include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
@@ -52,7 +44,7 @@
 #include <asm/firmware.h>
 #include <asm/dma.h>
 
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
 
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -90,214 +82,42 @@
 EXPORT_SYMBOL(__pgd_val_bits);
 unsigned long __kernel_virt_start;
 EXPORT_SYMBOL(__kernel_virt_start);
-unsigned long __kernel_virt_size;
-EXPORT_SYMBOL(__kernel_virt_size);
 unsigned long __vmalloc_start;
 EXPORT_SYMBOL(__vmalloc_start);
 unsigned long __vmalloc_end;
 EXPORT_SYMBOL(__vmalloc_end);
 unsigned long __kernel_io_start;
 EXPORT_SYMBOL(__kernel_io_start);
+unsigned long __kernel_io_end;
 struct page *vmemmap;
 EXPORT_SYMBOL(vmemmap);
 unsigned long __pte_frag_nr;
 EXPORT_SYMBOL(__pte_frag_nr);
 unsigned long __pte_frag_size_shift;
 EXPORT_SYMBOL(__pte_frag_size_shift);
-unsigned long ioremap_bot;
-#else /* !CONFIG_PPC_BOOK3S_64 */
-unsigned long ioremap_bot = IOREMAP_BASE;
 #endif
-
-/**
- * __ioremap_at - Low level function to establish the page tables
- *                for an IO mapping
- */
-void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
-			    unsigned long flags)
-{
-	unsigned long i;
-
-	/* Make sure we have the base flags */
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= pgprot_val(PAGE_KERNEL);
-
-	/* We don't support the 4K PFN hack with ioremap */
-	if (flags & H_PAGE_4K_PFN)
-		return NULL;
-
-	WARN_ON(pa & ~PAGE_MASK);
-	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-	WARN_ON(size & ~PAGE_MASK);
-
-	for (i = 0; i < size; i += PAGE_SIZE)
-		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
-			return NULL;
-
-	return (void __iomem *)ea;
-}
-
-/**
- * __iounmap_from - Low level function to tear down the page tables
- *                  for an IO mapping. This is used for mappings that
- *                  are manipulated manually, like partial unmapping of
- *                  PCI IOs or ISA space.
- */
-void __iounmap_at(void *ea, unsigned long size)
-{
-	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-	WARN_ON(size & ~PAGE_MASK);
-
-	unmap_kernel_range((unsigned long)ea, size);
-}
-
-void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-				unsigned long flags, void *caller)
-{
-	phys_addr_t paligned;
-	void __iomem *ret;
-
-	/*
-	 * Choose an address to map it to.
-	 * Once the imalloc system is running, we use it.
-	 * Before that, we map using addresses going
-	 * up from ioremap_bot. imalloc will use
-	 * the addresses from ioremap_bot through
-	 * IMALLOC_END
-	 *
-	 */
-	paligned = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - paligned;
-
-	if ((size == 0) || (paligned == 0))
-		return NULL;
-
-	if (slab_is_available()) {
-		struct vm_struct *area;
-
-		area = __get_vm_area_caller(size, VM_IOREMAP,
-					    ioremap_bot, IOREMAP_END,
-					    caller);
-		if (area == NULL)
-			return NULL;
-
-		area->phys_addr = paligned;
-		ret = __ioremap_at(paligned, area->addr, size, flags);
-		if (!ret)
-			vunmap(area->addr);
-	} else {
-		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
-		if (ret)
-			ioremap_bot += size;
-	}
-
-	if (ret)
-		ret += addr & ~PAGE_MASK;
-	return ret;
-}
-
-void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
-			 unsigned long flags)
-{
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
-}
-
-void __iomem * ioremap(phys_addr_t addr, unsigned long size)
-{
-	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
-	void *caller = __builtin_return_address(0);
-
-	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
-}
-
-void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
-{
-	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
-	void *caller = __builtin_return_address(0);
-
-	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
-}
-
-void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
-			    unsigned long flags)
-{
-	void *caller = __builtin_return_address(0);
-
-	/* writeable implies dirty for kernel addresses */
-	if (flags & _PAGE_WRITE)
-		flags |= _PAGE_DIRTY;
-
-	/* we don't want to let _PAGE_EXEC leak out */
-	flags &= ~_PAGE_EXEC;
-	/*
-	 * Force kernel mapping.
-	 */
-	flags &= ~_PAGE_USER;
-	flags |= _PAGE_PRIVILEGED;
-
-	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
-}
-
-
-/*
- * Unmap an IO region and remove it from imalloc'd list.
- * Access to IO memory should be serialized by driver.
- */
-void __iounmap(volatile void __iomem *token)
-{
-	void *addr;
-
-	if (!slab_is_available())
-		return;
-
-	addr = (void *) ((unsigned long __force)
-			 PCI_FIX_ADDR(token) & PAGE_MASK);
-	if ((unsigned long)addr < ioremap_bot) {
-		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
-		       " at 0x%p\n", addr);
-		return;
-	}
-	vunmap(addr);
-}
-
-void iounmap(volatile void __iomem *token)
-{
-	if (ppc_md.iounmap)
-		ppc_md.iounmap(token);
-	else
-		__iounmap(token);
-}
-
-EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(ioremap_wc);
-EXPORT_SYMBOL(ioremap_prot);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(__ioremap_at);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(__iounmap);
-EXPORT_SYMBOL(__iounmap_at);
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /* 4 level page table */
-struct page *pgd_page(pgd_t pgd)
+struct page *p4d_page(p4d_t p4d)
 {
-	if (pgd_huge(pgd))
-		return pte_page(pgd_pte(pgd));
-	return virt_to_page(pgd_page_vaddr(pgd));
+	if (p4d_is_leaf(p4d)) {
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!p4d_huge(p4d));
+		return pte_page(p4d_pte(p4d));
+	}
+	return virt_to_page(p4d_pgtable(p4d));
 }
 #endif
 
 struct page *pud_page(pud_t pud)
 {
-	if (pud_huge(pud))
+	if (pud_is_leaf(pud)) {
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!pud_huge(pud));
 		return pte_page(pud_pte(pud));
-	return virt_to_page(pud_page_vaddr(pud));
+	}
+	return virt_to_page(pud_pgtable(pud));
 }
 
 /*
@@ -306,8 +126,16 @@
  */
 struct page *pmd_page(pmd_t pmd)
 {
-	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+	if (pmd_is_leaf(pmd)) {
+		/*
+		 * vmalloc_to_page may be called on any vmap address (not only
+		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
+		 * enabled so these checks can't be used.
+		 */
+		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
 		return pte_page(pmd_pte(pmd));
+	}
 	return virt_to_page(pmd_page_vaddr(pmd));
 }
 
@@ -323,6 +151,9 @@
 		radix__mark_rodata_ro();
 	else
 		hash__mark_rodata_ro();
+
+	// mark_initmem_nx() should have already run by now
+	ptdump_check_wx();
 }
 
 void mark_initmem_nx(void)