forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/arch/powerpc/mm/pgtable_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * This file contains the routines setting up the linux page tables.
  * -- paulus
@@ -11,12 +12,6 @@
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  */
 
 #include <linux/kernel.h>
@@ -29,205 +24,73 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
-#include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
+#include <asm/early_ioremap.h>
 
-#include "mmu_decl.h"
-
-unsigned long ioremap_bot;
-EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
+#include <mm/mmu_decl.h>
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	pte_t *pte;
+static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;
 
-	if (slab_is_available()) {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	} else {
-		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
-		if (pte)
-			clear_page(pte);
+notrace void __init early_ioremap_init(void)
+{
+	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
+	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
+	pmd_t *pmdp = pmd_off_k(addr);
+
+	for (; (s32)(FIXADDR_TOP - addr) > 0;
+	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
+
+	early_ioremap_setup();
+}
+
+static void __init *early_alloc_pgtable(unsigned long size)
+{
+	void *ptr = memblock_alloc(size, size);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, size);
+
+	return ptr;
+}
+
+pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+{
+	if (pmd_none(*pmdp)) {
+		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
+
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
 	}
-	return pte;
+	return pte_offset_kernel(pmdp, va);
 }
 
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *ptepage;
 
-	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
-
-	ptepage = alloc_pages(flags, 0);
-	if (!ptepage)
-		return NULL;
-	if (!pgtable_page_ctor(ptepage)) {
-		__free_page(ptepage);
-		return NULL;
-	}
-	return ptepage;
-}
-
-void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
-{
-	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
-				__builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap);
-
-void __iomem *
-ioremap_wc(phys_addr_t addr, unsigned long size)
-{
-	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
-				__builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_wc);
-
-void __iomem *
-ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
-{
-	/* writeable implies dirty for kernel addresses */
-	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
-		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
-
-	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-	flags &= ~(_PAGE_USER | _PAGE_EXEC);
-	flags |= _PAGE_PRIVILEGED;
-
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_prot);
-
-void __iomem *
-__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
-{
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
-}
-
-void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
-		 void *caller)
-{
-	unsigned long v, i;
-	phys_addr_t p;
-	int err;
-
-	/* Make sure we have the base flags */
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= pgprot_val(PAGE_KERNEL);
-
-	/* Non-cacheable page cannot be coherent */
-	if (flags & _PAGE_NO_CACHE)
-		flags &= ~_PAGE_COHERENT;
-
-	/*
-	 * Choose an address to map it to.
-	 * Once the vmalloc system is running, we use it.
-	 * Before then, we use space going down from IOREMAP_TOP
-	 * (ioremap_bot records where we're up to).
-	 */
-	p = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - p;
-
-	/*
-	 * If the address lies within the first 16 MB, assume it's in ISA
-	 * memory space
-	 */
-	if (p < 16*1024*1024)
-		p += _ISA_MEM_BASE;
-
-#ifndef CONFIG_CRASH_DUMP
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using.
-	 * mem_init() sets high_memory so only do the check after that.
-	 */
-	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
-	    page_is_ram(__phys_to_pfn(p))) {
-		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
-		       (unsigned long long)p, __builtin_return_address(0));
-		return NULL;
-	}
-#endif
-
-	if (size == 0)
-		return NULL;
-
-	/*
-	 * Is it already mapped? Perhaps overlapped by a previous
-	 * mapping.
-	 */
-	v = p_block_mapped(p);
-	if (v)
-		goto out;
-
-	if (slab_is_available()) {
-		struct vm_struct *area;
-		area = get_vm_area_caller(size, VM_IOREMAP, caller);
-		if (area == 0)
-			return NULL;
-		area->phys_addr = p;
-		v = (unsigned long) area->addr;
-	} else {
-		v = (ioremap_bot -= size);
-	}
-
-	/*
-	 * Should check if it is a candidate for a BAT mapping
-	 */
-
-	err = 0;
-	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-		err = map_kernel_page(v+i, p+i, flags);
-	if (err) {
-		if (slab_is_available())
-			vunmap((void *)v);
-		return NULL;
-	}
-
-out:
-	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void iounmap(volatile void __iomem *addr)
-{
-	/*
-	 * If mapped by BATs then there is nothing to do.
-	 * Calling vfree() generates a benign warning.
-	 */
-	if (v_block_mapped((unsigned long)addr))
-		return;
-
-	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
-		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
-}
-EXPORT_SYMBOL(iounmap);
-
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
+int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
 	pmd_t *pd;
 	pte_t *pg;
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+	pd = pmd_off_k(va);
 	/* Use middle 10 bits of VA to index the second-level map */
-	pg = pte_alloc_kernel(pd, va);
+	if (likely(slab_is_available()))
+		pg = pte_alloc_kernel(pd, va);
+	else
+		pg = early_pte_alloc_kernel(pd, va);
 	if (pg != 0) {
 		err = 0;
 		/* The PTE should never be already set nor present in the
 		 * hash table
 		 */
-		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
-		       flags);
-		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
-						     __pgprot(flags)));
+		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
+		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
 	}
 	smp_wmb();
 	return err;
@@ -238,7 +101,7 @@
  */
 static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
-	unsigned long v, s, f;
+	unsigned long v, s;
 	phys_addr_t p;
 	int ktext;
 
@@ -248,11 +111,10 @@
 	for (; s < top; s += PAGE_SIZE) {
 		ktext = ((char *)v >= _stext && (char *)v < etext) ||
 			((char *)v >= _sinittext && (char *)v < _einittext);
-		f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
-		map_kernel_page(v, p, f);
-#ifdef CONFIG_PPC_STD_MMU_32
+		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
+#ifdef CONFIG_PPC_BOOK3S_32
 		if (ktext)
-			hash_preload(&init_mm, v, 0, 0x300);
+			hash_preload(&init_mm, v);
 #endif
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
@@ -261,66 +123,22 @@
 
 void __init mapin_ram(void)
 {
-	unsigned long s, top;
+	phys_addr_t base, end;
+	u64 i;
 
-#ifndef CONFIG_WII
-	top = total_lowmem;
-	s = mmu_mapin_ram(top);
-	__mapin_ram_chunk(s, top);
-#else
-	if (!wii_hole_size) {
-		s = mmu_mapin_ram(total_lowmem);
-		__mapin_ram_chunk(s, total_lowmem);
-	} else {
-		top = wii_hole_start;
-		s = mmu_mapin_ram(top);
-		__mapin_ram_chunk(s, top);
+	for_each_mem_range(i, &base, &end) {
+		phys_addr_t top = min(end, total_lowmem);
 
-		top = memblock_end_of_DRAM();
-		s = wii_mmu_mapin_mem2(top);
-		__mapin_ram_chunk(s, top);
+		if (base >= top)
+			continue;
+		base = mmu_mapin_ram(base, top);
+		__mapin_ram_chunk(base, top);
 	}
-#endif
-}
-
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise. The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-static int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	int retval = 0;
-
-	pgd = pgd_offset(mm, addr & PAGE_MASK);
-	if (pgd) {
-		pud = pud_offset(pgd, addr & PAGE_MASK);
-		if (pud && pud_present(*pud)) {
-			pmd = pmd_offset(pud, addr & PAGE_MASK);
-			if (pmd_present(*pmd)) {
-				pte = pte_offset_map(pmd, addr & PAGE_MASK);
-				if (pte) {
-					retval = 1;
-					*ptep = pte;
-					if (pmdp)
-						*pmdp = pmd;
-					/* XXX caller needs to do pte_unmap, yuck */
-				}
-			}
-		}
-	}
-	return(retval);
 }
 
 static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
 {
 	pte_t *kpte;
-	pmd_t *kpmd;
 	unsigned long address;
 
 	BUG_ON(PageHighMem(page));
@@ -328,10 +146,10 @@
 
 	if (v_block_mapped(address))
 		return 0;
-	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
+	kpte = virt_to_kpte(address);
+	if (!kpte)
 		return -EINVAL;
 	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-	pte_unmap(kpte);
 
 	return 0;
 }
@@ -366,7 +184,10 @@
 	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
 				 PFN_DOWN((unsigned long)_sinittext);
 
-	change_page_attr(page, numpages, PAGE_KERNEL);
+	if (v_block_mapped((unsigned long)_sinittext))
+		mmu_mark_initmem_nx();
+	else
+		change_page_attr(page, numpages, PAGE_KERNEL);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
@@ -374,6 +195,12 @@
 {
 	struct page *page;
 	unsigned long numpages;
+
+	if (v_block_mapped((unsigned long)_stext + 1)) {
+		mmu_mark_rodata_ro();
+		ptdump_check_wx();
+		return;
+	}
 
 	page = virt_to_page(_stext);
 	numpages = PFN_UP((unsigned long)_etext) -
@@ -389,6 +216,9 @@
 		   PFN_DOWN((unsigned long)__start_rodata);
 
 	change_page_attr(page, numpages, PAGE_KERNEL_RO);
+
+	// mark_initmem_nx() should have already run by now
+	ptdump_check_wx();
 }
 #endif
 
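
A note on the signature change above: map_kernel_page() now takes a pgprot_t instead of a raw int of flag bits. In the kernel, pgprot_t is typically a one-member struct wrapping the protection bits, so the compiler rejects callers that still pass a bare integer. The standalone userspace sketch below mimics that pattern; the _PAGE_* values and the map_kernel_page() body here are illustrative stand-ins, not the powerpc implementation.

#include <stdio.h>

/* Mirrors the usual kernel definition: a strong typedef around the raw bits. */
typedef struct { unsigned long pgprot; } pgprot_t;

#define __pgprot(x)	((pgprot_t) { (x) })
#define pgprot_val(x)	((x).pgprot)

/* Illustrative bit values only; the real _PAGE_* bits are MMU-specific. */
#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL

/* Stand-in for the kernel function: just reports what it would install. */
static int map_kernel_page(unsigned long va, unsigned long pa, pgprot_t prot)
{
	printf("map va=0x%lx -> pa=0x%lx prot=0x%lx\n", va, pa, pgprot_val(prot));
	return 0;
}

int main(void)
{
	pgprot_t kernel_rw = __pgprot(_PAGE_PRESENT | _PAGE_RW);

	map_kernel_page(0xc0000000UL, 0x1000UL, kernel_rw);
	/* map_kernel_page(0xc0000000UL, 0x1000UL, 0x3UL); no longer compiles */
	return 0;
}

With the old int-flags signature the commented-out call would have been accepted silently; with pgprot_t it fails to compile, which is the type safety the conversion in this diff relies on.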