2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/powerpc/include/asm/nohash/pgtable.h
@@ -8,29 +8,60 @@
 #include <asm/nohash/32/pgtable.h>
 #endif
 
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP		(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
 #ifndef __ASSEMBLY__
 
 /* Generic accessors to PTE bits */
+#ifndef pte_write
 static inline int pte_write(pte_t pte)
 {
-	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
+	return pte_val(pte) & _PAGE_RW;
 }
+#endif
 static inline int pte_read(pte_t pte) { return 1; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
 static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_hashpte(pte_t pte) { return false; }
+static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
 
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * comment in include/linux/pgtable.h . On powerpc, this will only
  * work for user pages and always return true for kernel pages.
  */
 static inline int pte_protnone(pte_t pte)
 {
-	return (pte_val(pte) &
-		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+	return pte_present(pte) && !pte_user(pte);
 }
 
 static inline int pmd_protnone(pmd_t pmd)
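The rewritten pte_protnone() in the hunk above reads as "present but without user access". A minimal standalone model of that predicate, using invented bit values rather than the real powerpc ones, shows it matches the old open-coded compare ((val & (PRESENT | USER)) == PRESENT) when the user permission is a single bit:

    /* toy model of pte_protnone(); the T_* bit values are hypothetical */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define T_PRESENT 0x1u   /* stand-in for _PAGE_PRESENT */
    #define T_USER    0x2u   /* stand-in for _PAGE_USER */

    static bool toy_pte_protnone(uint32_t pte)
    {
        return (pte & T_PRESENT) && !(pte & T_USER);
    }

    int main(void)
    {
        assert(toy_pte_protnone(T_PRESENT));            /* kernel/protnone page */
        assert(!toy_pte_protnone(T_PRESENT | T_USER));  /* ordinary user page */
        assert(!toy_pte_protnone(0));                   /* not present at all */
        return 0;
    }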
@@ -43,6 +74,23 @@
 {
 	return pte_val(pte) & _PAGE_PRESENT;
 }
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/*
+ * Don't just check for any non zero bits in __PAGE_USER, since for book3e
+ * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
+ * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
+ */
+#ifndef pte_user
+static inline bool pte_user(pte_t pte)
+{
+	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+}
+#endif
 
 /*
  * We only find page table entry in the last level
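The _PAGE_BAP_SR comment in the hunk above is why the new pte_user() compares against the full mask instead of testing for any set bit: on book3e with 64-bit PTEs, _PAGE_USER is a multi-bit field, and kernel-executable mappings set one of its bits. A standalone sketch with invented bit values shows how the naive test misclassifies such a PTE while the full-mask compare does not:

    /* toy bits; the real book3e encodings differ */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define T_BAP_UR 0x1u                   /* user read, hypothetical */
    #define T_BAP_SR 0x2u                   /* supervisor read, also in kernel X perms */
    #define T_USER   (T_BAP_UR | T_BAP_SR)  /* two-bit "user" field */

    static bool user_any_bit(uint64_t pte)   { return (pte & T_USER) != 0; }
    static bool user_full_mask(uint64_t pte) { return (pte & T_USER) == T_USER; }

    int main(void)
    {
        uint64_t kernel_x = T_BAP_SR;       /* kernel-exec PTE sharing a bit with T_USER */
        assert(user_any_bit(kernel_x));     /* naive test wrongly says "user" */
        assert(!user_full_mask(kernel_x));  /* full-mask compare gets it right */
        assert(user_full_mask(T_USER));     /* a genuine user PTE still matches */
        return 0;
    }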
@@ -77,18 +125,14 @@
 	return pte_val(pte) >> PTE_RPN_SHIFT; }
 
 /* Generic modifiers for PTE bits */
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t pte_exprotect(pte_t pte)
 {
-	pte_basic_t ptev;
-
-	ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
-	ptev |= _PAGE_RO;
-	return __pte(ptev);
+	return __pte(pte_val(pte) & ~_PAGE_EXEC);
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
@@ -96,34 +140,31 @@
 	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-	pte_basic_t ptev;
-
-	ptev = pte_val(pte) & ~_PAGE_RO;
-	ptev |= _PAGE_RW;
-	return __pte(ptev);
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
 static inline pte_t pte_mkspecial(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
+#ifndef pte_mkhuge
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_HUGE);
+	return __pte(pte_val(pte));
 }
+#endif
+
+#ifndef pte_mkprivileged
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+#endif
+
+#ifndef pte_mkuser
+static inline pte_t pte_mkuser(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_USER);
+}
+#endif
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -161,7 +202,11 @@
 	/* Anything else just stores the PTE normally. That covers all 64-bit
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
+#else
 	*ptep = pte;
+#endif
 
 	/*
 	 * With hardware tablewalk, a sync is needed to ensure that
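The 8xx branch added in the hunk above exists because CONFIG_PPC_16K_PAGES on that platform backs one 16K software page with four 4K hardware PTEs, so a single logical store has to replicate the value into all four slots. A toy model of that store (the struct is invented for illustration; only the field names mirror the diff):

    #include <assert.h>
    #include <stdint.h>

    /* stand-in for the four-slot PTE layout used with 16K pages on 8xx */
    struct toy_pte { uint32_t pte, pte1, pte2, pte3; };

    static void toy_set_pte(struct toy_pte *p, uint32_t val)
    {
        p->pte = p->pte1 = p->pte2 = p->pte3 = val;  /* one write, four slots */
    }

    int main(void)
    {
        struct toy_pte p = { 0, 0, 0, 0 };
        toy_set_pte(&p, 0xabcd0001u);
        assert(p.pte == p.pte1 && p.pte1 == p.pte2 && p.pte2 == p.pte3);
        return 0;
    }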
@@ -197,6 +242,8 @@
 #if _PAGE_WRITETHRU != 0
 #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 				_PAGE_COHERENT | _PAGE_WRITETHRU))
+#else
+#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
 #endif
 
 #define pgprot_cached_noncoherent(prot) \
@@ -213,7 +260,7 @@
 static inline int hugepd_ok(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-	return ((hpd_val(hpd) & 0x4) != 0);
+	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
 #else
 	/* We clear the top bit to indicate hugepd */
 	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
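The 8xx change above tightens hugepd_ok() from "bit 0x4 is set" to "the page-size field equals the 8M encoding", while the other nohash platforms keep the convention from the comment: any non-zero value whose top (PD_HUGE) bit is clear marks a huge page directory. Both tests, modeled with invented constants:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define T_PMD_PAGE_MASK 0x6u         /* hypothetical page-size field mask */
    #define T_PMD_PAGE_8M   0x4u         /* hypothetical 8M encoding */
    #define T_PD_HUGE       0x80000000u  /* hypothetical top bit */

    static bool toy_hugepd_ok_8xx(uint32_t hpd)
    {
        return (hpd & T_PMD_PAGE_MASK) == T_PMD_PAGE_8M;  /* exact field match */
    }

    static bool toy_hugepd_ok(uint32_t hpd)
    {
        return hpd && (hpd & T_PD_HUGE) == 0;  /* non-zero, top bit clear */
    }

    int main(void)
    {
        assert(toy_hugepd_ok_8xx(T_PMD_PAGE_8M));
        assert(!toy_hugepd_ok_8xx(T_PMD_PAGE_MASK));  /* other field values don't match */
        assert(toy_hugepd_ok(0x1000u));
        assert(!toy_hugepd_ok(T_PD_HUGE | 0x1u));     /* ordinary page-table pointer */
        return 0;
    }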
@@ -239,5 +286,18 @@
 #define is_hugepd(hpd) (hugepd_ok(hpd))
 #endif
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ */
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif
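The final hunk keeps update_mmu_cache() cheap in the common case: only FSL book3e with hugetlb needs a real out-of-line implementation, and every other configuration gets an empty static inline that the compiler erases at the call sites. The same declaration-or-stub pattern, reduced to a standalone sketch with invented names:

    #include <stdio.h>

    /* pretend config switch; the kernel tests CONFIG_PPC_FSL_BOOK3E etc. */
    /* #define TOY_NEEDS_HOOK 1 */

    #ifdef TOY_NEEDS_HOOK
    void toy_hook(unsigned long address);   /* real version defined elsewhere */
    #else
    static inline void toy_hook(unsigned long address) { (void)address; }
    #endif

    int main(void)
    {
        toy_hook(42);  /* the call site is identical either way */
        printf("done\n");
        return 0;
    }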