.. | .. |
---|
8 | 8 | #include <asm/nohash/32/pgtable.h> |
---|
9 | 9 | #endif |
---|
10 | 10 | |
---|
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
/* Non-cacheable variant (_PAGE_NO_CACHE set) */
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
/* Non-cacheable and guarded (no speculative access) */
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X	/* writable so breakpoints can be patched in */
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX	/* read-only, executable */
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP
---|
| 38 | + |
---|
11 | 39 | #ifndef __ASSEMBLY__ |
---|
12 | 40 | |
---|
13 | 41 | /* Generic accessors to PTE bits */ |
---|
/*
 * True if the PTE grants write permission (_PAGE_RW set).
 * A platform header may provide its own pte_write() and define the
 * pte_write macro, which suppresses this generic version.
 */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
---|
/* Read permission is always granted here (unconditionally 1) */
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
/* A PTE is "none" when no bits outside _PTE_NONE_MASK are set */
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
/* Never a hash PTE on these (nohash) platforms */
static inline bool pte_hashpte(pte_t pte) { return false; }
/* Cache-inhibited mapping? */
static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
---|
23 | 55 | |
---|
24 | 56 | #ifdef CONFIG_NUMA_BALANCING |
---|
25 | 57 | /* |
---|
26 | 58 | * These work without NUMA balancing but the kernel does not care. See the |
---|
27 | | - * comment in include/asm-generic/pgtable.h . On powerpc, this will only |
---|
| 59 | + * comment in include/linux/pgtable.h . On powerpc, this will only |
---|
28 | 60 | * work for user pages and always return true for kernel pages. |
---|
29 | 61 | */ |
---|
/* PROT_NONE: page is present but carries no user access */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}
---|
35 | 66 | |
---|
36 | 67 | static inline int pmd_protnone(pmd_t pmd) |
---|
.. | .. |
---|
43 | 74 | { |
---|
44 | 75 | return pte_val(pte) & _PAGE_PRESENT; |
---|
45 | 76 | } |
---|
| 77 | + |
---|
/*
 * Valid from the hardware's point of view; here this is the same
 * _PAGE_PRESENT test used by pte_present() above.
 */
static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
---|
| 82 | + |
---|
/*
 * Don't just check for any non zero bits in __PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	/* All bits of _PAGE_USER must be set, not merely some of them */
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif
---|
46 | 94 | |
---|
47 | 95 | /* |
---|
48 | 96 | * We only find page table entry in the last level |
---|
.. | .. |
---|
77 | 125 | return pte_val(pte) >> PTE_RPN_SHIFT; } |
---|
78 | 126 | |
---|
/* Generic modifiers for PTE bits */

/* Remove execute permission */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
---|
88 | 132 | |
---|
89 | 133 | static inline pte_t pte_mkclean(pte_t pte) |
---|
90 | 134 | { |
---|
91 | | - return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); |
---|
| 135 | + return __pte(pte_val(pte) & ~_PAGE_DIRTY); |
---|
92 | 136 | } |
---|
93 | 137 | |
---|
/* Clear the accessed (young) bit */
static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}
---|
98 | 142 | |
---|
/* Mark the PTE as special (_PAGE_SPECIAL) */
static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
---|
122 | 147 | |
---|
/*
 * Generic fallback: no dedicated huge-page bit here, so the PTE is
 * returned unchanged. Platforms with such a bit provide their own
 * pte_mkhuge() and define the pte_mkhuge macro.
 */
#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif
---|
| 154 | + |
---|
/* Make the PTE kernel-only by clearing the user access bits */
#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif
---|
| 161 | + |
---|
/* Grant user access by setting the _PAGE_USER bits */
#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif
---|
127 | 168 | |
---|
128 | 169 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
---|
129 | 170 | { |
---|
.. | .. |
---|
161 | 202 | /* Anything else just stores the PTE normally. That covers all 64-bit |
---|
162 | 203 | * cases, and 32-bit non-hash with 32-bit PTEs. |
---|
163 | 204 | */ |
---|
| 205 | +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) |
---|
| 206 | + ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte); |
---|
| 207 | +#else |
---|
164 | 208 | *ptep = pte; |
---|
| 209 | +#endif |
---|
165 | 210 | |
---|
166 | 211 | /* |
---|
167 | 212 | * With hardware tablewalk, a sync is needed to ensure that |
---|
.. | .. |
---|
#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
/* No write-through bit on this platform: fall back to non-cached */
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif
---|
201 | 248 | |
---|
202 | 249 | #define pgprot_cached_noncoherent(prot) \ |
---|
.. | .. |
---|
213 | 260 | static inline int hugepd_ok(hugepd_t hpd) |
---|
214 | 261 | { |
---|
215 | 262 | #ifdef CONFIG_PPC_8xx |
---|
216 | | - return ((hpd_val(hpd) & 0x4) != 0); |
---|
| 263 | + return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M); |
---|
217 | 264 | #else |
---|
218 | 265 | /* We clear the top bit to indicate hugepd */ |
---|
219 | 266 | return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); |
---|
.. | .. |
---|
239 | 286 | #define is_hugepd(hpd) (hugepd_ok(hpd)) |
---|
240 | 287 | #endif |
---|
241 | 288 | |
---|
/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
/* Out-of-line version handles the FSL book3e + hugetlb combination */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
/* Otherwise nothing to do */
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif
---|
242 | 302 | #endif /* __ASSEMBLY__ */ |
---|
243 | 303 | #endif |
---|