```diff
...
 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #include <asm/book3s/32/hash.h>
 
 /* And here we include common definitions */
-#include <asm/pte-common.h>
+
+#define _PAGE_KERNEL_RO		0
+#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
+#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+#define _PAGE_HPTEFLAGS	_PAGE_HASHPTE
+
+#ifndef __ASSEMBLY__
+
+static inline bool pte_user(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_USER;
+}
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * as PAGE_SHIFT here (i.e., naturally aligned). Platforms that use a
+ * different location just pre-define the value, so we don't override
+ * it here.
+ */
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#else
+#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks the bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/*
+ * We define two sets of base prot bits: one for basic pages (i.e.,
+ * cacheable kernel and user pages) and one for non-cacheable pages.
+ * We always set _PAGE_COHERENT when SMP is enabled or the processor
+ * might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/*
+ * Permission masks used to generate the __P and __S table.
+ *
+ * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now.
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
+	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP		(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
 
 #define PTE_INDEX_SIZE	PTE_SHIFT
 #define PMD_INDEX_SIZE	0
```
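With PTE_RPN_SHIFT and PTE_RPN_MASK now defined here instead of coming from the removed pte-common.h, extracting the physical frame number from a PTE follows directly from the two definitions. A minimal sketch, assuming nothing beyond the macros above (the in-tree pte_pfn()/pfn_pte() helpers follow the same pattern; `example_pte_pfn` is a made-up name):

```c
/*
 * Sketch, not the in-tree helper: recover the PFN from a PTE using the
 * definitions above. With CONFIG_PTE_64BIT, pte_val() is 64 bits wide
 * and the RPN can address up to 2^36 bytes of physical memory
 * (MAX_POSSIBLE_PHYSMEM_BITS == 36); otherwise it is 32 bits.
 */
static inline unsigned long example_pte_pfn(pte_t pte)
{
	return (unsigned long)((pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT);
}
```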
```diff
...
 #define PMD_TABLE_SIZE	0
 #define PUD_TABLE_SIZE	0
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS	(PTE_TABLE_SIZE - 1)
 #endif /* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
```
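PMD_MASKED_BITS works because PTE pages are naturally aligned to PTE_TABLE_SIZE, so the low bits of a PMD entry are free for flags and can simply be masked off to recover the PTE-page address. A sketch of how a pmd_page_vaddr()-style helper would use it (illustrative only; the real helper lives elsewhere in the powerpc headers):

```c
/* Sketch: mask the low flag bits off a PMD value to get the physical
 * address of the PTE page it points to, then convert to a vaddr. */
static inline unsigned long example_pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
```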
```diff
...
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+
+#ifndef __ASSEMBLY__
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
+
+#endif /* !__ASSEMBLY__ */
+
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  * value (for now) on others, from where we can start laying out kernel
  * virtual space that goes below PKMAP and FIXMAP
  */
-#ifdef CONFIG_HIGHMEM
-#define KVIRT_TOP	PKMAP_BASE
-#else
-#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
-#endif
+#include <asm/fixmap.h>
 
 /*
  * ioremap_bot starts at that address. Early ioremaps move down from there,
  * until mem_init() at which point this becomes the top of the vmalloc
  * and ioremap space
  */
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP	PKMAP_BASE
 #else
-#define IOREMAP_TOP	KVIRT_TOP
+#define IOREMAP_TOP	FIXADDR_START
 #endif
+
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START	VMALLOC_START
+#define IOREMAP_END	VMALLOC_END
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
```
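map_kernel_page() now takes a pgprot_t instead of raw flag bits, so callers pass the kernel protection macros defined at the top of the file. A usage sketch under stated assumptions (the addresses and the helper name are made up; only the prototype and PAGE_KERNEL_NCG come from this patch):

```c
/* Sketch: map one page of a device register block at a fixed kernel
 * virtual address, non-cached and guarded. 0xf0000000/0x80000000 are
 * invented example values, not real platform addresses. */
static int example_map_device_page(void)
{
	return map_kernel_page(0xf0000000UL, (phys_addr_t)0x80000000ULL,
			       PAGE_KERNEL_NCG);
}
```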
```diff
...
  * of RAM. -- Cort
  */
 #define VMALLOC_OFFSET (0x1000000) /* 16M */
+
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END	ioremap_bot
+#endif
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#define MODULES_END	ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
+#define MODULES_VADDR	(MODULES_END - SZ_256M)
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-
-extern unsigned long ioremap_bot;
 
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS	0
```
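The KASAN_VMALLOC variant aligns VMALLOC_END down so that the KASAN shadow of the vmalloc area ends on a full shadow page. A worked sketch of the arithmetic, under assumptions that are mine, not the patch's (4 KB pages and the usual KASAN_SHADOW_SCALE_SHIFT of 3, i.e. one shadow byte per 8 bytes):

```c
/* Worked example (assumptions: PAGE_SIZE == 0x1000,
 * KASAN_SHADOW_SCALE_SHIFT == 3):
 *
 *   PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT = 0x1000 << 3 = 0x8000 (32 KB)
 *
 * so VMALLOC_END = ALIGN_DOWN(ioremap_bot, 0x8000), which makes the
 * shadow of the vmalloc area end exactly on a 4 KB shadow-page
 * boundary. */
```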
```diff
...
  */
 
 #define pte_clear(mm, addr, ptep) \
-	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
```
```diff
...
  * and the PTE may be either 32 or 64 bit wide. In the latter case,
  * when using atomic updates, only the low part of the PTE is
  * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
  */
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
-				       unsigned long clr,
-				       unsigned long set)
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+				     unsigned long clr, unsigned long set, int huge)
 {
-	unsigned long old, tmp;
-
-	__asm__ __volatile__("\
-1:	lwarx	%0,0,%3\n\
-	andc	%1,%0,%4\n\
-	or	%1,%1,%5\n"
-"	stwcx.	%1,0,%3\n\
-	bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "r" (set), "m" (*p)
-	: "cc" );
-
-	return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
-					    unsigned long clr,
-					    unsigned long set)
-{
-	unsigned long long old;
+	pte_basic_t old;
 	unsigned long tmp;
 
-	__asm__ __volatile__("\
-1:	lwarx	%L0,0,%4\n\
-	lwzx	%0,0,%3\n\
-	andc	%1,%L0,%5\n\
-	or	%1,%1,%6\n"
-"	stwcx.	%1,0,%4\n\
-	bne-	1b"
+	__asm__ __volatile__(
+#ifndef CONFIG_PTE_64BIT
+"1:	lwarx	%0, 0, %3\n"
+"	andc	%1, %0, %4\n"
+#else
+"1:	lwarx	%L0, 0, %3\n"
+"	lwz	%0, -4(%3)\n"
+"	andc	%1, %L0, %4\n"
+#endif
+"	or	%1, %1, %5\n"
+"	stwcx.	%1, 0, %3\n"
+"	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+#ifndef CONFIG_PTE_64BIT
+	: "r" (p),
+#else
+	: "b" ((unsigned long)(p) + 4),
+#endif
+	  "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
 
 	return old;
 }
-#endif /* CONFIG_PTE_64BIT */
```
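The two pte_update() variants are folded into one function with the preprocessor split moved inside the asm, and all callers now pass mm/addr plus a `huge` flag (0 for normal pages). The function returns the old PTE value, which is what callers test. A hedged sketch of a typical caller, in the style of the real callers below (`example_ptep_test_and_clear_dirty` is a made-up name, not part of the patch):

```c
/* Sketch: atomically clear the DIRTY bit of a normal (non-huge) page
 * and report whether it was set, following the pte_update() contract. */
static inline int example_ptep_test_and_clear_dirty(struct mm_struct *mm,
						    unsigned long addr,
						    pte_t *ptep)
{
	return (pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0) & _PAGE_DIRTY) != 0;
}
```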
```diff
 
 /*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, so we fix that up here.
  */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
-	old = pte_update(ptep, _PAGE_ACCESSED, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 	if (old & _PAGE_HASHPTE) {
 		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(context, addr, ptephys, 1);
+		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
 {
-	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-					   unsigned long addr, pte_t *ptep)
-{
-	ptep_set_wrprotect(mm, addr, ptep);
-}
-
 
```
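Worth noting in the ptep_set_wrprotect() change: with pte-common.h gone, _PAGE_RO and _PAGE_HWWRITE no longer exist for book3s/32, so read-only is simply the absence of _PAGE_RW and write-protecting becomes a single atomic bit clear. A sketch of the observable effect (hypothetical helper name, not in the patch):

```c
/* Sketch: after ptep_set_wrprotect() the PTE must read back as
 * non-writable; no other PTE bits change. */
static inline bool example_wrprotect_works(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
	return !pte_write(*ptep);
}
```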
```diff
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
...
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
 
-	pte_update(ptep, clr, set);
+	pte_update(vma->vm_mm, address, ptep, 0, set, 0);
 
 	flush_tlb_page(vma, address);
 }
```
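Only bit additions are applied here (the clear mask is now 0, since there is no _PAGE_RO to clear), which matches the access-flags contract: fault paths only ever upgrade status and permission bits, then flush the stale TLB entry. A usage sketch through the generic wrapper; this is illustrative, assuming the standard pte_mkdirty()/pte_mkyoung() helpers (not shown in this hunk) and the generic ptep_set_access_flags() plumbing:

```c
/* Sketch: a dirtying write fault upgrades the PTE in place; the call
 * ends up in __ptep_set_access_flags() above on this platform. */
static inline void example_fault_set_dirty(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	pte_t entry = pte_mkdirty(pte_mkyoung(*ptep));

	ptep_set_access_flags(vma, address, ptep, entry, 1 /* dirty */);
}
```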
```diff
...
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
-/*
- * Note that on Book E processors, the pmd contains the kernel virtual
- * (lowmem) address of the pte page. The physical address is less useful
- * because everything runs with translation enabled (even the TLB miss
- * handler). On everything else the pmd contains the physical address
- * of the pte page. -- paulus
- */
-#ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd) \
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_page(pmd) \
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#else
-#define pmd_page_vaddr(pmd) \
-	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd) \
-	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-#endif
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address)	((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
-	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-#define pte_unmap(pte)		kunmap_atomic(pte)
 
 /*
  * Encode and decode a swap entry.
```
```diff
...
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
-
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
 static inline int pte_read(pte_t pte)		{ return 1; }
```
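Swap entries are stored shifted left by three, which keeps the low three PTE bits clear; on this platform those low bits include _PAGE_PRESENT and _PAGE_HASHPTE, so a swap PTE can never read as present or hashed. A round-trip sketch (hypothetical helper; only the two macros above are from the patch):

```c
/* Sketch: a swap entry round-trips through the encode/decode macros
 * and the resulting PTE never reads as present, because the value is
 * shifted left by 3 and _PAGE_PRESENT lives in bit 0. */
static inline bool example_swap_pte_never_present(pte_t pte)
{
	swp_entry_t entry = __pte_to_swp_entry(pte);
	pte_t back = __swp_entry_to_pte(entry);

	return !pte_present(back);
}
```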
```diff
...
 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
 
 static inline int pte_present(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hashpte(pte_t pte)
+{
+	return !!(pte_val(pte) & _PAGE_HASHPTE);
+}
+
+static inline bool pte_ci(pte_t pte)
+{
+	return !!(pte_val(pte) & _PAGE_NO_CACHE);
 }
 
```
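On book3s/32, pte_hw_valid() is deliberately identical to pte_present(); the split exists because some platforms can hold a Linux-present PTE that is invalid to the hardware, and generic code wants to ask both questions. A small sketch using the new predicates (hypothetical helper name):

```c
/* Sketch: a mapping only needs icache maintenance if it is executable
 * and cacheable; cache-inhibited (pte_ci) mappings can be skipped. */
static inline bool example_needs_icache_flush(pte_t pte)
{
	return pte_exec(pte) && !pte_ci(pte);
}
```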
```diff
 /*
...
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
-	unsigned long pteval = pte_val(pte);
 	/*
 	 * A read-only access is controlled by the _PAGE_USER bit.
 	 * We have _PAGE_READ set for WRITE and EXECUTE
 	 */
-	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
+	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+		return false;
 
-	if (write)
-		need_pte_bits |= _PAGE_WRITE;
-
-	if ((pteval & need_pte_bits) != need_pte_bits)
+	if (write && !pte_write(pte))
 		return false;
 
 	return true;
```
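pte_access_permitted() is what lockless lookups (GUP-fast style paths) consult before using a translation without taking a fault. A hedged usage sketch combining it with the RPN macros from earlier in the file (`example_follow_user_pte` is invented for illustration):

```c
/* Sketch: a lockless lookup checks presence + user + read (and RW for
 * writes) in one call instead of open-coding the bit tests, then pulls
 * the PFN out of the PTE. Returns -EFAULT to force the slow path. */
static inline int example_follow_user_pte(pte_t pte, bool write,
					  unsigned long *pfn)
{
	if (!pte_access_permitted(pte, write))
		return -EFAULT;

	*pfn = (unsigned long)((pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT);
	return 0;
}
```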
```diff
...
 	return __pte(pte_val(pte) & ~_PAGE_RW);
 }
 
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_EXEC);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
```
```diff
...
 static inline pte_t pte_mkold(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+	return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
```
```diff
...
 	return pte;
 }
 
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_USER);
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
```
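The mk-helpers compose, since each one touches only its own bit, and pte_modify() preserves exactly the _PAGE_CHG_MASK bits defined at the top of the file. A sketch of composition (hypothetical helper name):

```c
/* Sketch: turn a user-executable PTE into a privileged, non-executable
 * one; DIRTY/ACCESSED/HASHPTE and the RPN are untouched because each
 * helper only clears its own bit. */
static inline pte_t example_make_kernel_noexec(pte_t pte)
{
	return pte_exprotect(pte_mkprivileged(pte));
}
```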
```diff
...
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 	 * helper pte_update() which does an atomic update. We need to do that
 	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
...
 		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 			      | (pte_val(pte) & ~_PAGE_HASHPTE));
 	else
-		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#elif defined(CONFIG_PTE_64BIT)
 	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
...
 	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 	: "r" (pte) : "memory");
 
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#else
 	/* Third case is 32-bit hash table in UP mode, we need to preserve
 	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
...
 	 */
 	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-#error "Not supported "
 #endif
 }
 
```
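For the 64-bit-PTE case, the partly elided assembly stores the high word first, issues an eieio barrier, then stores the low word that carries _PAGE_PRESENT, so a concurrent lookup can never see a valid low word paired with a stale high word. A C-level sketch of that ordering, illustrative only: wmb() stands in for the eieio in the real asm, and the word layout assumes big-endian PPC32 (high half at offset 0, flags in the low half at offset 4).

```c
/* Sketch of the ordering the inline asm enforces for a 64-bit PTE
 * store on a 32-bit big-endian CPU; not a replacement for the asm. */
static inline void example_store_pte64(pte_t *ptep, pte_t pte)
{
	u32 *words = (u32 *)ptep;

	WRITE_ONCE(words[0], (u32)(pte_val(pte) >> 32)); /* high: RPN */
	wmb();			/* stand-in for the eieio barrier */
	WRITE_ONCE(words[1], (u32)pte_val(pte));	 /* low: flags, PRESENT */
}
```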
|---|