...
  * the ppc64 non-hashed page table.
  */
 
+#include <linux/sizes.h>
+
 #include <asm/nohash/64/pgtable-4k.h>
 #include <asm/barrier.h>
 #include <asm/asm-const.h>
-
-#ifdef CONFIG_PPC_64K_PAGES
-#error "Page size not supported"
-#endif
 
 #define FIRST_USER_ADDRESS 0UL
 
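The new `<linux/sizes.h>` include is what provides the `SZ_32M` constant used for `FIXADDR_SIZE` further down in this header. For reference, the relevant constants look roughly like the excerpt below (values quoted from mainline; only `SZ_32M` is actually needed here):

```c
/* For reference only, in the style of include/linux/sizes.h: */
#define SZ_16M	0x01000000
#define SZ_32M	0x02000000	/* used below for FIXADDR_SIZE */
#define SZ_64M	0x04000000
```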
...
 			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1)
-#else
 #define PMD_CACHE_INDEX PMD_INDEX_SIZE
-#endif
 #define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 /*
...
 #define PHB_IO_BASE (ISA_IO_END)
 #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE (PHB_IO_END)
-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
+#define IOREMAP_START (ioremap_bot)
+#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
 
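A minimal sketch (not part of the patch) of the layout invariant these definitions establish: the top `FIXADDR_SIZE` (32 MB) of the kernel virtual range is carved out for the fixmap, so the ioremap area now ends `FIXADDR_SIZE` short of the very top. The check below only restates the arithmetic implied by the definitions above:

```c
#include <linux/build_bug.h>

/* Illustrative check only: the 32MB fixmap window occupies the top of the
 * kernel virtual range, immediately above where ioremap space now ends.
 */
static_assert(IOREMAP_END + FIXADDR_SIZE == KERN_VIRT_START + KERN_VIRT_SIZE);
```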
 
 /*
...
 
 #define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
 #define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID (0xfUL) /* Server only */
 #define USER_REGION_ID (0UL)
 
 /*
...
  * Include the PTE bits definitions
  */
 #include <asm/nohash/pte-book3e.h>
-#include <asm/pte-common.h>
+
+#define _PAGE_SAO 0
+
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+#define H_PAGE_4K_PFN 0
 
 #ifndef __ASSEMBLY__
 /* pte_clear moved to later in this file */
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_EXEC);
+}
 
 #define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
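A minimal sketch (not part of the patch) of how one-bit PTE accessors like the ones added above are typically composed by fault-handling code when a page becomes writable, and how write protection reverses it. `make_writable_pte()` and `make_readonly_pte()` are hypothetical names used only for illustration:

```c
#include <linux/mm.h>	/* kernel context; pulls in the pte helpers */

/* Hypothetical helper: mark a PTE young, dirty and writable, composing
 * the single-bit accessors the way fault handlers usually do.
 */
static pte_t make_writable_pte(pte_t pte)
{
	pte = pte_mkyoung(pte);		/* set _PAGE_ACCESSED */
	pte = pte_mkdirty(pte);		/* set _PAGE_DIRTY */
	pte = pte_mkwrite(pte);		/* set _PAGE_RW */
	return pte;
}

/* Hypothetical inverse used for write protection: */
static pte_t make_readonly_pte(pte_t pte)
{
	return pte_wrprotect(pte);	/* clear _PAGE_RW */
}
```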
...
 #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
 		      || (pud_val(pud) & PUD_BAD_BITS))
 #define pud_present(pud) (pud_val(pud) != 0)
-#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+	return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS);
+}
 
 extern struct page *pud_page(pud_t pud);
 
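`pud_pgtable()` is the typed replacement for the removed `pud_page_vaddr()` macro: instead of a raw `unsigned long` virtual address it returns the `pmd_t *` table the PUD entry points to. A minimal sketch (illustration only, not from the patch; `pmd_base_of()` is a made-up name) of the difference at a call site:

```c
/* Before: callers had to cast the raw virtual address themselves:
 *   pmd_t *pmd = (pmd_t *)pud_page_vaddr(*pudp) + pmd_index(addr);
 * After: the accessor already returns a typed pointer.
 */
static pmd_t *pmd_base_of(pud_t *pudp)
{
	return pud_pgtable(*pudp);	/* pmd table this PUD entry points to */
}
```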
...
 	return __pud(pte_val(pte));
 }
 #define pud_write(pud) pte_write(pud_pte(pud))
-#define pgd_write(pgd) pte_write(pgd_pte(pgd))
+#define p4d_write(p4d) pte_write(p4d_pte(p4d))
 
-static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+static inline void p4d_set(p4d_t *p4dp, unsigned long val)
 {
-	*pgdp = __pgd(val);
+	*p4dp = __p4d(val);
 }
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
-	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
-	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while(0)
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
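The arch-local walker macros removed above (`pgd_index`, `pgd_offset`, `pmd_offset`, `pte_offset_kernel`, `pgd_offset_k`, ...) are no longer needed once the architecture relies on the generic definitions. A minimal sketch, assuming a kernel recent enough that these helpers come from include/linux/pgtable.h, of how a kernel-virtual address would be walked down to its PTE; `find_kernel_pte()` is a hypothetical function for illustration only and takes no locks:

```c
#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical, lock-free illustration of a kernel page-table walk
 * using the generic offset helpers instead of the removed arch macros.
 */
static pte_t *find_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* &init_mm page directory */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);		/* folded level on ppc64 */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
```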
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
...
 				       unsigned long set,
 				       int huge)
 {
-#ifdef PTE_ATOMIC_UPDATES
-	unsigned long old, tmp;
-
-	__asm__ __volatile__(
-	"1: ldarx %0,0,%3 # pte_update\n\
-	andc %1,%0,%4 \n\
-	or %1,%1,%6\n\
-	stdcx. %1,0,%3 \n\
-	bne- 1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
-	: "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
-	: "cc" );
-#else
 	unsigned long old = pte_val(*ptep);
 	*ptep = __pte((old & ~clr) | set);
-#endif
+
 	/* huge pages use the old page table lock */
 	if (!huge)
 		assert_pte_locked(mm, addr);
...
 	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
 
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
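Defining `__HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT` tells the generic hugetlb header that this architecture supplies its own `huge_ptep_set_wrprotect()` (the function that follows in the diff), so the generic fallback must not be emitted. For context, the guard in include/asm-generic/hugetlb.h follows roughly this pattern (paraphrased from memory, not part of this patch):

```c
/* Approximate shape of the generic fallback being opted out of: */
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);	/* reuse the normal PTE path */
}
#endif
```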
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
...
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
-#ifdef PTE_ATOMIC_UPDATES
-	unsigned long old, tmp;
-
-	__asm__ __volatile__(
-	"1: ldarx %0,0,%4\n\
-	or %0,%3,%0\n\
-	stdcx. %0,0,%4\n\
-	bne- 1b"
-	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
-	:"r" (bits), "r" (ptep), "m" (*ptep)
-	:"cc");
-#else
 	unsigned long old = pte_val(*ptep);
 	*ptep = __pte(old | bits);
-#endif
 
 	flush_tlb_page(vma, address);
 }
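With the `PTE_ATOMIC_UPDATES` paths gone, `pte_update()` and the access-flags update just above are plain read-modify-write sequences protected by the page-table lock. A minimal sketch (illustrative, not from the patch; the function name is hypothetical) of how `pte_update()` is typically consumed, in the style of a test-and-clear-young helper:

```c
/* Hypothetical consumer of pte_update(): clear _PAGE_ACCESSED under the
 * page-table lock and report whether it had been set.
 */
static int example_test_and_clear_young(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
```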
...
 #define MAX_SWAPFILES_CHECK() do { \
 	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
 	} while (0)
-/*
- * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
+
 #define SWP_TYPE_BITS 5
 #define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
 				& ((1UL << SWP_TYPE_BITS) - 1))
...
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x) __pte((x).val)
 
-extern int map_kernel_page(unsigned long ea, unsigned long pa,
-			   unsigned long flags);
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
 extern int __meminit vmemmap_create_mapping(unsigned long start,
 					     unsigned long page_size,
 					     unsigned long phys);
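The prototype change means callers now pass a `pgprot_t` instead of raw flag bits, and an unmap counterpart is exposed alongside it. A minimal sketch (illustration only; the wrapper name is made up) of how the new interface would be used to establish and tear down a single kernel mapping:

```c
#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical example: map one page of physical memory at a chosen
 * kernel effective address with normal cacheable kernel permissions,
 * then remove the mapping again.
 */
static int example_map_one_page(unsigned long ea, unsigned long pa)
{
	int err;

	err = map_kernel_page(ea, pa, PAGE_KERNEL);	/* pgprot_t, not flags */
	if (err)
		return err;

	unmap_kernel_page(ea);
	return 0;
}
```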