.. | ..
2 | 2 | #ifndef _PARISC_PGTABLE_H
3 | 3 | #define _PARISC_PGTABLE_H
4 | 4 |
5 | | -#include <asm-generic/4level-fixup.h>
| 5 | +#include <asm/page.h>
| 6 | +
| 7 | +#if CONFIG_PGTABLE_LEVELS == 3
| 8 | +#include <asm-generic/pgtable-nopud.h>
| 9 | +#elif CONFIG_PGTABLE_LEVELS == 2
| 10 | +#include <asm-generic/pgtable-nopmd.h>
| 11 | +#endif
6 | 12 |
7 | 13 | #include <asm/fixmap.h>
8 | 14 |
.. | ..
16 | 22 | #include <linux/mm_types.h>
17 | 23 | #include <asm/processor.h>
18 | 24 | #include <asm/cache.h>
19 | | -
20 | | -extern spinlock_t pa_tlb_lock;
21 | 25 |
22 | 26 | /*
23 | 27 | * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
.. | ..
34 | 38 | */
35 | 39 | #define kern_addr_valid(addr) (1)
36 | 40 |
37 | | -/* Purge data and instruction TLB entries. Must be called holding
38 | | - * the pa_tlb_lock. The TLB purge instructions are slow on SMP
39 | | - * machines since the purge must be broadcast to all CPUs.
| 41 | +/* This is for the serialization of PxTLB broadcasts. At least on the N class
| 42 | + * systems, only one PxTLB inter processor broadcast can be active at any one
| 43 | + * time on the Merced bus. */
| 44 | +extern spinlock_t pa_tlb_flush_lock;
| 45 | +#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
| 46 | +extern int pa_serialize_tlb_flushes;
| 47 | +#else
| 48 | +#define pa_serialize_tlb_flushes (0)
| 49 | +#endif
| 50 | +
| 51 | +#define purge_tlb_start(flags) do { \
| 52 | + if (pa_serialize_tlb_flushes) \
| 53 | + spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
| 54 | + else \
| 55 | + local_irq_save(flags); \
| 56 | + } while (0)
| 57 | +#define purge_tlb_end(flags) do { \
| 58 | + if (pa_serialize_tlb_flushes) \
| 59 | + spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
| 60 | + else \
| 61 | + local_irq_restore(flags); \
| 62 | + } while (0)
| 63 | +
| 64 | +/* Purge data and instruction TLB entries. The TLB purge instructions
| 65 | + * are slow on SMP machines since the purge must be broadcast to all CPUs.
40 | 66 | */
41 | 67 |
42 | 68 | static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
43 | 69 | {
| 70 | + unsigned long flags;
| 71 | +
| 72 | + purge_tlb_start(flags);
44 | 73 | mtsp(mm->context, 1);
45 | 74 | pdtlb(addr);
46 | | - if (unlikely(split_tlb))
47 | | - pitlb(addr);
| 75 | + pitlb(addr);
| 76 | + purge_tlb_end(flags);
48 | 77 | }
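For readers of this hunk, a minimal sketch of how another TLB flush path might use the purge_tlb_start()/purge_tlb_end() pair introduced above. The helper name example_purge_tlb_range and the page loop are hypothetical and not part of this patch; only the locking pattern and the mtsp()/pdtlb()/pitlb() usage mirror purge_tlb_entries() above.

/* Hypothetical example, not part of this patch: purge a user address
 * range one page at a time under the same serialization rules.  When
 * pa_serialize_tlb_flushes is set (64-bit SMP builds), pa_tlb_flush_lock
 * serializes the PxTLB broadcasts; otherwise only local interrupts are
 * disabled. */
static inline void example_purge_tlb_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long addr, flags;

	purge_tlb_start(flags);
	mtsp(mm->context, 1);			/* select the address space */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pdtlb(addr);			/* purge data TLB entry */
		pitlb(addr);			/* purge instruction TLB entry */
	}
	purge_tlb_end(flags);
}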
| 78 | +
| 79 | +extern void __update_cache(pte_t pte);
49 | 80 |
50 | 81 | /* Certain architectures need to do special things when PTEs
51 | 82 | * within a page table are directly modified. Thus, the following
52 | 83 | * hook is made available.
53 | 84 | */
54 | | -#define set_pte(pteptr, pteval) \
55 | | - do{ \
56 | | - *(pteptr) = (pteval); \
57 | | - } while(0)
| 85 | +#define set_pte(pteptr, pteval) \
| 86 | + do { \
| 87 | + *(pteptr) = (pteval); \
| 88 | + mb(); \
| 89 | + } while(0)
58 | 90 |
59 | | -#define pte_inserted(x) \
60 | | - ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
61 | | - == (_PAGE_PRESENT|_PAGE_ACCESSED))
62 | | -
63 | | -#define set_pte_at(mm, addr, ptep, pteval) \
64 | | - do { \
65 | | - pte_t old_pte; \
66 | | - unsigned long flags; \
67 | | - spin_lock_irqsave(&pa_tlb_lock, flags); \
68 | | - old_pte = *ptep; \
69 | | - if (pte_inserted(old_pte)) \
70 | | - purge_tlb_entries(mm, addr); \
71 | | - set_pte(ptep, pteval); \
72 | | - spin_unlock_irqrestore(&pa_tlb_lock, flags); \
| 91 | +#define set_pte_at(mm, addr, pteptr, pteval) \
| 92 | + do { \
| 93 | + if (pte_present(pteval) && \
| 94 | + pte_user(pteval)) \
| 95 | + __update_cache(pteval); \
| 96 | + *(pteptr) = (pteval); \
| 97 | + purge_tlb_entries(mm, addr); \
73 | 98 | } while (0)
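A usage note on the new definition: set_pte_at() now performs the cache maintenance itself (__update_cache() for present user PTEs) and the TLB purge, so callers no longer take pa_tlb_lock around PTE updates. A hypothetical caller sketch follows, assuming the standard mk_pte() helper; the function name, prot parameter, and pte_mkyoung() use are illustrative only.

/* Hypothetical sketch, not part of this patch: install one user mapping.
 * set_pte_at() flushes the cache for present user PTEs via __update_cache()
 * and purges the stale TLB entry, so no extra flushing is done here. */
static inline void example_install_pte(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep,
		struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);

	pte = pte_mkyoung(pte);			/* mark the PTE as referenced */
	set_pte_at(mm, addr, ptep, pte);	/* cache + TLB handled inside */
}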
74 | 99 |
75 | 100 | #endif /* !__ASSEMBLY__ */
76 | 101 |
77 | | -#include <asm/page.h>
78 | | -
79 | 102 | #define pte_ERROR(e) \
80 | 103 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
| 104 | +#if CONFIG_PGTABLE_LEVELS == 3
81 | 105 | #define pmd_ERROR(e) \
82 | 106 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
| 107 | +#endif
83 | 108 | #define pgd_ERROR(e) \
84 | 109 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
85 | 110 |
.. | ..
92 | 117 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
93 | 118 |
94 | 119 | #if CONFIG_PGTABLE_LEVELS == 3
95 | | -#define PGD_ORDER 1 /* Number of pages per pgd */
96 | | -#define PMD_ORDER 1 /* Number of pages per pmd */
97 | | -#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
| 120 | +#define PMD_ORDER 1
| 121 | +#define PGD_ORDER 0
98 | 122 | #else
99 | | -#define PGD_ORDER 1 /* Number of pages per pgd */
100 | | -#define PGD_ALLOC_ORDER PGD_ORDER
| 123 | +#define PGD_ORDER 1
101 | 124 | #endif
102 | 125 |
103 | 126 | /* Definitions for 3rd level (we use PLD here for Page Lower directory
.. | ..
109 | 132 | #define PTRS_PER_PTE (1UL << BITS_PER_PTE)
110 | 133 |
111 | 134 | /* Definitions for 2nd level */
112 | | -#define pgtable_cache_init() do { } while (0)
113 | | -
| 135 | +#if CONFIG_PGTABLE_LEVELS == 3
114 | 136 | #define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
115 | 137 | #define PMD_SIZE (1UL << PMD_SHIFT)
116 | 138 | #define PMD_MASK (~(PMD_SIZE-1))
117 | | -#if CONFIG_PGTABLE_LEVELS == 3
118 | 139 | #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
| 140 | +#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
119 | 141 | #else
120 | | -#define __PAGETABLE_PMD_FOLDED 1
121 | 142 | #define BITS_PER_PMD 0
122 | 143 | #endif
123 | | -#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
124 | 144 |
125 | 145 | /* Definitions for 1st level */
126 | | -#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)
| 146 | +#define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
127 | 147 | #if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
128 | 148 | #define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
129 | 149 | #else
.. | ..
202 | 222 | #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
203 | 223 | #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
204 | 224 |
205 | | -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
| 225 | +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
206 | 226 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
207 | 227 | #define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
208 | 228 | #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC)
.. | ..
215 | 235 | * able to effectively address 40/42/44-bits of physical address space
216 | 236 | * depending on 4k/16k/64k PAGE_SIZE */
217 | 237 | #define _PxD_PRESENT_BIT 31
218 | | -#define _PxD_ATTACHED_BIT 30
219 | | -#define _PxD_VALID_BIT 29
| 238 | +#define _PxD_VALID_BIT 30
220 | 239 |
221 | 240 | #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
222 | | -#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
223 | 241 | #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
224 | 242 | #define PxD_FLAG_MASK (0xf)
225 | 243 | #define PxD_FLAG_SHIFT (4)
.. | ..
227 | 245 |
228 | 246 | #ifndef __ASSEMBLY__
229 | 247 |
230 | | -#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
231 | | -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
| 248 | +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER)
| 249 | +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
232 | 250 | /* Others seem to make this executable, I don't know if that's correct
233 | 251 | or not. The stack is mapped this way though so this is necessary
234 | 252 | in the short term - dhd@linuxcare.com, 2000-08-08 */
235 | | -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
236 | | -#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
237 | | -#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
| 253 | +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
| 254 | +#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
| 255 | +#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
238 | 256 | #define PAGE_COPY PAGE_EXECREAD
239 | | -#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
| 257 | +#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
240 | 258 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
241 | 259 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
242 | 260 | #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
243 | 261 | #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
244 | 262 | #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
245 | | -#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
| 263 | +#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ)
246 | 264 |
247 | 265 |
248 | 266 | /*
.. | ..
292 | 310 |
293 | 311 | #define pte_none(x) (pte_val(x) == 0)
294 | 312 | #define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
| 313 | +#define pte_user(x) (pte_val(x) & _PAGE_USER)
295 | 314 | #define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
296 | 315 |
297 | 316 | #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
298 | 317 | #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
| 318 | +#define pud_flag(x) (pud_val(x) & PxD_FLAG_MASK)
| 319 | +#define pud_address(x) ((unsigned long)(pud_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
299 | 320 | #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
300 | 321 | #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
301 | 322 |
302 | | -#if CONFIG_PGTABLE_LEVELS == 3
303 | | -/* The first entry of the permanent pmd is not there if it contains
304 | | - * the gateway marker */
305 | | -#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
306 | | -#else
307 | 323 | #define pmd_none(x) (!pmd_val(x))
308 | | -#endif
309 | 324 | #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
310 | 325 | #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
311 | 326 | static inline void pmd_clear(pmd_t *pmd) {
312 | | -#if CONFIG_PGTABLE_LEVELS == 3
313 | | - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
314 | | - /* This is the entry pointing to the permanent pmd
315 | | - * attached to the pgd; cannot clear it */
316 | | - __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
317 | | - else
318 | | -#endif
319 | | - __pmd_val_set(*pmd, 0);
| 327 | + set_pmd(pmd, __pmd(0));
320 | 328 | }
321 | 329 |
322 | 330 |
323 | 331 |
324 | 332 | #if CONFIG_PGTABLE_LEVELS == 3
325 | | -#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
326 | | -#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd))
| 333 | +#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
| 334 | +#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud))
327 | 335 |
328 | 336 | /* For 64 bit we have three level tables */
329 | 337 |
330 | | -#define pgd_none(x) (!pgd_val(x))
331 | | -#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
332 | | -#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
333 | | -static inline void pgd_clear(pgd_t *pgd) {
334 | | -#if CONFIG_PGTABLE_LEVELS == 3
335 | | - if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
336 | | - /* This is the permanent pmd attached to the pgd; cannot
337 | | - * free it */
338 | | - return;
339 | | -#endif
340 | | - __pgd_val_set(*pgd, 0);
| 338 | +#define pud_none(x) (!pud_val(x))
| 339 | +#define pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID))
| 340 | +#define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT)
| 341 | +static inline void pud_clear(pud_t *pud) {
| 342 | + set_pud(pud, __pud(0));
341 | 343 | }
342 | | -#else
343 | | -/*
344 | | - * The "pgd_xxx()" functions here are trivial for a folded two-level
345 | | - * setup: the pgd is never bad, and a pmd always exists (as it's folded
346 | | - * into the pgd entry)
347 | | - */
348 | | -static inline int pgd_none(pgd_t pgd) { return 0; }
349 | | -static inline int pgd_bad(pgd_t pgd) { return 0; }
350 | | -static inline int pgd_present(pgd_t pgd) { return 1; }
351 | | -static inline void pgd_clear(pgd_t * pgdp) { }
352 | 344 | #endif
353 | 345 |
354 | 346 | /*
.. | ..
358 | 350 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
359 | 351 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
360 | 352 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
361 | | -static inline int pte_special(pte_t pte) { return 0; }
362 | 353 |
363 | 354 | static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
364 | 355 | static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
.. | ..
366 | 357 | static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
367 | 358 | static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
368 | 359 | static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
369 | | -static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
370 | 360 |
371 | 361 | /*
372 | 362 | * Huge pte definitions.
.. | ..
412 | 402 |
413 | 403 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
414 | 404 |
415 | | -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
| 405 | +static inline unsigned long pmd_page_vaddr(pmd_t pmd)
| 406 | +{
| 407 | + return ((unsigned long) __va(pmd_address(pmd)));
| 408 | +}
416 | 409 |
417 | 410 | #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
418 | 411 | #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
419 | 412 |
420 | | -#define pgd_index(address) ((address) >> PGDIR_SHIFT)
421 | | -
422 | | -/* to find an entry in a page-table-directory */
423 | | -#define pgd_offset(mm, address) \
424 | | -((mm)->pgd + ((address) >> PGDIR_SHIFT))
425 | | -
426 | | -/* to find an entry in a kernel page-table-directory */
427 | | -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
428 | | -
429 | 413 | /* Find an entry in the second-level page table.. */
430 | | -
431 | | -#if CONFIG_PGTABLE_LEVELS == 3
432 | | -#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
433 | | -#define pmd_offset(dir,address) \
434 | | -((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
435 | | -#else
436 | | -#define pmd_offset(dir,addr) ((pmd_t *) dir)
437 | | -#endif
438 | | -
439 | | -/* Find an entry in the third-level page table.. */
440 | | -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
441 | | -#define pte_offset_kernel(pmd, address) \
442 | | - ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
443 | | -#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
444 | | -#define pte_unmap(pte) do { } while (0)
445 | | -
446 | | -#define pte_unmap(pte) do { } while (0)
447 | | -#define pte_unmap_nested(pte) do { } while (0)
448 | 414 |
449 | 415 | extern void paging_init (void);
450 | 416 |
.. | ..
452 | 418 |
453 | 419 | #define PG_dcache_dirty PG_arch_1
454 | 420 |
455 | | -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
| 421 | +#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)
456 | 422 |
457 | 423 | /* Encode and de-code a swap entry */
458 | 424 |
.. | ..
468 | 434 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
469 | 435 | {
470 | 436 | pte_t pte;
471 | | - unsigned long flags;
472 | 437 |
473 | 438 | if (!pte_young(*ptep))
474 | 439 | return 0;
475 | 440 |
476 | | - spin_lock_irqsave(&pa_tlb_lock, flags);
477 | 441 | pte = *ptep;
478 | 442 | if (!pte_young(pte)) {
479 | | - spin_unlock_irqrestore(&pa_tlb_lock, flags);
480 | 443 | return 0;
481 | 444 | }
482 | | - purge_tlb_entries(vma->vm_mm, addr);
483 | | - set_pte(ptep, pte_mkold(pte));
484 | | - spin_unlock_irqrestore(&pa_tlb_lock, flags);
| 445 | + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
485 | 446 | return 1;
486 | 447 | }
487 | 448 |
.. | ..
489 | 450 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
490 | 451 | {
491 | 452 | pte_t old_pte;
492 | | - unsigned long flags;
493 | 453 |
494 | | - spin_lock_irqsave(&pa_tlb_lock, flags);
495 | 454 | old_pte = *ptep;
496 | | - if (pte_inserted(old_pte))
497 | | - purge_tlb_entries(mm, addr);
498 | | - set_pte(ptep, __pte(0));
499 | | - spin_unlock_irqrestore(&pa_tlb_lock, flags);
| 455 | + set_pte_at(mm, addr, ptep, __pte(0));
500 | 456 |
501 | 457 | return old_pte;
502 | 458 | }
503 | 459 |
504 | 460 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
505 | 461 | {
506 | | - unsigned long flags;
507 | | - spin_lock_irqsave(&pa_tlb_lock, flags);
508 | | - purge_tlb_entries(mm, addr);
509 | | - set_pte(ptep, pte_wrprotect(*ptep));
510 | | - spin_unlock_irqrestore(&pa_tlb_lock, flags);
| 462 | + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
511 | 463 | }
512 | 464 |
513 | 465 | #define pte_same(A,B) (pte_val(A) == pte_val(B))
.. | ..
548 | 500 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
549 | 501 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT
550 | 502 | #define __HAVE_ARCH_PTE_SAME
551 | | -#include <asm-generic/pgtable.h>
552 | 503 |
553 | 504 | #endif /* _PARISC_PGTABLE_H */