 .. |  .. |
 17 |  17 |  #include <asm/pgtable-64.h>
 18 |  18 |  #endif
 19 |  19 |
    |  20 | +#include <asm/cmpxchg.h>
 20 |  21 |  #include <asm/io.h>
 21 |  22 |  #include <asm/pgtable-bits.h>
    |  23 | +#include <asm/cpu-features.h>
 22 |  24 |
 23 |  25 |  struct mm_struct;
 24 |  26 |  struct vm_area_struct;
 .. |  .. |
 35 |  37 |          _PAGE_GLOBAL | _page_cachable_default)
 36 |  38 |  #define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 37 |  39 |          _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
 38 |     | -#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
 39 |     | -        _page_cachable_default)
 40 |  40 |  #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
 41 |  41 |          __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 42 |  42 |
 .. |  .. |
197 | 197 |  static inline void set_pte(pte_t *ptep, pte_t pteval)
198 | 198 |  {
199 | 199 |      *ptep = pteval;
200 |     | -#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
    | 200 | +#if !defined(CONFIG_CPU_R3K_TLB)
201 | 201 |      if (pte_val(pteval) & _PAGE_GLOBAL) {
202 | 202 |          pte_t *buddy = ptep_buddy(ptep);
203 | 203 |          /*
204 | 204 |           * Make sure the buddy is global too (if it's !none,
205 | 205 |           * it better already be global)
206 | 206 |           */
207 |     | -#ifdef CONFIG_SMP
208 |     | -        /*
209 |     | -         * For SMP, multiple CPUs can race, so we need to do
210 |     | -         * this atomically.
211 |     | -         */
212 |     | -        unsigned long page_global = _PAGE_GLOBAL;
213 |     | -        unsigned long tmp;
214 |     | -
215 |     | -        if (kernel_uses_llsc && R10000_LLSC_WAR) {
216 |     | -            __asm__ __volatile__ (
217 |     | -            " .set arch=r4000 \n"
218 |     | -            " .set push \n"
219 |     | -            " .set noreorder \n"
220 |     | -            "1:" __LL "%[tmp], %[buddy] \n"
221 |     | -            " bnez %[tmp], 2f \n"
222 |     | -            " or %[tmp], %[tmp], %[global] \n"
223 |     | -            __SC "%[tmp], %[buddy] \n"
224 |     | -            " beqzl %[tmp], 1b \n"
225 |     | -            " nop \n"
226 |     | -            "2: \n"
227 |     | -            " .set pop \n"
228 |     | -            " .set mips0 \n"
229 |     | -            : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
230 |     | -            : [global] "r" (page_global));
231 |     | -        } else if (kernel_uses_llsc) {
232 |     | -            __asm__ __volatile__ (
233 |     | -            " .set "MIPS_ISA_ARCH_LEVEL" \n"
234 |     | -            " .set push \n"
235 |     | -            " .set noreorder \n"
236 |     | -            "1:" __LL "%[tmp], %[buddy] \n"
237 |     | -            " bnez %[tmp], 2f \n"
238 |     | -            " or %[tmp], %[tmp], %[global] \n"
239 |     | -            __SC "%[tmp], %[buddy] \n"
240 |     | -            " beqz %[tmp], 1b \n"
241 |     | -            " nop \n"
242 |     | -            "2: \n"
243 |     | -            " .set pop \n"
244 |     | -            " .set mips0 \n"
245 |     | -            : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
246 |     | -            : [global] "r" (page_global));
247 |     | -        }
248 |     | -#else /* !CONFIG_SMP */
249 |     | -        if (pte_none(*buddy))
250 |     | -            pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
251 |     | -#endif /* CONFIG_SMP */
    | 207 | +# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
    | 208 | +        cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
    | 209 | +# else
    | 210 | +        cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
    | 211 | +# endif
252 | 212 |      }
253 | 213 |  #endif
254 | 214 |  }
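
The hand-rolled LL/SC loops and the separate !SMP branch collapse into a single compare-and-exchange on the buddy PTE: _PAGE_GLOBAL is installed only if the buddy is still zero (pte_none), with cmpxchg64() used where the PTE may be wider than a native long. A minimal user-space model of that semantic, using C11 atomics and a made-up bit value rather than the kernel's cmpxchg():

```c
/* Stand-alone model of the buddy-global update in set_pte(); the bit
 * value and the pte representation are illustrative, not the kernel's. */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_GLOBAL 0x1UL       /* hypothetical global bit */

/* Set PAGE_GLOBAL on *buddy only if the entry is still empty (zero).
 * A non-empty buddy is left alone, matching the "if it's !none, it
 * better already be global" invariant from the kernel comment. */
static void buddy_mkglobal(_Atomic unsigned long *buddy)
{
        unsigned long expected = 0;

        atomic_compare_exchange_strong(buddy, &expected, PAGE_GLOBAL);
}

int main(void)
{
        _Atomic unsigned long empty = 0, populated = 0x1000 | PAGE_GLOBAL;

        buddy_mkglobal(&empty);      /* becomes PAGE_GLOBAL */
        buddy_mkglobal(&populated);  /* left unchanged      */
        printf("%#lx %#lx\n", atomic_load(&empty), atomic_load(&populated));
        return 0;
}
```
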
 .. |  .. |
256 | 216 |  static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
257 | 217 |  {
258 | 218 |      htw_stop();
259 |     | -#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
    | 219 | +#if !defined(CONFIG_CPU_R3K_TLB)
260 | 220 |      /* Preserve global status for the pair */
261 | 221 |      if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
262 | 222 |          set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
 .. |  .. |
306 | 266 |   * to find that this expression is a constant, so the size is dropped.
307 | 267 |   */
308 | 268 |  extern pgd_t swapper_pg_dir[];
    | 269 | +
    | 270 | +/*
    | 271 | + * Platform specific pte_special() and pte_mkspecial() definitions
    | 272 | + * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
    | 273 | + */
    | 274 | +#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
    | 275 | +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
    | 276 | +static inline int pte_special(pte_t pte)
    | 277 | +{
    | 278 | +    return pte.pte_low & _PAGE_SPECIAL;
    | 279 | +}
    | 280 | +
    | 281 | +static inline pte_t pte_mkspecial(pte_t pte)
    | 282 | +{
    | 283 | +    pte.pte_low |= _PAGE_SPECIAL;
    | 284 | +    return pte;
    | 285 | +}
    | 286 | +#else
    | 287 | +static inline int pte_special(pte_t pte)
    | 288 | +{
    | 289 | +    return pte_val(pte) & _PAGE_SPECIAL;
    | 290 | +}
    | 291 | +
    | 292 | +static inline pte_t pte_mkspecial(pte_t pte)
    | 293 | +{
    | 294 | +    pte_val(pte) |= _PAGE_SPECIAL;
    | 295 | +    return pte;
    | 296 | +}
    | 297 | +#endif
    | 298 | +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
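
pte_special()/pte_mkspecial() are now provided only when CONFIG_ARCH_HAS_PTE_SPECIAL is enabled, with a dedicated variant for 32-bit kernels carrying 64-bit physical addresses, where the PTE is a two-word structure and the software bits live in pte_low. A stand-alone sketch of the two layouts, with an invented bit value and field names only loosely modelled on the MIPS headers:

```c
/* Illustrative model of the two PTE layouts above; the bit value and
 * field names are made up for the demo, not taken from the headers. */
#include <assert.h>

#define PAGE_SPECIAL 0x4UL      /* hypothetical software-only bit */

/* 32-bit kernel with 64-bit physical addresses: the PTE is two words
 * and the software bits live in the low word only. */
struct pte_split { unsigned long pte_low, pte_high; };

static struct pte_split split_mkspecial(struct pte_split pte)
{
        pte.pte_low |= PAGE_SPECIAL;    /* physical bits in pte_high untouched */
        return pte;
}

/* All other configurations: the PTE is a single word. */
static unsigned long flat_mkspecial(unsigned long pte)
{
        return pte | PAGE_SPECIAL;
}

int main(void)
{
        struct pte_split s = split_mkspecial((struct pte_split){ 0, 0x1234 });

        assert((s.pte_low & PAGE_SPECIAL) && s.pte_high == 0x1234);
        assert(flat_mkspecial(0x1000) & PAGE_SPECIAL);
        return 0;
}
```
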
309 | 299 |
310 | 300 |  /*
311 | 301 |   * The following only work if pte_present() is true.
 .. |  .. |
408 | 398 |
409 | 399 |  static inline pte_t pte_mkdirty(pte_t pte)
410 | 400 |  {
411 |     | -    pte_val(pte) |= _PAGE_MODIFIED;
    | 401 | +    pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
412 | 402 |      if (pte_val(pte) & _PAGE_WRITE)
413 | 403 |          pte_val(pte) |= _PAGE_SILENT_WRITE;
414 | 404 |      return pte;
 .. |  .. |
422 | 412 |      return pte;
423 | 413 |  }
424 | 414 |
    | 415 | +#define pte_sw_mkyoung pte_mkyoung
    | 416 | +
425 | 417 |  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
426 | 418 |  static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
427 | 419 |
 .. |  .. |
431 | 423 |      return pte;
432 | 424 |  }
433 | 425 |  #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
    | 426 | +
    | 427 | +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
    | 428 | +static inline bool pte_soft_dirty(pte_t pte)
    | 429 | +{
    | 430 | +    return pte_val(pte) & _PAGE_SOFT_DIRTY;
    | 431 | +}
    | 432 | +#define pte_swp_soft_dirty pte_soft_dirty
    | 433 | +
    | 434 | +static inline pte_t pte_mksoft_dirty(pte_t pte)
    | 435 | +{
    | 436 | +    pte_val(pte) |= _PAGE_SOFT_DIRTY;
    | 437 | +    return pte;
    | 438 | +}
    | 439 | +#define pte_swp_mksoft_dirty pte_mksoft_dirty
    | 440 | +
    | 441 | +static inline pte_t pte_clear_soft_dirty(pte_t pte)
    | 442 | +{
    | 443 | +    pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
    | 444 | +    return pte;
    | 445 | +}
    | 446 | +#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
    | 447 | +
    | 448 | +#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
    | 449 | +
434 | 450 |  #endif
435 |     | -static inline int pte_special(pte_t pte) { return 0; }
436 |     | -static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
437 | 451 |
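
Together with the _PAGE_SOFT_DIRTY bit that pte_mkdirty() now sets in the hunk above, these accessors give MIPS soft-dirty tracking, as used by checkpoint/restore through /proc/<pid>/clear_refs and pagemap. A stand-alone model of the bookkeeping, with invented bit values and a plain unsigned long standing in for pte_t:

```c
/* Stand-alone model of the soft-dirty bookkeeping; bit values are invented. */
#include <assert.h>

#define PAGE_MODIFIED   0x1UL
#define PAGE_SOFT_DIRTY 0x2UL

typedef unsigned long pte_model;

/* Mirrors pte_mkdirty(): a real dirtying also marks the page soft-dirty. */
static pte_model mkdirty(pte_model pte)
{
        return pte | PAGE_MODIFIED | PAGE_SOFT_DIRTY;
}

/* Mirrors pte_clear_soft_dirty(): tracking restarts, real dirty state stays. */
static pte_model clear_soft_dirty(pte_model pte)
{
        return pte & ~PAGE_SOFT_DIRTY;
}

int main(void)
{
        pte_model pte = 0;

        pte = mkdirty(pte);
        assert(pte & PAGE_SOFT_DIRTY);

        pte = clear_soft_dirty(pte);    /* e.g. after a clear_refs cycle */
        assert((pte & PAGE_MODIFIED) && !(pte & PAGE_SOFT_DIRTY));
        return 0;
}
```
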
438 | 452 |  /*
439 | 453 |   * Macro to make mark a page protection value as "uncacheable". Note
 .. |  .. |
464 | 478 |      return __pgprot(prot);
465 | 479 |  }
466 | 480 |
    | 481 | +static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
    | 482 | +                        unsigned long address)
    | 483 | +{
    | 484 | +}
    | 485 | +
    | 486 | +#define __HAVE_ARCH_PTE_SAME
    | 487 | +static inline int pte_same(pte_t pte_a, pte_t pte_b)
    | 488 | +{
    | 489 | +    return pte_val(pte_a) == pte_val(pte_b);
    | 490 | +}
    | 491 | +
    | 492 | +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
    | 493 | +static inline int ptep_set_access_flags(struct vm_area_struct *vma,
    | 494 | +                    unsigned long address, pte_t *ptep,
    | 495 | +                    pte_t entry, int dirty)
    | 496 | +{
    | 497 | +    if (!pte_same(*ptep, entry))
    | 498 | +        set_pte_at(vma->vm_mm, address, ptep, entry);
    | 499 | +    /*
    | 500 | +     * update_mmu_cache will unconditionally execute, handling both
    | 501 | +     * the case that the PTE changed and the spurious fault case.
    | 502 | +     */
    | 503 | +    return true;
    | 504 | +}
    | 505 | +
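
MIPS now supplies its own pte_same() and ptep_set_access_flags(): the PTE is written back only when it actually changed, and the helper always returns true because update_mmu_cache() runs unconditionally on MIPS, so the caller never needs a separate spurious-fault flush. A stand-alone model of that contract, with a plain unsigned long standing in for pte_t and an ordinary store standing in for set_pte_at():

```c
/* Minimal model of the "write back only if the entry changed" contract. */
#include <assert.h>
#include <stdbool.h>

typedef unsigned long pte_model;

static bool same(pte_model a, pte_model b)
{
        return a == b;
}

/* Returns true unconditionally, as the MIPS version does: the caller's
 * update_mmu_cache() equivalent is expected to run either way. */
static bool set_access_flags(pte_model *ptep, pte_model entry)
{
        if (!same(*ptep, entry))
                *ptep = entry;          /* stands in for set_pte_at() */
        return true;
}

int main(void)
{
        pte_model pte = 0x10;

        assert(set_access_flags(&pte, 0x30) && pte == 0x30); /* changed  */
        assert(set_access_flags(&pte, 0x30) && pte == 0x30); /* spurious */
        return 0;
}
```
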
467 | 506 |  /*
468 | 507 |   * Conversion functions: convert a page and protection to a page entry,
469 | 508 |   * and a page entry and page directory to the page they refer to.
 .. |  .. |
491 | 530 |  #else
492 | 531 |  static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
493 | 532 |  {
494 |     | -    return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
495 |     | -             (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
    | 533 | +    pte_val(pte) &= _PAGE_CHG_MASK;
    | 534 | +    pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
    | 535 | +    if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
    | 536 | +        pte_val(pte) |= _PAGE_SILENT_READ;
    | 537 | +    return pte;
496 | 538 |  }
497 | 539 |  #endif
498 | 540 |
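
pte_modify() no longer just masks and merges the new protection bits: it re-derives _PAGE_SILENT_READ (the hardware-visible read/valid bit) from _PAGE_ACCESSED and the new _PAGE_NO_READ, so an already-accessed page that becomes readable does not need an extra TLB fault just to regain its valid bit. A stand-alone model with invented bit values and a deliberately reduced change mask:

```c
/* Model of the pte_modify() change; bit values are invented for the demo. */
#include <assert.h>

#define PAGE_ACCESSED    0x01UL
#define PAGE_NO_READ     0x02UL
#define PAGE_SILENT_READ 0x04UL                 /* "valid" bit on real MIPS */
#define PAGE_CHG_MASK    (PAGE_ACCESSED)        /* bits kept across modify  */

typedef unsigned long pte_model;

static pte_model modify(pte_model pte, pte_model newprot)
{
        pte &= PAGE_CHG_MASK;
        pte |= newprot & ~PAGE_CHG_MASK;
        /* Re-derive the hardware read-valid bit from the new protection. */
        if ((pte & PAGE_ACCESSED) && !(pte & PAGE_NO_READ))
                pte |= PAGE_SILENT_READ;
        return pte;
}

int main(void)
{
        /* An accessed page moves from no-read to readable protection. */
        pte_model pte = PAGE_ACCESSED | PAGE_NO_READ;

        pte = modify(pte, 0);                   /* new prot: readable    */
        assert(pte & PAGE_SILENT_READ);         /* no extra fault needed */
        return 0;
}
```
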
 .. |  .. |
507 | 549 |      __update_tlb(vma, address, pte);
508 | 550 |  }
509 | 551 |
    | 552 | +#define __HAVE_ARCH_UPDATE_MMU_TLB
    | 553 | +#define update_mmu_tlb update_mmu_cache
    | 554 | +
510 | 555 |  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
511 | 556 |                      unsigned long address, pmd_t *pmdp)
512 | 557 |  {
 .. |  .. |
517 | 562 |
518 | 563 |  #define kern_addr_valid(addr) (1)
519 | 564 |
520 |     | -#ifdef CONFIG_PHYS_ADDR_T_64BIT
521 |     | -extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
522 |     | -
523 |     | -static inline int io_remap_pfn_range(struct vm_area_struct *vma,
524 |     | -                     unsigned long vaddr,
525 |     | -                     unsigned long pfn,
526 |     | -                     unsigned long size,
527 |     | -                     pgprot_t prot)
528 |     | -{
529 |     | -    phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
530 |     | -    return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
531 |     | -}
    | 565 | +/*
    | 566 | + * Allow physical addresses to be fixed up to help 36-bit peripherals.
    | 567 | + */
    | 568 | +#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
    | 569 | +phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
    | 570 | +int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
    | 571 | +               unsigned long pfn, unsigned long size, pgprot_t prot);
532 | 572 |  #define io_remap_pfn_range io_remap_pfn_range
533 |     | -#endif
    | 573 | +#else
    | 574 | +#define fixup_bigphys_addr(addr, size) (addr)
    | 575 | +#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
534 | 576 |
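
The io_remap_pfn_range() inline that every CONFIG_PHYS_ADDR_T_64BIT build used to carry is replaced by declarations that exist only under CONFIG_MIPS_FIXUP_BIGPHYS_ADDR; other configurations fall back to the generic io_remap_pfn_range() and an identity fixup_bigphys_addr(). The out-of-line definition presumably keeps the shape of the removed inline, roughly:

```c
/* Sketch of the now out-of-line helper, following the removed inline;
 * the real definition lives in platform code, not in this header. */
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		       unsigned long pfn, unsigned long size, pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);

	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT,
			       size, prot);
}
```
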
535 | 577 |  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
536 | 578 |
 .. |  .. |
589 | 631 |
590 | 632 |  static inline pmd_t pmd_mkdirty(pmd_t pmd)
591 | 633 |  {
592 |     | -    pmd_val(pmd) |= _PAGE_MODIFIED;
    | 634 | +    pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
593 | 635 |      if (pmd_val(pmd) & _PAGE_WRITE)
594 | 636 |          pmd_val(pmd) |= _PAGE_SILENT_WRITE;
595 | 637 |
 .. |  .. |
618 | 660 |      return pmd;
619 | 661 |  }
620 | 662 |
    | 663 | +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
    | 664 | +static inline int pmd_soft_dirty(pmd_t pmd)
    | 665 | +{
    | 666 | +    return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
    | 667 | +}
    | 668 | +
    | 669 | +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
    | 670 | +{
    | 671 | +    pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
    | 672 | +    return pmd;
    | 673 | +}
    | 674 | +
    | 675 | +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
    | 676 | +{
    | 677 | +    pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
    | 678 | +    return pmd;
    | 679 | +}
    | 680 | +
    | 681 | +#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
    | 682 | +
621 | 683 |  /* Extern to avoid header file madness */
622 | 684 |  extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
623 | 685 |
 .. |  .. |
641 | 703 |      return pmd;
642 | 704 |  }
643 | 705 |
644 |     | -static inline pmd_t pmd_mknotpresent(pmd_t pmd)
    | 706 | +static inline pmd_t pmd_mkinvalid(pmd_t pmd)
645 | 707 |  {
646 | 708 |      pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
647 | 709 |
 .. |  .. |
665 | 727 |
666 | 728 |  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
667 | 729 |
668 |     | -#include <asm-generic/pgtable.h>
669 |     | -
670 |     | -/*
671 |     | - * uncached accelerated TLB map for video memory access
672 |     | - */
673 |     | -#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
674 |     | -#define __HAVE_PHYS_MEM_ACCESS_PROT
675 |     | -
676 |     | -struct file;
677 |     | -pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
678 |     | -                  unsigned long size, pgprot_t vma_prot);
    | 730 | +#ifdef _PAGE_HUGE
    | 731 | +#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
    | 732 | +#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
679 | 733 |  #endif
    | 734 | +
    | 735 | +#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
680 | 736 |
681 | 737 |  /*
682 | 738 |   * We provide our own get_unmapped area to cope with the virtual aliasing
 .. |  .. |
684 | 740 |   */
685 | 741 |  #define HAVE_ARCH_UNMAPPED_AREA
686 | 742 |  #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
687 |     | -
688 |     | -/*
689 |     | - * No page table caches to initialise
690 |     | - */
691 |     | -#define pgtable_cache_init() do { } while (0)
692 | 743 |
693 | 744 |  #endif /* _ASM_PGTABLE_H */