...
 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
 
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
 #include <linux/bug.h>
+#include <linux/sizes.h>
 #endif
 
 /*
  * Common bits between hash and Radix page table
  */
 #define _PAGE_BIT_SWAP_TYPE	0
-
-#define _PAGE_NA		0
-#define _PAGE_RO		0
-#define _PAGE_USER		0
 
 #define _PAGE_EXEC		0x00001 /* execute permission */
 #define _PAGE_WRITE		0x00002 /* write access allowed */
...
 #define _RPAGE_SW1		0x00800
 #define _RPAGE_SW2		0x00400
 #define _RPAGE_SW3		0x00200
-#define _RPAGE_RSV1		0x1000000000000000UL
-#define _RPAGE_RSV2		0x0800000000000000UL
-#define _RPAGE_RSV3		0x0400000000000000UL
-#define _RPAGE_RSV4		0x0200000000000000UL
-#define _RPAGE_RSV5		0x00040UL
+#define _RPAGE_RSV1		0x00040UL
+
+#define _RPAGE_PKEY_BIT4	0x1000000000000000UL
+#define _RPAGE_PKEY_BIT3	0x0800000000000000UL
+#define _RPAGE_PKEY_BIT2	0x0400000000000000UL
+#define _RPAGE_PKEY_BIT1	0x0200000000000000UL
+#define _RPAGE_PKEY_BIT0	0x0100000000000000UL
 
 #define _PAGE_PTE		0x4000000000000000UL	/* distinguishes PTEs from pointers */
 #define _PAGE_PRESENT		0x8000000000000000UL	/* pte contains a translation */
...
  */
 #define _RPAGE_RPN0		0x01000
 #define _RPAGE_RPN1		0x02000
-#define _RPAGE_RPN44		0x0100000000000000UL
 #define _RPAGE_RPN43		0x0080000000000000UL
 #define _RPAGE_RPN42		0x0040000000000000UL
 #define _RPAGE_RPN41		0x0020000000000000UL
 
 /* Max physical address bit as per radix table */
-#define _RPAGE_PA_MAX		57
+#define _RPAGE_PA_MAX		56
 
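The hunk above repurposes the former reserved bits as the five memory-protection-key bits; bit 56 (the old _RPAGE_RPN44) becomes _RPAGE_PKEY_BIT0, so the top RPN bit is now _RPAGE_RPN43 (bit 55) and the maximum radix physical address drops from 57 to 56 bits: RPN bits 12..55 give 44 PFN bits, plus the 12-bit page offset. A standalone sketch of that arithmetic (names are illustrative, not the header's actual PTE_RPN_MASK derivation):

#include <stdint.h>
#include <stdio.h>

#define RPN_LOW_BIT	12	/* _RPAGE_RPN0 */
#define RPN_HIGH_BIT	55	/* _RPAGE_RPN43, now the top RPN bit */

int main(void)
{
	/* Mask covering bits RPN_LOW_BIT..RPN_HIGH_BIT of the PTE. */
	uint64_t rpn_mask = (((1ULL << (RPN_HIGH_BIT + 1)) - 1)
			     & ~((1ULL << RPN_LOW_BIT) - 1));
	int pa_max = RPN_HIGH_BIT + 1;	/* 56, matching _RPAGE_PA_MAX */

	printf("rpn_mask=0x%016llx pa_max=%d\n",
	       (unsigned long long)rpn_mask, pa_max);
	return 0;
}
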
 /*
  * Max physical address bit we will use for now.
...
 #define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
 #define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
 #define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
-#define __HAVE_ARCH_PTE_DEVMAP
 
 /*
  * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
...
 #define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
				 _PAGE_RW | _PAGE_EXEC)
 /*
- * No page size encoding in the linux PTE
- */
-#define _PAGE_PSIZE		0
-/*
  * _PAGE_CHG_MASK masks of bits that are to be preserved across
  * pgprot changes
  */
...
 				 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 
-#define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
-		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
-/*
- * Mask of bits returned by pte_pgprot()
- */
-#define PAGE_PROT_BITS  (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
-			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
-			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
-			 _PAGE_SOFT_DIRTY | H_PTE_PKEY)
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
  * pages. We always set _PAGE_COHERENT when SMP is enabled or
  * the processor might need it for DMA coherency.
  */
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE	(_PAGE_BASE_NC)
 
 /* Permission masks used to generate the __P and __S table,
...
  * Write permissions imply read permissions for now (we could make write-only
  * pages on BookE but we don't bother for now). Execute permission control is
  * possible on platforms that define _PAGE_EXEC
- *
- * Note due to the way vm flags are laid out, the bits are XWR
  */
 #define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
...
 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
 
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
...
 /* Bits to mask out from a PUD to get to the PMD page */
 #define PUD_MASKED_BITS		0xc0000000000000ffUL
 /* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS		0xc0000000000000ffUL
+#define P4D_MASKED_BITS		0xc0000000000000ffUL
 
 /*
  * Used as an indicator for rcu callback functions
...
 #define VMALLOC_START	__vmalloc_start
 #define VMALLOC_END	__vmalloc_end
 
+static inline unsigned int ioremap_max_order(void)
+{
+	if (radix_enabled())
+		return PUD_SHIFT;
+	return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
+}
+#define IOREMAP_MAX_ORDER ioremap_max_order()
+
 extern unsigned long __kernel_virt_start;
-extern unsigned long __kernel_virt_size;
 extern unsigned long __kernel_io_start;
+extern unsigned long __kernel_io_end;
 #define KERN_VIRT_START __kernel_virt_start
-#define KERN_VIRT_SIZE	__kernel_virt_size
 #define KERN_IO_START	__kernel_io_start
+#define KERN_IO_END	__kernel_io_end
+
 extern struct page *vmemmap;
-extern unsigned long ioremap_bot;
 extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>
 #include <asm/book3s/64/radix.h>
+
+#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS
+#define MAX_PHYSMEM_BITS	H_MAX_PHYSMEM_BITS
+#else
+#define MAX_PHYSMEM_BITS	R_MAX_PHYSMEM_BITS
+#endif
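Since both hash.h and radix.h are always included, MAX_PHYSMEM_BITS can be picked at preprocessing time as the larger of the two per-MMU limits; IOREMAP_MAX_ORDER, by contrast, must be a runtime choice because radix_enabled() is only known at boot (the fallback of 7 + PAGE_SHIFT mirrors the generic default, per the code comment). A minimal standalone model of the compile-time max pattern (the two constants below are made up, not the real limits):

#include <stdio.h>

/* Stand-ins for the per-MMU limits pulled in from hash.h and radix.h. */
#define H_MAX_PHYSMEM_BITS	51
#define R_MAX_PHYSMEM_BITS	56

/* Pick the larger limit at preprocessing time, as the hunk above does. */
#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS
#define MAX_PHYSMEM_BITS	H_MAX_PHYSMEM_BITS
#else
#define MAX_PHYSMEM_BITS	R_MAX_PHYSMEM_BITS
#endif

int main(void)
{
	printf("MAX_PHYSMEM_BITS = %d\n", MAX_PHYSMEM_BITS); /* 56 */
	return 0;
}
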
+
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/pgtable-64k.h>
...
 
 #include <asm/barrier.h>
 /*
- * The second half of the kernel virtual space is used for IO mappings,
- * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * IO space itself carved into the PIO region (ISA and PHB IO space) and
  * the ioremap space
  *
  * ISA_IO_BASE	= KERN_IO_START, 64K reserved area
...
 #define PHB_IO_BASE	(ISA_IO_END)
 #define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
-#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
+#define IOREMAP_START	(ioremap_bot)
+#define IOREMAP_END	(KERN_IO_END - FIXADDR_SIZE)
+#define FIXADDR_SIZE	SZ_32M
 
 /* Advertise special mapping type for AGP */
 #define HAVE_PAGE_AGP
...
 	pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
 }
 
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
 {
...
 	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
 }
 
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte)
+{
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
+}
+
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline bool pte_soft_dirty(pte_t pte)
...
 
 static inline pte_t pte_mksoft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
 }
 
 static inline pte_t pte_clear_soft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
...
 	 */
	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
-	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
 }
 
 #define pte_clear_savedwrite pte_clear_savedwrite
...
 	 * Used by KSM subsystem to make a protnone pte readonly.
	 */
	VM_BUG_ON(!pte_protnone(pte));
-	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
 }
 #else
 #define pte_clear_savedwrite pte_clear_savedwrite
 static inline pte_t pte_clear_savedwrite(pte_t pte)
 {
	VM_WARN_ON(1);
-	return __pte(pte_val(pte) & ~_PAGE_WRITE);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
 }
 #endif /* CONFIG_NUMA_BALANCING */
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
+		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
 
 static inline int pte_present(pte_t pte)
 {
...
 	 * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
	 * if we find _PAGE_PRESENT cleared.
	 */
-	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
+
+	if (pte_hw_valid(pte))
+		return true;
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
+		cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
 }
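pte_present() now also requires _PAGE_PTE alongside either the hardware-valid bit or the software _PAGE_INVALID bit, so a swap entry or stray pointer no longer reads as present. A userspace model of the new predicate, ignoring the big-endian raw storage the kernel version works on (_PAGE_PTE and _PAGE_PRESENT values are from this header; _PAGE_INVALID is defined outside the hunk, so its value here is assumed):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X_PAGE_PTE	0x4000000000000000ULL	/* from this header */
#define X_PAGE_PRESENT	0x8000000000000000ULL	/* from this header */
#define X_PAGE_INVALID	0x2000000000000000ULL	/* assumed software bit */

static bool x_pte_present(uint64_t pte)
{
	/* Hardware valid: both _PAGE_PRESENT and _PAGE_PTE set. */
	if ((pte & (X_PAGE_PRESENT | X_PAGE_PTE)) ==
	    (X_PAGE_PRESENT | X_PAGE_PTE))
		return true;
	/* Marked invalid during ptep_set_access_flags, but still a PTE. */
	return (pte & (X_PAGE_INVALID | X_PAGE_PTE)) ==
	       (X_PAGE_INVALID | X_PAGE_PTE);
}

int main(void)
{
	printf("%d %d %d\n",
	       x_pte_present(X_PAGE_PRESENT | X_PAGE_PTE),	/* 1 */
	       x_pte_present(X_PAGE_INVALID | X_PAGE_PTE),	/* 1 */
	       x_pte_present(X_PAGE_PRESENT));			/* 0: no _PAGE_PTE */
	return 0;
}
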
 
 #ifdef CONFIG_PPC_MEM_KEYS
...
 }
 #endif /* CONFIG_PPC_MEM_KEYS */
 
+static inline bool pte_user(pte_t pte)
+{
+	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
-	unsigned long pteval = pte_val(pte);
-	/* Also check for pte_user */
-	unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
	/*
	 * _PAGE_READ is needed for any access and will be
	 * cleared for PROT_NONE
	 */
-	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;
-
-	if (write)
-		need_pte_bits |= _PAGE_WRITE;
-
-	if ((pteval & need_pte_bits) != need_pte_bits)
+	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;
 
-	if ((pteval & clear_pte_bits) == clear_pte_bits)
+	if (write && !pte_write(pte))
		return false;
 
	return arch_pte_access_permitted(pte_val(pte), write, 0);
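The rewritten pte_access_permitted() expresses the old bit arithmetic through named helpers: the mapping must be present, a user (non-privileged) mapping, and readable, with writability checked only for stores. A standalone model of that short-circuit shape (the flag values below are hypothetical, for the model only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_PRESENT	0x1u
#define F_READ		0x2u
#define F_WRITE		0x4u
#define F_PRIVILEGED	0x8u

/* Mirrors the rewritten check: readable user mapping, plus write if asked. */
static bool access_permitted(uint32_t pte, bool write)
{
	if (!(pte & F_PRESENT) || (pte & F_PRIVILEGED) || !(pte & F_READ))
		return false;
	if (write && !(pte & F_WRITE))
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       access_permitted(F_PRESENT | F_READ, false),		/* 1 */
	       access_permitted(F_PRESENT | F_READ, true),		/* 0: no write bit */
	       access_permitted(F_PRESENT | F_READ | F_PRIVILEGED, false)); /* 0: kernel page */
	return 0;
}
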
...
  */
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 {
-	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
-		     pgprot_val(pgprot));
+	VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
+	VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
+
+	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
 }
 
 static inline unsigned long pte_pfn(pte_t pte)
...
 {
	if (unlikely(pte_savedwrite(pte)))
		return pte_clear_savedwrite(pte);
-	return __pte(pte_val(pte) & ~_PAGE_WRITE);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
...
 	/*
	 * write implies read, hence set both
	 */
-	return __pte(pte_val(pte) | _PAGE_RW);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
 }
 
 static inline pte_t pte_mkhuge(pte_t pte)
...
 
 static inline pte_t pte_mkdevmap(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
+}
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
 }
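The pte_mk*() conversions in the hunks above all follow one pattern: instead of converting the stored big-endian PTE to CPU order with pte_val(), applying a CPU-order flag, and converting back with __pte(), they operate on the raw value and byte-swap the constant with cpu_to_be64(), which the compiler folds at build time; on little-endian kernels this removes two runtime byte swaps per helper. A minimal userspace model of why the two forms agree (the names below are stand-ins, not kernel API):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_DIRTY 0x00080UL		/* stand-in for a PTE flag */

/* PTEs are stored big-endian, as with the hash MMU. */
typedef struct { uint64_t raw; } my_pte_t;

/* Old style: convert to CPU order, set the bit, convert back. */
static my_pte_t set_dirty_val(my_pte_t pte)
{
	uint64_t v = be64toh(pte.raw);		/* runtime swap on LE hosts */
	v |= MY_PAGE_DIRTY;
	return (my_pte_t){ htobe64(v) };	/* and swap back */
}

/* New style: swap the constant instead; htobe64(const) folds at build time. */
static my_pte_t set_dirty_raw(my_pte_t pte)
{
	return (my_pte_t){ pte.raw | htobe64(MY_PAGE_DIRTY) };
}

int main(void)
{
	my_pte_t pte = { htobe64(0x8000000000000001ULL) };

	/* Bitwise OR commutes with the byte swap, so both forms match. */
	printf("%d\n", set_dirty_val(pte).raw == set_dirty_raw(pte).raw); /* 1 */
	return 0;
}
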
 
 /*
...
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
	/* FIXME!! check whether this need to be a conditional */
-	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
-
-static inline bool pte_user(pte_t pte)
-{
-	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+	return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
+			 cpu_to_be64(pgprot_val(newprot)));
 }
 
 /* Encode and de-code a swap entry */
...
 	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
 } while (0)
-/*
- * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
+
 #define SWP_TYPE_BITS 5
 #define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
...
  */
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
 #define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
+#define __pmd_to_swp_entry(pmd)	(__pte_to_swp_entry(pmd_pte(pmd)))
+#define __swp_entry_to_pmd(x)	(pte_pmd(__swp_entry_to_pte(x)))
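The two new macros give pmd-level swap entries (needed by the THP-migration support enabled further down) the same encoding as pte-level ones: _PAGE_PTE is stripped when converting to a swp_entry_t and set again on the way back. A small model of that round trip (the bit value is from this header; the helpers are stand-ins, not the kernel macros):

#include <assert.h>
#include <stdint.h>

#define X_PAGE_PTE	0x4000000000000000ULL	/* from this header */

/* Stand-ins for __pte_to_swp_entry() / __swp_entry_to_pte(). */
static uint64_t to_swp(uint64_t pte) { return pte & ~X_PAGE_PTE; }
static uint64_t to_pte(uint64_t swp) { return swp | X_PAGE_PTE; }

int main(void)
{
	uint64_t entry = 0x12345ULL << 12;	/* type+offset payload */

	/* Round trip preserves the payload and re-marks the word as a PTE. */
	assert(to_pte(to_swp(to_pte(entry))) == (entry | X_PAGE_PTE));
	return 0;
}
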
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
...
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
 }
 
 static inline bool pte_swp_soft_dirty(pte_t pte)
...
 
 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
...
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte, int percpu)
 {
+
+	VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE)));
+	/*
+	 * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE
+	 * in all the callers.
+	 */
+	pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
+
	if (radix_enabled())
		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
 }
 
-#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+#define _PAGE_CACHE_CTL	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
 
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
...
  */
 static inline bool pte_ci(pte_t pte)
 {
-	unsigned long pte_v = pte_val(pte);
+	__be64 pte_v = pte_raw(pte);
 
-	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
-	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
+	if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
+	    ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
		return true;
	return false;
 }
 
-static inline void pmd_set(pmd_t *pmdp, unsigned long val)
-{
-	*pmdp = __pmd(val);
-}
-
 static inline void pmd_clear(pmd_t *pmdp)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
+		/*
+		 * Don't use this if we can possibly have a hash page table
+		 * entry mapping this.
+		 */
+		WARN_ON((pmd_val(*pmdp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
+	}
	*pmdp = __pmd(0);
 }
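Adding _PAGE_SAO to _PAGE_CACHE_CTL means pte_ci() now masks all three cache-control values before comparing, so an SAO mapping can no longer be mistaken for a cache-inhibited one. Both this and the new WARN_ON in pmd_clear() rely on the same masked-compare idiom, sketched here with made-up bit values:

#include <stdio.h>

int main(void)
{
	unsigned v1 = 0x1;		/* only A set */
	unsigned v2 = 0x1 | 0x4;	/* A plus C set */

	/* (v & (A|B|C)) == A: true only when A is set and B, C are clear;
	 * (v & (X|Y)) == (X|Y), as in the WARN_ON, needs both bits set. */
	printf("%d %d\n", (v1 & 0x7) == 0x1, (v2 & 0x7) == 0x1); /* 1 0 */
	return 0;
}
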
870 | 890 | |
---|
.. | .. |
---|
875 | 895 | |
---|
876 | 896 | static inline int pmd_present(pmd_t pmd) |
---|
877 | 897 | { |
---|
| 898 | + /* |
---|
| 899 | + * A pmd is considerent present if _PAGE_PRESENT is set. |
---|
| 900 | + * We also need to consider the pmd present which is marked |
---|
| 901 | + * invalid during a split. Hence we look for _PAGE_INVALID |
---|
| 902 | + * if we find _PAGE_PRESENT cleared. |
---|
| 903 | + */ |
---|
| 904 | + if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) |
---|
| 905 | + return true; |
---|
878 | 906 | |
---|
879 | | - return !pmd_none(pmd); |
---|
| 907 | + return false; |
---|
| 908 | +} |
---|
| 909 | + |
---|
| 910 | +static inline int pmd_is_serializing(pmd_t pmd) |
---|
| 911 | +{ |
---|
| 912 | + /* |
---|
| 913 | + * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear |
---|
| 914 | + * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate). |
---|
| 915 | + * |
---|
| 916 | + * This condition may also occur when flushing a pmd while flushing |
---|
| 917 | + * it (see ptep_modify_prot_start), so callers must ensure this |
---|
| 918 | + * case is fine as well. |
---|
| 919 | + */ |
---|
| 920 | + if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) == |
---|
| 921 | + cpu_to_be64(_PAGE_INVALID)) |
---|
| 922 | + return true; |
---|
| 923 | + |
---|
| 924 | + return false; |
---|
880 | 925 | } |
---|
881 | 926 | |
---|
882 | 927 | static inline int pmd_bad(pmd_t pmd) |
---|
...
 	return hash__pmd_bad(pmd);
 }
 
-static inline void pud_set(pud_t *pudp, unsigned long val)
-{
-	*pudp = __pud(val);
-}
-
 static inline void pud_clear(pud_t *pudp)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
+		/*
+		 * Don't use this if we can possibly have a hash page table
+		 * entry mapping this.
+		 */
+		WARN_ON((pud_val(*pudp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
+	}
	*pudp = __pud(0);
 }
 
...
 
 static inline int pud_present(pud_t pud)
 {
-	return !pud_none(pud);
+	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
 }
 
 extern struct page *pud_page(pud_t pud);
...
 	return pte_access_permitted(pud_pte(pud), write);
 }
 
-#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
-static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+#define __p4d_raw(x)	((p4d_t) { __pgd_raw(x) })
+static inline __be64 p4d_raw(p4d_t x)
 {
-	*pgdp = __pgd(val);
+	return pgd_raw(x.pgd);
 }
 
-static inline void pgd_clear(pgd_t *pgdp)
+#define p4d_write(p4d)		pte_write(p4d_pte(p4d))
+
+static inline void p4d_clear(p4d_t *p4dp)
 {
-	*pgdp = __pgd(0);
+	*p4dp = __p4d(0);
 }
 
-static inline int pgd_none(pgd_t pgd)
+static inline int p4d_none(p4d_t p4d)
 {
-	return !pgd_raw(pgd);
+	return !p4d_raw(p4d);
 }
 
-static inline int pgd_present(pgd_t pgd)
+static inline int p4d_present(p4d_t p4d)
 {
-	return !pgd_none(pgd);
+	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
 }
 
-static inline pte_t pgd_pte(pgd_t pgd)
+static inline pte_t p4d_pte(p4d_t p4d)
 {
-	return __pte_raw(pgd_raw(pgd));
+	return __pte_raw(p4d_raw(p4d));
 }
 
-static inline pgd_t pte_pgd(pte_t pte)
+static inline p4d_t pte_p4d(pte_t pte)
 {
-	return __pgd_raw(pte_raw(pte));
+	return __p4d_raw(pte_raw(pte));
 }
 
-static inline int pgd_bad(pgd_t pgd)
+static inline int p4d_bad(p4d_t p4d)
 {
	if (radix_enabled())
-		return radix__pgd_bad(pgd);
-	return hash__pgd_bad(pgd);
+		return radix__p4d_bad(p4d);
+	return hash__p4d_bad(p4d);
 }
 
-#define pgd_access_permitted pgd_access_permitted
-static inline bool pgd_access_permitted(pgd_t pgd, bool write)
+#define p4d_access_permitted p4d_access_permitted
+static inline bool p4d_access_permitted(p4d_t p4d, bool write)
 {
-	return pte_access_permitted(pgd_pte(pgd), write);
+	return pte_access_permitted(p4d_pte(p4d), write);
 }
 
-extern struct page *pgd_page(pgd_t pgd);
+extern struct page *p4d_page(p4d_t p4d);
 
 /* Pointers in the page table tree are physical addresses */
 #define __pgtable_ptr_val(ptr)	__pa(ptr)
 
-#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
-#define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-static inline unsigned long pgd_index(unsigned long address)
+static inline pud_t *p4d_pgtable(p4d_t p4d)
 {
-	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
+	return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
 }
 
-static inline unsigned long pud_index(unsigned long address)
+static inline pmd_t *pud_pgtable(pud_t pud)
 {
-	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+	return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
 }
-
-static inline unsigned long pmd_index(unsigned long address)
-{
-	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-}
-
-static inline unsigned long pte_index(unsigned long address)
-{
-	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-}
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-#define pud_offset(pgdp, addr) \
-	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
-	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
-	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)			do { } while(0)
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
 
 #define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
...
 #define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-static inline int map_kernel_page(unsigned long ea, unsigned long pa,
-				  unsigned long flags)
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
	if (radix_enabled()) {
 #if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
 #endif
-		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+		return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
	}
-	return hash__map_kernel_page(ea, pa, flags);
+	return hash__map_kernel_page(ea, pa, prot);
 }
+
+void unmap_kernel_page(unsigned long va);
 
 static inline int __meminit vmemmap_create_mapping(unsigned long start,
						    unsigned long page_size,
...
 #define pmd_soft_dirty(pmd)		pte_soft_dirty(pmd_pte(pmd))
 #define pmd_mksoft_dirty(pmd)		pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
 #define pmd_clear_soft_dirty(pmd)	pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define pmd_swp_mksoft_dirty(pmd)	pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_swp_soft_dirty(pmd)		pte_swp_soft_dirty(pmd_pte(pmd))
+#define pmd_swp_clear_soft_dirty(pmd)	pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
+#endif
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 #ifdef CONFIG_NUMA_BALANCING
...
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 {
+	/*
+	 * pmdp_invalidate sets this combination (which is not caught by
+	 * !pte_present() check in pte_access_permitted), to prevent
+	 * lock-free lookups, as part of the serialize_against_pte_lookup()
+	 * synchronisation.
+	 *
+	 * This also catches the case where the PTE's hardware PRESENT bit is
+	 * cleared while TLB is flushed, which is suboptimal but should not
+	 * be frequent.
+	 */
+	if (pmd_is_serializing(pmd))
+		return false;
+
	return pte_access_permitted(pmd_pte(pmd), write);
 }
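pmd_is_serializing() distinguishes the transient state pmdp_invalidate() creates (_PAGE_INVALID set, _PAGE_PRESENT clear) from both a valid pmd and a none pmd, and pmd_access_permitted() now refuses it, so lock-free lookups cannot race with a THP split or collapse. The three states in a small model (_PAGE_PRESENT is from this header; the _PAGE_INVALID value is assumed, as above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X_PRESENT	0x8000000000000000ULL	/* from this header */
#define X_INVALID	0x2000000000000000ULL	/* assumed software bit */

static bool present(uint64_t pmd) { return pmd & (X_PRESENT | X_INVALID); }

static bool serializing(uint64_t pmd)
{
	return (pmd & (X_PRESENT | X_INVALID)) == X_INVALID;
}

int main(void)
{
	uint64_t valid = X_PRESENT, splitting = X_INVALID, none = 0;

	/* valid: present only; splitting: present and serializing; none: neither */
	printf("%d%d %d%d %d%d\n",
	       present(valid), serializing(valid),		/* 1 0 */
	       present(splitting), serializing(splitting),	/* 1 1 */
	       present(none), serializing(none));		/* 0 0 */
	return 0;
}
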
 
...
 extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-				 pmd_t *pmd);
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+					unsigned long addr, pmd_t *pmd)
+{
+}
+
 extern int hash__has_transparent_hugepage(void);
 static inline int has_transparent_hugepage(void)
 {
...
 	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
 }
 
+/*
+ * returns true for pmd migration entries, THP, devmap, hugetlb
+ * But compile time dependent on THP config
+ */
 static inline int pmd_large(pmd_t pmd)
 {
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
 }
 
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
-}
 /*
  * For radix we should always find H_PAGE_HASHPTE zero. Hence
  * the below will work for radix too
...
 	pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
+/*
+ * Only returns true for a THP. False for pmd migration entry.
+ * We also need to return true when we come across a pte that is
+ * in between a thp split. While splitting THP, we mark the pmd
+ * invalid (pmdp_invalidate()) before we set it with pte page
+ * address. A pmd_trans_huge() check against a pmd entry during that time
+ * should return true.
+ * We should not call this on a hugetlb entry. We should check for HugeTLB
+ * entry using vma->vm_flags
+ * The page table walk rule is explained in Documentation/vm/transhuge.rst
+ */
 static inline int pmd_trans_huge(pmd_t pmd)
 {
+	if (!pmd_present(pmd))
+		return false;
+
	if (radix_enabled())
		return radix__pmd_trans_huge(pmd);
	return hash__pmd_trans_huge(pmd);
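The pmd_present() guard matters because __swp_entry_to_pmd() above sets _PAGE_PTE, so a pmd migration entry would satisfy the underlying _PAGE_PTE test on its own; filtering out non-present pmds first keeps pmd_trans_huge() true only for real THPs, including the invalid-during-split state, which pmd_present() still reports as present. Sketched as a model (flag values as in the earlier sketches, _PAGE_INVALID assumed):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X_PTE		0x4000000000000000ULL	/* from this header */
#define X_PRESENT	0x8000000000000000ULL	/* from this header */
#define X_INVALID	0x2000000000000000ULL	/* assumed software bit */

static bool pmd_present_(uint64_t pmd) { return pmd & (X_PRESENT | X_INVALID); }

/* New check: _PAGE_PTE alone no longer suffices. */
static bool trans_huge(uint64_t pmd)
{
	return pmd_present_(pmd) && (pmd & X_PTE);
}

int main(void)
{
	uint64_t thp = X_PRESENT | X_PTE;	/* mapped THP */
	uint64_t migration = X_PTE;		/* swap-encoded pmd: only _PAGE_PTE */

	printf("%d %d\n", trans_huge(thp), trans_huge(migration)); /* 1 0 */
	return 0;
}
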
...
 	return hash__pmdp_collapse_flush(vma, address, pmdp);
 }
 #define pmdp_collapse_flush pmdp_collapse_flush
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+				   unsigned long addr,
+				   pmd_t *pmdp, int full);
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
...
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline const int pud_pfn(pud_t pud)
+static inline int pud_pfn(pud_t pud)
 {
	/*
	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
...
 	BUILD_BUG();
	return 0;
 }
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+			     pte_t *, pte_t, pte_t);
+
+/*
+ * Returns true for a R -> RW upgrade of pte
+ */
+static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
+{
+	if (!(old_val & _PAGE_READ))
+		return false;
+
+	if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE))
+		return true;
+
+	return false;
+}
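is_pte_rw_upgrade() takes CPU-order pte values and reports only the read-only-to-writable transition; downgrades and unrelated bit changes return false, letting callers skip synchronisation that only an upgrade needs. For instance (_PAGE_WRITE is from the top of this diff; the _PAGE_READ value is assumed, as it is defined outside the shown hunks):

#include <stdbool.h>
#include <stdio.h>

#define X_PAGE_WRITE	0x00002UL	/* from this header */
#define X_PAGE_READ	0x00004UL	/* assumed */

/* Stand-in mirroring is_pte_rw_upgrade(). */
static bool is_rw_upgrade(unsigned long old_val, unsigned long new_val)
{
	if (!(old_val & X_PAGE_READ))
		return false;
	return !(old_val & X_PAGE_WRITE) && (new_val & X_PAGE_WRITE);
}

int main(void)
{
	unsigned long ro = X_PAGE_READ, rw = X_PAGE_READ | X_PAGE_WRITE;

	printf("%d %d\n", is_rw_upgrade(ro, rw),	/* 1: R -> RW */
			  is_rw_upgrade(rw, ro));	/* 0: downgrade */
	return 0;
}
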
+
+/*
+ * Like pmd_huge() and pmd_large(), but works regardless of config options
+ */
+#define pmd_is_leaf pmd_is_leaf
+#define pmd_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pud_is_leaf pud_is_leaf
+#define pud_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define p4d_is_leaf p4d_is_leaf
+#define p4d_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
+{
+	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE));
+}
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */