.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (C) 2009 Red Hat, Inc. |
---|
3 | | - * |
---|
4 | | - * This work is licensed under the terms of the GNU GPL, version 2. See |
---|
5 | | - * the COPYING file in the top-level directory. |
---|
6 | 4 | */ |
---|
7 | 5 | |
---|
8 | 6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
---|
.. | .. |
---|
33 | 31 | #include <linux/page_idle.h> |
---|
34 | 32 | #include <linux/shmem_fs.h> |
---|
35 | 33 | #include <linux/oom.h> |
---|
| 34 | +#include <linux/numa.h> |
---|
36 | 35 | #include <linux/page_owner.h> |
---|
37 | | - |
---|
| 36 | +#include <trace/hooks/mm.h> |
---|
38 | 37 | #include <asm/tlb.h> |
---|
39 | 38 | #include <asm/pgalloc.h> |
---|
40 | 39 | #include "internal.h" |
---|
.. | .. |
---|
64 | 63 | struct page *huge_zero_page __read_mostly; |
---|
65 | 64 | unsigned long huge_zero_pfn __read_mostly = ~0UL; |
---|
66 | 65 | |
---|
67 | | -bool transparent_hugepage_enabled(struct vm_area_struct *vma) |
---|
| 66 | +static inline bool file_thp_enabled(struct vm_area_struct *vma) |
---|
68 | 67 | { |
---|
| 68 | + return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file && |
---|
| 69 | + !inode_is_open_for_write(vma->vm_file->f_inode) && |
---|
| 70 | + (vma->vm_flags & VM_EXEC); |
---|
| 71 | +} |
---|
| 72 | + |
---|
| 73 | +bool transparent_hugepage_active(struct vm_area_struct *vma) |
---|
| 74 | +{ |
---|
| 75 | + /* The addr is used to check if the vma size fits */ |
---|
| 76 | + unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; |
---|
| 77 | + |
---|
| 78 | + if (!transhuge_vma_suitable(vma, addr)) |
---|
| 79 | + return false; |
---|
69 | 80 | if (vma_is_anonymous(vma)) |
---|
70 | 81 | return __transparent_hugepage_enabled(vma); |
---|
71 | | - if (vma_is_shmem(vma) && shmem_huge_enabled(vma)) |
---|
72 | | - return __transparent_hugepage_enabled(vma); |
---|
| 82 | + if (vma_is_shmem(vma)) |
---|
| 83 | + return shmem_huge_enabled(vma); |
---|
| 84 | + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) |
---|
| 85 | + return file_thp_enabled(vma); |
---|
73 | 86 | |
---|
74 | 87 | return false; |
---|
75 | 88 | } |
---|
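The rework above replaces transparent_hugepage_enabled() with transparent_hugepage_active(): the VMA must first pass the size and alignment gate (transhuge_vma_suitable()), and only then is the decision made per VMA type, namely anonymous memory, shmem, or, when CONFIG_READ_ONLY_THP_FOR_FS is enabled, read-only executable file mappings via the new file_thp_enabled() helper. A minimal sketch of that decision order, using hypothetical stand-in predicates rather than the kernel's real helpers:

    /* Illustrative only: the fields below stand in for the kernel checks. */
    #include <stdbool.h>

    struct vma_checks {
        bool suitable;          /* transhuge_vma_suitable(): fits and is aligned */
        bool anonymous;         /* vma_is_anonymous() */
        bool anon_thp_ok;       /* __transparent_hugepage_enabled() */
        bool shmem;             /* vma_is_shmem() */
        bool shmem_huge_ok;     /* shmem_huge_enabled() */
        bool read_only_thp_fs;  /* CONFIG_READ_ONLY_THP_FOR_FS */
        bool file_thp_ok;       /* file_thp_enabled(): exec + read-only file */
    };

    static bool thp_active(const struct vma_checks *v)
    {
        if (!v->suitable)               /* the size/alignment gate comes first */
            return false;
        if (v->anonymous)
            return v->anon_thp_ok;
        if (v->shmem)
            return v->shmem_huge_ok;    /* shmem policy now decides on its own */
        if (v->read_only_thp_fs)
            return v->file_thp_ok;
        return false;
    }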
.. | .. |
---|
302 | 315 | static struct kobj_attribute hpage_pmd_size_attr = |
---|
303 | 316 | __ATTR_RO(hpage_pmd_size); |
---|
304 | 317 | |
---|
305 | | -#ifdef CONFIG_DEBUG_VM |
---|
306 | | -static ssize_t debug_cow_show(struct kobject *kobj, |
---|
307 | | - struct kobj_attribute *attr, char *buf) |
---|
308 | | -{ |
---|
309 | | - return single_hugepage_flag_show(kobj, attr, buf, |
---|
310 | | - TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); |
---|
311 | | -} |
---|
312 | | -static ssize_t debug_cow_store(struct kobject *kobj, |
---|
313 | | - struct kobj_attribute *attr, |
---|
314 | | - const char *buf, size_t count) |
---|
315 | | -{ |
---|
316 | | - return single_hugepage_flag_store(kobj, attr, buf, count, |
---|
317 | | - TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); |
---|
318 | | -} |
---|
319 | | -static struct kobj_attribute debug_cow_attr = |
---|
320 | | - __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); |
---|
321 | | -#endif /* CONFIG_DEBUG_VM */ |
---|
322 | | - |
---|
323 | 318 | static struct attribute *hugepage_attr[] = { |
---|
324 | 319 | &enabled_attr.attr, |
---|
325 | 320 | &defrag_attr.attr, |
---|
326 | 321 | &use_zero_page_attr.attr, |
---|
327 | 322 | &hpage_pmd_size_attr.attr, |
---|
328 | | -#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) |
---|
| 323 | +#ifdef CONFIG_SHMEM |
---|
329 | 324 | &shmem_enabled_attr.attr, |
---|
330 | | -#endif |
---|
331 | | -#ifdef CONFIG_DEBUG_VM |
---|
332 | | - &debug_cow_attr.attr, |
---|
333 | 325 | #endif |
---|
334 | 326 | NULL, |
---|
335 | 327 | }; |
---|
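This attribute table loses the CONFIG_DEBUG_VM-only debug_cow knob and now exposes shmem_enabled whenever CONFIG_SHMEM is set (the old CONFIG_TRANSPARENT_HUGE_PAGECACHE condition is gone). The surviving files live under /sys/kernel/mm/transparent_hugepage/; for example, userspace can query the PMD THP size through the hpage_pmd_size attribute kept above:

    /* Print the PMD-sized THP size exported via sysfs. */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size";
        unsigned long long sz;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%llu", &sz) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("PMD THP size: %llu bytes\n", sz);
        return 0;
    }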
.. | .. |
---|
392 | 384 | struct kobject *hugepage_kobj; |
---|
393 | 385 | |
---|
394 | 386 | if (!has_transparent_hugepage()) { |
---|
395 | | - transparent_hugepage_flags = 0; |
---|
| 387 | + /* |
---|
| 388 | + * Hardware doesn't support hugepages, hence disable |
---|
| 389 | + * DAX PMD support. |
---|
| 390 | + */ |
---|
| 391 | + transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX; |
---|
396 | 392 | return -EINVAL; |
---|
397 | 393 | } |
---|
398 | 394 | |
---|
.. | .. |
---|
426 | 422 | * where the extra memory used could hurt more than TLB overhead |
---|
427 | 423 | * is likely to save. The admin can still enable it through /sys. |
---|
428 | 424 | */ |
---|
429 | | - if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { |
---|
| 425 | + if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { |
---|
430 | 426 | transparent_hugepage_flags = 0; |
---|
431 | 427 | return 0; |
---|
432 | 428 | } |
---|
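The only change in this hunk is that totalram_pages became the totalram_pages() accessor; the threshold itself is untouched. For reference, 512 << (20 - PAGE_SHIFT) is simply 512 MiB expressed in pages. With the common 4 KiB page size:

    /* Worked out for PAGE_SHIFT == 12 (4 KiB pages). */
    #define PAGE_SHIFT    12
    #define THP_MIN_PAGES (512UL << (20 - PAGE_SHIFT))   /* 512 << 8  = 131072 pages */
    #define THP_MIN_BYTES (THP_MIN_PAGES << PAGE_SHIFT)  /* 131072 * 4096 = 512 MiB  */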
.. | .. |
---|
487 | 483 | return pmd; |
---|
488 | 484 | } |
---|
489 | 485 | |
---|
490 | | -static inline struct list_head *page_deferred_list(struct page *page) |
---|
| 486 | +#ifdef CONFIG_MEMCG |
---|
| 487 | +static inline struct deferred_split *get_deferred_split_queue(struct page *page) |
---|
491 | 488 | { |
---|
492 | | - /* ->lru in the tail pages is occupied by compound_head. */ |
---|
493 | | - return &page[2].deferred_list; |
---|
| 489 | + struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; |
---|
| 490 | + struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); |
---|
| 491 | + |
---|
| 492 | + if (memcg) |
---|
| 493 | + return &memcg->deferred_split_queue; |
---|
| 494 | + else |
---|
| 495 | + return &pgdat->deferred_split_queue; |
---|
494 | 496 | } |
---|
| 497 | +#else |
---|
| 498 | +static inline struct deferred_split *get_deferred_split_queue(struct page *page) |
---|
| 499 | +{ |
---|
| 500 | + struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); |
---|
| 501 | + |
---|
| 502 | + return &pgdat->deferred_split_queue; |
---|
| 503 | +} |
---|
| 504 | +#endif |
---|
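With CONFIG_MEMCG, deferred THP splits are now queued on the owning memory cgroup when the page is charged to one, and only fall back to the per-node queue otherwise; without memcg the per-node queue is always used. A condensed restatement of the selection, with stand-in types instead of struct mem_cgroup and struct pglist_data:

    /* Stand-in restatement of get_deferred_split_queue() above. */
    struct split_queue_stub { int dummy; };

    static struct split_queue_stub *
    pick_split_queue(struct split_queue_stub *memcg_queue, /* page's memcg, may be NULL */
                     struct split_queue_stub *node_queue)  /* NODE_DATA(page_to_nid(page)) */
    {
        /* Charged pages split on their memcg's queue; everything else per node. */
        return memcg_queue ? memcg_queue : node_queue;
    }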
495 | 505 | |
---|
496 | 506 | void prep_transhuge_page(struct page *page) |
---|
497 | 507 | { |
---|
.. | .. |
---|
503 | 513 | INIT_LIST_HEAD(page_deferred_list(page)); |
---|
504 | 514 | set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); |
---|
505 | 515 | } |
---|
| 516 | + |
---|
| 517 | +bool is_transparent_hugepage(struct page *page) |
---|
| 518 | +{ |
---|
| 519 | + if (!PageCompound(page)) |
---|
| 520 | + return false; |
---|
| 521 | + |
---|
| 522 | + page = compound_head(page); |
---|
| 523 | + return is_huge_zero_page(page) || |
---|
| 524 | + page[1].compound_dtor == TRANSHUGE_PAGE_DTOR; |
---|
| 525 | +} |
---|
| 526 | +EXPORT_SYMBOL_GPL(is_transparent_hugepage); |
---|
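is_transparent_hugepage() gives callers a cheap test for THP-backed compound pages: either the huge zero page or a head page carrying TRANSHUGE_PAGE_DTOR. From userspace the closest observable signal is the AnonHugePages field in /proc/<pid>/smaps. A rough check, assuming the 2 MiB PMD size used on x86-64 (real code should read hpage_pmd_size instead of hard-coding it):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t thp = 2UL << 20;               /* assumed PMD THP size */
        size_t len = 2 * thp;                 /* oversize so an aligned 2 MiB window exists */
        char line[256];
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        FILE *f;

        if (p == MAP_FAILED)
            return 1;
        (void)madvise(p, len, MADV_HUGEPAGE); /* a hint, not a guarantee */
        memset(p, 0, len);                    /* fault the range in */

        f = fopen("/proc/self/smaps", "r");
        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "AnonHugePages:"))
                fputs(line, stdout);          /* a non-zero entry means THP is in use */
        fclose(f);
        return 0;
    }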
506 | 527 | |
---|
507 | 528 | static unsigned long __thp_get_unmapped_area(struct file *filp, |
---|
508 | 529 | unsigned long addr, unsigned long len, |
---|
.. | .. |
---|
561 | 582 | struct page *page, gfp_t gfp) |
---|
562 | 583 | { |
---|
563 | 584 | struct vm_area_struct *vma = vmf->vma; |
---|
564 | | - struct mem_cgroup *memcg; |
---|
565 | 585 | pgtable_t pgtable; |
---|
566 | 586 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
---|
567 | 587 | vm_fault_t ret = 0; |
---|
568 | 588 | |
---|
569 | 589 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
---|
570 | 590 | |
---|
571 | | - if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) { |
---|
| 591 | + if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { |
---|
572 | 592 | put_page(page); |
---|
573 | 593 | count_vm_event(THP_FAULT_FALLBACK); |
---|
| 594 | + count_vm_event(THP_FAULT_FALLBACK_CHARGE); |
---|
574 | 595 | return VM_FAULT_FALLBACK; |
---|
575 | 596 | } |
---|
| 597 | + cgroup_throttle_swaprate(page, gfp); |
---|
576 | 598 | |
---|
577 | | - pgtable = pte_alloc_one(vma->vm_mm, haddr); |
---|
| 599 | + pgtable = pte_alloc_one(vma->vm_mm); |
---|
578 | 600 | if (unlikely(!pgtable)) { |
---|
579 | 601 | ret = VM_FAULT_OOM; |
---|
580 | 602 | goto release; |
---|
.. | .. |
---|
603 | 625 | vm_fault_t ret2; |
---|
604 | 626 | |
---|
605 | 627 | spin_unlock(vmf->ptl); |
---|
606 | | - mem_cgroup_cancel_charge(page, memcg, true); |
---|
607 | 628 | put_page(page); |
---|
608 | 629 | pte_free(vma->vm_mm, pgtable); |
---|
609 | 630 | ret2 = handle_userfault(vmf, VM_UFFD_MISSING); |
---|
.. | .. |
---|
614 | 635 | entry = mk_huge_pmd(page, vma->vm_page_prot); |
---|
615 | 636 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
---|
616 | 637 | page_add_new_anon_rmap(page, vma, haddr, true); |
---|
617 | | - mem_cgroup_commit_charge(page, memcg, false, true); |
---|
618 | | - lru_cache_add_active_or_unevictable(page, vma); |
---|
| 638 | + lru_cache_add_inactive_or_unevictable(page, vma); |
---|
619 | 639 | pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); |
---|
620 | 640 | set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); |
---|
621 | 641 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
---|
622 | 642 | mm_inc_nr_ptes(vma->vm_mm); |
---|
623 | 643 | spin_unlock(vmf->ptl); |
---|
624 | 644 | count_vm_event(THP_FAULT_ALLOC); |
---|
| 645 | + count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); |
---|
625 | 646 | } |
---|
626 | 647 | |
---|
627 | 648 | return 0; |
---|
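Between the two hunks above, the fault path switched to the unified mem_cgroup_charge() call (the old try/commit/cancel trio is gone), a failed charge is now counted separately as THP_FAULT_FALLBACK_CHARGE on top of THP_FAULT_FALLBACK, and a successful THP fault is additionally accounted to the memcg via count_memcg_event_mm(). The global counters surface in /proc/vmstat as thp_fault_alloc, thp_fault_fallback and thp_fault_fallback_charge:

    /* Dump the global THP counters from /proc/vmstat. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[128];
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (!strncmp(line, "thp_", 4))
                fputs(line, stdout);
        fclose(f);
        return 0;
    }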
.. | .. |
---|
630 | 651 | release: |
---|
631 | 652 | if (pgtable) |
---|
632 | 653 | pte_free(vma->vm_mm, pgtable); |
---|
633 | | - mem_cgroup_cancel_charge(page, memcg, true); |
---|
634 | 654 | put_page(page); |
---|
635 | 655 | return ret; |
---|
636 | 656 | |
---|
.. | .. |
---|
649 | 669 | { |
---|
650 | 670 | const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); |
---|
651 | 671 | |
---|
| 672 | + /* Always do synchronous compaction */ |
---|
652 | 673 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) |
---|
653 | 674 | return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); |
---|
| 675 | + |
---|
| 676 | + /* Kick kcompactd and fail quickly */ |
---|
654 | 677 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) |
---|
655 | 678 | return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; |
---|
| 679 | + |
---|
| 680 | + /* Synchronous compaction if madvised, otherwise kick kcompactd */ |
---|
656 | 681 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) |
---|
657 | | - return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : |
---|
658 | | - __GFP_KSWAPD_RECLAIM); |
---|
| 682 | + return GFP_TRANSHUGE_LIGHT | |
---|
| 683 | + (vma_madvised ? __GFP_DIRECT_RECLAIM : |
---|
| 684 | + __GFP_KSWAPD_RECLAIM); |
---|
| 685 | + |
---|
| 686 | + /* Only do synchronous compaction if madvised */ |
---|
659 | 687 | if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) |
---|
660 | | - return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : |
---|
661 | | - 0); |
---|
| 688 | + return GFP_TRANSHUGE_LIGHT | |
---|
| 689 | + (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); |
---|
| 690 | + |
---|
662 | 691 | return GFP_TRANSHUGE_LIGHT; |
---|
663 | 692 | } |
---|
664 | 693 | |
---|
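The comments added above spell out how the sysfs "defrag" setting maps onto allocation behaviour. The same mapping, restated as a compact table in code; the enum is a stand-in for the sysfs values ("always", "defer", "defer+madvise", "madvise", "never"), and "madvised" means the VMA carries VM_HUGEPAGE:

    enum thp_defrag { DEFRAG_ALWAYS, DEFRAG_DEFER, DEFRAG_DEFER_MADVISE,
                      DEFRAG_MADVISE, DEFRAG_NEVER };

    static const char *thp_alloc_behaviour(enum thp_defrag d, int madvised)
    {
        switch (d) {
        case DEFRAG_ALWAYS:          /* DEFRAG_DIRECT_FLAG */
            return "synchronous compaction; no retry unless madvised";
        case DEFRAG_DEFER:           /* DEFRAG_KSWAPD_FLAG */
            return "kick kcompactd and fail the huge allocation quickly";
        case DEFRAG_DEFER_MADVISE:   /* DEFRAG_KSWAPD_OR_MADV_FLAG */
            return madvised ? "synchronous compaction" : "kick kcompactd only";
        case DEFRAG_MADVISE:         /* DEFRAG_REQ_MADV_FLAG */
            return madvised ? "synchronous compaction" : "no reclaim at all";
        default:                     /* "never" */
            return "GFP_TRANSHUGE_LIGHT with no reclaim";
        }
    }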
.. | .. |
---|
686 | 715 | struct page *page; |
---|
687 | 716 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
---|
688 | 717 | |
---|
689 | | - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) |
---|
| 718 | + if (!transhuge_vma_suitable(vma, haddr)) |
---|
690 | 719 | return VM_FAULT_FALLBACK; |
---|
691 | 720 | if (unlikely(anon_vma_prepare(vma))) |
---|
692 | 721 | return VM_FAULT_OOM; |
---|
.. | .. |
---|
698 | 727 | pgtable_t pgtable; |
---|
699 | 728 | struct page *zero_page; |
---|
700 | 729 | vm_fault_t ret; |
---|
701 | | - pgtable = pte_alloc_one(vma->vm_mm, haddr); |
---|
| 730 | + pgtable = pte_alloc_one(vma->vm_mm); |
---|
702 | 731 | if (unlikely(!pgtable)) |
---|
703 | 732 | return VM_FAULT_OOM; |
---|
704 | 733 | zero_page = mm_get_huge_zero_page(vma->vm_mm); |
---|
.. | .. |
---|
787 | 816 | pte_free(mm, pgtable); |
---|
788 | 817 | } |
---|
789 | 818 | |
---|
790 | | -vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) |
---|
| 819 | +/** |
---|
| 820 | + * vmf_insert_pfn_pmd_prot - insert a pmd size pfn |
---|
| 821 | + * @vmf: Structure describing the fault |
---|
| 822 | + * @pfn: pfn to insert |
---|
| 823 | + * @pgprot: page protection to use |
---|
| 824 | + * @write: whether it's a write fault |
---|
| 825 | + * |
---|
| 826 | + * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and |
---|
| 827 | + * also consult the vmf_insert_mixed_prot() documentation when |
---|
| 828 | + * @pgprot != @vmf->vma->vm_page_prot. |
---|
| 829 | + * |
---|
| 830 | + * Return: vm_fault_t value. |
---|
| 831 | + */ |
---|
| 832 | +vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, |
---|
| 833 | + pgprot_t pgprot, bool write) |
---|
791 | 834 | { |
---|
792 | 835 | unsigned long addr = vmf->address & PMD_MASK; |
---|
793 | 836 | struct vm_area_struct *vma = vmf->vma; |
---|
794 | | - pgprot_t pgprot = vma->vm_page_prot; |
---|
795 | 837 | pgtable_t pgtable = NULL; |
---|
796 | 838 | |
---|
797 | 839 | /* |
---|
.. | .. |
---|
809 | 851 | return VM_FAULT_SIGBUS; |
---|
810 | 852 | |
---|
811 | 853 | if (arch_needs_pgtable_deposit()) { |
---|
812 | | - pgtable = pte_alloc_one(vma->vm_mm, addr); |
---|
| 854 | + pgtable = pte_alloc_one(vma->vm_mm); |
---|
813 | 855 | if (!pgtable) |
---|
814 | 856 | return VM_FAULT_OOM; |
---|
815 | 857 | } |
---|
.. | .. |
---|
819 | 861 | insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); |
---|
820 | 862 | return VM_FAULT_NOPAGE; |
---|
821 | 863 | } |
---|
822 | | -EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); |
---|
| 864 | +EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot); |
---|
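vmf_insert_pfn_pmd() is generalised here into vmf_insert_pfn_pmd_prot(), which takes the page protection explicitly instead of always using vma->vm_page_prot. Callers wanting the old behaviour just pass vmf->vma->vm_page_prot; upstream keeps the old name as a thin inline wrapper in <linux/huge_mm.h>, but that header is not part of this diff, so treat the wrapper below as an assumption to verify against your tree:

    /* Hypothetical compatibility wrapper; shape only, not taken from this diff. */
    static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
                                                bool write)
    {
        return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
    }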
823 | 865 | |
---|
824 | 866 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
---|
825 | 867 | static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) |
---|
.. | .. |
---|
865 | 907 | spin_unlock(ptl); |
---|
866 | 908 | } |
---|
867 | 909 | |
---|
868 | | -vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) |
---|
| 910 | +/** |
---|
| 911 | + * vmf_insert_pfn_pud_prot - insert a pud size pfn |
---|
| 912 | + * @vmf: Structure describing the fault |
---|
| 913 | + * @pfn: pfn to insert |
---|
| 914 | + * @pgprot: page protection to use |
---|
| 915 | + * @write: whether it's a write fault |
---|
| 916 | + * |
---|
| 917 | + * Insert a pud size pfn. See vmf_insert_pfn() for additional info and |
---|
| 918 | + * also consult the vmf_insert_mixed_prot() documentation when |
---|
| 919 | + * @pgprot != @vmf->vma->vm_page_prot. |
---|
| 920 | + * |
---|
| 921 | + * Return: vm_fault_t value. |
---|
| 922 | + */ |
---|
| 923 | +vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, |
---|
| 924 | + pgprot_t pgprot, bool write) |
---|
869 | 925 | { |
---|
870 | 926 | unsigned long addr = vmf->address & PUD_MASK; |
---|
871 | 927 | struct vm_area_struct *vma = vmf->vma; |
---|
872 | | - pgprot_t pgprot = vma->vm_page_prot; |
---|
873 | 928 | |
---|
874 | 929 | /* |
---|
875 | 930 | * If we had pud_special, we could avoid all these restrictions, |
---|
.. | .. |
---|
890 | 945 | insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); |
---|
891 | 946 | return VM_FAULT_NOPAGE; |
---|
892 | 947 | } |
---|
893 | | -EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); |
---|
| 948 | +EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot); |
---|
894 | 949 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
---|
895 | 950 | |
---|
896 | 951 | static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, |
---|
.. | .. |
---|
907 | 962 | } |
---|
908 | 963 | |
---|
909 | 964 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, |
---|
910 | | - pmd_t *pmd, int flags) |
---|
| 965 | + pmd_t *pmd, int flags, struct dev_pagemap **pgmap) |
---|
911 | 966 | { |
---|
912 | 967 | unsigned long pfn = pmd_pfn(*pmd); |
---|
913 | 968 | struct mm_struct *mm = vma->vm_mm; |
---|
914 | | - struct dev_pagemap *pgmap; |
---|
915 | 969 | struct page *page; |
---|
916 | 970 | |
---|
917 | 971 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
---|
.. | .. |
---|
921 | 975 | * not be in this function with `flags & FOLL_COW` set. |
---|
922 | 976 | */ |
---|
923 | 977 | WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); |
---|
| 978 | + |
---|
| 979 | + /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
---|
| 980 | + if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == |
---|
| 981 | + (FOLL_PIN | FOLL_GET))) |
---|
| 982 | + return NULL; |
---|
924 | 983 | |
---|
925 | 984 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
---|
926 | 985 | return NULL; |
---|
.. | .. |
---|
937 | 996 | * device mapped pages can only be returned if the |
---|
938 | 997 | * caller will manage the page reference count. |
---|
939 | 998 | */ |
---|
940 | | - if (!(flags & FOLL_GET)) |
---|
| 999 | + if (!(flags & (FOLL_GET | FOLL_PIN))) |
---|
941 | 1000 | return ERR_PTR(-EEXIST); |
---|
942 | 1001 | |
---|
943 | 1002 | pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; |
---|
944 | | - pgmap = get_dev_pagemap(pfn, NULL); |
---|
945 | | - if (!pgmap) |
---|
| 1003 | + *pgmap = get_dev_pagemap(pfn, *pgmap); |
---|
| 1004 | + if (!*pgmap) |
---|
946 | 1005 | return ERR_PTR(-EFAULT); |
---|
947 | 1006 | page = pfn_to_page(pfn); |
---|
948 | | - get_page(page); |
---|
949 | | - put_dev_pagemap(pgmap); |
---|
| 1007 | + if (!try_grab_page(page, flags)) |
---|
| 1008 | + page = ERR_PTR(-ENOMEM); |
---|
950 | 1009 | |
---|
951 | 1010 | return page; |
---|
952 | 1011 | } |
---|
953 | 1012 | |
---|
954 | 1013 | int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
---|
955 | 1014 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
---|
956 | | - struct vm_area_struct *vma) |
---|
| 1015 | + struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) |
---|
957 | 1016 | { |
---|
958 | 1017 | spinlock_t *dst_ptl, *src_ptl; |
---|
959 | 1018 | struct page *src_page; |
---|
.. | .. |
---|
962 | 1021 | int ret = -ENOMEM; |
---|
963 | 1022 | |
---|
964 | 1023 | /* Skip if can be re-fill on fault */ |
---|
965 | | - if (!vma_is_anonymous(vma)) |
---|
| 1024 | + if (!vma_is_anonymous(dst_vma)) |
---|
966 | 1025 | return 0; |
---|
967 | 1026 | |
---|
968 | | - pgtable = pte_alloc_one(dst_mm, addr); |
---|
| 1027 | + pgtable = pte_alloc_one(dst_mm); |
---|
969 | 1028 | if (unlikely(!pgtable)) |
---|
970 | 1029 | goto out; |
---|
971 | 1030 | |
---|
.. | .. |
---|
986 | 1045 | pmd = swp_entry_to_pmd(entry); |
---|
987 | 1046 | if (pmd_swp_soft_dirty(*src_pmd)) |
---|
988 | 1047 | pmd = pmd_swp_mksoft_dirty(pmd); |
---|
| 1048 | + if (pmd_swp_uffd_wp(*src_pmd)) |
---|
| 1049 | + pmd = pmd_swp_mkuffd_wp(pmd); |
---|
989 | 1050 | set_pmd_at(src_mm, addr, src_pmd, pmd); |
---|
990 | 1051 | } |
---|
991 | 1052 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
---|
992 | 1053 | mm_inc_nr_ptes(dst_mm); |
---|
993 | 1054 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
---|
| 1055 | + if (!userfaultfd_wp(dst_vma)) |
---|
| 1056 | + pmd = pmd_swp_clear_uffd_wp(pmd); |
---|
994 | 1057 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
---|
995 | 1058 | ret = 0; |
---|
996 | 1059 | goto out_unlock; |
---|
.. | .. |
---|
1007 | 1070 | * a page table. |
---|
1008 | 1071 | */ |
---|
1009 | 1072 | if (is_huge_zero_pmd(pmd)) { |
---|
1010 | | - struct page *zero_page; |
---|
1011 | 1073 | /* |
---|
1012 | 1074 | * get_huge_zero_page() will never allocate a new page here, |
---|
1013 | 1075 | * since we already have a zero page to copy. It just takes a |
---|
1014 | 1076 | * reference. |
---|
1015 | 1077 | */ |
---|
1016 | | - zero_page = mm_get_huge_zero_page(dst_mm); |
---|
1017 | | - set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, |
---|
1018 | | - zero_page); |
---|
1019 | | - ret = 0; |
---|
1020 | | - goto out_unlock; |
---|
| 1078 | + mm_get_huge_zero_page(dst_mm); |
---|
| 1079 | + goto out_zero_page; |
---|
1021 | 1080 | } |
---|
1022 | 1081 | |
---|
1023 | 1082 | src_page = pmd_page(pmd); |
---|
1024 | 1083 | VM_BUG_ON_PAGE(!PageHead(src_page), src_page); |
---|
| 1084 | + |
---|
| 1085 | + /* |
---|
| 1086 | + * If this page is a potentially pinned page, split and retry the fault |
---|
| 1087 | + * with smaller page size. Normally this should not happen because the |
---|
| 1088 | + * userspace should use MADV_DONTFORK upon pinned regions. This is a |
---|
| 1089 | + * best effort that the pinned pages won't be replaced by another |
---|
| 1090 | + * random page during the coming copy-on-write. |
---|
| 1091 | + */ |
---|
| 1092 | + if (unlikely(is_cow_mapping(src_vma->vm_flags) && |
---|
| 1093 | + atomic_read(&src_mm->has_pinned) && |
---|
| 1094 | + page_maybe_dma_pinned(src_page))) { |
---|
| 1095 | + pte_free(dst_mm, pgtable); |
---|
| 1096 | + spin_unlock(src_ptl); |
---|
| 1097 | + spin_unlock(dst_ptl); |
---|
| 1098 | + __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); |
---|
| 1099 | + return -EAGAIN; |
---|
| 1100 | + } |
---|
| 1101 | + |
---|
1025 | 1102 | get_page(src_page); |
---|
1026 | 1103 | page_dup_rmap(src_page, true); |
---|
1027 | 1104 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
---|
| 1105 | +out_zero_page: |
---|
1028 | 1106 | mm_inc_nr_ptes(dst_mm); |
---|
1029 | 1107 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
---|
1030 | | - |
---|
1031 | 1108 | pmdp_set_wrprotect(src_mm, addr, src_pmd); |
---|
| 1109 | + if (!userfaultfd_wp(dst_vma)) |
---|
| 1110 | + pmd = pmd_clear_uffd_wp(pmd); |
---|
1032 | 1111 | pmd = pmd_mkold(pmd_wrprotect(pmd)); |
---|
1033 | 1112 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
---|
1034 | 1113 | |
---|
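The new page_maybe_dma_pinned() check makes fork() split a THP that may be pinned for DMA and retry the copy at small-page granularity, so the parent keeps exclusive use of pages a device may still be writing. As the comment says, the proper userspace contract is still MADV_DONTFORK on long-lived pinned buffers (RDMA registrations and the like) so they are never copied into children at all. A typical allocation pattern (the madvise flags are real; the sizes and helper name are illustrative):

    #include <stdlib.h>
    #include <sys/mman.h>

    /* Allocate a buffer that a driver will pin for DMA. */
    static void *alloc_pinned_buffer(size_t len)
    {
        void *buf = NULL;

        if (posix_memalign(&buf, 2UL << 20, len))  /* 2 MiB aligned, THP friendly */
            return NULL;
        (void)madvise(buf, len, MADV_HUGEPAGE);    /* optional: ask for THP */
        if (madvise(buf, len, MADV_DONTFORK)) {    /* keep it out of child processes */
            free(buf);
            return NULL;
        }
        return buf;                                /* now register/pin it with the driver */
    }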
.. | .. |
---|
1055 | 1134 | } |
---|
1056 | 1135 | |
---|
1057 | 1136 | struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, |
---|
1058 | | - pud_t *pud, int flags) |
---|
| 1137 | + pud_t *pud, int flags, struct dev_pagemap **pgmap) |
---|
1059 | 1138 | { |
---|
1060 | 1139 | unsigned long pfn = pud_pfn(*pud); |
---|
1061 | 1140 | struct mm_struct *mm = vma->vm_mm; |
---|
1062 | | - struct dev_pagemap *pgmap; |
---|
1063 | 1141 | struct page *page; |
---|
1064 | 1142 | |
---|
1065 | 1143 | assert_spin_locked(pud_lockptr(mm, pud)); |
---|
1066 | 1144 | |
---|
1067 | 1145 | if (flags & FOLL_WRITE && !pud_write(*pud)) |
---|
| 1146 | + return NULL; |
---|
| 1147 | + |
---|
| 1148 | + /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
---|
| 1149 | + if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == |
---|
| 1150 | + (FOLL_PIN | FOLL_GET))) |
---|
1068 | 1151 | return NULL; |
---|
1069 | 1152 | |
---|
1070 | 1153 | if (pud_present(*pud) && pud_devmap(*pud)) |
---|
.. | .. |
---|
1078 | 1161 | /* |
---|
1079 | 1162 | * device mapped pages can only be returned if the |
---|
1080 | 1163 | * caller will manage the page reference count. |
---|
| 1164 | + * |
---|
| 1165 | + * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: |
---|
1081 | 1166 | */ |
---|
1082 | | - if (!(flags & FOLL_GET)) |
---|
| 1167 | + if (!(flags & (FOLL_GET | FOLL_PIN))) |
---|
1083 | 1168 | return ERR_PTR(-EEXIST); |
---|
1084 | 1169 | |
---|
1085 | 1170 | pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; |
---|
1086 | | - pgmap = get_dev_pagemap(pfn, NULL); |
---|
1087 | | - if (!pgmap) |
---|
| 1171 | + *pgmap = get_dev_pagemap(pfn, *pgmap); |
---|
| 1172 | + if (!*pgmap) |
---|
1088 | 1173 | return ERR_PTR(-EFAULT); |
---|
1089 | 1174 | page = pfn_to_page(pfn); |
---|
1090 | | - get_page(page); |
---|
1091 | | - put_dev_pagemap(pgmap); |
---|
| 1175 | + if (!try_grab_page(page, flags)) |
---|
| 1176 | + page = ERR_PTR(-ENOMEM); |
---|
1092 | 1177 | |
---|
1093 | 1178 | return page; |
---|
1094 | 1179 | } |
---|
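Both follow_devmap_pmd() and follow_devmap_pud() now accept FOLL_PIN as an alternative to FOLL_GET, warn and bail if both are passed, and take the reference via try_grab_page() so pinned pages get pin accounting instead of a bare get_page(). The flag contract, restated with stand-in values (the real FOLL_* constants live in the gup headers):

    #include <stdbool.h>

    #define X_FOLL_GET 0x1u   /* stand-in for FOLL_GET */
    #define X_FOLL_PIN 0x2u   /* stand-in for FOLL_PIN */

    static bool devmap_follow_flags_ok(unsigned int flags)
    {
        if ((flags & (X_FOLL_GET | X_FOLL_PIN)) == (X_FOLL_GET | X_FOLL_PIN))
            return false;                                /* mutually exclusive */
        return (flags & (X_FOLL_GET | X_FOLL_PIN)) != 0; /* and one is required */
    }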
.. | .. |
---|
1117 | 1202 | */ |
---|
1118 | 1203 | if (is_huge_zero_pud(pud)) { |
---|
1119 | 1204 | /* No huge zero pud yet */ |
---|
| 1205 | + } |
---|
| 1206 | + |
---|
| 1207 | + /* Please refer to comments in copy_huge_pmd() */ |
---|
| 1208 | + if (unlikely(is_cow_mapping(vma->vm_flags) && |
---|
| 1209 | + atomic_read(&src_mm->has_pinned) && |
---|
| 1210 | + page_maybe_dma_pinned(pud_page(pud)))) { |
---|
| 1211 | + spin_unlock(src_ptl); |
---|
| 1212 | + spin_unlock(dst_ptl); |
---|
| 1213 | + __split_huge_pud(vma, src_pud, addr); |
---|
| 1214 | + return -EAGAIN; |
---|
1120 | 1215 | } |
---|
1121 | 1216 | |
---|
1122 | 1217 | pudp_set_wrprotect(src_mm, addr, src_pud); |
---|
.. | .. |
---|
1173 | 1268 | spin_unlock(vmf->ptl); |
---|
1174 | 1269 | } |
---|
1175 | 1270 | |
---|
1176 | | -static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, |
---|
1177 | | - pmd_t orig_pmd, struct page *page) |
---|
1178 | | -{ |
---|
1179 | | - struct vm_area_struct *vma = vmf->vma; |
---|
1180 | | - unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
---|
1181 | | - struct mem_cgroup *memcg; |
---|
1182 | | - pgtable_t pgtable; |
---|
1183 | | - pmd_t _pmd; |
---|
1184 | | - int i; |
---|
1185 | | - vm_fault_t ret = 0; |
---|
1186 | | - struct page **pages; |
---|
1187 | | - unsigned long mmun_start; /* For mmu_notifiers */ |
---|
1188 | | - unsigned long mmun_end; /* For mmu_notifiers */ |
---|
1189 | | - |
---|
1190 | | - pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), |
---|
1191 | | - GFP_KERNEL); |
---|
1192 | | - if (unlikely(!pages)) { |
---|
1193 | | - ret |= VM_FAULT_OOM; |
---|
1194 | | - goto out; |
---|
1195 | | - } |
---|
1196 | | - |
---|
1197 | | - for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
1198 | | - pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, |
---|
1199 | | - vmf->address, page_to_nid(page)); |
---|
1200 | | - if (unlikely(!pages[i] || |
---|
1201 | | - mem_cgroup_try_charge_delay(pages[i], vma->vm_mm, |
---|
1202 | | - GFP_KERNEL, &memcg, false))) { |
---|
1203 | | - if (pages[i]) |
---|
1204 | | - put_page(pages[i]); |
---|
1205 | | - while (--i >= 0) { |
---|
1206 | | - memcg = (void *)page_private(pages[i]); |
---|
1207 | | - set_page_private(pages[i], 0); |
---|
1208 | | - mem_cgroup_cancel_charge(pages[i], memcg, |
---|
1209 | | - false); |
---|
1210 | | - put_page(pages[i]); |
---|
1211 | | - } |
---|
1212 | | - kfree(pages); |
---|
1213 | | - ret |= VM_FAULT_OOM; |
---|
1214 | | - goto out; |
---|
1215 | | - } |
---|
1216 | | - set_page_private(pages[i], (unsigned long)memcg); |
---|
1217 | | - } |
---|
1218 | | - |
---|
1219 | | - for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
1220 | | - copy_user_highpage(pages[i], page + i, |
---|
1221 | | - haddr + PAGE_SIZE * i, vma); |
---|
1222 | | - __SetPageUptodate(pages[i]); |
---|
1223 | | - cond_resched(); |
---|
1224 | | - } |
---|
1225 | | - |
---|
1226 | | - mmun_start = haddr; |
---|
1227 | | - mmun_end = haddr + HPAGE_PMD_SIZE; |
---|
1228 | | - mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); |
---|
1229 | | - |
---|
1230 | | - vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
---|
1231 | | - if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) |
---|
1232 | | - goto out_free_pages; |
---|
1233 | | - VM_BUG_ON_PAGE(!PageHead(page), page); |
---|
1234 | | - |
---|
1235 | | - /* |
---|
1236 | | - * Leave pmd empty until pte is filled note we must notify here as |
---|
1237 | | - * concurrent CPU thread might write to new page before the call to |
---|
1238 | | - * mmu_notifier_invalidate_range_end() happens which can lead to a |
---|
1239 | | - * device seeing memory write in different order than CPU. |
---|
1240 | | - * |
---|
1241 | | - * See Documentation/vm/mmu_notifier.rst |
---|
1242 | | - */ |
---|
1243 | | - pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); |
---|
1244 | | - |
---|
1245 | | - pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); |
---|
1246 | | - pmd_populate(vma->vm_mm, &_pmd, pgtable); |
---|
1247 | | - |
---|
1248 | | - for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
---|
1249 | | - pte_t entry; |
---|
1250 | | - entry = mk_pte(pages[i], vma->vm_page_prot); |
---|
1251 | | - entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
---|
1252 | | - memcg = (void *)page_private(pages[i]); |
---|
1253 | | - set_page_private(pages[i], 0); |
---|
1254 | | - page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); |
---|
1255 | | - mem_cgroup_commit_charge(pages[i], memcg, false, false); |
---|
1256 | | - lru_cache_add_active_or_unevictable(pages[i], vma); |
---|
1257 | | - vmf->pte = pte_offset_map(&_pmd, haddr); |
---|
1258 | | - VM_BUG_ON(!pte_none(*vmf->pte)); |
---|
1259 | | - set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); |
---|
1260 | | - pte_unmap(vmf->pte); |
---|
1261 | | - } |
---|
1262 | | - kfree(pages); |
---|
1263 | | - |
---|
1264 | | - smp_wmb(); /* make pte visible before pmd */ |
---|
1265 | | - pmd_populate(vma->vm_mm, vmf->pmd, pgtable); |
---|
1266 | | - page_remove_rmap(page, true); |
---|
1267 | | - spin_unlock(vmf->ptl); |
---|
1268 | | - |
---|
1269 | | - /* |
---|
1270 | | - * No need to double call mmu_notifier->invalidate_range() callback as |
---|
1271 | | - * the above pmdp_huge_clear_flush_notify() did already call it. |
---|
1272 | | - */ |
---|
1273 | | - mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, |
---|
1274 | | - mmun_end); |
---|
1275 | | - |
---|
1276 | | - ret |= VM_FAULT_WRITE; |
---|
1277 | | - put_page(page); |
---|
1278 | | - |
---|
1279 | | -out: |
---|
1280 | | - return ret; |
---|
1281 | | - |
---|
1282 | | -out_free_pages: |
---|
1283 | | - spin_unlock(vmf->ptl); |
---|
1284 | | - mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
---|
1285 | | - for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
1286 | | - memcg = (void *)page_private(pages[i]); |
---|
1287 | | - set_page_private(pages[i], 0); |
---|
1288 | | - mem_cgroup_cancel_charge(pages[i], memcg, false); |
---|
1289 | | - put_page(pages[i]); |
---|
1290 | | - } |
---|
1291 | | - kfree(pages); |
---|
1292 | | - goto out; |
---|
1293 | | -} |
---|
1294 | | - |
---|
1295 | 1271 | vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) |
---|
1296 | 1272 | { |
---|
1297 | 1273 | struct vm_area_struct *vma = vmf->vma; |
---|
1298 | | - struct page *page = NULL, *new_page; |
---|
1299 | | - struct mem_cgroup *memcg; |
---|
| 1274 | + struct page *page; |
---|
1300 | 1275 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
---|
1301 | | - unsigned long mmun_start; /* For mmu_notifiers */ |
---|
1302 | | - unsigned long mmun_end; /* For mmu_notifiers */ |
---|
1303 | | - gfp_t huge_gfp; /* for allocation and charge */ |
---|
1304 | | - vm_fault_t ret = 0; |
---|
1305 | 1276 | |
---|
1306 | 1277 | vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); |
---|
1307 | 1278 | VM_BUG_ON_VMA(!vma->anon_vma, vma); |
---|
| 1279 | + |
---|
1308 | 1280 | if (is_huge_zero_pmd(orig_pmd)) |
---|
1309 | | - goto alloc; |
---|
| 1281 | + goto fallback; |
---|
| 1282 | + |
---|
1310 | 1283 | spin_lock(vmf->ptl); |
---|
1311 | | - if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) |
---|
1312 | | - goto out_unlock; |
---|
| 1284 | + |
---|
| 1285 | + if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
---|
| 1286 | + spin_unlock(vmf->ptl); |
---|
| 1287 | + return 0; |
---|
| 1288 | + } |
---|
1313 | 1289 | |
---|
1314 | 1290 | page = pmd_page(orig_pmd); |
---|
1315 | 1291 | VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); |
---|
1316 | | - /* |
---|
1317 | | - * We can only reuse the page if nobody else maps the huge page or it's |
---|
1318 | | - * part. |
---|
1319 | | - */ |
---|
| 1292 | + |
---|
| 1293 | + /* Lock page for reuse_swap_page() */ |
---|
1320 | 1294 | if (!trylock_page(page)) { |
---|
1321 | 1295 | get_page(page); |
---|
1322 | 1296 | spin_unlock(vmf->ptl); |
---|
1323 | 1297 | lock_page(page); |
---|
1324 | 1298 | spin_lock(vmf->ptl); |
---|
1325 | 1299 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
---|
| 1300 | + spin_unlock(vmf->ptl); |
---|
1326 | 1301 | unlock_page(page); |
---|
1327 | 1302 | put_page(page); |
---|
1328 | | - goto out_unlock; |
---|
| 1303 | + return 0; |
---|
1329 | 1304 | } |
---|
1330 | 1305 | put_page(page); |
---|
1331 | 1306 | } |
---|
| 1307 | + |
---|
| 1308 | + /* |
---|
| 1309 | + * We can only reuse the page if nobody else maps the huge page or it's |
---|
| 1310 | + * part. |
---|
| 1311 | + */ |
---|
1332 | 1312 | if (reuse_swap_page(page, NULL)) { |
---|
1333 | 1313 | pmd_t entry; |
---|
1334 | 1314 | entry = pmd_mkyoung(orig_pmd); |
---|
1335 | 1315 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
---|
1336 | | - if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) |
---|
| 1316 | + if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) |
---|
1337 | 1317 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
---|
1338 | | - ret |= VM_FAULT_WRITE; |
---|
1339 | 1318 | unlock_page(page); |
---|
1340 | | - goto out_unlock; |
---|
1341 | | - } |
---|
1342 | | - unlock_page(page); |
---|
1343 | | - get_page(page); |
---|
1344 | | - spin_unlock(vmf->ptl); |
---|
1345 | | -alloc: |
---|
1346 | | - if (__transparent_hugepage_enabled(vma) && |
---|
1347 | | - !transparent_hugepage_debug_cow()) { |
---|
1348 | | - huge_gfp = alloc_hugepage_direct_gfpmask(vma); |
---|
1349 | | - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); |
---|
1350 | | - } else |
---|
1351 | | - new_page = NULL; |
---|
1352 | | - |
---|
1353 | | - if (likely(new_page)) { |
---|
1354 | | - prep_transhuge_page(new_page); |
---|
1355 | | - } else { |
---|
1356 | | - if (!page) { |
---|
1357 | | - split_huge_pmd(vma, vmf->pmd, vmf->address); |
---|
1358 | | - ret |= VM_FAULT_FALLBACK; |
---|
1359 | | - } else { |
---|
1360 | | - ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); |
---|
1361 | | - if (ret & VM_FAULT_OOM) { |
---|
1362 | | - split_huge_pmd(vma, vmf->pmd, vmf->address); |
---|
1363 | | - ret |= VM_FAULT_FALLBACK; |
---|
1364 | | - } |
---|
1365 | | - put_page(page); |
---|
1366 | | - } |
---|
1367 | | - count_vm_event(THP_FAULT_FALLBACK); |
---|
1368 | | - goto out; |
---|
1369 | | - } |
---|
1370 | | - |
---|
1371 | | - if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm, |
---|
1372 | | - huge_gfp, &memcg, true))) { |
---|
1373 | | - put_page(new_page); |
---|
1374 | | - split_huge_pmd(vma, vmf->pmd, vmf->address); |
---|
1375 | | - if (page) |
---|
1376 | | - put_page(page); |
---|
1377 | | - ret |= VM_FAULT_FALLBACK; |
---|
1378 | | - count_vm_event(THP_FAULT_FALLBACK); |
---|
1379 | | - goto out; |
---|
1380 | | - } |
---|
1381 | | - |
---|
1382 | | - count_vm_event(THP_FAULT_ALLOC); |
---|
1383 | | - |
---|
1384 | | - if (!page) |
---|
1385 | | - clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR); |
---|
1386 | | - else |
---|
1387 | | - copy_user_huge_page(new_page, page, vmf->address, |
---|
1388 | | - vma, HPAGE_PMD_NR); |
---|
1389 | | - __SetPageUptodate(new_page); |
---|
1390 | | - |
---|
1391 | | - mmun_start = haddr; |
---|
1392 | | - mmun_end = haddr + HPAGE_PMD_SIZE; |
---|
1393 | | - mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); |
---|
1394 | | - |
---|
1395 | | - spin_lock(vmf->ptl); |
---|
1396 | | - if (page) |
---|
1397 | | - put_page(page); |
---|
1398 | | - if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
---|
1399 | 1319 | spin_unlock(vmf->ptl); |
---|
1400 | | - mem_cgroup_cancel_charge(new_page, memcg, true); |
---|
1401 | | - put_page(new_page); |
---|
1402 | | - goto out_mn; |
---|
1403 | | - } else { |
---|
1404 | | - pmd_t entry; |
---|
1405 | | - entry = mk_huge_pmd(new_page, vma->vm_page_prot); |
---|
1406 | | - entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
---|
1407 | | - pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); |
---|
1408 | | - page_add_new_anon_rmap(new_page, vma, haddr, true); |
---|
1409 | | - mem_cgroup_commit_charge(new_page, memcg, false, true); |
---|
1410 | | - lru_cache_add_active_or_unevictable(new_page, vma); |
---|
1411 | | - set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); |
---|
1412 | | - update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
---|
1413 | | - if (!page) { |
---|
1414 | | - add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
---|
1415 | | - } else { |
---|
1416 | | - VM_BUG_ON_PAGE(!PageHead(page), page); |
---|
1417 | | - page_remove_rmap(page, true); |
---|
1418 | | - put_page(page); |
---|
1419 | | - } |
---|
1420 | | - ret |= VM_FAULT_WRITE; |
---|
| 1320 | + return VM_FAULT_WRITE; |
---|
1421 | 1321 | } |
---|
| 1322 | + |
---|
| 1323 | + unlock_page(page); |
---|
1422 | 1324 | spin_unlock(vmf->ptl); |
---|
1423 | | -out_mn: |
---|
1424 | | - /* |
---|
1425 | | - * No need to double call mmu_notifier->invalidate_range() callback as |
---|
1426 | | - * the above pmdp_huge_clear_flush_notify() did already call it. |
---|
1427 | | - */ |
---|
1428 | | - mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, |
---|
1429 | | - mmun_end); |
---|
1430 | | -out: |
---|
1431 | | - return ret; |
---|
1432 | | -out_unlock: |
---|
1433 | | - spin_unlock(vmf->ptl); |
---|
1434 | | - return ret; |
---|
| 1325 | +fallback: |
---|
| 1326 | + __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); |
---|
| 1327 | + return VM_FAULT_FALLBACK; |
---|
1435 | 1328 | } |
---|
1436 | 1329 | |
---|
1437 | 1330 | /* |
---|
1438 | | - * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, |
---|
1439 | | - * but only after we've gone through a COW cycle and they are dirty. |
---|
| 1331 | + * FOLL_FORCE can write to even unwritable pmd's, but only |
---|
| 1332 | + * after we've gone through a COW cycle and they are dirty. |
---|
1440 | 1333 | */ |
---|
1441 | 1334 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) |
---|
1442 | 1335 | { |
---|
1443 | | - return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); |
---|
| 1336 | + return pmd_write(pmd) || |
---|
| 1337 | + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); |
---|
1444 | 1338 | } |
---|
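The large removal above deletes do_huge_pmd_wp_page_fallback() entirely: a write fault on a THP that cannot simply be reused no longer allocates and charges a replacement huge page (or an array of small pages); it splits the PMD and returns VM_FAULT_FALLBACK so the fault is retried per PTE. can_follow_write_pmd() tightens the matching exception so the post-COW dirty-pmd case only applies to FOLL_FORCE callers. The resulting fault flow, sketched with a stub for the checks done under the page-table lock:

    #include <stdbool.h>

    typedef int vm_fault_t;
    enum { X_VM_FAULT_WRITE = 1, X_VM_FAULT_FALLBACK = 2 }; /* stand-in values */

    struct wp_fault_checks {
        bool huge_zero;      /* is_huge_zero_pmd(orig_pmd) */
        bool pmd_unchanged;  /* pmd_same() re-check under vmf->ptl */
        bool sole_mapper;    /* reuse_swap_page(): nobody else maps the THP */
    };

    static vm_fault_t thp_wp_fault_flow(const struct wp_fault_checks *c)
    {
        if (c->huge_zero)
            return X_VM_FAULT_FALLBACK;  /* split and COW the zero THP per PTE */
        if (!c->pmd_unchanged)
            return 0;                    /* lost a race; nothing left to do */
        if (c->sole_mapper)
            return X_VM_FAULT_WRITE;     /* reuse in place: mkyoung/mkdirty/mkwrite */
        return X_VM_FAULT_FALLBACK;      /* shared THP: split, retry as small pages */
    }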
1445 | 1339 | |
---|
1446 | 1340 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
---|
.. | .. |
---|
1466 | 1360 | |
---|
1467 | 1361 | page = pmd_page(*pmd); |
---|
1468 | 1362 | VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); |
---|
| 1363 | + |
---|
| 1364 | + if (!try_grab_page(page, flags)) |
---|
| 1365 | + return ERR_PTR(-ENOMEM); |
---|
| 1366 | + |
---|
1469 | 1367 | if (flags & FOLL_TOUCH) |
---|
1470 | 1368 | touch_pmd(vma, addr, pmd, flags); |
---|
| 1369 | + |
---|
1471 | 1370 | if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { |
---|
1472 | 1371 | /* |
---|
1473 | 1372 | * We don't mlock() pte-mapped THPs. This way we can avoid |
---|
.. | .. |
---|
1496 | 1395 | goto skip_mlock; |
---|
1497 | 1396 | if (!trylock_page(page)) |
---|
1498 | 1397 | goto skip_mlock; |
---|
1499 | | - lru_add_drain(); |
---|
1500 | 1398 | if (page->mapping && !PageDoubleMap(page)) |
---|
1501 | 1399 | mlock_vma_page(page); |
---|
1502 | 1400 | unlock_page(page); |
---|
.. | .. |
---|
1504 | 1402 | skip_mlock: |
---|
1505 | 1403 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; |
---|
1506 | 1404 | VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); |
---|
1507 | | - if (flags & FOLL_GET) |
---|
1508 | | - get_page(page); |
---|
1509 | 1405 | |
---|
1510 | 1406 | out: |
---|
1511 | 1407 | return page; |
---|
.. | .. |
---|
1518 | 1414 | struct anon_vma *anon_vma = NULL; |
---|
1519 | 1415 | struct page *page; |
---|
1520 | 1416 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
---|
1521 | | - int page_nid = -1, this_nid = numa_node_id(); |
---|
| 1417 | + int page_nid = NUMA_NO_NODE, this_nid = numa_node_id(); |
---|
1522 | 1418 | int target_nid, last_cpupid = -1; |
---|
1523 | 1419 | bool page_locked; |
---|
1524 | 1420 | bool migrated = false; |
---|
.. | .. |
---|
1539 | 1435 | if (!get_page_unless_zero(page)) |
---|
1540 | 1436 | goto out_unlock; |
---|
1541 | 1437 | spin_unlock(vmf->ptl); |
---|
1542 | | - wait_on_page_locked(page); |
---|
1543 | | - put_page(page); |
---|
| 1438 | + put_and_wait_on_page_locked(page); |
---|
1544 | 1439 | goto out; |
---|
1545 | 1440 | } |
---|
1546 | 1441 | |
---|
.. | .. |
---|
1564 | 1459 | */ |
---|
1565 | 1460 | page_locked = trylock_page(page); |
---|
1566 | 1461 | target_nid = mpol_misplaced(page, vma, haddr); |
---|
1567 | | - if (target_nid == -1) { |
---|
| 1462 | + if (target_nid == NUMA_NO_NODE) { |
---|
1568 | 1463 | /* If the page was locked, there are no parallel migrations */ |
---|
1569 | 1464 | if (page_locked) |
---|
1570 | 1465 | goto clear_pmdnuma; |
---|
.. | .. |
---|
1572 | 1467 | |
---|
1573 | 1468 | /* Migration could have started since the pmd_trans_migrating check */ |
---|
1574 | 1469 | if (!page_locked) { |
---|
1575 | | - page_nid = -1; |
---|
| 1470 | + page_nid = NUMA_NO_NODE; |
---|
1576 | 1471 | if (!get_page_unless_zero(page)) |
---|
1577 | 1472 | goto out_unlock; |
---|
1578 | 1473 | spin_unlock(vmf->ptl); |
---|
1579 | | - wait_on_page_locked(page); |
---|
1580 | | - put_page(page); |
---|
| 1474 | + put_and_wait_on_page_locked(page); |
---|
1581 | 1475 | goto out; |
---|
1582 | 1476 | } |
---|
1583 | 1477 | |
---|
.. | .. |
---|
1587 | 1481 | */ |
---|
1588 | 1482 | get_page(page); |
---|
1589 | 1483 | spin_unlock(vmf->ptl); |
---|
1590 | | - anon_vma = page_lock_anon_vma_read(page); |
---|
| 1484 | + anon_vma = page_lock_anon_vma_read(page, NULL); |
---|
1591 | 1485 | |
---|
1592 | 1486 | /* Confirm the PMD did not change while page_table_lock was released */ |
---|
1593 | 1487 | spin_lock(vmf->ptl); |
---|
1594 | 1488 | if (unlikely(!pmd_same(pmd, *vmf->pmd))) { |
---|
1595 | 1489 | unlock_page(page); |
---|
1596 | 1490 | put_page(page); |
---|
1597 | | - page_nid = -1; |
---|
| 1491 | + page_nid = NUMA_NO_NODE; |
---|
1598 | 1492 | goto out_unlock; |
---|
1599 | 1493 | } |
---|
1600 | 1494 | |
---|
1601 | 1495 | /* Bail if we fail to protect against THP splits for any reason */ |
---|
1602 | 1496 | if (unlikely(!anon_vma)) { |
---|
1603 | 1497 | put_page(page); |
---|
1604 | | - page_nid = -1; |
---|
| 1498 | + page_nid = NUMA_NO_NODE; |
---|
1605 | 1499 | goto clear_pmdnuma; |
---|
1606 | 1500 | } |
---|
1607 | 1501 | |
---|
.. | .. |
---|
1616 | 1510 | * We are not sure a pending tlb flush here is for a huge page |
---|
1617 | 1511 | * mapping or not. Hence use the tlb range variant |
---|
1618 | 1512 | */ |
---|
1619 | | - if (mm_tlb_flush_pending(vma->vm_mm)) |
---|
| 1513 | + if (mm_tlb_flush_pending(vma->vm_mm)) { |
---|
1620 | 1514 | flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); |
---|
| 1515 | + /* |
---|
| 1516 | + * change_huge_pmd() released the pmd lock before |
---|
| 1517 | + * invalidating the secondary MMUs sharing the primary |
---|
| 1518 | + * MMU pagetables (with ->invalidate_range()). The |
---|
| 1519 | + * mmu_notifier_invalidate_range_end() (which |
---|
| 1520 | + * internally calls ->invalidate_range()) in |
---|
| 1521 | + * change_pmd_range() will run after us, so we can't |
---|
| 1522 | + * rely on it here and we need an explicit invalidate. |
---|
| 1523 | + */ |
---|
| 1524 | + mmu_notifier_invalidate_range(vma->vm_mm, haddr, |
---|
| 1525 | + haddr + HPAGE_PMD_SIZE); |
---|
| 1526 | + } |
---|
1621 | 1527 | |
---|
1622 | 1528 | /* |
---|
1623 | 1529 | * Migrate the THP to the requested node, returns with page unlocked |
---|
.. | .. |
---|
1651 | 1557 | if (anon_vma) |
---|
1652 | 1558 | page_unlock_anon_vma_read(anon_vma); |
---|
1653 | 1559 | |
---|
1654 | | - if (page_nid != -1) |
---|
| 1560 | + if (page_nid != NUMA_NO_NODE) |
---|
1655 | 1561 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, |
---|
1656 | 1562 | flags); |
---|
1657 | 1563 | |
---|
.. | .. |
---|
1671 | 1577 | struct mm_struct *mm = tlb->mm; |
---|
1672 | 1578 | bool ret = false; |
---|
1673 | 1579 | |
---|
1674 | | - tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); |
---|
| 1580 | + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); |
---|
1675 | 1581 | |
---|
1676 | 1582 | ptl = pmd_trans_huge_lock(pmd, vma); |
---|
1677 | 1583 | if (!ptl) |
---|
.. | .. |
---|
1747 | 1653 | pmd_t orig_pmd; |
---|
1748 | 1654 | spinlock_t *ptl; |
---|
1749 | 1655 | |
---|
1750 | | - tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); |
---|
| 1656 | + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); |
---|
1751 | 1657 | |
---|
1752 | 1658 | ptl = __pmd_trans_huge_lock(pmd, vma); |
---|
1753 | 1659 | if (!ptl) |
---|
.. | .. |
---|
1758 | 1664 | * pgtable_trans_huge_withdraw after finishing pmdp related |
---|
1759 | 1665 | * operations. |
---|
1760 | 1666 | */ |
---|
1761 | | - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, |
---|
1762 | | - tlb->fullmm); |
---|
| 1667 | + orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, |
---|
| 1668 | + tlb->fullmm); |
---|
1763 | 1669 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
---|
1764 | | - if (vma_is_dax(vma)) { |
---|
| 1670 | + if (vma_is_special_huge(vma)) { |
---|
1765 | 1671 | if (arch_needs_pgtable_deposit()) |
---|
1766 | 1672 | zap_deposited_table(tlb->mm, pmd); |
---|
1767 | 1673 | spin_unlock(ptl); |
---|
.. | .. |
---|
1785 | 1691 | |
---|
1786 | 1692 | VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); |
---|
1787 | 1693 | entry = pmd_to_swp_entry(orig_pmd); |
---|
1788 | | - page = pfn_to_page(swp_offset(entry)); |
---|
| 1694 | + page = migration_entry_to_page(entry); |
---|
1789 | 1695 | flush_needed = 0; |
---|
1790 | 1696 | } else |
---|
1791 | 1697 | WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); |
---|
.. | .. |
---|
1833 | 1739 | } |
---|
1834 | 1740 | |
---|
1835 | 1741 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
---|
1836 | | - unsigned long new_addr, unsigned long old_end, |
---|
1837 | | - pmd_t *old_pmd, pmd_t *new_pmd) |
---|
| 1742 | + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) |
---|
1838 | 1743 | { |
---|
1839 | 1744 | spinlock_t *old_ptl, *new_ptl; |
---|
1840 | 1745 | pmd_t pmd; |
---|
1841 | 1746 | struct mm_struct *mm = vma->vm_mm; |
---|
1842 | 1747 | bool force_flush = false; |
---|
1843 | | - |
---|
1844 | | - if ((old_addr & ~HPAGE_PMD_MASK) || |
---|
1845 | | - (new_addr & ~HPAGE_PMD_MASK) || |
---|
1846 | | - old_end - old_addr < HPAGE_PMD_SIZE) |
---|
1847 | | - return false; |
---|
1848 | 1748 | |
---|
1849 | 1749 | /* |
---|
1850 | 1750 | * The destination pmd shouldn't be established, free_pgtables() |
---|
.. | .. |
---|
1857 | 1757 | |
---|
1858 | 1758 | /* |
---|
1859 | 1759 | * We don't have to worry about the ordering of src and dst |
---|
1860 | | - * ptlocks because exclusive mmap_sem prevents deadlock. |
---|
| 1760 | + * ptlocks because exclusive mmap_lock prevents deadlock. |
---|
1861 | 1761 | */ |
---|
1862 | 1762 | old_ptl = __pmd_trans_huge_lock(old_pmd, vma); |
---|
1863 | 1763 | if (old_ptl) { |
---|
.. | .. |
---|
1893 | 1793 | * - HPAGE_PMD_NR is protections changed and TLB flush necessary |
---|
1894 | 1794 | */ |
---|
1895 | 1795 | int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
---|
1896 | | - unsigned long addr, pgprot_t newprot, int prot_numa) |
---|
| 1796 | + unsigned long addr, pgprot_t newprot, unsigned long cp_flags) |
---|
1897 | 1797 | { |
---|
1898 | 1798 | struct mm_struct *mm = vma->vm_mm; |
---|
1899 | 1799 | spinlock_t *ptl; |
---|
1900 | 1800 | pmd_t entry; |
---|
1901 | 1801 | bool preserve_write; |
---|
1902 | 1802 | int ret; |
---|
| 1803 | + bool prot_numa = cp_flags & MM_CP_PROT_NUMA; |
---|
| 1804 | + bool uffd_wp = cp_flags & MM_CP_UFFD_WP; |
---|
| 1805 | + bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; |
---|
1903 | 1806 | |
---|
1904 | 1807 | ptl = __pmd_trans_huge_lock(pmd, vma); |
---|
1905 | 1808 | if (!ptl) |
---|
.. | .. |
---|
1923 | 1826 | newpmd = swp_entry_to_pmd(entry); |
---|
1924 | 1827 | if (pmd_swp_soft_dirty(*pmd)) |
---|
1925 | 1828 | newpmd = pmd_swp_mksoft_dirty(newpmd); |
---|
| 1829 | + if (pmd_swp_uffd_wp(*pmd)) |
---|
| 1830 | + newpmd = pmd_swp_mkuffd_wp(newpmd); |
---|
1926 | 1831 | set_pmd_at(mm, addr, pmd, newpmd); |
---|
1927 | 1832 | } |
---|
1928 | 1833 | goto unlock; |
---|
.. | .. |
---|
1941 | 1846 | goto unlock; |
---|
1942 | 1847 | |
---|
1943 | 1848 | /* |
---|
1944 | | - * In case prot_numa, we are under down_read(mmap_sem). It's critical |
---|
| 1849 | + * In case prot_numa, we are under mmap_read_lock(mm). It's critical |
---|
1945 | 1850 | * to not clear pmd intermittently to avoid race with MADV_DONTNEED |
---|
1946 | | - * which is also under down_read(mmap_sem): |
---|
| 1851 | + * which is also under mmap_read_lock(mm): |
---|
1947 | 1852 | * |
---|
1948 | 1853 | * CPU0: CPU1: |
---|
1949 | 1854 | * change_huge_pmd(prot_numa=1) |
---|
.. | .. |
---|
1966 | 1871 | entry = pmd_modify(entry, newprot); |
---|
1967 | 1872 | if (preserve_write) |
---|
1968 | 1873 | entry = pmd_mk_savedwrite(entry); |
---|
| 1874 | + if (uffd_wp) { |
---|
| 1875 | + entry = pmd_wrprotect(entry); |
---|
| 1876 | + entry = pmd_mkuffd_wp(entry); |
---|
| 1877 | + } else if (uffd_wp_resolve) { |
---|
| 1878 | + /* |
---|
| 1879 | + * Leave the write bit to be handled by PF interrupt |
---|
| 1880 | + * handler, then things like COW could be properly |
---|
| 1881 | + * handled. |
---|
| 1882 | + */ |
---|
| 1883 | + entry = pmd_clear_uffd_wp(entry); |
---|
| 1884 | + } |
---|
1969 | 1885 | ret = HPAGE_PMD_NR; |
---|
1970 | 1886 | set_pmd_at(mm, addr, pmd, entry); |
---|
1971 | 1887 | BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); |
---|
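change_huge_pmd() now takes a cp_flags word: besides prot_numa it understands MM_CP_UFFD_WP (write-protect the PMD and set the uffd-wp bit so userfaultfd sees the next write) and MM_CP_UFFD_WP_RESOLVE (clear the bit and leave restoring the write bit to the fault handler so COW still happens correctly). The bit handling, restated with stand-in macros for the pmd_wrprotect()/pmd_mkuffd_wp()/pmd_clear_uffd_wp() helpers:

    #include <stdint.h>

    #define X_PMD_WRITE   (1u << 0)   /* stand-in for the write-permission bit */
    #define X_PMD_UFFD_WP (1u << 1)   /* stand-in for the uffd-wp software bit */

    static uint32_t apply_uffd_wp(uint32_t pmd, int uffd_wp, int uffd_wp_resolve)
    {
        if (uffd_wp) {
            pmd &= ~X_PMD_WRITE;       /* force the next write to fault ...   */
            pmd |= X_PMD_UFFD_WP;      /* ... and tag it for userfaultfd      */
        } else if (uffd_wp_resolve) {
            pmd &= ~X_PMD_UFFD_WP;     /* drop the tag; the fault path decides
                                        * whether the write bit comes back    */
        }
        return pmd;
    }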
.. | .. |
---|
2012 | 1928 | int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, |
---|
2013 | 1929 | pud_t *pud, unsigned long addr) |
---|
2014 | 1930 | { |
---|
2015 | | - pud_t orig_pud; |
---|
2016 | 1931 | spinlock_t *ptl; |
---|
2017 | 1932 | |
---|
2018 | 1933 | ptl = __pud_trans_huge_lock(pud, vma); |
---|
.. | .. |
---|
2024 | 1939 | * pgtable_trans_huge_withdraw after finishing pudp related |
---|
2025 | 1940 | * operations. |
---|
2026 | 1941 | */ |
---|
2027 | | - orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud, |
---|
2028 | | - tlb->fullmm); |
---|
| 1942 | + pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); |
---|
2029 | 1943 | tlb_remove_pud_tlb_entry(tlb, pud, addr); |
---|
2030 | | - if (vma_is_dax(vma)) { |
---|
| 1944 | + if (vma_is_special_huge(vma)) { |
---|
2031 | 1945 | spin_unlock(ptl); |
---|
2032 | 1946 | /* No zero page support yet */ |
---|
2033 | 1947 | } else { |
---|
.. | .. |
---|
2054 | 1968 | unsigned long address) |
---|
2055 | 1969 | { |
---|
2056 | 1970 | spinlock_t *ptl; |
---|
2057 | | - struct mm_struct *mm = vma->vm_mm; |
---|
2058 | | - unsigned long haddr = address & HPAGE_PUD_MASK; |
---|
| 1971 | + struct mmu_notifier_range range; |
---|
2059 | 1972 | |
---|
2060 | | - mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE); |
---|
2061 | | - ptl = pud_lock(mm, pud); |
---|
| 1973 | + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, |
---|
| 1974 | + address & HPAGE_PUD_MASK, |
---|
| 1975 | + (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); |
---|
| 1976 | + mmu_notifier_invalidate_range_start(&range); |
---|
| 1977 | + ptl = pud_lock(vma->vm_mm, pud); |
---|
2062 | 1978 | if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) |
---|
2063 | 1979 | goto out; |
---|
2064 | | - __split_huge_pud_locked(vma, pud, haddr); |
---|
| 1980 | + __split_huge_pud_locked(vma, pud, range.start); |
---|
2065 | 1981 | |
---|
2066 | 1982 | out: |
---|
2067 | 1983 | spin_unlock(ptl); |
---|
.. | .. |
---|
2069 | 1985 | * No need to double call mmu_notifier->invalidate_range() callback as |
---|
2070 | 1986 | * the above pudp_huge_clear_flush_notify() did already call it. |
---|
2071 | 1987 | */ |
---|
2072 | | - mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + |
---|
2073 | | - HPAGE_PUD_SIZE); |
---|
| 1988 | + mmu_notifier_invalidate_range_only_end(&range); |
---|
2074 | 1989 | } |
---|
2075 | 1990 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
---|
2076 | 1991 | |
---|
.. | .. |
---|
2079 | 1994 | { |
---|
2080 | 1995 | struct mm_struct *mm = vma->vm_mm; |
---|
2081 | 1996 | pgtable_t pgtable; |
---|
2082 | | - pmd_t _pmd; |
---|
| 1997 | + pmd_t _pmd, old_pmd; |
---|
2083 | 1998 | int i; |
---|
2084 | 1999 | |
---|
2085 | 2000 | /* |
---|
.. | .. |
---|
2090 | 2005 | * |
---|
2091 | 2006 | * See Documentation/vm/mmu_notifier.rst |
---|
2092 | 2007 | */ |
---|
2093 | | - pmdp_huge_clear_flush(vma, haddr, pmd); |
---|
| 2008 | + old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); |
---|
2094 | 2009 | |
---|
2095 | 2010 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
---|
2096 | 2011 | pmd_populate(mm, &_pmd, pgtable); |
---|
.. | .. |
---|
2099 | 2014 | pte_t *pte, entry; |
---|
2100 | 2015 | entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); |
---|
2101 | 2016 | entry = pte_mkspecial(entry); |
---|
| 2017 | + if (pmd_uffd_wp(old_pmd)) |
---|
| 2018 | + entry = pte_mkuffd_wp(entry); |
---|
2102 | 2019 | pte = pte_offset_map(&_pmd, haddr); |
---|
2103 | 2020 | VM_BUG_ON(!pte_none(*pte)); |
---|
2104 | 2021 | set_pte_at(mm, haddr, pte, entry); |
---|
.. | .. |
---|
2115 | 2032 | struct page *page; |
---|
2116 | 2033 | pgtable_t pgtable; |
---|
2117 | 2034 | pmd_t old_pmd, _pmd; |
---|
2118 | | - bool young, write, soft_dirty, pmd_migration = false; |
---|
| 2035 | + bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; |
---|
2119 | 2036 | unsigned long addr; |
---|
2120 | 2037 | int i; |
---|
| 2038 | + bool success = false; |
---|
2121 | 2039 | |
---|
2122 | 2040 | VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); |
---|
2123 | 2041 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
---|
.. | .. |
---|
2135 | 2053 | */ |
---|
2136 | 2054 | if (arch_needs_pgtable_deposit()) |
---|
2137 | 2055 | zap_deposited_table(mm, pmd); |
---|
2138 | | - if (vma_is_dax(vma)) |
---|
| 2056 | + if (vma_is_special_huge(vma)) |
---|
2139 | 2057 | return; |
---|
2140 | 2058 | if (unlikely(is_pmd_migration_entry(old_pmd))) { |
---|
2141 | 2059 | swp_entry_t entry; |
---|
.. | .. |
---|
2176 | 2094 | * free), userland could trigger a small page size TLB miss on the |
---|
2177 | 2095 | * small sized TLB while the hugepage TLB entry is still established in |
---|
2178 | 2096 | * the huge TLB. Some CPU doesn't like that. |
---|
2179 | | - * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum |
---|
2180 | | - * 383 on page 93. Intel should be safe but is also warns that it's |
---|
| 2097 | + * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum |
---|
| 2098 | + * 383 on page 105. Intel should be safe but also warns that it's |
---|
2181 | 2099 | * only safe if the permission and cache attributes of the two entries |
---|
2182 | 2100 | * loaded in the two TLB is identical (which should be the case here). |
---|
2183 | 2101 | * But it is generally safer to never allow small and huge TLB entries |
---|
.. | .. |
---|
2195 | 2113 | swp_entry_t entry; |
---|
2196 | 2114 | |
---|
2197 | 2115 | entry = pmd_to_swp_entry(old_pmd); |
---|
2198 | | - page = pfn_to_page(swp_offset(entry)); |
---|
| 2116 | + page = migration_entry_to_page(entry); |
---|
2199 | 2117 | write = is_write_migration_entry(entry); |
---|
2200 | 2118 | young = false; |
---|
2201 | 2119 | soft_dirty = pmd_swp_soft_dirty(old_pmd); |
---|
| 2120 | + uffd_wp = pmd_swp_uffd_wp(old_pmd); |
---|
2202 | 2121 | } else { |
---|
2203 | 2122 | page = pmd_page(old_pmd); |
---|
2204 | 2123 | if (pmd_dirty(old_pmd)) |
---|
.. | .. |
---|
2206 | 2125 | write = pmd_write(old_pmd); |
---|
2207 | 2126 | young = pmd_young(old_pmd); |
---|
2208 | 2127 | soft_dirty = pmd_soft_dirty(old_pmd); |
---|
| 2128 | + uffd_wp = pmd_uffd_wp(old_pmd); |
---|
2209 | 2129 | } |
---|
2210 | 2130 | VM_BUG_ON_PAGE(!page_count(page), page); |
---|
2211 | 2131 | page_ref_add(page, HPAGE_PMD_NR - 1); |
---|
.. | .. |
---|
2230 | 2150 | entry = swp_entry_to_pte(swp_entry); |
---|
2231 | 2151 | if (soft_dirty) |
---|
2232 | 2152 | entry = pte_swp_mksoft_dirty(entry); |
---|
| 2153 | + if (uffd_wp) |
---|
| 2154 | + entry = pte_swp_mkuffd_wp(entry); |
---|
2233 | 2155 | } else { |
---|
2234 | 2156 | entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); |
---|
2235 | | - entry = maybe_mkwrite(entry, vma); |
---|
| 2157 | + entry = maybe_mkwrite(entry, vma->vm_flags); |
---|
2236 | 2158 | if (!write) |
---|
2237 | 2159 | entry = pte_wrprotect(entry); |
---|
2238 | 2160 | if (!young) |
---|
2239 | 2161 | entry = pte_mkold(entry); |
---|
2240 | 2162 | if (soft_dirty) |
---|
2241 | 2163 | entry = pte_mksoft_dirty(entry); |
---|
| 2164 | + if (uffd_wp) |
---|
| 2165 | + entry = pte_mkuffd_wp(entry); |
---|
2242 | 2166 | } |
---|
2243 | 2167 | pte = pte_offset_map(&_pmd, addr); |
---|
2244 | 2168 | BUG_ON(!pte_none(*pte)); |
---|
2245 | 2169 | set_pte_at(mm, addr, pte, entry); |
---|
2246 | | - if (!pmd_migration) |
---|
2247 | | - atomic_inc(&page[i]._mapcount); |
---|
| 2170 | + if (!pmd_migration) { |
---|
| 2171 | + trace_android_vh_update_page_mapcount(&page[i], true, |
---|
| 2172 | + false, NULL, &success); |
---|
| 2173 | + if (!success) |
---|
| 2174 | + atomic_inc(&page[i]._mapcount); |
---|
| 2175 | + } |
---|
2248 | 2176 | pte_unmap(pte); |
---|
2249 | 2177 | } |
---|
2250 | 2178 | |
---|
.. | .. |
---|
2255 | 2183 | */ |
---|
2256 | 2184 | if (compound_mapcount(page) > 1 && |
---|
2257 | 2185 | !TestSetPageDoubleMap(page)) { |
---|
2258 | | - for (i = 0; i < HPAGE_PMD_NR; i++) |
---|
2259 | | - atomic_inc(&page[i]._mapcount); |
---|
| 2186 | + for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
| 2187 | + trace_android_vh_update_page_mapcount(&page[i], true, |
---|
| 2188 | + false, NULL, &success); |
---|
| 2189 | + if (!success) |
---|
| 2190 | + atomic_inc(&page[i]._mapcount); |
---|
| 2191 | + } |
---|
2260 | 2192 | } |
---|
2261 | 2193 | |
---|
2262 | 2194 | lock_page_memcg(page); |
---|
.. | .. |
---|
2265 | 2197 | __dec_lruvec_page_state(page, NR_ANON_THPS); |
---|
2266 | 2198 | if (TestClearPageDoubleMap(page)) { |
---|
2267 | 2199 | /* No need in mapcount reference anymore */ |
---|
2268 | | - for (i = 0; i < HPAGE_PMD_NR; i++) |
---|
2269 | | - atomic_dec(&page[i]._mapcount); |
---|
| 2200 | + for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
| 2201 | + trace_android_vh_update_page_mapcount(&page[i], |
---|
| 2202 | + false, false, NULL, &success); |
---|
| 2203 | + if (!success) |
---|
| 2204 | + atomic_dec(&page[i]._mapcount); |
---|
| 2205 | + } |
---|
2270 | 2206 | } |
---|
2271 | 2207 | } |
---|
2272 | 2208 | unlock_page_memcg(page); |
---|
.. | .. |
---|
2287 | 2223 | unsigned long address, bool freeze, struct page *page) |
---|
2288 | 2224 | { |
---|
2289 | 2225 | spinlock_t *ptl; |
---|
2290 | | - struct mm_struct *mm = vma->vm_mm; |
---|
2291 | | - unsigned long haddr = address & HPAGE_PMD_MASK; |
---|
| 2226 | + struct mmu_notifier_range range; |
---|
2292 | 2227 | bool do_unlock_page = false; |
---|
2293 | 2228 | pmd_t _pmd; |
---|
2294 | 2229 | |
---|
2295 | | - mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); |
---|
2296 | | - ptl = pmd_lock(mm, pmd); |
---|
| 2230 | + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, |
---|
| 2231 | + address & HPAGE_PMD_MASK, |
---|
| 2232 | + (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); |
---|
| 2233 | + mmu_notifier_invalidate_range_start(&range); |
---|
| 2234 | + ptl = pmd_lock(vma->vm_mm, pmd); |
---|
2297 | 2235 | |
---|
2298 | 2236 | /* |
---|
2299 | 2237 | * If the caller asks to set up migration entries, we need a page to check
---|
.. | .. |
---|
2339 | 2277 | clear_page_mlock(page); |
---|
2340 | 2278 | } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) |
---|
2341 | 2279 | goto out; |
---|
2342 | | - __split_huge_pmd_locked(vma, pmd, haddr, freeze); |
---|
| 2280 | + __split_huge_pmd_locked(vma, pmd, range.start, freeze); |
---|
2343 | 2281 | out: |
---|
2344 | 2282 | spin_unlock(ptl); |
---|
2345 | 2283 | if (do_unlock_page) |
---|
.. | .. |
---|
2357 | 2295 | * any further changes to individual pte will notify. So no need |
---|
2358 | 2296 | * to call mmu_notifier->invalidate_range() |
---|
2359 | 2297 | */ |
---|
2360 | | - mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + |
---|
2361 | | - HPAGE_PMD_SIZE); |
---|
| 2298 | + mmu_notifier_invalidate_range_only_end(&range); |
---|
2362 | 2299 | } |
---|
2363 | 2300 | |
---|
2364 | 2301 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
---|
.. | .. |
---|
2413 | 2350 | |
---|
2414 | 2351 | /* |
---|
2415 | 2352 | * If we're also updating the vma->vm_next->vm_start, if the new |
---|
2416 | | - * vm_next->vm_start isn't page aligned and it could previously |
---|
| 2353 | + * vm_next->vm_start isn't hpage aligned and it could previously |
---|
2417 | 2354 | * contain a hugepage: check if we need to split a huge pmd.
---|
2418 | 2355 | */ |
---|
2419 | 2356 | if (adjust_next > 0) { |
---|
2420 | 2357 | struct vm_area_struct *next = vma->vm_next; |
---|
2421 | 2358 | unsigned long nstart = next->vm_start; |
---|
2422 | | - nstart += adjust_next << PAGE_SHIFT; |
---|
| 2359 | + nstart += adjust_next; |
---|
2423 | 2360 | if (nstart & ~HPAGE_PMD_MASK && |
---|
2424 | 2361 | (nstart & HPAGE_PMD_MASK) >= next->vm_start && |
---|
2425 | 2362 | (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) |
---|
.. | .. |
---|
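The three-part test above decides whether the adjusted vm_next->vm_start is unaligned while the PMD-sized region containing it still fits entirely inside the next VMA, in which case a huge pmd may have to be split. Below is a minimal standalone sketch of that mask arithmetic, assuming the usual 2 MiB HPAGE_PMD_SIZE and made-up VMA bounds (both are illustrative assumptions, not taken from this file):

```c
/*
 * Standalone sketch of the alignment test used above, assuming a
 * 2 MiB PMD-sized huge page (the usual x86_64 value). The constants
 * and the example VMA bounds are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

#define HPAGE_PMD_SIZE (2UL << 20)              /* assumed 2 MiB */
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

/* Would the huge pmd covering 'nstart' have to be split? */
static bool needs_pmd_split(unsigned long nstart,
			    unsigned long vm_start, unsigned long vm_end)
{
	return (nstart & ~HPAGE_PMD_MASK) &&            /* not hpage aligned   */
	       (nstart & HPAGE_PMD_MASK) >= vm_start && /* pmd starts in vma   */
	       (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vm_end;
}

int main(void)
{
	unsigned long vm_start = 0x200000, vm_end = 0x800000; /* hypothetical */

	/* 0x310000 is mid-pmd and the whole pmd fits in the vma: split. */
	printf("0x310000 -> %d\n", needs_pmd_split(0x310000, vm_start, vm_end));
	/* 0x400000 is already pmd aligned: nothing to do. */
	printf("0x400000 -> %d\n", needs_pmd_split(0x400000, vm_start, vm_end));
	return 0;
}
```

Running it prints 1 for the unaligned mid-VMA address and 0 for the already-aligned one.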
2429 | 2366 | |
---|
2430 | 2367 | static void unmap_page(struct page *page) |
---|
2431 | 2368 | { |
---|
2432 | | - enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | |
---|
2433 | | - TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC; |
---|
| 2369 | + enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC | |
---|
| 2370 | + TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; |
---|
2434 | 2371 | |
---|
2435 | 2372 | VM_BUG_ON_PAGE(!PageHead(page), page); |
---|
2436 | 2373 | |
---|
.. | .. |
---|
2442 | 2379 | VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); |
---|
2443 | 2380 | } |
---|
2444 | 2381 | |
---|
2445 | | -static void remap_page(struct page *page) |
---|
| 2382 | +static void remap_page(struct page *page, unsigned int nr) |
---|
2446 | 2383 | { |
---|
2447 | 2384 | int i; |
---|
2448 | 2385 | if (PageTransHuge(page)) { |
---|
2449 | 2386 | remove_migration_ptes(page, page, true); |
---|
2450 | 2387 | } else { |
---|
2451 | | - for (i = 0; i < HPAGE_PMD_NR; i++) |
---|
| 2388 | + for (i = 0; i < nr; i++) |
---|
2452 | 2389 | remove_migration_ptes(page + i, page + i, true); |
---|
2453 | 2390 | } |
---|
2454 | 2391 | } |
---|
.. | .. |
---|
2477 | 2414 | (1L << PG_workingset) | |
---|
2478 | 2415 | (1L << PG_locked) | |
---|
2479 | 2416 | (1L << PG_unevictable) | |
---|
| 2417 | +#ifdef CONFIG_64BIT |
---|
| 2418 | + (1L << PG_arch_2) | |
---|
| 2419 | +#endif |
---|
2480 | 2420 | (1L << PG_dirty))); |
---|
2481 | 2421 | |
---|
2482 | 2422 | /* ->mapping in first tail page is compound_mapcount */ |
---|
.. | .. |
---|
2519 | 2459 | pgoff_t end, unsigned long flags) |
---|
2520 | 2460 | { |
---|
2521 | 2461 | struct page *head = compound_head(page); |
---|
2522 | | - struct zone *zone = page_zone(head); |
---|
| 2462 | + pg_data_t *pgdat = page_pgdat(head); |
---|
2523 | 2463 | struct lruvec *lruvec; |
---|
| 2464 | + struct address_space *swap_cache = NULL; |
---|
| 2465 | + unsigned long offset = 0; |
---|
| 2466 | + unsigned int nr = thp_nr_pages(head); |
---|
2524 | 2467 | int i; |
---|
2525 | 2468 | |
---|
2526 | | - lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); |
---|
| 2469 | + lruvec = mem_cgroup_page_lruvec(head, pgdat); |
---|
2527 | 2470 | |
---|
2528 | 2471 | /* complete memcg works before add pages to LRU */ |
---|
2529 | | - mem_cgroup_split_huge_fixup(head); |
---|
| 2472 | + split_page_memcg(head, nr); |
---|
2530 | 2473 | |
---|
2531 | | - for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { |
---|
| 2474 | + if (PageAnon(head) && PageSwapCache(head)) { |
---|
| 2475 | + swp_entry_t entry = { .val = page_private(head) }; |
---|
| 2476 | + |
---|
| 2477 | + offset = swp_offset(entry); |
---|
| 2478 | + swap_cache = swap_address_space(entry); |
---|
| 2479 | + xa_lock(&swap_cache->i_pages); |
---|
| 2480 | + } |
---|
| 2481 | + |
---|
| 2482 | + for (i = nr - 1; i >= 1; i--) { |
---|
2532 | 2483 | __split_huge_page_tail(head, i, lruvec, list); |
---|
2533 | 2484 | /* Some pages can be beyond i_size: drop them from page cache */ |
---|
2534 | 2485 | if (head[i].index >= end) { |
---|
.. | .. |
---|
2537 | 2488 | if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) |
---|
2538 | 2489 | shmem_uncharge(head->mapping->host, 1); |
---|
2539 | 2490 | put_page(head + i); |
---|
| 2491 | + } else if (!PageAnon(page)) { |
---|
| 2492 | + __xa_store(&head->mapping->i_pages, head[i].index, |
---|
| 2493 | + head + i, 0); |
---|
| 2494 | + } else if (swap_cache) { |
---|
| 2495 | + __xa_store(&swap_cache->i_pages, offset + i, |
---|
| 2496 | + head + i, 0); |
---|
2540 | 2497 | } |
---|
2541 | 2498 | } |
---|
2542 | 2499 | |
---|
2543 | 2500 | ClearPageCompound(head); |
---|
2544 | 2501 | |
---|
2545 | | - split_page_owner(head, HPAGE_PMD_ORDER); |
---|
| 2502 | + split_page_owner(head, nr); |
---|
2546 | 2503 | |
---|
2547 | 2504 | /* See comment in __split_huge_page_tail() */ |
---|
2548 | 2505 | if (PageAnon(head)) { |
---|
2549 | | - /* Additional pin to radix tree of swap cache */ |
---|
2550 | | - if (PageSwapCache(head)) |
---|
| 2506 | + /* Additional pin to swap cache */ |
---|
| 2507 | + if (PageSwapCache(head)) { |
---|
2551 | 2508 | page_ref_add(head, 2); |
---|
2552 | | - else |
---|
| 2509 | + xa_unlock(&swap_cache->i_pages); |
---|
| 2510 | + } else { |
---|
2553 | 2511 | page_ref_inc(head); |
---|
| 2512 | + } |
---|
2554 | 2513 | } else { |
---|
2555 | | - /* Additional pin to radix tree */ |
---|
| 2514 | + /* Additional pin to page cache */ |
---|
2556 | 2515 | page_ref_add(head, 2); |
---|
2557 | 2516 | xa_unlock(&head->mapping->i_pages); |
---|
2558 | 2517 | } |
---|
2559 | 2518 | |
---|
2560 | | - spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); |
---|
| 2519 | + spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
---|
2561 | 2520 | |
---|
2562 | | - remap_page(head); |
---|
| 2521 | + remap_page(head, nr); |
---|
2563 | 2522 | |
---|
2564 | | - for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
| 2523 | + if (PageSwapCache(head)) { |
---|
| 2524 | + swp_entry_t entry = { .val = page_private(head) }; |
---|
| 2525 | + |
---|
| 2526 | + split_swap_cluster(entry); |
---|
| 2527 | + } |
---|
| 2528 | + |
---|
| 2529 | + for (i = 0; i < nr; i++) { |
---|
2565 | 2530 | struct page *subpage = head + i; |
---|
2566 | 2531 | if (subpage == page) |
---|
2567 | 2532 | continue; |
---|
.. | .. |
---|
2580 | 2545 | |
---|
2581 | 2546 | int total_mapcount(struct page *page) |
---|
2582 | 2547 | { |
---|
2583 | | - int i, compound, ret; |
---|
| 2548 | + int i, compound, nr, ret; |
---|
2584 | 2549 | |
---|
2585 | 2550 | VM_BUG_ON_PAGE(PageTail(page), page); |
---|
2586 | 2551 | |
---|
.. | .. |
---|
2588 | 2553 | return atomic_read(&page->_mapcount) + 1; |
---|
2589 | 2554 | |
---|
2590 | 2555 | compound = compound_mapcount(page); |
---|
| 2556 | + nr = compound_nr(page); |
---|
2591 | 2557 | if (PageHuge(page)) |
---|
2592 | 2558 | return compound; |
---|
2593 | 2559 | ret = compound; |
---|
2594 | | - for (i = 0; i < HPAGE_PMD_NR; i++) |
---|
| 2560 | + for (i = 0; i < nr; i++) |
---|
2595 | 2561 | ret += atomic_read(&page[i]._mapcount) + 1; |
---|
2596 | 2562 | /* File pages have compound_mapcount included in _mapcount */
---|
2597 | 2563 | if (!PageAnon(page)) |
---|
2598 | | - return ret - compound * HPAGE_PMD_NR; |
---|
| 2564 | + return ret - compound * nr; |
---|
2599 | 2565 | if (PageDoubleMap(page)) |
---|
2600 | | - ret -= HPAGE_PMD_NR; |
---|
| 2566 | + ret -= nr; |
---|
2601 | 2567 | return ret; |
---|
2602 | 2568 | } |
---|
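total_mapcount() above starts from compound_mapcount(), adds each subpage's off-by-one _mapcount, then corrects for double counting: file pages fold the compound mapcount into every subpage, and anon PageDoubleMap pages carry one extra count per subpage. A userspace sketch of the same arithmetic, with invented counter values and a pretend 4-subpage THP (only the formula is taken from the code; the inputs are made up):

```c
/*
 * Userspace sketch of the total_mapcount() arithmetic above, using
 * plain ints instead of struct page. The mapcount values, the subpage
 * count and the anon/file/DoubleMap flags are made-up inputs.
 */
#include <stdio.h>
#include <stdbool.h>

static int total_mapcount_sketch(int compound, const int *subpage_mapcount,
				 int nr, bool is_anon, bool double_map)
{
	int i, ret = compound;

	/* subpage _mapcount is stored off by one, hence the "+ 1" */
	for (i = 0; i < nr; i++)
		ret += subpage_mapcount[i] + 1;
	if (!is_anon)		/* file: compound is folded into each subpage */
		return ret - compound * nr;
	if (double_map)		/* anon DoubleMap: one extra count per subpage */
		ret -= nr;
	return ret;
}

int main(void)
{
	enum { NR = 4 };		/* pretend-THP with 4 subpages */
	int subs[NR] = { 1, 0, 0, 0 };	/* arbitrary example counters */

	printf("anon, DoubleMap: %d\n",
	       total_mapcount_sketch(1, subs, NR, true, true));
	printf("file:            %d\n",
	       total_mapcount_sketch(1, subs, NR, false, false));
	return 0;
}
```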
2603 | 2569 | |
---|
.. | .. |
---|
2642 | 2608 | page = compound_head(page); |
---|
2643 | 2609 | |
---|
2644 | 2610 | _total_mapcount = ret = 0; |
---|
2645 | | - for (i = 0; i < HPAGE_PMD_NR; i++) { |
---|
| 2611 | + for (i = 0; i < thp_nr_pages(page); i++) { |
---|
2646 | 2612 | mapcount = atomic_read(&page[i]._mapcount) + 1; |
---|
2647 | 2613 | ret = max(ret, mapcount); |
---|
2648 | 2614 | _total_mapcount += mapcount; |
---|
2649 | 2615 | } |
---|
2650 | 2616 | if (PageDoubleMap(page)) { |
---|
2651 | 2617 | ret -= 1; |
---|
2652 | | - _total_mapcount -= HPAGE_PMD_NR; |
---|
| 2618 | + _total_mapcount -= thp_nr_pages(page); |
---|
2653 | 2619 | } |
---|
2654 | 2620 | mapcount = compound_mapcount(page); |
---|
2655 | 2621 | ret += mapcount; |
---|
.. | .. |
---|
2664 | 2630 | { |
---|
2665 | 2631 | int extra_pins; |
---|
2666 | 2632 | |
---|
2667 | | - /* Additional pins from radix tree */ |
---|
| 2633 | + /* Additional pins from page cache */ |
---|
2668 | 2634 | if (PageAnon(page)) |
---|
2669 | | - extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0; |
---|
| 2635 | + extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0; |
---|
2670 | 2636 | else |
---|
2671 | | - extra_pins = HPAGE_PMD_NR; |
---|
| 2637 | + extra_pins = thp_nr_pages(page); |
---|
2672 | 2638 | if (pextra_pins) |
---|
2673 | 2639 | *pextra_pins = extra_pins; |
---|
2674 | 2640 | return total_mapcount(page) == page_count(page) - extra_pins - 1; |
---|
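can_split_huge_page() only reports success when, apart from the caller's own pin and the expected page-cache or swap-cache pins (extra_pins), every remaining reference comes from mappings; that is what the total_mapcount(page) == page_count(page) - extra_pins - 1 equality encodes. A sketch of that accounting with made-up reference counts, where nr stands in for thp_nr_pages():

```c
/*
 * Sketch of the can_split_huge_page() pin accounting, with made-up
 * reference counts. 'nr' stands in for thp_nr_pages().
 */
#include <stdio.h>
#include <stdbool.h>

static bool can_split_sketch(int total_mapcount, int page_count,
			     int nr, bool anon, bool in_swap_or_page_cache)
{
	/* anon pages only carry extra pins when they sit in the swap cache */
	int extra_pins = anon ? (in_swap_or_page_cache ? nr : 0) : nr;

	/* every remaining reference must come from the mappings themselves */
	return total_mapcount == page_count - extra_pins - 1;
}

int main(void)
{
	/* anon THP, not in swap cache, mapped twice, caller holds one pin */
	printf("%d\n", can_split_sketch(2, 3, 512, true, false));  /* 1: ok   */
	/* same page but someone holds an extra get_page() reference */
	printf("%d\n", can_split_sketch(2, 4, 512, true, false));  /* 0: busy */
	return 0;
}
```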
.. | .. |
---|
2697 | 2663 | { |
---|
2698 | 2664 | struct page *head = compound_head(page); |
---|
2699 | 2665 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); |
---|
| 2666 | + struct deferred_split *ds_queue = get_deferred_split_queue(head); |
---|
2700 | 2667 | struct anon_vma *anon_vma = NULL; |
---|
2701 | 2668 | struct address_space *mapping = NULL; |
---|
2702 | 2669 | int extra_pins, ret; |
---|
2703 | | - bool mlocked; |
---|
2704 | 2670 | unsigned long flags; |
---|
2705 | 2671 | pgoff_t end; |
---|
2706 | 2672 | |
---|
2707 | 2673 | VM_BUG_ON_PAGE(is_huge_zero_page(head), head); |
---|
2708 | | - VM_BUG_ON_PAGE(!PageLocked(page), page); |
---|
2709 | | - VM_BUG_ON_PAGE(!PageCompound(page), page); |
---|
| 2674 | + VM_BUG_ON_PAGE(!PageLocked(head), head); |
---|
| 2675 | + VM_BUG_ON_PAGE(!PageCompound(head), head); |
---|
2710 | 2676 | |
---|
2711 | | - if (PageWriteback(page)) |
---|
| 2677 | + if (PageWriteback(head)) |
---|
2712 | 2678 | return -EBUSY; |
---|
2713 | 2679 | |
---|
2714 | 2680 | if (PageAnon(head)) { |
---|
2715 | 2681 | /* |
---|
2716 | | - * The caller does not necessarily hold an mmap_sem that would |
---|
| 2682 | + * The caller does not necessarily hold an mmap_lock that would |
---|
2717 | 2683 | * prevent the anon_vma disappearing, so we first take a
---|
2718 | 2684 | * reference to it and then lock the anon_vma for write. This |
---|
2719 | 2685 | * is similar to page_lock_anon_vma_read except the write lock |
---|
.. | .. |
---|
2759 | 2725 | goto out_unlock; |
---|
2760 | 2726 | } |
---|
2761 | 2727 | |
---|
2762 | | - mlocked = PageMlocked(page); |
---|
2763 | 2728 | unmap_page(head); |
---|
2764 | 2729 | |
---|
2765 | | - /* Make sure the page is not on per-CPU pagevec as it takes pin */ |
---|
2766 | | - if (mlocked) |
---|
2767 | | - lru_add_drain(); |
---|
2768 | | - |
---|
2769 | 2730 | /* prevent PageLRU from going away under us, and freeze lru stats */
---|
2770 | | - spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags); |
---|
| 2731 | + spin_lock_irqsave(&pgdata->lru_lock, flags); |
---|
2771 | 2732 | |
---|
2772 | 2733 | if (mapping) { |
---|
2773 | | - void **pslot; |
---|
| 2734 | + XA_STATE(xas, &mapping->i_pages, page_index(head)); |
---|
2774 | 2735 | |
---|
2775 | | - xa_lock(&mapping->i_pages); |
---|
2776 | | - pslot = radix_tree_lookup_slot(&mapping->i_pages, |
---|
2777 | | - page_index(head)); |
---|
2778 | 2736 | /* |
---|
2779 | | - * Check if the head page is present in radix tree. |
---|
| 2737 | + * Check if the head page is present in page cache. |
---|
2780 | 2738 | * We assume all tails are present too, if the head is there.
---|
2781 | 2739 | */ |
---|
2782 | | - if (radix_tree_deref_slot_protected(pslot, |
---|
2783 | | - &mapping->i_pages.xa_lock) != head) |
---|
| 2740 | + xa_lock(&mapping->i_pages); |
---|
| 2741 | + if (xas_load(&xas) != head) |
---|
2784 | 2742 | goto fail; |
---|
2785 | 2743 | } |
---|
2786 | 2744 | |
---|
2787 | 2745 | /* Prevent deferred_split_scan() touching ->_refcount */ |
---|
2788 | | - spin_lock(&pgdata->split_queue_lock); |
---|
| 2746 | + spin_lock(&ds_queue->split_queue_lock); |
---|
2789 | 2747 | if (page_ref_freeze(head, 1 + extra_pins)) { |
---|
2790 | 2748 | if (!list_empty(page_deferred_list(head))) { |
---|
2791 | | - pgdata->split_queue_len--; |
---|
| 2749 | + ds_queue->split_queue_len--; |
---|
2792 | 2750 | list_del(page_deferred_list(head)); |
---|
2793 | 2751 | } |
---|
2794 | | - if (mapping) |
---|
2795 | | - __dec_node_page_state(page, NR_SHMEM_THPS); |
---|
2796 | | - spin_unlock(&pgdata->split_queue_lock); |
---|
2797 | | - __split_huge_page(page, list, end, flags); |
---|
2798 | | - if (PageSwapCache(head)) { |
---|
2799 | | - swp_entry_t entry = { .val = page_private(head) }; |
---|
| 2752 | + spin_unlock(&ds_queue->split_queue_lock); |
---|
| 2753 | + if (mapping) { |
---|
| 2754 | + if (PageSwapBacked(head)) |
---|
| 2755 | + __dec_node_page_state(head, NR_SHMEM_THPS); |
---|
| 2756 | + else |
---|
| 2757 | + __dec_node_page_state(head, NR_FILE_THPS); |
---|
| 2758 | + } |
---|
2800 | 2759 | |
---|
2801 | | - ret = split_swap_cluster(entry); |
---|
2802 | | - } else |
---|
2803 | | - ret = 0; |
---|
| 2760 | + __split_huge_page(page, list, end, flags); |
---|
| 2761 | + ret = 0; |
---|
2804 | 2762 | } else { |
---|
2805 | | - spin_unlock(&pgdata->split_queue_lock); |
---|
| 2763 | + spin_unlock(&ds_queue->split_queue_lock); |
---|
2806 | 2764 | fail: |
---|
2807 | 2765 | if (mapping) |
---|
2808 | 2766 | xa_unlock(&mapping->i_pages); |
---|
2809 | | - spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); |
---|
2810 | | - remap_page(head); |
---|
| 2767 | + spin_unlock_irqrestore(&pgdata->lru_lock, flags); |
---|
| 2768 | + remap_page(head, thp_nr_pages(head)); |
---|
2811 | 2769 | ret = -EBUSY; |
---|
2812 | 2770 | } |
---|
2813 | 2771 | |
---|
.. | .. |
---|
2825 | 2783 | |
---|
2826 | 2784 | void free_transhuge_page(struct page *page) |
---|
2827 | 2785 | { |
---|
2828 | | - struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); |
---|
| 2786 | + struct deferred_split *ds_queue = get_deferred_split_queue(page); |
---|
2829 | 2787 | unsigned long flags; |
---|
2830 | 2788 | |
---|
2831 | | - spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
---|
| 2789 | + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
---|
2832 | 2790 | if (!list_empty(page_deferred_list(page))) { |
---|
2833 | | - pgdata->split_queue_len--; |
---|
| 2791 | + ds_queue->split_queue_len--; |
---|
2834 | 2792 | list_del(page_deferred_list(page)); |
---|
2835 | 2793 | } |
---|
2836 | | - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
---|
| 2794 | + spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
---|
2837 | 2795 | free_compound_page(page); |
---|
2838 | 2796 | } |
---|
2839 | 2797 | |
---|
2840 | 2798 | void deferred_split_huge_page(struct page *page) |
---|
2841 | 2799 | { |
---|
2842 | | - struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); |
---|
| 2800 | + struct deferred_split *ds_queue = get_deferred_split_queue(page); |
---|
| 2801 | +#ifdef CONFIG_MEMCG |
---|
| 2802 | + struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; |
---|
| 2803 | +#endif |
---|
2843 | 2804 | unsigned long flags; |
---|
2844 | 2805 | |
---|
2845 | 2806 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
---|
2846 | 2807 | |
---|
2847 | | - spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
---|
| 2808 | + /* |
---|
| 2809 | + * The try_to_unmap() in the page reclaim path might reach here too;
---|
| 2810 | + * this may race with us and corrupt the deferred split queue.
---|
| 2811 | + * And, if page reclaim is already handling the same page, it is
---|
| 2812 | + * unnecessary to handle it again in the shrinker.
---|
| 2813 | + * |
---|
| 2814 | + * Check PageSwapCache to determine if the page is being |
---|
| 2815 | + * handled by page reclaim since THP swap would add the page into |
---|
| 2816 | + * swap cache before calling try_to_unmap(). |
---|
| 2817 | + */ |
---|
| 2818 | + if (PageSwapCache(page)) |
---|
| 2819 | + return; |
---|
| 2820 | + |
---|
| 2821 | + if (!list_empty(page_deferred_list(page))) |
---|
| 2822 | + return; |
---|
| 2823 | + |
---|
| 2824 | + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
---|
2848 | 2825 | if (list_empty(page_deferred_list(page))) { |
---|
2849 | 2826 | count_vm_event(THP_DEFERRED_SPLIT_PAGE); |
---|
2850 | | - list_add_tail(page_deferred_list(page), &pgdata->split_queue); |
---|
2851 | | - pgdata->split_queue_len++; |
---|
| 2827 | + list_add_tail(page_deferred_list(page), &ds_queue->split_queue); |
---|
| 2828 | + ds_queue->split_queue_len++; |
---|
| 2829 | +#ifdef CONFIG_MEMCG |
---|
| 2830 | + if (memcg) |
---|
| 2831 | + memcg_set_shrinker_bit(memcg, page_to_nid(page), |
---|
| 2832 | + deferred_split_shrinker.id); |
---|
| 2833 | +#endif |
---|
2852 | 2834 | } |
---|
2853 | | - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
---|
| 2835 | + spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
---|
2854 | 2836 | } |
---|
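deferred_split_huge_page() above queues a page for the shrinker at most once: the unlocked list_empty() test is only a fast path, and the decision is repeated under split_queue_lock before the page is actually added. A minimal userspace sketch of that enqueue-once pattern, with a pthread mutex standing in for the spinlock and invented list/page types:

```c
/*
 * Minimal userspace sketch of the "queue a page for deferred work at
 * most once" pattern used by deferred_split_huge_page(). A pthread
 * mutex stands in for split_queue_lock and a hand-rolled doubly
 * linked list stands in for page_deferred_list(). All names invented.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

struct fake_page { struct list_node deferred; };

static struct {
	pthread_mutex_t lock;
	struct list_node head;
	unsigned long len;
} ds_queue = { PTHREAD_MUTEX_INITIALIZER, { &ds_queue.head, &ds_queue.head }, 0 };

static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add_tail(struct list_node *new, struct list_node *head)
{
	new->prev = head->prev; new->next = head;
	head->prev->next = new; head->prev = new;
}

static void deferred_split_sketch(struct fake_page *page)
{
	/* unlocked fast path: already queued, nothing to do */
	if (!list_empty(&page->deferred))
		return;

	pthread_mutex_lock(&ds_queue.lock);
	/* re-check under the lock before actually enqueueing */
	if (list_empty(&page->deferred)) {
		list_add_tail(&page->deferred, &ds_queue.head);
		ds_queue.len++;
	}
	pthread_mutex_unlock(&ds_queue.lock);
}

int main(void)
{
	struct fake_page p = { .deferred = { &p.deferred, &p.deferred } };

	deferred_split_sketch(&p);
	deferred_split_sketch(&p);		/* second call is a no-op */
	printf("queue length: %lu\n", ds_queue.len);	/* 1 */
	return 0;
}
```

The second call finds the node already linked and returns without touching the shared queue.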
2855 | 2837 | |
---|
2856 | 2838 | static unsigned long deferred_split_count(struct shrinker *shrink, |
---|
2857 | 2839 | struct shrink_control *sc) |
---|
2858 | 2840 | { |
---|
2859 | 2841 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
---|
2860 | | - return READ_ONCE(pgdata->split_queue_len); |
---|
| 2842 | + struct deferred_split *ds_queue = &pgdata->deferred_split_queue; |
---|
| 2843 | + |
---|
| 2844 | +#ifdef CONFIG_MEMCG |
---|
| 2845 | + if (sc->memcg) |
---|
| 2846 | + ds_queue = &sc->memcg->deferred_split_queue; |
---|
| 2847 | +#endif |
---|
| 2848 | + return READ_ONCE(ds_queue->split_queue_len); |
---|
2861 | 2849 | } |
---|
2862 | 2850 | |
---|
2863 | 2851 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
---|
2864 | 2852 | struct shrink_control *sc) |
---|
2865 | 2853 | { |
---|
2866 | 2854 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
---|
| 2855 | + struct deferred_split *ds_queue = &pgdata->deferred_split_queue; |
---|
2867 | 2856 | unsigned long flags; |
---|
2868 | 2857 | LIST_HEAD(list), *pos, *next; |
---|
2869 | 2858 | struct page *page; |
---|
2870 | 2859 | int split = 0; |
---|
2871 | 2860 | |
---|
2872 | | - spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
---|
| 2861 | +#ifdef CONFIG_MEMCG |
---|
| 2862 | + if (sc->memcg) |
---|
| 2863 | + ds_queue = &sc->memcg->deferred_split_queue; |
---|
| 2864 | +#endif |
---|
| 2865 | + |
---|
| 2866 | + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
---|
2873 | 2867 | /* Take pin on all head pages to avoid freeing them under us */ |
---|
2874 | | - list_for_each_safe(pos, next, &pgdata->split_queue) { |
---|
| 2868 | + list_for_each_safe(pos, next, &ds_queue->split_queue) { |
---|
2875 | 2869 | page = list_entry((void *)pos, struct page, mapping); |
---|
2876 | 2870 | page = compound_head(page); |
---|
2877 | 2871 | if (get_page_unless_zero(page)) { |
---|
.. | .. |
---|
2879 | 2873 | } else { |
---|
2880 | 2874 | /* We lost race with put_compound_page() */ |
---|
2881 | 2875 | list_del_init(page_deferred_list(page)); |
---|
2882 | | - pgdata->split_queue_len--; |
---|
| 2876 | + ds_queue->split_queue_len--; |
---|
2883 | 2877 | } |
---|
2884 | 2878 | if (!--sc->nr_to_scan) |
---|
2885 | 2879 | break; |
---|
2886 | 2880 | } |
---|
2887 | | - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
---|
| 2881 | + spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
---|
2888 | 2882 | |
---|
2889 | 2883 | list_for_each_safe(pos, next, &list) { |
---|
2890 | 2884 | page = list_entry((void *)pos, struct page, mapping); |
---|
.. | .. |
---|
2898 | 2892 | put_page(page); |
---|
2899 | 2893 | } |
---|
2900 | 2894 | |
---|
2901 | | - spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
---|
2902 | | - list_splice_tail(&list, &pgdata->split_queue); |
---|
2903 | | - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
---|
| 2895 | + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); |
---|
| 2896 | + list_splice_tail(&list, &ds_queue->split_queue); |
---|
| 2897 | + spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); |
---|
2904 | 2898 | |
---|
2905 | 2899 | /* |
---|
2906 | 2900 | * Stop shrinker if we didn't split any page, but the queue is empty. |
---|
2907 | 2901 | * This can happen if pages were freed under us. |
---|
2908 | 2902 | */ |
---|
2909 | | - if (!split && list_empty(&pgdata->split_queue)) |
---|
| 2903 | + if (!split && list_empty(&ds_queue->split_queue)) |
---|
2910 | 2904 | return SHRINK_STOP; |
---|
2911 | 2905 | return split; |
---|
2912 | 2906 | } |
---|
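deferred_split_scan() drains work in three steps: pull a bounded batch off the shared queue under the lock, do the expensive splitting with the lock dropped, then requeue whatever could not be split. A compact sketch of that drain-and-requeue shape, using an int array and a pthread mutex instead of struct page and split_queue_lock (all names and the "splitting succeeds for even values" rule are invented):

```c
/* Sketch of the batch-drain pattern used by deferred_split_scan(). */
#include <pthread.h>
#include <stdio.h>

#define QCAP 16

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[QCAP], queue_len;

/* pretend "splitting" item 'v' succeeds only for even values */
static int try_split(int v) { return (v % 2) == 0; }

static int scan_sketch(int nr_to_scan)
{
	int local[QCAP], taken = 0, kept = 0, split = 0, i;

	pthread_mutex_lock(&queue_lock);
	while (queue_len && taken < nr_to_scan)	/* grab a batch under the lock */
		local[taken++] = queue[--queue_len];
	pthread_mutex_unlock(&queue_lock);

	for (i = 0; i < taken; i++) {		/* expensive work, lock dropped */
		if (try_split(local[i]))
			split++;
		else
			local[kept++] = local[i];
	}

	pthread_mutex_lock(&queue_lock);	/* requeue the failures */
	for (i = 0; i < kept; i++)
		queue[queue_len++] = local[i];
	pthread_mutex_unlock(&queue_lock);

	return split;
}

int main(void)
{
	int split;

	for (queue_len = 0; queue_len < 6; queue_len++)
		queue[queue_len] = queue_len;	/* queue items 0..5 */
	split = scan_sketch(4);
	printf("split %d, still queued %d\n", split, queue_len);
	return 0;
}
```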
.. | .. |
---|
2915 | 2909 | .count_objects = deferred_split_count, |
---|
2916 | 2910 | .scan_objects = deferred_split_scan, |
---|
2917 | 2911 | .seeks = DEFAULT_SEEKS, |
---|
2918 | | - .flags = SHRINKER_NUMA_AWARE, |
---|
| 2912 | + .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | |
---|
| 2913 | + SHRINKER_NONSLAB, |
---|
2919 | 2914 | }; |
---|
2920 | 2915 | |
---|
2921 | 2916 | #ifdef CONFIG_DEBUG_FS |
---|
.. | .. |
---|
2959 | 2954 | |
---|
2960 | 2955 | return 0; |
---|
2961 | 2956 | } |
---|
2962 | | -DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, |
---|
| 2957 | +DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, |
---|
2963 | 2958 | "%llu\n"); |
---|
2964 | 2959 | |
---|
2965 | 2960 | static int __init split_huge_pages_debugfs(void) |
---|
2966 | 2961 | { |
---|
2967 | | - void *ret; |
---|
2968 | | - |
---|
2969 | | - ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, |
---|
2970 | | - &split_huge_pages_fops); |
---|
2971 | | - if (!ret) |
---|
2972 | | - pr_warn("Failed to create split_huge_pages in debugfs"); |
---|
| 2962 | + debugfs_create_file("split_huge_pages", 0200, NULL, NULL, |
---|
| 2963 | + &split_huge_pages_fops); |
---|
2973 | 2964 | return 0; |
---|
2974 | 2965 | } |
---|
2975 | 2966 | late_initcall(split_huge_pages_debugfs); |
---|
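The debugfs entry registered above accepts a single numeric write; in this version split_huge_pages_set() only accepts the value 1 and then attempts to split every THP it can find. A small userspace trigger, assuming debugfs is mounted at /sys/kernel/debug, the kernel was built with CONFIG_DEBUG_FS, and the program runs as root:

```c
/*
 * Userspace trigger for the debugfs knob registered above. Assumes
 * debugfs is mounted at /sys/kernel/debug and needs root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

	if (fd < 0) {
		perror("open split_huge_pages");
		return 1;
	}
	/* writing "1" asks the kernel to try splitting all THPs */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
```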
.. | .. |
---|
3021 | 3012 | pmde = pmd_mksoft_dirty(pmde); |
---|
3022 | 3013 | if (is_write_migration_entry(entry)) |
---|
3023 | 3014 | pmde = maybe_pmd_mkwrite(pmde, vma); |
---|
| 3015 | + if (pmd_swp_uffd_wp(*pvmw->pmd)) |
---|
| 3016 | + pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde)); |
---|
3024 | 3017 | |
---|
3025 | 3018 | flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); |
---|
3026 | 3019 | if (PageAnon(new)) |
---|