commit 102a0743326a03cd1a1202ceda21e175b7d3575c
Date:   2024-02-20

--- a/kernel/mm/hugetlb.c
+++ b/kernel/mm/hugetlb.c
@@ -78,6 +78,9 @@
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end);
+
 static inline bool PageHugeFreed(struct page *head)
 {
 	return page_private(head + 4) == -1UL;
@@ -3698,6 +3701,25 @@
 {
 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
 		return -EINVAL;
+
+	/*
+	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
+	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
+	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
+	 */
+	if (addr & ~PUD_MASK) {
+		/*
+		 * hugetlb_vm_op_split is called right before we attempt to
+		 * split the VMA. We will need to unshare PMDs in the old and
+		 * new VMAs, so let's unshare before we split.
+		 */
+		unsigned long floor = addr & PUD_MASK;
+		unsigned long ceil = floor + PUD_SIZE;
+
+		if (floor >= vma->vm_start && ceil <= vma->vm_end)
+			hugetlb_unshare_pmds(vma, floor, ceil);
+	}
+
 	return 0;
 }
 
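Note: as a worked illustration of the floor/ceil arithmetic in the hunk above, here is a standalone userspace sketch, not part of the patch. It assumes PUD_SHIFT == 30, i.e. a 1 GiB PUD_SIZE (the x86-64 default with 4K pages); the kernel derives these per architecture, and the sample address is hypothetical.

#include <stdio.h>

#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x7f1234600000UL;	/* hypothetical split address */

	/* mirrors the check in the hunk above: is addr PUD-aligned? */
	if (addr & ~PUD_MASK) {
		unsigned long floor = addr & PUD_MASK;	/* round down to PUD */
		unsigned long ceil = floor + PUD_SIZE;	/* next PUD boundary */

		/* PMDs in [floor, ceil) would be unshared before the split */
		printf("unshare [%#lx, %#lx)\n", floor, ceil);
	}
	return 0;
}

This prints "unshare [0x7f1200000000, 0x7f1240000000)": the 1 GiB interval surrounding the unaligned split point, which is exactly the range passed to hugetlb_unshare_pmds() when it lies inside the VMA.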
@@ -5696,14 +5718,14 @@
 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
 }
 
-bool isolate_huge_page(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct page *page, struct list_head *list)
 {
-	bool ret = true;
+	int ret = 0;
 
 	spin_lock(&hugetlb_lock);
 	if (!PageHeadHuge(page) || !page_huge_active(page) ||
 	    !get_page_unless_zero(page)) {
-		ret = false;
+		ret = -EBUSY;
 		goto unlock;
 	}
 	clear_page_huge_active(page);
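Note: with this change, callers must move from the bool convention (true on success) to the errno convention (0 on success, -EBUSY on failure). A hedged before/after sketch of a hypothetical caller follows; the names page, pagelist, and ret are illustrative, not taken from the patch.

	/* before: bool return, true on success */
	if (!isolate_huge_page(page, &pagelist))
		pr_warn("failed to isolate huge page\n");

	/* after: int return, 0 on success, negative errno on failure */
	ret = isolate_hugetlb(page, &pagelist);
	if (ret)
		pr_warn("failed to isolate huge page: %d\n", ret);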
@@ -5756,25 +5778,20 @@
 	}
 }
 
-/*
- * This function will unconditionally remove all the shared pmd pgtable entries
- * within the specific vma for a hugetlbfs memory range.
- */
-void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+				 unsigned long start,
+				 unsigned long end)
 {
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_notifier_range range;
-	unsigned long address, start, end;
+	unsigned long address;
 	spinlock_t *ptl;
 	pte_t *ptep;
 
 	if (!(vma->vm_flags & VM_MAYSHARE))
 		return;
-
-	start = ALIGN(vma->vm_start, PUD_SIZE);
-	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
 
 	if (start >= end)
 		return;
@@ -5808,6 +5825,16 @@
 	mmu_notifier_invalidate_range_end(&range);
 }
 
+/*
+ * This function will unconditionally remove all the shared pmd pgtable entries
+ * within the specific vma for a hugetlbfs memory range.
+ */
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+{
+	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
+			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+}
+
 #ifdef CONFIG_CMA
 static bool cma_reserve_called __initdata;
 
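Note: hugetlb_unshare_all_pmds() rounds inward, start up and end down, so only PUD ranges lying entirely inside the VMA are touched; a VMA narrower than one PUD range falls out via the start >= end check. Below is a standalone sketch, not part of the patch, with ALIGN()/ALIGN_DOWN() reimplemented with their usual kernel semantics for power-of-two alignments; the 1 GiB PUD_SIZE and the VMA bounds are assumptions.

#include <stdio.h>

#define PUD_SIZE		(1UL << 30)	/* assumed: x86-64, 4K pages */
#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* hypothetical VMA bounds */
	unsigned long vm_start = 0x7f1234600000UL;
	unsigned long vm_end = 0x7f1300000000UL;

	unsigned long start = ALIGN(vm_start, PUD_SIZE);	/* round up */
	unsigned long end = ALIGN_DOWN(vm_end, PUD_SIZE);	/* round down */

	/* only whole PUD ranges inside the VMA can hold shared PMDs */
	if (start < end)
		printf("unshare [%#lx, %#lx)\n", start, end);
	else
		printf("no whole PUD range inside the VMA\n");
	return 0;
}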