@@ -78,6 +78,9 @@
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+                unsigned long start, unsigned long end);
+
 static inline bool PageHugeFreed(struct page *head)
 {
         return page_private(head + 4) == -1UL;
@@ -3698,6 +3701,25 @@
 {
         if (addr & ~(huge_page_mask(hstate_vma(vma))))
                 return -EINVAL;
+
+        /*
+         * PMD sharing is only possible for PUD_SIZE-aligned address ranges
+         * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
+         * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
+         */
+        if (addr & ~PUD_MASK) {
+                /*
+                 * hugetlb_vm_op_split is called right before we attempt to
+                 * split the VMA. We will need to unshare PMDs in the old and
+                 * new VMAs, so let's unshare before we split.
+                 */
+                unsigned long floor = addr & PUD_MASK;
+                unsigned long ceil = floor + PUD_SIZE;
+
+                if (floor >= vma->vm_start && ceil <= vma->vm_end)
+                        hugetlb_unshare_pmds(vma, floor, ceil);
+        }
+
         return 0;
 }
 
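
The guard above only fires when the split address is not PUD-aligned; in that case the whole PUD_SIZE interval around the split point must be unshared, since neither resulting VMA could keep sharing a PMD page that straddles the split. As a sanity check on the floor/ceil arithmetic, here is a minimal userspace sketch; PUD_SHIFT/PUD_SIZE/PUD_MASK and the example address are illustrative assumptions (x86-64's 1 GiB PUD), not values taken from kernel headers.

#include <stdio.h>

#define PUD_SHIFT       30                      /* assume x86-64: 1 GiB per PUD entry */
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE - 1))

int main(void)
{
        /* Hypothetical split address: 2 MiB aligned, but not 1 GiB aligned. */
        unsigned long addr = 0x40200000UL;

        if (addr & ~PUD_MASK) {
                unsigned long floor = addr & PUD_MASK;  /* 0x40000000 */
                unsigned long ceil = floor + PUD_SIZE;  /* 0x80000000 */

                /* The kernel path would call hugetlb_unshare_pmds() here. */
                printf("unshare [%#lx, %#lx)\n", floor, ceil);
        }
        return 0;
}
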
@@ -5696,14 +5718,14 @@
         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
 }
 
-bool isolate_huge_page(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct page *page, struct list_head *list)
 {
-        bool ret = true;
+        int ret = 0;
 
         spin_lock(&hugetlb_lock);
         if (!PageHeadHuge(page) || !page_huge_active(page) ||
             !get_page_unless_zero(page)) {
-                ret = false;
+                ret = -EBUSY;
                 goto unlock;
         }
         clear_page_huge_active(page);
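
With this hunk, isolate_huge_page()'s bare bool gives way to the kernel's usual errno convention: isolate_hugetlb() returns 0 on success and -EBUSY when the page cannot be isolated, which flips the sense of callers' checks. A hypothetical caller (pagelist and put_and_fail are placeholder names, not from this patch) would be converted along these lines:

/* Before: boolean result, true on success. */
if (!isolate_huge_page(page, &pagelist))
        goto put_and_fail;

/* After: errno-style result, 0 on success, -EBUSY on failure. */
if (isolate_hugetlb(page, &pagelist))
        goto put_and_fail;
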
@@ -5756,25 +5778,20 @@
         }
 }
 
-/*
- * This function will unconditionally remove all the shared pmd pgtable entries
- * within the specific vma for a hugetlbfs memory range.
- */
-void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+                                 unsigned long start,
+                                 unsigned long end)
 {
         struct hstate *h = hstate_vma(vma);
         unsigned long sz = huge_page_size(h);
         struct mm_struct *mm = vma->vm_mm;
         struct mmu_notifier_range range;
-        unsigned long address, start, end;
+        unsigned long address;
         spinlock_t *ptl;
         pte_t *ptep;
 
         if (!(vma->vm_flags & VM_MAYSHARE))
                 return;
-
-        start = ALIGN(vma->vm_start, PUD_SIZE);
-        end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
 
         if (start >= end)
                 return;
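
The start >= end check is kept even though the ALIGN()/ALIGN_DOWN() pair moved out to the callers: for a VMA smaller than PUD_SIZE, rounding vm_start up and vm_end down yields an inverted, empty range. A standalone sketch with illustrative addresses, again assuming a 1 GiB PUD_SIZE (not kernel code):

#include <stdio.h>

#define PUD_SIZE         (1UL << 30)            /* assume 1 GiB, as on x86-64 */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        /* Hypothetical 2 MiB hugetlb VMA, much smaller than PUD_SIZE. */
        unsigned long vm_start = 0x40200000UL, vm_end = 0x40400000UL;
        unsigned long start = ALIGN(vm_start, PUD_SIZE);        /* 0x80000000 */
        unsigned long end = ALIGN_DOWN(vm_end, PUD_SIZE);       /* 0x40000000 */

        /* start > end: hugetlb_unshare_pmds() would bail out immediately. */
        printf("start=%#lx end=%#lx empty=%d\n", start, end, start >= end);
        return 0;
}
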
@@ -5808,6 +5825,16 @@
         mmu_notifier_invalidate_range_end(&range);
 }
 
+/*
+ * This function will unconditionally remove all the shared pmd pgtable entries
+ * within the specific vma for a hugetlbfs memory range.
+ */
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+{
+        hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
+                        ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+}
+
 #ifdef CONFIG_CMA
 static bool cma_reserve_called __initdata;
 
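
The re-added hugetlb_unshare_all_pmds() is now a thin wrapper that rounds inward, so only PUD intervals fully covered by the VMA are unshared; the split path earlier rounds outward around the split address instead. A sketch of the wrapper's range computation on a hypothetical VMA (illustrative values, 1 GiB PUD_SIZE assumed):

#include <stdio.h>

#define PUD_SIZE         (1UL << 30)            /* assume 1 GiB, as on x86-64 */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        /* Hypothetical VMA covering 0.5 GiB .. 2.5 GiB. */
        unsigned long vm_start = 0x20000000UL, vm_end = 0xA0000000UL;

        /* Inward rounding keeps only the fully-covered [1 GiB, 2 GiB). */
        printf("unshare [%#lx, %#lx)\n",
               ALIGN(vm_start, PUD_SIZE), ALIGN_DOWN(vm_end, PUD_SIZE));
        return 0;
}
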