@@ -246,6 +246,16 @@
 			   unsigned long addr)
 {
 	pgtable_t token = pmd_pgtable(*pmd);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	/*
+	 * Ensure page table destruction is blocked if __pte_map_lock managed
+	 * to take this lock. Without this barrier, tlb_remove_table_rcu can
+	 * destroy the ptl after __pte_map_lock has locked it, and the
+	 * subsequent unlock would then be a use-after-free.
+	 */
+	spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+	spin_unlock(ptl);
+#endif
 	pmd_clear(pmd);
 	pte_free_tlb(tlb, token, addr);
 	mm_dec_nr_ptes(tlb->mm);
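The pmd_lock()/spin_unlock() pair looks like a no-op, but it acts as a barrier: teardown cannot pass it while a speculative faulter still holds the ptl, so that faulter's unlock completes before the page table is freed. Below is a minimal userspace sketch of the same pattern using pthreads; all names in it (struct pte_page, speculative_lookup) are invented for illustration, and it is not kernel code.

```c
/*
 * barrier_demo.c - userspace sketch, not kernel code. Models the
 * lock/unlock barrier free_pte_range() uses above.
 *
 * Build: cc -pthread barrier_demo.c -o barrier_demo
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pte_page {
	pthread_mutex_t ptl;	/* models the page-table spinlock */
	int nr_handled;		/* dummy payload the lookup touches */
};

/* Models __pte_map_lock(): trylock the ptl, use the table, unlock. */
static void *speculative_lookup(void *arg)
{
	struct pte_page *pt = arg;

	if (pthread_mutex_trylock(&pt->ptl) == 0) {
		pt->nr_handled++;		/* "handle the fault" */
		pthread_mutex_unlock(&pt->ptl);	/* must finish before free() */
	}
	return NULL;
}

int main(void)
{
	struct pte_page *pt = calloc(1, sizeof(*pt));
	pthread_t spec;

	pthread_mutex_init(&pt->ptl, NULL);
	pthread_create(&spec, NULL, speculative_lookup, pt);

	/*
	 * Models the hunk above: taking and releasing the same lock is a
	 * barrier. If the lookup already holds the ptl, we block here until
	 * its unlock has fully completed, so free() cannot pull the lock
	 * out from under an in-flight unlock.
	 */
	pthread_mutex_lock(&pt->ptl);
	pthread_mutex_unlock(&pt->ptl);

	/*
	 * The join is only so the demo exits cleanly. In the kernel there is
	 * no join: a *later* trylock against freed memory is instead ruled
	 * out by the pmd_same() recheck on the lookup side (next hunks).
	 */
	pthread_join(spec, NULL);
	pthread_mutex_destroy(&pt->ptl);
	free(pt);
	puts("teardown done");
	return 0;
}
```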
@@ -2627,9 +2637,7 @@
 static bool pte_spinlock(struct vm_fault *vmf)
 {
 	bool ret = false;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmdval;
-#endif
 
 	/* Check if vma is still valid */
 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
@@ -2644,24 +2652,28 @@
 		goto out;
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/*
 	 * We check if the pmd value is still the same to ensure that there
 	 * is not a huge collapse operation in progress behind our back.
+	 * It also ensures that pmd was not cleared by pmd_clear in
+	 * free_pte_range and that the ptl is still valid.
 	 */
 	pmdval = READ_ONCE(*vmf->pmd);
 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
-#endif
 
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
 	if (unlikely(!spin_trylock(vmf->ptl))) {
 		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
 
+	/*
+	 * The check below will fail if free_pte_range passed its ptl barrier
+	 * before we took the ptl lock.
+	 */
 	if (vma_has_changed(vmf)) {
 		spin_unlock(vmf->ptl);
 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
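The substantive change in pte_spinlock() is that the lock pointer is now derived from the snapshotted pmdval rather than from a fresh dereference of vmf->pmd, with the pmd_same() and vma_has_changed() checks bracketing the trylock. Here is a userspace C11 sketch of that snapshot/validate/trylock/revalidate pattern; the names (struct entry, snapshot_lock) are invented and the atomics stand in for READ_ONCE and the kernel's sequence checks.

```c
/*
 * snapshot_lock.c - userspace sketch, not kernel code. Models the
 * optimistic locking pattern pte_spinlock() uses above.
 *
 * Build: cc -pthread snapshot_lock.c -o snapshot_lock
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	_Atomic unsigned long pmd;	/* word a racing teardown can clear */
	pthread_mutex_t ptl;		/* lock guarding what pmd maps */
};

/* Returns true with e->ptl held only if pmd is still the expected value. */
static bool snapshot_lock(struct entry *e, unsigned long orig_pmd)
{
	/* pmdval = READ_ONCE(*vmf->pmd): one coherent snapshot, used for
	 * every step below instead of re-reading shared memory. */
	unsigned long pmdval = atomic_load(&e->pmd);

	if (pmdval != orig_pmd)				/* pmd_same() */
		return false;

	if (pthread_mutex_trylock(&e->ptl) != 0)	/* spin_trylock() */
		return false;

	/* Revalidate under the lock: if teardown changed pmd between the
	 * snapshot and the trylock, back out instead of trusting stale
	 * state (the kernel uses vma_has_changed() here). */
	if (atomic_load(&e->pmd) != orig_pmd) {
		pthread_mutex_unlock(&e->ptl);
		return false;
	}
	return true;
}

int main(void)
{
	struct entry e;

	atomic_init(&e.pmd, 42);
	pthread_mutex_init(&e.ptl, NULL);

	if (snapshot_lock(&e, 42)) {
		puts("locked with a validated snapshot");
		pthread_mutex_unlock(&e.ptl);
	}
	if (!snapshot_lock(&e, 7))
		puts("stale snapshot refused");
	pthread_mutex_destroy(&e.ptl);
	return 0;
}
```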
@@ -2679,9 +2691,7 @@
 	bool ret = false;
 	pte_t *pte;
 	spinlock_t *ptl;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmdval;
-#endif
 
 	/*
 	 * The first vma_has_changed() guarantees the page-tables are still
@@ -2696,7 +2706,6 @@
 		goto out;
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/*
 	 * We check if the pmd value is still the same to ensure that there
 	 * is not a huge collapse operation in progress behind our back.
@@ -2706,7 +2715,6 @@
 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
 		goto out;
 	}
-#endif
 
 	/*
 	 * Same as pte_offset_map_lock() except that we call
@@ -2715,14 +2723,18 @@
 	 * to invalidate TLB but this CPU has irq disabled.
 	 * Since we are in a speculative path, accept that it could fail
 	 */
-	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	pte = pte_offset_map(vmf->pmd, addr);
+	ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
+	pte = pte_offset_map(&pmdval, addr);
 	if (unlikely(!spin_trylock(ptl))) {
 		pte_unmap(pte);
 		trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
 		goto out;
 	}
 
+	/*
+	 * The check below will fail if free_pte_range passed its ptl barrier
+	 * before we took the ptl lock.
+	 */
 	if (vma_has_changed(vmf)) {
 		pte_unmap_unlock(pte, ptl);
 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
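This hunk applies the same idea twice: both pte_lockptr() and pte_offset_map() now consume the one snapshot (&pmdval), so the lock and the pte pointer are guaranteed to be derived from the same observed pmd value. Re-reading vmf->pmd for each call would be a classic double-read (TOCTOU) bug. A toy illustration of the two shapes, with invented names, follows.

```c
/*
 * double_read.c - toy illustration, not kernel code. Shows why the hunk
 * above feeds one snapshot (&pmdval) to both pte_lockptr() and
 * pte_offset_map() instead of re-reading vmf->pmd.
 */
#include <stdatomic.h>
#include <stdio.h>

struct derived { long lockptr; long pteptr; };

/* Racy shape (the removed code): two independent reads of shared state,
 * so lockptr and pteptr can be computed from different values if a
 * concurrent pmd_clear() or collapse lands between the reads. */
static struct derived derive_rereading(_Atomic long *pmd, long off)
{
	return (struct derived){
		.lockptr = atomic_load(pmd),		/* read #1 */
		.pteptr  = atomic_load(pmd) + off,	/* read #2 */
	};
}

/* Fixed shape (the added code): one READ_ONCE-style snapshot, everything
 * derived from the local copy; validity is checked separately, by
 * pmd_same() before and vma_has_changed() after the trylock. */
static struct derived derive_from_snapshot(_Atomic long *pmd, long off)
{
	long pmdval = atomic_load(pmd);	/* pmdval = READ_ONCE(*vmf->pmd) */

	return (struct derived){ .lockptr = pmdval, .pteptr = pmdval + off };
}

int main(void)
{
	_Atomic long pmd;
	struct derived d;

	atomic_init(&pmd, 0x1000);
	d = derive_from_snapshot(&pmd, 0x8);
	printf("lockptr=%#lx pteptr=%#lx (always consistent)\n",
	       d.lockptr, d.pteptr);
	(void)derive_rereading;	/* kept only to show the racy shape */
	return 0;
}
```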
@@ -3651,8 +3663,10 @@
 
 	if (!page) {
 		struct swap_info_struct *si = swp_swap_info(entry);
+		bool skip_swapcache = false;
 
-		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
+		trace_android_vh_skip_swapcache(entry, &skip_swapcache);
+		if ((data_race(si->flags & SWP_SYNCHRONOUS_IO) || skip_swapcache) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
 			gfp_t flags = GFP_HIGHUSER_MOVABLE;
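trace_android_vh_skip_swapcache() is an Android vendor hook: do_swap_page() initializes skip_swapcache to a safe default, and an out-of-tree module may flip it to opt additional devices into the SWP_SYNCHRONOUS_IO-style fast path. With no hook registered, behavior is unchanged. A userspace sketch of that hook shape, with invented registration and policy names:

```c
/*
 * vh_sketch.c - userspace sketch, not kernel code. Models the vendor-hook
 * pattern used above: a safe-default out-parameter an optional hook can
 * override. All names (vh_skip_swapcache, vendor_hook) are invented.
 */
#include <stdbool.h>
#include <stdio.h>

typedef void (*skip_swapcache_hook)(unsigned long entry, bool *skip);

static skip_swapcache_hook vh_skip_swapcache;	/* NULL: no vendor module */

static void trace_skip_swapcache(unsigned long entry, bool *skip)
{
	if (vh_skip_swapcache)
		vh_skip_swapcache(entry, skip);	/* vendor may set *skip */
}

/* Example vendor policy: bypass the swap cache for a fast-device range. */
static void vendor_hook(unsigned long entry, bool *skip)
{
	if (entry >= 0x100)
		*skip = true;
}

static void do_swap(unsigned long entry, bool synchronous_io)
{
	bool skip_swapcache = false;	/* safe default, as in the patch */

	trace_skip_swapcache(entry, &skip_swapcache);
	if (synchronous_io || skip_swapcache)
		printf("entry %#lx: skip swapcache\n", entry);
	else
		printf("entry %#lx: go through swapcache\n", entry);
}

int main(void)
{
	do_swap(0x200, false);			/* no hook: swapcache */
	vh_skip_swapcache = vendor_hook;	/* "module" registers */
	do_swap(0x200, false);			/* hook: skipped */
	return 0;
}
```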