hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/mm/memory.c
....@@ -246,6 +246,16 @@
246246 unsigned long addr)
247247 {
248248 pgtable_t token = pmd_pgtable(*pmd);
249
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
250
+ /*
251
+ * Ensure page table destruction is blocked if __pte_map_lock managed
252
+	 * to take this lock. Without this barrier tlb_remove_table_rcu can
253
+	 * destroy the ptl after __pte_map_lock has locked it, and the
254
+	 * subsequent unlock would then be a use-after-free.
255
+ */
256
+ spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
257
+ spin_unlock(ptl);
258
+#endif
249259 pmd_clear(pmd);
250260 pte_free_tlb(tlb, token, addr);
251261 mm_dec_nr_ptes(tlb->mm);
....@@ -2627,9 +2637,7 @@
26272637 static bool pte_spinlock(struct vm_fault *vmf)
26282638 {
26292639 bool ret = false;
2630
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
26312640 pmd_t pmdval;
2632
-#endif
26332641
26342642 /* Check if vma is still valid */
26352643 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
....@@ -2644,24 +2652,28 @@
26442652 goto out;
26452653 }
26462654
2647
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
26482655 /*
26492656 * We check if the pmd value is still the same to ensure that there
26502657 * is not a huge collapse operation in progress in our back.
2658
+ * It also ensures that pmd was not cleared by pmd_clear in
2659
+ * free_pte_range and ptl is still valid.
26512660 */
26522661 pmdval = READ_ONCE(*vmf->pmd);
26532662 if (!pmd_same(pmdval, vmf->orig_pmd)) {
26542663 trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
26552664 goto out;
26562665 }
2657
-#endif
26582666
2659
- vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2667
+ vmf->ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
26602668 if (unlikely(!spin_trylock(vmf->ptl))) {
26612669 trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
26622670 goto out;
26632671 }
26642672
2673
+ /*
2674
+	 * The check below will fail if free_pte_range passed its ptl barrier
2675
+ * before we took the ptl lock.
2676
+ */
26652677 if (vma_has_changed(vmf)) {
26662678 spin_unlock(vmf->ptl);
26672679 trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
....@@ -2679,9 +2691,7 @@
26792691 bool ret = false;
26802692 pte_t *pte;
26812693 spinlock_t *ptl;
2682
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
26832694 pmd_t pmdval;
2684
-#endif
26852695
26862696 /*
26872697 * The first vma_has_changed() guarantees the page-tables are still
....@@ -2696,7 +2706,6 @@
26962706 goto out;
26972707 }
26982708
2699
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
27002709 /*
27012710 * We check if the pmd value is still the same to ensure that there
27022711 * is not a huge collapse operation in progress in our back.
....@@ -2706,7 +2715,6 @@
27062715 trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
27072716 goto out;
27082717 }
2709
-#endif
27102718
27112719 /*
27122720 * Same as pte_offset_map_lock() except that we call
....@@ -2715,14 +2723,18 @@
27152723 * to invalidate TLB but this CPU has irq disabled.
27162724 * Since we are in a speculative patch, accept it could fail
27172725 */
2718
- ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2719
- pte = pte_offset_map(vmf->pmd, addr);
2726
+ ptl = pte_lockptr(vmf->vma->vm_mm, &pmdval);
2727
+ pte = pte_offset_map(&pmdval, addr);
27202728 if (unlikely(!spin_trylock(ptl))) {
27212729 pte_unmap(pte);
27222730 trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
27232731 goto out;
27242732 }
27252733
2734
+ /*
2735
+	 * The check below will fail if free_pte_range passed its ptl
2736
+ * barrier before we took the ptl lock.
2737
+ */
27262738 if (vma_has_changed(vmf)) {
27272739 pte_unmap_unlock(pte, ptl);
27282740 trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
....@@ -3651,8 +3663,10 @@
36513663
36523664 if (!page) {
36533665 struct swap_info_struct *si = swp_swap_info(entry);
3666
+ bool skip_swapcache = false;
36543667
3655
- if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3668
+ trace_android_vh_skip_swapcache(entry, &skip_swapcache);
3669
+ if ((data_race(si->flags & SWP_SYNCHRONOUS_IO) || skip_swapcache) &&
36563670 __swap_count(entry) == 1) {
36573671 /* skip swapcache */
36583672 gfp_t flags = GFP_HIGHUSER_MOVABLE;